file_path
stringlengths
20
202
content
stringlengths
9
3.85M
size
int64
9
3.85M
lang
stringclasses
9 values
avg_line_length
float64
3.33
100
max_line_length
int64
8
993
alphanum_fraction
float64
0.26
0.93
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/docs/index.rst
Isaac Python REPL [omni.isaac.repl] ###################################
72
reStructuredText
23.333326
35
0.388889
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/util.py
""" pygments.util ~~~~~~~~~~~~~ Utility functions. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from io import TextIOWrapper split_path_re = re.compile(r'[/\\ ]') doctype_lookup_re = re.compile(r''' <!DOCTYPE\s+( [a-zA-Z_][a-zA-Z0-9]* (?: \s+ # optional in HTML5 [a-zA-Z_][a-zA-Z0-9]*\s+ "[^"]*")? ) [^>]*> ''', re.DOTALL | re.MULTILINE | re.VERBOSE) tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>', re.IGNORECASE | re.DOTALL | re.MULTILINE) xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I) class ClassNotFound(ValueError): """Raised if one of the lookup functions didn't find a matching class.""" class OptionError(Exception): pass def get_choice_opt(options, optname, allowed, default=None, normcase=False): string = options.get(optname, default) if normcase: string = string.lower() if string not in allowed: raise OptionError('Value for option %s must be one of %s' % (optname, ', '.join(map(str, allowed)))) return string def get_bool_opt(options, optname, default=None): string = options.get(optname, default) if isinstance(string, bool): return string elif isinstance(string, int): return bool(string) elif not isinstance(string, str): raise OptionError('Invalid type %r for option %s; use ' '1/0, yes/no, true/false, on/off' % ( string, optname)) elif string.lower() in ('1', 'yes', 'true', 'on'): return True elif string.lower() in ('0', 'no', 'false', 'off'): return False else: raise OptionError('Invalid value %r for option %s; use ' '1/0, yes/no, true/false, on/off' % ( string, optname)) def get_int_opt(options, optname, default=None): string = options.get(optname, default) try: return int(string) except TypeError: raise OptionError('Invalid type %r for option %s; you ' 'must give an integer value' % ( string, optname)) except ValueError: raise OptionError('Invalid value %r for option %s; you ' 'must give an integer value' % ( string, optname)) def get_list_opt(options, optname, default=None): val 
= options.get(optname, default) if isinstance(val, str): return val.split() elif isinstance(val, (list, tuple)): return list(val) else: raise OptionError('Invalid type %r for option %s; you ' 'must give a list value' % ( val, optname)) def docstring_headline(obj): if not obj.__doc__: return '' res = [] for line in obj.__doc__.strip().splitlines(): if line.strip(): res.append(" " + line.strip()) else: break return ''.join(res).lstrip() def make_analysator(f): """Return a static text analyser function that returns float values.""" def text_analyse(text): try: rv = f(text) except Exception: return 0.0 if not rv: return 0.0 try: return min(1.0, max(0.0, float(rv))) except (ValueError, TypeError): return 0.0 text_analyse.__doc__ = f.__doc__ return staticmethod(text_analyse) def shebang_matches(text, regex): r"""Check if the given regular expression matches the last part of the shebang if one exists. >>> from pygments.util import shebang_matches >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/startsomethingwith python', ... 
r'python(2\.\d)?') True It also checks for common windows executable file extensions:: >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?') True Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does the same as ``'perl -e'``) Note that this method automatically searches the whole string (eg: the regular expression is wrapped in ``'^$'``) """ index = text.find('\n') if index >= 0: first_line = text[:index].lower() else: first_line = text.lower() if first_line.startswith('#!'): try: found = [x for x in split_path_re.split(first_line[2:].strip()) if x and not x.startswith('-')][-1] except IndexError: return False regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE) if regex.search(found) is not None: return True return False def doctype_matches(text, regex): """Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE. eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"' """ m = doctype_lookup_re.search(text) if m is None: return False doctype = m.group(1) return re.compile(regex, re.I).match(doctype.strip()) is not None def html_doctype_matches(text): """Check if the file looks like it has a html doctype.""" return doctype_matches(text, r'html') _looks_like_xml_cache = {} def looks_like_xml(text): """Check if a doctype exists or if we have some tags.""" if xml_decl_re.match(text): return True key = hash(text) try: return _looks_like_xml_cache[key] except KeyError: m = doctype_lookup_re.search(text) if m is not None: return True rv = tag_re.search(text[:1000]) is not None _looks_like_xml_cache[key] = rv return rv def surrogatepair(c): """Given a unicode character code with length greater than 16 bits, return the two 16 bit surrogate pair. 
""" # From example D28 of: # http://www.unicode.org/book/ch03.pdf return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff))) def format_lines(var_name, seq, raw=False, indent_level=0): """Formats a sequence of strings for output.""" lines = [] base_indent = ' ' * indent_level * 4 inner_indent = ' ' * (indent_level + 1) * 4 lines.append(base_indent + var_name + ' = (') if raw: # These should be preformatted reprs of, say, tuples. for i in seq: lines.append(inner_indent + i + ',') else: for i in seq: # Force use of single quotes r = repr(i + '"') lines.append(inner_indent + r[:-2] + r[-1] + ',') lines.append(base_indent + ')') return '\n'.join(lines) def duplicates_removed(it, already_seen=()): """ Returns a list with duplicates removed from the iterable `it`. Order is preserved. """ lst = [] seen = set() for i in it: if i in seen or i in already_seen: continue lst.append(i) seen.add(i) return lst class Future: """Generic class to defer some work. Handled specially in RegexLexerMeta, to support regex string construction at first use. """ def get(self): raise NotImplementedError def guess_decode(text): """Decode *text* with guessed encoding. First try UTF-8; this should fail for non-UTF-8 encodings. Then try the preferred locale encoding. Fall back to latin-1, which always works. """ try: text = text.decode('utf-8') return text, 'utf-8' except UnicodeDecodeError: try: import locale prefencoding = locale.getpreferredencoding() text = text.decode() return text, prefencoding except (UnicodeDecodeError, LookupError): text = text.decode('latin1') return text, 'latin1' def guess_decode_from_terminal(text, term): """Decode *text* coming from terminal *term*. First try the terminal encoding, if given. Then try UTF-8. Then try the preferred locale encoding. Fall back to latin-1, which always works. 
""" if getattr(term, 'encoding', None): try: text = text.decode(term.encoding) except UnicodeDecodeError: pass else: return text, term.encoding return guess_decode(text) def terminal_encoding(term): """Return our best guess of encoding for the given *term*.""" if getattr(term, 'encoding', None): return term.encoding import locale return locale.getpreferredencoding() class UnclosingTextIOWrapper(TextIOWrapper): # Don't close underlying buffer on destruction. def close(self): self.flush()
9,110
Python
28.485437
80
0.568825
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/cmdline.py
""" pygments.cmdline ~~~~~~~~~~~~~~~~ Command line interface. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import sys import shutil import argparse from textwrap import dedent from pygments import __version__, highlight from pygments.util import ClassNotFound, OptionError, docstring_headline, \ guess_decode, guess_decode_from_terminal, terminal_encoding, \ UnclosingTextIOWrapper from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \ load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename from pygments.lexers.special import TextLexer from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter from pygments.formatters import get_all_formatters, get_formatter_by_name, \ load_formatter_from_file, get_formatter_for_filename, find_formatter_class from pygments.formatters.terminal import TerminalFormatter from pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter from pygments.filters import get_all_filters, find_filter_class from pygments.styles import get_all_styles, get_style_by_name def _parse_options(o_strs): opts = {} if not o_strs: return opts for o_str in o_strs: if not o_str.strip(): continue o_args = o_str.split(',') for o_arg in o_args: o_arg = o_arg.strip() try: o_key, o_val = o_arg.split('=', 1) o_key = o_key.strip() o_val = o_val.strip() except ValueError: opts[o_arg] = True else: opts[o_key] = o_val return opts def _parse_filters(f_strs): filters = [] if not f_strs: return filters for f_str in f_strs: if ':' in f_str: fname, fopts = f_str.split(':', 1) filters.append((fname, _parse_options([fopts]))) else: filters.append((f_str, {})) return filters def _print_help(what, name): try: if what == 'lexer': cls = get_lexer_by_name(name) print("Help on the %s lexer:" % cls.name) print(dedent(cls.__doc__)) elif what == 'formatter': cls = find_formatter_class(name) print("Help on the %s formatter:" % 
cls.name) print(dedent(cls.__doc__)) elif what == 'filter': cls = find_filter_class(name) print("Help on the %s filter:" % name) print(dedent(cls.__doc__)) return 0 except (AttributeError, ValueError): print("%s not found!" % what, file=sys.stderr) return 1 def _print_list(what): if what == 'lexer': print() print("Lexers:") print("~~~~~~~") info = [] for fullname, names, exts, _ in get_all_lexers(): tup = (', '.join(names)+':', fullname, exts and '(filenames ' + ', '.join(exts) + ')' or '') info.append(tup) info.sort() for i in info: print(('* %s\n %s %s') % i) elif what == 'formatter': print() print("Formatters:") print("~~~~~~~~~~~") info = [] for cls in get_all_formatters(): doc = docstring_headline(cls) tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and '(filenames ' + ', '.join(cls.filenames) + ')' or '') info.append(tup) info.sort() for i in info: print(('* %s\n %s %s') % i) elif what == 'filter': print() print("Filters:") print("~~~~~~~~") for name in get_all_filters(): cls = find_filter_class(name) print("* " + name + ':') print(" %s" % docstring_headline(cls)) elif what == 'style': print() print("Styles:") print("~~~~~~~") for name in get_all_styles(): cls = get_style_by_name(name) print("* " + name + ':') print(" %s" % docstring_headline(cls)) def _print_list_as_json(requested_items): import json result = {} if 'lexer' in requested_items: info = {} for fullname, names, filenames, mimetypes in get_all_lexers(): info[fullname] = { 'aliases': names, 'filenames': filenames, 'mimetypes': mimetypes } result['lexers'] = info if 'formatter' in requested_items: info = {} for cls in get_all_formatters(): doc = docstring_headline(cls) info[cls.name] = { 'aliases': cls.aliases, 'filenames': cls.filenames, 'doc': doc } result['formatters'] = info if 'filter' in requested_items: info = {} for name in get_all_filters(): cls = find_filter_class(name) info[name] = { 'doc': docstring_headline(cls) } result['filters'] = info if 'style' in requested_items: info = {} 
for name in get_all_styles(): cls = get_style_by_name(name) info[name] = { 'doc': docstring_headline(cls) } result['styles'] = info json.dump(result, sys.stdout) def main_inner(parser, argns): if argns.help: parser.print_help() return 0 if argns.V: print('Pygments version %s, (c) 2006-2022 by Georg Brandl, Matthäus ' 'Chajdas and contributors.' % __version__) return 0 def is_only_option(opt): return not any(v for (k, v) in vars(argns).items() if k != opt) # handle ``pygmentize -L`` if argns.L is not None: arg_set = set() for k, v in vars(argns).items(): if v: arg_set.add(k) arg_set.discard('L') arg_set.discard('json') if arg_set: parser.print_help(sys.stderr) return 2 # print version if not argns.json: main(['', '-V']) allowed_types = {'lexer', 'formatter', 'filter', 'style'} largs = [arg.rstrip('s') for arg in argns.L] if any(arg not in allowed_types for arg in largs): parser.print_help(sys.stderr) return 0 if not largs: largs = allowed_types if not argns.json: for arg in largs: _print_list(arg) else: _print_list_as_json(largs) return 0 # handle ``pygmentize -H`` if argns.H: if not is_only_option('H'): parser.print_help(sys.stderr) return 2 what, name = argns.H if what not in ('lexer', 'formatter', 'filter'): parser.print_help(sys.stderr) return 2 return _print_help(what, name) # parse -O options parsed_opts = _parse_options(argns.O or []) # parse -P options for p_opt in argns.P or []: try: name, value = p_opt.split('=', 1) except ValueError: parsed_opts[p_opt] = True else: parsed_opts[name] = value # encodings inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding')) outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding')) # handle ``pygmentize -N`` if argns.N: lexer = find_lexer_class_for_filename(argns.N) if lexer is None: lexer = TextLexer print(lexer.aliases[0]) return 0 # handle ``pygmentize -C`` if argns.C: inp = sys.stdin.buffer.read() try: lexer = guess_lexer(inp, inencoding=inencoding) except ClassNotFound: lexer = 
TextLexer print(lexer.aliases[0]) return 0 # handle ``pygmentize -S`` S_opt = argns.S a_opt = argns.a if S_opt is not None: f_opt = argns.f if not f_opt: parser.print_help(sys.stderr) return 2 if argns.l or argns.INPUTFILE: parser.print_help(sys.stderr) return 2 try: parsed_opts['style'] = S_opt fmter = get_formatter_by_name(f_opt, **parsed_opts) except ClassNotFound as err: print(err, file=sys.stderr) return 1 print(fmter.get_style_defs(a_opt or '')) return 0 # if no -S is given, -a is not allowed if argns.a is not None: parser.print_help(sys.stderr) return 2 # parse -F options F_opts = _parse_filters(argns.F or []) # -x: allow custom (eXternal) lexers and formatters allow_custom_lexer_formatter = bool(argns.x) # select lexer lexer = None # given by name? lexername = argns.l if lexername: # custom lexer, located relative to user's cwd if allow_custom_lexer_formatter and '.py' in lexername: try: filename = None name = None if ':' in lexername: filename, name = lexername.rsplit(':', 1) if '.py' in name: # This can happen on Windows: If the lexername is # C:\lexer.py -- return to normal load path in that case name = None if filename and name: lexer = load_lexer_from_file(filename, name, **parsed_opts) else: lexer = load_lexer_from_file(lexername, **parsed_opts) except ClassNotFound as err: print('Error:', err, file=sys.stderr) return 1 else: try: lexer = get_lexer_by_name(lexername, **parsed_opts) except (OptionError, ClassNotFound) as err: print('Error:', err, file=sys.stderr) return 1 # read input code code = None if argns.INPUTFILE: if argns.s: print('Error: -s option not usable when input file specified', file=sys.stderr) return 2 infn = argns.INPUTFILE try: with open(infn, 'rb') as infp: code = infp.read() except Exception as err: print('Error: cannot read infile:', err, file=sys.stderr) return 1 if not inencoding: code, inencoding = guess_decode(code) # do we have to guess the lexer? 
if not lexer: try: lexer = get_lexer_for_filename(infn, code, **parsed_opts) except ClassNotFound as err: if argns.g: try: lexer = guess_lexer(code, **parsed_opts) except ClassNotFound: lexer = TextLexer(**parsed_opts) else: print('Error:', err, file=sys.stderr) return 1 except OptionError as err: print('Error:', err, file=sys.stderr) return 1 elif not argns.s: # treat stdin as full file (-s support is later) # read code from terminal, always in binary mode since we want to # decode ourselves and be tolerant with it code = sys.stdin.buffer.read() # use .buffer to get a binary stream if not inencoding: code, inencoding = guess_decode_from_terminal(code, sys.stdin) # else the lexer will do the decoding if not lexer: try: lexer = guess_lexer(code, **parsed_opts) except ClassNotFound: lexer = TextLexer(**parsed_opts) else: # -s option needs a lexer with -l if not lexer: print('Error: when using -s a lexer has to be selected with -l', file=sys.stderr) return 2 # process filters for fname, fopts in F_opts: try: lexer.add_filter(fname, **fopts) except ClassNotFound as err: print('Error:', err, file=sys.stderr) return 1 # select formatter outfn = argns.o fmter = argns.f if fmter: # custom formatter, located relative to user's cwd if allow_custom_lexer_formatter and '.py' in fmter: try: filename = None name = None if ':' in fmter: # Same logic as above for custom lexer filename, name = fmter.rsplit(':', 1) if '.py' in name: name = None if filename and name: fmter = load_formatter_from_file(filename, name, **parsed_opts) else: fmter = load_formatter_from_file(fmter, **parsed_opts) except ClassNotFound as err: print('Error:', err, file=sys.stderr) return 1 else: try: fmter = get_formatter_by_name(fmter, **parsed_opts) except (OptionError, ClassNotFound) as err: print('Error:', err, file=sys.stderr) return 1 if outfn: if not fmter: try: fmter = get_formatter_for_filename(outfn, **parsed_opts) except (OptionError, ClassNotFound) as err: print('Error:', err, file=sys.stderr) 
return 1 try: outfile = open(outfn, 'wb') except Exception as err: print('Error: cannot open outfile:', err, file=sys.stderr) return 1 else: if not fmter: if os.environ.get('COLORTERM','') in ('truecolor', '24bit'): fmter = TerminalTrueColorFormatter(**parsed_opts) elif '256' in os.environ.get('TERM', ''): fmter = Terminal256Formatter(**parsed_opts) else: fmter = TerminalFormatter(**parsed_opts) outfile = sys.stdout.buffer # determine output encoding if not explicitly selected if not outencoding: if outfn: # output file? use lexer encoding for now (can still be None) fmter.encoding = inencoding else: # else use terminal encoding fmter.encoding = terminal_encoding(sys.stdout) # provide coloring under Windows, if possible if not outfn and sys.platform in ('win32', 'cygwin') and \ fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover # unfortunately colorama doesn't support binary streams on Py3 outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding) fmter.encoding = None try: import colorama.initialise except ImportError: pass else: outfile = colorama.initialise.wrap_stream( outfile, convert=None, strip=None, autoreset=False, wrap=True) # When using the LaTeX formatter and the option `escapeinside` is # specified, we need a special lexer which collects escaped text # before running the chosen language lexer. escapeinside = parsed_opts.get('escapeinside', '') if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter): left = escapeinside[0] right = escapeinside[1] lexer = LatexEmbeddedLexer(left, right, lexer) # ... and do it! if not argns.s: # process whole input as per normal... try: highlight(code, lexer, fmter, outfile) finally: if outfn: outfile.close() return 0 else: # line by line processing of stdin (eg: for 'tail -f')... 
try: while 1: line = sys.stdin.buffer.readline() if not line: break if not inencoding: line = guess_decode_from_terminal(line, sys.stdin)[0] highlight(line, lexer, fmter, outfile) if hasattr(outfile, 'flush'): outfile.flush() return 0 except KeyboardInterrupt: # pragma: no cover return 0 finally: if outfn: outfile.close() class HelpFormatter(argparse.HelpFormatter): def __init__(self, prog, indent_increment=2, max_help_position=16, width=None): if width is None: try: width = shutil.get_terminal_size().columns - 2 except Exception: pass argparse.HelpFormatter.__init__(self, prog, indent_increment, max_help_position, width) def main(args=sys.argv): """ Main command line entry point. """ desc = "Highlight an input file and write the result to an output file." parser = argparse.ArgumentParser(description=desc, add_help=False, formatter_class=HelpFormatter) operation = parser.add_argument_group('Main operation') lexersel = operation.add_mutually_exclusive_group() lexersel.add_argument( '-l', metavar='LEXER', help='Specify the lexer to use. (Query names with -L.) If not ' 'given and -g is not present, the lexer is guessed from the filename.') lexersel.add_argument( '-g', action='store_true', help='Guess the lexer from the file contents, or pass through ' 'as plain text if nothing can be guessed.') operation.add_argument( '-F', metavar='FILTER[:options]', action='append', help='Add a filter to the token stream. (Query names with -L.) ' 'Filter options are given after a colon if necessary.') operation.add_argument( '-f', metavar='FORMATTER', help='Specify the formatter to use. (Query names with -L.) ' 'If not given, the formatter is guessed from the output filename, ' 'and defaults to the terminal formatter if the output is to the ' 'terminal or an unknown file extension.') operation.add_argument( '-O', metavar='OPTION=value[,OPTION=value,...]', action='append', help='Give options to the lexer and formatter as a comma-separated ' 'list of key-value pairs. 
' 'Example: `-O bg=light,python=cool`.') operation.add_argument( '-P', metavar='OPTION=value', action='append', help='Give a single option to the lexer and formatter - with this ' 'you can pass options whose value contains commas and equal signs. ' 'Example: `-P "heading=Pygments, the Python highlighter"`.') operation.add_argument( '-o', metavar='OUTPUTFILE', help='Where to write the output. Defaults to standard output.') operation.add_argument( 'INPUTFILE', nargs='?', help='Where to read the input. Defaults to standard input.') flags = parser.add_argument_group('Operation flags') flags.add_argument( '-v', action='store_true', help='Print a detailed traceback on unhandled exceptions, which ' 'is useful for debugging and bug reports.') flags.add_argument( '-s', action='store_true', help='Process lines one at a time until EOF, rather than waiting to ' 'process the entire file. This only works for stdin, only for lexers ' 'with no line-spanning constructs, and is intended for streaming ' 'input such as you get from `tail -f`. ' 'Example usage: `tail -f sql.log | pygmentize -s -l sql`.') flags.add_argument( '-x', action='store_true', help='Allow custom lexers and formatters to be loaded from a .py file ' 'relative to the current working directory. For example, ' '`-l ./customlexer.py -x`. By default, this option expects a file ' 'with a class named CustomLexer or CustomFormatter; you can also ' 'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). ' 'Users should be very careful not to use this option with untrusted ' 'files, because it will import and run them.') flags.add_argument('--json', help='Output as JSON. 
This can ' 'be only used in conjunction with -L.', default=False, action='store_true') special_modes_group = parser.add_argument_group( 'Special modes - do not do any highlighting') special_modes = special_modes_group.add_mutually_exclusive_group() special_modes.add_argument( '-S', metavar='STYLE -f formatter', help='Print style definitions for STYLE for a formatter ' 'given with -f. The argument given by -a is formatter ' 'dependent.') special_modes.add_argument( '-L', nargs='*', metavar='WHAT', help='List lexers, formatters, styles or filters -- ' 'give additional arguments for the thing(s) you want to list ' '(e.g. "styles"), or omit them to list everything.') special_modes.add_argument( '-N', metavar='FILENAME', help='Guess and print out a lexer name based solely on the given ' 'filename. Does not take input or highlight anything. If no specific ' 'lexer can be determined, "text" is printed.') special_modes.add_argument( '-C', action='store_true', help='Like -N, but print out a lexer name based solely on ' 'a given content from standard input.') special_modes.add_argument( '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'), help='Print detailed help for the object <name> of type <type>, ' 'where <type> is one of "lexer", "formatter" or "filter".') special_modes.add_argument( '-V', action='store_true', help='Print the package version.') special_modes.add_argument( '-h', '--help', action='store_true', help='Print this help.') special_modes_group.add_argument( '-a', metavar='ARG', help='Formatter-specific additional argument for the -S (print ' 'style sheet) mode.') argns = parser.parse_args(args[1:]) try: return main_inner(parser, argns) except BrokenPipeError: # someone closed our stdout, e.g. by quitting a pager. 
return 0 except Exception: if argns.v: print(file=sys.stderr) print('*' * 65, file=sys.stderr) print('An unhandled exception occurred while highlighting.', file=sys.stderr) print('Please report the whole traceback to the issue tracker at', file=sys.stderr) print('<https://github.com/pygments/pygments/issues>.', file=sys.stderr) print('*' * 65, file=sys.stderr) print(file=sys.stderr) raise import traceback info = traceback.format_exception(*sys.exc_info()) msg = info[-1].strip() if len(info) >= 3: # extract relevant file and position info msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:] print(file=sys.stderr) print('*** Error while highlighting:', file=sys.stderr) print(msg, file=sys.stderr) print('*** If this is a bug you want to report, please rerun with -v.', file=sys.stderr) return 1
23,529
Python
34.171898
92
0.548302
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/formatter.py
""" pygments.formatter ~~~~~~~~~~~~~~~~~~ Base formatter class. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import codecs from pygments.util import get_bool_opt from pygments.styles import get_style_by_name __all__ = ['Formatter'] def _lookup_style(style): if isinstance(style, str): return get_style_by_name(style) return style class Formatter: """ Converts a token stream to text. Options accepted: ``style`` The style to use, can be a string or a Style subclass (default: "default"). Not used by e.g. the TerminalFormatter. ``full`` Tells the formatter to output a "full" document, i.e. a complete self-contained document. This doesn't have any effect for some formatters (default: false). ``title`` If ``full`` is true, the title that should be used to caption the document (default: ''). ``encoding`` If given, must be an encoding name. This will be used to convert the Unicode token strings to byte strings in the output. If it is "" or None, Unicode strings will be written to the output file, which most file-like objects do not support (default: None). ``outencoding`` Overrides ``encoding`` if given. """ #: Name of the formatter name = None #: Shortcuts for the formatter aliases = [] #: fn match rules filenames = [] #: If True, this formatter outputs Unicode strings when no encoding #: option is given. unicodeoutput = True def __init__(self, **options): self.style = _lookup_style(options.get('style', 'default')) self.full = get_bool_opt(options, 'full', False) self.title = options.get('title', '') self.encoding = options.get('encoding', None) or None if self.encoding in ('guess', 'chardet'): # can happen for e.g. pygmentize -O encoding=guess self.encoding = 'utf-8' self.encoding = options.get('outencoding') or self.encoding self.options = options def get_style_defs(self, arg=''): """ Return the style definitions for the current style as a string. 
``arg`` is an additional argument whose meaning depends on the formatter used. Note that ``arg`` can also be a list or tuple for some formatters like the html formatter. """ return '' def format(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. """ if self.encoding: # wrap the outfile in a StreamWriter outfile = codecs.lookup(self.encoding)[3](outfile) return self.format_unencoded(tokensource, outfile)
2,893
Python
29.463158
75
0.614241
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/style.py
""" pygments.style ~~~~~~~~~~~~~~ Basic style object. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.token import Token, STANDARD_TYPES # Default mapping of ansixxx to RGB colors. _ansimap = { # dark 'ansiblack': '000000', 'ansired': '7f0000', 'ansigreen': '007f00', 'ansiyellow': '7f7fe0', 'ansiblue': '00007f', 'ansimagenta': '7f007f', 'ansicyan': '007f7f', 'ansigray': 'e5e5e5', # normal 'ansibrightblack': '555555', 'ansibrightred': 'ff0000', 'ansibrightgreen': '00ff00', 'ansibrightyellow': 'ffff00', 'ansibrightblue': '0000ff', 'ansibrightmagenta': 'ff00ff', 'ansibrightcyan': '00ffff', 'ansiwhite': 'ffffff', } # mapping of deprecated #ansixxx colors to new color names _deprecated_ansicolors = { # dark '#ansiblack': 'ansiblack', '#ansidarkred': 'ansired', '#ansidarkgreen': 'ansigreen', '#ansibrown': 'ansiyellow', '#ansidarkblue': 'ansiblue', '#ansipurple': 'ansimagenta', '#ansiteal': 'ansicyan', '#ansilightgray': 'ansigray', # normal '#ansidarkgray': 'ansibrightblack', '#ansired': 'ansibrightred', '#ansigreen': 'ansibrightgreen', '#ansiyellow': 'ansibrightyellow', '#ansiblue': 'ansibrightblue', '#ansifuchsia': 'ansibrightmagenta', '#ansiturquoise': 'ansibrightcyan', '#ansiwhite': 'ansiwhite', } ansicolors = set(_ansimap) class StyleMeta(type): def __new__(mcs, name, bases, dct): obj = type.__new__(mcs, name, bases, dct) for token in STANDARD_TYPES: if token not in obj.styles: obj.styles[token] = '' def colorformat(text): if text in ansicolors: return text if text[0:1] == '#': col = text[1:] if len(col) == 6: return col elif len(col) == 3: return col[0] * 2 + col[1] * 2 + col[2] * 2 elif text == '': return '' elif text.startswith('var') or text.startswith('calc'): return text assert False, "wrong color format %r" % text _styles = obj._styles = {} for ttype in obj.styles: for token in ttype.split(): if token in _styles: continue ndef = _styles.get(token.parent, None) styledefs = 
obj.styles.get(token, '').split() if not ndef or token is None: ndef = ['', 0, 0, 0, '', '', 0, 0, 0] elif 'noinherit' in styledefs and token is not Token: ndef = _styles[Token][:] else: ndef = ndef[:] _styles[token] = ndef for styledef in obj.styles.get(token, '').split(): if styledef == 'noinherit': pass elif styledef == 'bold': ndef[1] = 1 elif styledef == 'nobold': ndef[1] = 0 elif styledef == 'italic': ndef[2] = 1 elif styledef == 'noitalic': ndef[2] = 0 elif styledef == 'underline': ndef[3] = 1 elif styledef == 'nounderline': ndef[3] = 0 elif styledef[:3] == 'bg:': ndef[4] = colorformat(styledef[3:]) elif styledef[:7] == 'border:': ndef[5] = colorformat(styledef[7:]) elif styledef == 'roman': ndef[6] = 1 elif styledef == 'sans': ndef[7] = 1 elif styledef == 'mono': ndef[8] = 1 else: ndef[0] = colorformat(styledef) return obj def style_for_token(cls, token): t = cls._styles[token] ansicolor = bgansicolor = None color = t[0] if color in _deprecated_ansicolors: color = _deprecated_ansicolors[color] if color in ansicolors: ansicolor = color color = _ansimap[color] bgcolor = t[4] if bgcolor in _deprecated_ansicolors: bgcolor = _deprecated_ansicolors[bgcolor] if bgcolor in ansicolors: bgansicolor = bgcolor bgcolor = _ansimap[bgcolor] return { 'color': color or None, 'bold': bool(t[1]), 'italic': bool(t[2]), 'underline': bool(t[3]), 'bgcolor': bgcolor or None, 'border': t[5] or None, 'roman': bool(t[6]) or None, 'sans': bool(t[7]) or None, 'mono': bool(t[8]) or None, 'ansicolor': ansicolor, 'bgansicolor': bgansicolor, } def list_styles(cls): return list(cls) def styles_token(cls, ttype): return ttype in cls._styles def __iter__(cls): for token in cls._styles: yield token, cls.style_for_token(token) def __len__(cls): return len(cls._styles) class Style(metaclass=StyleMeta): #: overall background color (``None`` means transparent) background_color = '#ffffff' #: highlight background color highlight_color = '#ffffcc' #: line number font color line_number_color = 
'inherit' #: line number background color line_number_background_color = 'transparent' #: special line number font color line_number_special_color = '#000000' #: special line number background color line_number_special_background_color = '#ffffc0' #: Style definitions for individual token types. styles = {} # Attribute for lexers defined within Pygments. If set # to True, the style is not shown in the style gallery # on the website. This is intended for language-specific # styles. web_style_gallery_exclude = False
6,245
Python
30.545454
70
0.501521
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/sphinxext.py
""" pygments.sphinxext ~~~~~~~~~~~~~~~~~~ Sphinx extension to generate automatic documentation of lexers, formatters and filters. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import sys from docutils import nodes from docutils.statemachine import ViewList from docutils.parsers.rst import Directive from sphinx.util.nodes import nested_parse_with_titles MODULEDOC = ''' .. module:: %s %s %s ''' LEXERDOC = ''' .. class:: %s :Short names: %s :Filenames: %s :MIME types: %s %s ''' FMTERDOC = ''' .. class:: %s :Short names: %s :Filenames: %s %s ''' FILTERDOC = ''' .. class:: %s :Name: %s %s ''' class PygmentsDoc(Directive): """ A directive to collect all lexers/formatters/filters and generate autoclass directives for them. """ has_content = False required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False option_spec = {} def run(self): self.filenames = set() if self.arguments[0] == 'lexers': out = self.document_lexers() elif self.arguments[0] == 'formatters': out = self.document_formatters() elif self.arguments[0] == 'filters': out = self.document_filters() elif self.arguments[0] == 'lexers_overview': out = self.document_lexers_overview() else: raise Exception('invalid argument for "pygmentsdoc" directive') node = nodes.compound() vl = ViewList(out.split('\n'), source='') nested_parse_with_titles(self.state, vl, node) for fn in self.filenames: self.state.document.settings.record_dependencies.add(fn) return node.children def document_lexers_overview(self): """Generate a tabular overview of all lexers. 
The columns are the lexer name, the extensions handled by this lexer (or "None"), the aliases and a link to the lexer class.""" from pygments.lexers._mapping import LEXERS import pygments.lexers out = [] table = [] def format_link(name, url): if url: return f'`{name} <{url}>`_' return name for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()): lexer_cls = pygments.lexers.find_lexer_class(data[1]) extensions = lexer_cls.filenames + lexer_cls.alias_filenames table.append({ 'name': format_link(data[1], lexer_cls.url), 'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\') or 'None', 'aliases': ', '.join(data[2]), 'class': f'{data[0]}.{classname}' }) column_names = ['name', 'extensions', 'aliases', 'class'] column_lengths = [max([len(row[column]) for row in table if row[column]]) for column in column_names] def write_row(*columns): """Format a table row""" out = [] for l, c in zip(column_lengths, columns): if c: out.append(c.ljust(l)) else: out.append(' '*l) return ' '.join(out) def write_seperator(): """Write a table separator row""" sep = ['='*c for c in column_lengths] return write_row(*sep) out.append(write_seperator()) out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class')) out.append(write_seperator()) for row in table: out.append(write_row( row['name'], row['extensions'], row['aliases'], f':class:`~{row["class"]}`')) out.append(write_seperator()) return '\n'.join(out) def document_lexers(self): from pygments.lexers._mapping import LEXERS out = [] modules = {} moduledocstrings = {} for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]): module = data[0] mod = __import__(module, None, None, [classname]) self.filenames.add(mod.__file__) cls = getattr(mod, classname) if not cls.__doc__: print("Warning: %s does not have a docstring." 
% classname) docstring = cls.__doc__ if isinstance(docstring, bytes): docstring = docstring.decode('utf8') modules.setdefault(module, []).append(( classname, ', '.join(data[2]) or 'None', ', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None', ', '.join(data[4]) or 'None', docstring)) if module not in moduledocstrings: moddoc = mod.__doc__ if isinstance(moddoc, bytes): moddoc = moddoc.decode('utf8') moduledocstrings[module] = moddoc for module, lexers in sorted(modules.items(), key=lambda x: x[0]): if moduledocstrings[module] is None: raise Exception("Missing docstring for %s" % (module,)) heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.') out.append(MODULEDOC % (module, heading, '-'*len(heading))) for data in lexers: out.append(LEXERDOC % data) return ''.join(out) def document_formatters(self): from pygments.formatters import FORMATTERS out = [] for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]): module = data[0] mod = __import__(module, None, None, [classname]) self.filenames.add(mod.__file__) cls = getattr(mod, classname) docstring = cls.__doc__ if isinstance(docstring, bytes): docstring = docstring.decode('utf8') heading = cls.__name__ out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None', ', '.join(data[3]).replace('*', '\\*') or 'None', docstring)) return ''.join(out) def document_filters(self): from pygments.filters import FILTERS out = [] for name, cls in FILTERS.items(): self.filenames.add(sys.modules[cls.__module__].__file__) docstring = cls.__doc__ if isinstance(docstring, bytes): docstring = docstring.decode('utf8') out.append(FILTERDOC % (cls.__name__, name, docstring)) return ''.join(out) def setup(app): app.add_directive('pygmentsdoc', PygmentsDoc)
6,816
Python
30.270642
101
0.531397
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexer.py
""" pygments.lexer ~~~~~~~~~~~~~~ Base lexer classes. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re import sys import time from pygments.filter import apply_filters, Filter from pygments.filters import get_filter_by_name from pygments.token import Error, Text, Other, Whitespace, _TokenType from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ make_analysator, Future, guess_decode from pygments.regexopt import regex_opt __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', 'default', 'words', 'line_re'] line_re = re.compile('.*?\n') _encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), (b'\xff\xfe\0\0', 'utf-32'), (b'\0\0\xfe\xff', 'utf-32be'), (b'\xff\xfe', 'utf-16'), (b'\xfe\xff', 'utf-16be')] _default_analyse = staticmethod(lambda x: 0.0) class LexerMeta(type): """ This metaclass automagically converts ``analyse_text`` methods into static methods which always return float values. """ def __new__(mcs, name, bases, d): if 'analyse_text' in d: d['analyse_text'] = make_analysator(d['analyse_text']) return type.__new__(mcs, name, bases, d) class Lexer(metaclass=LexerMeta): """ Lexer for a specific language. Basic options recognized: ``stripnl`` Strip leading and trailing newlines from the input (default: True). ``stripall`` Strip all leading and trailing whitespace from the input (default: False). ``ensurenl`` Make sure that the input ends with a newline (default: True). This is required for some lexers that consume input linewise. .. versionadded:: 1.3 ``tabsize`` If given and greater than 0, expand tabs in the input (default: 0). ``encoding`` If given, must be an encoding name. This encoding will be used to convert the input string to Unicode, if it is not already a Unicode string (default: ``'guess'``, which uses a simple UTF-8 / Locale / Latin1 detection. 
Can also be ``'chardet'`` to use the chardet library, if it is installed. ``inencoding`` Overrides the ``encoding`` if given. """ #: Name of the lexer name = None #: URL of the language specification/definition url = None #: Shortcuts for the lexer aliases = [] #: File name globs filenames = [] #: Secondary file name globs alias_filenames = [] #: MIME types mimetypes = [] #: Priority, should multiple lexers match and no content is provided priority = 0 def __init__(self, **options): self.options = options self.stripnl = get_bool_opt(options, 'stripnl', True) self.stripall = get_bool_opt(options, 'stripall', False) self.ensurenl = get_bool_opt(options, 'ensurenl', True) self.tabsize = get_int_opt(options, 'tabsize', 0) self.encoding = options.get('encoding', 'guess') self.encoding = options.get('inencoding') or self.encoding self.filters = [] for filter_ in get_list_opt(options, 'filters', ()): self.add_filter(filter_) def __repr__(self): if self.options: return '<pygments.lexers.%s with %r>' % (self.__class__.__name__, self.options) else: return '<pygments.lexers.%s>' % self.__class__.__name__ def add_filter(self, filter_, **options): """ Add a new stream filter to this lexer. """ if not isinstance(filter_, Filter): filter_ = get_filter_by_name(filter_, **options) self.filters.append(filter_) def analyse_text(text): """ Has to return a float between ``0`` and ``1`` that indicates if a lexer wants to highlight this text. Used by ``guess_lexer``. If this method returns ``0`` it won't highlight it in any case, if it returns ``1`` highlighting with this lexer is guaranteed. The `LexerMeta` metaclass automatically wraps this function so that it works like a static method (no ``self`` or ``cls`` parameter) and the return value is automatically converted to `float`. If the return value is an object that is boolean `False` it's the same as if the return values was ``0.0``. 
""" def get_tokens(self, text, unfiltered=False): """ Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters. """ if not isinstance(text, str): if self.encoding == 'guess': text, _ = guess_decode(text) elif self.encoding == 'chardet': try: import chardet except ImportError as e: raise ImportError('To enable chardet encoding guessing, ' 'please install the chardet library ' 'from http://chardet.feedparser.org/') from e # check for BOM first decoded = None for bom, encoding in _encoding_map: if text.startswith(bom): decoded = text[len(bom):].decode(encoding, 'replace') break # no BOM found, so use chardet if decoded is None: enc = chardet.detect(text[:1024]) # Guess using first 1KB decoded = text.decode(enc.get('encoding') or 'utf-8', 'replace') text = decoded else: text = text.decode(self.encoding) if text.startswith('\ufeff'): text = text[len('\ufeff'):] else: if text.startswith('\ufeff'): text = text[len('\ufeff'):] # text now *is* a unicode string text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') if self.stripall: text = text.strip() elif self.stripnl: text = text.strip('\n') if self.tabsize > 0: text = text.expandtabs(self.tabsize) if self.ensurenl and not text.endswith('\n'): text += '\n' def streamer(): for _, t, v in self.get_tokens_unprocessed(text): yield t, v stream = streamer() if not unfiltered: stream = apply_filters(stream, self.filters, self) return stream def get_tokens_unprocessed(self, text): """ Return an iterable of (index, tokentype, value) pairs where "index" is the starting position of the token within the input text. In subclasses, implement this method as a generator to maximize effectiveness. """ raise NotImplementedError class DelegatingLexer(Lexer): """ This lexer takes two lexer as arguments. 
A root lexer and a language lexer. First everything is scanned using the language lexer, afterwards all ``Other`` tokens are lexed using the root lexer. The lexers from the ``template`` lexer package use this base lexer. """ def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): self.root_lexer = _root_lexer(**options) self.language_lexer = _language_lexer(**options) self.needle = _needle Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): buffered = '' insertions = [] lng_buffer = [] for i, t, v in self.language_lexer.get_tokens_unprocessed(text): if t is self.needle: if lng_buffer: insertions.append((len(buffered), lng_buffer)) lng_buffer = [] buffered += v else: lng_buffer.append((i, t, v)) if lng_buffer: insertions.append((len(buffered), lng_buffer)) return do_insertions(insertions, self.root_lexer.get_tokens_unprocessed(buffered)) # ------------------------------------------------------------------------------ # RegexLexer and ExtendedRegexLexer # class include(str): # pylint: disable=invalid-name """ Indicates that a state should include rules from another state. """ pass class _inherit: """ Indicates the a state should inherit from its superclass. """ def __repr__(self): return 'inherit' inherit = _inherit() # pylint: disable=invalid-name class combined(tuple): # pylint: disable=invalid-name """ Indicates a state combined from multiple states. """ def __new__(cls, *args): return tuple.__new__(cls, args) def __init__(self, *args): # tuple.__init__ doesn't do anything pass class _PseudoMatch: """ A pseudo match object constructed from a string. 
""" def __init__(self, start, text): self._text = text self._start = start def start(self, arg=None): return self._start def end(self, arg=None): return self._start + len(self._text) def group(self, arg=None): if arg: raise IndexError('No such group') return self._text def groups(self): return (self._text,) def groupdict(self): return {} def bygroups(*args): """ Callback that yields multiple actions for each group in the match. """ def callback(lexer, match, ctx=None): for i, action in enumerate(args): if action is None: continue elif type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i + 1), action, data else: data = match.group(i + 1) if data is not None: if ctx: ctx.pos = match.start(i + 1) for item in action(lexer, _PseudoMatch(match.start(i + 1), data), ctx): if item: yield item if ctx: ctx.pos = match.end() return callback class _This: """ Special singleton used for indicating the caller class. Used by ``using``. """ this = _This() def using(_other, **kwargs): """ Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. 
""" gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this: def callback(lexer, match, ctx=None): # if keyword arguments are given the callback # function has to create a new lexer instance if kwargs: # XXX: cache that somehow kwargs.update(lexer.options) lx = lexer.__class__(**kwargs) else: lx = lexer s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() else: def callback(lexer, match, ctx=None): # XXX: cache that somehow kwargs.update(lexer.options) lx = _other(**kwargs) s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() return callback class default: """ Indicates a state or state action (e.g. #pop) to apply. For example default('#pop') is equivalent to ('', Token, '#pop') Note that state tuples may be used as well. .. versionadded:: 2.0 """ def __init__(self, state): self.state = state class words(Future): """ Indicates a list of literal words that is transformed into an optimized regex that matches any of the words. .. versionadded:: 2.0 """ def __init__(self, words, prefix='', suffix=''): self.words = words self.prefix = prefix self.suffix = suffix def get(self): return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) class RegexLexerMeta(LexerMeta): """ Metaclass for RegexLexer, creates the self._tokens attribute from self.tokens on the first instantiation. 
""" def _process_regex(cls, regex, rflags, state): """Preprocess the regular expression component of a token definition.""" if isinstance(regex, Future): regex = regex.get() return re.compile(regex, rflags).match def _process_token(cls, token): """Preprocess the token component of a token definition.""" assert type(token) is _TokenType or callable(token), \ 'token type must be simple type or callable, not %r' % (token,) return token def _process_new_state(cls, new_state, unprocessed, processed): """Preprocess the state transition action of a token definition.""" if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state def _process_state(cls, unprocessed, processed, state): """Preprocess a single state definition.""" assert type(state) is str, "wrong state name %r" % state assert state[0] != '#', "invalid state name %r" % state if state in processed: return processed[state] tokens = processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state tokens.extend(cls._process_state(unprocessed, processed, str(tdef))) 
continue if isinstance(tdef, _inherit): # should be processed already, but may not in the case of: # 1. the state has no counterpart in any parent # 2. the state includes more than one 'inherit' continue if isinstance(tdef, default): new_state = cls._process_new_state(tdef.state, unprocessed, processed) tokens.append((re.compile('').match, None, new_state)) continue assert type(tdef) is tuple, "wrong rule def %r" % tdef try: rex = cls._process_regex(tdef[0], rflags, state) except Exception as err: raise ValueError("uncompilable regex %r in state %r of %r: %s" % (tdef[0], state, cls, err)) from err token = cls._process_token(tdef[1]) if len(tdef) == 2: new_state = None else: new_state = cls._process_new_state(tdef[2], unprocessed, processed) tokens.append((rex, token, new_state)) return tokens def process_tokendef(cls, name, tokendefs=None): """Preprocess a dictionary of token definitions.""" processed = cls._all_tokens[name] = {} tokendefs = tokendefs or cls.tokens[name] for state in list(tokendefs): cls._process_state(tokendefs, processed, state) return processed def get_tokendefs(cls): """ Merge tokens from superclasses in MRO order, returning a single tokendef dictionary. Any state that is not defined by a subclass will be inherited automatically. States that *are* defined by subclasses will, by default, override that state in the superclass. If a subclass wishes to inherit definitions from a superclass, it can use the special value "inherit", which will cause the superclass' state definition to be included at that point in the state. """ tokens = {} inheritable = {} for c in cls.__mro__: toks = c.__dict__.get('tokens', {}) for state, items in toks.items(): curitems = tokens.get(state) if curitems is None: # N.b. because this is assigned by reference, sufficiently # deep hierarchies are processed incrementally (e.g. for # A(B), B(C), C(RegexLexer), B will be premodified so X(B) # will not see any inherits in B). 
tokens[state] = items try: inherit_ndx = items.index(inherit) except ValueError: continue inheritable[state] = inherit_ndx continue inherit_ndx = inheritable.pop(state, None) if inherit_ndx is None: continue # Replace the "inherit" value with the items curitems[inherit_ndx:inherit_ndx+1] = items try: # N.b. this is the index in items (that is, the superclass # copy), so offset required when storing below. new_inh_ndx = items.index(inherit) except ValueError: pass else: inheritable[state] = inherit_ndx + new_inh_ndx return tokens def __call__(cls, *args, **kwds): """Instantiate cls after preprocessing its token definitions.""" if '_tokens' not in cls.__dict__: cls._all_tokens = {} cls._tmpname = 0 if hasattr(cls, 'token_variants') and cls.token_variants: # don't process yet pass else: cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) return type.__call__(cls, *args, **kwds) class RegexLexer(Lexer, metaclass=RegexLexerMeta): """ Base for simple stateful regular expression-based lexers. Simplifies the lexing process so that you need only provide a list of states and regular expressions. """ #: Flags for compiling the regular expressions. #: Defaults to MULTILINE. flags = re.MULTILINE #: At all time there is a stack of states. Initially, the stack contains #: a single state 'root'. The top of the stack is called "the current state". #: #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` #: #: ``new_state`` can be omitted to signify no state transition. #: If ``new_state`` is a string, it is pushed on the stack. This ensure #: the new current state is ``new_state``. #: If ``new_state`` is a tuple of strings, all of those strings are pushed #: on the stack and the current state will be the last element of the list. #: ``new_state`` can also be ``combined('state1', 'state2', ...)`` #: to signify a new, anonymous state combined from the rules of two #: or more existing ones. 
#: Furthermore, it can be '#pop' to signify going back one step in #: the state stack, or '#push' to push the current state on the stack #: again. Note that if you push while in a combined state, the combined #: state itself is pushed, and not only the state in which the rule is #: defined. #: #: The tuple can also be replaced with ``include('state')``, in which #: case the rules from the state named by the string are included in the #: current one. tokens = {} def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. ``stack`` is the initial stack (default: ``['root']``) """ pos = 0 tokendefs = self._tokens statestack = list(stack) statetokens = tokendefs[statestack[-1]] while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, pos) if m: if action is not None: if type(action) is _TokenType: yield pos, action, m.group() else: yield from action(self, m) pos = m.end() if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': if len(statestack) > 1: statestack.pop() elif state == '#push': statestack.append(statestack[-1]) else: statestack.append(state) elif isinstance(new_state, int): # pop, but keep at least one state on the stack # (random code leading to unexpected pops should # not allow exceptions) if abs(new_state) >= len(statestack): del statestack[1:] else: del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[statestack[-1]] break else: # We are here only if all state tokens have been considered # and there was not a match on any of them. 
try: if text[pos] == '\n': # at EOL, reset state to "root" statestack = ['root'] statetokens = tokendefs['root'] yield pos, Whitespace, '\n' pos += 1 continue yield pos, Error, text[pos] pos += 1 except IndexError: break class LexerContext: """ A helper object that holds lexer position data. """ def __init__(self, text, pos, stack=None, end=None): self.text = text self.pos = pos self.end = end or len(text) # end=0 not supported ;-) self.stack = stack or ['root'] def __repr__(self): return 'LexerContext(%r, %r, %r)' % ( self.text, self.pos, self.stack) class ExtendedRegexLexer(RegexLexer): """ A RegexLexer that uses a context object to store its state. """ def get_tokens_unprocessed(self, text=None, context=None): """ Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. """ tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: yield from action(self, m, ctx) if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! 
if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': if len(ctx.stack) > 1: ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # see RegexLexer for why this check is made if abs(new_state) >= len(ctx.stack): del ctx.stack[1:] else: del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, '\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break def do_insertions(insertions, tokens): """ Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. TODO: clean up the code here. """ insertions = iter(insertions) try: index, itokens = next(insertions) except StopIteration: # no insertions yield from tokens return realpos = None insleft = True # iterate over the token stream where we want to insert # the tokens from the insertion list. for i, t, v in tokens: # first iteration. 
store the position of first item if realpos is None: realpos = i oldi = 0 while insleft and i + len(v) >= index: tmpval = v[oldi:index - i] if tmpval: yield realpos, t, tmpval realpos += len(tmpval) for it_index, it_token, it_value in itokens: yield realpos, it_token, it_value realpos += len(it_value) oldi = index - i try: index, itokens = next(insertions) except StopIteration: insleft = False break # not strictly necessary if oldi < len(v): yield realpos, t, v[oldi:] realpos += len(v) - oldi # leftover tokens while insleft: # no normal tokens, set realpos to zero realpos = realpos or 0 for p, t, v in itokens: yield realpos, t, v realpos += len(v) try: index, itokens = next(insertions) except StopIteration: insleft = False break # not strictly necessary class ProfilingRegexLexerMeta(RegexLexerMeta): """Metaclass for ProfilingRegexLexer, collects regex timing info.""" def _process_regex(cls, regex, rflags, state): if isinstance(regex, words): rex = regex_opt(regex.words, prefix=regex.prefix, suffix=regex.suffix) else: rex = regex compiled = re.compile(rex, rflags) def match_func(text, pos, endpos=sys.maxsize): info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0]) t0 = time.time() res = compiled.match(text, pos, endpos) t1 = time.time() info[0] += 1 info[1] += t1 - t0 return res return match_func class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta): """Drop-in replacement for RegexLexer that does profiling of its regexes.""" _prof_data = [] _prof_sort_index = 4 # defaults to time per call def get_tokens_unprocessed(self, text, stack=('root',)): # this needs to be a stack, since using(this) will produce nested calls self.__class__._prof_data.append({}) yield from RegexLexer.get_tokens_unprocessed(self, text, stack) rawdata = self.__class__._prof_data.pop() data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65], n, 1000 * t, 1000 * t / n) for ((s, r), (n, t)) in rawdata.items()), key=lambda x: x[self._prof_sort_index], 
reverse=True) sum_total = sum(x[3] for x in data) print() print('Profiling result for %s lexing %d chars in %.3f ms' % (self.__class__.__name__, len(text), sum_total)) print('=' * 110) print('%-20s %-64s ncalls tottime percall' % ('state', 'regex')) print('-' * 110) for d in data: print('%-20s %-65s %5d %8.4f %8.4f' % d) print('=' * 110)
31,987
Python
35.18552
86
0.518992
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/__init__.py
""" Pygments ~~~~~~~~ Pygments is a syntax highlighting package written in Python. It is a generic syntax highlighter for general use in all kinds of software such as forum systems, wikis or other applications that need to prettify source code. Highlights are: * a wide range of common languages and markup formats is supported * special attention is paid to details, increasing quality by a fair amount * support for new languages and formats are added easily * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image formats that PIL supports, and ANSI sequences * it is usable as a command-line tool and as a library * ... and it highlights even Brainfuck! The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``. .. _Pygments master branch: https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from io import StringIO, BytesIO __version__ = '2.14.0' __docformat__ = 'restructuredtext' __all__ = ['lex', 'format', 'highlight'] def lex(code, lexer): """ Lex ``code`` with ``lexer`` and return an iterable of tokens. """ try: return lexer.get_tokens(code) except TypeError: # Heuristic to catch a common mistake. from pygments.lexer import RegexLexer if isinstance(lexer, type) and issubclass(lexer, RegexLexer): raise TypeError('lex() argument must be a lexer instance, ' 'not a class') raise def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin """ Format a tokenlist ``tokens`` with the formatter ``formatter``. If ``outfile`` is given and a valid file object (an object with a ``write`` method), the result will be written to it, otherwise it is returned as a string. 
""" try: if not outfile: realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO() formatter.format(tokens, realoutfile) return realoutfile.getvalue() else: formatter.format(tokens, outfile) except TypeError: # Heuristic to catch a common mistake. from pygments.formatter import Formatter if isinstance(formatter, type) and issubclass(formatter, Formatter): raise TypeError('format() argument must be a formatter instance, ' 'not a class') raise def highlight(code, lexer, formatter, outfile=None): """ Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``. If ``outfile`` is given and a valid file object (an object with a ``write`` method), the result will be written to it, otherwise it is returned as a string. """ return format(lex(code, lexer), formatter, outfile)
2,975
Python
34.855421
90
0.654118
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/regexopt.py
""" pygments.regexopt ~~~~~~~~~~~~~~~~~ An algorithm that generates optimized regexes for matching long lists of literal strings. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from re import escape from os.path import commonprefix from itertools import groupby from operator import itemgetter CS_ESCAPE = re.compile(r'[\[\^\\\-\]]') FIRST_ELEMENT = itemgetter(0) def make_charset(letters): return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']' def regex_opt_inner(strings, open_paren): """Return a regex that matches any string in the sorted list of strings.""" close_paren = open_paren and ')' or '' # print strings, repr(open_paren) if not strings: # print '-> nothing left' return '' first = strings[0] if len(strings) == 1: # print '-> only 1 string' return open_paren + escape(first) + close_paren if not first: # print '-> first string empty' return open_paren + regex_opt_inner(strings[1:], '(?:') \ + '?' + close_paren if len(first) == 1: # multiple one-char strings? make a charset oneletter = [] rest = [] for s in strings: if len(s) == 1: oneletter.append(s) else: rest.append(s) if len(oneletter) > 1: # do we have more than one oneletter string? if rest: # print '-> 1-character + rest' return open_paren + regex_opt_inner(rest, '') + '|' \ + make_charset(oneletter) + close_paren # print '-> only 1-character' return open_paren + make_charset(oneletter) + close_paren prefix = commonprefix(strings) if prefix: plen = len(prefix) # we have a prefix for all strings # print '-> prefix:', prefix return open_paren + escape(prefix) \ + regex_opt_inner([s[plen:] for s in strings], '(?:') \ + close_paren # is there a suffix? 
strings_rev = [s[::-1] for s in strings] suffix = commonprefix(strings_rev) if suffix: slen = len(suffix) # print '-> suffix:', suffix[::-1] return open_paren \ + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \ + escape(suffix[::-1]) + close_paren # recurse on common 1-string prefixes # print '-> last resort' return open_paren + \ '|'.join(regex_opt_inner(list(group[1]), '') for group in groupby(strings, lambda s: s[0] == first[0])) \ + close_paren def regex_opt(strings, prefix='', suffix=''): """Return a compiled regex that matches any string in the given list. The strings to match must be literal strings, not regexes. They will be regex-escaped. *prefix* and *suffix* are pre- and appended to the final regex. """ strings = sorted(strings) return prefix + regex_opt_inner(strings, '(') + suffix
3,072
Python
32.402174
82
0.570638
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/scanner.py
""" pygments.scanner ~~~~~~~~~~~~~~~~ This library implements a regex based scanner. Some languages like Pascal are easy to parse but have some keywords that depend on the context. Because of this it's impossible to lex that just by using a regular expression lexer like the `RegexLexer`. Have a look at the `DelphiLexer` to get an idea of how to use this scanner. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re class EndOfText(RuntimeError): """ Raise if end of text is reached and the user tried to call a match function. """ class Scanner: """ Simple scanner All method patterns are regular expression strings (not compiled expressions!) """ def __init__(self, text, flags=0): """ :param text: The text which should be scanned :param flags: default regular expression flags """ self.data = text self.data_length = len(text) self.start_pos = 0 self.pos = 0 self.flags = flags self.last = None self.match = None self._re_cache = {} def eos(self): """`True` if the scanner reached the end of text.""" return self.pos >= self.data_length eos = property(eos, eos.__doc__) def check(self, pattern): """ Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead. """ if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) return self._re_cache[pattern].match(self.data, self.pos) def test(self, pattern): """Apply a pattern on the current position and check if it patches. Doesn't touch pos. """ return self.check(pattern) is not None def scan(self, pattern): """ Scan the text for the given pattern and update pos/match and related fields. The return value is a boolean that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. 
``start_pos`` is the position of the pointer before the pattern was matched, ``pos`` is the end position. """ if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) self.last = self.match m = self._re_cache[pattern].match(self.data, self.pos) if m is None: return False self.start_pos = m.start() self.pos = m.end() self.match = m.group() return True def get_char(self): """Scan exactly one char.""" self.scan('.') def __repr__(self): return '<%s %d/%d>' % ( self.__class__.__name__, self.pos, self.data_length )
3,092
Python
28.457143
70
0.578913
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/console.py
""" pygments.console ~~~~~~~~~~~~~~~~ Format colored console output. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ esc = "\x1b[" codes = {} codes[""] = "" codes["reset"] = esc + "39;49;00m" codes["bold"] = esc + "01m" codes["faint"] = esc + "02m" codes["standout"] = esc + "03m" codes["underline"] = esc + "04m" codes["blink"] = esc + "05m" codes["overline"] = esc + "06m" dark_colors = ["black", "red", "green", "yellow", "blue", "magenta", "cyan", "gray"] light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue", "brightmagenta", "brightcyan", "white"] x = 30 for d, l in zip(dark_colors, light_colors): codes[d] = esc + "%im" % x codes[l] = esc + "%im" % (60 + x) x += 1 del d, l, x codes["white"] = codes["bold"] def reset_color(): return codes["reset"] def colorize(color_key, text): return codes[color_key] + text + codes["reset"] def ansiformat(attr, text): """ Format ``text`` with a color and/or some attributes:: color normal color *color* bold color _color_ underlined color +color+ blinking color """ result = [] if attr[:1] == attr[-1:] == '+': result.append(codes['blink']) attr = attr[1:-1] if attr[:1] == attr[-1:] == '*': result.append(codes['bold']) attr = attr[1:-1] if attr[:1] == attr[-1:] == '_': result.append(codes['underline']) attr = attr[1:-1] result.append(codes[attr]) result.append(text) result.append(codes['reset']) return ''.join(result)
1,697
Python
22.915493
88
0.542722
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/token.py
""" pygments.token ~~~~~~~~~~~~~~ Basic token types and the standard tokens. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ class _TokenType(tuple): parent = None def split(self): buf = [] node = self while node is not None: buf.append(node) node = node.parent buf.reverse() return buf def __init__(self, *args): # no need to call super.__init__ self.subtypes = set() def __contains__(self, val): return self is val or ( type(val) is self.__class__ and val[:len(self)] == self ) def __getattr__(self, val): if not val or not val[0].isupper(): return tuple.__getattribute__(self, val) new = _TokenType(self + (val,)) setattr(self, val, new) self.subtypes.add(new) new.parent = self return new def __repr__(self): return 'Token' + (self and '.' or '') + '.'.join(self) def __copy__(self): # These instances are supposed to be singletons return self def __deepcopy__(self, memo): # These instances are supposed to be singletons return self Token = _TokenType() # Special token types Text = Token.Text Whitespace = Text.Whitespace Escape = Token.Escape Error = Token.Error # Text that doesn't belong to this lexer (e.g. HTML in PHP) Other = Token.Other # Common token types for source code Keyword = Token.Keyword Name = Token.Name Literal = Token.Literal String = Literal.String Number = Literal.Number Punctuation = Token.Punctuation Operator = Token.Operator Comment = Token.Comment # Generic types for non-source code Generic = Token.Generic # String and some others are not direct children of Token. # alias them: Token.Token = Token Token.String = String Token.Number = Number def is_token_subtype(ttype, other): """ Return True if ``ttype`` is a subtype of ``other``. exists for backwards compatibility. use ``ttype in other`` now. 
""" return ttype in other def string_to_tokentype(s): """ Convert a string into a token type:: >>> string_to_token('String.Double') Token.Literal.String.Double >>> string_to_token('Token.Literal.Number') Token.Literal.Number >>> string_to_token('') Token Tokens that are already tokens are returned unchanged: >>> string_to_token(String) Token.Literal.String """ if isinstance(s, _TokenType): return s if not s: return Token node = Token for item in s.split('.'): node = getattr(node, item) return node # Map standard token types to short names, used in CSS class naming. # If you add a new item, please be sure to run this file to perform # a consistency check for duplicate values. STANDARD_TYPES = { Token: '', Text: '', Whitespace: 'w', Escape: 'esc', Error: 'err', Other: 'x', Keyword: 'k', Keyword.Constant: 'kc', Keyword.Declaration: 'kd', Keyword.Namespace: 'kn', Keyword.Pseudo: 'kp', Keyword.Reserved: 'kr', Keyword.Type: 'kt', Name: 'n', Name.Attribute: 'na', Name.Builtin: 'nb', Name.Builtin.Pseudo: 'bp', Name.Class: 'nc', Name.Constant: 'no', Name.Decorator: 'nd', Name.Entity: 'ni', Name.Exception: 'ne', Name.Function: 'nf', Name.Function.Magic: 'fm', Name.Property: 'py', Name.Label: 'nl', Name.Namespace: 'nn', Name.Other: 'nx', Name.Tag: 'nt', Name.Variable: 'nv', Name.Variable.Class: 'vc', Name.Variable.Global: 'vg', Name.Variable.Instance: 'vi', Name.Variable.Magic: 'vm', Literal: 'l', Literal.Date: 'ld', String: 's', String.Affix: 'sa', String.Backtick: 'sb', String.Char: 'sc', String.Delimiter: 'dl', String.Doc: 'sd', String.Double: 's2', String.Escape: 'se', String.Heredoc: 'sh', String.Interpol: 'si', String.Other: 'sx', String.Regex: 'sr', String.Single: 's1', String.Symbol: 'ss', Number: 'm', Number.Bin: 'mb', Number.Float: 'mf', Number.Hex: 'mh', Number.Integer: 'mi', Number.Integer.Long: 'il', Number.Oct: 'mo', Operator: 'o', Operator.Word: 'ow', Punctuation: 'p', Punctuation.Marker: 'pm', Comment: 'c', Comment.Hashbang: 'ch', Comment.Multiline: 'cm', 
Comment.Preproc: 'cp', Comment.PreprocFile: 'cpf', Comment.Single: 'c1', Comment.Special: 'cs', Generic: 'g', Generic.Deleted: 'gd', Generic.Emph: 'ge', Generic.Error: 'gr', Generic.Heading: 'gh', Generic.Inserted: 'gi', Generic.Output: 'go', Generic.Prompt: 'gp', Generic.Strong: 'gs', Generic.Subheading: 'gu', Generic.Traceback: 'gt', }
6,184
Python
27.901869
70
0.466688
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/filter.py
""" pygments.filter ~~~~~~~~~~~~~~~ Module that implements the default filter. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ def apply_filters(stream, filters, lexer=None): """ Use this method to apply an iterable of filters to a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`. """ def _apply(filter_, stream): yield from filter_.filter(lexer, stream) for filter_ in filters: stream = _apply(filter_, stream) return stream def simplefilter(f): """ Decorator that converts a function into a filter:: @simplefilter def lowercase(self, lexer, stream, options): for ttype, value in stream: yield ttype, value.lower() """ return type(f.__name__, (FunctionFilter,), { '__module__': getattr(f, '__module__'), '__doc__': f.__doc__, 'function': f, }) class Filter: """ Default filter. Subclass this class or use the `simplefilter` decorator to create own filters. """ def __init__(self, **options): self.options = options def filter(self, lexer, stream): raise NotImplementedError() class FunctionFilter(Filter): """ Abstract class used by `simplefilter` to create simple function filters on the fly. The `simplefilter` decorator automatically creates subclasses of this class for functions passed to it. """ function = None def __init__(self, **options): if not hasattr(self, 'function'): raise TypeError('%r used without bound function' % self.__class__.__name__) Filter.__init__(self, **options) def filter(self, lexer, stream): # pylint: disable=not-callable yield from self.function(lexer, stream, self.options)
1,938
Python
25.930555
70
0.603199
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/modeline.py
""" pygments.modeline ~~~~~~~~~~~~~~~~~ A simple modeline parser (based on pymodeline). :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re __all__ = ['get_filetype_from_buffer'] modeline_re = re.compile(r''' (?: vi | vim | ex ) (?: [<=>]? \d* )? : .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ ) ''', re.VERBOSE) def get_filetype_from_line(l): m = modeline_re.search(l) if m: return m.group(1) def get_filetype_from_buffer(buf, max_lines=5): """ Scan the buffer for modelines and return filetype if one is found. """ lines = buf.splitlines() for l in lines[-1:-max_lines-1:-1]: ret = get_filetype_from_line(l) if ret: return ret for i in range(max_lines, -1, -1): if i < len(lines): ret = get_filetype_from_line(lines[i]) if ret: return ret return None
986
Python
21.431818
70
0.541582
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/plugin.py
""" pygments.plugin ~~~~~~~~~~~~~~~ Pygments plugin interface. By default, this tries to use ``importlib.metadata``, which is in the Python standard library since Python 3.8, or its ``importlib_metadata`` backport for earlier versions of Python. It falls back on ``pkg_resources`` if not found. Finally, if ``pkg_resources`` is not found either, no plugins are loaded at all. lexer plugins:: [pygments.lexers] yourlexer = yourmodule:YourLexer formatter plugins:: [pygments.formatters] yourformatter = yourformatter:YourFormatter /.ext = yourformatter:YourFormatter As you can see, you can define extensions for the formatter with a leading slash. syntax plugins:: [pygments.styles] yourstyle = yourstyle:YourStyle filter plugin:: [pygments.filter] yourfilter = yourfilter:YourFilter :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ LEXER_ENTRY_POINT = 'pygments.lexers' FORMATTER_ENTRY_POINT = 'pygments.formatters' STYLE_ENTRY_POINT = 'pygments.styles' FILTER_ENTRY_POINT = 'pygments.filters' def iter_entry_points(group_name): try: from importlib.metadata import entry_points except ImportError: try: from importlib_metadata import entry_points except ImportError: try: from pkg_resources import iter_entry_points except (ImportError, OSError): return [] else: return iter_entry_points(group_name) groups = entry_points() if hasattr(groups, 'select'): # New interface in Python 3.10 and newer versions of the # importlib_metadata backport. return groups.select(group=group_name) else: # Older interface, deprecated in Python 3.10 and recent # importlib_metadata, but we need it in Python 3.8 and 3.9. 
return groups.get(group_name, []) def find_plugin_lexers(): for entrypoint in iter_entry_points(LEXER_ENTRY_POINT): yield entrypoint.load() def find_plugin_formatters(): for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT): yield entrypoint.name, entrypoint.load() def find_plugin_styles(): for entrypoint in iter_entry_points(STYLE_ENTRY_POINT): yield entrypoint.name, entrypoint.load() def find_plugin_filters(): for entrypoint in iter_entry_points(FILTER_ENTRY_POINT): yield entrypoint.name, entrypoint.load()
2,579
Python
27.988764
70
0.658782
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/__main__.py
""" pygments.__main__ ~~~~~~~~~~~~~~~~~ Main entry point for ``python -m pygments``. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import sys import pygments.cmdline try: sys.exit(pygments.cmdline.main(sys.argv)) except KeyboardInterrupt: sys.exit(1)
348
Python
18.388888
70
0.643678
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/filters/__init__.py
""" pygments.filters ~~~~~~~~~~~~~~~~ Module containing filter lookup functions and default filters. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \ string_to_tokentype from pygments.filter import Filter from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \ get_choice_opt, ClassNotFound, OptionError from pygments.plugin import find_plugin_filters def find_filter_class(filtername): """Lookup a filter by name. Return None if not found.""" if filtername in FILTERS: return FILTERS[filtername] for name, cls in find_plugin_filters(): if name == filtername: return cls return None def get_filter_by_name(filtername, **options): """Return an instantiated filter. Options are passed to the filter initializer if wanted. Raise a ClassNotFound if not found. """ cls = find_filter_class(filtername) if cls: return cls(**options) else: raise ClassNotFound('filter %r not found' % filtername) def get_all_filters(): """Return a generator of all filter names.""" yield from FILTERS for name, _ in find_plugin_filters(): yield name def _replace_special(ttype, value, regex, specialttype, replacefunc=lambda x: x): last = 0 for match in regex.finditer(value): start, end = match.start(), match.end() if start != last: yield ttype, value[last:start] yield specialttype, replacefunc(value[start:end]) last = end if last != len(value): yield ttype, value[last:] class CodeTagFilter(Filter): """Highlight special code tags in comments and docstrings. Options accepted: `codetags` : list of strings A list of strings that are flagged as code tags. The default is to highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``. .. versionchanged:: 2.13 Now recognizes ``FIXME`` by default. 
""" def __init__(self, **options): Filter.__init__(self, **options) tags = get_list_opt(options, 'codetags', ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE']) self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([ re.escape(tag) for tag in tags if tag ])) def filter(self, lexer, stream): regex = self.tag_re for ttype, value in stream: if ttype in String.Doc or \ ttype in Comment and \ ttype not in Comment.Preproc: yield from _replace_special(ttype, value, regex, Comment.Special) else: yield ttype, value class SymbolFilter(Filter): """Convert mathematical symbols such as \\<longrightarrow> in Isabelle or \\longrightarrow in LaTeX into Unicode characters. This is mostly useful for HTML or console output when you want to approximate the source rendering you'd see in an IDE. Options accepted: `lang` : string The symbol language. Must be one of ``'isabelle'`` or ``'latex'``. The default is ``'isabelle'``. """ latex_symbols = { '\\alpha' : '\U000003b1', '\\beta' : '\U000003b2', '\\gamma' : '\U000003b3', '\\delta' : '\U000003b4', '\\varepsilon' : '\U000003b5', '\\zeta' : '\U000003b6', '\\eta' : '\U000003b7', '\\vartheta' : '\U000003b8', '\\iota' : '\U000003b9', '\\kappa' : '\U000003ba', '\\lambda' : '\U000003bb', '\\mu' : '\U000003bc', '\\nu' : '\U000003bd', '\\xi' : '\U000003be', '\\pi' : '\U000003c0', '\\varrho' : '\U000003c1', '\\sigma' : '\U000003c3', '\\tau' : '\U000003c4', '\\upsilon' : '\U000003c5', '\\varphi' : '\U000003c6', '\\chi' : '\U000003c7', '\\psi' : '\U000003c8', '\\omega' : '\U000003c9', '\\Gamma' : '\U00000393', '\\Delta' : '\U00000394', '\\Theta' : '\U00000398', '\\Lambda' : '\U0000039b', '\\Xi' : '\U0000039e', '\\Pi' : '\U000003a0', '\\Sigma' : '\U000003a3', '\\Upsilon' : '\U000003a5', '\\Phi' : '\U000003a6', '\\Psi' : '\U000003a8', '\\Omega' : '\U000003a9', '\\leftarrow' : '\U00002190', '\\longleftarrow' : '\U000027f5', '\\rightarrow' : '\U00002192', '\\longrightarrow' : '\U000027f6', '\\Leftarrow' : '\U000021d0', '\\Longleftarrow' : '\U000027f8', '\\Rightarrow' : 
'\U000021d2', '\\Longrightarrow' : '\U000027f9', '\\leftrightarrow' : '\U00002194', '\\longleftrightarrow' : '\U000027f7', '\\Leftrightarrow' : '\U000021d4', '\\Longleftrightarrow' : '\U000027fa', '\\mapsto' : '\U000021a6', '\\longmapsto' : '\U000027fc', '\\relbar' : '\U00002500', '\\Relbar' : '\U00002550', '\\hookleftarrow' : '\U000021a9', '\\hookrightarrow' : '\U000021aa', '\\leftharpoondown' : '\U000021bd', '\\rightharpoondown' : '\U000021c1', '\\leftharpoonup' : '\U000021bc', '\\rightharpoonup' : '\U000021c0', '\\rightleftharpoons' : '\U000021cc', '\\leadsto' : '\U0000219d', '\\downharpoonleft' : '\U000021c3', '\\downharpoonright' : '\U000021c2', '\\upharpoonleft' : '\U000021bf', '\\upharpoonright' : '\U000021be', '\\restriction' : '\U000021be', '\\uparrow' : '\U00002191', '\\Uparrow' : '\U000021d1', '\\downarrow' : '\U00002193', '\\Downarrow' : '\U000021d3', '\\updownarrow' : '\U00002195', '\\Updownarrow' : '\U000021d5', '\\langle' : '\U000027e8', '\\rangle' : '\U000027e9', '\\lceil' : '\U00002308', '\\rceil' : '\U00002309', '\\lfloor' : '\U0000230a', '\\rfloor' : '\U0000230b', '\\flqq' : '\U000000ab', '\\frqq' : '\U000000bb', '\\bot' : '\U000022a5', '\\top' : '\U000022a4', '\\wedge' : '\U00002227', '\\bigwedge' : '\U000022c0', '\\vee' : '\U00002228', '\\bigvee' : '\U000022c1', '\\forall' : '\U00002200', '\\exists' : '\U00002203', '\\nexists' : '\U00002204', '\\neg' : '\U000000ac', '\\Box' : '\U000025a1', '\\Diamond' : '\U000025c7', '\\vdash' : '\U000022a2', '\\models' : '\U000022a8', '\\dashv' : '\U000022a3', '\\surd' : '\U0000221a', '\\le' : '\U00002264', '\\ge' : '\U00002265', '\\ll' : '\U0000226a', '\\gg' : '\U0000226b', '\\lesssim' : '\U00002272', '\\gtrsim' : '\U00002273', '\\lessapprox' : '\U00002a85', '\\gtrapprox' : '\U00002a86', '\\in' : '\U00002208', '\\notin' : '\U00002209', '\\subset' : '\U00002282', '\\supset' : '\U00002283', '\\subseteq' : '\U00002286', '\\supseteq' : '\U00002287', '\\sqsubset' : '\U0000228f', '\\sqsupset' : '\U00002290', 
'\\sqsubseteq' : '\U00002291', '\\sqsupseteq' : '\U00002292', '\\cap' : '\U00002229', '\\bigcap' : '\U000022c2', '\\cup' : '\U0000222a', '\\bigcup' : '\U000022c3', '\\sqcup' : '\U00002294', '\\bigsqcup' : '\U00002a06', '\\sqcap' : '\U00002293', '\\Bigsqcap' : '\U00002a05', '\\setminus' : '\U00002216', '\\propto' : '\U0000221d', '\\uplus' : '\U0000228e', '\\bigplus' : '\U00002a04', '\\sim' : '\U0000223c', '\\doteq' : '\U00002250', '\\simeq' : '\U00002243', '\\approx' : '\U00002248', '\\asymp' : '\U0000224d', '\\cong' : '\U00002245', '\\equiv' : '\U00002261', '\\Join' : '\U000022c8', '\\bowtie' : '\U00002a1d', '\\prec' : '\U0000227a', '\\succ' : '\U0000227b', '\\preceq' : '\U0000227c', '\\succeq' : '\U0000227d', '\\parallel' : '\U00002225', '\\mid' : '\U000000a6', '\\pm' : '\U000000b1', '\\mp' : '\U00002213', '\\times' : '\U000000d7', '\\div' : '\U000000f7', '\\cdot' : '\U000022c5', '\\star' : '\U000022c6', '\\circ' : '\U00002218', '\\dagger' : '\U00002020', '\\ddagger' : '\U00002021', '\\lhd' : '\U000022b2', '\\rhd' : '\U000022b3', '\\unlhd' : '\U000022b4', '\\unrhd' : '\U000022b5', '\\triangleleft' : '\U000025c3', '\\triangleright' : '\U000025b9', '\\triangle' : '\U000025b3', '\\triangleq' : '\U0000225c', '\\oplus' : '\U00002295', '\\bigoplus' : '\U00002a01', '\\otimes' : '\U00002297', '\\bigotimes' : '\U00002a02', '\\odot' : '\U00002299', '\\bigodot' : '\U00002a00', '\\ominus' : '\U00002296', '\\oslash' : '\U00002298', '\\dots' : '\U00002026', '\\cdots' : '\U000022ef', '\\sum' : '\U00002211', '\\prod' : '\U0000220f', '\\coprod' : '\U00002210', '\\infty' : '\U0000221e', '\\int' : '\U0000222b', '\\oint' : '\U0000222e', '\\clubsuit' : '\U00002663', '\\diamondsuit' : '\U00002662', '\\heartsuit' : '\U00002661', '\\spadesuit' : '\U00002660', '\\aleph' : '\U00002135', '\\emptyset' : '\U00002205', '\\nabla' : '\U00002207', '\\partial' : '\U00002202', '\\flat' : '\U0000266d', '\\natural' : '\U0000266e', '\\sharp' : '\U0000266f', '\\angle' : '\U00002220', '\\copyright' : 
'\U000000a9', '\\textregistered' : '\U000000ae', '\\textonequarter' : '\U000000bc', '\\textonehalf' : '\U000000bd', '\\textthreequarters' : '\U000000be', '\\textordfeminine' : '\U000000aa', '\\textordmasculine' : '\U000000ba', '\\euro' : '\U000020ac', '\\pounds' : '\U000000a3', '\\yen' : '\U000000a5', '\\textcent' : '\U000000a2', '\\textcurrency' : '\U000000a4', '\\textdegree' : '\U000000b0', } isabelle_symbols = { '\\<zero>' : '\U0001d7ec', '\\<one>' : '\U0001d7ed', '\\<two>' : '\U0001d7ee', '\\<three>' : '\U0001d7ef', '\\<four>' : '\U0001d7f0', '\\<five>' : '\U0001d7f1', '\\<six>' : '\U0001d7f2', '\\<seven>' : '\U0001d7f3', '\\<eight>' : '\U0001d7f4', '\\<nine>' : '\U0001d7f5', '\\<A>' : '\U0001d49c', '\\<B>' : '\U0000212c', '\\<C>' : '\U0001d49e', '\\<D>' : '\U0001d49f', '\\<E>' : '\U00002130', '\\<F>' : '\U00002131', '\\<G>' : '\U0001d4a2', '\\<H>' : '\U0000210b', '\\<I>' : '\U00002110', '\\<J>' : '\U0001d4a5', '\\<K>' : '\U0001d4a6', '\\<L>' : '\U00002112', '\\<M>' : '\U00002133', '\\<N>' : '\U0001d4a9', '\\<O>' : '\U0001d4aa', '\\<P>' : '\U0001d4ab', '\\<Q>' : '\U0001d4ac', '\\<R>' : '\U0000211b', '\\<S>' : '\U0001d4ae', '\\<T>' : '\U0001d4af', '\\<U>' : '\U0001d4b0', '\\<V>' : '\U0001d4b1', '\\<W>' : '\U0001d4b2', '\\<X>' : '\U0001d4b3', '\\<Y>' : '\U0001d4b4', '\\<Z>' : '\U0001d4b5', '\\<a>' : '\U0001d5ba', '\\<b>' : '\U0001d5bb', '\\<c>' : '\U0001d5bc', '\\<d>' : '\U0001d5bd', '\\<e>' : '\U0001d5be', '\\<f>' : '\U0001d5bf', '\\<g>' : '\U0001d5c0', '\\<h>' : '\U0001d5c1', '\\<i>' : '\U0001d5c2', '\\<j>' : '\U0001d5c3', '\\<k>' : '\U0001d5c4', '\\<l>' : '\U0001d5c5', '\\<m>' : '\U0001d5c6', '\\<n>' : '\U0001d5c7', '\\<o>' : '\U0001d5c8', '\\<p>' : '\U0001d5c9', '\\<q>' : '\U0001d5ca', '\\<r>' : '\U0001d5cb', '\\<s>' : '\U0001d5cc', '\\<t>' : '\U0001d5cd', '\\<u>' : '\U0001d5ce', '\\<v>' : '\U0001d5cf', '\\<w>' : '\U0001d5d0', '\\<x>' : '\U0001d5d1', '\\<y>' : '\U0001d5d2', '\\<z>' : '\U0001d5d3', '\\<AA>' : '\U0001d504', '\\<BB>' : '\U0001d505', '\\<CC>' : 
'\U0000212d', '\\<DD>' : '\U0001d507', '\\<EE>' : '\U0001d508', '\\<FF>' : '\U0001d509', '\\<GG>' : '\U0001d50a', '\\<HH>' : '\U0000210c', '\\<II>' : '\U00002111', '\\<JJ>' : '\U0001d50d', '\\<KK>' : '\U0001d50e', '\\<LL>' : '\U0001d50f', '\\<MM>' : '\U0001d510', '\\<NN>' : '\U0001d511', '\\<OO>' : '\U0001d512', '\\<PP>' : '\U0001d513', '\\<QQ>' : '\U0001d514', '\\<RR>' : '\U0000211c', '\\<SS>' : '\U0001d516', '\\<TT>' : '\U0001d517', '\\<UU>' : '\U0001d518', '\\<VV>' : '\U0001d519', '\\<WW>' : '\U0001d51a', '\\<XX>' : '\U0001d51b', '\\<YY>' : '\U0001d51c', '\\<ZZ>' : '\U00002128', '\\<aa>' : '\U0001d51e', '\\<bb>' : '\U0001d51f', '\\<cc>' : '\U0001d520', '\\<dd>' : '\U0001d521', '\\<ee>' : '\U0001d522', '\\<ff>' : '\U0001d523', '\\<gg>' : '\U0001d524', '\\<hh>' : '\U0001d525', '\\<ii>' : '\U0001d526', '\\<jj>' : '\U0001d527', '\\<kk>' : '\U0001d528', '\\<ll>' : '\U0001d529', '\\<mm>' : '\U0001d52a', '\\<nn>' : '\U0001d52b', '\\<oo>' : '\U0001d52c', '\\<pp>' : '\U0001d52d', '\\<qq>' : '\U0001d52e', '\\<rr>' : '\U0001d52f', '\\<ss>' : '\U0001d530', '\\<tt>' : '\U0001d531', '\\<uu>' : '\U0001d532', '\\<vv>' : '\U0001d533', '\\<ww>' : '\U0001d534', '\\<xx>' : '\U0001d535', '\\<yy>' : '\U0001d536', '\\<zz>' : '\U0001d537', '\\<alpha>' : '\U000003b1', '\\<beta>' : '\U000003b2', '\\<gamma>' : '\U000003b3', '\\<delta>' : '\U000003b4', '\\<epsilon>' : '\U000003b5', '\\<zeta>' : '\U000003b6', '\\<eta>' : '\U000003b7', '\\<theta>' : '\U000003b8', '\\<iota>' : '\U000003b9', '\\<kappa>' : '\U000003ba', '\\<lambda>' : '\U000003bb', '\\<mu>' : '\U000003bc', '\\<nu>' : '\U000003bd', '\\<xi>' : '\U000003be', '\\<pi>' : '\U000003c0', '\\<rho>' : '\U000003c1', '\\<sigma>' : '\U000003c3', '\\<tau>' : '\U000003c4', '\\<upsilon>' : '\U000003c5', '\\<phi>' : '\U000003c6', '\\<chi>' : '\U000003c7', '\\<psi>' : '\U000003c8', '\\<omega>' : '\U000003c9', '\\<Gamma>' : '\U00000393', '\\<Delta>' : '\U00000394', '\\<Theta>' : '\U00000398', '\\<Lambda>' : '\U0000039b', '\\<Xi>' : '\U0000039e', 
'\\<Pi>' : '\U000003a0', '\\<Sigma>' : '\U000003a3', '\\<Upsilon>' : '\U000003a5', '\\<Phi>' : '\U000003a6', '\\<Psi>' : '\U000003a8', '\\<Omega>' : '\U000003a9', '\\<bool>' : '\U0001d539', '\\<complex>' : '\U00002102', '\\<nat>' : '\U00002115', '\\<rat>' : '\U0000211a', '\\<real>' : '\U0000211d', '\\<int>' : '\U00002124', '\\<leftarrow>' : '\U00002190', '\\<longleftarrow>' : '\U000027f5', '\\<rightarrow>' : '\U00002192', '\\<longrightarrow>' : '\U000027f6', '\\<Leftarrow>' : '\U000021d0', '\\<Longleftarrow>' : '\U000027f8', '\\<Rightarrow>' : '\U000021d2', '\\<Longrightarrow>' : '\U000027f9', '\\<leftrightarrow>' : '\U00002194', '\\<longleftrightarrow>' : '\U000027f7', '\\<Leftrightarrow>' : '\U000021d4', '\\<Longleftrightarrow>' : '\U000027fa', '\\<mapsto>' : '\U000021a6', '\\<longmapsto>' : '\U000027fc', '\\<midarrow>' : '\U00002500', '\\<Midarrow>' : '\U00002550', '\\<hookleftarrow>' : '\U000021a9', '\\<hookrightarrow>' : '\U000021aa', '\\<leftharpoondown>' : '\U000021bd', '\\<rightharpoondown>' : '\U000021c1', '\\<leftharpoonup>' : '\U000021bc', '\\<rightharpoonup>' : '\U000021c0', '\\<rightleftharpoons>' : '\U000021cc', '\\<leadsto>' : '\U0000219d', '\\<downharpoonleft>' : '\U000021c3', '\\<downharpoonright>' : '\U000021c2', '\\<upharpoonleft>' : '\U000021bf', '\\<upharpoonright>' : '\U000021be', '\\<restriction>' : '\U000021be', '\\<Colon>' : '\U00002237', '\\<up>' : '\U00002191', '\\<Up>' : '\U000021d1', '\\<down>' : '\U00002193', '\\<Down>' : '\U000021d3', '\\<updown>' : '\U00002195', '\\<Updown>' : '\U000021d5', '\\<langle>' : '\U000027e8', '\\<rangle>' : '\U000027e9', '\\<lceil>' : '\U00002308', '\\<rceil>' : '\U00002309', '\\<lfloor>' : '\U0000230a', '\\<rfloor>' : '\U0000230b', '\\<lparr>' : '\U00002987', '\\<rparr>' : '\U00002988', '\\<lbrakk>' : '\U000027e6', '\\<rbrakk>' : '\U000027e7', '\\<lbrace>' : '\U00002983', '\\<rbrace>' : '\U00002984', '\\<guillemotleft>' : '\U000000ab', '\\<guillemotright>' : '\U000000bb', '\\<bottom>' : '\U000022a5', 
'\\<top>' : '\U000022a4', '\\<and>' : '\U00002227', '\\<And>' : '\U000022c0', '\\<or>' : '\U00002228', '\\<Or>' : '\U000022c1', '\\<forall>' : '\U00002200', '\\<exists>' : '\U00002203', '\\<nexists>' : '\U00002204', '\\<not>' : '\U000000ac', '\\<box>' : '\U000025a1', '\\<diamond>' : '\U000025c7', '\\<turnstile>' : '\U000022a2', '\\<Turnstile>' : '\U000022a8', '\\<tturnstile>' : '\U000022a9', '\\<TTurnstile>' : '\U000022ab', '\\<stileturn>' : '\U000022a3', '\\<surd>' : '\U0000221a', '\\<le>' : '\U00002264', '\\<ge>' : '\U00002265', '\\<lless>' : '\U0000226a', '\\<ggreater>' : '\U0000226b', '\\<lesssim>' : '\U00002272', '\\<greatersim>' : '\U00002273', '\\<lessapprox>' : '\U00002a85', '\\<greaterapprox>' : '\U00002a86', '\\<in>' : '\U00002208', '\\<notin>' : '\U00002209', '\\<subset>' : '\U00002282', '\\<supset>' : '\U00002283', '\\<subseteq>' : '\U00002286', '\\<supseteq>' : '\U00002287', '\\<sqsubset>' : '\U0000228f', '\\<sqsupset>' : '\U00002290', '\\<sqsubseteq>' : '\U00002291', '\\<sqsupseteq>' : '\U00002292', '\\<inter>' : '\U00002229', '\\<Inter>' : '\U000022c2', '\\<union>' : '\U0000222a', '\\<Union>' : '\U000022c3', '\\<squnion>' : '\U00002294', '\\<Squnion>' : '\U00002a06', '\\<sqinter>' : '\U00002293', '\\<Sqinter>' : '\U00002a05', '\\<setminus>' : '\U00002216', '\\<propto>' : '\U0000221d', '\\<uplus>' : '\U0000228e', '\\<Uplus>' : '\U00002a04', '\\<noteq>' : '\U00002260', '\\<sim>' : '\U0000223c', '\\<doteq>' : '\U00002250', '\\<simeq>' : '\U00002243', '\\<approx>' : '\U00002248', '\\<asymp>' : '\U0000224d', '\\<cong>' : '\U00002245', '\\<smile>' : '\U00002323', '\\<equiv>' : '\U00002261', '\\<frown>' : '\U00002322', '\\<Join>' : '\U000022c8', '\\<bowtie>' : '\U00002a1d', '\\<prec>' : '\U0000227a', '\\<succ>' : '\U0000227b', '\\<preceq>' : '\U0000227c', '\\<succeq>' : '\U0000227d', '\\<parallel>' : '\U00002225', '\\<bar>' : '\U000000a6', '\\<plusminus>' : '\U000000b1', '\\<minusplus>' : '\U00002213', '\\<times>' : '\U000000d7', '\\<div>' : '\U000000f7', 
'\\<cdot>' : '\U000022c5', '\\<star>' : '\U000022c6', '\\<bullet>' : '\U00002219', '\\<circ>' : '\U00002218', '\\<dagger>' : '\U00002020', '\\<ddagger>' : '\U00002021', '\\<lhd>' : '\U000022b2', '\\<rhd>' : '\U000022b3', '\\<unlhd>' : '\U000022b4', '\\<unrhd>' : '\U000022b5', '\\<triangleleft>' : '\U000025c3', '\\<triangleright>' : '\U000025b9', '\\<triangle>' : '\U000025b3', '\\<triangleq>' : '\U0000225c', '\\<oplus>' : '\U00002295', '\\<Oplus>' : '\U00002a01', '\\<otimes>' : '\U00002297', '\\<Otimes>' : '\U00002a02', '\\<odot>' : '\U00002299', '\\<Odot>' : '\U00002a00', '\\<ominus>' : '\U00002296', '\\<oslash>' : '\U00002298', '\\<dots>' : '\U00002026', '\\<cdots>' : '\U000022ef', '\\<Sum>' : '\U00002211', '\\<Prod>' : '\U0000220f', '\\<Coprod>' : '\U00002210', '\\<infinity>' : '\U0000221e', '\\<integral>' : '\U0000222b', '\\<ointegral>' : '\U0000222e', '\\<clubsuit>' : '\U00002663', '\\<diamondsuit>' : '\U00002662', '\\<heartsuit>' : '\U00002661', '\\<spadesuit>' : '\U00002660', '\\<aleph>' : '\U00002135', '\\<emptyset>' : '\U00002205', '\\<nabla>' : '\U00002207', '\\<partial>' : '\U00002202', '\\<flat>' : '\U0000266d', '\\<natural>' : '\U0000266e', '\\<sharp>' : '\U0000266f', '\\<angle>' : '\U00002220', '\\<copyright>' : '\U000000a9', '\\<registered>' : '\U000000ae', '\\<hyphen>' : '\U000000ad', '\\<inverse>' : '\U000000af', '\\<onequarter>' : '\U000000bc', '\\<onehalf>' : '\U000000bd', '\\<threequarters>' : '\U000000be', '\\<ordfeminine>' : '\U000000aa', '\\<ordmasculine>' : '\U000000ba', '\\<section>' : '\U000000a7', '\\<paragraph>' : '\U000000b6', '\\<exclamdown>' : '\U000000a1', '\\<questiondown>' : '\U000000bf', '\\<euro>' : '\U000020ac', '\\<pounds>' : '\U000000a3', '\\<yen>' : '\U000000a5', '\\<cent>' : '\U000000a2', '\\<currency>' : '\U000000a4', '\\<degree>' : '\U000000b0', '\\<amalg>' : '\U00002a3f', '\\<mho>' : '\U00002127', '\\<lozenge>' : '\U000025ca', '\\<wp>' : '\U00002118', '\\<wrong>' : '\U00002240', '\\<struct>' : '\U000022c4', '\\<acute>' : 
'\U000000b4', '\\<index>' : '\U00000131', '\\<dieresis>' : '\U000000a8', '\\<cedilla>' : '\U000000b8', '\\<hungarumlaut>' : '\U000002dd', '\\<some>' : '\U000003f5', '\\<newline>' : '\U000023ce', '\\<open>' : '\U00002039', '\\<close>' : '\U0000203a', '\\<here>' : '\U00002302', '\\<^sub>' : '\U000021e9', '\\<^sup>' : '\U000021e7', '\\<^bold>' : '\U00002759', '\\<^bsub>' : '\U000021d8', '\\<^esub>' : '\U000021d9', '\\<^bsup>' : '\U000021d7', '\\<^esup>' : '\U000021d6', } lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols} def __init__(self, **options): Filter.__init__(self, **options) lang = get_choice_opt(options, 'lang', ['isabelle', 'latex'], 'isabelle') self.symbols = self.lang_map[lang] def filter(self, lexer, stream): for ttype, value in stream: if value in self.symbols: yield ttype, self.symbols[value] else: yield ttype, value class KeywordCaseFilter(Filter): """Convert keywords to lowercase or uppercase or capitalize them, which means first letter uppercase, rest lowercase. This can be useful e.g. if you highlight Pascal code and want to adapt the code to your styleguide. Options accepted: `case` : string The casing to convert keywords to. Must be one of ``'lower'``, ``'upper'`` or ``'capitalize'``. The default is ``'lower'``. """ def __init__(self, **options): Filter.__init__(self, **options) case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower') self.convert = getattr(str, case) def filter(self, lexer, stream): for ttype, value in stream: if ttype in Keyword: yield ttype, self.convert(value) else: yield ttype, value class NameHighlightFilter(Filter): """Highlight a normal Name (and Name.*) token with a different token type. Example:: filter = NameHighlightFilter( names=['foo', 'bar', 'baz'], tokentype=Name.Function, ) This would highlight the names "foo", "bar" and "baz" as functions. `Name.Function` is the default token type. 
Options accepted: `names` : list of strings A list of names that should be given the different token type. There is no default. `tokentype` : TokenType or string A token type or a string containing a token type name that is used for highlighting the strings in `names`. The default is `Name.Function`. """ def __init__(self, **options): Filter.__init__(self, **options) self.names = set(get_list_opt(options, 'names', [])) tokentype = options.get('tokentype') if tokentype: self.tokentype = string_to_tokentype(tokentype) else: self.tokentype = Name.Function def filter(self, lexer, stream): for ttype, value in stream: if ttype in Name and value in self.names: yield self.tokentype, value else: yield ttype, value class ErrorToken(Exception): pass class RaiseOnErrorTokenFilter(Filter): """Raise an exception when the lexer generates an error token. Options accepted: `excclass` : Exception class The exception class to raise. The default is `pygments.filters.ErrorToken`. .. versionadded:: 0.8 """ def __init__(self, **options): Filter.__init__(self, **options) self.exception = options.get('excclass', ErrorToken) try: # issubclass() will raise TypeError if first argument is not a class if not issubclass(self.exception, Exception): raise TypeError except TypeError: raise OptionError('excclass option is not an exception class') def filter(self, lexer, stream): for ttype, value in stream: if ttype is Error: raise self.exception(value) yield ttype, value class VisibleWhitespaceFilter(Filter): """Convert tabs, newlines and/or spaces to visible characters. Options accepted: `spaces` : string or bool If this is a one-character string, spaces will be replaces by this string. If it is another true value, spaces will be replaced by ``·`` (unicode MIDDLE DOT). If it is a false value, spaces will not be replaced. The default is ``False``. `tabs` : string or bool The same as for `spaces`, but the default replacement character is ``»`` (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). 
The default value is ``False``. Note: this will not work if the `tabsize` option for the lexer is nonzero, as tabs will already have been expanded then. `tabsize` : int If tabs are to be replaced by this filter (see the `tabs` option), this is the total number of characters that a tab should be expanded to. The default is ``8``. `newlines` : string or bool The same as for `spaces`, but the default replacement character is ``¶`` (unicode PILCROW SIGN). The default value is ``False``. `wstokentype` : bool If true, give whitespace the special `Whitespace` token type. This allows styling the visible whitespace differently (e.g. greyed out), but it can disrupt background colors. The default is ``True``. .. versionadded:: 0.8 """ def __init__(self, **options): Filter.__init__(self, **options) for name, default in [('spaces', '·'), ('tabs', '»'), ('newlines', '¶')]: opt = options.get(name, False) if isinstance(opt, str) and len(opt) == 1: setattr(self, name, opt) else: setattr(self, name, (opt and default or '')) tabsize = get_int_opt(options, 'tabsize', 8) if self.tabs: self.tabs += ' ' * (tabsize - 1) if self.newlines: self.newlines += '\n' self.wstt = get_bool_opt(options, 'wstokentype', True) def filter(self, lexer, stream): if self.wstt: spaces = self.spaces or ' ' tabs = self.tabs or '\t' newlines = self.newlines or '\n' regex = re.compile(r'\s') def replacefunc(wschar): if wschar == ' ': return spaces elif wschar == '\t': return tabs elif wschar == '\n': return newlines return wschar for ttype, value in stream: yield from _replace_special(ttype, value, regex, Whitespace, replacefunc) else: spaces, tabs, newlines = self.spaces, self.tabs, self.newlines # simpler processing for ttype, value in stream: if spaces: value = value.replace(' ', spaces) if tabs: value = value.replace('\t', tabs) if newlines: value = value.replace('\n', newlines) yield ttype, value class GobbleFilter(Filter): """Gobbles source code lines (eats initial characters). 
This filter drops the first ``n`` characters off every line of code. This may be useful when the source code fed to the lexer is indented by a fixed amount of space that isn't desired in the output. Options accepted: `n` : int The number of characters to gobble. .. versionadded:: 1.2 """ def __init__(self, **options): Filter.__init__(self, **options) self.n = get_int_opt(options, 'n', 0) def gobble(self, value, left): if left < len(value): return value[left:], 0 else: return '', left - len(value) def filter(self, lexer, stream): n = self.n left = n # How many characters left to gobble. for ttype, value in stream: # Remove ``left`` tokens from first line, ``n`` from all others. parts = value.split('\n') (parts[0], left) = self.gobble(parts[0], left) for i in range(1, len(parts)): (parts[i], left) = self.gobble(parts[i], n) value = '\n'.join(parts) if value != '': yield ttype, value class TokenMergeFilter(Filter): """Merges consecutive tokens with the same token type in the output stream of a lexer. .. versionadded:: 1.2 """ def __init__(self, **options): Filter.__init__(self, **options) def filter(self, lexer, stream): current_type = None current_value = None for ttype, value in stream: if ttype is current_type: current_value += value else: if current_type is not None: yield current_type, current_value current_type = ttype current_value = value if current_type is not None: yield current_type, current_value FILTERS = { 'codetagify': CodeTagFilter, 'keywordcase': KeywordCaseFilter, 'highlight': NameHighlightFilter, 'raiseonerror': RaiseOnErrorTokenFilter, 'whitespace': VisibleWhitespaceFilter, 'gobble': GobbleFilter, 'tokenmerge': TokenMergeFilter, 'symbols': SymbolFilter, }
40,332
Python
41.861849
81
0.38017
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/python.py
""" pygments.lexers.python ~~~~~~~~~~~~~~~~~~~~~~ Lexers for Python and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re import keyword from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \ default, words, combined, do_insertions, this, line_re from pygments.util import get_bool_opt, shebang_matches from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Other, Error, Whitespace from pygments import unistring as uni __all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', 'Python2Lexer', 'Python2TracebackLexer', 'CythonLexer', 'DgLexer', 'NumPyLexer'] class PythonLexer(RegexLexer): """ For Python source code (version 3.x). .. versionadded:: 0.10 .. versionchanged:: 2.5 This is now the default ``PythonLexer``. It is still available as the alias ``Python3Lexer``. """ name = 'Python' url = 'http://www.python.org' aliases = ['python', 'py', 'sage', 'python3', 'py3'] filenames = [ '*.py', '*.pyw', # Type stubs '*.pyi', # Jython '*.jy', # Sage '*.sage', # SCons '*.sc', 'SConstruct', 'SConscript', # Skylark/Starlark (used by Bazel, Buck, and Pants) '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', # Twisted Application infrastructure '*.tac', ] mimetypes = ['text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3'] uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) def innerstring_rules(ttype): return [ # the old style '%s' % (...) string formatting (still valid in Py3) (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' '[hlL]?[E-GXc-giorsaux%]', String.Interpol), # the new style '{}'.format(...) string formatting (r'\{' r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name r'(\![sra])?' # conversion r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' 
r'\}', String.Interpol), # backslashes, quotes and formatting signs must be parsed one at a time (r'[^\\\'"%{\n]+', ttype), (r'[\'"\\]', ttype), # unhandled string formatting sign (r'%|(\{{1,2})', ttype) # newlines are an error (use "nl" state) ] def fstring_rules(ttype): return [ # Assuming that a '}' is the closing brace after format specifier. # Sadly, this means that we won't detect syntax error. But it's # more important to parse correct syntax correctly, than to # highlight invalid syntax. (r'\}', String.Interpol), (r'\{', String.Interpol, 'expr-inside-fstring'), # backslashes, quotes and formatting signs must be parsed one at a time (r'[^\\\'"{}\n]+', ttype), (r'[\'"\\]', ttype), # newlines are an error (use "nl" state) ] tokens = { 'root': [ (r'\n', Whitespace), (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', bygroups(Whitespace, String.Affix, String.Doc)), (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", bygroups(Whitespace, String.Affix, String.Doc)), (r'\A#!.+$', Comment.Hashbang), (r'#.*$', Comment.Single), (r'\\\n', Text), (r'\\', Text), include('keywords'), include('soft-keywords'), (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'fromimport'), (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'import'), include('expr'), ], 'expr': [ # raw f-strings ('(?i)(rf|fr)(""")', bygroups(String.Affix, String.Double), combined('rfstringescape', 'tdqf')), ("(?i)(rf|fr)(''')", bygroups(String.Affix, String.Single), combined('rfstringescape', 'tsqf')), ('(?i)(rf|fr)(")', bygroups(String.Affix, String.Double), combined('rfstringescape', 'dqf')), ("(?i)(rf|fr)(')", bygroups(String.Affix, String.Single), combined('rfstringescape', 'sqf')), # non-raw f-strings ('([fF])(""")', bygroups(String.Affix, String.Double), combined('fstringescape', 'tdqf')), ("([fF])(''')", bygroups(String.Affix, String.Single), 
combined('fstringescape', 'tsqf')), ('([fF])(")', bygroups(String.Affix, String.Double), combined('fstringescape', 'dqf')), ("([fF])(')", bygroups(String.Affix, String.Single), combined('fstringescape', 'sqf')), # raw bytes and strings ('(?i)(rb|br|r)(""")', bygroups(String.Affix, String.Double), 'tdqs'), ("(?i)(rb|br|r)(''')", bygroups(String.Affix, String.Single), 'tsqs'), ('(?i)(rb|br|r)(")', bygroups(String.Affix, String.Double), 'dqs'), ("(?i)(rb|br|r)(')", bygroups(String.Affix, String.Single), 'sqs'), # non-raw strings ('([uU]?)(""")', bygroups(String.Affix, String.Double), combined('stringescape', 'tdqs')), ("([uU]?)(''')", bygroups(String.Affix, String.Single), combined('stringescape', 'tsqs')), ('([uU]?)(")', bygroups(String.Affix, String.Double), combined('stringescape', 'dqs')), ("([uU]?)(')", bygroups(String.Affix, String.Single), combined('stringescape', 'sqs')), # non-raw bytes ('([bB])(""")', bygroups(String.Affix, String.Double), combined('bytesescape', 'tdqs')), ("([bB])(''')", bygroups(String.Affix, String.Single), combined('bytesescape', 'tsqs')), ('([bB])(")', bygroups(String.Affix, String.Double), combined('bytesescape', 'dqs')), ("([bB])(')", bygroups(String.Affix, String.Single), combined('bytesescape', 'sqs')), (r'[^\S\n]+', Text), include('numbers'), (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), (r'[]{}:(),;[]', Punctuation), (r'(in|is|and|or|not)\b', Operator.Word), include('expr-keywords'), include('builtins'), include('magicfuncs'), include('magicvars'), include('name'), ], 'expr-inside-fstring': [ (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), # without format specifier (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) r'(\![sraf])?' # conversion r'\}', String.Interpol, '#pop'), # with format specifier # we'll catch the remaining '}' in the outer scope (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) r'(\![sraf])?' 
# conversion r':', String.Interpol, '#pop'), (r'\s+', Whitespace), # allow new lines include('expr'), ], 'expr-inside-fstring-inner': [ (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), (r'[])}]', Punctuation, '#pop'), (r'\s+', Whitespace), # allow new lines include('expr'), ], 'expr-keywords': [ # Based on https://docs.python.org/3/reference/expressions.html (words(( 'async for', 'await', 'else', 'for', 'if', 'lambda', 'yield', 'yield from'), suffix=r'\b'), Keyword), (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), ], 'keywords': [ (words(( 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', 'yield from', 'as', 'with'), suffix=r'\b'), Keyword), (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), ], 'soft-keywords': [ # `match`, `case` and `_` soft keywords (r'(^[ \t]*)' # at beginning of line + possible indentation r'(match|case)\b' # a possible keyword r'(?![ \t]*(?:' # not followed by... 
r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't r'|'.join(keyword.kwlist) + r')\b))', # pattern matching bygroups(Text, Keyword), 'soft-keywords-inner'), ], 'soft-keywords-inner': [ # optional `_` keyword (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)), default('#pop') ], 'builtins': [ (words(( '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'breakpoint', 'bytes', 'chr', 'classmethod', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin), (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo), (words(( 'ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning', 'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', 'NotImplementedError', 'OSError', 'OverflowError', 'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning', 'RuntimeError', 'RuntimeWarning', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning', 'WindowsError', 'ZeroDivisionError', # new builtin exceptions from PEP 3151 'BlockingIOError', 
'ChildProcessError', 'ConnectionError', 'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError', 'ConnectionResetError', 'FileExistsError', 'FileNotFoundError', 'InterruptedError', 'IsADirectoryError', 'NotADirectoryError', 'PermissionError', 'ProcessLookupError', 'TimeoutError', # others new in Python 3 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError', 'EncodingWarning'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Exception), ], 'magicfuncs': [ (words(( '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__', '__and__', '__anext__', '__await__', '__bool__', '__bytes__', '__call__', '__complex__', '__contains__', '__del__', '__delattr__', '__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__', '__eq__', '__exit__', '__float__', '__floordiv__', '__format__', '__ge__', '__get__', '__getattr__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__', '__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__', '__imul__', '__index__', '__init__', '__instancecheck__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__', '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', '__next__', '__or__', '__pos__', '__pow__', '__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__', '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__', '__str__', '__sub__', '__subclasscheck__', '__truediv__', '__xor__'), suffix=r'\b'), Name.Function.Magic), ], 'magicvars': [ (words(( '__annotations__', '__bases__', '__class__', '__closure__', '__code__', '__defaults__', '__dict__', '__doc__', '__file__', '__func__', '__globals__', '__kwdefaults__', '__module__', '__mro__', '__name__', 
'__objclass__', '__qualname__', '__self__', '__slots__', '__weakref__'), suffix=r'\b'), Name.Variable.Magic), ], 'numbers': [ (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)' r'([eE][+-]?\d(?:_?\d)*)?', Number.Float), (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float), (r'0[oO](?:_?[0-7])+', Number.Oct), (r'0[bB](?:_?[01])+', Number.Bin), (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex), (r'\d(?:_?\d)*', Number.Integer), ], 'name': [ (r'@' + uni_name, Name.Decorator), (r'@', Operator), # new matrix multiplication operator (uni_name, Name), ], 'funcname': [ include('magicfuncs'), (uni_name, Name.Function, '#pop'), default('#pop'), ], 'classname': [ (uni_name, Name.Class, '#pop'), ], 'import': [ (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)), (r'\.', Name.Namespace), (uni_name, Name.Namespace), (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)), default('#pop') # all else: go back ], 'fromimport': [ (r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'), (r'\.', Name.Namespace), # if None occurs here, it's "raise x from None", since None can # never be a module name (r'None\b', Name.Builtin.Pseudo, '#pop'), (uni_name, Name.Namespace), default('#pop'), ], 'rfstringescape': [ (r'\{\{', String.Escape), (r'\}\}', String.Escape), ], 'fstringescape': [ include('rfstringescape'), include('stringescape'), ], 'bytesescape': [ (r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) ], 'stringescape': [ (r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape), include('bytesescape') ], 'fstrings-single': fstring_rules(String.Single), 'fstrings-double': fstring_rules(String.Double), 'strings-single': innerstring_rules(String.Single), 'strings-double': innerstring_rules(String.Double), 'dqf': [ (r'"', String.Double, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings include('fstrings-double') ], 'sqf': [ (r"'", String.Single, '#pop'), (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings 
include('fstrings-single') ], 'dqs': [ (r'"', String.Double, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings include('strings-double') ], 'sqs': [ (r"'", String.Single, '#pop'), (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings include('strings-single') ], 'tdqf': [ (r'"""', String.Double, '#pop'), include('fstrings-double'), (r'\n', String.Double) ], 'tsqf': [ (r"'''", String.Single, '#pop'), include('fstrings-single'), (r'\n', String.Single) ], 'tdqs': [ (r'"""', String.Double, '#pop'), include('strings-double'), (r'\n', String.Double) ], 'tsqs': [ (r"'''", String.Single, '#pop'), include('strings-single'), (r'\n', String.Single) ], } def analyse_text(text): return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \ 'import ' in text[:1000] Python3Lexer = PythonLexer class Python2Lexer(RegexLexer): """ For Python 2.x source code. .. versionchanged:: 2.5 This class has been renamed from ``PythonLexer``. ``PythonLexer`` now refers to the Python 3 variant. File name patterns like ``*.py`` have been moved to Python 3 as well. """ name = 'Python 2.x' url = 'http://www.python.org' aliases = ['python2', 'py2'] filenames = [] # now taken over by PythonLexer (3.x) mimetypes = ['text/x-python2', 'application/x-python2'] def innerstring_rules(ttype): return [ # the old style '%s' % (...) string formatting (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' 
'[hlL]?[E-GXc-giorsux%]', String.Interpol), # backslashes, quotes and formatting signs must be parsed one at a time (r'[^\\\'"%\n]+', ttype), (r'[\'"\\]', ttype), # unhandled string formatting sign (r'%', ttype), # newlines are an error (use "nl" state) ] tokens = { 'root': [ (r'\n', Whitespace), (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', bygroups(Whitespace, String.Affix, String.Doc)), (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", bygroups(Whitespace, String.Affix, String.Doc)), (r'[^\S\n]+', Text), (r'\A#!.+$', Comment.Hashbang), (r'#.*$', Comment.Single), (r'[]{}:(),;[]', Punctuation), (r'\\\n', Text), (r'\\', Text), (r'(in|is|and|or|not)\b', Operator.Word), (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), include('keywords'), (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'fromimport'), (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'import'), include('builtins'), include('magicfuncs'), include('magicvars'), include('backtick'), ('([rR]|[uUbB][rR]|[rR][uUbB])(""")', bygroups(String.Affix, String.Double), 'tdqs'), ("([rR]|[uUbB][rR]|[rR][uUbB])(''')", bygroups(String.Affix, String.Single), 'tsqs'), ('([rR]|[uUbB][rR]|[rR][uUbB])(")', bygroups(String.Affix, String.Double), 'dqs'), ("([rR]|[uUbB][rR]|[rR][uUbB])(')", bygroups(String.Affix, String.Single), 'sqs'), ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), combined('stringescape', 'tdqs')), ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), combined('stringescape', 'tsqs')), ('([uUbB]?)(")', bygroups(String.Affix, String.Double), combined('stringescape', 'dqs')), ("([uUbB]?)(')", bygroups(String.Affix, String.Single), combined('stringescape', 'sqs')), include('name'), include('numbers'), ], 'keywords': [ (words(( 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass', 'print', 
'raise', 'return', 'try', 'while', 'yield', 'yield from', 'as', 'with'), suffix=r'\b'), Keyword), ], 'builtins': [ (words(( '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin), (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls' r')\b', Name.Builtin.Pseudo), (words(( 'ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning', 'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning', 'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Exception), ], 'magicfuncs': [ (words(( '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__', 
'__complex__', '__contains__', '__del__', '__delattr__', '__delete__', '__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__', '__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__', '__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__', '__index__', '__init__', '__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__', '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__', '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', '__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__', '__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__', '__unicode__', '__xor__'), suffix=r'\b'), Name.Function.Magic), ], 'magicvars': [ (words(( '__bases__', '__class__', '__closure__', '__code__', '__defaults__', '__dict__', '__doc__', '__file__', '__func__', '__globals__', '__metaclass__', '__module__', '__mro__', '__name__', '__self__', '__slots__', '__weakref__'), suffix=r'\b'), Name.Variable.Magic), ], 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), (r'\d+[eE][+-]?[0-9]+j?', Number.Float), (r'0[0-7]+j?', Number.Oct), (r'0[bB][01]+', Number.Bin), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+j?', Number.Integer) ], 'backtick': [ ('`.*?`', String.Backtick), ], 'name': [ (r'@[\w.]+', Name.Decorator), (r'[a-zA-Z_]\w*', Name), ], 'funcname': [ include('magicfuncs'), (r'[a-zA-Z_]\w*', Name.Function, '#pop'), default('#pop'), ], 'classname': [ 
(r'[a-zA-Z_]\w*', Name.Class, '#pop') ], 'import': [ (r'(?:[ \t]|\\\n)+', Text), (r'as\b', Keyword.Namespace), (r',', Operator), (r'[a-zA-Z_][\w.]*', Name.Namespace), default('#pop') # all else: go back ], 'fromimport': [ (r'(?:[ \t]|\\\n)+', Text), (r'import\b', Keyword.Namespace, '#pop'), # if None occurs here, it's "raise x from None", since None can # never be a module name (r'None\b', Name.Builtin.Pseudo, '#pop'), # sadly, in "raise x from y" y will be highlighted as namespace too (r'[a-zA-Z_.][\w.]*', Name.Namespace), # anything else here also means "raise x from y" and is therefore # not an error default('#pop'), ], 'stringescape': [ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) ], 'strings-single': innerstring_rules(String.Single), 'strings-double': innerstring_rules(String.Double), 'dqs': [ (r'"', String.Double, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings include('strings-double') ], 'sqs': [ (r"'", String.Single, '#pop'), (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings include('strings-single') ], 'tdqs': [ (r'"""', String.Double, '#pop'), include('strings-double'), (r'\n', String.Double) ], 'tsqs': [ (r"'''", String.Single, '#pop'), include('strings-single'), (r'\n', String.Single) ], } def analyse_text(text): return shebang_matches(text, r'pythonw?2(\.\d)?') class PythonConsoleLexer(Lexer): """ For Python console output or doctests, such as: .. sourcecode:: pycon >>> a = 'foo' >>> print a foo >>> 1 / 0 Traceback (most recent call last): File "<stdin>", line 1, in <module> ZeroDivisionError: integer division or modulo by zero Additional options: `python3` Use Python 3 lexer for code. Default is ``True``. .. versionadded:: 1.0 .. versionchanged:: 2.5 Now defaults to ``True``. 
""" name = 'Python console session' aliases = ['pycon'] mimetypes = ['text/x-python-doctest'] def __init__(self, **options): self.python3 = get_bool_opt(options, 'python3', True) Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): if self.python3: pylexer = PythonLexer(**self.options) tblexer = PythonTracebackLexer(**self.options) else: pylexer = Python2Lexer(**self.options) tblexer = Python2TracebackLexer(**self.options) curcode = '' insertions = [] curtb = '' tbindex = 0 tb = 0 for match in line_re.finditer(text): line = match.group() if line.startswith('>>> ') or line.startswith('... '): tb = 0 insertions.append((len(curcode), [(0, Generic.Prompt, line[:4])])) curcode += line[4:] elif line.rstrip() == '...' and not tb: # only a new >>> prompt can end an exception block # otherwise an ellipsis in place of the traceback frames # will be mishandled insertions.append((len(curcode), [(0, Generic.Prompt, '...')])) curcode += line[3:] else: if curcode: yield from do_insertions( insertions, pylexer.get_tokens_unprocessed(curcode)) curcode = '' insertions = [] if (line.startswith('Traceback (most recent call last):') or re.match(' File "[^"]+", line \\d+\\n$', line)): tb = 1 curtb = line tbindex = match.start() elif line == 'KeyboardInterrupt\n': yield match.start(), Name.Class, line elif tb: curtb += line if not (line.startswith(' ') or line.strip() == '...'): tb = 0 for i, t, v in tblexer.get_tokens_unprocessed(curtb): yield tbindex+i, t, v curtb = '' else: yield match.start(), Generic.Output, line if curcode: yield from do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)) if curtb: for i, t, v in tblexer.get_tokens_unprocessed(curtb): yield tbindex+i, t, v class PythonTracebackLexer(RegexLexer): """ For Python 3.x tracebacks, with support for chained exceptions. .. versionadded:: 1.0 .. versionchanged:: 2.5 This is now the default ``PythonTracebackLexer``. It is still available as the alias ``Python3TracebackLexer``. 
""" name = 'Python Traceback' aliases = ['pytb', 'py3tb'] filenames = ['*.pytb', '*.py3tb'] mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback'] tokens = { 'root': [ (r'\n', Whitespace), (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), (r'^During handling of the above exception, another ' r'exception occurred:\n\n', Generic.Traceback), (r'^The above exception was the direct cause of the ' r'following exception:\n\n', Generic.Traceback), (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), (r'^.*\n', Other), ], 'intb': [ (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)), (r'^( File )("[^"]+")(, line )(\d+)(\n)', bygroups(Text, Name.Builtin, Text, Number, Whitespace)), (r'^( )(.+)(\n)', bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'), (r'^([ \t]*)(\.\.\.)(\n)', bygroups(Whitespace, Comment, Whitespace)), # for doctests... (r'^([^:]+)(: )(.+)(\n)', bygroups(Generic.Error, Text, Name, Whitespace), '#pop'), (r'^([a-zA-Z_][\w.]*)(:?\n)', bygroups(Generic.Error, Whitespace), '#pop') ], 'markers': [ # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>` # error locations in Python 3.11+, or single-caret markers # for syntax errors before that. (r'^( {4,})([~^]+)(\n)', bygroups(Whitespace, Punctuation.Marker, Whitespace), '#pop'), default('#pop'), ], } Python3TracebackLexer = PythonTracebackLexer class Python2TracebackLexer(RegexLexer): """ For Python tracebacks. .. versionadded:: 0.7 .. versionchanged:: 2.5 This class has been renamed from ``PythonTracebackLexer``. ``PythonTracebackLexer`` now refers to the Python 3 variant. """ name = 'Python 2.x Traceback' aliases = ['py2tb'] filenames = ['*.py2tb'] mimetypes = ['text/x-python2-traceback'] tokens = { 'root': [ # Cover both (most recent call last) and (innermost last) # The optional ^C allows us to catch keyboard interrupt signals. 
(r'^(\^C)?(Traceback.*\n)', bygroups(Text, Generic.Traceback), 'intb'), # SyntaxError starts with this. (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), (r'^.*\n', Other), ], 'intb': [ (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)), (r'^( File )("[^"]+")(, line )(\d+)(\n)', bygroups(Text, Name.Builtin, Text, Number, Whitespace)), (r'^( )(.+)(\n)', bygroups(Text, using(Python2Lexer), Whitespace), 'marker'), (r'^([ \t]*)(\.\.\.)(\n)', bygroups(Text, Comment, Whitespace)), # for doctests... (r'^([^:]+)(: )(.+)(\n)', bygroups(Generic.Error, Text, Name, Whitespace), '#pop'), (r'^([a-zA-Z_]\w*)(:?\n)', bygroups(Generic.Error, Whitespace), '#pop') ], 'marker': [ # For syntax errors. (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'), default('#pop'), ], } class CythonLexer(RegexLexer): """ For Pyrex and Cython source code. .. versionadded:: 1.1 """ name = 'Cython' url = 'http://cython.org' aliases = ['cython', 'pyx', 'pyrex'] filenames = ['*.pyx', '*.pxd', '*.pxi'] mimetypes = ['text/x-cython', 'application/x-cython'] tokens = { 'root': [ (r'\n', Whitespace), (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)), (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)), (r'[^\S\n]+', Text), (r'#.*$', Comment), (r'[]{}:(),;[]', Punctuation), (r'\\\n', Whitespace), (r'\\', Text), (r'(in|is|and|or|not)\b', Operator.Word), (r'(<)([a-zA-Z0-9.?]+)(>)', bygroups(Punctuation, Keyword.Type, Punctuation)), (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', bygroups(Keyword, Number.Integer, Operator, Name, Operator, Name, Punctuation)), include('keywords'), (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), # (should actually start a block with only cdefs) (r'(cdef)(:)', bygroups(Keyword, Punctuation)), (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), 
(r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), include('builtins'), include('backtick'), ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), ('[uU]?"""', String, combined('stringescape', 'tdqs')), ("[uU]?'''", String, combined('stringescape', 'tsqs')), ('[uU]?"', String, combined('stringescape', 'dqs')), ("[uU]?'", String, combined('stringescape', 'sqs')), include('name'), include('numbers'), ], 'keywords': [ (words(( 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil', 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), Keyword), (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), ], 'builtins': [ (words(( '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint', 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t', 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned', 'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin), (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL' r')\b', Name.Builtin.Pseudo), (words(( 'ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 
'DeprecationWarning', 'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning', 'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Exception), ], 'numbers': [ (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'0\d+', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+', Number.Integer) ], 'backtick': [ ('`.*?`', String.Backtick), ], 'name': [ (r'@\w+', Name.Decorator), (r'[a-zA-Z_]\w*', Name), ], 'funcname': [ (r'[a-zA-Z_]\w*', Name.Function, '#pop') ], 'cdef': [ (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved), (r'(struct|enum|union|class)\b', Keyword), (r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)', bygroups(Name.Function, Text), '#pop'), (r'([a-zA-Z_]\w*)(\s*)(,)', bygroups(Name.Function, Text, Punctuation)), (r'from\b', Keyword, '#pop'), (r'as\b', Keyword), (r':', Punctuation, '#pop'), (r'(?=["\'])', Text, '#pop'), (r'[a-zA-Z_]\w*', Keyword.Type), (r'.', Text), ], 'classname': [ (r'[a-zA-Z_]\w*', Name.Class, '#pop') ], 'import': [ (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)), (r'[a-zA-Z_][\w.]*', Name.Namespace), (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)), default('#pop') # all else: go back ], 'fromimport': [ (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'), (r'[a-zA-Z_.][\w.]*', Name.Namespace), # ``cdef foo from "header"``, or ``for foo from 0 
< i < 10`` default('#pop'), ], 'stringescape': [ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) ], 'strings': [ (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' '[hlL]?[E-GXc-giorsux%]', String.Interpol), (r'[^\\\'"%\n]+', String), # quotes, percents and backslashes must be parsed one at a time (r'[\'"\\]', String), # unhandled string formatting sign (r'%', String) # newlines are an error (use "nl" state) ], 'nl': [ (r'\n', String) ], 'dqs': [ (r'"', String, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), include('strings'), include('nl') ], 'tsqs': [ (r"'''", String, '#pop'), include('strings'), include('nl') ], } class DgLexer(RegexLexer): """ Lexer for dg, a functional and object-oriented programming language running on the CPython 3 VM. .. 
versionadded:: 1.6 """ name = 'dg' aliases = ['dg'] filenames = ['*.dg'] mimetypes = ['text/x-dg'] tokens = { 'root': [ (r'\s+', Text), (r'#.*?$', Comment.Single), (r'(?i)0b[01]+', Number.Bin), (r'(?i)0o[0-7]+', Number.Oct), (r'(?i)0x[0-9a-f]+', Number.Hex), (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float), (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float), (r'(?i)[+-]?[0-9]+j?', Number.Integer), (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')), (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')), (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')), (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')), (r"`\w+'*`", Operator), (r'\b(and|in|is|or|where)\b', Operator.Word), (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator), (words(( 'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'', 'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object', 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', 'super', 'tuple', 'tuple\'', 'type'), prefix=r'(?<!\.)', suffix=r'(?![\'\w])'), Name.Builtin), (words(( '__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile', 'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate', 'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst', 'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init', 'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len', 'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow', 'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd', 'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'(?![\'\w])'), Name.Builtin), (r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])", Name.Builtin.Pseudo), (r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])", Name.Exception), (r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|" 
r"SystemExit)(?!['\w])", Name.Exception), (r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|" r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved), (r"[A-Z_]+'*(?!['\w])", Name), (r"[A-Z]\w+'*(?!['\w])", Keyword.Type), (r"\w+'*", Name), (r'[()]', Punctuation), (r'.', Error), ], 'stringescape': [ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) ], 'string': [ (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' '[hlL]?[E-GXc-giorsux%]', String.Interpol), (r'[^\\\'"%\n]+', String), # quotes, percents and backslashes must be parsed one at a time (r'[\'"\\]', String), # unhandled string formatting sign (r'%', String), (r'\n', String) ], 'dqs': [ (r'"', String, '#pop') ], 'sqs': [ (r"'", String, '#pop') ], 'tdqs': [ (r'"""', String, '#pop') ], 'tsqs': [ (r"'''", String, '#pop') ], } class NumPyLexer(PythonLexer): """ A Python lexer recognizing Numerical Python builtins. .. versionadded:: 0.10 """ name = 'NumPy' url = 'https://numpy.org/' aliases = ['numpy'] # override the mimetypes to not inherit them from python mimetypes = [] filenames = [] EXTRA_KEYWORDS = { 'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose', 'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append', 'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal', 'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange', 'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray', 'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett', 'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman', 'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_', 'can_cast', 'ceil', 'choose', 
'clip', 'column_stack', 'common_type', 'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate', 'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov', 'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate', 'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide', 'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye', 'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill', 'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud', 'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer', 'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring', 'generic', 'get_array_wrap', 'get_include', 'get_numarray_include', 'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize', 'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater', 'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram', 'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0', 'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info', 'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d', 'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj', 'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf', 'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_', 'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_', 'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort', 'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace', 'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype', 'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min', 'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum', 'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 
'newbuffer', 'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones', 'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload', 'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', 'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers', 'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close', 'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require', 'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll', 'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_', 'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select', 'set_numeric_ops', 'set_printoptions', 'set_string_function', 'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj', 'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape', 'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh', 'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source', 'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std', 'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot', 'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose', 'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict', 'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index', 'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises', 'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like' } def get_tokens_unprocessed(self, text): for index, token, value in \ PythonLexer.get_tokens_unprocessed(self, text): if token is Name and value in self.EXTRA_KEYWORDS: yield index, Keyword.Pseudo, value else: yield index, token, value def analyse_text(text): ltext = text[:1000] return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or 'import ' in ltext) \ and ('import numpy' in ltext or 'from numpy import' in ltext)
53,524
Python
43.419087
97
0.455347
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/smithy.py
""" pygments.lexers.smithy ~~~~~~~~~~~~~~~~~~~~~~ Lexers for the Smithy IDL. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Text, Comment, Keyword, Name, String, \ Number, Whitespace, Punctuation __all__ = ['SmithyLexer'] class SmithyLexer(RegexLexer): """ For Smithy IDL .. versionadded:: 2.10 """ name = 'Smithy' url = 'https://awslabs.github.io/smithy/' filenames = ['*.smithy'] aliases = ['smithy'] unquoted = r'[A-Za-z0-9_\.#$-]+' identifier = r"[A-Za-z0-9_\.#$-]+" simple_shapes = ( 'use', 'byte', 'short', 'integer', 'long', 'float', 'document', 'double', 'bigInteger', 'bigDecimal', 'boolean', 'blob', 'string', 'timestamp', ) aggregate_shapes = ( 'apply', 'list', 'map', 'set', 'structure', 'union', 'resource', 'operation', 'service', 'trait' ) tokens = { 'root': [ (r'///.*$', Comment.Multiline), (r'//.*$', Comment), (r'@[0-9a-zA-Z\.#-]*', Name.Decorator), (r'(=)', Name.Decorator), (r'^(\$version)(:)(.+)', bygroups(Keyword.Declaration, Name.Decorator, Name.Class)), (r'^(namespace)(\s+' + identifier + r')\b', bygroups(Keyword.Declaration, Name.Class)), (words(simple_shapes, prefix=r'^', suffix=r'(\s+' + identifier + r')\b'), bygroups(Keyword.Declaration, Name.Class)), (words(aggregate_shapes, prefix=r'^', suffix=r'(\s+' + identifier + r')'), bygroups(Keyword.Declaration, Name.Class)), (r'^(metadata)(\s+)((?:\S+)|(?:\"[^"]+\"))(\s*)(=)', bygroups(Keyword.Declaration, Whitespace, Name.Class, Whitespace, Name.Decorator)), (r"(true|false|null)", Keyword.Constant), (r"(-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)", Number), (identifier + ":", Name.Label), (identifier, Name.Variable.Class), (r'\[', Text, "#push"), (r'\]', Text, "#pop"), (r'\(', Text, "#push"), (r'\)', Text, "#pop"), (r'\{', Text, "#push"), (r'\}', Text, "#pop"), (r'"{3}(\\\\|\n|\\")*"{3}', String.Doc), (r'"(\\\\|\n|\\"|[^"])*"', String.Double), 
(r"'(\\\\|\n|\\'|[^'])*'", String.Single), (r'[:,]+', Punctuation), (r'\s+', Whitespace), ] }
2,660
Python
32.683544
75
0.457143
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/procfile.py
""" pygments.lexers.procfile ~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Procfile file format. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups from pygments.token import Name, Number, String, Text, Punctuation __all__ = ["ProcfileLexer"] class ProcfileLexer(RegexLexer): """ Lexer for Procfile file format. The format is used to run processes on Heroku or is used by Foreman or Honcho tools. .. versionadded:: 2.10 """ name = 'Procfile' url = 'https://devcenter.heroku.com/articles/procfile#procfile-format' aliases = ['procfile'] filenames = ['Procfile'] tokens = { 'root': [ (r'^([a-z]+)(:)', bygroups(Name.Label, Punctuation)), (r'\s+', Text.Whitespace), (r'"[^"]*"', String), (r"'[^']*'", String), (r'[0-9]+', Number.Integer), (r'\$[a-zA-Z_][\w]*', Name.Variable), (r'(\w+)(=)(\w+)', bygroups(Name.Variable, Punctuation, String)), (r'([\w\-\./]+)', Text), ], }
1,156
Python
25.906976
77
0.544118
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/roboconf.py
""" pygments.lexers.roboconf ~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Roboconf DSL. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words, re from pygments.token import Text, Operator, Keyword, Name, Comment __all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer'] class RoboconfGraphLexer(RegexLexer): """ Lexer for Roboconf graph files. .. versionadded:: 2.1 """ name = 'Roboconf Graph' aliases = ['roboconf-graph'] filenames = ['*.graph'] flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ # Skip white spaces (r'\s+', Text), # There is one operator (r'=', Operator), # Keywords (words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword), (words(( 'installer', 'extends', 'exports', 'imports', 'facets', 'children'), suffix=r'\s*:?', prefix=r'\b'), Name), # Comments (r'#.*\n', Comment), # Default (r'[^#]', Text), (r'.*\n', Text) ] } class RoboconfInstancesLexer(RegexLexer): """ Lexer for Roboconf instances files. .. versionadded:: 2.1 """ name = 'Roboconf Instances' aliases = ['roboconf-instances'] filenames = ['*.instances'] flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ # Skip white spaces (r'\s+', Text), # Keywords (words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword), (words(('name', 'count'), suffix=r's*:?', prefix=r'\b'), Name), (r'\s*[\w.-]+\s*:', Name), # Comments (r'#.*\n', Comment), # Default (r'[^#]', Text), (r'.*\n', Text) ] }
1,962
Python
22.939024
87
0.487258
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_scheme_builtins.py
""" pygments.lexers._scheme_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Scheme builtins. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # Autogenerated by external/scheme-builtins-generator.scm # using Guile 3.0.5.130-5a1e7. scheme_keywords = { "*unspecified*", "...", "=>", "@", "@@", "_", "add-to-load-path", "and", "begin", "begin-deprecated", "case", "case-lambda", "case-lambda*", "cond", "cond-expand", "current-filename", "current-source-location", "debug-set!", "define", "define*", "define-inlinable", "define-library", "define-macro", "define-module", "define-once", "define-option-interface", "define-private", "define-public", "define-record-type", "define-syntax", "define-syntax-parameter", "define-syntax-rule", "define-values", "defmacro", "defmacro-public", "delay", "do", "else", "eval-when", "export", "export!", "export-syntax", "false-if-exception", "identifier-syntax", "if", "import", "include", "include-ci", "include-from-path", "include-library-declarations", "lambda", "lambda*", "let", "let*", "let*-values", "let-syntax", "let-values", "letrec", "letrec*", "letrec-syntax", "library", "load", "match", "match-lambda", "match-lambda*", "match-let", "match-let*", "match-letrec", "or", "parameterize", "print-set!", "quasiquote", "quasisyntax", "quote", "quote-syntax", "re-export", "re-export-syntax", "read-set!", "require-extension", "set!", "start-stack", "syntax", "syntax-case", "syntax-error", "syntax-parameterize", "syntax-rules", "unless", "unquote", "unquote-splicing", "unsyntax", "unsyntax-splicing", "use-modules", "when", "while", "with-ellipsis", "with-fluids", "with-syntax", "λ", } scheme_builtins = { "$sc-dispatch", "%char-set-dump", "%get-pre-modules-obarray", "%get-stack-size", "%global-site-dir", "%init-rdelim-builtins", "%init-rw-builtins", "%library-dir", "%load-announce", "%load-hook", "%make-void-port", "%package-data-dir", "%port-property", "%print-module", "%resolve-variable", 
"%search-load-path", "%set-port-property!", "%site-ccache-dir", "%site-dir", "%start-stack", "%string-dump", "%symbol-dump", "%warn-auto-compilation-enabled", "*", "+", "-", "->bool", "->char-set", "/", "1+", "1-", "<", "<=", "=", ">", ">=", "abort-to-prompt", "abort-to-prompt*", "abs", "absolute-file-name?", "accept", "access?", "acons", "acos", "acosh", "add-hook!", "addrinfo:addr", "addrinfo:canonname", "addrinfo:fam", "addrinfo:flags", "addrinfo:protocol", "addrinfo:socktype", "adjust-port-revealed!", "alarm", "alist-cons", "alist-copy", "alist-delete", "alist-delete!", "allocate-struct", "and-map", "and=>", "angle", "any", "append", "append!", "append-map", "append-map!", "append-reverse", "append-reverse!", "apply", "array->list", "array-cell-ref", "array-cell-set!", "array-contents", "array-copy!", "array-copy-in-order!", "array-dimensions", "array-equal?", "array-fill!", "array-for-each", "array-in-bounds?", "array-index-map!", "array-length", "array-map!", "array-map-in-order!", "array-rank", "array-ref", "array-set!", "array-shape", "array-slice", "array-slice-for-each", "array-slice-for-each-in-order", "array-type", "array-type-code", "array?", "ash", "asin", "asinh", "assert-load-verbosity", "assoc", "assoc-ref", "assoc-remove!", "assoc-set!", "assq", "assq-ref", "assq-remove!", "assq-set!", "assv", "assv-ref", "assv-remove!", "assv-set!", "atan", "atanh", "autoload-done!", "autoload-done-or-in-progress?", "autoload-in-progress!", "backtrace", "basename", "batch-mode?", "beautify-user-module!", "bind", "bind-textdomain-codeset", "bindtextdomain", "bit-count", "bit-count*", "bit-extract", "bit-invert!", "bit-position", "bit-set*!", "bitvector", "bitvector->list", "bitvector-bit-clear?", "bitvector-bit-set?", "bitvector-clear-all-bits!", "bitvector-clear-bit!", "bitvector-clear-bits!", "bitvector-count", "bitvector-count-bits", "bitvector-fill!", "bitvector-flip-all-bits!", "bitvector-length", "bitvector-position", "bitvector-ref", "bitvector-set!", 
"bitvector-set-all-bits!", "bitvector-set-bit!", "bitvector-set-bits!", "bitvector?", "boolean?", "bound-identifier=?", "break", "break!", "caaaar", "caaadr", "caaar", "caadar", "caaddr", "caadr", "caar", "cadaar", "cadadr", "cadar", "caddar", "cadddr", "caddr", "cadr", "call-with-blocked-asyncs", "call-with-current-continuation", "call-with-deferred-observers", "call-with-include-port", "call-with-input-file", "call-with-input-string", "call-with-module-autoload-lock", "call-with-output-file", "call-with-output-string", "call-with-port", "call-with-prompt", "call-with-unblocked-asyncs", "call-with-values", "call/cc", "canonicalize-path", "car", "car+cdr", "catch", "cdaaar", "cdaadr", "cdaar", "cdadar", "cdaddr", "cdadr", "cdar", "cddaar", "cddadr", "cddar", "cdddar", "cddddr", "cdddr", "cddr", "cdr", "ceiling", "ceiling-quotient", "ceiling-remainder", "ceiling/", "centered-quotient", "centered-remainder", "centered/", "char->integer", "char-alphabetic?", "char-ci<=?", "char-ci<?", "char-ci=?", "char-ci>=?", "char-ci>?", "char-downcase", "char-general-category", "char-is-both?", "char-lower-case?", "char-numeric?", "char-ready?", "char-set", "char-set->list", "char-set->string", "char-set-adjoin", "char-set-adjoin!", "char-set-any", "char-set-complement", "char-set-complement!", "char-set-contains?", "char-set-copy", "char-set-count", "char-set-cursor", "char-set-cursor-next", "char-set-delete", "char-set-delete!", "char-set-diff+intersection", "char-set-diff+intersection!", "char-set-difference", "char-set-difference!", "char-set-every", "char-set-filter", "char-set-filter!", "char-set-fold", "char-set-for-each", "char-set-hash", "char-set-intersection", "char-set-intersection!", "char-set-map", "char-set-ref", "char-set-size", "char-set-unfold", "char-set-unfold!", "char-set-union", "char-set-union!", "char-set-xor", "char-set-xor!", "char-set<=", "char-set=", "char-set?", "char-titlecase", "char-upcase", "char-upper-case?", "char-whitespace?", "char<=?", 
"char<?", "char=?", "char>=?", "char>?", "char?", "chdir", "chmod", "chown", "chroot", "circular-list", "circular-list?", "close", "close-fdes", "close-input-port", "close-output-port", "close-port", "closedir", "command-line", "complex?", "compose", "concatenate", "concatenate!", "cond-expand-provide", "connect", "cons", "cons*", "cons-source", "const", "convert-assignment", "copy-file", "copy-random-state", "copy-tree", "cos", "cosh", "count", "crypt", "ctermid", "current-dynamic-state", "current-error-port", "current-input-port", "current-language", "current-load-port", "current-module", "current-output-port", "current-time", "current-warning-port", "datum->random-state", "datum->syntax", "debug-disable", "debug-enable", "debug-options", "debug-options-interface", "default-duplicate-binding-handler", "default-duplicate-binding-procedures", "default-prompt-tag", "define!", "define-module*", "defined?", "delete", "delete!", "delete-duplicates", "delete-duplicates!", "delete-file", "delete1!", "delq", "delq!", "delq1!", "delv", "delv!", "delv1!", "denominator", "directory-stream?", "dirname", "display", "display-application", "display-backtrace", "display-error", "dotted-list?", "doubly-weak-hash-table?", "drain-input", "drop", "drop-right", "drop-right!", "drop-while", "dup", "dup->fdes", "dup->inport", "dup->outport", "dup->port", "dup2", "duplicate-port", "dynamic-call", "dynamic-func", "dynamic-link", "dynamic-object?", "dynamic-pointer", "dynamic-state?", "dynamic-unlink", "dynamic-wind", "effective-version", "eighth", "end-of-char-set?", "endgrent", "endhostent", "endnetent", "endprotoent", "endpwent", "endservent", "ensure-batch-mode!", "environ", "eof-object?", "eq?", "equal?", "eqv?", "error", "euclidean-quotient", "euclidean-remainder", "euclidean/", "eval", "eval-string", "even?", "every", "exact->inexact", "exact-integer-sqrt", "exact-integer?", "exact?", "exception-accessor", "exception-args", "exception-kind", "exception-predicate", "exception-type?", 
"exception?", "execl", "execle", "execlp", "exit", "exp", "expt", "f32vector", "f32vector->list", "f32vector-length", "f32vector-ref", "f32vector-set!", "f32vector?", "f64vector", "f64vector->list", "f64vector-length", "f64vector-ref", "f64vector-set!", "f64vector?", "fcntl", "fdes->inport", "fdes->outport", "fdes->ports", "fdopen", "fifth", "file-encoding", "file-exists?", "file-is-directory?", "file-name-separator?", "file-port?", "file-position", "file-set-position", "fileno", "filter", "filter!", "filter-map", "find", "find-tail", "finite?", "first", "flock", "floor", "floor-quotient", "floor-remainder", "floor/", "fluid->parameter", "fluid-bound?", "fluid-ref", "fluid-ref*", "fluid-set!", "fluid-thread-local?", "fluid-unset!", "fluid?", "flush-all-ports", "fold", "fold-right", "for-each", "force", "force-output", "format", "fourth", "frame-address", "frame-arguments", "frame-dynamic-link", "frame-instruction-pointer", "frame-previous", "frame-procedure-name", "frame-return-address", "frame-source", "frame-stack-pointer", "frame?", "free-identifier=?", "fsync", "ftell", "gai-strerror", "gc", "gc-disable", "gc-dump", "gc-enable", "gc-run-time", "gc-stats", "gcd", "generate-temporaries", "gensym", "get-internal-real-time", "get-internal-run-time", "get-output-string", "get-print-state", "getaddrinfo", "getaffinity", "getcwd", "getegid", "getenv", "geteuid", "getgid", "getgr", "getgrent", "getgrgid", "getgrnam", "getgroups", "gethost", "gethostbyaddr", "gethostbyname", "gethostent", "gethostname", "getitimer", "getlogin", "getnet", "getnetbyaddr", "getnetbyname", "getnetent", "getpass", "getpeername", "getpgrp", "getpid", "getppid", "getpriority", "getproto", "getprotobyname", "getprotobynumber", "getprotoent", "getpw", "getpwent", "getpwnam", "getpwuid", "getrlimit", "getserv", "getservbyname", "getservbyport", "getservent", "getsid", "getsockname", "getsockopt", "gettext", "gettimeofday", "getuid", "gmtime", "group:gid", "group:mem", "group:name", 
"group:passwd", "hash", "hash-clear!", "hash-count", "hash-create-handle!", "hash-fold", "hash-for-each", "hash-for-each-handle", "hash-get-handle", "hash-map->list", "hash-ref", "hash-remove!", "hash-set!", "hash-table?", "hashq", "hashq-create-handle!", "hashq-get-handle", "hashq-ref", "hashq-remove!", "hashq-set!", "hashv", "hashv-create-handle!", "hashv-get-handle", "hashv-ref", "hashv-remove!", "hashv-set!", "hashx-create-handle!", "hashx-get-handle", "hashx-ref", "hashx-remove!", "hashx-set!", "hook->list", "hook-empty?", "hook?", "hostent:addr-list", "hostent:addrtype", "hostent:aliases", "hostent:length", "hostent:name", "identifier?", "identity", "imag-part", "in-vicinity", "include-deprecated-features", "inet-lnaof", "inet-makeaddr", "inet-netof", "inet-ntop", "inet-pton", "inexact->exact", "inexact?", "inf", "inf?", "inherit-print-state", "input-port?", "install-r6rs!", "install-r7rs!", "integer->char", "integer-expt", "integer-length", "integer?", "interaction-environment", "iota", "isatty?", "issue-deprecation-warning", "keyword->symbol", "keyword-like-symbol->keyword", "keyword?", "kill", "kw-arg-ref", "last", "last-pair", "lcm", "length", "length+", "link", "list", "list->array", "list->bitvector", "list->char-set", "list->char-set!", "list->f32vector", "list->f64vector", "list->s16vector", "list->s32vector", "list->s64vector", "list->s8vector", "list->string", "list->symbol", "list->typed-array", "list->u16vector", "list->u32vector", "list->u64vector", "list->u8vector", "list->vector", "list-cdr-ref", "list-cdr-set!", "list-copy", "list-head", "list-index", "list-ref", "list-set!", "list-tabulate", "list-tail", "list=", "list?", "listen", "load-compiled", "load-extension", "load-from-path", "load-in-vicinity", "load-user-init", "local-define", "local-define-module", "local-ref", "local-ref-module", "local-remove", "local-set!", "localtime", "log", "log10", "logand", "logbit?", "logcount", "logior", "lognot", "logtest", "logxor", 
"lookup-duplicates-handlers", "lset-adjoin", "lset-diff+intersection", "lset-diff+intersection!", "lset-difference", "lset-difference!", "lset-intersection", "lset-intersection!", "lset-union", "lset-union!", "lset-xor", "lset-xor!", "lset<=", "lset=", "lstat", "macro-binding", "macro-name", "macro-transformer", "macro-type", "macro?", "macroexpand", "macroexpanded?", "magnitude", "major-version", "make-array", "make-autoload-interface", "make-bitvector", "make-doubly-weak-hash-table", "make-exception", "make-exception-from-throw", "make-exception-type", "make-f32vector", "make-f64vector", "make-fluid", "make-fresh-user-module", "make-generalized-vector", "make-guardian", "make-hash-table", "make-hook", "make-list", "make-module", "make-modules-in", "make-mutable-parameter", "make-object-property", "make-parameter", "make-polar", "make-procedure-with-setter", "make-promise", "make-prompt-tag", "make-record-type", "make-rectangular", "make-regexp", "make-s16vector", "make-s32vector", "make-s64vector", "make-s8vector", "make-shared-array", "make-socket-address", "make-soft-port", "make-srfi-4-vector", "make-stack", "make-string", "make-struct-layout", "make-struct/no-tail", "make-struct/simple", "make-symbol", "make-syntax-transformer", "make-thread-local-fluid", "make-typed-array", "make-u16vector", "make-u32vector", "make-u64vector", "make-u8vector", "make-unbound-fluid", "make-undefined-variable", "make-variable", "make-variable-transformer", "make-vector", "make-vtable", "make-weak-key-hash-table", "make-weak-value-hash-table", "map", "map!", "map-in-order", "max", "member", "memoize-expression", "memoized-typecode", "memq", "memv", "merge", "merge!", "micro-version", "min", "minor-version", "mkdir", "mkdtemp", "mknod", "mkstemp", "mkstemp!", "mktime", "module-add!", "module-autoload!", "module-binder", "module-bound?", "module-call-observers", "module-clear!", "module-constructor", "module-declarative?", "module-defer-observers", "module-define!", 
"module-define-submodule!", "module-defined?", "module-duplicates-handlers", "module-ensure-local-variable!", "module-export!", "module-export-all!", "module-filename", "module-for-each", "module-generate-unique-id!", "module-gensym", "module-import-interface", "module-import-obarray", "module-kind", "module-local-variable", "module-locally-bound?", "module-make-local-var!", "module-map", "module-modified", "module-name", "module-next-unique-id", "module-obarray", "module-obarray-get-handle", "module-obarray-ref", "module-obarray-remove!", "module-obarray-set!", "module-observe", "module-observe-weak", "module-observers", "module-public-interface", "module-re-export!", "module-ref", "module-ref-submodule", "module-remove!", "module-replace!", "module-replacements", "module-reverse-lookup", "module-search", "module-set!", "module-submodule-binder", "module-submodules", "module-symbol-binding", "module-symbol-interned?", "module-symbol-local-binding", "module-symbol-locally-interned?", "module-transformer", "module-unobserve", "module-use!", "module-use-interfaces!", "module-uses", "module-variable", "module-version", "module-weak-observers", "module?", "modulo", "modulo-expt", "move->fdes", "nan", "nan?", "negate", "negative?", "nested-define!", "nested-define-module!", "nested-ref", "nested-ref-module", "nested-remove!", "nested-set!", "netent:addrtype", "netent:aliases", "netent:name", "netent:net", "newline", "ngettext", "nice", "nil?", "ninth", "noop", "not", "not-pair?", "null-environment", "null-list?", "null?", "number->string", "number?", "numerator", "object->string", "object-address", "object-properties", "object-property", "odd?", "open", "open-fdes", "open-file", "open-input-file", "open-input-string", "open-io-file", "open-output-file", "open-output-string", "opendir", "or-map", "output-port?", "pair-fold", "pair-fold-right", "pair-for-each", "pair?", "parameter-converter", "parameter-fluid", "parameter?", "parse-path", "parse-path-with-ellipsis", 
"partition", "partition!", "passwd:dir", "passwd:gecos", "passwd:gid", "passwd:name", "passwd:passwd", "passwd:shell", "passwd:uid", "pause", "peek", "peek-char", "pipe", "pk", "port->fdes", "port-closed?", "port-column", "port-conversion-strategy", "port-encoding", "port-filename", "port-for-each", "port-line", "port-mode", "port-revealed", "port-with-print-state", "port?", "positive?", "primitive-_exit", "primitive-eval", "primitive-exit", "primitive-fork", "primitive-load", "primitive-load-path", "primitive-move->fdes", "primitive-read", "print-disable", "print-enable", "print-exception", "print-options", "print-options-interface", "procedure", "procedure-documentation", "procedure-minimum-arity", "procedure-name", "procedure-properties", "procedure-property", "procedure-source", "procedure-with-setter?", "procedure?", "process-use-modules", "program-arguments", "promise?", "proper-list?", "protoent:aliases", "protoent:name", "protoent:proto", "provide", "provided?", "purify-module!", "putenv", "quit", "quotient", "raise", "raise-exception", "random", "random-state->datum", "random-state-from-platform", "random:exp", "random:hollow-sphere!", "random:normal", "random:normal-vector!", "random:solid-sphere!", "random:uniform", "rational?", "rationalize", "read", "read-char", "read-disable", "read-enable", "read-hash-extend", "read-hash-procedure", "read-hash-procedures", "read-options", "read-options-interface", "read-syntax", "readdir", "readlink", "real-part", "real?", "record-accessor", "record-constructor", "record-modifier", "record-predicate", "record-type-constructor", "record-type-descriptor", "record-type-extensible?", "record-type-fields", "record-type-has-parent?", "record-type-mutable-fields", "record-type-name", "record-type-opaque?", "record-type-parent", "record-type-parents", "record-type-properties", "record-type-uid", "record-type?", "record?", "recv!", "recvfrom!", "redirect-port", "reduce", "reduce-right", "regexp-exec", "regexp?", 
"release-port-handle", "reload-module", "remainder", "remove", "remove!", "remove-hook!", "rename-file", "repl-reader", "reset-hook!", "resolve-interface", "resolve-module", "resolve-r6rs-interface", "restore-signals", "restricted-vector-sort!", "reverse", "reverse!", "reverse-list->string", "rewinddir", "rmdir", "round", "round-ash", "round-quotient", "round-remainder", "round/", "run-hook", "s16vector", "s16vector->list", "s16vector-length", "s16vector-ref", "s16vector-set!", "s16vector?", "s32vector", "s32vector->list", "s32vector-length", "s32vector-ref", "s32vector-set!", "s32vector?", "s64vector", "s64vector->list", "s64vector-length", "s64vector-ref", "s64vector-set!", "s64vector?", "s8vector", "s8vector->list", "s8vector-length", "s8vector-ref", "s8vector-set!", "s8vector?", "save-module-excursion", "scheme-report-environment", "scm-error", "search-path", "second", "seed->random-state", "seek", "select", "self-evaluating?", "send", "sendfile", "sendto", "servent:aliases", "servent:name", "servent:port", "servent:proto", "set-autoloaded!", "set-car!", "set-cdr!", "set-current-dynamic-state", "set-current-error-port", "set-current-input-port", "set-current-module", "set-current-output-port", "set-exception-printer!", "set-module-binder!", "set-module-declarative?!", "set-module-duplicates-handlers!", "set-module-filename!", "set-module-kind!", "set-module-name!", "set-module-next-unique-id!", "set-module-obarray!", "set-module-observers!", "set-module-public-interface!", "set-module-submodule-binder!", "set-module-submodules!", "set-module-transformer!", "set-module-uses!", "set-module-version!", "set-object-properties!", "set-object-property!", "set-port-column!", "set-port-conversion-strategy!", "set-port-encoding!", "set-port-filename!", "set-port-line!", "set-port-revealed!", "set-procedure-minimum-arity!", "set-procedure-properties!", "set-procedure-property!", "set-program-arguments", "set-source-properties!", "set-source-property!", 
"set-struct-vtable-name!", "set-symbol-property!", "set-tm:gmtoff", "set-tm:hour", "set-tm:isdst", "set-tm:mday", "set-tm:min", "set-tm:mon", "set-tm:sec", "set-tm:wday", "set-tm:yday", "set-tm:year", "set-tm:zone", "setaffinity", "setegid", "setenv", "seteuid", "setgid", "setgr", "setgrent", "setgroups", "sethost", "sethostent", "sethostname", "setitimer", "setlocale", "setnet", "setnetent", "setpgid", "setpriority", "setproto", "setprotoent", "setpw", "setpwent", "setrlimit", "setserv", "setservent", "setsid", "setsockopt", "setter", "setuid", "setvbuf", "seventh", "shared-array-increments", "shared-array-offset", "shared-array-root", "shutdown", "sigaction", "simple-exceptions", "simple-format", "sin", "sinh", "sixth", "sleep", "sloppy-assoc", "sloppy-assq", "sloppy-assv", "sockaddr:addr", "sockaddr:fam", "sockaddr:flowinfo", "sockaddr:path", "sockaddr:port", "sockaddr:scopeid", "socket", "socketpair", "sort", "sort!", "sort-list", "sort-list!", "sorted?", "source-properties", "source-property", "span", "span!", "split-at", "split-at!", "sqrt", "stable-sort", "stable-sort!", "stack-id", "stack-length", "stack-ref", "stack?", "stat", "stat:atime", "stat:atimensec", "stat:blksize", "stat:blocks", "stat:ctime", "stat:ctimensec", "stat:dev", "stat:gid", "stat:ino", "stat:mode", "stat:mtime", "stat:mtimensec", "stat:nlink", "stat:perms", "stat:rdev", "stat:size", "stat:type", "stat:uid", "status:exit-val", "status:stop-sig", "status:term-sig", "strerror", "strftime", "string", "string->char-set", "string->char-set!", "string->list", "string->number", "string->symbol", "string-any", "string-any-c-code", "string-append", "string-append/shared", "string-bytes-per-char", "string-capitalize", "string-capitalize!", "string-ci->symbol", "string-ci<", "string-ci<=", "string-ci<=?", "string-ci<>", "string-ci<?", "string-ci=", "string-ci=?", "string-ci>", "string-ci>=", "string-ci>=?", "string-ci>?", "string-compare", "string-compare-ci", "string-concatenate", 
"string-concatenate-reverse", "string-concatenate-reverse/shared", "string-concatenate/shared", "string-contains", "string-contains-ci", "string-copy", "string-copy!", "string-count", "string-delete", "string-downcase", "string-downcase!", "string-drop", "string-drop-right", "string-every", "string-every-c-code", "string-fill!", "string-filter", "string-fold", "string-fold-right", "string-for-each", "string-for-each-index", "string-hash", "string-hash-ci", "string-index", "string-index-right", "string-join", "string-length", "string-map", "string-map!", "string-normalize-nfc", "string-normalize-nfd", "string-normalize-nfkc", "string-normalize-nfkd", "string-null?", "string-pad", "string-pad-right", "string-prefix-ci?", "string-prefix-length", "string-prefix-length-ci", "string-prefix?", "string-ref", "string-replace", "string-reverse", "string-reverse!", "string-rindex", "string-set!", "string-skip", "string-skip-right", "string-split", "string-suffix-ci?", "string-suffix-length", "string-suffix-length-ci", "string-suffix?", "string-tabulate", "string-take", "string-take-right", "string-titlecase", "string-titlecase!", "string-tokenize", "string-trim", "string-trim-both", "string-trim-right", "string-unfold", "string-unfold-right", "string-upcase", "string-upcase!", "string-utf8-length", "string-xcopy!", "string<", "string<=", "string<=?", "string<>", "string<?", "string=", "string=?", "string>", "string>=", "string>=?", "string>?", "string?", "strptime", "struct-layout", "struct-ref", "struct-ref/unboxed", "struct-set!", "struct-set!/unboxed", "struct-vtable", "struct-vtable-name", "struct-vtable?", "struct?", "substring", "substring-fill!", "substring-move!", "substring/copy", "substring/read-only", "substring/shared", "supports-source-properties?", "symbol", "symbol->keyword", "symbol->string", "symbol-append", "symbol-fref", "symbol-fset!", "symbol-hash", "symbol-interned?", "symbol-pref", "symbol-prefix-proc", "symbol-property", "symbol-property-remove!", 
"symbol-pset!", "symbol?", "symlink", "sync", "syntax->datum", "syntax-source", "syntax-violation", "system", "system*", "system-async-mark", "system-error-errno", "system-file-name-convention", "take", "take!", "take-right", "take-while", "take-while!", "tan", "tanh", "tcgetpgrp", "tcsetpgrp", "tenth", "textdomain", "third", "throw", "thunk?", "times", "tm:gmtoff", "tm:hour", "tm:isdst", "tm:mday", "tm:min", "tm:mon", "tm:sec", "tm:wday", "tm:yday", "tm:year", "tm:zone", "tmpfile", "tmpnam", "tms:clock", "tms:cstime", "tms:cutime", "tms:stime", "tms:utime", "transpose-array", "truncate", "truncate-file", "truncate-quotient", "truncate-remainder", "truncate/", "try-load-module", "try-module-autoload", "ttyname", "typed-array?", "tzset", "u16vector", "u16vector->list", "u16vector-length", "u16vector-ref", "u16vector-set!", "u16vector?", "u32vector", "u32vector->list", "u32vector-length", "u32vector-ref", "u32vector-set!", "u32vector?", "u64vector", "u64vector->list", "u64vector-length", "u64vector-ref", "u64vector-set!", "u64vector?", "u8vector", "u8vector->list", "u8vector-length", "u8vector-ref", "u8vector-set!", "u8vector?", "ucs-range->char-set", "ucs-range->char-set!", "umask", "uname", "unfold", "unfold-right", "unmemoize-expression", "unread-char", "unread-string", "unsetenv", "unspecified?", "unzip1", "unzip2", "unzip3", "unzip4", "unzip5", "use-srfis", "user-modules-declarative?", "using-readline?", "usleep", "utime", "utsname:machine", "utsname:nodename", "utsname:release", "utsname:sysname", "utsname:version", "values", "variable-bound?", "variable-ref", "variable-set!", "variable-unset!", "variable?", "vector", "vector->list", "vector-copy", "vector-fill!", "vector-length", "vector-move-left!", "vector-move-right!", "vector-ref", "vector-set!", "vector?", "version", "version-matches?", "waitpid", "warn", "weak-key-hash-table?", "weak-value-hash-table?", "with-continuation-barrier", "with-dynamic-state", "with-error-to-file", "with-error-to-port", 
"with-error-to-string", "with-exception-handler", "with-fluid*", "with-fluids*", "with-input-from-file", "with-input-from-port", "with-input-from-string", "with-output-to-file", "with-output-to-port", "with-output-to-string", "with-throw-handler", "write", "write-char", "xcons", "xsubstring", "zero?", "zip", }
32,563
Python
19.226087
70
0.539907
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/grammar_notation.py
""" pygments.lexers.grammar_notation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for grammar notations like BNF. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, include, this, using, words from pygments.token import Comment, Keyword, Literal, Name, Number, \ Operator, Punctuation, String, Text, Whitespace __all__ = ['BnfLexer', 'AbnfLexer', 'JsgfLexer', 'PegLexer'] class BnfLexer(RegexLexer): """ This lexer is for grammar notations which are similar to original BNF. In order to maximize a number of targets of this lexer, let's decide some designs: * We don't distinguish `Terminal Symbol`. * We do assume that `NonTerminal Symbol` are always enclosed with arrow brackets. * We do assume that `NonTerminal Symbol` may include any printable characters except arrow brackets and ASCII 0x20. This assumption is for `RBNF <http://www.rfc-base.org/txt/rfc-5511.txt>`_. * We do assume that target notation doesn't support comment. * We don't distinguish any operators and punctuation except `::=`. Though these decision making might cause too minimal highlighting and you might be disappointed, but it is reasonable for us. .. versionadded:: 2.1 """ name = 'BNF' aliases = ['bnf'] filenames = ['*.bnf'] mimetypes = ['text/x-bnf'] tokens = { 'root': [ (r'(<)([ -;=?-~]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)), # an only operator (r'::=', Operator), # fallback (r'[^<>:]+', Text), # for performance (r'.', Text), ], } class AbnfLexer(RegexLexer): """ Lexer for IETF 7405 ABNF. (Updates `5234 <http://www.ietf.org/rfc/rfc5234.txt>`_) grammars. .. 
versionadded:: 2.1 """ name = 'ABNF' url = 'http://www.ietf.org/rfc/rfc7405.txt' aliases = ['abnf'] filenames = ['*.abnf'] mimetypes = ['text/x-abnf'] _core_rules = ( 'ALPHA', 'BIT', 'CHAR', 'CR', 'CRLF', 'CTL', 'DIGIT', 'DQUOTE', 'HEXDIG', 'HTAB', 'LF', 'LWSP', 'OCTET', 'SP', 'VCHAR', 'WSP') tokens = { 'root': [ # comment (r';.*$', Comment.Single), # quoted # double quote itself in this state, it is as '%x22'. (r'(%[si])?"[^"]*"', Literal), # binary (but i have never seen...) (r'%b[01]+\-[01]+\b', Literal), # range (r'%b[01]+(\.[01]+)*\b', Literal), # concat # decimal (r'%d[0-9]+\-[0-9]+\b', Literal), # range (r'%d[0-9]+(\.[0-9]+)*\b', Literal), # concat # hexadecimal (r'%x[0-9a-fA-F]+\-[0-9a-fA-F]+\b', Literal), # range (r'%x[0-9a-fA-F]+(\.[0-9a-fA-F]+)*\b', Literal), # concat # repetition (<a>*<b>element) including nRule (r'\b[0-9]+\*[0-9]+', Operator), (r'\b[0-9]+\*', Operator), (r'\b[0-9]+', Operator), (r'\*', Operator), # Strictly speaking, these are not keyword but # are called `Core Rule'. (words(_core_rules, suffix=r'\b'), Keyword), # nonterminals (ALPHA *(ALPHA / DIGIT / "-")) (r'[a-zA-Z][a-zA-Z0-9-]*\b', Name.Class), # operators (r'(=/|=|/)', Operator), # punctuation (r'[\[\]()]', Punctuation), # fallback (r'\s+', Whitespace), (r'.', Text), ], } class JsgfLexer(RegexLexer): """ For JSpeech Grammar Format grammars. .. 
versionadded:: 2.2 """ name = 'JSGF' url = 'https://www.w3.org/TR/jsgf/' aliases = ['jsgf'] filenames = ['*.jsgf'] mimetypes = ['application/jsgf', 'application/x-jsgf', 'text/jsgf'] tokens = { 'root': [ include('comments'), include('non-comments'), ], 'comments': [ (r'/\*\*(?!/)', Comment.Multiline, 'documentation comment'), (r'/\*[\w\W]*?\*/', Comment.Multiline), (r'//.*$', Comment.Single), ], 'non-comments': [ (r'\A#JSGF[^;]*', Comment.Preproc), (r'\s+', Whitespace), (r';', Punctuation), (r'[=|()\[\]*+]', Operator), (r'/[^/]+/', Number.Float), (r'"', String.Double, 'string'), (r'\{', String.Other, 'tag'), (words(('import', 'public'), suffix=r'\b'), Keyword.Reserved), (r'grammar\b', Keyword.Reserved, 'grammar name'), (r'(<)(NULL|VOID)(>)', bygroups(Punctuation, Name.Builtin, Punctuation)), (r'<', Punctuation, 'rulename'), (r'\w+|[^\s;=|()\[\]*+/"{<\w]+', Text), ], 'string': [ (r'"', String.Double, '#pop'), (r'\\.', String.Escape), (r'[^\\"]+', String.Double), ], 'tag': [ (r'\}', String.Other, '#pop'), (r'\\.', String.Escape), (r'[^\\}]+', String.Other), ], 'grammar name': [ (r';', Punctuation, '#pop'), (r'\s+', Whitespace), (r'\.', Punctuation), (r'[^;\s.]+', Name.Namespace), ], 'rulename': [ (r'>', Punctuation, '#pop'), (r'\*', Punctuation), (r'\s+', Whitespace), (r'([^.>]+)(\s*)(\.)', bygroups(Name.Namespace, Text, Punctuation)), (r'[^.>]+', Name.Constant), ], 'documentation comment': [ (r'\*/', Comment.Multiline, '#pop'), (r'^(\s*)(\*?)(\s*)(@(?:example|see))(\s+)' r'([\w\W]*?(?=(?:^\s*\*?\s*@|\*/)))', bygroups(Whitespace, Comment.Multiline, Whitespace, Comment.Special, Whitespace, using(this, state='example'))), (r'(^\s*\*?\s*)(@\S*)', bygroups(Comment.Multiline, Comment.Special)), (r'[^*\n@]+|\w|\W', Comment.Multiline), ], 'example': [ (r'(\n\s*)(\*)', bygroups(Whitespace, Comment.Multiline)), include('non-comments'), (r'.', Comment.Multiline), ], } class PegLexer(RegexLexer): """ This lexer is for Parsing Expression Grammars (PEG). 
Various implementations of PEG have made different decisions regarding the syntax, so let's try to be accommodating: * `<-`, `←`, `:`, and `=` are all accepted as rule operators. * Both `|` and `/` are choice operators. * `^`, `↑`, and `~` are cut operators. * A single `a-z` character immediately before a string, or multiple `a-z` characters following a string, are part of the string (e.g., `r"..."` or `"..."ilmsuxa`). .. versionadded:: 2.6 """ name = 'PEG' url = 'https://bford.info/pub/lang/peg.pdf' aliases = ['peg'] filenames = ['*.peg'] mimetypes = ['text/x-peg'] tokens = { 'root': [ # Comments (r'#.*$', Comment.Single), # All operators (r'<-|[←:=/|&!?*+^↑~]', Operator), # Other punctuation (r'[()]', Punctuation), # Keywords (r'\.', Keyword), # Character classes (r'(\[)([^\]]*(?:\\.[^\]\\]*)*)(\])', bygroups(Punctuation, String, Punctuation)), # Single and double quoted strings (with optional modifiers) (r'[a-z]?"[^"\\]*(?:\\.[^"\\]*)*"[a-z]*', String.Double), (r"[a-z]?'[^'\\]*(?:\\.[^'\\]*)*'[a-z]*", String.Single), # Nonterminals are not whitespace, operators, or punctuation (r'[^\s<←:=/|&!?*+\^↑~()\[\]"\'#]+', Name.Class), # Fallback (r'.', Text), ], }
7,968
Python
28.958647
81
0.467369
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/qvt.py
""" pygments.lexers.qvt ~~~~~~~~~~~~~~~~~~~ Lexer for QVT Operational language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, include, combined, default, \ words from pygments.token import Text, Comment, Operator, Keyword, Punctuation, \ Name, String, Number __all__ = ['QVToLexer'] class QVToLexer(RegexLexer): """ For the `QVT Operational Mapping language <http://www.omg.org/spec/QVT/1.1/>`_. Reference for implementing this: «Meta Object Facility (MOF) 2.0 Query/View/Transformation Specification», Version 1.1 - January 2011 (http://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in particular. Notable tokens assignments: - Name.Class is assigned to the identifier following any of the following keywords: metamodel, class, exception, primitive, enum, transformation or library - Name.Function is assigned to the names of mappings and queries - Name.Builtin.Pseudo is assigned to the pre-defined variables 'this', 'self' and 'result'. 
""" # With obvious borrowings & inspiration from the Java, Python and C lexers name = 'QVTO' aliases = ['qvto', 'qvt'] filenames = ['*.qvto'] tokens = { 'root': [ (r'\n', Text), (r'[^\S\n]+', Text), (r'(--|//)(\s*)(directive:)?(.*)$', bygroups(Comment, Comment, Comment.Preproc, Comment)), # Uncomment the following if you want to distinguish between # '/*' and '/**', à la javadoc # (r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline), (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'\\\n', Text), (r'(and|not|or|xor|##?)\b', Operator.Word), (r'(:{1,2}=|[-+]=)\b', Operator.Word), (r'(@|<<|>>)\b', Keyword), # stereotypes (r'!=|<>|==|=|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator), (r'[]{}:(),;[]', Punctuation), (r'(true|false|unlimited|null)\b', Keyword.Constant), (r'(this|self|result)\b', Name.Builtin.Pseudo), (r'(var)\b', Keyword.Declaration), (r'(from|import)\b', Keyword.Namespace, 'fromimport'), (r'(metamodel|class|exception|primitive|enum|transformation|' r'library)(\s+)(\w+)', bygroups(Keyword.Word, Text, Name.Class)), (r'(exception)(\s+)(\w+)', bygroups(Keyword.Word, Text, Name.Exception)), (r'(main)\b', Name.Function), (r'(mapping|helper|query)(\s+)', bygroups(Keyword.Declaration, Text), 'operation'), (r'(assert)(\s+)\b', bygroups(Keyword, Text), 'assert'), (r'(Bag|Collection|Dict|OrderedSet|Sequence|Set|Tuple|List)\b', Keyword.Type), include('keywords'), ('"', String, combined('stringescape', 'dqs')), ("'", String, combined('stringescape', 'sqs')), include('name'), include('numbers'), # (r'([a-zA-Z_]\w*)(::)([a-zA-Z_]\w*)', # bygroups(Text, Text, Text)), ], 'fromimport': [ (r'(?:[ \t]|\\\n)+', Text), (r'[a-zA-Z_][\w.]*', Name.Namespace), default('#pop'), ], 'operation': [ (r'::', Text), (r'(.*::)([a-zA-Z_]\w*)([ \t]*)(\()', bygroups(Text, Name.Function, Text, Punctuation), '#pop') ], 'assert': [ (r'(warning|error|fatal)\b', Keyword, '#pop'), default('#pop'), # all else: go back ], 'keywords': [ (words(( 'abstract', 'access', 'any', 'assert', 'blackbox', 'break', 'case', 
'collect', 'collectNested', 'collectOne', 'collectselect', 'collectselectOne', 'composes', 'compute', 'configuration', 'constructor', 'continue', 'datatype', 'default', 'derived', 'disjuncts', 'do', 'elif', 'else', 'end', 'endif', 'except', 'exists', 'extends', 'forAll', 'forEach', 'forOne', 'from', 'if', 'implies', 'in', 'inherits', 'init', 'inout', 'intermediate', 'invresolve', 'invresolveIn', 'invresolveone', 'invresolveoneIn', 'isUnique', 'iterate', 'late', 'let', 'literal', 'log', 'map', 'merges', 'modeltype', 'new', 'object', 'one', 'ordered', 'out', 'package', 'population', 'property', 'raise', 'readonly', 'references', 'refines', 'reject', 'resolve', 'resolveIn', 'resolveone', 'resolveoneIn', 'return', 'select', 'selectOne', 'sortedBy', 'static', 'switch', 'tag', 'then', 'try', 'typedef', 'unlimited', 'uses', 'when', 'where', 'while', 'with', 'xcollect', 'xmap', 'xselect'), suffix=r'\b'), Keyword), ], # There is no need to distinguish between String.Single and # String.Double: 'strings' is factorised for 'dqs' and 'sqs' 'strings': [ (r'[^\\\'"\n]+', String), # quotes, percents and backslashes must be parsed one at a time (r'[\'"\\]', String), ], 'stringescape': [ (r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape) ], 'dqs': [ # double-quoted string (r'"', String, '#pop'), (r'\\\\|\\"', String.Escape), include('strings') ], 'sqs': [ # single-quoted string (r"'", String, '#pop'), (r"\\\\|\\'", String.Escape), include('strings') ], 'name': [ (r'[a-zA-Z_]\w*', Name), ], # numbers: excerpt taken from the python lexer 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'\d+', Number.Integer) ], }
6,066
Python
38.914473
83
0.48846
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/bare.py
""" pygments.lexers.bare ~~~~~~~~~~~~~~~~~~~~ Lexer for the BARE schema. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words, bygroups from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace __all__ = ['BareLexer'] class BareLexer(RegexLexer): """ For BARE schema source. .. versionadded:: 2.7 """ name = 'BARE' url = 'https://baremessages.org' filenames = ['*.bare'] aliases = ['bare'] keywords = [ 'type', 'enum', 'u8', 'u16', 'u32', 'u64', 'uint', 'i8', 'i16', 'i32', 'i64', 'int', 'f32', 'f64', 'bool', 'void', 'data', 'string', 'optional', 'map', ] tokens = { 'root': [ (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\{)', bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'struct'), (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\()', bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'union'), (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)', bygroups(Keyword, Whitespace, Name, Whitespace), 'typedef'), (r'(enum)(\s+)([A-Z][a-zA-Z0-9]+)(\s+\{)', bygroups(Keyword, Whitespace, Name.Class, Whitespace), 'enum'), (r'#.*?$', Comment), (r'\s+', Whitespace), ], 'struct': [ (r'\{', Text, '#push'), (r'\}', Text, '#pop'), (r'([a-zA-Z0-9]+)(:)(\s*)', bygroups(Name.Attribute, Text, Whitespace), 'typedef'), (r'\s+', Whitespace), ], 'union': [ (r'\)', Text, '#pop'), (r'(\s*)(\|)(\s*)', bygroups(Whitespace, Text, Whitespace)), (r'[A-Z][a-zA-Z0-9]+', Name.Class), (words(keywords), Keyword), (r'\s+', Whitespace), ], 'typedef': [ (r'\[\]', Text), (r'#.*?$', Comment, '#pop'), (r'(\[)(\d+)(\])', bygroups(Text, Literal, Text)), (r'<|>', Text), (r'\(', Text, 'union'), (r'(\[)([a-z][a-z-A-Z0-9]+)(\])', bygroups(Text, Keyword, Text)), (r'(\[)([A-Z][a-z-A-Z0-9]+)(\])', bygroups(Text, Name.Class, Text)), (r'([A-Z][a-z-A-Z0-9]+)', Name.Class), (words(keywords), Keyword), (r'\n', Text, '#pop'), (r'\{', Text, 'struct'), (r'\s+', Whitespace), (r'\d+', Literal), ], 'enum': [ 
(r'\{', Text, '#push'), (r'\}', Text, '#pop'), (r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)', bygroups(Name.Attribute, Text, Literal)), (r'([A-Z][A-Z0-9_]*)', bygroups(Name.Attribute)), (r'#.*?$', Comment), (r'\s+', Whitespace), ], }
3,021
Python
28.339806
84
0.406157
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/felix.py
""" pygments.lexers.felix ~~~~~~~~~~~~~~~~~~~~~ Lexer for the Felix language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups, default, words, \ combined from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['FelixLexer'] class FelixLexer(RegexLexer): """ For Felix source code. .. versionadded:: 1.2 """ name = 'Felix' url = 'http://www.felix-lang.org' aliases = ['felix', 'flx'] filenames = ['*.flx', '*.flxh'] mimetypes = ['text/x-felix'] preproc = ( 'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef', ) keywords = ( '_', '_deref', 'all', 'as', 'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass', 'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else', 'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except', 'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork', 'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance', 'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace', 'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise', 'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then', 'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto', 'when', 'whilst', 'with', 'yield', ) keyword_directives = ( '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export', 'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn', 'package', 'private', 'pod', 'property', 'public', 'publish', 'requires', 'todo', 'virtual', 'use', ) keyword_declarations = ( 'def', 'let', 'ref', 'val', 'var', ) keyword_types = ( 'unit', 'void', 'any', 'bool', 'byte', 'offset', 'address', 'caddress', 'cvaddress', 'vaddress', 'tiny', 'short', 'int', 'long', 'vlong', 'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float', 
'double', 'ldouble', 'complex', 'dcomplex', 'lcomplex', 'imaginary', 'dimaginary', 'limaginary', 'char', 'wchar', 'uchar', 'charp', 'charcp', 'ucharp', 'ucharcp', 'string', 'wstring', 'ustring', 'cont', 'array', 'varray', 'list', 'lvalue', 'opt', 'slice', ) keyword_constants = ( 'false', 'true', ) operator_words = ( 'and', 'not', 'in', 'is', 'isin', 'or', 'xor', ) name_builtins = ( '_svc', 'while', ) name_pseudo = ( 'root', 'self', 'this', ) decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?' tokens = { 'root': [ include('whitespace'), # Keywords (words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce', 'union'), suffix=r'\b'), Keyword, 'funcname'), (words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'), Keyword, 'classname'), (r'(instance|module|typeclass)\b', Keyword, 'modulename'), (words(keywords, suffix=r'\b'), Keyword), (words(keyword_directives, suffix=r'\b'), Name.Decorator), (words(keyword_declarations, suffix=r'\b'), Keyword.Declaration), (words(keyword_types, suffix=r'\b'), Keyword.Type), (words(keyword_constants, suffix=r'\b'), Keyword.Constant), # Operators include('operators'), # Float Literal # -- Hex Float (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)' r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float), # -- DecimalFloat (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|' r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float), (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?', Number.Float), # IntegerLiteral # -- Binary (r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin), # -- Octal (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct), # -- Hexadecimal (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex), # -- Decimal (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer), # Strings ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'), ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'), ('([rR][cC]?|[cC][rR])"', String, 'dqs'), ("([rR][cC]?|[cC][rR])'", String, 'sqs'), ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')), 
("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')), ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')), ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')), # Punctuation (r'[\[\]{}:(),;?]', Punctuation), # Labels (r'[a-zA-Z_]\w*:>', Name.Label), # Identifiers (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin), (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo), (r'[a-zA-Z_]\w*', Name), ], 'whitespace': [ (r'\s+', Whitespace), include('comment'), # Preprocessor (r'(#)(\s*)(if)(\s+)(0)', bygroups(Comment.Preproc, Whitespace, Comment.Preproc, Whitespace, Comment.Preproc), 'if0'), (r'#', Comment.Preproc, 'macro'), ], 'operators': [ (r'(%s)\b' % '|'.join(operator_words), Operator.Word), (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator), ], 'comment': [ (r'//(.*?)$', Comment.Single), (r'/[*]', Comment.Multiline, 'comment2'), ], 'comment2': [ (r'[^/*]', Comment.Multiline), (r'/[*]', Comment.Multiline, '#push'), (r'[*]/', Comment.Multiline, '#pop'), (r'[/*]', Comment.Multiline), ], 'if0': [ (r'^(\s*)(#if.*?(?<!\\))(\n)', bygroups(Whitespace, Comment, Whitespace), '#push'), (r'^(\s*)(#endif.*?(?<!\\))(\n)', bygroups(Whitespace, Comment, Whitespace), '#pop'), (r'(.*?)(\n)', bygroups(Comment, Whitespace)), ], 'macro': [ include('comment'), (r'(import|include)(\s+)(<[^>]*?>)', bygroups(Comment.Preproc, Whitespace, String), '#pop'), (r'(import|include)(\s+)("[^"]*?")', bygroups(Comment.Preproc, Whitespace, String), '#pop'), (r"(import|include)(\s+)('[^']*?')", bygroups(Comment.Preproc, Whitespace, String), '#pop'), (r'[^/\n]+', Comment.Preproc), # (r'/[*](.|\n)*?[*]/', Comment), # (r'//.*?\n', Comment, '#pop'), (r'/', Comment.Preproc), (r'(?<=\\)\n', Comment.Preproc), (r'\n', Whitespace, '#pop'), ], 'funcname': [ include('whitespace'), (r'[a-zA-Z_]\w*', Name.Function, '#pop'), # anonymous functions (r'(?=\()', Text, '#pop'), ], 'classname': [ include('whitespace'), (r'[a-zA-Z_]\w*', Name.Class, '#pop'), # anonymous classes (r'(?=\{)', 
Text, '#pop'), ], 'modulename': [ include('whitespace'), (r'\[', Punctuation, ('modulename2', 'tvarlist')), default('modulename2'), ], 'modulename2': [ include('whitespace'), (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'), ], 'tvarlist': [ include('whitespace'), include('operators'), (r'\[', Punctuation, '#push'), (r'\]', Punctuation, '#pop'), (r',', Punctuation), (r'(with|where)\b', Keyword), (r'[a-zA-Z_]\w*', Name), ], 'stringescape': [ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) ], 'strings': [ (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' '[hlL]?[E-GXc-giorsux%]', String.Interpol), (r'[^\\\'"%\n]+', String), # quotes, percents and backslashes must be parsed one at a time (r'[\'"\\]', String), # unhandled string formatting sign (r'%', String) # newlines are an error (use "nl" state) ], 'nl': [ (r'\n', String) ], 'dqs': [ (r'"', String, '#pop'), # included here again for raw strings (r'\\\\|\\"|\\\n', String.Escape), include('strings') ], 'sqs': [ (r"'", String, '#pop'), # included here again for raw strings (r"\\\\|\\'|\\\n", String.Escape), include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), include('strings'), include('nl') ], 'tsqs': [ (r"'''", String, '#pop'), include('strings'), include('nl') ], }
9,646
Python
33.826715
82
0.431578
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/rebol.py
""" pygments.lexers.rebol ~~~~~~~~~~~~~~~~~~~~~ Lexers for the REBOL and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, bygroups from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Generic, Whitespace __all__ = ['RebolLexer', 'RedLexer'] class RebolLexer(RegexLexer): """ A `REBOL <http://www.rebol.com/>`_ lexer. .. versionadded:: 1.1 """ name = 'REBOL' aliases = ['rebol'] filenames = ['*.r', '*.r3', '*.reb'] mimetypes = ['text/x-rebol'] flags = re.IGNORECASE | re.MULTILINE escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' def word_callback(lexer, match): word = match.group() if re.match(".*:$", word): yield match.start(), Generic.Subheading, word elif re.match( r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' r'while|compress|decompress|secure|open|close|read|read-io|' r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' r'browse|launch|stats|get-modes|set-modes|to-local-file|' r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' r'hide|draw|show|size-text|textinfo|offset-to-caret|' r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' r'dsa-make-key|dsa-generate-key|dsa-make-signature|' r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' r'rsa-encrypt)$', word): yield match.start(), Name.Builtin, 
word elif re.match( r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' r'minimum|maximum|negate|complement|absolute|random|head|tail|' r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' r'copy)$', word): yield match.start(), Name.Function, word elif re.match( r'(error|source|input|license|help|install|echo|Usage|with|func|' r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' r'remold|charset|array|replace|move|extract|forskip|forall|alter|' r'first+|also|take|for|forever|dispatch|attempt|what-dir|' r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' r'write-user|save-user|set-user-name|protect-system|parse-xml|' r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' r'request-dir|center-face|do-events|net-error|decode-url|' r'parse-header|parse-header-date|parse-email-addrs|import-email|' r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' r'find-key-face|do-face|viewtop|confine|find-window|' r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' r'read-thru|load-thru|do-thru|launch-thru|load-image|' r'request-download|do-face-alt|set-font|set-para|get-style|' r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' r'resize-face|load-stock|load-stock-block|notify|request|flash|' r'request-color|request-pass|request-text|request-list|' r'request-date|request-file|dbug|editor|link-relative-path|' r'emailer|parse-error)$', word): yield match.start(), Keyword.Namespace, word 
elif re.match( r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' r'return|exit|break)$', word): yield match.start(), Name.Exception, word elif re.match('REBOL$', word): yield match.start(), Generic.Heading, word elif re.match("to-.*", word): yield match.start(), Keyword, word elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', word): yield match.start(), Operator, word elif re.match(r".*\?$", word): yield match.start(), Keyword, word elif re.match(r".*\!$", word): yield match.start(), Keyword.Type, word elif re.match("'.*", word): yield match.start(), Name.Variable.Instance, word # lit-word elif re.match("#.*", word): yield match.start(), Name.Label, word # issue elif re.match("%.*", word): yield match.start(), Name.Decorator, word # file else: yield match.start(), Name.Variable, word tokens = { 'root': [ (r'[^R]+', Comment), (r'REBOL\s+\[', Generic.Strong, 'script'), (r'R', Comment) ], 'script': [ (r'\s+', Text), (r'#"', String.Char, 'char'), (r'#\{[0-9a-f]*\}', Number.Hex), (r'2#\{', Number.Hex, 'bin2'), (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), (r'"', String, 'string'), (r'\{', String, 'string2'), (r';#+.*\n', Comment.Special), (r';\*+.*\n', Comment.Preproc), (r';.*\n', Comment), (r'%"', Name.Decorator, 'stringFile'), (r'%[^(^{")\s\[\]]+', Name.Decorator), (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?' 
r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple (r'\d+X\d+', Keyword.Constant), # pair (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), (r'[+-]?\d+(\'\d+)?', Number), (r'[\[\]()]', Generic.Strong), (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email (r'comment\s"', Comment, 'commentString1'), (r'comment\s\{', Comment, 'commentString2'), (r'comment\s\[', Comment, 'commentBlock'), (r'comment\s[^(\s{"\[]+', Comment), (r'/[^(^{")\s/[\]]*', Name.Attribute), (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), (r'<[\w:.-]*>', Name.Tag), (r'<[^(<>\s")]+', Name.Tag, 'tag'), (r'([^(^{")\s]+)', Text), ], 'string': [ (r'[^(^")]+', String), (escape_re, String.Escape), (r'[(|)]+', String), (r'\^.', String.Escape), (r'"', String, '#pop'), ], 'string2': [ (r'[^(^{})]+', String), (escape_re, String.Escape), (r'[(|)]+', String), (r'\^.', String.Escape), (r'\{', String, '#push'), (r'\}', String, '#pop'), ], 'stringFile': [ (r'[^(^")]+', Name.Decorator), (escape_re, Name.Decorator), (r'\^.', Name.Decorator), (r'"', Name.Decorator, '#pop'), ], 'char': [ (escape_re + '"', String.Char, '#pop'), (r'\^."', String.Char, '#pop'), (r'."', String.Char, '#pop'), ], 'tag': [ (escape_re, Name.Tag), (r'"', Name.Tag, 'tagString'), (r'[^(<>\r\n")]+', Name.Tag), (r'>', Name.Tag, '#pop'), ], 'tagString': [ (r'[^(^")]+', Name.Tag), (escape_re, Name.Tag), (r'[(|)]+', Name.Tag), (r'\^.', Name.Tag), (r'"', Name.Tag, '#pop'), ], 'tuple': [ (r'(\d+\.)+', Keyword.Constant), (r'\d+', Keyword.Constant, '#pop'), ], 'bin2': [ (r'\s+', Number.Hex), (r'([01]\s*){8}', Number.Hex), (r'\}', Number.Hex, '#pop'), ], 'commentString1': [ (r'[^(^")]+', Comment), (escape_re, Comment), (r'[(|)]+', Comment), (r'\^.', Comment), (r'"', Comment, '#pop'), ], 'commentString2': [ (r'[^(^{})]+', 
Comment), (escape_re, Comment), (r'[(|)]+', Comment), (r'\^.', Comment), (r'\{', Comment, '#push'), (r'\}', Comment, '#pop'), ], 'commentBlock': [ (r'\[', Comment, '#push'), (r'\]', Comment, '#pop'), (r'"', Comment, "commentString1"), (r'\{', Comment, "commentString2"), (r'[^(\[\]"{)]+', Comment), ], } def analyse_text(text): """ Check if code contains REBOL header and so it probably not R code """ if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE): # The code starts with REBOL header return 1.0 elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE): # The code contains REBOL header but also some text before it return 0.5 class RedLexer(RegexLexer): """ A `Red-language <http://www.red-lang.org/>`_ lexer. .. versionadded:: 2.0 """ name = 'Red' aliases = ['red', 'red/system'] filenames = ['*.red', '*.reds'] mimetypes = ['text/x-red', 'text/x-red-system'] flags = re.IGNORECASE | re.MULTILINE escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' def word_callback(lexer, match): word = match.group() if re.match(".*:$", word): yield match.start(), Generic.Subheading, word elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|' r'foreach|forall|func|function|does|has|switch|' r'case|reduce|compose|get|set|print|prin|equal\?|' r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|' r'greater-or-equal\?|same\?|not|type\?|stats|' r'bind|union|replace|charset|routine)$', word): yield match.start(), Name.Builtin, word elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|' r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|' r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|' r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|' r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|' r'update|write)$', word): yield match.start(), Name.Function, word elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|' 
r'none|crlf|dot|null-byte)$', word): yield match.start(), Name.Builtin.Pseudo, word elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|' r'#switch|#default|#get-definition)$', word): yield match.start(), Keyword.Namespace, word elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|' r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|' r'quote|forever)$', word): yield match.start(), Name.Exception, word elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|' r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|' r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|' r'any-struct\?|none\?|word\?|any-series\?)$', word): yield match.start(), Keyword, word elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word): yield match.start(), Keyword.Namespace, word elif re.match("to-.*", word): yield match.start(), Keyword, word elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|' r'<<<|>>>|<<|>>|<|>%)$', word): yield match.start(), Operator, word elif re.match(r".*\!$", word): yield match.start(), Keyword.Type, word elif re.match("'.*", word): yield match.start(), Name.Variable.Instance, word # lit-word elif re.match("#.*", word): yield match.start(), Name.Label, word # issue elif re.match("%.*", word): yield match.start(), Name.Decorator, word # file elif re.match(":.*", word): yield match.start(), Generic.Subheading, word # get-word else: yield match.start(), Name.Variable, word tokens = { 'root': [ (r'[^R]+', Comment), (r'Red/System\s+\[', Generic.Strong, 'script'), (r'Red\s+\[', Generic.Strong, 'script'), (r'R', Comment) ], 'script': [ (r'\s+', Text), (r'#"', String.Char, 'char'), (r'#\{[0-9a-f\s]*\}', Number.Hex), (r'2#\{', Number.Hex, 'bin2'), (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))', bygroups(Number.Hex, Name.Variable, Whitespace)), (r'"', String, 
'string'), (r'\{', String, 'string2'), (r';#+.*\n', Comment.Special), (r';\*+.*\n', Comment.Preproc), (r';.*\n', Comment), (r'%"', Name.Decorator, 'stringFile'), (r'%[^(^{")\s\[\]]+', Name.Decorator), (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?' r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple (r'\d+X\d+', Keyword.Constant), # pair (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), (r'[+-]?\d+(\'\d+)?', Number), (r'[\[\]()]', Generic.Strong), (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email (r'comment\s"', Comment, 'commentString1'), (r'comment\s\{', Comment, 'commentString2'), (r'comment\s\[', Comment, 'commentBlock'), (r'comment\s[^(\s{"\[]+', Comment), (r'/[^(^{^")\s/[\]]*', Name.Attribute), (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), (r'<[\w:.-]*>', Name.Tag), (r'<[^(<>\s")]+', Name.Tag, 'tag'), (r'([^(^{")\s]+)', Text), ], 'string': [ (r'[^(^")]+', String), (escape_re, String.Escape), (r'[(|)]+', String), (r'\^.', String.Escape), (r'"', String, '#pop'), ], 'string2': [ (r'[^(^{})]+', String), (escape_re, String.Escape), (r'[(|)]+', String), (r'\^.', String.Escape), (r'\{', String, '#push'), (r'\}', String, '#pop'), ], 'stringFile': [ (r'[^(^")]+', Name.Decorator), (escape_re, Name.Decorator), (r'\^.', Name.Decorator), (r'"', Name.Decorator, '#pop'), ], 'char': [ (escape_re + '"', String.Char, '#pop'), (r'\^."', String.Char, '#pop'), (r'."', String.Char, '#pop'), ], 'tag': [ (escape_re, Name.Tag), (r'"', Name.Tag, 'tagString'), (r'[^(<>\r\n")]+', Name.Tag), (r'>', Name.Tag, '#pop'), ], 'tagString': [ (r'[^(^")]+', Name.Tag), (escape_re, Name.Tag), (r'[(|)]+', Name.Tag), (r'\^.', Name.Tag), (r'"', 
Name.Tag, '#pop'), ], 'tuple': [ (r'(\d+\.)+', Keyword.Constant), (r'\d+', Keyword.Constant, '#pop'), ], 'bin2': [ (r'\s+', Number.Hex), (r'([01]\s*){8}', Number.Hex), (r'\}', Number.Hex, '#pop'), ], 'commentString1': [ (r'[^(^")]+', Comment), (escape_re, Comment), (r'[(|)]+', Comment), (r'\^.', Comment), (r'"', Comment, '#pop'), ], 'commentString2': [ (r'[^(^{})]+', Comment), (escape_re, Comment), (r'[(|)]+', Comment), (r'\^.', Comment), (r'\{', Comment, '#push'), (r'\}', Comment, '#pop'), ], 'commentBlock': [ (r'\[', Comment, '#push'), (r'\]', Comment, '#pop'), (r'"', Comment, "commentString1"), (r'\{', Comment, "commentString2"), (r'[^(\[\]"{)]+', Comment), ], }
18,600
Python
42.157773
98
0.456989
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/rita.py
""" pygments.lexers.rita ~~~~~~~~~~~~~~~~~~~~ Lexers for RITA language :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer from pygments.token import Comment, Operator, Keyword, Name, Literal, \ Punctuation, Whitespace __all__ = ['RitaLexer'] class RitaLexer(RegexLexer): """ Lexer for RITA. .. versionadded:: 2.11 """ name = 'Rita' url = 'https://github.com/zaibacu/rita-dsl' filenames = ['*.rita'] aliases = ['rita'] mimetypes = ['text/rita'] tokens = { 'root': [ (r'\n', Whitespace), (r'\s+', Whitespace), (r'#(.*?)\n', Comment.Single), (r'@(.*?)\n', Operator), # Yes, whole line as an operator (r'"(\w|\d|\s|(\\")|[\'_\-./,\?\!])+?"', Literal), (r'\'(\w|\d|\s|(\\\')|["_\-./,\?\!])+?\'', Literal), (r'([A-Z_]+)', Keyword), (r'([a-z0-9_]+)', Name), (r'((->)|[!?+*|=])', Operator), (r'[\(\),\{\}]', Punctuation) ] }
1,128
Python
24.65909
71
0.460993
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/csound.py
""" pygments.lexers.csound ~~~~~~~~~~~~~~~~~~~~~~ Lexers for Csound languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, bygroups, default, include, using, words from pygments.token import Comment, Error, Keyword, Name, Number, Operator, Punctuation, \ String, Text, Whitespace from pygments.lexers._csound_builtins import OPCODES, DEPRECATED_OPCODES, REMOVED_OPCODES from pygments.lexers.html import HtmlLexer from pygments.lexers.python import PythonLexer from pygments.lexers.scripting import LuaLexer __all__ = ['CsoundScoreLexer', 'CsoundOrchestraLexer', 'CsoundDocumentLexer'] newline = (r'((?:(?:;|//).*)*)(\n)', bygroups(Comment.Single, Text)) class CsoundLexer(RegexLexer): url = 'https://csound.com/' tokens = { 'whitespace': [ (r'[ \t]+', Whitespace), (r'/[*](?:.|\n)*?[*]/', Comment.Multiline), (r'(?:;|//).*$', Comment.Single), (r'(\\)(\n)', bygroups(Text, Whitespace)) ], 'preprocessor directives': [ (r'#(?:e(?:nd(?:if)?|lse)\b|##)|@@?[ \t]*\d+', Comment.Preproc), (r'#includestr', Comment.Preproc, 'includestr directive'), (r'#include', Comment.Preproc, 'include directive'), (r'#[ \t]*define', Comment.Preproc, 'define directive'), (r'#(?:ifn?def|undef)\b', Comment.Preproc, 'macro directive') ], 'include directive': [ include('whitespace'), (r'([^ \t]).*?\1', String, '#pop') ], 'includestr directive': [ include('whitespace'), (r'"', String, ('#pop', 'quoted string')) ], 'define directive': [ (r'\n', Whitespace), include('whitespace'), (r'([A-Z_a-z]\w*)(\()', bygroups(Comment.Preproc, Punctuation), ('#pop', 'macro parameter name list')), (r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'before macro body')) ], 'macro parameter name list': [ include('whitespace'), (r'[A-Z_a-z]\w*', Comment.Preproc), (r"['#]", Punctuation), (r'\)', Punctuation, ('#pop', 'before macro body')) ], 'before macro body': [ (r'\n', Whitespace), include('whitespace'), (r'#', 
Punctuation, ('#pop', 'macro body')) ], 'macro body': [ (r'(?:\\(?!#)|[^#\\]|\n)+', Comment.Preproc), (r'\\#', Comment.Preproc), (r'(?<!\\)#', Punctuation, '#pop') ], 'macro directive': [ include('whitespace'), (r'[A-Z_a-z]\w*', Comment.Preproc, '#pop') ], 'macro uses': [ (r'(\$[A-Z_a-z]\w*\.?)(\()', bygroups(Comment.Preproc, Punctuation), 'macro parameter value list'), (r'\$[A-Z_a-z]\w*(?:\.|\b)', Comment.Preproc) ], 'macro parameter value list': [ (r'(?:[^\'#"{()]|\{(?!\{))+', Comment.Preproc), (r"['#]", Punctuation), (r'"', String, 'macro parameter value quoted string'), (r'\{\{', String, 'macro parameter value braced string'), (r'\(', Comment.Preproc, 'macro parameter value parenthetical'), (r'\)', Punctuation, '#pop') ], 'macro parameter value quoted string': [ (r"\\[#'()]", Comment.Preproc), (r"[#'()]", Error), include('quoted string') ], 'macro parameter value braced string': [ (r"\\[#'()]", Comment.Preproc), (r"[#'()]", Error), include('braced string') ], 'macro parameter value parenthetical': [ (r'(?:[^\\()]|\\\))+', Comment.Preproc), (r'\(', Comment.Preproc, '#push'), (r'\)', Comment.Preproc, '#pop') ], 'whitespace and macro uses': [ include('whitespace'), include('macro uses') ], 'numbers': [ (r'\d+[Ee][+-]?\d+|(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?', Number.Float), (r'(0[Xx])([0-9A-Fa-f]+)', bygroups(Keyword.Type, Number.Hex)), (r'\d+', Number.Integer) ], 'quoted string': [ (r'"', String, '#pop'), (r'[^"$]+', String), include('macro uses'), (r'[$]', String) ], 'braced string': [ # Do nothing. This must be defined in subclasses. ] } class CsoundScoreLexer(CsoundLexer): """ For `Csound <https://csound.com>`_ scores. .. 
versionadded:: 2.1 """ name = 'Csound Score' aliases = ['csound-score', 'csound-sco'] filenames = ['*.sco'] tokens = { 'root': [ (r'\n', Whitespace), include('whitespace and macro uses'), include('preprocessor directives'), (r'[aBbCdefiqstvxy]', Keyword), # There is also a w statement that is generated internally and should not be # used; see https://github.com/csound/csound/issues/750. (r'z', Keyword.Constant), # z is a constant equal to 800,000,000,000. 800 billion seconds is about # 25,367.8 years. See also # https://csound.com/docs/manual/ScoreTop.html and # https://github.com/csound/csound/search?q=stof+path%3AEngine+filename%3Asread.c. (r'([nNpP][pP])(\d+)', bygroups(Keyword, Number.Integer)), (r'[mn]', Keyword, 'mark statement'), include('numbers'), (r'[!+\-*/^%&|<>#~.]', Operator), (r'[()\[\]]', Punctuation), (r'"', String, 'quoted string'), (r'\{', Comment.Preproc, 'loop after left brace'), ], 'mark statement': [ include('whitespace and macro uses'), (r'[A-Z_a-z]\w*', Name.Label), (r'\n', Whitespace, '#pop') ], 'loop after left brace': [ include('whitespace and macro uses'), (r'\d+', Number.Integer, ('#pop', 'loop after repeat count')), ], 'loop after repeat count': [ include('whitespace and macro uses'), (r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'loop')) ], 'loop': [ (r'\}', Comment.Preproc, '#pop'), include('root') ], # Braced strings are not allowed in Csound scores, but this is needed because the # superclass includes it. 'braced string': [ (r'\}\}', String, '#pop'), (r'[^}]|\}(?!\})', String) ] } class CsoundOrchestraLexer(CsoundLexer): """ For `Csound <https://csound.com>`_ orchestras. .. 
versionadded:: 2.1 """ name = 'Csound Orchestra' aliases = ['csound', 'csound-orc'] filenames = ['*.orc', '*.udo'] user_defined_opcodes = set() def opcode_name_callback(lexer, match): opcode = match.group(0) lexer.user_defined_opcodes.add(opcode) yield match.start(), Name.Function, opcode def name_callback(lexer, match): type_annotation_token = Keyword.Type name = match.group(1) if name in OPCODES or name in DEPRECATED_OPCODES or name in REMOVED_OPCODES: yield match.start(), Name.Builtin, name elif name in lexer.user_defined_opcodes: yield match.start(), Name.Function, name else: type_annotation_token = Name name_match = re.search(r'^(g?[afikSw])(\w+)', name) if name_match: yield name_match.start(1), Keyword.Type, name_match.group(1) yield name_match.start(2), Name, name_match.group(2) else: yield match.start(), Name, name if match.group(2): yield match.start(2), Punctuation, match.group(2) yield match.start(3), type_annotation_token, match.group(3) tokens = { 'root': [ (r'\n', Whitespace), (r'^([ \t]*)(\w+)(:)([ \t]+|$)', bygroups(Whitespace, Name.Label, Punctuation, Whitespace)), include('whitespace and macro uses'), include('preprocessor directives'), (r'\binstr\b', Keyword.Declaration, 'instrument numbers and identifiers'), (r'\bopcode\b', Keyword.Declaration, 'after opcode keyword'), (r'\b(?:end(?:in|op))\b', Keyword.Declaration), include('partial statements') ], 'partial statements': [ (r'\b(?:0dbfs|A4|k(?:r|smps)|nchnls(?:_i)?|sr)\b', Name.Variable.Global), include('numbers'), (r'\+=|-=|\*=|/=|<<|>>|<=|>=|==|!=|&&|\|\||[~¬]|[=!+\-*/^%&|<>#?:]', Operator), (r'[(),\[\]]', Punctuation), (r'"', String, 'quoted string'), (r'\{\{', String, 'braced string'), (words(( 'do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', 'kthen', 'od', 'then', 'until', 'while', ), prefix=r'\b', suffix=r'\b'), Keyword), (words(('return', 'rireturn'), prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), (r'\b[ik]?goto\b', Keyword, 'goto label'), 
(r'\b(r(?:einit|igoto)|tigoto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation), 'goto label'), (r'\b(c(?:g|in?|k|nk?)goto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation), ('goto label', 'goto argument')), (r'\b(timout)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation), ('goto label', 'goto argument', 'goto argument')), (r'\b(loop_[gl][et])(\(|\b)', bygroups(Keyword.Pseudo, Punctuation), ('goto label', 'goto argument', 'goto argument', 'goto argument')), (r'\bprintk?s\b', Name.Builtin, 'prints opcode'), (r'\b(?:readscore|scoreline(?:_i)?)\b', Name.Builtin, 'Csound score opcode'), (r'\bpyl?run[it]?\b', Name.Builtin, 'Python opcode'), (r'\blua_(?:exec|opdef)\b', Name.Builtin, 'Lua opcode'), (r'\bp\d+\b', Name.Variable.Instance), (r'\b([A-Z_a-z]\w*)(?:(:)([A-Za-z]))?\b', name_callback) ], 'instrument numbers and identifiers': [ include('whitespace and macro uses'), (r'\d+|[A-Z_a-z]\w*', Name.Function), (r'[+,]', Punctuation), (r'\n', Whitespace, '#pop') ], 'after opcode keyword': [ include('whitespace and macro uses'), (r'[A-Z_a-z]\w*', opcode_name_callback, ('#pop', 'opcode type signatures')), (r'\n', Whitespace, '#pop') ], 'opcode type signatures': [ include('whitespace and macro uses'), # https://github.com/csound/csound/search?q=XIDENT+path%3AEngine+filename%3Acsound_orc.lex (r'0|[afijkKoOpPStV\[\]]+', Keyword.Type), (r',', Punctuation), (r'\n', Whitespace, '#pop') ], 'quoted string': [ (r'"', String, '#pop'), (r'[^\\"$%)]+', String), include('macro uses'), include('escape sequences'), include('format specifiers'), (r'[\\$%)]', String) ], 'braced string': [ (r'\}\}', String, '#pop'), (r'(?:[^\\%)}]|\}(?!\}))+', String), include('escape sequences'), include('format specifiers'), (r'[\\%)]', String) ], 'escape sequences': [ # https://github.com/csound/csound/search?q=unquote_string+path%3AEngine+filename%3Acsound_orc_compile.c (r'\\(?:[\\abnrt"]|[0-7]{1,3})', String.Escape) ], # Format specifiers are highlighted in all strings, even though only # fprintks 
https://csound.com/docs/manual/fprintks.html # fprints https://csound.com/docs/manual/fprints.html # printf/printf_i https://csound.com/docs/manual/printf.html # printks https://csound.com/docs/manual/printks.html # prints https://csound.com/docs/manual/prints.html # sprintf https://csound.com/docs/manual/sprintf.html # sprintfk https://csound.com/docs/manual/sprintfk.html # work with strings that contain format specifiers. In addition, these opcodes’ # handling of format specifiers is inconsistent: # - fprintks and fprints accept %a and %A specifiers, and accept %s specifiers # starting in Csound 6.15.0. # - printks and prints accept %a and %A specifiers, but don’t accept %s # specifiers. # - printf, printf_i, sprintf, and sprintfk don’t accept %a and %A specifiers, # but accept %s specifiers. # See https://github.com/csound/csound/issues/747 for more information. 'format specifiers': [ (r'%[#0\- +]*\d*(?:\.\d+)?[AE-GXac-giosux]', String.Interpol), (r'%%', String.Escape) ], 'goto argument': [ include('whitespace and macro uses'), (r',', Punctuation, '#pop'), include('partial statements') ], 'goto label': [ include('whitespace and macro uses'), (r'\w+', Name.Label, '#pop'), default('#pop') ], 'prints opcode': [ include('whitespace and macro uses'), (r'"', String, 'prints quoted string'), default('#pop') ], 'prints quoted string': [ (r'\\\\[aAbBnNrRtT]', String.Escape), (r'%[!nNrRtT]|[~^]{1,2}', String.Escape), include('quoted string') ], 'Csound score opcode': [ include('whitespace and macro uses'), (r'"', String, 'quoted string'), (r'\{\{', String, 'Csound score'), (r'\n', Whitespace, '#pop') ], 'Csound score': [ (r'\}\}', String, '#pop'), (r'([^}]+)|\}(?!\})', using(CsoundScoreLexer)) ], 'Python opcode': [ include('whitespace and macro uses'), (r'"', String, 'quoted string'), (r'\{\{', String, 'Python'), (r'\n', Whitespace, '#pop') ], 'Python': [ (r'\}\}', String, '#pop'), (r'([^}]+)|\}(?!\})', using(PythonLexer)) ], 'Lua opcode': [ include('whitespace and macro 
uses'), (r'"', String, 'quoted string'), (r'\{\{', String, 'Lua'), (r'\n', Whitespace, '#pop') ], 'Lua': [ (r'\}\}', String, '#pop'), (r'([^}]+)|\}(?!\})', using(LuaLexer)) ] } class CsoundDocumentLexer(RegexLexer): """ For `Csound <https://csound.com>`_ documents. .. versionadded:: 2.1 """ name = 'Csound Document' aliases = ['csound-document', 'csound-csd'] filenames = ['*.csd'] # These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making # CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a # better idea, since Csound Document files look like XML files. However, Csound # Documents can contain Csound comments (preceded by //, for example) before and # after the root element, unescaped bitwise AND & and less than < operators, etc. In # other words, while Csound Document files look like XML files, they may not actually # be XML files. tokens = { 'root': [ (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'(?:;|//).*$', Comment.Single), (r'[^/;<]+|/(?!/)', Text), (r'<\s*CsInstruments', Name.Tag, ('orchestra', 'tag')), (r'<\s*CsScore', Name.Tag, ('score', 'tag')), (r'<\s*[Hh][Tt][Mm][Ll]', Name.Tag, ('HTML', 'tag')), (r'<\s*[\w:.-]+', Name.Tag, 'tag'), (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag) ], 'orchestra': [ (r'<\s*/\s*CsInstruments\s*>', Name.Tag, '#pop'), (r'(.|\n)+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer)) ], 'score': [ (r'<\s*/\s*CsScore\s*>', Name.Tag, '#pop'), (r'(.|\n)+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer)) ], 'HTML': [ (r'<\s*/\s*[Hh][Tt][Mm][Ll]\s*>', Name.Tag, '#pop'), (r'(.|\n)+?(?=<\s*/\s*[Hh][Tt][Mm][Ll]\s*>)', using(HtmlLexer)) ], 'tag': [ (r'\s+', Whitespace), (r'[\w.:-]+\s*=', Name.Attribute, 'attr'), (r'/?\s*>', Name.Tag, '#pop') ], 'attr': [ (r'\s+', Whitespace), (r'".*?"', String, '#pop'), (r"'.*?'", String, '#pop'), (r'[^\s>]+', String, '#pop') ] }
16,987
Python
35.221748
116
0.483487
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/other.py
""" pygments.lexers.other ~~~~~~~~~~~~~~~~~~~~~ Just export lexer classes previously contained in this module. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ TcshLexer from pygments.lexers.robotframework import RobotFrameworkLexer from pygments.lexers.testing import GherkinLexer from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer from pygments.lexers.prolog import LogtalkLexer from pygments.lexers.snobol import SnobolLexer from pygments.lexers.rebol import RebolLexer from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer from pygments.lexers.modeling import ModelicaLexer from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \ HybrisLexer from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \ AsymptoteLexer, PovrayLexer from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \ GoodDataCLLexer, MaqlLexer from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \ MscgenLexer, VGLLexer from pygments.lexers.basic import CbmBasicV2Lexer from pygments.lexers.pawn import SourcePawnLexer, PawnLexer from pygments.lexers.ecl import ECLLexer from pygments.lexers.urbi import UrbiscriptLexer from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer from pygments.lexers.installers import NSISLexer, RPMSpecLexer from pygments.lexers.textedit import AwkLexer from pygments.lexers.smv import NuSMVLexer __all__ = []
1,744
Python
41.560975
79
0.818234
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/automation.py
""" pygments.lexers.automation ~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for automation scripting languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups, combined from pygments.token import Text, Comment, Operator, Name, String, \ Number, Punctuation, Generic __all__ = ['AutohotkeyLexer', 'AutoItLexer'] class AutohotkeyLexer(RegexLexer): """ For autohotkey source code. .. versionadded:: 1.4 """ name = 'autohotkey' url = 'http://www.autohotkey.com/' aliases = ['autohotkey', 'ahk'] filenames = ['*.ahk', '*.ahkl'] mimetypes = ['text/x-autohotkey'] tokens = { 'root': [ (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'), (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'), (r'\s+;.*?$', Comment.Single), (r'^;.*?$', Comment.Single), (r'[]{}(),;[]', Punctuation), (r'(in|is|and|or|not)\b', Operator.Word), (r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable), (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), include('commands'), include('labels'), include('builtInFunctions'), include('builtInVariables'), (r'"', String, combined('stringescape', 'dqs')), include('numbers'), (r'[a-zA-Z_#@$][\w#@$]*', Name), (r'\\|\'', Text), (r'\`([,%`abfnrtv\-+;])', String.Escape), include('garbage'), ], 'incomment': [ (r'^\s*\*/', Comment.Multiline, '#pop'), (r'[^*]+', Comment.Multiline), (r'\*', Comment.Multiline) ], 'incontinuation': [ (r'^\s*\)', Generic, '#pop'), (r'[^)]', Generic), (r'[)]', Generic), ], 'commands': [ (r'(?i)^(\s*)(global|local|static|' r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|' r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|' r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|' r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|' r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|' r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|' 
r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|' r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|' r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|' r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|' r'ControlSendRaw|ControlSetText|CoordMode|Critical|' r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|' r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|' r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|' r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|' r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|' r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|' r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|' r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|' r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|' r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|' r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|' r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|' r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|' r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|' r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|' r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|' r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|' r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|' r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|' r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|' r'SetBatchLines|SetCapslockState|SetControlDelay|' r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|' r'SetMouseDelay|SetNumlockState|SetScrollLockState|' r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|' r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|' r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|' r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|' r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|' 
r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|' r'StringReplace|StringRight|StringSplit|StringTrimLeft|' r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|' r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|' r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|' r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|' r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|' r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|' r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|' r'WinWait)\b', bygroups(Text, Name.Builtin)), ], 'builtInFunctions': [ (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|' r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|' r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|' r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|' r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|' r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|' r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|' r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|' r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|' r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|' r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|' r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|' r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|' r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|' r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|' r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b', Name.Function), ], 'builtInVariables': [ (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|' r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|' r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|' r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|' r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|' 
r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|' r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|' r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|' r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|' r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|' r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|' r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|' r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|' r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|' r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|' r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|' r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|' r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|' r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|' r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|' r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|' r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|' r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|' r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|' r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|' r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|' r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|' r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|' r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|' r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b', Name.Variable), ], 'labels': [ # hotkeys and labels # technically, hotkey names are limited to named keys and buttons (r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)), (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)), ], 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'0\d+', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', 
Number.Integer.Long), (r'\d+', Number.Integer) ], 'stringescape': [ (r'\"\"|\`([,%`abfnrtv])', String.Escape), ], 'strings': [ (r'[^"\n]+', String), ], 'dqs': [ (r'"', String, '#pop'), include('strings') ], 'garbage': [ (r'[^\S\n]', Text), # (r'.', Text), # no cheating ], } class AutoItLexer(RegexLexer): """ For AutoIt files. AutoIt is a freeware BASIC-like scripting language designed for automating the Windows GUI and general scripting .. versionadded:: 1.6 """ name = 'AutoIt' url = 'http://www.autoitscript.com/site/autoit/' aliases = ['autoit'] filenames = ['*.au3'] mimetypes = ['text/x-autoit'] # Keywords, functions, macros from au3.keywords.properties # which can be found in AutoIt installed directory, e.g. # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties keywords = """\ #include-once #include #endregion #forcedef #forceref #region and byref case continueloop dim do else elseif endfunc endif endselect exit exitloop for func global if local next not or return select step then to until wend while exit""".split() functions = """\ abs acos adlibregister adlibunregister asc ascw asin assign atan autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor blockinput break call cdtray ceiling chr chrw clipget clipput consoleread consolewrite consolewriteerror controlclick controlcommand controldisable controlenable controlfocus controlgetfocus controlgethandle controlgetpos controlgettext controlhide controllistview controlmove controlsend controlsettext controlshow controltreeview cos dec dircopy dircreate dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree drivespacetotal 
drivestatus envget envset envupdate eval execute exp filechangedir fileclose filecopy filecreatentfslink filecreateshortcut filedelete fileexists filefindfirstfile filefindnextfile fileflush filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut filegetshortname filegetsize filegettime filegetversion fileinstall filemove fileopen fileopendialog fileread filereadline filerecycle filerecycleempty filesavedialog fileselectfolder filesetattrib filesetpos filesettime filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon guictrlcreateinput guictrlcreatelabel guictrlcreatelist guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj guictrlcreatepic guictrlcreateprogress guictrlcreateradio guictrlcreateslider guictrlcreatetab guictrlcreatetabitem guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize inetread inidelete iniread inireadsection inireadsectionnames inirenamesection iniwrite iniwritesection inputbox int isadmin isarray isbinary isbool isdeclared 
isdllstruct isfloat ishwnd isint iskeyword isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox number objcreate objcreateinterface objevent objevent objget objname onautoitexitregister onautoitexitunregister opt ping pixelchecksum pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists processgetstats processlist processsetpriority processwait processwaitclose progressoff progresson progressset ptr random regdelete regenumkey regenumval regread regwrite round run runas runaswait runwait send sendkeepactive seterror setextended shellexecute shellexecutewait shutdown sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread string stringaddcr stringcompare stringformat stringfromasciiarray stringinstr stringisalnum stringisalpha stringisascii stringisdigit stringisfloat stringisint stringislower stringisspace stringisupper stringisxdigit stringleft stringlen stringlower stringmid stringregexp stringregexpreplace stringreplace stringright stringsplit stringstripcr stringstripws stringtoasciiarray stringtobinary stringtrimleft stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent traysetpauseicon traysetstate traysettooltip traytip ubound udpbind udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype winactivate winactive winclose winexists winflash wingetcaretpos wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess wingetstate wingettext wingettitle winkill winlist winmenuselectitem winminimizeall winminimizeallundo winmove 
winsetontop winsetstate winsettitle winsettrans winwait winwaitactive winwaitclose winwaitnotactive""".split() macros = """\ @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir @desktopheight @desktoprefresh @desktopwidth @documentscommondir @error @exitcode @exitmethod @extended @favoritescommondir @favoritesdir @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype @osversion @programfilesdir @programscommondir @programsdir @scriptdir @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday @windowsdir @workingdir @yday @year""".split() tokens = { 'root': [ (r';.*\n', Comment.Single), (r'(#comments-start|#cs)(.|\n)*?(#comments-end|#ce)', Comment.Multiline), (r'[\[\]{}(),;]', Punctuation), (r'(and|or|not)\b', Operator.Word), (r'[$|@][a-zA-Z_]\w*', Name.Variable), (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), include('commands'), include('labels'), include('builtInFunctions'), include('builtInMarcros'), (r'"', String, combined('stringescape', 'dqs')), (r"'", String, 'sqs'), include('numbers'), (r'[a-zA-Z_#@$][\w#@$]*', Name), (r'\\|\'', Text), (r'\`([,%`abfnrtv\-+;])', String.Escape), (r'_\n', Text), # Line continuation include('garbage'), ], 'commands': [ 
(r'(?i)(\s*)(%s)\b' % '|'.join(keywords), bygroups(Text, Name.Builtin)), ], 'builtInFunctions': [ (r'(?i)(%s)\b' % '|'.join(functions), Name.Function), ], 'builtInMarcros': [ (r'(?i)(%s)\b' % '|'.join(macros), Name.Variable.Global), ], 'labels': [ # sendkeys (r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)), ], 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'0\d+', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+', Number.Integer) ], 'stringescape': [ (r'\"\"|\`([,%`abfnrtv])', String.Escape), ], 'strings': [ (r'[^"\n]+', String), ], 'dqs': [ (r'"', String, '#pop'), include('strings') ], 'sqs': [ (r'\'\'|\`([,%`abfnrtv])', String.Escape), (r"'", String, '#pop'), (r"[^'\n]+", String) ], 'garbage': [ (r'[^\S\n]', Text), ], }
19,815
Python
50.874345
80
0.661519
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/meson.py
""" pygments.lexers.meson ~~~~~~~~~~~~~~~~~~~~~ Pygments lexer for the Meson build system :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words, include from pygments.token import Comment, Name, Number, Punctuation, Operator, \ Keyword, String, Whitespace __all__ = ['MesonLexer'] class MesonLexer(RegexLexer): """Meson language lexer. The grammar definition use to transcribe the syntax was retrieved from https://mesonbuild.com/Syntax.html#grammar for version 0.58. Some of those definitions are improperly transcribed, so the Meson++ implementation was also checked: https://github.com/dcbaker/meson-plus-plus. .. versionadded:: 2.10 """ # TODO String interpolation @VARNAME@ inner matches # TODO keyword_arg: value inner matches name = 'Meson' url = 'https://mesonbuild.com/' aliases = ['meson', 'meson.build'] filenames = ['meson.build', 'meson_options.txt'] mimetypes = ['text/x-meson'] tokens = { 'root': [ (r'#.*?$', Comment), (r"'''.*'''", String.Single), (r'[1-9][0-9]*', Number.Integer), (r'0o[0-7]+', Number.Oct), (r'0x[a-fA-F0-9]+', Number.Hex), include('string'), include('keywords'), include('expr'), (r'[a-zA-Z_][a-zA-Z_0-9]*', Name), (r'\s+', Whitespace), ], 'string': [ (r"[']{3}([']{0,2}([^\\']|\\(.|\n)))*[']{3}", String), (r"'.*?(?<!\\)(\\\\)*?'", String), ], 'keywords': [ (words(( 'if', 'elif', 'else', 'endif', 'foreach', 'endforeach', 'break', 'continue', ), suffix=r'\b'), Keyword), ], 'expr': [ (r'(in|and|or|not)\b', Operator.Word), (r'(\*=|/=|%=|\+]=|-=|==|!=|\+|-|=)', Operator), (r'[\[\]{}:().,?]', Punctuation), (words(('true', 'false'), suffix=r'\b'), Keyword.Constant), include('builtins'), (words(( 'meson', 'build_machine', 'host_machine', 'target_machine', ), suffix=r'\b'), Name.Variable.Magic), ], 'builtins': [ # This list was extracted from the v0.58 reference manual (words(( 'add_global_arguments', 'add_global_link_arguments', 'add_languages', 
'add_project_arguments', 'add_project_link_arguments', 'add_test_setup', 'assert', 'benchmark', 'both_libraries', 'build_target', 'configuration_data', 'configure_file', 'custom_target', 'declare_dependency', 'dependency', 'disabler', 'environment', 'error', 'executable', 'files', 'find_library', 'find_program', 'generator', 'get_option', 'get_variable', 'include_directories', 'install_data', 'install_headers', 'install_man', 'install_subdir', 'is_disabler', 'is_variable', 'jar', 'join_paths', 'library', 'message', 'project', 'range', 'run_command', 'set_variable', 'shared_library', 'shared_module', 'static_library', 'subdir', 'subdir_done', 'subproject', 'summary', 'test', 'vcs_tag', 'warning', ), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin), (r'(?<!\.)import\b', Name.Namespace), ], }
4,337
Python
29.765957
80
0.414803
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/jslt.py
""" pygments.lexers.jslt ~~~~~~~~~~~~~~~~~~~~ Lexers for the JSLT language :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, combined, words from pygments.token import Comment, Keyword, Name, Number, Operator, \ Punctuation, String, Whitespace __all__ = ['JSLTLexer'] _WORD_END = r'(?=[^0-9A-Z_a-z-])' class JSLTLexer(RegexLexer): """ For JSLT source. .. versionadded:: 2.10 """ name = 'JSLT' url = 'https://github.com/schibsted/jslt' filenames = ['*.jslt'] aliases = ['jslt'] mimetypes = ['text/x-jslt'] tokens = { 'root': [ (r'[\t\n\f\r ]+', Whitespace), (r'//.*(\n|\Z)', Comment.Single), (r'-?(0|[1-9][0-9]*)', Number.Integer), (r'-?(0|[1-9][0-9]*)(.[0-9]+a)?([Ee][+-]?[0-9]+)', Number.Float), (r'"([^"\\]|\\.)*"', String.Double), (r'[(),:\[\]{}]', Punctuation), (r'(!=|[<=>]=?)', Operator), (r'[*+/|-]', Operator), (r'\.', Operator), (words(('import',), suffix=_WORD_END), Keyword.Namespace, combined('import-path', 'whitespace')), (words(('as',), suffix=_WORD_END), Keyword.Namespace, combined('import-alias', 'whitespace')), (words(('let',), suffix=_WORD_END), Keyword.Declaration, combined('constant', 'whitespace')), (words(('def',), suffix=_WORD_END), Keyword.Declaration, combined('function', 'whitespace')), (words(('false', 'null', 'true'), suffix=_WORD_END), Keyword.Constant), (words(('else', 'for', 'if'), suffix=_WORD_END), Keyword), (words(('and', 'or'), suffix=_WORD_END), Operator.Word), (words(( 'all', 'any', 'array', 'boolean', 'capture', 'ceiling', 'contains', 'ends-with', 'error', 'flatten', 'floor', 'format-time', 'from-json', 'get-key', 'hash-int', 'index-of', 'is-array', 'is-boolean', 'is-decimal', 'is-integer', 'is-number', 'is-object', 'is-string', 'join', 'lowercase', 'max', 'min', 'mod', 'not', 'now', 'number', 'parse-time', 'parse-url', 'random', 'replace', 'round', 'sha256-hex', 'size', 'split', 'starts-with', 'string', 'sum', 'test', 'to-json', 'trim', 
'uppercase', 'zip', 'zip-with-index', 'fallback'), suffix=_WORD_END), Name.Builtin), (r'[A-Z_a-z][0-9A-Z_a-z-]*:[A-Z_a-z][0-9A-Z_a-z-]*', Name.Function), (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name), (r'\$[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable), ], 'constant': [ (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable, 'root'), ], 'function': [ (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Function, combined('function-parameter-list', 'whitespace')), ], 'function-parameter-list': [ (r'\(', Punctuation, combined('function-parameters', 'whitespace')), ], 'function-parameters': [ (r',', Punctuation), (r'\)', Punctuation, 'root'), (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable), ], 'import-path': [ (r'"([^"]|\\.)*"', String.Symbol, 'root'), ], 'import-alias': [ (r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Namespace, 'root'), ], 'string': [ (r'"', String.Double, '#pop'), (r'\\.', String.Escape), ], 'whitespace': [ (r'[\t\n\f\r ]+', Whitespace), (r'//.*(\n|\Z)', Comment.Single), ] }
3,701
Python
37.5625
109
0.46555
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/x10.py
""" pygments.lexers.x10 ~~~~~~~~~~~~~~~~~~~ Lexers for the X10 programming language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer from pygments.token import Text, Comment, Keyword, String __all__ = ['X10Lexer'] class X10Lexer(RegexLexer): """ For the X10 language. .. versionadded:: 2.2 """ name = 'X10' url = 'http://x10-lang.org/' aliases = ['x10', 'xten'] filenames = ['*.x10'] mimetypes = ['text/x-x10'] keywords = ( 'as', 'assert', 'async', 'at', 'athome', 'ateach', 'atomic', 'break', 'case', 'catch', 'class', 'clocked', 'continue', 'def', 'default', 'do', 'else', 'final', 'finally', 'finish', 'for', 'goto', 'haszero', 'here', 'if', 'import', 'in', 'instanceof', 'interface', 'isref', 'new', 'offer', 'operator', 'package', 'return', 'struct', 'switch', 'throw', 'try', 'type', 'val', 'var', 'when', 'while' ) types = ( 'void' ) values = ( 'false', 'null', 'self', 'super', 'this', 'true' ) modifiers = ( 'abstract', 'extends', 'implements', 'native', 'offers', 'private', 'property', 'protected', 'public', 'static', 'throws', 'transient' ) tokens = { 'root': [ (r'[^\S\n]+', Text), (r'//.*?\n', Comment.Single), (r'/\*(.|\n)*?\*/', Comment.Multiline), (r'\b(%s)\b' % '|'.join(keywords), Keyword), (r'\b(%s)\b' % '|'.join(types), Keyword.Type), (r'\b(%s)\b' % '|'.join(values), Keyword.Constant), (r'\b(%s)\b' % '|'.join(modifiers), Keyword.Declaration), (r'"(\\\\|\\[^\\]|[^"\\])*"', String), (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char), (r'.', Text) ], }
1,920
Python
27.25
70
0.471354
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/theorem.py
""" pygments.lexers.theorem ~~~~~~~~~~~~~~~~~~~~~~~ Lexers for theorem-proving languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, default, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Whitespace __all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer'] class CoqLexer(RegexLexer): """ For the Coq theorem prover. .. versionadded:: 1.5 """ name = 'Coq' url = 'http://coq.inria.fr/' aliases = ['coq'] filenames = ['*.v'] mimetypes = ['text/x-coq'] flags = 0 # no re.MULTILINE keywords1 = ( # Vernacular commands 'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable', 'Variables', 'Parameter', 'Parameters', 'Axiom', 'Axioms', 'Hypothesis', 'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope', 'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Example', 'Let', 'Ltac', 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit', 'Arguments', 'Types', 'Unset', 'Contextual', 'Strict', 'Prenex', 'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure', 'Variant', 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Fact', 'Remark', 'Corollary', 'Proposition', 'Property', 'Goal', 'Proof', 'Restart', 'Save', 'Qed', 'Defined', 'Abort', 'Admitted', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search', 'Compute', 'Eval', 'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside', 'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing', 'Universe', 'Polymorphic', 'Monomorphic', 'Context', 'Scheme', 'From', 'Undo', 'Fail', 'Function', ) keywords2 = ( # Gallina 'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct', 'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else', 'for', 'of', 'nosimpl', 'with', 'as', ) keywords3 = ( # Sorts 'Type', 'Prop', 'SProp', 'Set', ) keywords4 = ( # Tactics 'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro', 
'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct', 'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite', 'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold', 'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog', 'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial', 'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto', 'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite', 'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity', 'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute', 'native_compute', 'subst', ) keywords5 = ( # Terminators 'by', 'now', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega', 'lia', 'nia', 'lra', 'nra', 'psatz', 'assumption', 'solve', 'contradiction', 'discriminate', 'congruence', 'admit' ) keywords6 = ( # Control 'do', 'last', 'first', 'try', 'idtac', 'repeat', ) # 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', # 'downto', 'else', 'end', 'exception', 'external', 'false', # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', # 'type', 'val', 'virtual', 'when', 'while', 'with' keyopts = ( '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-', '<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>', r'/\\', r'\\/', r'\{\|', r'\|\}', # 'Π', 'Σ', # Not defined in the standard library 'λ', '¬', '∧', '∨', '∀', '∃', '→', '↔', '≠', '≤', '≥', ) operators = r'[!$%&*+\./:<=>?@^|~-]' prefix_syms = r'[!?~]' infix_syms = r'[=<>@^|&+\*/$%-]' tokens = { 'root': [ (r'\s+', Text), (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), (r'\(\*', Comment, 'comment'), 
(r'\b(?:[^\W\d][\w\']*\.)+[^\W\d][\w\']*\b', Name), (r'\bEquations\b\??', Keyword.Namespace), # Very weak heuristic to distinguish the Set vernacular from the Set sort (r'\bSet(?=[ \t]+[A-Z][a-z][^\n]*?\.)', Keyword.Namespace), (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword), (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type), (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword), (words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), (words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), # (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'), (r'\b([A-Z][\w\']*)', Name), (r'(%s)' % '|'.join(keyopts[::-1]), Operator), (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), (r"[^\W\d][\w']*", Name), (r'\d[\d_]*', Number.Integer), (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), (r'0[oO][0-7][0-7_]*', Number.Oct), (r'0[bB][01][01_]*', Number.Bin), (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", String.Char), (r"'.'", String.Char), (r"'", Keyword), # a stray quote is another syntax element (r'"', String.Double, 'string'), (r'[~?][a-z][\w\']*:', Name), (r'\S', Name.Builtin.Pseudo), ], 'comment': [ (r'[^(*)]+', Comment), (r'\(\*', Comment, '#push'), (r'\*\)', Comment, '#pop'), (r'[(*)]', Comment), ], 'string': [ (r'[^"]+', String.Double), (r'""', String.Double), (r'"', String.Double, '#pop'), ], 'dotted': [ (r'\s+', Text), (r'\.', Punctuation), (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), (r'[A-Z][\w\']*', Name.Class, '#pop'), (r'[a-z][a-z0-9_\']*', Name, '#pop'), default('#pop') ], } def analyse_text(text): if 'Qed' in text and 'Proof' in text: return 1 class IsabelleLexer(RegexLexer): """ For the Isabelle proof assistant. .. 
versionadded:: 2.0 """ name = 'Isabelle' url = 'https://isabelle.in.tum.de/' aliases = ['isabelle'] filenames = ['*.thy'] mimetypes = ['text/x-isabelle'] keyword_minor = ( 'and', 'assumes', 'attach', 'avoids', 'binder', 'checking', 'class_instance', 'class_relation', 'code_module', 'congs', 'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes', 'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in', 'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy', 'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes', 'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive', 'pervasive', 'rep_compat', 'shows', 'structure', 'type_class', 'type_constructor', 'unchecked', 'unsafe', 'where', ) keyword_diag = ( 'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms', 'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms', 'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf', 'print_abbrevs', 'print_antiquotations', 'print_attributes', 'print_binds', 'print_bnfs', 'print_bundles', 'print_case_translations', 'print_cases', 'print_claset', 'print_classes', 'print_codeproc', 'print_codesetup', 'print_coercions', 'print_commands', 'print_context', 'print_defn_rules', 'print_dependencies', 'print_facts', 'print_induct_rules', 'print_inductives', 'print_interps', 'print_locale', 'print_locales', 'print_methods', 'print_options', 'print_orders', 'print_quot_maps', 'print_quotconsts', 'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3', 'print_rules', 'print_simpset', 'print_state', 'print_statement', 'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules', 'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status', 'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps', 'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome', 'print_ML_antiquotations', 'print_term_bindings', 'values_prolog', ) keyword_thy = ('theory', 'begin', 'end') keyword_section = 
('header', 'chapter') keyword_subsection = ( 'section', 'subsection', 'subsubsection', 'sect', 'subsect', 'subsubsect', ) keyword_theory_decl = ( 'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities', 'atom_decl', 'attribute_setup', 'axiomatization', 'bundle', 'case_of_simps', 'class', 'classes', 'classrel', 'codatatype', 'code_abort', 'code_class', 'code_const', 'code_datatype', 'code_identifier', 'code_include', 'code_instance', 'code_modulename', 'code_monad', 'code_printing', 'code_reflect', 'code_reserved', 'code_type', 'coinductive', 'coinductive_set', 'consts', 'context', 'datatype', 'datatype_new', 'datatype_new_compat', 'declaration', 'declare', 'default_sort', 'defer_recdef', 'definition', 'defs', 'domain', 'domain_isomorphism', 'domaindef', 'equivariance', 'export_code', 'extract', 'extract_type', 'fixrec', 'fun', 'fun_cases', 'hide_class', 'hide_const', 'hide_fact', 'hide_type', 'import_const_map', 'import_file', 'import_tptp', 'import_type_map', 'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas', 'lifting_forget', 'lifting_update', 'local_setup', 'locale', 'method_setup', 'nitpick_params', 'no_adhoc_overloading', 'no_notation', 'no_syntax', 'no_translations', 'no_type_notation', 'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle', 'overloading', 'parse_ast_translation', 'parse_translation', 'partial_function', 'primcorec', 'primrec', 'primrec_new', 'print_ast_translation', 'print_translation', 'quickcheck_generator', 'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record', 'refute_params', 'setup', 'setup_lifting', 'simproc_setup', 'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open', 'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions', 'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text', 'text_raw', 'theorems', 'translations', 'type_notation', 'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate', 'install_C_file', 'install_C_types', 'wpc_setup', 
'c_defs', 'c_types', 'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate', 'bnf_axiomatization', 'cartouche', 'datatype_compat', 'free_constructors', 'functor', 'nominal_function', 'nominal_termination', 'permanent_interpretation', 'binds', 'defining', 'smt2_status', 'term_cartouche', 'boogie_file', 'text_cartouche', ) keyword_theory_script = ('inductive_cases', 'inductive_simps') keyword_theory_goal = ( 'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef', 'crunch', 'crunch_ignore', 'enriched_type', 'function', 'instance', 'interpretation', 'lemma', 'lift_definition', 'nominal_inductive', 'nominal_inductive2', 'nominal_primrec', 'pcpodef', 'primcorecursive', 'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype', 'schematic_corollary', 'schematic_lemma', 'schematic_theorem', 'spark_vc', 'specification', 'subclass', 'sublocale', 'termination', 'theorem', 'typedef', 'wrap_free_constructors', ) keyword_qed = ('by', 'done', 'qed') keyword_abandon_proof = ('sorry', 'oops') keyword_proof_goal = ('have', 'hence', 'interpret') keyword_proof_block = ('next', 'proof') keyword_proof_chain = ( 'finally', 'from', 'then', 'ultimately', 'with', ) keyword_proof_decl = ( 'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note', 'txt', 'txt_raw', 'unfolding', 'using', 'write', ) keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume') keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus') keyword_proof_script = ( 'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer', ) operators = ( '::', ':', '(', ')', '[', ']', '_', '=', ',', '|', '+', '-', '!', '?', ) proof_operators = ('{', '}', '.', '..') tokens = { 'root': [ (r'\s+', Whitespace), (r'\(\*', Comment, 'comment'), (r'\\<open>', String.Symbol, 'cartouche'), (r'\{\*|‹', String, 'cartouche'), (words(operators), Operator), (words(proof_operators), Operator.Word), (words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), (words(keyword_diag, prefix=r'\b', 
suffix=r'\b'), Keyword.Type), (words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading), (words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading), (words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), (words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace), (words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error), (words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword), (words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo), (r'\\<(\w|\^)*>', Text.Symbol), (r"'[^\W\d][.\w']*", Name.Type), (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), (r'0[oO][0-7][0-7_]*', Number.Oct), (r'0[bB][01][01_]*', Number.Bin), (r'"', String, 'string'), (r'`', String.Other, 'fact'), (r'[^\s:|\[\]\-()=,+!?{}._][^\s:|\[\]\-()=,+!?{}]*', Name), ], 'comment': [ (r'[^(*)]+', Comment), (r'\(\*', Comment, '#push'), (r'\*\)', Comment, '#pop'), (r'[(*)]', Comment), ], 'cartouche': [ (r'[^{*}\\‹›]+', String), (r'\\<open>', String.Symbol, '#push'), (r'\{\*|‹', String, '#push'), (r'\\<close>', String.Symbol, '#pop'), (r'\*\}|›', String, '#pop'), (r'\\<(\w|\^)*>', String.Symbol), (r'[{*}\\]', String), ], 'string': [ (r'[^"\\]+', String), (r'\\<(\w|\^)*>', String.Symbol), (r'\\"', String), (r'\\', String), (r'"', String, '#pop'), ], 'fact': [ (r'[^`\\]+', String.Other), (r'\\<(\w|\^)*>', String.Symbol), (r'\\`', String.Other), (r'\\', String.Other), (r'`', String.Other, 
'#pop'), ], } class LeanLexer(RegexLexer): """ For the Lean theorem prover. .. versionadded:: 2.0 """ name = 'Lean' url = 'https://github.com/leanprover/lean' aliases = ['lean'] filenames = ['*.lean'] mimetypes = ['text/x-lean'] tokens = { 'root': [ (r'\s+', Text), (r'/--', String.Doc, 'docstring'), (r'/-', Comment, 'comment'), (r'--.*?$', Comment.Single), (words(( 'import', 'renaming', 'hiding', 'namespace', 'local', 'private', 'protected', 'section', 'include', 'omit', 'section', 'protected', 'export', 'open', 'attribute', ), prefix=r'\b', suffix=r'\b'), Keyword.Namespace), (words(( 'lemma', 'theorem', 'def', 'definition', 'example', 'axiom', 'axioms', 'constant', 'constants', 'universe', 'universes', 'inductive', 'coinductive', 'structure', 'extends', 'class', 'instance', 'abbreviation', 'noncomputable theory', 'noncomputable', 'mutual', 'meta', 'attribute', 'parameter', 'parameters', 'variable', 'variables', 'reserve', 'precedence', 'postfix', 'prefix', 'notation', 'infix', 'infixl', 'infixr', 'begin', 'by', 'end', 'set_option', 'run_cmd', ), prefix=r'\b', suffix=r'\b'), Keyword.Declaration), (r'@\[[^\]]*\]', Keyword.Declaration), (words(( 'forall', 'fun', 'Pi', 'from', 'have', 'show', 'assume', 'suffices', 'let', 'if', 'else', 'then', 'in', 'with', 'calc', 'match', 'do' ), prefix=r'\b', suffix=r'\b'), Keyword), (words(('sorry', 'admit'), prefix=r'\b', suffix=r'\b'), Generic.Error), (words(('Sort', 'Prop', 'Type'), prefix=r'\b', suffix=r'\b'), Keyword.Type), (words(( '#eval', '#check', '#reduce', '#exit', '#print', '#help', ), suffix=r'\b'), Keyword), (words(( '(', ')', ':', '{', '}', '[', ']', '⟨', '⟩', '‹', '›', '⦃', '⦄', ':=', ',', )), Operator), (r'[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]' r'[.A-Za-z_\'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079' r'\u207f-\u2089\u2090-\u209c\u2100-\u214f0-9]*', Name), (r'0x[A-Za-z0-9]+', Number.Integer), (r'0b[01]+', Number.Integer), (r'\d+', Number.Integer), (r'"', String.Double, 'string'), 
(r"'(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4})|.)'", String.Char), (r'[~?][a-z][\w\']*:', Name.Variable), (r'\S', Name.Builtin.Pseudo), ], 'comment': [ (r'[^/-]', Comment.Multiline), (r'/-', Comment.Multiline, '#push'), (r'-/', Comment.Multiline, '#pop'), (r'[/-]', Comment.Multiline) ], 'docstring': [ (r'[^/-]', String.Doc), (r'-/', String.Doc, '#pop'), (r'[/-]', String.Doc) ], 'string': [ (r'[^\\"]+', String.Double), (r"(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4}))", String.Escape), ('"', String.Double, '#pop'), ], }
20,113
Python
40.472165
92
0.480684
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/robotframework.py
""" pygments.lexers.robotframework ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Robot Framework. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # Copyright 2012 Nokia Siemens Networks Oyj # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from pygments.lexer import Lexer from pygments.token import Token __all__ = ['RobotFrameworkLexer'] HEADING = Token.Generic.Heading SETTING = Token.Keyword.Namespace IMPORT = Token.Name.Namespace TC_KW_NAME = Token.Generic.Subheading KEYWORD = Token.Name.Function ARGUMENT = Token.String VARIABLE = Token.Name.Variable COMMENT = Token.Comment SEPARATOR = Token.Punctuation SYNTAX = Token.Punctuation GHERKIN = Token.Generic.Emph ERROR = Token.Error def normalize(string, remove=''): string = string.lower() for char in remove + ' ': if char in string: string = string.replace(char, '') return string class RobotFrameworkLexer(Lexer): """ For Robot Framework test data. Supports both space and pipe separated plain text formats. .. 
versionadded:: 1.6 """ name = 'RobotFramework' url = 'http://robotframework.org' aliases = ['robotframework'] filenames = ['*.robot', '*.resource'] mimetypes = ['text/x-robotframework'] def __init__(self, **options): options['tabsize'] = 2 options['encoding'] = 'UTF-8' Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): row_tokenizer = RowTokenizer() var_tokenizer = VariableTokenizer() index = 0 for row in text.splitlines(): for value, token in row_tokenizer.tokenize(row): for value, token in var_tokenizer.tokenize(value, token): if value: yield index, token, str(value) index += len(value) class VariableTokenizer: def tokenize(self, string, token): var = VariableSplitter(string, identifiers='$@%&') if var.start < 0 or token in (COMMENT, ERROR): yield string, token return for value, token in self._tokenize(var, string, token): if value: yield value, token def _tokenize(self, var, string, orig_token): before = string[:var.start] yield before, orig_token yield var.identifier + '{', SYNTAX yield from self.tokenize(var.base, VARIABLE) yield '}', SYNTAX if var.index is not None: yield '[', SYNTAX yield from self.tokenize(var.index, VARIABLE) yield ']', SYNTAX yield from self.tokenize(string[var.end:], orig_token) class RowTokenizer: def __init__(self): self._table = UnknownTable() self._splitter = RowSplitter() testcases = TestCaseTable() settings = SettingTable(testcases.set_default_template) variables = VariableTable() keywords = KeywordTable() self._tables = {'settings': settings, 'setting': settings, 'metadata': settings, 'variables': variables, 'variable': variables, 'testcases': testcases, 'testcase': testcases, 'tasks': testcases, 'task': testcases, 'keywords': keywords, 'keyword': keywords, 'userkeywords': keywords, 'userkeyword': keywords} def tokenize(self, row): commented = False heading = False for index, value in enumerate(self._splitter.split(row)): # First value, and every second after that, is a separator. 
index, separator = divmod(index-1, 2) if value.startswith('#'): commented = True elif index == 0 and value.startswith('*'): self._table = self._start_table(value) heading = True yield from self._tokenize(value, index, commented, separator, heading) self._table.end_row() def _start_table(self, header): name = normalize(header, remove='*') return self._tables.get(name, UnknownTable()) def _tokenize(self, value, index, commented, separator, heading): if commented: yield value, COMMENT elif separator: yield value, SEPARATOR elif heading: yield value, HEADING else: yield from self._table.tokenize(value, index) class RowSplitter: _space_splitter = re.compile('( {2,})') _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))') def split(self, row): splitter = (row.startswith('| ') and self._split_from_pipes or self._split_from_spaces) yield from splitter(row) yield '\n' def _split_from_spaces(self, row): yield '' # Start with (pseudo)separator similarly as with pipes yield from self._space_splitter.split(row) def _split_from_pipes(self, row): _, separator, rest = self._pipe_splitter.split(row, 1) yield separator while self._pipe_splitter.search(rest): cell, separator, rest = self._pipe_splitter.split(rest, 1) yield cell yield separator yield rest class Tokenizer: _tokens = None def __init__(self): self._index = 0 def tokenize(self, value): values_and_tokens = self._tokenize(value, self._index) self._index += 1 if isinstance(values_and_tokens, type(Token)): values_and_tokens = [(value, values_and_tokens)] return values_and_tokens def _tokenize(self, value, index): index = min(index, len(self._tokens) - 1) return self._tokens[index] def _is_assign(self, value): if value.endswith('='): value = value[:-1].strip() var = VariableSplitter(value, identifiers='$@&') return var.start == 0 and var.end == len(value) class Comment(Tokenizer): _tokens = (COMMENT,) class Setting(Tokenizer): _tokens = (SETTING, ARGUMENT) _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown', 
'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition', 'testteardown','taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate') _import_settings = ('library', 'resource', 'variables') _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags', 'testtimeout','tasktimeout') _custom_tokenizer = None def __init__(self, template_setter=None): Tokenizer.__init__(self) self._template_setter = template_setter def _tokenize(self, value, index): if index == 1 and self._template_setter: self._template_setter(value) if index == 0: normalized = normalize(value) if normalized in self._keyword_settings: self._custom_tokenizer = KeywordCall(support_assign=False) elif normalized in self._import_settings: self._custom_tokenizer = ImportSetting() elif normalized not in self._other_settings: return ERROR elif self._custom_tokenizer: return self._custom_tokenizer.tokenize(value) return Tokenizer._tokenize(self, value, index) class ImportSetting(Tokenizer): _tokens = (IMPORT, ARGUMENT) class TestCaseSetting(Setting): _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition', 'template') _import_settings = () _other_settings = ('documentation', 'tags', 'timeout') def _tokenize(self, value, index): if index == 0: type = Setting._tokenize(self, value[1:-1], index) return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)] return Setting._tokenize(self, value, index) class KeywordSetting(TestCaseSetting): _keyword_settings = ('teardown',) _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags') class Variable(Tokenizer): _tokens = (SYNTAX, ARGUMENT) def _tokenize(self, value, index): if index == 0 and not self._is_assign(value): return ERROR return Tokenizer._tokenize(self, value, index) class KeywordCall(Tokenizer): _tokens = (KEYWORD, ARGUMENT) def __init__(self, support_assign=True): Tokenizer.__init__(self) self._keyword_found = not support_assign self._assigns = 0 def _tokenize(self, value, index): if not 
self._keyword_found and self._is_assign(value): self._assigns += 1 return SYNTAX # VariableTokenizer tokenizes this later. if self._keyword_found: return Tokenizer._tokenize(self, value, index - self._assigns) self._keyword_found = True return GherkinTokenizer().tokenize(value, KEYWORD) class GherkinTokenizer: _gherkin_prefix = re.compile('^(Given|When|Then|And|But) ', re.IGNORECASE) def tokenize(self, value, token): match = self._gherkin_prefix.match(value) if not match: return [(value, token)] end = match.end() return [(value[:end], GHERKIN), (value[end:], token)] class TemplatedKeywordCall(Tokenizer): _tokens = (ARGUMENT,) class ForLoop(Tokenizer): def __init__(self): Tokenizer.__init__(self) self._in_arguments = False def _tokenize(self, value, index): token = self._in_arguments and ARGUMENT or SYNTAX if value.upper() in ('IN', 'IN RANGE'): self._in_arguments = True return token class _Table: _tokenizer_class = None def __init__(self, prev_tokenizer=None): self._tokenizer = self._tokenizer_class() self._prev_tokenizer = prev_tokenizer self._prev_values_on_row = [] def tokenize(self, value, index): if self._continues(value, index): self._tokenizer = self._prev_tokenizer yield value, SYNTAX else: yield from self._tokenize(value, index) self._prev_values_on_row.append(value) def _continues(self, value, index): return value == '...' 
and all(self._is_empty(t) for t in self._prev_values_on_row) def _is_empty(self, value): return value in ('', '\\') def _tokenize(self, value, index): return self._tokenizer.tokenize(value) def end_row(self): self.__init__(prev_tokenizer=self._tokenizer) class UnknownTable(_Table): _tokenizer_class = Comment def _continues(self, value, index): return False class VariableTable(_Table): _tokenizer_class = Variable class SettingTable(_Table): _tokenizer_class = Setting def __init__(self, template_setter, prev_tokenizer=None): _Table.__init__(self, prev_tokenizer) self._template_setter = template_setter def _tokenize(self, value, index): if index == 0 and normalize(value) == 'testtemplate': self._tokenizer = Setting(self._template_setter) return _Table._tokenize(self, value, index) def end_row(self): self.__init__(self._template_setter, prev_tokenizer=self._tokenizer) class TestCaseTable(_Table): _setting_class = TestCaseSetting _test_template = None _default_template = None @property def _tokenizer_class(self): if self._test_template or (self._default_template and self._test_template is not False): return TemplatedKeywordCall return KeywordCall def _continues(self, value, index): return index > 0 and _Table._continues(self, value, index) def _tokenize(self, value, index): if index == 0: if value: self._test_template = None return GherkinTokenizer().tokenize(value, TC_KW_NAME) if index == 1 and self._is_setting(value): if self._is_template(value): self._test_template = False self._tokenizer = self._setting_class(self.set_test_template) else: self._tokenizer = self._setting_class() if index == 1 and self._is_for_loop(value): self._tokenizer = ForLoop() if index == 1 and self._is_empty(value): return [(value, SYNTAX)] return _Table._tokenize(self, value, index) def _is_setting(self, value): return value.startswith('[') and value.endswith(']') def _is_template(self, value): return normalize(value) == '[template]' def _is_for_loop(self, value): return value.startswith(':') 
and normalize(value, remove=':') == 'for' def set_test_template(self, template): self._test_template = self._is_template_set(template) def set_default_template(self, template): self._default_template = self._is_template_set(template) def _is_template_set(self, template): return normalize(template) not in ('', '\\', 'none', '${empty}') class KeywordTable(TestCaseTable): _tokenizer_class = KeywordCall _setting_class = KeywordSetting def _is_template(self, value): return False # Following code copied directly from Robot Framework 2.7.5. class VariableSplitter: def __init__(self, string, identifiers): self.identifier = None self.base = None self.index = None self.start = -1 self.end = -1 self._identifiers = identifiers self._may_have_internal_variables = False try: self._split(string) except ValueError: pass else: self._finalize() def get_replaced_base(self, variables): if self._may_have_internal_variables: return variables.replace_string(self.base) return self.base def _finalize(self): self.identifier = self._variable_chars[0] self.base = ''.join(self._variable_chars[2:-1]) self.end = self.start + len(self._variable_chars) if self._has_list_or_dict_variable_index(): self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1]) self.end += len(self._list_and_dict_variable_index_chars) def _has_list_or_dict_variable_index(self): return self._list_and_dict_variable_index_chars\ and self._list_and_dict_variable_index_chars[-1] == ']' def _split(self, string): start_index, max_index = self._find_variable(string) self.start = start_index self._open_curly = 1 self._state = self._variable_state self._variable_chars = [string[start_index], '{'] self._list_and_dict_variable_index_chars = [] self._string = string start_index += 2 for index, char in enumerate(string[start_index:]): index += start_index # Giving start to enumerate only in Py 2.6+ try: self._state(char, index) except StopIteration: return if index == max_index and not self._scanning_list_variable_index(): 
return def _scanning_list_variable_index(self): return self._state in [self._waiting_list_variable_index_state, self._list_variable_index_state] def _find_variable(self, string): max_end_index = string.rfind('}') if max_end_index == -1: raise ValueError('No variable end found') if self._is_escaped(string, max_end_index): return self._find_variable(string[:max_end_index]) start_index = self._find_start_index(string, 1, max_end_index) if start_index == -1: raise ValueError('No variable start found') return start_index, max_end_index def _find_start_index(self, string, start, end): index = string.find('{', start, end) - 1 if index < 0: return -1 if self._start_index_is_ok(string, index): return index return self._find_start_index(string, index+2, end) def _start_index_is_ok(self, string, index): return string[index] in self._identifiers\ and not self._is_escaped(string, index) def _is_escaped(self, string, index): escaped = False while index > 0 and string[index-1] == '\\': index -= 1 escaped = not escaped return escaped def _variable_state(self, char, index): self._variable_chars.append(char) if char == '}' and not self._is_escaped(self._string, index): self._open_curly -= 1 if self._open_curly == 0: if not self._is_list_or_dict_variable(): raise StopIteration self._state = self._waiting_list_variable_index_state elif char in self._identifiers: self._state = self._internal_variable_start_state def _is_list_or_dict_variable(self): return self._variable_chars[0] in ('@','&') def _internal_variable_start_state(self, char, index): self._state = self._variable_state if char == '{': self._variable_chars.append(char) self._open_curly += 1 self._may_have_internal_variables = True else: self._variable_state(char, index) def _waiting_list_variable_index_state(self, char, index): if char != '[': raise StopIteration self._list_and_dict_variable_index_chars.append(char) self._state = self._list_variable_index_state def _list_variable_index_state(self, char, index): 
self._list_and_dict_variable_index_chars.append(char) if char == ']': raise StopIteration
18,449
Python
32.363472
108
0.593962
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/cddl.py
""" pygments.lexers.cddl ~~~~~~~~~~~~~~~~~~~~ Lexer for the Concise data definition language (CDDL), a notational convention to express CBOR and JSON data structures. More information: https://datatracker.ietf.org/doc/rfc8610/ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, include, words from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ Punctuation, String, Whitespace __all__ = ['CddlLexer'] class CddlLexer(RegexLexer): """ Lexer for CDDL definitions. .. versionadded:: 2.8 """ name = "CDDL" url = 'https://datatracker.ietf.org/doc/rfc8610/' aliases = ["cddl"] filenames = ["*.cddl"] mimetypes = ["text/x-cddl"] _prelude_types = [ "any", "b64legacy", "b64url", "bigfloat", "bigint", "bignint", "biguint", "bool", "bstr", "bytes", "cbor-any", "decfrac", "eb16", "eb64legacy", "eb64url", "encoded-cbor", "false", "float", "float16", "float16-32", "float32", "float32-64", "float64", "int", "integer", "mime-message", "nil", "nint", "null", "number", "regexp", "tdate", "text", "time", "true", "tstr", "uint", "undefined", "unsigned", "uri", ] _controls = [ ".and", ".bits", ".cbor", ".cborseq", ".default", ".eq", ".ge", ".gt", ".le", ".lt", ".ne", ".regexp", ".size", ".within", ] _re_id = ( r"[$@A-Z_a-z]" r"(?:[\-\.]+(?=[$@0-9A-Z_a-z])|[$@0-9A-Z_a-z])*" ) # While the spec reads more like "an int must not start with 0" we use a # lookahead here that says "after a 0 there must be no digit". This makes the # '0' the invalid character in '01', which looks nicer when highlighted. _re_uint = r"(?:0b[01]+|0x[0-9a-fA-F]+|[1-9]\d*|0(?!\d))" _re_int = r"-?" 
+ _re_uint tokens = { "commentsandwhitespace": [(r"\s+", Whitespace), (r";.+$", Comment.Single)], "root": [ include("commentsandwhitespace"), # tag types (r"#(\d\.{uint})?".format(uint=_re_uint), Keyword.Type), # type or any # occurrence ( r"({uint})?(\*)({uint})?".format(uint=_re_uint), bygroups(Number, Operator, Number), ), (r"\?|\+", Operator), # occurrence (r"\^", Operator), # cuts (r"(\.\.\.|\.\.)", Operator), # rangeop (words(_controls, suffix=r"\b"), Operator.Word), # ctlops # into choice op (r"&(?=\s*({groupname}|\())".format(groupname=_re_id), Operator), (r"~(?=\s*{})".format(_re_id), Operator), # unwrap op (r"//|/(?!/)", Operator), # double und single slash (r"=>|/==|/=|=", Operator), (r"[\[\]{}\(\),<>:]", Punctuation), # Bytestrings (r"(b64)(')", bygroups(String.Affix, String.Single), "bstrb64url"), (r"(h)(')", bygroups(String.Affix, String.Single), "bstrh"), (r"'", String.Single, "bstr"), # Barewords as member keys (must be matched before values, types, typenames, # groupnames). # Token type is String as barewords are always interpreted as such. 
(r"({bareword})(\s*)(:)".format(bareword=_re_id), bygroups(String, Whitespace, Punctuation)), # predefined types (words(_prelude_types, prefix=r"(?![\-_$@])\b", suffix=r"\b(?![\-_$@])"), Name.Builtin), # user-defined groupnames, typenames (_re_id, Name.Class), # values (r"0b[01]+", Number.Bin), (r"0o[0-7]+", Number.Oct), (r"0x[0-9a-fA-F]+(\.[0-9a-fA-F]+)?p[+-]?\d+", Number.Hex), # hexfloat (r"0x[0-9a-fA-F]+", Number.Hex), # hex # Float (r"{int}(?=(\.\d|e[+-]?\d))(?:\.\d+)?(?:e[+-]?\d+)?".format(int=_re_int), Number.Float), # Int (_re_int, Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), ], "bstrb64url": [ (r"'", String.Single, "#pop"), include("commentsandwhitespace"), (r"\\.", String.Escape), (r"[0-9a-zA-Z\-_=]+", String.Single), (r".", Error), # (r";.+$", Token.Other), ], "bstrh": [ (r"'", String.Single, "#pop"), include("commentsandwhitespace"), (r"\\.", String.Escape), (r"[0-9a-fA-F]+", String.Single), (r".", Error), ], "bstr": [ (r"'", String.Single, "#pop"), (r"\\.", String.Escape), (r"[^'\\]+", String.Single), ], }
5,182
Python
28.787356
88
0.443458
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/markup.py
""" pygments.lexers.markup ~~~~~~~~~~~~~~~~~~~~~~ Lexers for non-HTML markup languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexers.html import XmlLexer from pygments.lexers.javascript import JavascriptLexer from pygments.lexers.css import CssLexer from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \ using, this, do_insertions, default, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Other, Whitespace from pygments.util import get_bool_opt, ClassNotFound __all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer', 'MozPreprocHashLexer', 'MozPreprocPercentLexer', 'MozPreprocXulLexer', 'MozPreprocJavascriptLexer', 'MozPreprocCssLexer', 'MarkdownLexer', 'TiddlyWiki5Lexer'] class BBCodeLexer(RegexLexer): """ A lexer that highlights BBCode(-like) syntax. .. versionadded:: 0.6 """ name = 'BBCode' aliases = ['bbcode'] mimetypes = ['text/x-bbcode'] tokens = { 'root': [ (r'[^[]+', Text), # tag/end tag begin (r'\[/?\w+', Keyword, 'tag'), # stray bracket (r'\[', Text), ], 'tag': [ (r'\s+', Text), # attribute with value (r'(\w+)(=)("?[^\s"\]]+"?)', bygroups(Name.Attribute, Operator, String)), # tag argument (a la [color=green]) (r'(=)("?[^\s"\]]+"?)', bygroups(Operator, String)), # tag end (r'\]', Keyword, '#pop'), ], } class MoinWikiLexer(RegexLexer): """ For MoinMoin (and Trac) Wiki markup. .. 
versionadded:: 0.7 """ name = 'MoinMoin/Trac Wiki markup' aliases = ['trac-wiki', 'moin'] filenames = [] mimetypes = ['text/x-trac-wiki'] flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ (r'^#.*$', Comment), (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next # Titles (r'^(=+)([^=]+)(=+)(\s*#.+)?$', bygroups(Generic.Heading, using(this), Generic.Heading, String)), # Literal code blocks, with optional shebang (r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'), (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting # Lists (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)), (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)), # Other Formatting (r'\[\[\w+.*?\]\]', Keyword), # Macro (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])', bygroups(Keyword, String, Keyword)), # Link (r'^----+$', Keyword), # Horizontal rules (r'[^\n\'\[{!_~^,|]+', Text), (r'\n', Text), (r'.', Text), ], 'codeblock': [ (r'\}\}\}', Name.Builtin, '#pop'), # these blocks are allowed to be nested in Trac, but not MoinMoin (r'\{\{\{', Text, '#push'), (r'[^{}]+', Comment.Preproc), # slurp boring text (r'.', Comment.Preproc), # allow loose { or } ], } class RstLexer(RegexLexer): """ For reStructuredText markup. .. versionadded:: 0.7 Additional options accepted: `handlecodeblocks` Highlight the contents of ``.. sourcecode:: language``, ``.. code:: language`` and ``.. code-block:: language`` directives with a lexer for the given language (default: ``True``). .. 
versionadded:: 0.8 """ name = 'reStructuredText' url = 'https://docutils.sourceforge.io/rst.html' aliases = ['restructuredtext', 'rst', 'rest'] filenames = ['*.rst', '*.rest'] mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"] flags = re.MULTILINE def _handle_sourcecode(self, match): from pygments.lexers import get_lexer_by_name # section header yield match.start(1), Punctuation, match.group(1) yield match.start(2), Text, match.group(2) yield match.start(3), Operator.Word, match.group(3) yield match.start(4), Punctuation, match.group(4) yield match.start(5), Text, match.group(5) yield match.start(6), Keyword, match.group(6) yield match.start(7), Text, match.group(7) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name(match.group(6).strip()) except ClassNotFound: pass indention = match.group(8) indention_size = len(indention) code = (indention + match.group(9) + match.group(10) + match.group(11)) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(8), String, code return # highlight the lines with the lexer. ins = [] codelines = code.splitlines(True) code = '' for line in codelines: if len(line) > indention_size: ins.append((len(code), [(0, Text, line[:indention_size])])) code += line[indention_size:] else: code += line yield from do_insertions(ins, lexer.get_tokens_unprocessed(code)) # from docutils.parsers.rst.states closers = '\'")]}>\u2019\u201d\xbb!?' 
unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0' end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))' % (re.escape(unicode_delimiters), re.escape(closers))) tokens = { 'root': [ # Heading with overline (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)' r'(.+)(\n)(\1)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text, Generic.Heading, Text)), # Plain heading (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|' r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)), # Bulleted lists (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), # Numbered lists (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), # Numbered, but keep words at BOL from becoming lists (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)', bygroups(Text, Number, using(this, state='inline'))), (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)', bygroups(Text, Number, using(this, state='inline'))), # Line blocks (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)', bygroups(Text, Operator, using(this, state='inline'))), # Sourcecode directives (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)' r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*)?\n)+)', _handle_sourcecode), # A directive (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, using(this, state='inline'))), # A reference target (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$', bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), # A footnote/citation target (r'^( *\.\.)(\s*)(\[.+\])(.*?)$', bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), # A substitution def (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word, 
Punctuation, Text, using(this, state='inline'))), # Comments (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc), # Field list marker (r'^( *)(:(?:\\\\|\\:|[^:\n])+:(?=\s))([ \t]*)', bygroups(Text, Name.Class, Text)), # Definition list (r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)', bygroups(using(this, state='inline'), using(this, state='inline'))), # Code blocks (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*)?\n)+)', bygroups(String.Escape, Text, String, String, Text, String)), include('inline'), ], 'inline': [ (r'\\.', Text), # escape (r'``', String, 'literal'), # code (r'(`.+?)(<.+?>)(`__?)', # reference with inline target bygroups(String, String.Interpol, String)), (r'`.+?`__?', String), # reference (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?', bygroups(Name.Variable, Name.Attribute)), # role (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)', bygroups(Name.Attribute, Name.Variable)), # role (content first) (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis (r'\*.+?\*', Generic.Emph), # Emphasis (r'\[.*?\]_', String), # Footnote or citation (r'<.+?>', Name.Tag), # Hyperlink (r'[^\\\n\[*`:]+', Text), (r'.', Text), ], 'literal': [ (r'[^`]+', String), (r'``' + end_string_suffix, String, '#pop'), (r'`', String), ] } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options) def analyse_text(text): if text[:2] == '..' and text[2:3] != '.': return 0.3 p1 = text.find("\n") p2 = text.find("\n", p1 + 1) if (p2 > -1 and # has two lines p1 * 2 + 1 == p2 and # they are the same length text[p1+1] in '-=' and # the next line both starts and ends with text[p1+1] == text[p2-1]): # ...a sufficiently high header return 0.5 class TexLexer(RegexLexer): """ Lexer for the TeX and LaTeX typesetting languages. 
""" name = 'TeX' aliases = ['tex', 'latex'] filenames = ['*.tex', '*.aux', '*.toc'] mimetypes = ['text/x-tex', 'text/x-latex'] tokens = { 'general': [ (r'%.*?\n', Comment), (r'[{}]', Name.Builtin), (r'[&_^]', Name.Builtin), ], 'root': [ (r'\\\[', String.Backtick, 'displaymath'), (r'\\\(', String, 'inlinemath'), (r'\$\$', String.Backtick, 'displaymath'), (r'\$', String, 'inlinemath'), (r'\\([a-zA-Z]+|.)', Keyword, 'command'), (r'\\$', Keyword), include('general'), (r'[^\\$%&_^{}]+', Text), ], 'math': [ (r'\\([a-zA-Z]+|.)', Name.Variable), include('general'), (r'[0-9]+', Number), (r'[-=!+*/()\[\]]', Operator), (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin), ], 'inlinemath': [ (r'\\\)', String, '#pop'), (r'\$', String, '#pop'), include('math'), ], 'displaymath': [ (r'\\\]', String, '#pop'), (r'\$\$', String, '#pop'), (r'\$', Name.Builtin), include('math'), ], 'command': [ (r'\[.*?\]', Name.Attribute), (r'\*', Keyword), default('#pop'), ], } def analyse_text(text): for start in ("\\documentclass", "\\input", "\\documentstyle", "\\relax"): if text[:len(start)] == start: return True class GroffLexer(RegexLexer): """ Lexer for the (g)roff typesetting language, supporting groff extensions. Mainly useful for highlighting manpage sources. .. versionadded:: 0.6 """ name = 'Groff' aliases = ['groff', 'nroff', 'man'] filenames = ['*.[1-9]', '*.man', '*.1p', '*.3pm'] mimetypes = ['application/x-troff', 'text/troff'] tokens = { 'root': [ (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'), (r'\.', Punctuation, 'request'), # Regular characters, slurp till we find a backslash or newline (r'[^\\\n]+', Text, 'textline'), default('textline'), ], 'textline': [ include('escapes'), (r'[^\\\n]+', Text), (r'\n', Text, '#pop'), ], 'escapes': [ # groff has many ways to write escapes. 
(r'\\"[^\n]*', Comment), (r'\\[fn]\w', String.Escape), (r'\\\(.{2}', String.Escape), (r'\\.\[.*\]', String.Escape), (r'\\.', String.Escape), (r'\\\n', Text, 'request'), ], 'request': [ (r'\n', Text, '#pop'), include('escapes'), (r'"[^\n"]+"', String.Double), (r'\d+', Number), (r'\S+', String), (r'\s+', Text), ], } def analyse_text(text): if text[:1] != '.': return False if text[:3] == '.\\"': return True if text[:4] == '.TH ': return True if text[1:3].isalnum() and text[3].isspace(): return 0.9 class MozPreprocHashLexer(RegexLexer): """ Lexer for Mozilla Preprocessor files (with '#' as the marker). Other data is left untouched. .. versionadded:: 2.0 """ name = 'mozhashpreproc' aliases = [name] filenames = [] mimetypes = [] tokens = { 'root': [ (r'^#', Comment.Preproc, ('expr', 'exprstart')), (r'.+', Other), ], 'exprstart': [ (r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'), (words(( 'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif', 'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter', 'include', 'includesubst', 'error')), Comment.Preproc, '#pop'), ], 'expr': [ (words(('!', '!=', '==', '&&', '||')), Operator), (r'(defined)(\()', bygroups(Keyword, Punctuation)), (r'\)', Punctuation), (r'[0-9]+', Number.Decimal), (r'__\w+?__', Name.Variable), (r'@\w+?@', Name.Class), (r'\w+', Name), (r'\n', Text, '#pop'), (r'\s+', Text), (r'\S', Punctuation), ], } class MozPreprocPercentLexer(MozPreprocHashLexer): """ Lexer for Mozilla Preprocessor files (with '%' as the marker). Other data is left untouched. .. versionadded:: 2.0 """ name = 'mozpercentpreproc' aliases = [name] filenames = [] mimetypes = [] tokens = { 'root': [ (r'^%', Comment.Preproc, ('expr', 'exprstart')), (r'.+', Other), ], } class MozPreprocXulLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `XmlLexer`. .. 
versionadded:: 2.0 """ name = "XUL+mozpreproc" aliases = ['xul+mozpreproc'] filenames = ['*.xul.in'] mimetypes = [] def __init__(self, **options): super().__init__(XmlLexer, MozPreprocHashLexer, **options) class MozPreprocJavascriptLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `JavascriptLexer`. .. versionadded:: 2.0 """ name = "Javascript+mozpreproc" aliases = ['javascript+mozpreproc'] filenames = ['*.js.in'] mimetypes = [] def __init__(self, **options): super().__init__(JavascriptLexer, MozPreprocHashLexer, **options) class MozPreprocCssLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `CssLexer`. .. versionadded:: 2.0 """ name = "CSS+mozpreproc" aliases = ['css+mozpreproc'] filenames = ['*.css.in'] mimetypes = [] def __init__(self, **options): super().__init__(CssLexer, MozPreprocPercentLexer, **options) class MarkdownLexer(RegexLexer): """ For Markdown markup. .. versionadded:: 2.2 """ name = 'Markdown' url = 'https://daringfireball.net/projects/markdown/' aliases = ['markdown', 'md'] filenames = ['*.md', '*.markdown'] mimetypes = ["text/x-markdown"] flags = re.MULTILINE def _handle_codeblock(self, match): """ match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks """ from pygments.lexers import get_lexer_by_name # section header yield match.start(1), String.Backtick, match.group(1) yield match.start(2), String.Backtick, match.group(2) yield match.start(3), Text , match.group(3) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name( match.group(2).strip() ) except ClassNotFound: pass code = match.group(4) # no lexer for this language. 
handle it like it was a code block if lexer is None: yield match.start(4), String, code else: yield from do_insertions([], lexer.get_tokens_unprocessed(code)) yield match.start(5), String.Backtick, match.group(5) tokens = { 'root': [ # heading with '#' prefix (atx-style) (r'(^#[^#].+)(\n)', bygroups(Generic.Heading, Text)), # subheading with '#' prefix (atx-style) (r'(^#{2,6}[^#].+)(\n)', bygroups(Generic.Subheading, Text)), # heading with '=' underlines (Setext-style) (r'^(.+)(\n)(=+)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)), # subheading with '-' underlines (Setext-style) (r'^(.+)(\n)(-+)(\n)', bygroups(Generic.Subheading, Text, Generic.Subheading, Text)), # task list (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)', bygroups(Whitespace, Keyword, Keyword, using(this, state='inline'))), # bulleted list (r'^(\s*)([*-])(\s)(.+\n)', bygroups(Whitespace, Keyword, Whitespace, using(this, state='inline'))), # numbered list (r'^(\s*)([0-9]+\.)( .+\n)', bygroups(Whitespace, Keyword, using(this, state='inline'))), # quote (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)), # code block fenced by 3 backticks (r'^(\s*```\n[\w\W]*?^\s*```$\n)', String.Backtick), # code block with language (r'^(\s*```)(\w+)(\n)([\w\W]*?)(^\s*```$\n)', _handle_codeblock), include('inline'), ], 'inline': [ # escape (r'\\.', Text), # inline code (r'([^`]?)(`[^`\n]+`)', bygroups(Text, String.Backtick)), # warning: the following rules eat outer tags. # eg. 
**foo _bar_ baz** => foo and baz are not recognized as bold # bold fenced by '**' (r'([^\*]?)(\*\*[^* \n][^*\n]*\*\*)', bygroups(Text, Generic.Strong)), # bold fenced by '__' (r'([^_]?)(__[^_ \n][^_\n]*__)', bygroups(Text, Generic.Strong)), # italics fenced by '*' (r'([^\*]?)(\*[^* \n][^*\n]*\*)', bygroups(Text, Generic.Emph)), # italics fenced by '_' (r'([^_]?)(_[^_ \n][^_\n]*_)', bygroups(Text, Generic.Emph)), # strikethrough (r'([^~]?)(~~[^~ \n][^~\n]*~~)', bygroups(Text, Generic.Deleted)), # mentions and topics (twitter and github stuff) (r'[@#][\w/:]+', Name.Entity), # (image?) links eg: ![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png) (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))', bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)), # reference-style links, e.g.: # [an example][id] # [id]: http://example.com/ (r'(\[)([^]]+)(\])(\[)([^]]*)(\])', bygroups(Text, Name.Tag, Text, Text, Name.Label, Text)), (r'^(\s*\[)([^]]*)(\]:\s*)(.+)', bygroups(Text, Name.Label, Text, Name.Attribute)), # general text, must come last! (r'[^\\\s]+', Text), (r'.', Text), ], } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options) class TiddlyWiki5Lexer(RegexLexer): """ For TiddlyWiki5 markup. .. 
versionadded:: 2.7 """ name = 'tiddler' url = 'https://tiddlywiki.com/#TiddlerFiles' aliases = ['tid'] filenames = ['*.tid'] mimetypes = ["text/vnd.tiddlywiki"] flags = re.MULTILINE def _handle_codeblock(self, match): """ match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks """ from pygments.lexers import get_lexer_by_name # section header yield match.start(1), String, match.group(1) yield match.start(2), String, match.group(2) yield match.start(3), Text, match.group(3) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name(match.group(2).strip()) except ClassNotFound: pass code = match.group(4) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(4), String, code return yield from do_insertions([], lexer.get_tokens_unprocessed(code)) yield match.start(5), String, match.group(5) def _handle_cssblock(self, match): """ match args: 1:style tag 2:newline, 3:code, 4:closing style tag """ from pygments.lexers import get_lexer_by_name # section header yield match.start(1), String, match.group(1) yield match.start(2), String, match.group(2) lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name('css') except ClassNotFound: pass code = match.group(3) # no lexer for this language. 
handle it like it was a code block if lexer is None: yield match.start(3), String, code return yield from do_insertions([], lexer.get_tokens_unprocessed(code)) yield match.start(4), String, match.group(4) tokens = { 'root': [ # title in metadata section (r'^(title)(:\s)(.+\n)', bygroups(Keyword, Text, Generic.Heading)), # headings (r'^(!)([^!].+\n)', bygroups(Generic.Heading, Text)), (r'^(!{2,6})(.+\n)', bygroups(Generic.Subheading, Text)), # bulleted or numbered lists or single-line block quotes # (can be mixed) (r'^(\s*)([*#>]+)(\s*)(.+\n)', bygroups(Text, Keyword, Text, using(this, state='inline'))), # multi-line block quotes (r'^(<<<.*\n)([\w\W]*?)(^<<<.*$)', bygroups(String, Text, String)), # table header (r'^(\|.*?\|h)$', bygroups(Generic.Strong)), # table footer or caption (r'^(\|.*?\|[cf])$', bygroups(Generic.Emph)), # table class (r'^(\|.*?\|k)$', bygroups(Name.Tag)), # definitions (r'^(;.*)$', bygroups(Generic.Strong)), # text block (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)), # code block with language (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock), # CSS style block (r'^(<style>)(\n)([\w\W]*?)(^</style>$)', _handle_cssblock), include('keywords'), include('inline'), ], 'keywords': [ (words(( '\\define', '\\end', 'caption', 'created', 'modified', 'tags', 'title', 'type'), prefix=r'^', suffix=r'\b'), Keyword), ], 'inline': [ # escape (r'\\.', Text), # created or modified date (r'\d{17}', Number.Integer), # italics (r'(\s)(//[^/]+//)((?=\W|\n))', bygroups(Text, Generic.Emph, Text)), # superscript (r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)), # subscript (r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)), # underscore (r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)), # bold (r"(\s)(''[^']+'')((?=\W|\n))", bygroups(Text, Generic.Strong, Text)), # strikethrough (r'(\s)(~~[^~]+~~)((?=\W|\n))', bygroups(Text, Generic.Deleted, Text)), # TiddlyWiki variables (r'<<[^>]+>>', Name.Tag), (r'\$\$[^$]+\$\$', Name.Tag), 
(r'\$\([^)]+\)\$', Name.Tag), # TiddlyWiki style or class (r'^@@.*$', Name.Tag), # HTML tags (r'</?[^>]+>', Name.Tag), # inline code (r'`[^`]+`', String.Backtick), # HTML escaped symbols (r'&\S*?;', String.Regex), # Wiki links (r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text)), # External links (r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text, Name.Attribute, Text)), # Transclusion (r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)), # URLs (r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)), # general text, must come last! (r'[\w]+', Text), (r'.', Text) ], } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options)
26,797
Python
33.984334
101
0.462589
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/unicon.py
""" pygments.lexers.unicon ~~~~~~~~~~~~~~~~~~~~~~ Lexers for the Icon and Unicon languages, including ucode VM. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, words, using, this from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation __all__ = ['IconLexer', 'UcodeLexer', 'UniconLexer'] class UniconLexer(RegexLexer): """ For Unicon source code. .. versionadded:: 2.4 """ name = 'Unicon' aliases = ['unicon'] filenames = ['*.icn'] mimetypes = ['text/unicon'] flags = re.MULTILINE tokens = { 'root': [ (r'[^\S\n]+', Text), (r'#.*?\n', Comment.Single), (r'[^\S\n]+', Text), (r'class|method|procedure', Keyword.Declaration, 'subprogram'), (r'(record)(\s+)(\w+)', bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), (r'(#line|\$C|\$Cend|\$define|\$else|\$endif|\$error|\$ifdef|' r'\$ifndef|\$include|\$line|\$undef)\b', Keyword.PreProc), (r'(&null|&fail)\b', Keyword.Constant), (r'&allocated|&ascii|&clock|&collections|&column|&col|&control|' r'&cset|&current|&dateline|&date|&digits|&dump|' r'&errno|&errornumber|&errortext|&errorvalue|&error|&errout|' r'&eventcode|&eventvalue|&eventsource|&e|' r'&features|&file|&host|&input|&interval|&lcase|&letters|' r'&level|&line|&ldrag|&lpress|&lrelease|' r'&main|&mdrag|&meta|&mpress|&mrelease|&now|&output|' r'&phi|&pick|&pi|&pos|&progname|' r'&random|&rdrag|&regions|&resize|&row|&rpress|&rrelease|' r'&shift|&source|&storage|&subject|' r'&time|&trace|&ucase|&version|' r'&window|&x|&y', Keyword.Reserved), (r'(by|of|not|to)\b', Keyword.Reserved), (r'(global|local|static|abstract)\b', Keyword.Reserved), (r'package|link|import', Keyword.Declaration), (words(( 'break', 'case', 'create', 'critical', 'default', 'end', 'all', 'do', 'else', 'every', 'fail', 'if', 'import', 'initial', 'initially', 'invocable', 'next', 'repeat', 'return', 'suspend', 'then', 'thread', 
'until', 'while'), prefix=r'\b', suffix=r'\b'), Keyword.Reserved), (words(( 'Abort', 'abs', 'acos', 'Active', 'Alert', 'any', 'Any', 'Arb', 'Arbno', 'args', 'array', 'asin', 'atan', 'atanh', 'Attrib', 'Bal', 'bal', 'Bg', 'Break', 'Breakx', 'callout', 'center', 'char', 'chdir', 'chmod', 'chown', 'chroot', 'classname', 'Clip', 'Clone', 'close', 'cofail', 'collect', 'Color', 'ColorValue', 'condvar', 'constructor', 'copy', 'CopyArea', 'cos', 'Couple', 'crypt', 'cset', 'ctime', 'dbcolumns', 'dbdriver', 'dbkeys', 'dblimits', 'dbproduct', 'dbtables', 'delay', 'delete', 'detab', 'display', 'DrawArc', 'DrawCircle', 'DrawCube', 'DrawCurve', 'DrawCylinder', 'DrawDisk', 'DrawImage', 'DrawLine', 'DrawPoint', 'DrawPolygon', 'DrawRectangle', 'DrawSegment', 'DrawSphere', 'DrawString', 'DrawTorus', 'dtor', 'entab', 'EraseArea', 'errorclear', 'Event', 'eventmask', 'EvGet', 'EvSend', 'exec', 'exit', 'exp', 'Eye', 'Fail', 'fcntl', 'fdup', 'Fence', 'fetch', 'Fg', 'fieldnames', 'filepair', 'FillArc', 'FillCircle', 'FillPolygon', 'FillRectangle', 'find', 'flock', 'flush', 'Font', 'fork', 'FreeColor', 'FreeSpace', 'function', 'get', 'getch', 'getche', 'getegid', 'getenv', 'geteuid', 'getgid', 'getgr', 'gethost', 'getpgrp', 'getpid', 'getppid', 'getpw', 'getrusage', 'getserv', 'GetSpace', 'gettimeofday', 'getuid', 'globalnames', 'GotoRC', 'GotoXY', 'gtime', 'hardlink', 'iand', 'icom', 'IdentityMatrix', 'image', 'InPort', 'insert', 'Int86', 'integer', 'ioctl', 'ior', 'ishift', 'istate', 'ixor', 'kbhit', 'key', 'keyword', 'kill', 'left', 'Len', 'list', 'load', 'loadfunc', 'localnames', 'lock', 'log', 'Lower', 'lstat', 'many', 'map', 'match', 'MatrixMode', 'max', 'member', 'membernames', 'methodnames', 'methods', 'min', 'mkdir', 'move', 'MultMatrix', 'mutex', 'name', 'NewColor', 'Normals', 'NotAny', 'numeric', 'open', 'opencl', 'oprec', 'ord', 'OutPort', 'PaletteChars', 'PaletteColor', 'PaletteKey', 'paramnames', 'parent', 'Pattern', 'Peek', 'Pending', 'pipe', 'Pixel', 'PlayAudio', 'Poke', 
'pop', 'PopMatrix', 'Pos', 'pos', 'proc', 'pull', 'push', 'PushMatrix', 'PushRotate', 'PushScale', 'PushTranslate', 'put', 'QueryPointer', 'Raise', 'read', 'ReadImage', 'readlink', 'reads', 'ready', 'real', 'receive', 'Refresh', 'Rem', 'remove', 'rename', 'repl', 'reverse', 'right', 'rmdir', 'Rotate', 'Rpos', 'Rtab', 'rtod', 'runerr', 'save', 'Scale', 'seek', 'select', 'send', 'seq', 'serial', 'set', 'setenv', 'setgid', 'setgrent', 'sethostent', 'setpgrp', 'setpwent', 'setservent', 'setuid', 'signal', 'sin', 'sort', 'sortf', 'Span', 'spawn', 'sql', 'sqrt', 'stat', 'staticnames', 'stop', 'StopAudio', 'string', 'structure', 'Succeed', 'Swi', 'symlink', 'sys_errstr', 'system', 'syswrite', 'Tab', 'tab', 'table', 'tan', 'Texcoord', 'Texture', 'TextWidth', 'Translate', 'trap', 'trim', 'truncate', 'trylock', 'type', 'umask', 'Uncouple', 'unlock', 'upto', 'utime', 'variable', 'VAttrib', 'wait', 'WAttrib', 'WDefault', 'WFlush', 'where', 'WinAssociate', 'WinButton', 'WinColorDialog', 'WindowContents', 'WinEditRegion', 'WinFontDialog', 'WinMenuBar', 'WinOpenDialog', 'WinPlayMedia', 'WinSaveDialog', 'WinScrollBar', 'WinSelectDialog', 'write', 'WriteImage', 'writes', 'WSection', 'WSync'), prefix=r'\b', suffix=r'\b'), Name.Function), include('numbers'), (r'<@|<<@|>@|>>@|\.>|->|===|~===|\*\*|\+\+|--|\.|~==|~=|<=|>=|==|' r'=|<<=|<<|>>=|>>|:=:|:=|->|<->|\+:=|\|', Operator), (r'"(?:[^\\"]|\\.)*"', String), (r"'(?:[^\\']|\\.)*'", String.Character), (r'[*<>+=/&!?@~\\-]', Operator), (r'\^', Operator), (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))), (r"[\[\]]", Punctuation), (r"<>|=>|[()|:;,.'`{}%&?]", Punctuation), (r'\n+', Text), ], 'numbers': [ (r'\b([+-]?([2-9]|[12][0-9]|3[0-6])[rR][0-9a-zA-Z]+)\b', Number.Hex), (r'[+-]?[0-9]*\.([0-9]*)([Ee][+-]?[0-9]*)?', Number.Float), (r'\b([+-]?[0-9]+[KMGTPkmgtp]?)\b', Number.Integer), ], 'subprogram': [ (r'\(', Punctuation, ('#pop', 'formal_part')), (r';', Punctuation, '#pop'), (r'"[^"]+"|\w+', Name.Function), include('root'), ], 'type_def': 
[ (r'\(', Punctuation, 'formal_part'), ], 'formal_part': [ (r'\)', Punctuation, '#pop'), (r'\w+', Name.Variable), (r',', Punctuation), (r'(:string|:integer|:real)\b', Keyword.Reserved), include('root'), ], } class IconLexer(RegexLexer): """ Lexer for Icon. .. versionadded:: 1.6 """ name = 'Icon' aliases = ['icon'] filenames = ['*.icon', '*.ICON'] mimetypes = [] flags = re.MULTILINE tokens = { 'root': [ (r'[^\S\n]+', Text), (r'#.*?\n', Comment.Single), (r'[^\S\n]+', Text), (r'class|method|procedure', Keyword.Declaration, 'subprogram'), (r'(record)(\s+)(\w+)', bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), (r'(#line|\$C|\$Cend|\$define|\$else|\$endif|\$error|\$ifdef|' r'\$ifndef|\$include|\$line|\$undef)\b', Keyword.PreProc), (r'(&null|&fail)\b', Keyword.Constant), (r'&allocated|&ascii|&clock|&collections|&column|&col|&control|' r'&cset|&current|&dateline|&date|&digits|&dump|' r'&errno|&errornumber|&errortext|&errorvalue|&error|&errout|' r'&eventcode|&eventvalue|&eventsource|&e|' r'&features|&file|&host|&input|&interval|&lcase|&letters|' r'&level|&line|&ldrag|&lpress|&lrelease|' r'&main|&mdrag|&meta|&mpress|&mrelease|&now|&output|' r'&phi|&pick|&pi|&pos|&progname|' r'&random|&rdrag|&regions|&resize|&row|&rpress|&rrelease|' r'&shift|&source|&storage|&subject|' r'&time|&trace|&ucase|&version|' r'&window|&x|&y', Keyword.Reserved), (r'(by|of|not|to)\b', Keyword.Reserved), (r'(global|local|static)\b', Keyword.Reserved), (r'link', Keyword.Declaration), (words(( 'break', 'case', 'create', 'default', 'end', 'all', 'do', 'else', 'every', 'fail', 'if', 'initial', 'invocable', 'next', 'repeat', 'return', 'suspend', 'then', 'until', 'while'), prefix=r'\b', suffix=r'\b'), Keyword.Reserved), (words(( 'abs', 'acos', 'Active', 'Alert', 'any', 'args', 'array', 'asin', 'atan', 'atanh', 'Attrib', 'bal', 'Bg', 'callout', 'center', 'char', 'chdir', 'chmod', 'chown', 'chroot', 'Clip', 'Clone', 'close', 'cofail', 'collect', 'Color', 'ColorValue', 'condvar', 'copy', 
'CopyArea', 'cos', 'Couple', 'crypt', 'cset', 'ctime', 'delay', 'delete', 'detab', 'display', 'DrawArc', 'DrawCircle', 'DrawCube', 'DrawCurve', 'DrawCylinder', 'DrawDisk', 'DrawImage', 'DrawLine', 'DrawPoint', 'DrawPolygon', 'DrawRectangle', 'DrawSegment', 'DrawSphere', 'DrawString', 'DrawTorus', 'dtor', 'entab', 'EraseArea', 'errorclear', 'Event', 'eventmask', 'EvGet', 'EvSend', 'exec', 'exit', 'exp', 'Eye', 'fcntl', 'fdup', 'fetch', 'Fg', 'fieldnames', 'FillArc', 'FillCircle', 'FillPolygon', 'FillRectangle', 'find', 'flock', 'flush', 'Font', 'FreeColor', 'FreeSpace', 'function', 'get', 'getch', 'getche', 'getenv', 'GetSpace', 'gettimeofday', 'getuid', 'globalnames', 'GotoRC', 'GotoXY', 'gtime', 'hardlink', 'iand', 'icom', 'IdentityMatrix', 'image', 'InPort', 'insert', 'Int86', 'integer', 'ioctl', 'ior', 'ishift', 'istate', 'ixor', 'kbhit', 'key', 'keyword', 'kill', 'left', 'Len', 'list', 'load', 'loadfunc', 'localnames', 'lock', 'log', 'Lower', 'lstat', 'many', 'map', 'match', 'MatrixMode', 'max', 'member', 'membernames', 'methodnames', 'methods', 'min', 'mkdir', 'move', 'MultMatrix', 'mutex', 'name', 'NewColor', 'Normals', 'numeric', 'open', 'opencl', 'oprec', 'ord', 'OutPort', 'PaletteChars', 'PaletteColor', 'PaletteKey', 'paramnames', 'parent', 'Pattern', 'Peek', 'Pending', 'pipe', 'Pixel', 'Poke', 'pop', 'PopMatrix', 'Pos', 'pos', 'proc', 'pull', 'push', 'PushMatrix', 'PushRotate', 'PushScale', 'PushTranslate', 'put', 'QueryPointer', 'Raise', 'read', 'ReadImage', 'readlink', 'reads', 'ready', 'real', 'receive', 'Refresh', 'Rem', 'remove', 'rename', 'repl', 'reverse', 'right', 'rmdir', 'Rotate', 'Rpos', 'rtod', 'runerr', 'save', 'Scale', 'seek', 'select', 'send', 'seq', 'serial', 'set', 'setenv', 'setuid', 'signal', 'sin', 'sort', 'sortf', 'spawn', 'sql', 'sqrt', 'stat', 'staticnames', 'stop', 'string', 'structure', 'Swi', 'symlink', 'sys_errstr', 'system', 'syswrite', 'tab', 'table', 'tan', 'Texcoord', 'Texture', 'TextWidth', 'Translate', 'trap', 'trim', 
'truncate', 'trylock', 'type', 'umask', 'Uncouple', 'unlock', 'upto', 'utime', 'variable', 'wait', 'WAttrib', 'WDefault', 'WFlush', 'where', 'WinAssociate', 'WinButton', 'WinColorDialog', 'WindowContents', 'WinEditRegion', 'WinFontDialog', 'WinMenuBar', 'WinOpenDialog', 'WinPlayMedia', 'WinSaveDialog', 'WinScrollBar', 'WinSelectDialog', 'write', 'WriteImage', 'writes', 'WSection', 'WSync'), prefix=r'\b', suffix=r'\b'), Name.Function), include('numbers'), (r'===|~===|\*\*|\+\+|--|\.|==|~==|<=|>=|=|~=|<<=|<<|>>=|>>|' r':=:|:=|<->|<-|\+:=|\|\||\|', Operator), (r'"(?:[^\\"]|\\.)*"', String), (r"'(?:[^\\']|\\.)*'", String.Character), (r'[*<>+=/&!?@~\\-]', Operator), (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))), (r"[\[\]]", Punctuation), (r"<>|=>|[()|:;,.'`{}%\^&?]", Punctuation), (r'\n+', Text), ], 'numbers': [ (r'\b([+-]?([2-9]|[12][0-9]|3[0-6])[rR][0-9a-zA-Z]+)\b', Number.Hex), (r'[+-]?[0-9]*\.([0-9]*)([Ee][+-]?[0-9]*)?', Number.Float), (r'\b([+-]?[0-9]+[KMGTPkmgtp]?)\b', Number.Integer), ], 'subprogram': [ (r'\(', Punctuation, ('#pop', 'formal_part')), (r';', Punctuation, '#pop'), (r'"[^"]+"|\w+', Name.Function), include('root'), ], 'type_def': [ (r'\(', Punctuation, 'formal_part'), ], 'formal_part': [ (r'\)', Punctuation, '#pop'), (r'\w+', Name.Variable), (r',', Punctuation), (r'(:string|:integer|:real)\b', Keyword.Reserved), include('root'), ], } class UcodeLexer(RegexLexer): """ Lexer for Icon ucode files. .. 
versionadded:: 2.4 """ name = 'ucode' aliases = ['ucode'] filenames = ['*.u', '*.u1', '*.u2'] mimetypes = [] flags = re.MULTILINE tokens = { 'root': [ (r'(#.*\n)', Comment), (words(( 'con', 'declend', 'end', 'global', 'impl', 'invocable', 'lab', 'link', 'local', 'record', 'uid', 'unions', 'version'), prefix=r'\b', suffix=r'\b'), Name.Function), (words(( 'colm', 'filen', 'line', 'synt'), prefix=r'\b', suffix=r'\b'), Comment), (words(( 'asgn', 'bang', 'bscan', 'cat', 'ccase', 'chfail', 'coact', 'cofail', 'compl', 'coret', 'create', 'cset', 'diff', 'div', 'dup', 'efail', 'einit', 'end', 'eqv', 'eret', 'error', 'escan', 'esusp', 'field', 'goto', 'init', 'int', 'inter', 'invoke', 'keywd', 'lconcat', 'lexeq', 'lexge', 'lexgt', 'lexle', 'lexlt', 'lexne', 'limit', 'llist', 'lsusp', 'mark', 'mark0', 'minus', 'mod', 'mult', 'neg', 'neqv', 'nonnull', 'noop', 'null', 'number', 'numeq', 'numge', 'numgt', 'numle', 'numlt', 'numne', 'pfail', 'plus', 'pnull', 'pop', 'power', 'pret', 'proc', 'psusp', 'push1', 'pushn1', 'random', 'rasgn', 'rcv', 'rcvbk', 'real', 'refresh', 'rswap', 'sdup', 'sect', 'size', 'snd', 'sndbk', 'str', 'subsc', 'swap', 'tabmat', 'tally', 'toby', 'trace', 'unmark', 'value', 'var'), prefix=r'\b', suffix=r'\b'), Keyword.Declaration), (words(( 'any', 'case', 'endcase', 'endevery', 'endif', 'endifelse', 'endrepeat', 'endsuspend', 'enduntil', 'endwhile', 'every', 'if', 'ifelse', 'repeat', 'suspend', 'until', 'while'), prefix=r'\b', suffix=r'\b'), Name.Constant), (r'\d+(\s*|\.$|$)', Number.Integer), (r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float), (r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float), (r"(<>|=>|[()|:;,.'`]|[{}]|[%^]|[&?])", Punctuation), (r'\s+\b', Text), (r'[\w-]+', Text), ], } def analyse_text(text): """endsuspend and endrepeat are unique to this language, and \\self, /self doesn't seem to get used anywhere else either.""" result = 0 if 'endsuspend' in text: result += 0.1 if 'endrepeat' in text: result += 0.1 if ':=' in text: result += 0.01 if 'procedure' 
in text and 'end' in text: result += 0.01 # This seems quite unique to unicon -- doesn't appear in any other # example source we have (A quick search reveals that \SELF appears in # Perl/Raku code) if r'\self' in text and r'/self' in text: result += 0.5 return result
18,512
Python
43.934466
83
0.43723
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/thingsdb.py
""" pygments.lexers.thingsdb ~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for the ThingsDB language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups from pygments.token import Comment, Keyword, Name, Number, String, Text, \ Operator, Punctuation, Whitespace __all__ = ['ThingsDBLexer'] class ThingsDBLexer(RegexLexer): """ Lexer for the ThingsDB programming language. .. versionadded:: 2.9 """ name = 'ThingsDB' aliases = ['ti', 'thingsdb'] filenames = ['*.ti'] tokens = { 'root': [ include('expression'), ], 'expression': [ include('comments'), include('whitespace'), # numbers (r'[-+]?0b[01]+', Number.Bin), (r'[-+]?0o[0-8]+', Number.Oct), (r'([-+]?0x[0-9a-fA-F]+)', Number.Hex), (r'[-+]?[0-9]+', Number.Integer), (r'[-+]?((inf|nan)([^0-9A-Za-z_]|$)|[0-9]*\.[0-9]+(e[+-][0-9]+)?)', Number.Float), # strings (r'(?:"(?:[^"]*)")+', String.Double), (r"(?:'(?:[^']*)')+", String.Single), # literals (r'(true|false|nil)\b', Keyword.Constant), # regular expressions (r'(/[^/\\]*(?:\\.[^/\\]*)*/i?)', String.Regex), # thing id's (r'#[0-9]+', Comment.Preproc), # name, assignments and functions include('names'), (r'[(){}\[\],;]', Punctuation), (r'[+\-*/%&|<>^!~@=:?]', Operator), ], 'names': [ (r'(\.)' r'(add|call|contains|del|endswith|extend|filter|find|findindex|' r'get|has|id|indexof|keys|len|lower|map|pop|push|remove|set|sort|' r'splice|startswith|test|unwrap|upper|values|wrap)' r'(\()', bygroups(Name.Function, Name.Function, Punctuation), 'arguments'), (r'(array|assert|assert_err|auth_err|backup_info|backups_info|' r'bad_data_err|bool|closure|collection_info|collections_info|' r'counters|deep|del_backup|del_collection|del_expired|del_node|' r'del_procedure|del_token|del_type|del_user|err|float|' r'forbidden_err|grant|int|isarray|isascii|isbool|isbytes|iserr|' r'isfloat|isinf|isint|islist|isnan|isnil|israw|isset|isstr|' 
r'isthing|istuple|isutf8|lookup_err|max_quota_err|mod_type|new|' r'new_backup|new_collection|new_node|new_procedure|new_token|' r'new_type|new_user|node_err|node_info|nodes_info|now|' r'num_arguments_err|operation_err|overflow_err|procedure_doc|' r'procedure_info|procedures_info|raise|refs|rename_collection|' r'rename_user|reset_counters|return|revoke|run|set_log_level|set|' r'set_quota|set_type|shutdown|str|syntax_err|thing|try|type|' r'type_err|type_count|type_info|types_info|user_info|users_info|' r'value_err|wse|zero_div_err)' r'(\()', bygroups(Name.Function, Punctuation), 'arguments'), (r'(\.[A-Za-z_][0-9A-Za-z_]*)' r'(\s*)(=)', bygroups(Name.Attribute, Text, Operator)), (r'\.[A-Za-z_][0-9A-Za-z_]*', Name.Attribute), (r'([A-Za-z_][0-9A-Za-z_]*)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), (r'[A-Za-z_][0-9A-Za-z_]*', Name.Variable), ], 'whitespace': [ (r'\n', Whitespace), (r'\s+', Whitespace), ], 'comments': [ (r'//(.*?)\n', Comment.Single), (r'/\*', Comment.Multiline, 'comment'), ], 'comment': [ (r'[^*/]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'arguments': [ include('expression'), (',', Punctuation), (r'\(', Punctuation, '#push'), (r'\)', Punctuation, '#pop'), ], }
4,228
Python
35.145299
79
0.494087
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/functional.py
""" pygments.lexers.functional ~~~~~~~~~~~~~~~~~~~~~~~~~~ Just export lexer classes previously contained in this module. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \ NewLispLexer, ShenLexer from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \ KokaLexer from pygments.lexers.theorem import CoqLexer from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \ ElixirConsoleLexer, ElixirLexer from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer __all__ = []
674
Python
31.142856
77
0.738872
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/qlik.py
""" pygments.lexers.qlik ~~~~~~~~~~~~~~~~~~~~ Lexer for the qlik scripting language :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, words from pygments.token import Comment, Keyword, Name, Number, Operator, \ Punctuation, String, Text from pygments.lexers._qlik_builtins import OPERATORS_LIST, STATEMENT_LIST, \ SCRIPT_FUNCTIONS, CONSTANT_LIST __all__ = ["QlikLexer"] class QlikLexer(RegexLexer): """ Lexer for qlik code, including .qvs files .. versionadded:: 2.12 """ name = "Qlik" aliases = ["qlik", "qlikview", "qliksense", "qlikscript"] filenames = ["*.qvs", "*.qvw"] flags = re.IGNORECASE tokens = { # Handle multi-line comments "comment": [ (r"\*/", Comment.Multiline, "#pop"), (r"[^*]+", Comment.Multiline), ], # Handle numbers "numerics": [ (r"\b\d+\.\d+(e\d+)?[fd]?\b", Number.Float), (r"\b\d+\b", Number.Integer), ], # Handle variable names in things "interp": [ ( r"(\$\()(\w+)(\))", bygroups(String.Interpol, Name.Variable, String.Interpol), ), ], # Handle strings "string": [ (r"'", String, "#pop"), include("interp"), (r"[^'$]+", String), (r"\$", String), ], # "assignment": [ (r";", Punctuation, "#pop"), include("root"), ], "field_name_quote": [ (r'"', String.Symbol, "#pop"), include("interp"), (r"[^\"$]+", String.Symbol), (r"\$", String.Symbol), ], "field_name_bracket": [ (r"\]", String.Symbol, "#pop"), include("interp"), (r"[^\]$]+", String.Symbol), (r"\$", String.Symbol), ], "function": [(r"\)", Punctuation, "#pop"), include("root")], "root": [ # Whitespace and comments (r"\s+", Text.Whitespace), (r"/\*", Comment.Multiline, "comment"), (r"//.*\n", Comment.Single), # variable assignment (r"(let|set)(\s+)", bygroups(Keyword.Declaration, Text.Whitespace), "assignment"), # Word operators (words(OPERATORS_LIST["words"], prefix=r"\b", suffix=r"\b"), Operator.Word), # Statements (words(STATEMENT_LIST, suffix=r"\b"), Keyword), # Table names 
(r"[a-z]\w*:", Keyword.Declaration), # Constants (words(CONSTANT_LIST, suffix=r"\b"), Keyword.Constant), # Functions (words(SCRIPT_FUNCTIONS, suffix=r"(?=\s*\()"), Name.Builtin, "function"), # interpolation - e.g. $(variableName) include("interp"), # Quotes denote a field/file name (r'"', String.Symbol, "field_name_quote"), # Square brackets denote a field/file name (r"\[", String.Symbol, "field_name_bracket"), # Strings (r"'", String, "string"), # Numbers include("numerics"), # Operator symbols (words(OPERATORS_LIST["symbols"]), Operator), # Strings denoted by single quotes (r"'.+?'", String), # Words as text (r"\b\w+\b", Text), # Basic punctuation (r"[,;.()\\/]", Punctuation), ], }
3,665
Python
30.067796
79
0.47176
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_openedge_builtins.py
""" pygments.lexers._openedge_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Builtin list for the OpenEdgeLexer. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ OPENEDGEKEYWORDS = ( 'ABS', 'ABSO', 'ABSOL', 'ABSOLU', 'ABSOLUT', 'ABSOLUTE', 'ABSTRACT', 'ACCELERATOR', 'ACCUM', 'ACCUMU', 'ACCUMUL', 'ACCUMULA', 'ACCUMULAT', 'ACCUMULATE', 'ACTIVE-FORM', 'ACTIVE-WINDOW', 'ADD', 'ADD-BUFFER', 'ADD-CALC-COLUMN', 'ADD-COLUMNS-FROM', 'ADD-EVENTS-PROCEDURE', 'ADD-FIELDS-FROM', 'ADD-FIRST', 'ADD-INDEX-FIELD', 'ADD-LAST', 'ADD-LIKE-COLUMN', 'ADD-LIKE-FIELD', 'ADD-LIKE-INDEX', 'ADD-NEW-FIELD', 'ADD-NEW-INDEX', 'ADD-SCHEMA-LOCATION', 'ADD-SUPER-PROCEDURE', 'ADM-DATA', 'ADVISE', 'ALERT-BOX', 'ALIAS', 'ALL', 'ALLOW-COLUMN-SEARCHING', 'ALLOW-REPLICATION', 'ALTER', 'ALWAYS-ON-TOP', 'AMBIG', 'AMBIGU', 'AMBIGUO', 'AMBIGUOU', 'AMBIGUOUS', 'ANALYZ', 'ANALYZE', 'AND', 'ANSI-ONLY', 'ANY', 'ANYWHERE', 'APPEND', 'APPL-ALERT', 'APPL-ALERT-', 'APPL-ALERT-B', 'APPL-ALERT-BO', 'APPL-ALERT-BOX', 'APPL-ALERT-BOXE', 'APPL-ALERT-BOXES', 'APPL-CONTEXT-ID', 'APPLICATION', 'APPLY', 'APPSERVER-INFO', 'APPSERVER-PASSWORD', 'APPSERVER-USERID', 'ARRAY-MESSAGE', 'AS', 'ASC', 'ASCE', 'ASCEN', 'ASCEND', 'ASCENDI', 'ASCENDIN', 'ASCENDING', 'ASK-OVERWRITE', 'ASSEMBLY', 'ASSIGN', 'ASYNC-REQUEST-COUNT', 'ASYNC-REQUEST-HANDLE', 'ASYNCHRONOUS', 'AT', 'ATTACHED-PAIRLIST', 'ATTR', 'ATTR-SPACE', 'ATTRI', 'ATTRIB', 'ATTRIBU', 'ATTRIBUT', 'AUDIT-CONTROL', 'AUDIT-ENABLED', 'AUDIT-EVENT-CONTEXT', 'AUDIT-POLICY', 'AUTHENTICATION-FAILED', 'AUTHORIZATION', 'AUTO-COMP', 'AUTO-COMPL', 'AUTO-COMPLE', 'AUTO-COMPLET', 'AUTO-COMPLETI', 'AUTO-COMPLETIO', 'AUTO-COMPLETION', 'AUTO-END-KEY', 'AUTO-ENDKEY', 'AUTO-GO', 'AUTO-IND', 'AUTO-INDE', 'AUTO-INDEN', 'AUTO-INDENT', 'AUTO-RESIZE', 'AUTO-RET', 'AUTO-RETU', 'AUTO-RETUR', 'AUTO-RETURN', 'AUTO-SYNCHRONIZE', 'AUTO-Z', 'AUTO-ZA', 'AUTO-ZAP', 'AUTOMATIC', 'AVAIL', 'AVAILA', 'AVAILAB', 'AVAILABL', 'AVAILABLE', 'AVAILABLE-FORMATS', 
'AVE', 'AVER', 'AVERA', 'AVERAG', 'AVERAGE', 'AVG', 'BACK', 'BACKG', 'BACKGR', 'BACKGRO', 'BACKGROU', 'BACKGROUN', 'BACKGROUND', 'BACKWARD', 'BACKWARDS', 'BASE64-DECODE', 'BASE64-ENCODE', 'BASE-ADE', 'BASE-KEY', 'BATCH', 'BATCH-', 'BATCH-M', 'BATCH-MO', 'BATCH-MOD', 'BATCH-MODE', 'BATCH-SIZE', 'BEFORE-H', 'BEFORE-HI', 'BEFORE-HID', 'BEFORE-HIDE', 'BEGIN-EVENT-GROUP', 'BEGINS', 'BELL', 'BETWEEN', 'BGC', 'BGCO', 'BGCOL', 'BGCOLO', 'BGCOLOR', 'BIG-ENDIAN', 'BINARY', 'BIND', 'BIND-WHERE', 'BLANK', 'BLOCK-ITERATION-DISPLAY', 'BLOCK-LEVEL', 'BORDER-B', 'BORDER-BO', 'BORDER-BOT', 'BORDER-BOTT', 'BORDER-BOTTO', 'BORDER-BOTTOM-CHARS', 'BORDER-BOTTOM-P', 'BORDER-BOTTOM-PI', 'BORDER-BOTTOM-PIX', 'BORDER-BOTTOM-PIXE', 'BORDER-BOTTOM-PIXEL', 'BORDER-BOTTOM-PIXELS', 'BORDER-L', 'BORDER-LE', 'BORDER-LEF', 'BORDER-LEFT', 'BORDER-LEFT-', 'BORDER-LEFT-C', 'BORDER-LEFT-CH', 'BORDER-LEFT-CHA', 'BORDER-LEFT-CHAR', 'BORDER-LEFT-CHARS', 'BORDER-LEFT-P', 'BORDER-LEFT-PI', 'BORDER-LEFT-PIX', 'BORDER-LEFT-PIXE', 'BORDER-LEFT-PIXEL', 'BORDER-LEFT-PIXELS', 'BORDER-R', 'BORDER-RI', 'BORDER-RIG', 'BORDER-RIGH', 'BORDER-RIGHT', 'BORDER-RIGHT-', 'BORDER-RIGHT-C', 'BORDER-RIGHT-CH', 'BORDER-RIGHT-CHA', 'BORDER-RIGHT-CHAR', 'BORDER-RIGHT-CHARS', 'BORDER-RIGHT-P', 'BORDER-RIGHT-PI', 'BORDER-RIGHT-PIX', 'BORDER-RIGHT-PIXE', 'BORDER-RIGHT-PIXEL', 'BORDER-RIGHT-PIXELS', 'BORDER-T', 'BORDER-TO', 'BORDER-TOP', 'BORDER-TOP-', 'BORDER-TOP-C', 'BORDER-TOP-CH', 'BORDER-TOP-CHA', 'BORDER-TOP-CHAR', 'BORDER-TOP-CHARS', 'BORDER-TOP-P', 'BORDER-TOP-PI', 'BORDER-TOP-PIX', 'BORDER-TOP-PIXE', 'BORDER-TOP-PIXEL', 'BORDER-TOP-PIXELS', 'BOX', 'BOX-SELECT', 'BOX-SELECTA', 'BOX-SELECTAB', 'BOX-SELECTABL', 'BOX-SELECTABLE', 'BREAK', 'BROWSE', 'BUFFER', 'BUFFER-CHARS', 'BUFFER-COMPARE', 'BUFFER-COPY', 'BUFFER-CREATE', 'BUFFER-DELETE', 'BUFFER-FIELD', 'BUFFER-HANDLE', 'BUFFER-LINES', 'BUFFER-NAME', 'BUFFER-PARTITION-ID', 'BUFFER-RELEASE', 'BUFFER-VALUE', 'BUTTON', 'BUTTONS', 'BY', 'BY-POINTER', 'BY-VARIANT-POINTER', 
'CACHE', 'CACHE-SIZE', 'CALL', 'CALL-NAME', 'CALL-TYPE', 'CAN-CREATE', 'CAN-DELETE', 'CAN-DO', 'CAN-DO-DOMAIN-SUPPORT', 'CAN-FIND', 'CAN-QUERY', 'CAN-READ', 'CAN-SET', 'CAN-WRITE', 'CANCEL-BREAK', 'CANCEL-BUTTON', 'CAPS', 'CAREFUL-PAINT', 'CASE', 'CASE-SEN', 'CASE-SENS', 'CASE-SENSI', 'CASE-SENSIT', 'CASE-SENSITI', 'CASE-SENSITIV', 'CASE-SENSITIVE', 'CAST', 'CATCH', 'CDECL', 'CENTER', 'CENTERE', 'CENTERED', 'CHAINED', 'CHARACTER', 'CHARACTER_LENGTH', 'CHARSET', 'CHECK', 'CHECKED', 'CHOOSE', 'CHR', 'CLASS', 'CLASS-TYPE', 'CLEAR', 'CLEAR-APPL-CONTEXT', 'CLEAR-LOG', 'CLEAR-SELECT', 'CLEAR-SELECTI', 'CLEAR-SELECTIO', 'CLEAR-SELECTION', 'CLEAR-SORT-ARROW', 'CLEAR-SORT-ARROWS', 'CLIENT-CONNECTION-ID', 'CLIENT-PRINCIPAL', 'CLIENT-TTY', 'CLIENT-TYPE', 'CLIENT-WORKSTATION', 'CLIPBOARD', 'CLOSE', 'CLOSE-LOG', 'CODE', 'CODEBASE-LOCATOR', 'CODEPAGE', 'CODEPAGE-CONVERT', 'COL', 'COL-OF', 'COLLATE', 'COLON', 'COLON-ALIGN', 'COLON-ALIGNE', 'COLON-ALIGNED', 'COLOR', 'COLOR-TABLE', 'COLU', 'COLUM', 'COLUMN', 'COLUMN-BGCOLOR', 'COLUMN-DCOLOR', 'COLUMN-FGCOLOR', 'COLUMN-FONT', 'COLUMN-LAB', 'COLUMN-LABE', 'COLUMN-LABEL', 'COLUMN-MOVABLE', 'COLUMN-OF', 'COLUMN-PFCOLOR', 'COLUMN-READ-ONLY', 'COLUMN-RESIZABLE', 'COLUMN-SCROLLING', 'COLUMNS', 'COM-HANDLE', 'COM-SELF', 'COMBO-BOX', 'COMMAND', 'COMPARES', 'COMPILE', 'COMPILER', 'COMPLETE', 'CONFIG-NAME', 'CONNECT', 'CONNECTED', 'CONSTRUCTOR', 'CONTAINS', 'CONTENTS', 'CONTEXT', 'CONTEXT-HELP', 'CONTEXT-HELP-FILE', 'CONTEXT-HELP-ID', 'CONTEXT-POPUP', 'CONTROL', 'CONTROL-BOX', 'CONTROL-FRAME', 'CONVERT', 'CONVERT-3D-COLORS', 'CONVERT-TO-OFFS', 'CONVERT-TO-OFFSE', 'CONVERT-TO-OFFSET', 'COPY-DATASET', 'COPY-LOB', 'COPY-SAX-ATTRIBUTES', 'COPY-TEMP-TABLE', 'COUNT', 'COUNT-OF', 'CPCASE', 'CPCOLL', 'CPINTERNAL', 'CPLOG', 'CPPRINT', 'CPRCODEIN', 'CPRCODEOUT', 'CPSTREAM', 'CPTERM', 'CRC-VALUE', 'CREATE', 'CREATE-LIKE', 'CREATE-LIKE-SEQUENTIAL', 'CREATE-NODE-NAMESPACE', 'CREATE-RESULT-LIST-ENTRY', 'CREATE-TEST-FILE', 'CURRENT', 'CURRENT-CHANGED', 
'CURRENT-COLUMN', 'CURRENT-ENV', 'CURRENT-ENVI', 'CURRENT-ENVIR', 'CURRENT-ENVIRO', 'CURRENT-ENVIRON', 'CURRENT-ENVIRONM', 'CURRENT-ENVIRONME', 'CURRENT-ENVIRONMEN', 'CURRENT-ENVIRONMENT', 'CURRENT-ITERATION', 'CURRENT-LANG', 'CURRENT-LANGU', 'CURRENT-LANGUA', 'CURRENT-LANGUAG', 'CURRENT-LANGUAGE', 'CURRENT-QUERY', 'CURRENT-REQUEST-INFO', 'CURRENT-RESPONSE-INFO', 'CURRENT-RESULT-ROW', 'CURRENT-ROW-MODIFIED', 'CURRENT-VALUE', 'CURRENT-WINDOW', 'CURRENT_DATE', 'CURS', 'CURSO', 'CURSOR', 'CURSOR-CHAR', 'CURSOR-LINE', 'CURSOR-OFFSET', 'DATA-BIND', 'DATA-ENTRY-RET', 'DATA-ENTRY-RETU', 'DATA-ENTRY-RETUR', 'DATA-ENTRY-RETURN', 'DATA-REL', 'DATA-RELA', 'DATA-RELAT', 'DATA-RELATI', 'DATA-RELATIO', 'DATA-RELATION', 'DATA-SOURCE', 'DATA-SOURCE-COMPLETE-MAP', 'DATA-SOURCE-MODIFIED', 'DATA-SOURCE-ROWID', 'DATA-T', 'DATA-TY', 'DATA-TYP', 'DATA-TYPE', 'DATABASE', 'DATASERVERS', 'DATASET', 'DATASET-HANDLE', 'DATE', 'DATE-F', 'DATE-FO', 'DATE-FOR', 'DATE-FORM', 'DATE-FORMA', 'DATE-FORMAT', 'DAY', 'DB-CONTEXT', 'DB-REFERENCES', 'DBCODEPAGE', 'DBCOLLATION', 'DBNAME', 'DBPARAM', 'DBREST', 'DBRESTR', 'DBRESTRI', 'DBRESTRIC', 'DBRESTRICT', 'DBRESTRICTI', 'DBRESTRICTIO', 'DBRESTRICTION', 'DBRESTRICTIONS', 'DBTASKID', 'DBTYPE', 'DBVERS', 'DBVERSI', 'DBVERSIO', 'DBVERSION', 'DCOLOR', 'DDE', 'DDE-ERROR', 'DDE-I', 'DDE-ID', 'DDE-ITEM', 'DDE-NAME', 'DDE-TOPIC', 'DEBLANK', 'DEBU', 'DEBUG', 'DEBUG-ALERT', 'DEBUG-LIST', 'DEBUGGER', 'DECIMAL', 'DECIMALS', 'DECLARE', 'DECLARE-NAMESPACE', 'DECRYPT', 'DEFAULT', 'DEFAULT-B', 'DEFAULT-BU', 'DEFAULT-BUFFER-HANDLE', 'DEFAULT-BUT', 'DEFAULT-BUTT', 'DEFAULT-BUTTO', 'DEFAULT-BUTTON', 'DEFAULT-COMMIT', 'DEFAULT-EX', 'DEFAULT-EXT', 'DEFAULT-EXTE', 'DEFAULT-EXTEN', 'DEFAULT-EXTENS', 'DEFAULT-EXTENSI', 'DEFAULT-EXTENSIO', 'DEFAULT-EXTENSION', 'DEFAULT-NOXL', 'DEFAULT-NOXLA', 'DEFAULT-NOXLAT', 'DEFAULT-NOXLATE', 'DEFAULT-VALUE', 'DEFAULT-WINDOW', 'DEFINE', 'DEFINE-USER-EVENT-MANAGER', 'DEFINED', 'DEL', 'DELE', 'DELEGATE', 'DELET', 'DELETE PROCEDURE', 'DELETE', 
'DELETE-CHAR', 'DELETE-CHARA', 'DELETE-CHARAC', 'DELETE-CHARACT', 'DELETE-CHARACTE', 'DELETE-CHARACTER', 'DELETE-CURRENT-ROW', 'DELETE-LINE', 'DELETE-RESULT-LIST-ENTRY', 'DELETE-SELECTED-ROW', 'DELETE-SELECTED-ROWS', 'DELIMITER', 'DESC', 'DESCE', 'DESCEN', 'DESCEND', 'DESCENDI', 'DESCENDIN', 'DESCENDING', 'DESELECT-FOCUSED-ROW', 'DESELECT-ROWS', 'DESELECT-SELECTED-ROW', 'DESELECTION', 'DESTRUCTOR', 'DIALOG-BOX', 'DICT', 'DICTI', 'DICTIO', 'DICTION', 'DICTIONA', 'DICTIONAR', 'DICTIONARY', 'DIR', 'DISABLE', 'DISABLE-AUTO-ZAP', 'DISABLE-DUMP-TRIGGERS', 'DISABLE-LOAD-TRIGGERS', 'DISABLED', 'DISCON', 'DISCONN', 'DISCONNE', 'DISCONNEC', 'DISCONNECT', 'DISP', 'DISPL', 'DISPLA', 'DISPLAY', 'DISPLAY-MESSAGE', 'DISPLAY-T', 'DISPLAY-TY', 'DISPLAY-TYP', 'DISPLAY-TYPE', 'DISTINCT', 'DO', 'DOMAIN-DESCRIPTION', 'DOMAIN-NAME', 'DOMAIN-TYPE', 'DOS', 'DOUBLE', 'DOWN', 'DRAG-ENABLED', 'DROP', 'DROP-DOWN', 'DROP-DOWN-LIST', 'DROP-FILE-NOTIFY', 'DROP-TARGET', 'DS-CLOSE-CURSOR', 'DSLOG-MANAGER', 'DUMP', 'DYNAMIC', 'DYNAMIC-ENUM', 'DYNAMIC-FUNCTION', 'DYNAMIC-INVOKE', 'EACH', 'ECHO', 'EDGE', 'EDGE-', 'EDGE-C', 'EDGE-CH', 'EDGE-CHA', 'EDGE-CHAR', 'EDGE-CHARS', 'EDGE-P', 'EDGE-PI', 'EDGE-PIX', 'EDGE-PIXE', 'EDGE-PIXEL', 'EDGE-PIXELS', 'EDIT-CAN-PASTE', 'EDIT-CAN-UNDO', 'EDIT-CLEAR', 'EDIT-COPY', 'EDIT-CUT', 'EDIT-PASTE', 'EDIT-UNDO', 'EDITING', 'EDITOR', 'ELSE', 'EMPTY', 'EMPTY-TEMP-TABLE', 'ENABLE', 'ENABLED-FIELDS', 'ENCODE', 'ENCRYPT', 'ENCRYPT-AUDIT-MAC-KEY', 'ENCRYPTION-SALT', 'END', 'END-DOCUMENT', 'END-ELEMENT', 'END-EVENT-GROUP', 'END-FILE-DROP', 'END-KEY', 'END-MOVE', 'END-RESIZE', 'END-ROW-RESIZE', 'END-USER-PROMPT', 'ENDKEY', 'ENTERED', 'ENTITY-EXPANSION-LIMIT', 'ENTRY', 'ENUM', 'EQ', 'ERROR', 'ERROR-COL', 'ERROR-COLU', 'ERROR-COLUM', 'ERROR-COLUMN', 'ERROR-ROW', 'ERROR-STACK-TRACE', 'ERROR-STAT', 'ERROR-STATU', 'ERROR-STATUS', 'ESCAPE', 'ETIME', 'EVENT', 'EVENT-GROUP-ID', 'EVENT-PROCEDURE', 'EVENT-PROCEDURE-CONTEXT', 'EVENT-T', 'EVENT-TY', 'EVENT-TYP', 'EVENT-TYPE', 'EVENTS', 
'EXCEPT', 'EXCLUSIVE', 'EXCLUSIVE-', 'EXCLUSIVE-ID', 'EXCLUSIVE-L', 'EXCLUSIVE-LO', 'EXCLUSIVE-LOC', 'EXCLUSIVE-LOCK', 'EXCLUSIVE-WEB-USER', 'EXECUTE', 'EXISTS', 'EXP', 'EXPAND', 'EXPANDABLE', 'EXPLICIT', 'EXPORT', 'EXPORT-PRINCIPAL', 'EXTENDED', 'EXTENT', 'EXTERNAL', 'FALSE', 'FETCH', 'FETCH-SELECTED-ROW', 'FGC', 'FGCO', 'FGCOL', 'FGCOLO', 'FGCOLOR', 'FIELD', 'FIELDS', 'FILE', 'FILE-CREATE-DATE', 'FILE-CREATE-TIME', 'FILE-INFO', 'FILE-INFOR', 'FILE-INFORM', 'FILE-INFORMA', 'FILE-INFORMAT', 'FILE-INFORMATI', 'FILE-INFORMATIO', 'FILE-INFORMATION', 'FILE-MOD-DATE', 'FILE-MOD-TIME', 'FILE-NAME', 'FILE-OFF', 'FILE-OFFS', 'FILE-OFFSE', 'FILE-OFFSET', 'FILE-SIZE', 'FILE-TYPE', 'FILENAME', 'FILL', 'FILL-IN', 'FILLED', 'FILTERS', 'FINAL', 'FINALLY', 'FIND', 'FIND-BY-ROWID', 'FIND-CASE-SENSITIVE', 'FIND-CURRENT', 'FIND-FIRST', 'FIND-GLOBAL', 'FIND-LAST', 'FIND-NEXT-OCCURRENCE', 'FIND-PREV-OCCURRENCE', 'FIND-SELECT', 'FIND-UNIQUE', 'FIND-WRAP-AROUND', 'FINDER', 'FIRST', 'FIRST-ASYNCH-REQUEST', 'FIRST-CHILD', 'FIRST-COLUMN', 'FIRST-FORM', 'FIRST-OBJECT', 'FIRST-OF', 'FIRST-PROC', 'FIRST-PROCE', 'FIRST-PROCED', 'FIRST-PROCEDU', 'FIRST-PROCEDUR', 'FIRST-PROCEDURE', 'FIRST-SERVER', 'FIRST-TAB-I', 'FIRST-TAB-IT', 'FIRST-TAB-ITE', 'FIRST-TAB-ITEM', 'FIT-LAST-COLUMN', 'FIXED-ONLY', 'FLAT-BUTTON', 'FLOAT', 'FOCUS', 'FOCUSED-ROW', 'FOCUSED-ROW-SELECTED', 'FONT', 'FONT-TABLE', 'FOR', 'FORCE-FILE', 'FORE', 'FOREG', 'FOREGR', 'FOREGRO', 'FOREGROU', 'FOREGROUN', 'FOREGROUND', 'FORM INPUT', 'FORM', 'FORM-LONG-INPUT', 'FORMA', 'FORMAT', 'FORMATTE', 'FORMATTED', 'FORWARD', 'FORWARDS', 'FRAGMEN', 'FRAGMENT', 'FRAM', 'FRAME', 'FRAME-COL', 'FRAME-DB', 'FRAME-DOWN', 'FRAME-FIELD', 'FRAME-FILE', 'FRAME-INDE', 'FRAME-INDEX', 'FRAME-LINE', 'FRAME-NAME', 'FRAME-ROW', 'FRAME-SPA', 'FRAME-SPAC', 'FRAME-SPACI', 'FRAME-SPACIN', 'FRAME-SPACING', 'FRAME-VAL', 'FRAME-VALU', 'FRAME-VALUE', 'FRAME-X', 'FRAME-Y', 'FREQUENCY', 'FROM', 'FROM-C', 'FROM-CH', 'FROM-CHA', 'FROM-CHAR', 'FROM-CHARS', 'FROM-CUR', 
'FROM-CURR', 'FROM-CURRE', 'FROM-CURREN', 'FROM-CURRENT', 'FROM-P', 'FROM-PI', 'FROM-PIX', 'FROM-PIXE', 'FROM-PIXEL', 'FROM-PIXELS', 'FULL-HEIGHT', 'FULL-HEIGHT-', 'FULL-HEIGHT-C', 'FULL-HEIGHT-CH', 'FULL-HEIGHT-CHA', 'FULL-HEIGHT-CHAR', 'FULL-HEIGHT-CHARS', 'FULL-HEIGHT-P', 'FULL-HEIGHT-PI', 'FULL-HEIGHT-PIX', 'FULL-HEIGHT-PIXE', 'FULL-HEIGHT-PIXEL', 'FULL-HEIGHT-PIXELS', 'FULL-PATHN', 'FULL-PATHNA', 'FULL-PATHNAM', 'FULL-PATHNAME', 'FULL-WIDTH', 'FULL-WIDTH-', 'FULL-WIDTH-C', 'FULL-WIDTH-CH', 'FULL-WIDTH-CHA', 'FULL-WIDTH-CHAR', 'FULL-WIDTH-CHARS', 'FULL-WIDTH-P', 'FULL-WIDTH-PI', 'FULL-WIDTH-PIX', 'FULL-WIDTH-PIXE', 'FULL-WIDTH-PIXEL', 'FULL-WIDTH-PIXELS', 'FUNCTION', 'FUNCTION-CALL-TYPE', 'GATEWAY', 'GATEWAYS', 'GE', 'GENERATE-MD5', 'GENERATE-PBE-KEY', 'GENERATE-PBE-SALT', 'GENERATE-RANDOM-KEY', 'GENERATE-UUID', 'GET', 'GET-ATTR-CALL-TYPE', 'GET-ATTRIBUTE-NODE', 'GET-BINARY-DATA', 'GET-BLUE', 'GET-BLUE-', 'GET-BLUE-V', 'GET-BLUE-VA', 'GET-BLUE-VAL', 'GET-BLUE-VALU', 'GET-BLUE-VALUE', 'GET-BROWSE-COLUMN', 'GET-BUFFER-HANDLE', 'GET-BYTE', 'GET-CALLBACK-PROC-CONTEXT', 'GET-CALLBACK-PROC-NAME', 'GET-CGI-LIST', 'GET-CGI-LONG-VALUE', 'GET-CGI-VALUE', 'GET-CLASS', 'GET-CODEPAGES', 'GET-COLLATIONS', 'GET-CONFIG-VALUE', 'GET-CURRENT', 'GET-DOUBLE', 'GET-DROPPED-FILE', 'GET-DYNAMIC', 'GET-ERROR-COLUMN', 'GET-ERROR-ROW', 'GET-FILE', 'GET-FILE-NAME', 'GET-FILE-OFFSE', 'GET-FILE-OFFSET', 'GET-FIRST', 'GET-FLOAT', 'GET-GREEN', 'GET-GREEN-', 'GET-GREEN-V', 'GET-GREEN-VA', 'GET-GREEN-VAL', 'GET-GREEN-VALU', 'GET-GREEN-VALUE', 'GET-INDEX-BY-NAMESPACE-NAME', 'GET-INDEX-BY-QNAME', 'GET-INT64', 'GET-ITERATION', 'GET-KEY-VAL', 'GET-KEY-VALU', 'GET-KEY-VALUE', 'GET-LAST', 'GET-LOCALNAME-BY-INDEX', 'GET-LONG', 'GET-MESSAGE', 'GET-NEXT', 'GET-NUMBER', 'GET-POINTER-VALUE', 'GET-PREV', 'GET-PRINTERS', 'GET-PROPERTY', 'GET-QNAME-BY-INDEX', 'GET-RED', 'GET-RED-', 'GET-RED-V', 'GET-RED-VA', 'GET-RED-VAL', 'GET-RED-VALU', 'GET-RED-VALUE', 'GET-REPOSITIONED-ROW', 'GET-RGB-VALUE', 
'GET-SELECTED', 'GET-SELECTED-', 'GET-SELECTED-W', 'GET-SELECTED-WI', 'GET-SELECTED-WID', 'GET-SELECTED-WIDG', 'GET-SELECTED-WIDGE', 'GET-SELECTED-WIDGET', 'GET-SHORT', 'GET-SIGNATURE', 'GET-SIZE', 'GET-STRING', 'GET-TAB-ITEM', 'GET-TEXT-HEIGHT', 'GET-TEXT-HEIGHT-', 'GET-TEXT-HEIGHT-C', 'GET-TEXT-HEIGHT-CH', 'GET-TEXT-HEIGHT-CHA', 'GET-TEXT-HEIGHT-CHAR', 'GET-TEXT-HEIGHT-CHARS', 'GET-TEXT-HEIGHT-P', 'GET-TEXT-HEIGHT-PI', 'GET-TEXT-HEIGHT-PIX', 'GET-TEXT-HEIGHT-PIXE', 'GET-TEXT-HEIGHT-PIXEL', 'GET-TEXT-HEIGHT-PIXELS', 'GET-TEXT-WIDTH', 'GET-TEXT-WIDTH-', 'GET-TEXT-WIDTH-C', 'GET-TEXT-WIDTH-CH', 'GET-TEXT-WIDTH-CHA', 'GET-TEXT-WIDTH-CHAR', 'GET-TEXT-WIDTH-CHARS', 'GET-TEXT-WIDTH-P', 'GET-TEXT-WIDTH-PI', 'GET-TEXT-WIDTH-PIX', 'GET-TEXT-WIDTH-PIXE', 'GET-TEXT-WIDTH-PIXEL', 'GET-TEXT-WIDTH-PIXELS', 'GET-TYPE-BY-INDEX', 'GET-TYPE-BY-NAMESPACE-NAME', 'GET-TYPE-BY-QNAME', 'GET-UNSIGNED-LONG', 'GET-UNSIGNED-SHORT', 'GET-URI-BY-INDEX', 'GET-VALUE-BY-INDEX', 'GET-VALUE-BY-NAMESPACE-NAME', 'GET-VALUE-BY-QNAME', 'GET-WAIT-STATE', 'GETBYTE', 'GLOBAL', 'GO-ON', 'GO-PEND', 'GO-PENDI', 'GO-PENDIN', 'GO-PENDING', 'GRANT', 'GRAPHIC-E', 'GRAPHIC-ED', 'GRAPHIC-EDG', 'GRAPHIC-EDGE', 'GRID-FACTOR-H', 'GRID-FACTOR-HO', 'GRID-FACTOR-HOR', 'GRID-FACTOR-HORI', 'GRID-FACTOR-HORIZ', 'GRID-FACTOR-HORIZO', 'GRID-FACTOR-HORIZON', 'GRID-FACTOR-HORIZONT', 'GRID-FACTOR-HORIZONTA', 'GRID-FACTOR-HORIZONTAL', 'GRID-FACTOR-V', 'GRID-FACTOR-VE', 'GRID-FACTOR-VER', 'GRID-FACTOR-VERT', 'GRID-FACTOR-VERTI', 'GRID-FACTOR-VERTIC', 'GRID-FACTOR-VERTICA', 'GRID-FACTOR-VERTICAL', 'GRID-SNAP', 'GRID-UNIT-HEIGHT', 'GRID-UNIT-HEIGHT-', 'GRID-UNIT-HEIGHT-C', 'GRID-UNIT-HEIGHT-CH', 'GRID-UNIT-HEIGHT-CHA', 'GRID-UNIT-HEIGHT-CHARS', 'GRID-UNIT-HEIGHT-P', 'GRID-UNIT-HEIGHT-PI', 'GRID-UNIT-HEIGHT-PIX', 'GRID-UNIT-HEIGHT-PIXE', 'GRID-UNIT-HEIGHT-PIXEL', 'GRID-UNIT-HEIGHT-PIXELS', 'GRID-UNIT-WIDTH', 'GRID-UNIT-WIDTH-', 'GRID-UNIT-WIDTH-C', 'GRID-UNIT-WIDTH-CH', 'GRID-UNIT-WIDTH-CHA', 'GRID-UNIT-WIDTH-CHAR', 
'GRID-UNIT-WIDTH-CHARS', 'GRID-UNIT-WIDTH-P', 'GRID-UNIT-WIDTH-PI', 'GRID-UNIT-WIDTH-PIX', 'GRID-UNIT-WIDTH-PIXE', 'GRID-UNIT-WIDTH-PIXEL', 'GRID-UNIT-WIDTH-PIXELS', 'GRID-VISIBLE', 'GROUP', 'GT', 'GUID', 'HANDLE', 'HANDLER', 'HAS-RECORDS', 'HAVING', 'HEADER', 'HEIGHT', 'HEIGHT-', 'HEIGHT-C', 'HEIGHT-CH', 'HEIGHT-CHA', 'HEIGHT-CHAR', 'HEIGHT-CHARS', 'HEIGHT-P', 'HEIGHT-PI', 'HEIGHT-PIX', 'HEIGHT-PIXE', 'HEIGHT-PIXEL', 'HEIGHT-PIXELS', 'HELP', 'HEX-DECODE', 'HEX-ENCODE', 'HIDDEN', 'HIDE', 'HORI', 'HORIZ', 'HORIZO', 'HORIZON', 'HORIZONT', 'HORIZONTA', 'HORIZONTAL', 'HOST-BYTE-ORDER', 'HTML-CHARSET', 'HTML-END-OF-LINE', 'HTML-END-OF-PAGE', 'HTML-FRAME-BEGIN', 'HTML-FRAME-END', 'HTML-HEADER-BEGIN', 'HTML-HEADER-END', 'HTML-TITLE-BEGIN', 'HTML-TITLE-END', 'HWND', 'ICON', 'IF', 'IMAGE', 'IMAGE-DOWN', 'IMAGE-INSENSITIVE', 'IMAGE-SIZE', 'IMAGE-SIZE-C', 'IMAGE-SIZE-CH', 'IMAGE-SIZE-CHA', 'IMAGE-SIZE-CHAR', 'IMAGE-SIZE-CHARS', 'IMAGE-SIZE-P', 'IMAGE-SIZE-PI', 'IMAGE-SIZE-PIX', 'IMAGE-SIZE-PIXE', 'IMAGE-SIZE-PIXEL', 'IMAGE-SIZE-PIXELS', 'IMAGE-UP', 'IMMEDIATE-DISPLAY', 'IMPLEMENTS', 'IMPORT', 'IMPORT-PRINCIPAL', 'IN', 'IN-HANDLE', 'INCREMENT-EXCLUSIVE-ID', 'INDEX', 'INDEX-HINT', 'INDEX-INFORMATION', 'INDEXED-REPOSITION', 'INDICATOR', 'INFO', 'INFOR', 'INFORM', 'INFORMA', 'INFORMAT', 'INFORMATI', 'INFORMATIO', 'INFORMATION', 'INHERIT-BGC', 'INHERIT-BGCO', 'INHERIT-BGCOL', 'INHERIT-BGCOLO', 'INHERIT-BGCOLOR', 'INHERIT-FGC', 'INHERIT-FGCO', 'INHERIT-FGCOL', 'INHERIT-FGCOLO', 'INHERIT-FGCOLOR', 'INHERITS', 'INIT', 'INITI', 'INITIA', 'INITIAL', 'INITIAL-DIR', 'INITIAL-FILTER', 'INITIALIZE-DOCUMENT-TYPE', 'INITIATE', 'INNER-CHARS', 'INNER-LINES', 'INPUT', 'INPUT-O', 'INPUT-OU', 'INPUT-OUT', 'INPUT-OUTP', 'INPUT-OUTPU', 'INPUT-OUTPUT', 'INPUT-VALUE', 'INSERT', 'INSERT-ATTRIBUTE', 'INSERT-B', 'INSERT-BA', 'INSERT-BAC', 'INSERT-BACK', 'INSERT-BACKT', 'INSERT-BACKTA', 'INSERT-BACKTAB', 'INSERT-FILE', 'INSERT-ROW', 'INSERT-STRING', 'INSERT-T', 'INSERT-TA', 'INSERT-TAB', 'INT64', 'INT', 
'INTEGER', 'INTERFACE', 'INTERNAL-ENTRIES', 'INTO', 'INVOKE', 'IS', 'IS-ATTR', 'IS-ATTR-', 'IS-ATTR-S', 'IS-ATTR-SP', 'IS-ATTR-SPA', 'IS-ATTR-SPAC', 'IS-ATTR-SPACE', 'IS-CLASS', 'IS-JSON', 'IS-LEAD-BYTE', 'IS-OPEN', 'IS-PARAMETER-SET', 'IS-PARTITIONED', 'IS-ROW-SELECTED', 'IS-SELECTED', 'IS-XML', 'ITEM', 'ITEMS-PER-ROW', 'JOIN', 'JOIN-BY-SQLDB', 'KBLABEL', 'KEEP-CONNECTION-OPEN', 'KEEP-FRAME-Z', 'KEEP-FRAME-Z-', 'KEEP-FRAME-Z-O', 'KEEP-FRAME-Z-OR', 'KEEP-FRAME-Z-ORD', 'KEEP-FRAME-Z-ORDE', 'KEEP-FRAME-Z-ORDER', 'KEEP-MESSAGES', 'KEEP-SECURITY-CACHE', 'KEEP-TAB-ORDER', 'KEY', 'KEY-CODE', 'KEY-FUNC', 'KEY-FUNCT', 'KEY-FUNCTI', 'KEY-FUNCTIO', 'KEY-FUNCTION', 'KEY-LABEL', 'KEYCODE', 'KEYFUNC', 'KEYFUNCT', 'KEYFUNCTI', 'KEYFUNCTIO', 'KEYFUNCTION', 'KEYLABEL', 'KEYS', 'KEYWORD', 'KEYWORD-ALL', 'LABEL', 'LABEL-BGC', 'LABEL-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'LABEL-BGCOLOR', 'LABEL-DC', 'LABEL-DCO', 'LABEL-DCOL', 'LABEL-DCOLO', 'LABEL-DCOLOR', 'LABEL-FGC', 'LABEL-FGCO', 'LABEL-FGCOL', 'LABEL-FGCOLO', 'LABEL-FGCOLOR', 'LABEL-FONT', 'LABEL-PFC', 'LABEL-PFCO', 'LABEL-PFCOL', 'LABEL-PFCOLO', 'LABEL-PFCOLOR', 'LABELS', 'LABELS-HAVE-COLONS', 'LANDSCAPE', 'LANGUAGE', 'LANGUAGES', 'LARGE', 'LARGE-TO-SMALL', 'LAST', 'LAST-ASYNCH-REQUEST', 'LAST-BATCH', 'LAST-CHILD', 'LAST-EVEN', 'LAST-EVENT', 'LAST-FORM', 'LAST-KEY', 'LAST-OBJECT', 'LAST-OF', 'LAST-PROCE', 'LAST-PROCED', 'LAST-PROCEDU', 'LAST-PROCEDUR', 'LAST-PROCEDURE', 'LAST-SERVER', 'LAST-TAB-I', 'LAST-TAB-IT', 'LAST-TAB-ITE', 'LAST-TAB-ITEM', 'LASTKEY', 'LC', 'LDBNAME', 'LE', 'LEAVE', 'LEFT-ALIGN', 'LEFT-ALIGNE', 'LEFT-ALIGNED', 'LEFT-TRIM', 'LENGTH', 'LIBRARY', 'LIKE', 'LIKE-SEQUENTIAL', 'LINE', 'LINE-COUNT', 'LINE-COUNTE', 'LINE-COUNTER', 'LIST-EVENTS', 'LIST-ITEM-PAIRS', 'LIST-ITEMS', 'LIST-PROPERTY-NAMES', 'LIST-QUERY-ATTRS', 'LIST-SET-ATTRS', 'LIST-WIDGETS', 'LISTI', 'LISTIN', 'LISTING', 'LITERAL-QUESTION', 'LITTLE-ENDIAN', 'LOAD', 'LOAD-DOMAINS', 'LOAD-ICON', 'LOAD-IMAGE', 'LOAD-IMAGE-DOWN', 'LOAD-IMAGE-INSENSITIVE', 
'LOAD-IMAGE-UP', 'LOAD-MOUSE-P', 'LOAD-MOUSE-PO', 'LOAD-MOUSE-POI', 'LOAD-MOUSE-POIN', 'LOAD-MOUSE-POINT', 'LOAD-MOUSE-POINTE', 'LOAD-MOUSE-POINTER', 'LOAD-PICTURE', 'LOAD-SMALL-ICON', 'LOCAL-NAME', 'LOCAL-VERSION-INFO', 'LOCATOR-COLUMN-NUMBER', 'LOCATOR-LINE-NUMBER', 'LOCATOR-PUBLIC-ID', 'LOCATOR-SYSTEM-ID', 'LOCATOR-TYPE', 'LOCK-REGISTRATION', 'LOCKED', 'LOG', 'LOG-AUDIT-EVENT', 'LOG-MANAGER', 'LOGICAL', 'LOGIN-EXPIRATION-TIMESTAMP', 'LOGIN-HOST', 'LOGIN-STATE', 'LOGOUT', 'LONGCHAR', 'LOOKAHEAD', 'LOOKUP', 'LT', 'MACHINE-CLASS', 'MANDATORY', 'MANUAL-HIGHLIGHT', 'MAP', 'MARGIN-EXTRA', 'MARGIN-HEIGHT', 'MARGIN-HEIGHT-', 'MARGIN-HEIGHT-C', 'MARGIN-HEIGHT-CH', 'MARGIN-HEIGHT-CHA', 'MARGIN-HEIGHT-CHAR', 'MARGIN-HEIGHT-CHARS', 'MARGIN-HEIGHT-P', 'MARGIN-HEIGHT-PI', 'MARGIN-HEIGHT-PIX', 'MARGIN-HEIGHT-PIXE', 'MARGIN-HEIGHT-PIXEL', 'MARGIN-HEIGHT-PIXELS', 'MARGIN-WIDTH', 'MARGIN-WIDTH-', 'MARGIN-WIDTH-C', 'MARGIN-WIDTH-CH', 'MARGIN-WIDTH-CHA', 'MARGIN-WIDTH-CHAR', 'MARGIN-WIDTH-CHARS', 'MARGIN-WIDTH-P', 'MARGIN-WIDTH-PI', 'MARGIN-WIDTH-PIX', 'MARGIN-WIDTH-PIXE', 'MARGIN-WIDTH-PIXEL', 'MARGIN-WIDTH-PIXELS', 'MARK-NEW', 'MARK-ROW-STATE', 'MATCHES', 'MAX', 'MAX-BUTTON', 'MAX-CHARS', 'MAX-DATA-GUESS', 'MAX-HEIGHT', 'MAX-HEIGHT-C', 'MAX-HEIGHT-CH', 'MAX-HEIGHT-CHA', 'MAX-HEIGHT-CHAR', 'MAX-HEIGHT-CHARS', 'MAX-HEIGHT-P', 'MAX-HEIGHT-PI', 'MAX-HEIGHT-PIX', 'MAX-HEIGHT-PIXE', 'MAX-HEIGHT-PIXEL', 'MAX-HEIGHT-PIXELS', 'MAX-ROWS', 'MAX-SIZE', 'MAX-VAL', 'MAX-VALU', 'MAX-VALUE', 'MAX-WIDTH', 'MAX-WIDTH-', 'MAX-WIDTH-C', 'MAX-WIDTH-CH', 'MAX-WIDTH-CHA', 'MAX-WIDTH-CHAR', 'MAX-WIDTH-CHARS', 'MAX-WIDTH-P', 'MAX-WIDTH-PI', 'MAX-WIDTH-PIX', 'MAX-WIDTH-PIXE', 'MAX-WIDTH-PIXEL', 'MAX-WIDTH-PIXELS', 'MAXI', 'MAXIM', 'MAXIMIZE', 'MAXIMU', 'MAXIMUM', 'MAXIMUM-LEVEL', 'MD5-DIGEST', 'MEMBER', 'MEMPTR-TO-NODE-VALUE', 'MENU', 'MENU-BAR', 'MENU-ITEM', 'MENU-K', 'MENU-KE', 'MENU-KEY', 'MENU-M', 'MENU-MO', 'MENU-MOU', 'MENU-MOUS', 'MENU-MOUSE', 'MENUBAR', 'MERGE-BY-FIELD', 'MESSAGE', 'MESSAGE-AREA', 
'MESSAGE-AREA-FONT', 'MESSAGE-LINES', 'METHOD', 'MIN', 'MIN-BUTTON', 'MIN-COLUMN-WIDTH-C', 'MIN-COLUMN-WIDTH-CH', 'MIN-COLUMN-WIDTH-CHA', 'MIN-COLUMN-WIDTH-CHAR', 'MIN-COLUMN-WIDTH-CHARS', 'MIN-COLUMN-WIDTH-P', 'MIN-COLUMN-WIDTH-PI', 'MIN-COLUMN-WIDTH-PIX', 'MIN-COLUMN-WIDTH-PIXE', 'MIN-COLUMN-WIDTH-PIXEL', 'MIN-COLUMN-WIDTH-PIXELS', 'MIN-HEIGHT', 'MIN-HEIGHT-', 'MIN-HEIGHT-C', 'MIN-HEIGHT-CH', 'MIN-HEIGHT-CHA', 'MIN-HEIGHT-CHAR', 'MIN-HEIGHT-CHARS', 'MIN-HEIGHT-P', 'MIN-HEIGHT-PI', 'MIN-HEIGHT-PIX', 'MIN-HEIGHT-PIXE', 'MIN-HEIGHT-PIXEL', 'MIN-HEIGHT-PIXELS', 'MIN-SIZE', 'MIN-VAL', 'MIN-VALU', 'MIN-VALUE', 'MIN-WIDTH', 'MIN-WIDTH-', 'MIN-WIDTH-C', 'MIN-WIDTH-CH', 'MIN-WIDTH-CHA', 'MIN-WIDTH-CHAR', 'MIN-WIDTH-CHARS', 'MIN-WIDTH-P', 'MIN-WIDTH-PI', 'MIN-WIDTH-PIX', 'MIN-WIDTH-PIXE', 'MIN-WIDTH-PIXEL', 'MIN-WIDTH-PIXELS', 'MINI', 'MINIM', 'MINIMU', 'MINIMUM', 'MOD', 'MODIFIED', 'MODU', 'MODUL', 'MODULO', 'MONTH', 'MOUSE', 'MOUSE-P', 'MOUSE-PO', 'MOUSE-POI', 'MOUSE-POIN', 'MOUSE-POINT', 'MOUSE-POINTE', 'MOUSE-POINTER', 'MOVABLE', 'MOVE-AFTER', 'MOVE-AFTER-', 'MOVE-AFTER-T', 'MOVE-AFTER-TA', 'MOVE-AFTER-TAB', 'MOVE-AFTER-TAB-', 'MOVE-AFTER-TAB-I', 'MOVE-AFTER-TAB-IT', 'MOVE-AFTER-TAB-ITE', 'MOVE-AFTER-TAB-ITEM', 'MOVE-BEFOR', 'MOVE-BEFORE', 'MOVE-BEFORE-', 'MOVE-BEFORE-T', 'MOVE-BEFORE-TA', 'MOVE-BEFORE-TAB', 'MOVE-BEFORE-TAB-', 'MOVE-BEFORE-TAB-I', 'MOVE-BEFORE-TAB-IT', 'MOVE-BEFORE-TAB-ITE', 'MOVE-BEFORE-TAB-ITEM', 'MOVE-COL', 'MOVE-COLU', 'MOVE-COLUM', 'MOVE-COLUMN', 'MOVE-TO-B', 'MOVE-TO-BO', 'MOVE-TO-BOT', 'MOVE-TO-BOTT', 'MOVE-TO-BOTTO', 'MOVE-TO-BOTTOM', 'MOVE-TO-EOF', 'MOVE-TO-T', 'MOVE-TO-TO', 'MOVE-TO-TOP', 'MPE', 'MTIME', 'MULTI-COMPILE', 'MULTIPLE', 'MULTIPLE-KEY', 'MULTITASKING-INTERVAL', 'MUST-EXIST', 'NAME', 'NAMESPACE-PREFIX', 'NAMESPACE-URI', 'NATIVE', 'NE', 'NEEDS-APPSERVER-PROMPT', 'NEEDS-PROMPT', 'NEW', 'NEW-INSTANCE', 'NEW-ROW', 'NEXT', 'NEXT-COLUMN', 'NEXT-PROMPT', 'NEXT-ROWID', 'NEXT-SIBLING', 'NEXT-TAB-I', 'NEXT-TAB-IT', 'NEXT-TAB-ITE', 
'NEXT-TAB-ITEM', 'NEXT-VALUE', 'NO', 'NO-APPLY', 'NO-ARRAY-MESSAGE', 'NO-ASSIGN', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-L', 'NO-ATTR-LI', 'NO-ATTR-LIS', 'NO-ATTR-LIST', 'NO-ATTR-S', 'NO-ATTR-SP', 'NO-ATTR-SPA', 'NO-ATTR-SPAC', 'NO-ATTR-SPACE', 'NO-AUTO-VALIDATE', 'NO-BIND-WHERE', 'NO-BOX', 'NO-CONSOLE', 'NO-CONVERT', 'NO-CONVERT-3D-COLORS', 'NO-CURRENT-VALUE', 'NO-DEBUG', 'NO-DRAG', 'NO-ECHO', 'NO-EMPTY-SPACE', 'NO-ERROR', 'NO-F', 'NO-FI', 'NO-FIL', 'NO-FILL', 'NO-FOCUS', 'NO-HELP', 'NO-HIDE', 'NO-INDEX-HINT', 'NO-INHERIT-BGC', 'NO-INHERIT-BGCO', 'NO-INHERIT-BGCOLOR', 'NO-INHERIT-FGC', 'NO-INHERIT-FGCO', 'NO-INHERIT-FGCOL', 'NO-INHERIT-FGCOLO', 'NO-INHERIT-FGCOLOR', 'NO-JOIN-BY-SQLDB', 'NO-LABE', 'NO-LABELS', 'NO-LOBS', 'NO-LOCK', 'NO-LOOKAHEAD', 'NO-MAP', 'NO-MES', 'NO-MESS', 'NO-MESSA', 'NO-MESSAG', 'NO-MESSAGE', 'NO-PAUSE', 'NO-PREFE', 'NO-PREFET', 'NO-PREFETC', 'NO-PREFETCH', 'NO-ROW-MARKERS', 'NO-SCROLLBAR-VERTICAL', 'NO-SEPARATE-CONNECTION', 'NO-SEPARATORS', 'NO-TAB-STOP', 'NO-UND', 'NO-UNDE', 'NO-UNDER', 'NO-UNDERL', 'NO-UNDERLI', 'NO-UNDERLIN', 'NO-UNDERLINE', 'NO-UNDO', 'NO-VAL', 'NO-VALI', 'NO-VALID', 'NO-VALIDA', 'NO-VALIDAT', 'NO-VALIDATE', 'NO-WAIT', 'NO-WORD-WRAP', 'NODE-VALUE-TO-MEMPTR', 'NONAMESPACE-SCHEMA-LOCATION', 'NONE', 'NORMALIZE', 'NOT', 'NOT-ACTIVE', 'NOW', 'NULL', 'NUM-ALI', 'NUM-ALIA', 'NUM-ALIAS', 'NUM-ALIASE', 'NUM-ALIASES', 'NUM-BUFFERS', 'NUM-BUT', 'NUM-BUTT', 'NUM-BUTTO', 'NUM-BUTTON', 'NUM-BUTTONS', 'NUM-COL', 'NUM-COLU', 'NUM-COLUM', 'NUM-COLUMN', 'NUM-COLUMNS', 'NUM-COPIES', 'NUM-DBS', 'NUM-DROPPED-FILES', 'NUM-ENTRIES', 'NUM-FIELDS', 'NUM-FORMATS', 'NUM-ITEMS', 'NUM-ITERATIONS', 'NUM-LINES', 'NUM-LOCKED-COL', 'NUM-LOCKED-COLU', 'NUM-LOCKED-COLUM', 'NUM-LOCKED-COLUMN', 'NUM-LOCKED-COLUMNS', 'NUM-MESSAGES', 'NUM-PARAMETERS', 'NUM-REFERENCES', 'NUM-REPLACED', 'NUM-RESULTS', 'NUM-SELECTED', 'NUM-SELECTED-', 'NUM-SELECTED-ROWS', 'NUM-SELECTED-W', 'NUM-SELECTED-WI', 'NUM-SELECTED-WID', 'NUM-SELECTED-WIDG', 'NUM-SELECTED-WIDGE', 
'NUM-SELECTED-WIDGET', 'NUM-SELECTED-WIDGETS', 'NUM-TABS', 'NUM-TO-RETAIN', 'NUM-VISIBLE-COLUMNS', 'NUMERIC', 'NUMERIC-F', 'NUMERIC-FO', 'NUMERIC-FOR', 'NUMERIC-FORM', 'NUMERIC-FORMA', 'NUMERIC-FORMAT', 'OCTET-LENGTH', 'OF', 'OFF', 'OK', 'OK-CANCEL', 'OLD', 'ON', 'ON-FRAME', 'ON-FRAME-', 'ON-FRAME-B', 'ON-FRAME-BO', 'ON-FRAME-BOR', 'ON-FRAME-BORD', 'ON-FRAME-BORDE', 'ON-FRAME-BORDER', 'OPEN', 'OPSYS', 'OPTION', 'OR', 'ORDERED-JOIN', 'ORDINAL', 'OS-APPEND', 'OS-COMMAND', 'OS-COPY', 'OS-CREATE-DIR', 'OS-DELETE', 'OS-DIR', 'OS-DRIVE', 'OS-DRIVES', 'OS-ERROR', 'OS-GETENV', 'OS-RENAME', 'OTHERWISE', 'OUTPUT', 'OVERLAY', 'OVERRIDE', 'OWNER', 'PAGE', 'PAGE-BOT', 'PAGE-BOTT', 'PAGE-BOTTO', 'PAGE-BOTTOM', 'PAGE-NUM', 'PAGE-NUMB', 'PAGE-NUMBE', 'PAGE-NUMBER', 'PAGE-SIZE', 'PAGE-TOP', 'PAGE-WID', 'PAGE-WIDT', 'PAGE-WIDTH', 'PAGED', 'PARAM', 'PARAME', 'PARAMET', 'PARAMETE', 'PARAMETER', 'PARENT', 'PARSE-STATUS', 'PARTIAL-KEY', 'PASCAL', 'PASSWORD-FIELD', 'PATHNAME', 'PAUSE', 'PBE-HASH-ALG', 'PBE-HASH-ALGO', 'PBE-HASH-ALGOR', 'PBE-HASH-ALGORI', 'PBE-HASH-ALGORIT', 'PBE-HASH-ALGORITH', 'PBE-HASH-ALGORITHM', 'PBE-KEY-ROUNDS', 'PDBNAME', 'PERSIST', 'PERSISTE', 'PERSISTEN', 'PERSISTENT', 'PERSISTENT-CACHE-DISABLED', 'PFC', 'PFCO', 'PFCOL', 'PFCOLO', 'PFCOLOR', 'PIXELS', 'PIXELS-PER-COL', 'PIXELS-PER-COLU', 'PIXELS-PER-COLUM', 'PIXELS-PER-COLUMN', 'PIXELS-PER-ROW', 'POPUP-M', 'POPUP-ME', 'POPUP-MEN', 'POPUP-MENU', 'POPUP-O', 'POPUP-ON', 'POPUP-ONL', 'POPUP-ONLY', 'PORTRAIT', 'POSITION', 'PRECISION', 'PREFER-DATASET', 'PREPARE-STRING', 'PREPARED', 'PREPROC', 'PREPROCE', 'PREPROCES', 'PREPROCESS', 'PRESEL', 'PRESELE', 'PRESELEC', 'PRESELECT', 'PREV', 'PREV-COLUMN', 'PREV-SIBLING', 'PREV-TAB-I', 'PREV-TAB-IT', 'PREV-TAB-ITE', 'PREV-TAB-ITEM', 'PRIMARY', 'PRINTER', 'PRINTER-CONTROL-HANDLE', 'PRINTER-HDC', 'PRINTER-NAME', 'PRINTER-PORT', 'PRINTER-SETUP', 'PRIVATE', 'PRIVATE-D', 'PRIVATE-DA', 'PRIVATE-DAT', 'PRIVATE-DATA', 'PRIVILEGES', 'PROC-HA', 'PROC-HAN', 'PROC-HAND', 'PROC-HANDL', 
'PROC-HANDLE', 'PROC-ST', 'PROC-STA', 'PROC-STAT', 'PROC-STATU', 'PROC-STATUS', 'PROC-TEXT', 'PROC-TEXT-BUFFER', 'PROCE', 'PROCED', 'PROCEDU', 'PROCEDUR', 'PROCEDURE', 'PROCEDURE-CALL-TYPE', 'PROCEDURE-TYPE', 'PROCESS', 'PROFILER', 'PROGRAM-NAME', 'PROGRESS', 'PROGRESS-S', 'PROGRESS-SO', 'PROGRESS-SOU', 'PROGRESS-SOUR', 'PROGRESS-SOURC', 'PROGRESS-SOURCE', 'PROMPT', 'PROMPT-F', 'PROMPT-FO', 'PROMPT-FOR', 'PROMSGS', 'PROPATH', 'PROPERTY', 'PROTECTED', 'PROVERS', 'PROVERSI', 'PROVERSIO', 'PROVERSION', 'PROXY', 'PROXY-PASSWORD', 'PROXY-USERID', 'PUBLIC', 'PUBLIC-ID', 'PUBLISH', 'PUBLISHED-EVENTS', 'PUT', 'PUT-BYTE', 'PUT-DOUBLE', 'PUT-FLOAT', 'PUT-INT64', 'PUT-KEY-VAL', 'PUT-KEY-VALU', 'PUT-KEY-VALUE', 'PUT-LONG', 'PUT-SHORT', 'PUT-STRING', 'PUT-UNSIGNED-LONG', 'PUTBYTE', 'QUERY', 'QUERY-CLOSE', 'QUERY-OFF-END', 'QUERY-OPEN', 'QUERY-PREPARE', 'QUERY-TUNING', 'QUESTION', 'QUIT', 'QUOTER', 'R-INDEX', 'RADIO-BUTTONS', 'RADIO-SET', 'RANDOM', 'RAW', 'RAW-TRANSFER', 'RCODE-INFO', 'RCODE-INFOR', 'RCODE-INFORM', 'RCODE-INFORMA', 'RCODE-INFORMAT', 'RCODE-INFORMATI', 'RCODE-INFORMATIO', 'RCODE-INFORMATION', 'READ-AVAILABLE', 'READ-EXACT-NUM', 'READ-FILE', 'READ-JSON', 'READ-ONLY', 'READ-XML', 'READ-XMLSCHEMA', 'READKEY', 'REAL', 'RECID', 'RECORD-LENGTH', 'RECT', 'RECTA', 'RECTAN', 'RECTANG', 'RECTANGL', 'RECTANGLE', 'RECURSIVE', 'REFERENCE-ONLY', 'REFRESH', 'REFRESH-AUDIT-POLICY', 'REFRESHABLE', 'REGISTER-DOMAIN', 'RELEASE', 'REMOTE', 'REMOVE-EVENTS-PROCEDURE', 'REMOVE-SUPER-PROCEDURE', 'REPEAT', 'REPLACE', 'REPLACE-SELECTION-TEXT', 'REPOSITION', 'REPOSITION-BACKWARD', 'REPOSITION-FORWARD', 'REPOSITION-MODE', 'REPOSITION-TO-ROW', 'REPOSITION-TO-ROWID', 'REQUEST', 'REQUEST-INFO', 'RESET', 'RESIZA', 'RESIZAB', 'RESIZABL', 'RESIZABLE', 'RESIZE', 'RESPONSE-INFO', 'RESTART-ROW', 'RESTART-ROWID', 'RETAIN', 'RETAIN-SHAPE', 'RETRY', 'RETRY-CANCEL', 'RETURN', 'RETURN-ALIGN', 'RETURN-ALIGNE', 'RETURN-INS', 'RETURN-INSE', 'RETURN-INSER', 'RETURN-INSERT', 'RETURN-INSERTE', 
'RETURN-INSERTED', 'RETURN-TO-START-DI', 'RETURN-TO-START-DIR', 'RETURN-VAL', 'RETURN-VALU', 'RETURN-VALUE', 'RETURN-VALUE-DATA-TYPE', 'RETURNS', 'REVERSE-FROM', 'REVERT', 'REVOKE', 'RGB-VALUE', 'RIGHT-ALIGNED', 'RIGHT-TRIM', 'ROLES', 'ROUND', 'ROUTINE-LEVEL', 'ROW', 'ROW-HEIGHT-CHARS', 'ROW-HEIGHT-PIXELS', 'ROW-MARKERS', 'ROW-OF', 'ROW-RESIZABLE', 'ROWID', 'RULE', 'RUN', 'RUN-PROCEDURE', 'SAVE CACHE', 'SAVE', 'SAVE-AS', 'SAVE-FILE', 'SAX-COMPLE', 'SAX-COMPLET', 'SAX-COMPLETE', 'SAX-PARSE', 'SAX-PARSE-FIRST', 'SAX-PARSE-NEXT', 'SAX-PARSER-ERROR', 'SAX-RUNNING', 'SAX-UNINITIALIZED', 'SAX-WRITE-BEGIN', 'SAX-WRITE-COMPLETE', 'SAX-WRITE-CONTENT', 'SAX-WRITE-ELEMENT', 'SAX-WRITE-ERROR', 'SAX-WRITE-IDLE', 'SAX-WRITE-TAG', 'SAX-WRITER', 'SCHEMA', 'SCHEMA-LOCATION', 'SCHEMA-MARSHAL', 'SCHEMA-PATH', 'SCREEN', 'SCREEN-IO', 'SCREEN-LINES', 'SCREEN-VAL', 'SCREEN-VALU', 'SCREEN-VALUE', 'SCROLL', 'SCROLL-BARS', 'SCROLL-DELTA', 'SCROLL-OFFSET', 'SCROLL-TO-CURRENT-ROW', 'SCROLL-TO-I', 'SCROLL-TO-IT', 'SCROLL-TO-ITE', 'SCROLL-TO-ITEM', 'SCROLL-TO-SELECTED-ROW', 'SCROLLABLE', 'SCROLLBAR-H', 'SCROLLBAR-HO', 'SCROLLBAR-HOR', 'SCROLLBAR-HORI', 'SCROLLBAR-HORIZ', 'SCROLLBAR-HORIZO', 'SCROLLBAR-HORIZON', 'SCROLLBAR-HORIZONT', 'SCROLLBAR-HORIZONTA', 'SCROLLBAR-HORIZONTAL', 'SCROLLBAR-V', 'SCROLLBAR-VE', 'SCROLLBAR-VER', 'SCROLLBAR-VERT', 'SCROLLBAR-VERTI', 'SCROLLBAR-VERTIC', 'SCROLLBAR-VERTICA', 'SCROLLBAR-VERTICAL', 'SCROLLED-ROW-POS', 'SCROLLED-ROW-POSI', 'SCROLLED-ROW-POSIT', 'SCROLLED-ROW-POSITI', 'SCROLLED-ROW-POSITIO', 'SCROLLED-ROW-POSITION', 'SCROLLING', 'SDBNAME', 'SEAL', 'SEAL-TIMESTAMP', 'SEARCH', 'SEARCH-SELF', 'SEARCH-TARGET', 'SECTION', 'SECURITY-POLICY', 'SEEK', 'SELECT', 'SELECT-ALL', 'SELECT-FOCUSED-ROW', 'SELECT-NEXT-ROW', 'SELECT-PREV-ROW', 'SELECT-ROW', 'SELECTABLE', 'SELECTED', 'SELECTION', 'SELECTION-END', 'SELECTION-LIST', 'SELECTION-START', 'SELECTION-TEXT', 'SELF', 'SEND', 'SEND-SQL-STATEMENT', 'SENSITIVE', 'SEPARATE-CONNECTION', 'SEPARATOR-FGCOLOR', 
'SEPARATORS', 'SERIALIZABLE', 'SERIALIZE-HIDDEN', 'SERIALIZE-NAME', 'SERVER', 'SERVER-CONNECTION-BOUND', 'SERVER-CONNECTION-BOUND-REQUEST', 'SERVER-CONNECTION-CONTEXT', 'SERVER-CONNECTION-ID', 'SERVER-OPERATING-MODE', 'SESSION', 'SESSION-ID', 'SET', 'SET-APPL-CONTEXT', 'SET-ATTR-CALL-TYPE', 'SET-ATTRIBUTE-NODE', 'SET-BLUE', 'SET-BLUE-', 'SET-BLUE-V', 'SET-BLUE-VA', 'SET-BLUE-VAL', 'SET-BLUE-VALU', 'SET-BLUE-VALUE', 'SET-BREAK', 'SET-BUFFERS', 'SET-CALLBACK', 'SET-CLIENT', 'SET-COMMIT', 'SET-CONTENTS', 'SET-CURRENT-VALUE', 'SET-DB-CLIENT', 'SET-DYNAMIC', 'SET-EVENT-MANAGER-OPTION', 'SET-GREEN', 'SET-GREEN-', 'SET-GREEN-V', 'SET-GREEN-VA', 'SET-GREEN-VAL', 'SET-GREEN-VALU', 'SET-GREEN-VALUE', 'SET-INPUT-SOURCE', 'SET-OPTION', 'SET-OUTPUT-DESTINATION', 'SET-PARAMETER', 'SET-POINTER-VALUE', 'SET-PROPERTY', 'SET-RED', 'SET-RED-', 'SET-RED-V', 'SET-RED-VA', 'SET-RED-VAL', 'SET-RED-VALU', 'SET-RED-VALUE', 'SET-REPOSITIONED-ROW', 'SET-RGB-VALUE', 'SET-ROLLBACK', 'SET-SELECTION', 'SET-SIZE', 'SET-SORT-ARROW', 'SET-WAIT-STATE', 'SETUSER', 'SETUSERI', 'SETUSERID', 'SHA1-DIGEST', 'SHARE', 'SHARE-', 'SHARE-L', 'SHARE-LO', 'SHARE-LOC', 'SHARE-LOCK', 'SHARED', 'SHOW-IN-TASKBAR', 'SHOW-STAT', 'SHOW-STATS', 'SIDE-LAB', 'SIDE-LABE', 'SIDE-LABEL', 'SIDE-LABEL-H', 'SIDE-LABEL-HA', 'SIDE-LABEL-HAN', 'SIDE-LABEL-HAND', 'SIDE-LABEL-HANDL', 'SIDE-LABEL-HANDLE', 'SIDE-LABELS', 'SIGNATURE', 'SILENT', 'SIMPLE', 'SINGLE', 'SINGLE-RUN', 'SINGLETON', 'SIZE', 'SIZE-C', 'SIZE-CH', 'SIZE-CHA', 'SIZE-CHAR', 'SIZE-CHARS', 'SIZE-P', 'SIZE-PI', 'SIZE-PIX', 'SIZE-PIXE', 'SIZE-PIXEL', 'SIZE-PIXELS', 'SKIP', 'SKIP-DELETED-RECORD', 'SLIDER', 'SMALL-ICON', 'SMALL-TITLE', 'SMALLINT', 'SOME', 'SORT', 'SORT-ASCENDING', 'SORT-NUMBER', 'SOURCE', 'SOURCE-PROCEDURE', 'SPACE', 'SQL', 'SQRT', 'SSL-SERVER-NAME', 'STANDALONE', 'START', 'START-DOCUMENT', 'START-ELEMENT', 'START-MOVE', 'START-RESIZE', 'START-ROW-RESIZE', 'STATE-DETAIL', 'STATIC', 'STATUS', 'STATUS-AREA', 'STATUS-AREA-FONT', 'STDCALL', 'STOP', 
'STOP-AFTER', 'STOP-PARSING', 'STOPPE', 'STOPPED', 'STORED-PROC', 'STORED-PROCE', 'STORED-PROCED', 'STORED-PROCEDU', 'STORED-PROCEDUR', 'STORED-PROCEDURE', 'STREAM', 'STREAM-HANDLE', 'STREAM-IO', 'STRETCH-TO-FIT', 'STRICT', 'STRICT-ENTITY-RESOLUTION', 'STRING', 'STRING-VALUE', 'STRING-XREF', 'SUB-AVE', 'SUB-AVER', 'SUB-AVERA', 'SUB-AVERAG', 'SUB-AVERAGE', 'SUB-COUNT', 'SUB-MAXIMUM', 'SUB-MENU', 'SUB-MIN', 'SUB-MINIMUM', 'SUB-TOTAL', 'SUBSCRIBE', 'SUBST', 'SUBSTI', 'SUBSTIT', 'SUBSTITU', 'SUBSTITUT', 'SUBSTITUTE', 'SUBSTR', 'SUBSTRI', 'SUBSTRIN', 'SUBSTRING', 'SUBTYPE', 'SUM', 'SUM-MAX', 'SUM-MAXI', 'SUM-MAXIM', 'SUM-MAXIMU', 'SUPER', 'SUPER-PROCEDURES', 'SUPPRESS-NAMESPACE-PROCESSING', 'SUPPRESS-W', 'SUPPRESS-WA', 'SUPPRESS-WAR', 'SUPPRESS-WARN', 'SUPPRESS-WARNI', 'SUPPRESS-WARNIN', 'SUPPRESS-WARNING', 'SUPPRESS-WARNINGS', 'SYMMETRIC-ENCRYPTION-ALGORITHM', 'SYMMETRIC-ENCRYPTION-IV', 'SYMMETRIC-ENCRYPTION-KEY', 'SYMMETRIC-SUPPORT', 'SYSTEM-ALERT', 'SYSTEM-ALERT-', 'SYSTEM-ALERT-B', 'SYSTEM-ALERT-BO', 'SYSTEM-ALERT-BOX', 'SYSTEM-ALERT-BOXE', 'SYSTEM-ALERT-BOXES', 'SYSTEM-DIALOG', 'SYSTEM-HELP', 'SYSTEM-ID', 'TAB-POSITION', 'TAB-STOP', 'TABLE', 'TABLE-HANDLE', 'TABLE-NUMBER', 'TABLE-SCAN', 'TARGET', 'TARGET-PROCEDURE', 'TEMP-DIR', 'TEMP-DIRE', 'TEMP-DIREC', 'TEMP-DIRECT', 'TEMP-DIRECTO', 'TEMP-DIRECTOR', 'TEMP-DIRECTORY', 'TEMP-TABLE', 'TEMP-TABLE-PREPARE', 'TERM', 'TERMI', 'TERMIN', 'TERMINA', 'TERMINAL', 'TERMINATE', 'TEXT', 'TEXT-CURSOR', 'TEXT-SEG-GROW', 'TEXT-SELECTED', 'THEN', 'THIS-OBJECT', 'THIS-PROCEDURE', 'THREAD-SAFE', 'THREE-D', 'THROUGH', 'THROW', 'THRU', 'TIC-MARKS', 'TIME', 'TIME-SOURCE', 'TITLE', 'TITLE-BGC', 'TITLE-BGCO', 'TITLE-BGCOL', 'TITLE-BGCOLO', 'TITLE-BGCOLOR', 'TITLE-DC', 'TITLE-DCO', 'TITLE-DCOL', 'TITLE-DCOLO', 'TITLE-DCOLOR', 'TITLE-FGC', 'TITLE-FGCO', 'TITLE-FGCOL', 'TITLE-FGCOLO', 'TITLE-FGCOLOR', 'TITLE-FO', 'TITLE-FON', 'TITLE-FONT', 'TO', 'TO-ROWID', 'TODAY', 'TOGGLE-BOX', 'TOOLTIP', 'TOOLTIPS', 'TOP-NAV-QUERY', 'TOP-ONLY', 'TOPIC', 
'TOTAL', 'TRAILING', 'TRANS', 'TRANS-INIT-PROCEDURE', 'TRANSACTION', 'TRANSACTION-MODE', 'TRANSPARENT', 'TRIGGER', 'TRIGGERS', 'TRIM', 'TRUE', 'TRUNC', 'TRUNCA', 'TRUNCAT', 'TRUNCATE', 'TYPE', 'TYPE-OF', 'UNBOX', 'UNBUFF', 'UNBUFFE', 'UNBUFFER', 'UNBUFFERE', 'UNBUFFERED', 'UNDERL', 'UNDERLI', 'UNDERLIN', 'UNDERLINE', 'UNDO', 'UNFORM', 'UNFORMA', 'UNFORMAT', 'UNFORMATT', 'UNFORMATTE', 'UNFORMATTED', 'UNION', 'UNIQUE', 'UNIQUE-ID', 'UNIQUE-MATCH', 'UNIX', 'UNLESS-HIDDEN', 'UNLOAD', 'UNSIGNED-LONG', 'UNSUBSCRIBE', 'UP', 'UPDATE', 'UPDATE-ATTRIBUTE', 'URL', 'URL-DECODE', 'URL-ENCODE', 'URL-PASSWORD', 'URL-USERID', 'USE', 'USE-DICT-EXPS', 'USE-FILENAME', 'USE-INDEX', 'USE-REVVIDEO', 'USE-TEXT', 'USE-UNDERLINE', 'USE-WIDGET-POOL', 'USER', 'USER-ID', 'USERID', 'USING', 'V6DISPLAY', 'V6FRAME', 'VALID-EVENT', 'VALID-HANDLE', 'VALID-OBJECT', 'VALIDATE', 'VALIDATE-EXPRESSION', 'VALIDATE-MESSAGE', 'VALIDATE-SEAL', 'VALIDATION-ENABLED', 'VALUE', 'VALUE-CHANGED', 'VALUES', 'VAR', 'VARI', 'VARIA', 'VARIAB', 'VARIABL', 'VARIABLE', 'VERBOSE', 'VERSION', 'VERT', 'VERTI', 'VERTIC', 'VERTICA', 'VERTICAL', 'VIEW', 'VIEW-AS', 'VIEW-FIRST-COLUMN-ON-REOPEN', 'VIRTUAL-HEIGHT', 'VIRTUAL-HEIGHT-', 'VIRTUAL-HEIGHT-C', 'VIRTUAL-HEIGHT-CH', 'VIRTUAL-HEIGHT-CHA', 'VIRTUAL-HEIGHT-CHAR', 'VIRTUAL-HEIGHT-CHARS', 'VIRTUAL-HEIGHT-P', 'VIRTUAL-HEIGHT-PI', 'VIRTUAL-HEIGHT-PIX', 'VIRTUAL-HEIGHT-PIXE', 'VIRTUAL-HEIGHT-PIXEL', 'VIRTUAL-HEIGHT-PIXELS', 'VIRTUAL-WIDTH', 'VIRTUAL-WIDTH-', 'VIRTUAL-WIDTH-C', 'VIRTUAL-WIDTH-CH', 'VIRTUAL-WIDTH-CHA', 'VIRTUAL-WIDTH-CHAR', 'VIRTUAL-WIDTH-CHARS', 'VIRTUAL-WIDTH-P', 'VIRTUAL-WIDTH-PI', 'VIRTUAL-WIDTH-PIX', 'VIRTUAL-WIDTH-PIXE', 'VIRTUAL-WIDTH-PIXEL', 'VIRTUAL-WIDTH-PIXELS', 'VISIBLE', 'VOID', 'WAIT', 'WAIT-FOR', 'WARNING', 'WEB-CONTEXT', 'WEEKDAY', 'WHEN', 'WHERE', 'WHILE', 'WIDGET', 'WIDGET-E', 'WIDGET-EN', 'WIDGET-ENT', 'WIDGET-ENTE', 'WIDGET-ENTER', 'WIDGET-ID', 'WIDGET-L', 'WIDGET-LE', 'WIDGET-LEA', 'WIDGET-LEAV', 'WIDGET-LEAVE', 'WIDGET-POOL', 'WIDTH', 
'WIDTH-', 'WIDTH-C', 'WIDTH-CH', 'WIDTH-CHA', 'WIDTH-CHAR', 'WIDTH-CHARS', 'WIDTH-P', 'WIDTH-PI', 'WIDTH-PIX', 'WIDTH-PIXE', 'WIDTH-PIXEL', 'WIDTH-PIXELS', 'WINDOW', 'WINDOW-MAXIM', 'WINDOW-MAXIMI', 'WINDOW-MAXIMIZ', 'WINDOW-MAXIMIZE', 'WINDOW-MAXIMIZED', 'WINDOW-MINIM', 'WINDOW-MINIMI', 'WINDOW-MINIMIZ', 'WINDOW-MINIMIZE', 'WINDOW-MINIMIZED', 'WINDOW-NAME', 'WINDOW-NORMAL', 'WINDOW-STA', 'WINDOW-STAT', 'WINDOW-STATE', 'WINDOW-SYSTEM', 'WITH', 'WORD-INDEX', 'WORD-WRAP', 'WORK-AREA-HEIGHT-PIXELS', 'WORK-AREA-WIDTH-PIXELS', 'WORK-AREA-X', 'WORK-AREA-Y', 'WORK-TAB', 'WORK-TABL', 'WORK-TABLE', 'WORKFILE', 'WRITE', 'WRITE-CDATA', 'WRITE-CHARACTERS', 'WRITE-COMMENT', 'WRITE-DATA-ELEMENT', 'WRITE-EMPTY-ELEMENT', 'WRITE-ENTITY-REF', 'WRITE-EXTERNAL-DTD', 'WRITE-FRAGMENT', 'WRITE-JSON', 'WRITE-MESSAGE', 'WRITE-PROCESSING-INSTRUCTION', 'WRITE-STATUS', 'WRITE-XML', 'WRITE-XMLSCHEMA', 'X', 'X-OF', 'XCODE', 'XML-DATA-TYPE', 'XML-ENTITY-EXPANSION-LIMIT', 'XML-NODE-TYPE', 'XML-SCHEMA-PATH', 'XML-STRICT-ENTITY-RESOLUTION', 'XML-SUPPRESS-NAMESPACE-PROCESSING', 'XREF', 'XREF-XML', 'Y', 'Y-OF', 'YEAR', 'YEAR-OFFSET', 'YES', 'YES-NO', 'YES-NO-CANCEL' )
49,398
Python
17.992311
70
0.52405
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/haskell.py
""" pygments.lexers.haskell ~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Haskell and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \ default, include, inherit, line_re from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Whitespace from pygments import unistring as uni __all__ = ['HaskellLexer', 'HspecLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer', 'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer', 'LiterateCryptolLexer', 'KokaLexer'] class HaskellLexer(RegexLexer): """ A Haskell lexer based on the lexemes defined in the Haskell 98 Report. .. versionadded:: 0.8 """ name = 'Haskell' url = 'https://www.haskell.org/' aliases = ['haskell', 'hs'] filenames = ['*.hs'] mimetypes = ['text/x-haskell'] reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else', 'family', 'if', 'in', 'infix[lr]?', 'instance', 'let', 'newtype', 'of', 'then', 'type', 'where', '_') ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') tokens = { 'root': [ # Whitespace: (r'\s+', Whitespace), # (r'--\s*|.*$', Comment.Doc), (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), (r'\{-', Comment.Multiline, 'comment'), # Lexemes: # Identifiers (r'\bimport\b', Keyword.Reserved, 'import'), (r'\bmodule\b', Keyword.Reserved, 'module'), (r'\berror\b', Name.Exception), (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), (r"'[^\\]'", String.Char), # this has to come before the TH quote (r'^[_' + uni.Ll + r'][\w\']*', Name.Function), (r"'?[_" + uni.Ll + r"][\w']*", Name), (r"('')?[" + uni.Lu + r"][\w\']*", Keyword.Type), (r"(')[" + uni.Lu + r"][\w\']*", Keyword.Type), (r"(')\[[^\]]*\]", Keyword.Type), # tuples and lists 
get special treatment in GHC (r"(')\([^)]*\)", Keyword.Type), # .. (r"(')[:!#$%&*+.\\/<=>?@^|~-]+", Keyword.Type), # promoted type operators # Operators (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators # Numbers (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*_*[pP][+-]?\d(_*\d)*', Number.Float), (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*\.[\da-fA-F](_*[\da-fA-F])*' r'(_*[pP][+-]?\d(_*\d)*)?', Number.Float), (r'\d(_*\d)*_*[eE][+-]?\d(_*\d)*', Number.Float), (r'\d(_*\d)*\.\d(_*\d)*(_*[eE][+-]?\d(_*\d)*)?', Number.Float), (r'0[bB]_*[01](_*[01])*', Number.Bin), (r'0[oO]_*[0-7](_*[0-7])*', Number.Oct), (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*', Number.Hex), (r'\d(_*\d)*', Number.Integer), # Character/String Literals (r"'", String.Char, 'character'), (r'"', String, 'string'), # Special (r'\[\]', Keyword.Type), (r'\(\)', Name.Builtin), (r'[][(),;`{}]', Punctuation), ], 'import': [ # Import statements (r'\s+', Whitespace), (r'"', String, 'string'), # after "funclist" state (r'\)', Punctuation, '#pop'), (r'qualified\b', Keyword), # import X as Y (r'([' + uni.Lu + r'][\w.]*)(\s+)(as)(\s+)([' + uni.Lu + r'][\w.]*)', bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Name), '#pop'), # import X hiding (functions) (r'([' + uni.Lu + r'][\w.]*)(\s+)(hiding)(\s+)(\()', bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Punctuation), 'funclist'), # import X (functions) (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()', bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'), # import X (r'[\w.]+', Name.Namespace, '#pop'), ], 'module': [ (r'\s+', Whitespace), (r'([' + uni.Lu + r'][\w.]*)(\s+)(\()', bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'), (r'[' + uni.Lu + r'][\w.]*', Name.Namespace, '#pop'), ], 'funclist': [ (r'\s+', Whitespace), (r'[' + uni.Lu + 
r']\w*', Keyword.Type), (r'(_[\w\']+|[' + uni.Ll + r'][\w\']*)', Name.Function), (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), (r'\{-', Comment.Multiline, 'comment'), (r',', Punctuation), (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # (HACK, but it makes sense to push two instances, believe me) (r'\(', Punctuation, ('funclist', 'funclist')), (r'\)', Punctuation, '#pop:2'), ], # NOTE: the next four states are shared in the AgdaLexer; make sure # any change is compatible with Agda as well or copy over and change 'comment': [ # Multiline Comments (r'[^-{}]+', Comment.Multiline), (r'\{-', Comment.Multiline, '#push'), (r'-\}', Comment.Multiline, '#pop'), (r'[-{}]', Comment.Multiline), ], 'character': [ # Allows multi-chars, incorrectly. (r"[^\\']'", String.Char, '#pop'), (r"\\", String.Escape, 'escape'), ("'", String.Char, '#pop'), ], 'string': [ (r'[^\\"]+', String), (r"\\", String.Escape, 'escape'), ('"', String, '#pop'), ], 'escape': [ (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), (r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'), ('|'.join(ascii), String.Escape, '#pop'), (r'o[0-7]+', String.Escape, '#pop'), (r'x[\da-fA-F]+', String.Escape, '#pop'), (r'\d+', String.Escape, '#pop'), (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'), ], } class HspecLexer(HaskellLexer): """ A Haskell lexer with support for Hspec constructs. .. versionadded:: 2.4.0 """ name = 'Hspec' aliases = ['hspec'] filenames = ['*Spec.hs'] mimetypes = [] tokens = { 'root': [ (r'(it)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)), (r'(describe)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)), (r'(context)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)), inherit, ], } class IdrisLexer(RegexLexer): """ A lexer for the dependently typed programming language Idris. Based on the Haskell and Agda Lexer. .. 
versionadded:: 2.0 """ name = 'Idris' url = 'https://www.idris-lang.org/' aliases = ['idris', 'idr'] filenames = ['*.idr'] mimetypes = ['text/x-idris'] reserved = ('case', 'class', 'data', 'default', 'using', 'do', 'else', 'if', 'in', 'infix[lr]?', 'instance', 'rewrite', 'auto', 'namespace', 'codata', 'mutual', 'private', 'public', 'abstract', 'total', 'partial', 'interface', 'implementation', 'export', 'covering', 'constructor', 'let', 'proof', 'of', 'then', 'static', 'where', '_', 'with', 'pattern', 'term', 'syntax', 'prefix', 'postulate', 'parameters', 'record', 'dsl', 'impossible', 'implicit', 'tactics', 'intros', 'intro', 'compute', 'refine', 'exact', 'trivial') ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') directives = ('lib', 'link', 'flag', 'include', 'hide', 'freeze', 'access', 'default', 'logging', 'dynamic', 'name', 'error_handlers', 'language') tokens = { 'root': [ # Comments (r'^(\s*)(%%(%s))' % '|'.join(directives), bygroups(Whitespace, Keyword.Reserved)), (r'(\s*)(--(?![!#$%&*+./<=>?@^|_~:\\]).*?)$', bygroups(Whitespace, Comment.Single)), (r'(\s*)(\|{3}.*?)$', bygroups(Whitespace, Comment.Single)), (r'(\s*)(\{-)', bygroups(Whitespace, Comment.Multiline), 'comment'), # Declaration (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)', bygroups(Whitespace, Name.Function, Whitespace, Operator.Word, Whitespace)), # Identifiers (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Whitespace), 'module'), (r"('')?[A-Z][\w\']*", Keyword.Type), (r'[a-z][\w\']*', Text), # Special Symbols (r'(<-|::|->|=>|=)', Operator.Word), # specials (r'([(){}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials # Numbers (r'\d+[eE][+-]?\d+', Number.Float), (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), (r'0[xX][\da-fA-F]+', Number.Hex), (r'\d+', Number.Integer), # Strings (r"'", 
String.Char, 'character'), (r'"', String, 'string'), (r'[^\s(){}]+', Text), (r'\s+?', Whitespace), # Whitespace ], 'module': [ (r'\s+', Whitespace), (r'([A-Z][\w.]*)(\s+)(\()', bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'), (r'[A-Z][\w.]*', Name.Namespace, '#pop'), ], 'funclist': [ (r'\s+', Whitespace), (r'[A-Z]\w*', Keyword.Type), (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), (r'--.*$', Comment.Single), (r'\{-', Comment.Multiline, 'comment'), (r',', Punctuation), (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # (HACK, but it makes sense to push two instances, believe me) (r'\(', Punctuation, ('funclist', 'funclist')), (r'\)', Punctuation, '#pop:2'), ], # NOTE: the next four states are shared in the AgdaLexer; make sure # any change is compatible with Agda as well or copy over and change 'comment': [ # Multiline Comments (r'[^-{}]+', Comment.Multiline), (r'\{-', Comment.Multiline, '#push'), (r'-\}', Comment.Multiline, '#pop'), (r'[-{}]', Comment.Multiline), ], 'character': [ # Allows multi-chars, incorrectly. (r"[^\\']", String.Char), (r"\\", String.Escape, 'escape'), ("'", String.Char, '#pop'), ], 'string': [ (r'[^\\"]+', String), (r"\\", String.Escape, 'escape'), ('"', String, '#pop'), ], 'escape': [ (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), (r'\^[][A-Z@^_]', String.Escape, '#pop'), ('|'.join(ascii), String.Escape, '#pop'), (r'o[0-7]+', String.Escape, '#pop'), (r'x[\da-fA-F]+', String.Escape, '#pop'), (r'\d+', String.Escape, '#pop'), (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop') ], } class AgdaLexer(RegexLexer): """ For the Agda dependently typed functional programming language and proof assistant. .. 
versionadded:: 2.0 """ name = 'Agda' url = 'http://wiki.portal.chalmers.se/agda/pmwiki.php' aliases = ['agda'] filenames = ['*.agda'] mimetypes = ['text/x-agda'] reserved = ( 'abstract', 'codata', 'coinductive', 'constructor', 'data', 'do', 'eta-equality', 'field', 'forall', 'hiding', 'in', 'inductive', 'infix', 'infixl', 'infixr', 'instance', 'interleaved', 'let', 'macro', 'mutual', 'no-eta-equality', 'open', 'overlap', 'pattern', 'postulate', 'primitive', 'private', 'quote', 'quoteTerm', 'record', 'renaming', 'rewrite', 'syntax', 'tactic', 'unquote', 'unquoteDecl', 'unquoteDef', 'using', 'variable', 'where', 'with', ) tokens = { 'root': [ # Declaration (r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)', bygroups(Whitespace, Name.Function, Whitespace, Operator.Word, Whitespace)), # Comments (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), (r'\{-', Comment.Multiline, 'comment'), # Holes (r'\{!', Comment.Directive, 'hole'), # Lexemes: # Identifiers (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Whitespace), 'module'), (r'\b(Set|Prop)[\u2080-\u2089]*\b', Keyword.Type), # Special Symbols (r'(\(|\)|\{|\})', Operator), (r'(\.{1,3}|\||\u03BB|\u2200|\u2192|:|=|->)', Operator.Word), # Numbers (r'\d+[eE][+-]?\d+', Number.Float), (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), (r'0[xX][\da-fA-F]+', Number.Hex), (r'\d+', Number.Integer), # Strings (r"'", String.Char, 'character'), (r'"', String, 'string'), (r'[^\s(){}]+', Text), (r'\s+?', Whitespace), # Whitespace ], 'hole': [ # Holes (r'[^!{}]+', Comment.Directive), (r'\{!', Comment.Directive, '#push'), (r'!\}', Comment.Directive, '#pop'), (r'[!{}]', Comment.Directive), ], 'module': [ (r'\{-', Comment.Multiline, 'comment'), (r'[a-zA-Z][\w.\']*', Name, '#pop'), (r'[\W0-9_]+', Text) ], 'comment': HaskellLexer.tokens['comment'], 'character': HaskellLexer.tokens['character'], 'string': HaskellLexer.tokens['string'], 'escape': HaskellLexer.tokens['escape'] } class 
CryptolLexer(RegexLexer): """ FIXME: A Cryptol2 lexer based on the lexemes defined in the Haskell 98 Report. .. versionadded:: 2.0 """ name = 'Cryptol' aliases = ['cryptol', 'cry'] filenames = ['*.cry'] mimetypes = ['text/x-cryptol'] reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else', 'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2', 'max', 'min', 'module', 'newtype', 'pragma', 'property', 'then', 'type', 'where', 'width') ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK', 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE', 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN', 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL') tokens = { 'root': [ # Whitespace: (r'\s+', Whitespace), # (r'--\s*|.*$', Comment.Doc), (r'//.*$', Comment.Single), (r'/\*', Comment.Multiline, 'comment'), # Lexemes: # Identifiers (r'\bimport\b', Keyword.Reserved, 'import'), (r'\bmodule\b', Keyword.Reserved, 'module'), (r'\berror\b', Name.Exception), (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), (r'^[_a-z][\w\']*', Name.Function), (r"'?[_a-z][\w']*", Name), (r"('')?[A-Z][\w\']*", Keyword.Type), # Operators (r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials (r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators # Numbers (r'\d+[eE][+-]?\d+', Number.Float), (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), (r'0[oO][0-7]+', Number.Oct), (r'0[xX][\da-fA-F]+', Number.Hex), (r'\d+', Number.Integer), # Character/String Literals (r"'", String.Char, 'character'), (r'"', String, 'string'), # Special (r'\[\]', Keyword.Type), (r'\(\)', Name.Builtin), (r'[][(),;`{}]', Punctuation), ], 'import': [ # Import statements (r'\s+', Whitespace), (r'"', String, 'string'), # after "funclist" state (r'\)', Punctuation, '#pop'), (r'qualified\b', Keyword), # import X as Y (r'([A-Z][\w.]*)(\s+)(as)(\s+)([A-Z][\w.]*)', 
bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Name), '#pop'), # import X hiding (functions) (r'([A-Z][\w.]*)(\s+)(hiding)(\s+)(\()', bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Punctuation), 'funclist'), # import X (functions) (r'([A-Z][\w.]*)(\s+)(\()', bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'), # import X (r'[\w.]+', Name.Namespace, '#pop'), ], 'module': [ (r'\s+', Whitespace), (r'([A-Z][\w.]*)(\s+)(\()', bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'), (r'[A-Z][\w.]*', Name.Namespace, '#pop'), ], 'funclist': [ (r'\s+', Whitespace), (r'[A-Z]\w*', Keyword.Type), (r'(_[\w\']+|[a-z][\w\']*)', Name.Function), # TODO: these don't match the comments in docs, remove. # (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single), # (r'{-', Comment.Multiline, 'comment'), (r',', Punctuation), (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # (HACK, but it makes sense to push two instances, believe me) (r'\(', Punctuation, ('funclist', 'funclist')), (r'\)', Punctuation, '#pop:2'), ], 'comment': [ # Multiline Comments (r'[^/*]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'character': [ # Allows multi-chars, incorrectly. 
(r"[^\\']'", String.Char, '#pop'), (r"\\", String.Escape, 'escape'), ("'", String.Char, '#pop'), ], 'string': [ (r'[^\\"]+', String), (r"\\", String.Escape, 'escape'), ('"', String, '#pop'), ], 'escape': [ (r'[abfnrtv"\'&\\]', String.Escape, '#pop'), (r'\^[][A-Z@^_]', String.Escape, '#pop'), ('|'.join(ascii), String.Escape, '#pop'), (r'o[0-7]+', String.Escape, '#pop'), (r'x[\da-fA-F]+', String.Escape, '#pop'), (r'\d+', String.Escape, '#pop'), (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'), ], } EXTRA_KEYWORDS = {'join', 'split', 'reverse', 'transpose', 'width', 'length', 'tail', '<<', '>>', '<<<', '>>>', 'const', 'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error', 'trace'} def get_tokens_unprocessed(self, text): stack = ['root'] for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text, stack): if token is Name and value in self.EXTRA_KEYWORDS: yield index, Name.Builtin, value else: yield index, token, value class LiterateLexer(Lexer): """ Base class for lexers of literate file formats based on LaTeX or Bird-style (prefixing each code line with ">"). Additional options accepted: `litstyle` If given, must be ``"bird"`` or ``"latex"``. If not given, the style is autodetected: if the first non-whitespace character in the source is a backslash or percent character, LaTeX is assumed, else Bird. 
""" bird_re = re.compile(r'(>[ \t]*)(.*\n)') def __init__(self, baselexer, **options): self.baselexer = baselexer Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): style = self.options.get('litstyle') if style is None: style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird' code = '' insertions = [] if style == 'bird': # bird-style for match in line_re.finditer(text): line = match.group() m = self.bird_re.match(line) if m: insertions.append((len(code), [(0, Comment.Special, m.group(1))])) code += m.group(2) else: insertions.append((len(code), [(0, Text, line)])) else: # latex-style from pygments.lexers.markup import TexLexer lxlexer = TexLexer(**self.options) codelines = 0 latex = '' for match in line_re.finditer(text): line = match.group() if codelines: if line.lstrip().startswith('\\end{code}'): codelines = 0 latex += line else: code += line elif line.lstrip().startswith('\\begin{code}'): codelines = 1 latex += line insertions.append((len(code), list(lxlexer.get_tokens_unprocessed(latex)))) latex = '' else: latex += line insertions.append((len(code), list(lxlexer.get_tokens_unprocessed(latex)))) yield from do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)) class LiterateHaskellLexer(LiterateLexer): """ For Literate Haskell (Bird-style or LaTeX) source. Additional options accepted: `litstyle` If given, must be ``"bird"`` or ``"latex"``. If not given, the style is autodetected: if the first non-whitespace character in the source is a backslash or percent character, LaTeX is assumed, else Bird. .. versionadded:: 0.9 """ name = 'Literate Haskell' aliases = ['literate-haskell', 'lhaskell', 'lhs'] filenames = ['*.lhs'] mimetypes = ['text/x-literate-haskell'] def __init__(self, **options): hslexer = HaskellLexer(**options) LiterateLexer.__init__(self, hslexer, **options) class LiterateIdrisLexer(LiterateLexer): """ For Literate Idris (Bird-style or LaTeX) source. 
Additional options accepted: `litstyle` If given, must be ``"bird"`` or ``"latex"``. If not given, the style is autodetected: if the first non-whitespace character in the source is a backslash or percent character, LaTeX is assumed, else Bird. .. versionadded:: 2.0 """ name = 'Literate Idris' aliases = ['literate-idris', 'lidris', 'lidr'] filenames = ['*.lidr'] mimetypes = ['text/x-literate-idris'] def __init__(self, **options): hslexer = IdrisLexer(**options) LiterateLexer.__init__(self, hslexer, **options) class LiterateAgdaLexer(LiterateLexer): """ For Literate Agda source. Additional options accepted: `litstyle` If given, must be ``"bird"`` or ``"latex"``. If not given, the style is autodetected: if the first non-whitespace character in the source is a backslash or percent character, LaTeX is assumed, else Bird. .. versionadded:: 2.0 """ name = 'Literate Agda' aliases = ['literate-agda', 'lagda'] filenames = ['*.lagda'] mimetypes = ['text/x-literate-agda'] def __init__(self, **options): agdalexer = AgdaLexer(**options) LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options) class LiterateCryptolLexer(LiterateLexer): """ For Literate Cryptol (Bird-style or LaTeX) source. Additional options accepted: `litstyle` If given, must be ``"bird"`` or ``"latex"``. If not given, the style is autodetected: if the first non-whitespace character in the source is a backslash or percent character, LaTeX is assumed, else Bird. .. versionadded:: 2.0 """ name = 'Literate Cryptol' aliases = ['literate-cryptol', 'lcryptol', 'lcry'] filenames = ['*.lcry'] mimetypes = ['text/x-literate-cryptol'] def __init__(self, **options): crylexer = CryptolLexer(**options) LiterateLexer.__init__(self, crylexer, **options) class KokaLexer(RegexLexer): """ Lexer for the Koka language. .. 
versionadded:: 1.6 """ name = 'Koka' url = 'https://koka-lang.github.io/koka/doc/index.html' aliases = ['koka'] filenames = ['*.kk', '*.kki'] mimetypes = ['text/x-koka'] keywords = [ 'infix', 'infixr', 'infixl', 'type', 'cotype', 'rectype', 'alias', 'struct', 'con', 'fun', 'function', 'val', 'var', 'external', 'if', 'then', 'else', 'elif', 'return', 'match', 'private', 'public', 'private', 'module', 'import', 'as', 'include', 'inline', 'rec', 'try', 'yield', 'enum', 'interface', 'instance', ] # keywords that are followed by a type typeStartKeywords = [ 'type', 'cotype', 'rectype', 'alias', 'struct', 'enum', ] # keywords valid in a type typekeywords = [ 'forall', 'exists', 'some', 'with', ] # builtin names and special names builtin = [ 'for', 'while', 'repeat', 'foreach', 'foreach-indexed', 'error', 'catch', 'finally', 'cs', 'js', 'file', 'ref', 'assigned', ] # symbols that can be in an operator symbols = r'[$%&*+@!/\\^~=.:\-?|<>]+' # symbol boundary: an operator keyword should not be followed by any of these sboundary = '(?!' + symbols + ')' # name boundary: a keyword should not be followed by any of these boundary = r'(?![\w/])' # koka token abstractions tokenType = Name.Attribute tokenTypeDef = Name.Class tokenConstructor = Generic.Emph # main lexer tokens = { 'root': [ include('whitespace'), # go into type mode (r'::?' 
+ sboundary, tokenType, 'type'), (r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef), 'alias-type'), (r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef), 'struct-type'), ((r'(%s)' % '|'.join(typeStartKeywords)) + r'(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef), 'type'), # special sequences of tokens (we use ?: for non-capturing group as # required by 'bygroups') (r'(module)(\s+)(interface(?=\s))?(\s+)?((?:[a-z]\w*/)*[a-z]\w*)', bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Namespace)), (r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)' r'(?:(\s*)(=)(\s*)(qualified)?(\s*)' r'((?:[a-z]\w*/)*[a-z]\w*))?', bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Name.Namespace)), (r'^(public|private)?(\s+)?(function|fun|val)' r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))', bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function)), (r'^(?:(public|private)(?=\s+external))?((?<!^)\s+)?(external)(\s+)(inline(?=\s))?(\s+)?' r'([a-z]\w*|\((?:' + symbols + r'|/)\))', bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Name.Function)), # keywords (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type), (r'(%s)' % '|'.join(keywords) + boundary, Keyword), (r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo), (r'::?|:=|\->|[=.]' + sboundary, Keyword), # names (r'((?:[a-z]\w*/)*)([A-Z]\w*)', bygroups(Name.Namespace, tokenConstructor)), (r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)), (r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))', bygroups(Name.Namespace, Name)), (r'_\w*', Name.Variable), # literal string (r'@"', String.Double, 'litstring'), # operators (symbols + "|/(?![*/])", Operator), (r'`', Operator), (r'[{}()\[\];,]', Punctuation), # literals. 
No check for literal characters with len > 1 (r'[0-9]+\.[0-9]+([eE][\-+]?[0-9]+)?', Number.Float), (r'0[xX][0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r"'", String.Char, 'char'), (r'"', String.Double, 'string'), ], # type started by alias 'alias-type': [ (r'=', Keyword), include('type') ], # type started by struct 'struct-type': [ (r'(?=\((?!,*\)))', Punctuation, '#pop'), include('type') ], # type started by colon 'type': [ (r'[(\[<]', tokenType, 'type-nested'), include('type-content') ], # type nested in brackets: can contain parameters, comma etc. 'type-nested': [ (r'[)\]>]', tokenType, '#pop'), (r'[(\[<]', tokenType, 'type-nested'), (r',', tokenType), (r'([a-z]\w*)(\s*)(:)(?!:)', bygroups(Name, Whitespace, tokenType)), # parameter name include('type-content') ], # shared contents of a type 'type-content': [ include('whitespace'), # keywords (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword), (r'(?=((%s)' % '|'.join(keywords) + boundary + '))', Keyword, '#pop'), # need to match because names overlap... 
# kinds (r'[EPHVX]' + boundary, tokenType), # type names (r'[a-z][0-9]*(?![\w/])', tokenType), (r'_\w*', tokenType.Variable), # Generic.Emph (r'((?:[a-z]\w*/)*)([A-Z]\w*)', bygroups(Name.Namespace, tokenType)), (r'((?:[a-z]\w*/)*)([a-z]\w+)', bygroups(Name.Namespace, tokenType)), # type keyword operators (r'::|->|[.:|]', tokenType), # catchall default('#pop') ], # comments and literals 'whitespace': [ (r'(\n\s*)(#.*)$', bygroups(Whitespace, Comment.Preproc)), (r'\s+', Whitespace), (r'/\*', Comment.Multiline, 'comment'), (r'//.*$', Comment.Single) ], 'comment': [ (r'[^/*]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'litstring': [ (r'[^"]+', String.Double), (r'""', String.Escape), (r'"', String.Double, '#pop'), ], 'string': [ (r'[^\\"\n]+', String.Double), include('escape-sequence'), (r'["\n]', String.Double, '#pop'), ], 'char': [ (r'[^\\\'\n]+', String.Char), include('escape-sequence'), (r'[\'\n]', String.Char, '#pop'), ], 'escape-sequence': [ (r'\\[nrt\\"\']', String.Escape), (r'\\x[0-9a-fA-F]{2}', String.Escape), (r'\\u[0-9a-fA-F]{4}', String.Escape), # Yes, \U literals are 6 hex digits. (r'\\U[0-9a-fA-F]{6}', String.Escape) ] }
32,898
Python
36.728211
101
0.449875
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/resource.py
""" pygments.lexers.resource ~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for resource definition files. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Comment, String, Number, Operator, Text, \ Keyword, Name __all__ = ['ResourceLexer'] class ResourceLexer(RegexLexer): """Lexer for `ICU Resource bundles <http://userguide.icu-project.org/locale/resources>`_. .. versionadded:: 2.0 """ name = 'ResourceBundle' aliases = ['resourcebundle', 'resource'] filenames = [] _types = (':table', ':array', ':string', ':bin', ':import', ':intvector', ':int', ':alias') flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ (r'//.*?$', Comment), (r'"', String, 'string'), (r'-?\d+', Number.Integer), (r'[,{}]', Operator), (r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types), bygroups(Name, Text, Keyword)), (r'\s+', Text), (words(_types), Keyword), ], 'string': [ (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|' r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String), (r'\{', String.Escape, 'msgname'), (r'"', String, '#pop') ], 'msgname': [ (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message')) ], 'message': [ (r'\{', String.Escape, 'msgname'), (r'\}', String.Escape, '#pop'), (r'(,)(\s*)([a-z]+)(\s*\})', bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'), (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)', bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, String.Escape, Operator.Word, String.Escape, Operator, String.Escape, Number.Integer, String.Escape), 'choice'), (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)', bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, String.Escape), 'choice'), (r'\s+', String.Escape) ], 'choice': [ (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)', bygroups(Operator, Number.Integer, String.Escape), 'message'), (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, 
String.Escape), 'str'), (r'\}', String.Escape, ('#pop', '#pop')), (r'\s+', String.Escape) ], 'str': [ (r'\}', String.Escape, '#pop'), (r'\{', String.Escape, 'msgname'), (r'[^{}]+', String) ] } def analyse_text(text): if text.startswith('root:table'): return 1.0
2,902
Python
33.152941
83
0.454514
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ada.py
""" pygments.lexers.ada ~~~~~~~~~~~~~~~~~~~ Lexers for Ada family languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, words, using, this, \ default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation from pygments.lexers._ada_builtins import KEYWORD_LIST, BUILTIN_LIST __all__ = ['AdaLexer'] class AdaLexer(RegexLexer): """ For Ada source code. .. versionadded:: 1.3 """ name = 'Ada' aliases = ['ada', 'ada95', 'ada2005'] filenames = ['*.adb', '*.ads', '*.ada'] mimetypes = ['text/x-ada'] flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ (r'[^\S\n]+', Text), (r'--.*?\n', Comment.Single), (r'[^\S\n]+', Text), (r'function|procedure|entry', Keyword.Declaration, 'subprogram'), (r'(subtype|type)(\s+)(\w+)', bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), (r'task|protected', Keyword.Declaration), (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)), (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'), (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text, Comment.Preproc)), (r'(true|false|null)\b', Keyword.Constant), # builtin types (words(BUILTIN_LIST, suffix=r'\b'), Keyword.Type), (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word), (r'generic|private', Keyword.Declaration), (r'package', Keyword.Declaration, 'package'), (r'array\b', Keyword.Reserved, 'array_def'), (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), (r'(\w+)(\s*)(:)(\s*)(constant)', bygroups(Name.Constant, Text, Punctuation, Text, Keyword.Reserved)), (r'<<\w+>>', Name.Label), (r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)', bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)), # keywords (words(KEYWORD_LIST, prefix=r'\b', suffix=r'\b'), Keyword.Reserved), (r'"[^"]*"', String), include('attribute'), include('numbers'), (r"'[^']'", String.Character), 
(r'(\w+)(\s*|[(,])', bygroups(Name, using(this))), (r"(<>|=>|:=|@|[\[\]]|[()|:;,.'])", Punctuation), (r'[*<>+=/&-]', Operator), (r'\n+', Text), ], 'numbers': [ (r'[0-9_]+#[0-9a-f_\.]+#', Number.Hex), (r'[0-9_]+\.[0-9_]*', Number.Float), (r'[0-9_]+', Number.Integer), ], 'attribute': [ (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)), ], 'subprogram': [ (r'\(', Punctuation, ('#pop', 'formal_part')), (r';', Punctuation, '#pop'), (r'is\b', Keyword.Reserved, '#pop'), (r'"[^"]+"|\w+', Name.Function), include('root'), ], 'end': [ ('(if|case|record|loop|select)', Keyword.Reserved), (r'"[^"]+"|[\w.]+', Name.Function), (r'\s+', Text), (';', Punctuation, '#pop'), ], 'type_def': [ (r';', Punctuation, '#pop'), (r'\(', Punctuation, 'formal_part'), (r'\[', Punctuation, 'formal_part'), (r'with|and|use', Keyword.Reserved), (r'array\b', Keyword.Reserved, ('#pop', 'array_def')), (r'record\b', Keyword.Reserved, ('record_def')), (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'), include('root'), ], 'array_def': [ (r';', Punctuation, '#pop'), (r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)), include('root'), ], 'record_def': [ (r'end record', Keyword.Reserved, '#pop'), include('root'), ], 'import': [ # TODO: use Name.Namespace if appropriate. This needs # work to disinguish imports from aspects. 
(r'[\w.]+', Name, '#pop'), default('#pop'), ], 'formal_part': [ (r'\)', Punctuation, '#pop'), (r'\]', Punctuation, '#pop'), (r'\w+', Name.Variable), (r',|:[^=]', Punctuation), (r'(in|not|null|out|access)\b', Keyword.Reserved), include('root'), ], 'package': [ ('body', Keyword.Declaration), (r'is\s+new|renames', Keyword.Reserved), ('is', Keyword.Reserved, '#pop'), (';', Punctuation, '#pop'), (r'\(', Punctuation, 'package_instantiation'), (r'([\w.]+)', Name.Class), include('root'), ], 'package_instantiation': [ (r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)), (r'[\w.\'"]', Text), (r'\)', Punctuation, '#pop'), include('root'), ], }
5,320
Python
35.696551
84
0.457519
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/mime.py
""" pygments.lexers.mime ~~~~~~~~~~~~~~~~~~~~ Lexer for Multipurpose Internet Mail Extensions (MIME) data. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include from pygments.lexers import get_lexer_for_mimetype from pygments.token import Text, Name, String, Operator, Comment, Other from pygments.util import get_int_opt, ClassNotFound __all__ = ["MIMELexer"] class MIMELexer(RegexLexer): """ Lexer for Multipurpose Internet Mail Extensions (MIME) data. This lexer is designed to process nested multipart data. It assumes that the given data contains both header and body (and is split at an empty line). If no valid header is found, then the entire data will be treated as body. Additional options accepted: `MIME-max-level` Max recursion level for nested MIME structure. Any negative number would treated as unlimited. (default: -1) `Content-Type` Treat the data as a specific content type. Useful when header is missing, or this lexer would try to parse from header. (default: `text/plain`) `Multipart-Boundary` Set the default multipart boundary delimiter. This option is only used when `Content-Type` is `multipart` and header is missing. This lexer would try to parse from header by default. (default: None) `Content-Transfer-Encoding` Treat the data as a specific encoding. Or this lexer would try to parse from header by default. (default: None) .. 
versionadded:: 2.5 """ name = "MIME" aliases = ["mime"] mimetypes = ["multipart/mixed", "multipart/related", "multipart/alternative"] def __init__(self, **options): super().__init__(**options) self.boundary = options.get("Multipart-Boundary") self.content_transfer_encoding = options.get("Content_Transfer_Encoding") self.content_type = options.get("Content_Type", "text/plain") self.max_nested_level = get_int_opt(options, "MIME-max-level", -1) def get_header_tokens(self, match): field = match.group(1) if field.lower() in self.attention_headers: yield match.start(1), Name.Tag, field + ":" yield match.start(2), Text.Whitespace, match.group(2) pos = match.end(2) body = match.group(3) for i, t, v in self.get_tokens_unprocessed(body, ("root", field.lower())): yield pos + i, t, v else: yield match.start(), Comment, match.group() def get_body_tokens(self, match): pos_body_start = match.start() entire_body = match.group() # skip first newline if entire_body[0] == '\n': yield pos_body_start, Text.Whitespace, '\n' pos_body_start = pos_body_start + 1 entire_body = entire_body[1:] # if it is not a multipart if not self.content_type.startswith("multipart") or not self.boundary: for i, t, v in self.get_bodypart_tokens(entire_body): yield pos_body_start + i, t, v return # find boundary bdry_pattern = r"^--%s(--)?\n" % re.escape(self.boundary) bdry_matcher = re.compile(bdry_pattern, re.MULTILINE) # some data has prefix text before first boundary m = bdry_matcher.search(entire_body) if m: pos_part_start = pos_body_start + m.end() pos_iter_start = lpos_end = m.end() yield pos_body_start, Text, entire_body[:m.start()] yield pos_body_start + lpos_end, String.Delimiter, m.group() else: pos_part_start = pos_body_start pos_iter_start = 0 # process tokens of each body part for m in bdry_matcher.finditer(entire_body, pos_iter_start): # bodypart lpos_start = pos_part_start - pos_body_start lpos_end = m.start() part = entire_body[lpos_start:lpos_end] for i, t, v in 
self.get_bodypart_tokens(part): yield pos_part_start + i, t, v # boundary yield pos_body_start + lpos_end, String.Delimiter, m.group() pos_part_start = pos_body_start + m.end() # some data has suffix text after last boundary lpos_start = pos_part_start - pos_body_start if lpos_start != len(entire_body): yield pos_part_start, Text, entire_body[lpos_start:] def get_bodypart_tokens(self, text): # return if: # * no content # * no content type specific # * content encoding is not readable # * max recurrsion exceed if not text.strip() or not self.content_type: return [(0, Other, text)] cte = self.content_transfer_encoding if cte and cte not in {"8bit", "7bit", "quoted-printable"}: return [(0, Other, text)] if self.max_nested_level == 0: return [(0, Other, text)] # get lexer try: lexer = get_lexer_for_mimetype(self.content_type) except ClassNotFound: return [(0, Other, text)] if isinstance(lexer, type(self)): lexer.max_nested_level = self.max_nested_level - 1 return lexer.get_tokens_unprocessed(text) def store_content_type(self, match): self.content_type = match.group(1) prefix_len = match.start(1) - match.start(0) yield match.start(0), Text.Whitespace, match.group(0)[:prefix_len] yield match.start(1), Name.Label, match.group(2) yield match.end(2), String.Delimiter, '/' yield match.start(3), Name.Label, match.group(3) def get_content_type_subtokens(self, match): yield match.start(1), Text, match.group(1) yield match.start(2), Text.Whitespace, match.group(2) yield match.start(3), Name.Attribute, match.group(3) yield match.start(4), Operator, match.group(4) yield match.start(5), String, match.group(5) if match.group(3).lower() == "boundary": boundary = match.group(5).strip() if boundary[0] == '"' and boundary[-1] == '"': boundary = boundary[1:-1] self.boundary = boundary def store_content_transfer_encoding(self, match): self.content_transfer_encoding = match.group(0).lower() yield match.start(0), Name.Constant, match.group(0) attention_headers = {"content-type", 
"content-transfer-encoding"} tokens = { "root": [ (r"^([\w-]+):( *)([\s\S]*?\n)(?![ \t])", get_header_tokens), (r"^$[\s\S]+", get_body_tokens), ], "header": [ # folding (r"\n[ \t]", Text.Whitespace), (r"\n(?![ \t])", Text.Whitespace, "#pop"), ], "content-type": [ include("header"), ( r"^\s*((multipart|application|audio|font|image|model|text|video" r"|message)/([\w-]+))", store_content_type, ), (r'(;)((?:[ \t]|\n[ \t])*)([\w:-]+)(=)([\s\S]*?)(?=;|\n(?![ \t]))', get_content_type_subtokens), (r';[ \t]*\n(?![ \t])', Text, '#pop'), ], "content-transfer-encoding": [ include("header"), (r"([\w-]+)", store_content_transfer_encoding), ], }
7,538
Python
34.729858
86
0.571239
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/tlb.py
""" pygments.lexers.tlb ~~~~~~~~~~~~~~~~~~~ Lexers for TL-b. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, words from pygments.token import Operator, Name, \ Number, Whitespace, Punctuation, Comment __all__ = ['TlbLexer'] class TlbLexer(RegexLexer): """ For TL-b source code. """ name = 'Tl-b' aliases = ['tlb'] filenames = ['*.tlb'] tokens = { 'root': [ (r'\s+', Whitespace), include('comments'), (r'[0-9]+', Number), (words(( '+', '-', '*', '=', '?', '~', '.', '^', '==', '<', '>', '<=', '>=', '!=' )), Operator), (words(('##', '#<', '#<=')), Name.Tag), (r'#[0-9a-f]*_?', Name.Tag), (r'\$[01]*_?', Name.Tag), (r'[a-zA-Z_][0-9a-zA-Z_]*', Name), (r'[;():\[\]{}]', Punctuation) ], 'comments': [ (r'//.*', Comment.Singleline), (r'/\*', Comment.Multiline, 'comment'), ], 'comment': [ (r'[^/*]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], }
1,377
Python
22.75862
70
0.404503
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/capnproto.py
""" pygments.lexers.capnproto ~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for the Cap'n Proto schema language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, default from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace __all__ = ['CapnProtoLexer'] class CapnProtoLexer(RegexLexer): """ For Cap'n Proto source. .. versionadded:: 2.2 """ name = 'Cap\'n Proto' url = 'https://capnproto.org' filenames = ['*.capnp'] aliases = ['capnp'] tokens = { 'root': [ (r'#.*?$', Comment.Single), (r'@[0-9a-zA-Z]*', Name.Decorator), (r'=', Literal, 'expression'), (r':', Name.Class, 'type'), (r'\$', Name.Attribute, 'annotation'), (r'(struct|enum|interface|union|import|using|const|annotation|' r'extends|in|of|on|as|with|from|fixed)\b', Keyword), (r'[\w.]+', Name), (r'[^#@=:$\w\s]+', Text), (r'\s+', Whitespace), ], 'type': [ (r'[^][=;,(){}$]+', Name.Class), (r'[\[(]', Name.Class, 'parentype'), default('#pop'), ], 'parentype': [ (r'[^][;()]+', Name.Class), (r'[\[(]', Name.Class, '#push'), (r'[])]', Name.Class, '#pop'), default('#pop'), ], 'expression': [ (r'[^][;,(){}$]+', Literal), (r'[\[(]', Literal, 'parenexp'), default('#pop'), ], 'parenexp': [ (r'[^][;()]+', Literal), (r'[\[(]', Literal, '#push'), (r'[])]', Literal, '#pop'), default('#pop'), ], 'annotation': [ (r'[^][;,(){}=:]+', Name.Attribute), (r'[\[(]', Name.Attribute, 'annexp'), default('#pop'), ], 'annexp': [ (r'[^][;()]+', Name.Attribute), (r'[\[(]', Name.Attribute, '#push'), (r'[])]', Name.Attribute, '#pop'), default('#pop'), ], }
2,175
Python
27.631579
76
0.415632
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/zig.py
""" pygments.lexers.zig ~~~~~~~~~~~~~~~~~~~ Lexers for Zig. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words from pygments.token import Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['ZigLexer'] class ZigLexer(RegexLexer): """ Lexer for the Zig language. grammar: https://ziglang.org/documentation/master/#Grammar """ name = 'Zig' url = 'http://www.ziglang.org' aliases = ['zig'] filenames = ['*.zig'] mimetypes = ['text/zig'] type_keywords = ( words(('bool', 'f16', 'f32', 'f64', 'f128', 'void', 'noreturn', 'type', 'anyerror', 'promise', 'i0', 'u0', 'isize', 'usize', 'comptime_int', 'comptime_float', 'c_short', 'c_ushort', 'c_int', 'c_uint', 'c_long', 'c_ulong', 'c_longlong', 'c_ulonglong', 'c_longdouble', 'c_void' 'i8', 'u8', 'i16', 'u16', 'i32', 'u32', 'i64', 'u64', 'i128', 'u128'), suffix=r'\b'), Keyword.Type) storage_keywords = ( words(('const', 'var', 'extern', 'packed', 'export', 'pub', 'noalias', 'inline', 'comptime', 'nakedcc', 'stdcallcc', 'volatile', 'allowzero', 'align', 'linksection', 'threadlocal'), suffix=r'\b'), Keyword.Reserved) structure_keywords = ( words(('struct', 'enum', 'union', 'error'), suffix=r'\b'), Keyword) statement_keywords = ( words(('break', 'return', 'continue', 'asm', 'defer', 'errdefer', 'unreachable', 'try', 'catch', 'async', 'await', 'suspend', 'resume', 'cancel'), suffix=r'\b'), Keyword) conditional_keywords = ( words(('if', 'else', 'switch', 'and', 'or', 'orelse'), suffix=r'\b'), Keyword) repeat_keywords = ( words(('while', 'for'), suffix=r'\b'), Keyword) other_keywords = ( words(('fn', 'usingnamespace', 'test'), suffix=r'\b'), Keyword) constant_keywords = ( words(('true', 'false', 'null', 'undefined'), suffix=r'\b'), Keyword.Constant) tokens = { 'root': [ (r'\n', Whitespace), (r'\s+', Whitespace), (r'//.*?\n', Comment.Single), # Keywords statement_keywords, storage_keywords, 
structure_keywords, repeat_keywords, type_keywords, constant_keywords, conditional_keywords, other_keywords, # Floats (r'0x[0-9a-fA-F]+\.[0-9a-fA-F]+([pP][\-+]?[0-9a-fA-F]+)?', Number.Float), (r'0x[0-9a-fA-F]+\.?[pP][\-+]?[0-9a-fA-F]+', Number.Float), (r'[0-9]+\.[0-9]+([eE][-+]?[0-9]+)?', Number.Float), (r'[0-9]+\.?[eE][-+]?[0-9]+', Number.Float), # Integers (r'0b[01]+', Number.Bin), (r'0o[0-7]+', Number.Oct), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), # Identifier (r'@[a-zA-Z_]\w*', Name.Builtin), (r'[a-zA-Z_]\w*', Name), # Characters (r'\'\\\'\'', String.Escape), (r'\'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])\'', String.Escape), (r'\'[^\\\']\'', String), # Strings (r'\\\\[^\n]*', String.Heredoc), (r'c\\\\[^\n]*', String.Heredoc), (r'c?"', String, 'string'), # Operators, Punctuation (r'[+%=><|^!?/\-*&~:]', Operator), (r'[{}()\[\],.;]', Punctuation) ], 'string': [ (r'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])', String.Escape), (r'[^\\"\n]+', String), (r'"', String, '#pop') ] }
3,953
Python
30.632
85
0.453833
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/modeling.py
""" pygments.lexers.modeling ~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for modeling languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace from pygments.lexers.html import HtmlLexer from pygments.lexers import _stan_builtins __all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer'] class ModelicaLexer(RegexLexer): """ For Modelica source code. .. versionadded:: 1.1 """ name = 'Modelica' url = 'http://www.modelica.org/' aliases = ['modelica'] filenames = ['*.mo'] mimetypes = ['text/x-modelica'] flags = re.DOTALL | re.MULTILINE _name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)" tokens = { 'whitespace': [ (r'[\s\ufeff]+', Text), (r'//[^\n]*\n?', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'root': [ include('whitespace'), (r'"', String.Double, 'string'), (r'[()\[\]{},;]+', Punctuation), (r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator), (r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float), (r'\d+', Number.Integer), (r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|' r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|' r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|' r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|' r'identity|inStream|integer|Integer|interval|inverse|isPresent|' r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|' r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|' r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|' r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|' r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|' r'transpose|vector|zeros)\b', Name.Builtin), (r'(algorithm|annotation|break|connect|constant|constrainedby|der|' r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|' 
r'equation|exit|expandable|extends|external|firstTick|final|flow|for|if|' r'import|impure|in|initial|inner|input|interval|loop|nondiscrete|outer|' r'output|parameter|partial|protected|public|pure|redeclare|' r'replaceable|return|stream|then|when|while)\b', Keyword.Reserved), (r'(and|not|or)\b', Operator.Word), (r'(block|class|connector|end|function|model|operator|package|' r'record|type)\b', Keyword.Reserved, 'class'), (r'(false|true)\b', Keyword.Constant), (r'within\b', Keyword.Reserved, 'package-prefix'), (_name, Name) ], 'class': [ include('whitespace'), (r'(function|record)\b', Keyword.Reserved), (r'(if|for|when|while)\b', Keyword.Reserved, '#pop'), (_name, Name.Class, '#pop'), default('#pop') ], 'package-prefix': [ include('whitespace'), (_name, Name.Namespace, '#pop'), default('#pop') ], 'string': [ (r'"', String.Double, '#pop'), (r'\\[\'"?\\abfnrtv]', String.Escape), (r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))', using(HtmlLexer)), (r'<|\\?[^"\\<]+', String.Double) ] } class BugsLexer(RegexLexer): """ Pygments Lexer for OpenBugs and WinBugs models. .. 
versionadded:: 1.6 """ name = 'BUGS' aliases = ['bugs', 'winbugs', 'openbugs'] filenames = ['*.bug'] _FUNCTIONS = ( # Scalar functions 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', 'trunc', # Vector functions 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', 'sd', 'sort', 'sum', # Special 'D', 'I', 'F', 'T', 'C') """ OpenBUGS built-in functions From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII This also includes - T, C, I : Truncation and censoring. ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. - D : ODE - F : Functional http://www.openbugs.info/Examples/Functionals.html """ _DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', 'dmt', 'dwish') """ OpenBUGS built-in distributions Functions from http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI """ tokens = { 'whitespace': [ (r"\s+", Text), ], 'comments': [ # Comments (r'#.*$', Comment.Single), ], 'root': [ # Comments include('comments'), include('whitespace'), # Block start (r'(model)(\s+)(\{)', bygroups(Keyword.Namespace, Text, Punctuation)), # Reserved Words (r'(for|in)(?![\w.])', Keyword.Reserved), # Built-in Functions (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), Name.Builtin), # Regular variable names (r'[A-Za-z][\w.]*', Name), # Number Literals (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), # 
Punctuation (r'\[|\]|\(|\)|:|,|;', Punctuation), # Assignment operators # SLexer makes these tokens Operators. (r'<-|~', Operator), # Infix and prefix operators (r'\+|-|\*|/', Operator), # Block (r'[{}]', Punctuation), ] } def analyse_text(text): if re.search(r"^\s*model\s*{", text, re.M): return 0.7 else: return 0.0 class JagsLexer(RegexLexer): """ Pygments Lexer for JAGS. .. versionadded:: 1.6 """ name = 'JAGS' aliases = ['jags'] filenames = ['*.jag', '*.bug'] # JAGS _FUNCTIONS = ( 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'cos', 'cosh', 'cloglog', 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact', 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh', 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin', 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse', 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan', # Truncation/Censoring (should I include) 'T', 'I') # Distributions with density, probability and quartile functions _DISTRIBUTIONS = tuple('[dpq]%s' % x for x in ('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp', 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm', 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib')) # Other distributions without density and probability _OTHER_DISTRIBUTIONS = ( 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper', 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq', 'dnbinom', 'dweibull', 'ddirich') tokens = { 'whitespace': [ (r"\s+", Text), ], 'names': [ # Regular variable names (r'[a-zA-Z][\w.]*\b', Name), ], 'comments': [ # do not use stateful comments (r'(?s)/\*.*?\*/', Comment.Multiline), # Comments (r'#.*$', Comment.Single), ], 'root': [ # Comments include('comments'), include('whitespace'), # Block start (r'(model|data)(\s+)(\{)', bygroups(Keyword.Namespace, Text, Punctuation)), (r'var(?![\w.])', Keyword.Declaration), # Reserved Words (r'(for|in)(?![\w.])', Keyword.Reserved), # Builtins # Need to use lookahead because . 
is a valid char (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS + _OTHER_DISTRIBUTIONS), Name.Builtin), # Names include('names'), # Number Literals (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), (r'\[|\]|\(|\)|:|,|;', Punctuation), # Assignment operators (r'<-|~', Operator), # # JAGS includes many more than OpenBUGS (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator), (r'[{}]', Punctuation), ] } def analyse_text(text): if re.search(r'^\s*model\s*\{', text, re.M): if re.search(r'^\s*data\s*\{', text, re.M): return 0.9 elif re.search(r'^\s*var', text, re.M): return 0.9 else: return 0.3 else: return 0 class StanLexer(RegexLexer): """Pygments Lexer for Stan models. The Stan modeling language is specified in the *Stan Modeling Language User's Guide and Reference Manual, v2.17.0*, `pdf <https://github.com/stan-dev/stan/releases/download/v2.17.0/stan-reference-2.17.0.pdf>`__. .. versionadded:: 1.6 """ name = 'Stan' aliases = ['stan'] filenames = ['*.stan'] tokens = { 'whitespace': [ (r"\s+", Text), ], 'comments': [ (r'(?s)/\*.*?\*/', Comment.Multiline), # Comments (r'(//|#).*$', Comment.Single), ], 'root': [ (r'"[^"]*"', String), # Comments include('comments'), # block start include('whitespace'), # Block start (r'(%s)(\s*)(\{)' % r'|'.join(('functions', 'data', r'transformed\s+?data', 'parameters', r'transformed\s+parameters', 'model', r'generated\s+quantities')), bygroups(Keyword.Namespace, Text, Punctuation)), # target keyword (r'target\s*\+=', Keyword), # Reserved Words (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword), # Truncation (r'T(?=\s*\[)', Keyword), # Data types (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type), # < should be punctuation, but elsewhere I can't tell if it is in # a range constraint (r'(<)(\s*)(upper|lower|offset|multiplier)(\s*)(=)', bygroups(Operator, Whitespace, Keyword, Whitespace, Punctuation)), (r'(,)(\s*)(upper)(\s*)(=)', bygroups(Punctuation, Whitespace, Keyword, Whitespace, Punctuation)), # 
Punctuation (r"[;,\[\]()]", Punctuation), # Builtin (r'(%s)(?=\s*\()' % '|'.join(_stan_builtins.FUNCTIONS), Name.Builtin), (r'(~)(\s*)(%s)(?=\s*\()' % '|'.join(_stan_builtins.DISTRIBUTIONS), bygroups(Operator, Whitespace, Name.Builtin)), # Special names ending in __, like lp__ (r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo), (r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved), # user-defined functions (r'[A-Za-z]\w*(?=\s*\()]', Name.Function), # Imaginary Literals (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?i', Number.Float), (r'\.[0-9]+([eE][+-]?[0-9]+)?i', Number.Float), (r'[0-9]+i', Number.Float), # Real Literals (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?', Number.Float), (r'\.[0-9]+([eE][+-]?[0-9]+)?', Number.Float), # Integer Literals (r'[0-9]+', Number.Integer), # Regular variable names (r'[A-Za-z]\w*\b', Name), # Assignment operators (r'<-|(?:\+|-|\.?/|\.?\*|=)?=|~', Operator), # Infix, prefix and postfix operators (and = ) (r"\+|-|\.?\*|\.?/|\\|'|\.?\^|!=?|<=?|>=?|\|\||&&|%|\?|:|%/%|!", Operator), # Block delimiters (r'[{}]', Punctuation), # Distribution | (r'\|', Punctuation) ] } def analyse_text(text): if re.search(r'^\s*parameters\s*\{', text, re.M): return 1.0 else: return 0.0
13,524
Python
35.554054
99
0.474933
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/webidl.py
""" pygments.lexers.webidl ~~~~~~~~~~~~~~~~~~~~~~ Lexers for Web IDL, including some extensions. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, default, include, words from pygments.token import Comment, Keyword, Name, Number, Punctuation, \ String, Text __all__ = ['WebIDLLexer'] _builtin_types = ( # primitive types 'byte', 'octet', 'boolean', r'(?:unsigned\s+)?(?:short|long(?:\s+long)?)', r'(?:unrestricted\s+)?(?:float|double)', # string types 'DOMString', 'ByteString', 'USVString', # exception types 'Error', 'DOMException', # typed array types 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Uint8ClampedArray', 'Float32Array', 'Float64Array', # buffer source types 'ArrayBuffer', 'DataView', 'Int8Array', 'Int16Array', 'Int32Array', # other 'any', 'void', 'object', 'RegExp', ) _identifier = r'_?[A-Za-z][a-zA-Z0-9_-]*' _keyword_suffix = r'(?![\w-])' _string = r'"[^"]*"' class WebIDLLexer(RegexLexer): """ For Web IDL. .. 
versionadded:: 2.6 """ name = 'Web IDL' url = 'https://www.w3.org/wiki/Web_IDL' aliases = ['webidl'] filenames = ['*.webidl'] tokens = { 'common': [ (r'\s+', Text), (r'(?s)/\*.*?\*/', Comment.Multiline), (r'//.*', Comment.Single), (r'^#.*', Comment.Preproc), ], 'root': [ include('common'), (r'\[', Punctuation, 'extended_attributes'), (r'partial' + _keyword_suffix, Keyword), (r'typedef' + _keyword_suffix, Keyword, ('typedef', 'type')), (r'interface' + _keyword_suffix, Keyword, 'interface_rest'), (r'enum' + _keyword_suffix, Keyword, 'enum_rest'), (r'callback' + _keyword_suffix, Keyword, 'callback_rest'), (r'dictionary' + _keyword_suffix, Keyword, 'dictionary_rest'), (r'namespace' + _keyword_suffix, Keyword, 'namespace_rest'), (_identifier, Name.Class, 'implements_rest'), ], 'extended_attributes': [ include('common'), (r',', Punctuation), (_identifier, Name.Decorator), (r'=', Punctuation, 'extended_attribute_rest'), (r'\(', Punctuation, 'argument_list'), (r'\]', Punctuation, '#pop'), ], 'extended_attribute_rest': [ include('common'), (_identifier, Name, 'extended_attribute_named_rest'), (_string, String), (r'\(', Punctuation, 'identifier_list'), default('#pop'), ], 'extended_attribute_named_rest': [ include('common'), (r'\(', Punctuation, 'argument_list'), default('#pop'), ], 'argument_list': [ include('common'), (r'\)', Punctuation, '#pop'), default('argument'), ], 'argument': [ include('common'), (r'optional' + _keyword_suffix, Keyword), (r'\[', Punctuation, 'extended_attributes'), (r',', Punctuation, '#pop'), (r'\)', Punctuation, '#pop:2'), default(('argument_rest', 'type')) ], 'argument_rest': [ include('common'), (_identifier, Name.Variable), (r'\.\.\.', Punctuation), (r'=', Punctuation, 'default_value'), default('#pop'), ], 'identifier_list': [ include('common'), (_identifier, Name.Class), (r',', Punctuation), (r'\)', Punctuation, '#pop'), ], 'type': [ include('common'), (r'(?:' + r'|'.join(_builtin_types) + r')' + _keyword_suffix, Keyword.Type, 'type_null'), 
(words(('sequence', 'Promise', 'FrozenArray'), suffix=_keyword_suffix), Keyword.Type, 'type_identifier'), (_identifier, Name.Class, 'type_identifier'), (r'\(', Punctuation, 'union_type'), ], 'union_type': [ include('common'), (r'or' + _keyword_suffix, Keyword), (r'\)', Punctuation, ('#pop', 'type_null')), default('type'), ], 'type_identifier': [ (r'<', Punctuation, 'type_list'), default(('#pop', 'type_null')) ], 'type_null': [ (r'\?', Punctuation), default('#pop:2'), ], 'default_value': [ include('common'), include('const_value'), (_string, String, '#pop'), (r'\[\s*\]', Punctuation, '#pop'), ], 'const_value': [ include('common'), (words(('true', 'false', '-Infinity', 'Infinity', 'NaN', 'null'), suffix=_keyword_suffix), Keyword.Constant, '#pop'), (r'-?(?:(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:[Ee][+-]?[0-9]+)?' + r'|[0-9]+[Ee][+-]?[0-9]+)', Number.Float, '#pop'), (r'-?[1-9][0-9]*', Number.Integer, '#pop'), (r'-?0[Xx][0-9A-Fa-f]+', Number.Hex, '#pop'), (r'-?0[0-7]*', Number.Oct, '#pop'), ], 'typedef': [ include('common'), (_identifier, Name.Class), (r';', Punctuation, '#pop'), ], 'namespace_rest': [ include('common'), (_identifier, Name.Namespace), (r'\{', Punctuation, 'namespace_body'), (r';', Punctuation, '#pop'), ], 'namespace_body': [ include('common'), (r'\[', Punctuation, 'extended_attributes'), (r'readonly' + _keyword_suffix, Keyword), (r'attribute' + _keyword_suffix, Keyword, ('attribute_rest', 'type')), (r'const' + _keyword_suffix, Keyword, ('const_rest', 'type')), (r'\}', Punctuation, '#pop'), default(('operation_rest', 'type')), ], 'interface_rest': [ include('common'), (_identifier, Name.Class), (r':', Punctuation), (r'\{', Punctuation, 'interface_body'), (r';', Punctuation, '#pop'), ], 'interface_body': [ (words(('iterable', 'maplike', 'setlike'), suffix=_keyword_suffix), Keyword, 'iterable_maplike_setlike_rest'), (words(('setter', 'getter', 'creator', 'deleter', 'legacycaller', 'inherit', 'static', 'stringifier', 'jsonifier'), suffix=_keyword_suffix), 
Keyword), (r'serializer' + _keyword_suffix, Keyword, 'serializer_rest'), (r';', Punctuation), include('namespace_body'), ], 'attribute_rest': [ include('common'), (_identifier, Name.Variable), (r';', Punctuation, '#pop'), ], 'const_rest': [ include('common'), (_identifier, Name.Constant), (r'=', Punctuation, 'const_value'), (r';', Punctuation, '#pop'), ], 'operation_rest': [ include('common'), (r';', Punctuation, '#pop'), default('operation'), ], 'operation': [ include('common'), (_identifier, Name.Function), (r'\(', Punctuation, 'argument_list'), (r';', Punctuation, '#pop:2'), ], 'iterable_maplike_setlike_rest': [ include('common'), (r'<', Punctuation, 'type_list'), (r';', Punctuation, '#pop'), ], 'type_list': [ include('common'), (r',', Punctuation), (r'>', Punctuation, '#pop'), default('type'), ], 'serializer_rest': [ include('common'), (r'=', Punctuation, 'serialization_pattern'), (r';', Punctuation, '#pop'), default('operation'), ], 'serialization_pattern': [ include('common'), (_identifier, Name.Variable, '#pop'), (r'\{', Punctuation, 'serialization_pattern_map'), (r'\[', Punctuation, 'serialization_pattern_list'), ], 'serialization_pattern_map': [ include('common'), (words(('getter', 'inherit', 'attribute'), suffix=_keyword_suffix), Keyword), (r',', Punctuation), (_identifier, Name.Variable), (r'\}', Punctuation, '#pop:2'), ], 'serialization_pattern_list': [ include('common'), (words(('getter', 'attribute'), suffix=_keyword_suffix), Keyword), (r',', Punctuation), (_identifier, Name.Variable), (r']', Punctuation, '#pop:2'), ], 'enum_rest': [ include('common'), (_identifier, Name.Class), (r'\{', Punctuation, 'enum_body'), (r';', Punctuation, '#pop'), ], 'enum_body': [ include('common'), (_string, String), (r',', Punctuation), (r'\}', Punctuation, '#pop'), ], 'callback_rest': [ include('common'), (r'interface' + _keyword_suffix, Keyword, ('#pop', 'interface_rest')), (_identifier, Name.Class), (r'=', Punctuation, ('operation', 'type')), (r';', Punctuation, 
'#pop'), ], 'dictionary_rest': [ include('common'), (_identifier, Name.Class), (r':', Punctuation), (r'\{', Punctuation, 'dictionary_body'), (r';', Punctuation, '#pop'), ], 'dictionary_body': [ include('common'), (r'\[', Punctuation, 'extended_attributes'), (r'required' + _keyword_suffix, Keyword), (r'\}', Punctuation, '#pop'), default(('dictionary_item', 'type')), ], 'dictionary_item': [ include('common'), (_identifier, Name.Variable), (r'=', Punctuation, 'default_value'), (r';', Punctuation, '#pop'), ], 'implements_rest': [ include('common'), (r'implements' + _keyword_suffix, Keyword), (_identifier, Name.Class), (r';', Punctuation, '#pop'), ], }
10,517
Python
34.06
79
0.456024
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_php_builtins.py
""" pygments.lexers._php_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This file loads the function names and their modules from the php webpage and generates itself. Run with `python -I` to regenerate. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ MODULES = {'APCu': ('apcu_add', 'apcu_cache_info', 'apcu_cas', 'apcu_clear_cache', 'apcu_dec', 'apcu_delete', 'apcu_enabled', 'apcu_entry', 'apcu_exists', 'apcu_fetch', 'apcu_inc', 'apcu_key_info', 'apcu_sma_info', 'apcu_store'), 'Aliases and deprecated Mysqli': ('mysqli_connect', 'mysqli_execute', 'mysqli_get_client_stats', 'mysqli_get_links_stats', 'mysqli_report'), 'Apache': ('apache_child_terminate', 'apache_get_modules', 'apache_get_version', 'apache_getenv', 'apache_lookup_uri', 'apache_note', 'apache_request_headers', 'apache_response_headers', 'apache_setenv', 'getallheaders', 'virtual'), 'Array': ('array_change_key_case', 'array_chunk', 'array_column', 'array_combine', 'array_count_values', 'array_diff_assoc', 'array_diff_key', 'array_diff_uassoc', 'array_diff_ukey', 'array_diff', 'array_fill_keys', 'array_fill', 'array_filter', 'array_flip', 'array_intersect_assoc', 'array_intersect_key', 'array_intersect_uassoc', 'array_intersect_ukey', 'array_intersect', 'array_is_list', 'array_key_exists', 'array_key_first', 'array_key_last', 'array_keys', 'array_map', 'array_merge_recursive', 'array_merge', 'array_multisort', 'array_pad', 'array_pop', 'array_product', 'array_push', 'array_rand', 'array_reduce', 'array_replace_recursive', 'array_replace', 'array_reverse', 'array_search', 'array_shift', 'array_slice', 'array_splice', 'array_sum', 'array_udiff_assoc', 'array_udiff_uassoc', 'array_udiff', 'array_uintersect_assoc', 'array_uintersect_uassoc', 'array_uintersect', 'array_unique', 'array_unshift', 'array_values', 'array_walk_recursive', 'array_walk', 'array', 'arsort', 'asort', 'compact', 'count', 'current', 'each', 'end', 'extract', 'in_array', 'key_exists', 
'key', 'krsort', 'ksort', 'list', 'natcasesort', 'natsort', 'next', 'pos', 'prev', 'range', 'reset', 'rsort', 'shuffle', 'sizeof', 'sort', 'uasort', 'uksort', 'usort'), 'BC Math': ('bcadd', 'bccomp', 'bcdiv', 'bcmod', 'bcmul', 'bcpow', 'bcpowmod', 'bcscale', 'bcsqrt', 'bcsub'), 'Bzip2': ('bzclose', 'bzcompress', 'bzdecompress', 'bzerrno', 'bzerror', 'bzerrstr', 'bzflush', 'bzopen', 'bzread', 'bzwrite'), 'COM': ('com_create_guid', 'com_event_sink', 'com_get_active_object', 'com_load_typelib', 'com_message_pump', 'com_print_typeinfo', 'variant_abs', 'variant_add', 'variant_and', 'variant_cast', 'variant_cat', 'variant_cmp', 'variant_date_from_timestamp', 'variant_date_to_timestamp', 'variant_div', 'variant_eqv', 'variant_fix', 'variant_get_type', 'variant_idiv', 'variant_imp', 'variant_int', 'variant_mod', 'variant_mul', 'variant_neg', 'variant_not', 'variant_or', 'variant_pow', 'variant_round', 'variant_set_type', 'variant_set', 'variant_sub', 'variant_xor'), 'CSPRNG': ('random_bytes', 'random_int'), 'CUBRID': ('cubrid_bind', 'cubrid_close_prepare', 'cubrid_close_request', 'cubrid_col_get', 'cubrid_col_size', 'cubrid_column_names', 'cubrid_column_types', 'cubrid_commit', 'cubrid_connect_with_url', 'cubrid_connect', 'cubrid_current_oid', 'cubrid_disconnect', 'cubrid_drop', 'cubrid_error_code_facility', 'cubrid_error_code', 'cubrid_error_msg', 'cubrid_execute', 'cubrid_fetch', 'cubrid_free_result', 'cubrid_get_autocommit', 'cubrid_get_charset', 'cubrid_get_class_name', 'cubrid_get_client_info', 'cubrid_get_db_parameter', 'cubrid_get_query_timeout', 'cubrid_get_server_info', 'cubrid_get', 'cubrid_insert_id', 'cubrid_is_instance', 'cubrid_lob_close', 'cubrid_lob_export', 'cubrid_lob_get', 'cubrid_lob_send', 'cubrid_lob_size', 'cubrid_lob2_bind', 'cubrid_lob2_close', 'cubrid_lob2_export', 'cubrid_lob2_import', 'cubrid_lob2_new', 'cubrid_lob2_read', 'cubrid_lob2_seek64', 'cubrid_lob2_seek', 'cubrid_lob2_size64', 'cubrid_lob2_size', 'cubrid_lob2_tell64', 
'cubrid_lob2_tell', 'cubrid_lob2_write', 'cubrid_lock_read', 'cubrid_lock_write', 'cubrid_move_cursor', 'cubrid_next_result', 'cubrid_num_cols', 'cubrid_num_rows', 'cubrid_pconnect_with_url', 'cubrid_pconnect', 'cubrid_prepare', 'cubrid_put', 'cubrid_rollback', 'cubrid_schema', 'cubrid_seq_drop', 'cubrid_seq_insert', 'cubrid_seq_put', 'cubrid_set_add', 'cubrid_set_autocommit', 'cubrid_set_db_parameter', 'cubrid_set_drop', 'cubrid_set_query_timeout', 'cubrid_version'), 'Calendar': ('cal_days_in_month', 'cal_from_jd', 'cal_info', 'cal_to_jd', 'easter_date', 'easter_days', 'frenchtojd', 'gregoriantojd', 'jddayofweek', 'jdmonthname', 'jdtofrench', 'jdtogregorian', 'jdtojewish', 'jdtojulian', 'jdtounix', 'jewishtojd', 'juliantojd', 'unixtojd'), 'Classes/Object': ('__autoload', 'class_alias', 'class_exists', 'enum_exists', 'get_called_class', 'get_class_methods', 'get_class_vars', 'get_class', 'get_declared_classes', 'get_declared_interfaces', 'get_declared_traits', 'get_mangled_object_vars', 'get_object_vars', 'get_parent_class', 'interface_exists', 'is_a', 'is_subclass_of', 'method_exists', 'property_exists', 'trait_exists'), 'Ctype': ('ctype_alnum', 'ctype_alpha', 'ctype_cntrl', 'ctype_digit', 'ctype_graph', 'ctype_lower', 'ctype_print', 'ctype_punct', 'ctype_space', 'ctype_upper', 'ctype_xdigit'), 'DBA': ('dba_close', 'dba_delete', 'dba_exists', 'dba_fetch', 'dba_firstkey', 'dba_handlers', 'dba_insert', 'dba_key_split', 'dba_list', 'dba_nextkey', 'dba_open', 'dba_optimize', 'dba_popen', 'dba_replace', 'dba_sync'), 'DOM': ('dom_import_simplexml',), 'Date/Time': ('checkdate', 'date_add', 'date_create_from_format', 'date_create_immutable_from_format', 'date_create_immutable', 'date_create', 'date_date_set', 'date_default_timezone_get', 'date_default_timezone_set', 'date_diff', 'date_format', 'date_get_last_errors', 'date_interval_create_from_date_string', 'date_interval_format', 'date_isodate_set', 'date_modify', 'date_offset_get', 'date_parse_from_format', 
'date_parse', 'date_sub', 'date_sun_info', 'date_sunrise', 'date_sunset', 'date_time_set', 'date_timestamp_get', 'date_timestamp_set', 'date_timezone_get', 'date_timezone_set', 'date', 'getdate', 'gettimeofday', 'gmdate', 'gmmktime', 'gmstrftime', 'idate', 'localtime', 'microtime', 'mktime', 'strftime', 'strptime', 'strtotime', 'time', 'timezone_abbreviations_list', 'timezone_identifiers_list', 'timezone_location_get', 'timezone_name_from_abbr', 'timezone_name_get', 'timezone_offset_get', 'timezone_open', 'timezone_transitions_get', 'timezone_version_get'), 'Direct IO': ('dio_close', 'dio_fcntl', 'dio_open', 'dio_read', 'dio_seek', 'dio_stat', 'dio_tcsetattr', 'dio_truncate', 'dio_write'), 'Directory': ('chdir', 'chroot', 'closedir', 'dir', 'getcwd', 'opendir', 'readdir', 'rewinddir', 'scandir'), 'Eio': ('eio_busy', 'eio_cancel', 'eio_chmod', 'eio_chown', 'eio_close', 'eio_custom', 'eio_dup2', 'eio_event_loop', 'eio_fallocate', 'eio_fchmod', 'eio_fchown', 'eio_fdatasync', 'eio_fstat', 'eio_fstatvfs', 'eio_fsync', 'eio_ftruncate', 'eio_futime', 'eio_get_event_stream', 'eio_get_last_error', 'eio_grp_add', 'eio_grp_cancel', 'eio_grp_limit', 'eio_grp', 'eio_init', 'eio_link', 'eio_lstat', 'eio_mkdir', 'eio_mknod', 'eio_nop', 'eio_npending', 'eio_nready', 'eio_nreqs', 'eio_nthreads', 'eio_open', 'eio_poll', 'eio_read', 'eio_readahead', 'eio_readdir', 'eio_readlink', 'eio_realpath', 'eio_rename', 'eio_rmdir', 'eio_seek', 'eio_sendfile', 'eio_set_max_idle', 'eio_set_max_parallel', 'eio_set_max_poll_reqs', 'eio_set_max_poll_time', 'eio_set_min_parallel', 'eio_stat', 'eio_statvfs', 'eio_symlink', 'eio_sync_file_range', 'eio_sync', 'eio_syncfs', 'eio_truncate', 'eio_unlink', 'eio_utime', 'eio_write'), 'Enchant': ('enchant_broker_describe', 'enchant_broker_dict_exists', 'enchant_broker_free_dict', 'enchant_broker_free', 'enchant_broker_get_dict_path', 'enchant_broker_get_error', 'enchant_broker_init', 'enchant_broker_list_dicts', 'enchant_broker_request_dict', 
'enchant_broker_request_pwl_dict', 'enchant_broker_set_dict_path', 'enchant_broker_set_ordering', 'enchant_dict_add_to_personal', 'enchant_dict_add_to_session', 'enchant_dict_add', 'enchant_dict_check', 'enchant_dict_describe', 'enchant_dict_get_error', 'enchant_dict_is_added', 'enchant_dict_is_in_session', 'enchant_dict_quick_check', 'enchant_dict_store_replacement', 'enchant_dict_suggest'), 'Error Handling': ('debug_backtrace', 'debug_print_backtrace', 'error_clear_last', 'error_get_last', 'error_log', 'error_reporting', 'restore_error_handler', 'restore_exception_handler', 'set_error_handler', 'set_exception_handler', 'trigger_error', 'user_error'), 'Exif': ('exif_imagetype', 'exif_read_data', 'exif_tagname', 'exif_thumbnail', 'read_exif_data'), 'Expect': ('expect_expectl', 'expect_popen'), 'FDF': ('fdf_add_doc_javascript', 'fdf_add_template', 'fdf_close', 'fdf_create', 'fdf_enum_values', 'fdf_errno', 'fdf_error', 'fdf_get_ap', 'fdf_get_attachment', 'fdf_get_encoding', 'fdf_get_file', 'fdf_get_flags', 'fdf_get_opt', 'fdf_get_status', 'fdf_get_value', 'fdf_get_version', 'fdf_header', 'fdf_next_field_name', 'fdf_open_string', 'fdf_open', 'fdf_remove_item', 'fdf_save_string', 'fdf_save', 'fdf_set_ap', 'fdf_set_encoding', 'fdf_set_file', 'fdf_set_flags', 'fdf_set_javascript_action', 'fdf_set_on_import_javascript', 'fdf_set_opt', 'fdf_set_status', 'fdf_set_submit_form_action', 'fdf_set_target_frame', 'fdf_set_value', 'fdf_set_version'), 'FPM': ('fastcgi_finish_request',), 'FTP': ('ftp_alloc', 'ftp_append', 'ftp_cdup', 'ftp_chdir', 'ftp_chmod', 'ftp_close', 'ftp_connect', 'ftp_delete', 'ftp_exec', 'ftp_fget', 'ftp_fput', 'ftp_get_option', 'ftp_get', 'ftp_login', 'ftp_mdtm', 'ftp_mkdir', 'ftp_mlsd', 'ftp_nb_continue', 'ftp_nb_fget', 'ftp_nb_fput', 'ftp_nb_get', 'ftp_nb_put', 'ftp_nlist', 'ftp_pasv', 'ftp_put', 'ftp_pwd', 'ftp_quit', 'ftp_raw', 'ftp_rawlist', 'ftp_rename', 'ftp_rmdir', 'ftp_set_option', 'ftp_site', 'ftp_size', 'ftp_ssl_connect', 'ftp_systype'), 'Fann': 
('fann_cascadetrain_on_data', 'fann_cascadetrain_on_file', 'fann_clear_scaling_params', 'fann_copy', 'fann_create_from_file', 'fann_create_shortcut_array', 'fann_create_shortcut', 'fann_create_sparse_array', 'fann_create_sparse', 'fann_create_standard_array', 'fann_create_standard', 'fann_create_train_from_callback', 'fann_create_train', 'fann_descale_input', 'fann_descale_output', 'fann_descale_train', 'fann_destroy_train', 'fann_destroy', 'fann_duplicate_train_data', 'fann_get_activation_function', 'fann_get_activation_steepness', 'fann_get_bias_array', 'fann_get_bit_fail_limit', 'fann_get_bit_fail', 'fann_get_cascade_activation_functions_count', 'fann_get_cascade_activation_functions', 'fann_get_cascade_activation_steepnesses_count', 'fann_get_cascade_activation_steepnesses', 'fann_get_cascade_candidate_change_fraction', 'fann_get_cascade_candidate_limit', 'fann_get_cascade_candidate_stagnation_epochs', 'fann_get_cascade_max_cand_epochs', 'fann_get_cascade_max_out_epochs', 'fann_get_cascade_min_cand_epochs', 'fann_get_cascade_min_out_epochs', 'fann_get_cascade_num_candidate_groups', 'fann_get_cascade_num_candidates', 'fann_get_cascade_output_change_fraction', 'fann_get_cascade_output_stagnation_epochs', 'fann_get_cascade_weight_multiplier', 'fann_get_connection_array', 'fann_get_connection_rate', 'fann_get_errno', 'fann_get_errstr', 'fann_get_layer_array', 'fann_get_learning_momentum', 'fann_get_learning_rate', 'fann_get_MSE', 'fann_get_network_type', 'fann_get_num_input', 'fann_get_num_layers', 'fann_get_num_output', 'fann_get_quickprop_decay', 'fann_get_quickprop_mu', 'fann_get_rprop_decrease_factor', 'fann_get_rprop_delta_max', 'fann_get_rprop_delta_min', 'fann_get_rprop_delta_zero', 'fann_get_rprop_increase_factor', 'fann_get_sarprop_step_error_shift', 'fann_get_sarprop_step_error_threshold_factor', 'fann_get_sarprop_temperature', 'fann_get_sarprop_weight_decay_shift', 'fann_get_total_connections', 'fann_get_total_neurons', 'fann_get_train_error_function', 
'fann_get_train_stop_function', 'fann_get_training_algorithm', 'fann_init_weights', 'fann_length_train_data', 'fann_merge_train_data', 'fann_num_input_train_data', 'fann_num_output_train_data', 'fann_print_error', 'fann_randomize_weights', 'fann_read_train_from_file', 'fann_reset_errno', 'fann_reset_errstr', 'fann_reset_MSE', 'fann_run', 'fann_save_train', 'fann_save', 'fann_scale_input_train_data', 'fann_scale_input', 'fann_scale_output_train_data', 'fann_scale_output', 'fann_scale_train_data', 'fann_scale_train', 'fann_set_activation_function_hidden', 'fann_set_activation_function_layer', 'fann_set_activation_function_output', 'fann_set_activation_function', 'fann_set_activation_steepness_hidden', 'fann_set_activation_steepness_layer', 'fann_set_activation_steepness_output', 'fann_set_activation_steepness', 'fann_set_bit_fail_limit', 'fann_set_callback', 'fann_set_cascade_activation_functions', 'fann_set_cascade_activation_steepnesses', 'fann_set_cascade_candidate_change_fraction', 'fann_set_cascade_candidate_limit', 'fann_set_cascade_candidate_stagnation_epochs', 'fann_set_cascade_max_cand_epochs', 'fann_set_cascade_max_out_epochs', 'fann_set_cascade_min_cand_epochs', 'fann_set_cascade_min_out_epochs', 'fann_set_cascade_num_candidate_groups', 'fann_set_cascade_output_change_fraction', 'fann_set_cascade_output_stagnation_epochs', 'fann_set_cascade_weight_multiplier', 'fann_set_error_log', 'fann_set_input_scaling_params', 'fann_set_learning_momentum', 'fann_set_learning_rate', 'fann_set_output_scaling_params', 'fann_set_quickprop_decay', 'fann_set_quickprop_mu', 'fann_set_rprop_decrease_factor', 'fann_set_rprop_delta_max', 'fann_set_rprop_delta_min', 'fann_set_rprop_delta_zero', 'fann_set_rprop_increase_factor', 'fann_set_sarprop_step_error_shift', 'fann_set_sarprop_step_error_threshold_factor', 'fann_set_sarprop_temperature', 'fann_set_sarprop_weight_decay_shift', 'fann_set_scaling_params', 'fann_set_train_error_function', 'fann_set_train_stop_function', 
'fann_set_training_algorithm', 'fann_set_weight_array', 'fann_set_weight', 'fann_shuffle_train_data', 'fann_subset_train_data', 'fann_test_data', 'fann_test', 'fann_train_epoch', 'fann_train_on_data', 'fann_train_on_file', 'fann_train'), 'Fileinfo': ('finfo_buffer', 'finfo_close', 'finfo_file', 'finfo_open', 'finfo_set_flags', 'mime_content_type'), 'Filesystem': ('basename', 'chgrp', 'chmod', 'chown', 'clearstatcache', 'copy', 'dirname', 'disk_free_space', 'disk_total_space', 'diskfreespace', 'fclose', 'fdatasync', 'feof', 'fflush', 'fgetc', 'fgetcsv', 'fgets', 'fgetss', 'file_exists', 'file_get_contents', 'file_put_contents', 'file', 'fileatime', 'filectime', 'filegroup', 'fileinode', 'filemtime', 'fileowner', 'fileperms', 'filesize', 'filetype', 'flock', 'fnmatch', 'fopen', 'fpassthru', 'fputcsv', 'fputs', 'fread', 'fscanf', 'fseek', 'fstat', 'fsync', 'ftell', 'ftruncate', 'fwrite', 'glob', 'is_dir', 'is_executable', 'is_file', 'is_link', 'is_readable', 'is_uploaded_file', 'is_writable', 'is_writeable', 'lchgrp', 'lchown', 'link', 'linkinfo', 'lstat', 'mkdir', 'move_uploaded_file', 'parse_ini_file', 'parse_ini_string', 'pathinfo', 'pclose', 'popen', 'readfile', 'readlink', 'realpath_cache_get', 'realpath_cache_size', 'realpath', 'rename', 'rewind', 'rmdir', 'set_file_buffer', 'stat', 'symlink', 'tempnam', 'tmpfile', 'touch', 'umask', 'unlink'), 'Filter': ('filter_has_var', 'filter_id', 'filter_input_array', 'filter_input', 'filter_list', 'filter_var_array', 'filter_var'), 'Firebird/InterBase': ('fbird_add_user', 'fbird_affected_rows', 'fbird_backup', 'fbird_blob_add', 'fbird_blob_cancel', 'fbird_blob_close', 'fbird_blob_create', 'fbird_blob_echo', 'fbird_blob_get', 'fbird_blob_import', 'fbird_blob_info', 'fbird_blob_open', 'fbird_close', 'fbird_commit_ret', 'fbird_commit', 'fbird_connect', 'fbird_db_info', 'fbird_delete_user', 'fbird_drop_db', 'fbird_errcode', 'fbird_errmsg', 'fbird_execute', 'fbird_fetch_assoc', 'fbird_fetch_object', 'fbird_fetch_row', 
'fbird_field_info', 'fbird_free_event_handler', 'fbird_free_query', 'fbird_free_result', 'fbird_gen_id', 'fbird_maintain_db', 'fbird_modify_user', 'fbird_name_result', 'fbird_num_fields', 'fbird_num_params', 'fbird_param_info', 'fbird_pconnect', 'fbird_prepare', 'fbird_query', 'fbird_restore', 'fbird_rollback_ret', 'fbird_rollback', 'fbird_server_info', 'fbird_service_attach', 'fbird_service_detach', 'fbird_set_event_handler', 'fbird_trans', 'fbird_wait_event', 'ibase_add_user', 'ibase_affected_rows', 'ibase_backup', 'ibase_blob_add', 'ibase_blob_cancel', 'ibase_blob_close', 'ibase_blob_create', 'ibase_blob_echo', 'ibase_blob_get', 'ibase_blob_import', 'ibase_blob_info', 'ibase_blob_open', 'ibase_close', 'ibase_commit_ret', 'ibase_commit', 'ibase_connect', 'ibase_db_info', 'ibase_delete_user', 'ibase_drop_db', 'ibase_errcode', 'ibase_errmsg', 'ibase_execute', 'ibase_fetch_assoc', 'ibase_fetch_object', 'ibase_fetch_row', 'ibase_field_info', 'ibase_free_event_handler', 'ibase_free_query', 'ibase_free_result', 'ibase_gen_id', 'ibase_maintain_db', 'ibase_modify_user', 'ibase_name_result', 'ibase_num_fields', 'ibase_num_params', 'ibase_param_info', 'ibase_pconnect', 'ibase_prepare', 'ibase_query', 'ibase_restore', 'ibase_rollback_ret', 'ibase_rollback', 'ibase_server_info', 'ibase_service_attach', 'ibase_service_detach', 'ibase_set_event_handler', 'ibase_trans', 'ibase_wait_event'), 'Function handling': ('call_user_func_array', 'call_user_func', 'create_function', 'forward_static_call_array', 'forward_static_call', 'func_get_arg', 'func_get_args', 'func_num_args', 'function_exists', 'get_defined_functions', 'register_shutdown_function', 'register_tick_function', 'unregister_tick_function'), 'GD and Image': ('gd_info', 'getimagesize', 'getimagesizefromstring', 'image_type_to_extension', 'image_type_to_mime_type', 'image2wbmp', 'imageaffine', 'imageaffinematrixconcat', 'imageaffinematrixget', 'imagealphablending', 'imageantialias', 'imagearc', 'imageavif', 'imagebmp', 
'imagechar', 'imagecharup', 'imagecolorallocate', 'imagecolorallocatealpha', 'imagecolorat', 'imagecolorclosest', 'imagecolorclosestalpha', 'imagecolorclosesthwb', 'imagecolordeallocate', 'imagecolorexact', 'imagecolorexactalpha', 'imagecolormatch', 'imagecolorresolve', 'imagecolorresolvealpha', 'imagecolorset', 'imagecolorsforindex', 'imagecolorstotal', 'imagecolortransparent', 'imageconvolution', 'imagecopy', 'imagecopymerge', 'imagecopymergegray', 'imagecopyresampled', 'imagecopyresized', 'imagecreate', 'imagecreatefromavif', 'imagecreatefrombmp', 'imagecreatefromgd2', 'imagecreatefromgd2part', 'imagecreatefromgd', 'imagecreatefromgif', 'imagecreatefromjpeg', 'imagecreatefrompng', 'imagecreatefromstring', 'imagecreatefromtga', 'imagecreatefromwbmp', 'imagecreatefromwebp', 'imagecreatefromxbm', 'imagecreatefromxpm', 'imagecreatetruecolor', 'imagecrop', 'imagecropauto', 'imagedashedline', 'imagedestroy', 'imageellipse', 'imagefill', 'imagefilledarc', 'imagefilledellipse', 'imagefilledpolygon', 'imagefilledrectangle', 'imagefilltoborder', 'imagefilter', 'imageflip', 'imagefontheight', 'imagefontwidth', 'imageftbbox', 'imagefttext', 'imagegammacorrect', 'imagegd2', 'imagegd', 'imagegetclip', 'imagegetinterpolation', 'imagegif', 'imagegrabscreen', 'imagegrabwindow', 'imageinterlace', 'imageistruecolor', 'imagejpeg', 'imagelayereffect', 'imageline', 'imageloadfont', 'imageopenpolygon', 'imagepalettecopy', 'imagepalettetotruecolor', 'imagepng', 'imagepolygon', 'imagerectangle', 'imageresolution', 'imagerotate', 'imagesavealpha', 'imagescale', 'imagesetbrush', 'imagesetclip', 'imagesetinterpolation', 'imagesetpixel', 'imagesetstyle', 'imagesetthickness', 'imagesettile', 'imagestring', 'imagestringup', 'imagesx', 'imagesy', 'imagetruecolortopalette', 'imagettfbbox', 'imagettftext', 'imagetypes', 'imagewbmp', 'imagewebp', 'imagexbm', 'iptcembed', 'iptcparse', 'jpeg2wbmp', 'png2wbmp'), 'GMP': ('gmp_abs', 'gmp_add', 'gmp_and', 'gmp_binomial', 'gmp_clrbit', 'gmp_cmp', 
'gmp_com', 'gmp_div_q', 'gmp_div_qr', 'gmp_div_r', 'gmp_div', 'gmp_divexact', 'gmp_export', 'gmp_fact', 'gmp_gcd', 'gmp_gcdext', 'gmp_hamdist', 'gmp_import', 'gmp_init', 'gmp_intval', 'gmp_invert', 'gmp_jacobi', 'gmp_kronecker', 'gmp_lcm', 'gmp_legendre', 'gmp_mod', 'gmp_mul', 'gmp_neg', 'gmp_nextprime', 'gmp_or', 'gmp_perfect_power', 'gmp_perfect_square', 'gmp_popcount', 'gmp_pow', 'gmp_powm', 'gmp_prob_prime', 'gmp_random_bits', 'gmp_random_range', 'gmp_random_seed', 'gmp_random', 'gmp_root', 'gmp_rootrem', 'gmp_scan0', 'gmp_scan1', 'gmp_setbit', 'gmp_sign', 'gmp_sqrt', 'gmp_sqrtrem', 'gmp_strval', 'gmp_sub', 'gmp_testbit', 'gmp_xor'), 'GeoIP': ('geoip_asnum_by_name', 'geoip_continent_code_by_name', 'geoip_country_code_by_name', 'geoip_country_code3_by_name', 'geoip_country_name_by_name', 'geoip_database_info', 'geoip_db_avail', 'geoip_db_filename', 'geoip_db_get_all_info', 'geoip_domain_by_name', 'geoip_id_by_name', 'geoip_isp_by_name', 'geoip_netspeedcell_by_name', 'geoip_org_by_name', 'geoip_record_by_name', 'geoip_region_by_name', 'geoip_region_name_by_code', 'geoip_setup_custom_directory', 'geoip_time_zone_by_country_and_region'), 'Gettext': ('bind_textdomain_codeset', 'bindtextdomain', 'dcgettext', 'dcngettext', 'dgettext', 'dngettext', 'gettext', 'ngettext', 'textdomain'), 'GnuPG': ('gnupg_adddecryptkey', 'gnupg_addencryptkey', 'gnupg_addsignkey', 'gnupg_cleardecryptkeys', 'gnupg_clearencryptkeys', 'gnupg_clearsignkeys', 'gnupg_decrypt', 'gnupg_decryptverify', 'gnupg_encrypt', 'gnupg_encryptsign', 'gnupg_export', 'gnupg_getengineinfo', 'gnupg_geterror', 'gnupg_geterrorinfo', 'gnupg_getprotocol', 'gnupg_import', 'gnupg_init', 'gnupg_keyinfo', 'gnupg_setarmor', 'gnupg_seterrormode', 'gnupg_setsignmode', 'gnupg_sign', 'gnupg_verify'), 'Grapheme': ('grapheme_extract', 'grapheme_stripos', 'grapheme_stristr', 'grapheme_strlen', 'grapheme_strpos', 'grapheme_strripos', 'grapheme_strrpos', 'grapheme_strstr', 'grapheme_substr'), 'Hash': ('hash_algos', 'hash_copy', 
'hash_equals', 'hash_file', 'hash_final', 'hash_hkdf', 'hash_hmac_algos', 'hash_hmac_file', 'hash_hmac', 'hash_init', 'hash_pbkdf2', 'hash_update_file', 'hash_update_stream', 'hash_update', 'hash'), 'IBM DB2': ('db2_autocommit', 'db2_bind_param', 'db2_client_info', 'db2_close', 'db2_column_privileges', 'db2_columns', 'db2_commit', 'db2_conn_error', 'db2_conn_errormsg', 'db2_connect', 'db2_cursor_type', 'db2_escape_string', 'db2_exec', 'db2_execute', 'db2_fetch_array', 'db2_fetch_assoc', 'db2_fetch_both', 'db2_fetch_object', 'db2_fetch_row', 'db2_field_display_size', 'db2_field_name', 'db2_field_num', 'db2_field_precision', 'db2_field_scale', 'db2_field_type', 'db2_field_width', 'db2_foreign_keys', 'db2_free_result', 'db2_free_stmt', 'db2_get_option', 'db2_last_insert_id', 'db2_lob_read', 'db2_next_result', 'db2_num_fields', 'db2_num_rows', 'db2_pclose', 'db2_pconnect', 'db2_prepare', 'db2_primary_keys', 'db2_procedure_columns', 'db2_procedures', 'db2_result', 'db2_rollback', 'db2_server_info', 'db2_set_option', 'db2_special_columns', 'db2_statistics', 'db2_stmt_error', 'db2_stmt_errormsg', 'db2_table_privileges', 'db2_tables'), 'IDN': ('idn_to_ascii', 'idn_to_utf8'), 'IMAP': ('imap_8bit', 'imap_alerts', 'imap_append', 'imap_base64', 'imap_binary', 'imap_body', 'imap_bodystruct', 'imap_check', 'imap_clearflag_full', 'imap_close', 'imap_create', 'imap_createmailbox', 'imap_delete', 'imap_deletemailbox', 'imap_errors', 'imap_expunge', 'imap_fetch_overview', 'imap_fetchbody', 'imap_fetchheader', 'imap_fetchmime', 'imap_fetchstructure', 'imap_fetchtext', 'imap_gc', 'imap_get_quota', 'imap_get_quotaroot', 'imap_getacl', 'imap_getmailboxes', 'imap_getsubscribed', 'imap_header', 'imap_headerinfo', 'imap_headers', 'imap_last_error', 'imap_list', 'imap_listmailbox', 'imap_listscan', 'imap_listsubscribed', 'imap_lsub', 'imap_mail_compose', 'imap_mail_copy', 'imap_mail_move', 'imap_mail', 'imap_mailboxmsginfo', 'imap_mime_header_decode', 'imap_msgno', 'imap_mutf7_to_utf8', 
'imap_num_msg', 'imap_num_recent', 'imap_open', 'imap_ping', 'imap_qprint', 'imap_rename', 'imap_renamemailbox', 'imap_reopen', 'imap_rfc822_parse_adrlist', 'imap_rfc822_parse_headers', 'imap_rfc822_write_address', 'imap_savebody', 'imap_scan', 'imap_scanmailbox', 'imap_search', 'imap_set_quota', 'imap_setacl', 'imap_setflag_full', 'imap_sort', 'imap_status', 'imap_subscribe', 'imap_thread', 'imap_timeout', 'imap_uid', 'imap_undelete', 'imap_unsubscribe', 'imap_utf7_decode', 'imap_utf7_encode', 'imap_utf8_to_mutf7', 'imap_utf8'), 'Igbinary': ('igbinary_serialize', 'igbinary_unserialize'), 'Inotify': ('inotify_add_watch', 'inotify_init', 'inotify_queue_len', 'inotify_read', 'inotify_rm_watch'), 'JSON': ('json_decode', 'json_encode', 'json_last_error_msg', 'json_last_error'), 'LDAP': ('ldap_8859_to_t61', 'ldap_add_ext', 'ldap_add', 'ldap_bind_ext', 'ldap_bind', 'ldap_close', 'ldap_compare', 'ldap_connect', 'ldap_control_paged_result_response', 'ldap_control_paged_result', 'ldap_count_entries', 'ldap_count_references', 'ldap_delete_ext', 'ldap_delete', 'ldap_dn2ufn', 'ldap_err2str', 'ldap_errno', 'ldap_error', 'ldap_escape', 'ldap_exop_passwd', 'ldap_exop_refresh', 'ldap_exop_whoami', 'ldap_exop', 'ldap_explode_dn', 'ldap_first_attribute', 'ldap_first_entry', 'ldap_first_reference', 'ldap_free_result', 'ldap_get_attributes', 'ldap_get_dn', 'ldap_get_entries', 'ldap_get_option', 'ldap_get_values_len', 'ldap_get_values', 'ldap_list', 'ldap_mod_add_ext', 'ldap_mod_add', 'ldap_mod_del_ext', 'ldap_mod_del', 'ldap_mod_replace_ext', 'ldap_mod_replace', 'ldap_modify_batch', 'ldap_modify', 'ldap_next_attribute', 'ldap_next_entry', 'ldap_next_reference', 'ldap_parse_exop', 'ldap_parse_reference', 'ldap_parse_result', 'ldap_read', 'ldap_rename_ext', 'ldap_rename', 'ldap_sasl_bind', 'ldap_search', 'ldap_set_option', 'ldap_set_rebind_proc', 'ldap_sort', 'ldap_start_tls', 'ldap_t61_to_8859', 'ldap_unbind'), 'LZF': ('lzf_compress', 'lzf_decompress', 'lzf_optimized_for'), 'Mail': 
('ezmlm_hash', 'mail'), 'Mailparse': ('mailparse_determine_best_xfer_encoding', 'mailparse_msg_create', 'mailparse_msg_extract_part_file', 'mailparse_msg_extract_part', 'mailparse_msg_extract_whole_part_file', 'mailparse_msg_free', 'mailparse_msg_get_part_data', 'mailparse_msg_get_part', 'mailparse_msg_get_structure', 'mailparse_msg_parse_file', 'mailparse_msg_parse', 'mailparse_rfc822_parse_addresses', 'mailparse_stream_encode', 'mailparse_uudecode_all'), 'Math': ('abs', 'acos', 'acosh', 'asin', 'asinh', 'atan2', 'atan', 'atanh', 'base_convert', 'bindec', 'ceil', 'cos', 'cosh', 'decbin', 'dechex', 'decoct', 'deg2rad', 'exp', 'expm1', 'fdiv', 'floor', 'fmod', 'getrandmax', 'hexdec', 'hypot', 'intdiv', 'is_finite', 'is_infinite', 'is_nan', 'lcg_value', 'log10', 'log1p', 'log', 'max', 'min', 'mt_getrandmax', 'mt_rand', 'mt_srand', 'octdec', 'pi', 'pow', 'rad2deg', 'rand', 'round', 'sin', 'sinh', 'sqrt', 'srand', 'tan', 'tanh'), 'Mcrypt': ('mcrypt_create_iv', 'mcrypt_decrypt', 'mcrypt_enc_get_algorithms_name', 'mcrypt_enc_get_block_size', 'mcrypt_enc_get_iv_size', 'mcrypt_enc_get_key_size', 'mcrypt_enc_get_modes_name', 'mcrypt_enc_get_supported_key_sizes', 'mcrypt_enc_is_block_algorithm_mode', 'mcrypt_enc_is_block_algorithm', 'mcrypt_enc_is_block_mode', 'mcrypt_enc_self_test', 'mcrypt_encrypt', 'mcrypt_generic_deinit', 'mcrypt_generic_init', 'mcrypt_generic', 'mcrypt_get_block_size', 'mcrypt_get_cipher_name', 'mcrypt_get_iv_size', 'mcrypt_get_key_size', 'mcrypt_list_algorithms', 'mcrypt_list_modes', 'mcrypt_module_close', 'mcrypt_module_get_algo_block_size', 'mcrypt_module_get_algo_key_size', 'mcrypt_module_get_supported_key_sizes', 'mcrypt_module_is_block_algorithm_mode', 'mcrypt_module_is_block_algorithm', 'mcrypt_module_is_block_mode', 'mcrypt_module_open', 'mcrypt_module_self_test', 'mdecrypt_generic'), 'Memcache': ('memcache_debug',), 'Mhash': ('mhash_count', 'mhash_get_block_size', 'mhash_get_hash_name', 'mhash_keygen_s2k', 'mhash'), 'Misc.': 
('connection_aborted', 'connection_status', 'constant', 'define', 'defined', 'die', 'eval', 'exit', 'get_browser', '__halt_compiler', 'highlight_file', 'highlight_string', 'hrtime', 'ignore_user_abort', 'pack', 'php_strip_whitespace', 'sapi_windows_cp_conv', 'sapi_windows_cp_get', 'sapi_windows_cp_is_utf8', 'sapi_windows_cp_set', 'sapi_windows_generate_ctrl_event', 'sapi_windows_set_ctrl_handler', 'sapi_windows_vt100_support', 'show_source', 'sleep', 'sys_getloadavg', 'time_nanosleep', 'time_sleep_until', 'uniqid', 'unpack', 'usleep'), 'Multibyte String': ('mb_check_encoding', 'mb_chr', 'mb_convert_case', 'mb_convert_encoding', 'mb_convert_kana', 'mb_convert_variables', 'mb_decode_mimeheader', 'mb_decode_numericentity', 'mb_detect_encoding', 'mb_detect_order', 'mb_encode_mimeheader', 'mb_encode_numericentity', 'mb_encoding_aliases', 'mb_ereg_match', 'mb_ereg_replace_callback', 'mb_ereg_replace', 'mb_ereg_search_getpos', 'mb_ereg_search_getregs', 'mb_ereg_search_init', 'mb_ereg_search_pos', 'mb_ereg_search_regs', 'mb_ereg_search_setpos', 'mb_ereg_search', 'mb_ereg', 'mb_eregi_replace', 'mb_eregi', 'mb_get_info', 'mb_http_input', 'mb_http_output', 'mb_internal_encoding', 'mb_language', 'mb_list_encodings', 'mb_ord', 'mb_output_handler', 'mb_parse_str', 'mb_preferred_mime_name', 'mb_regex_encoding', 'mb_regex_set_options', 'mb_scrub', 'mb_send_mail', 'mb_split', 'mb_str_split', 'mb_strcut', 'mb_strimwidth', 'mb_stripos', 'mb_stristr', 'mb_strlen', 'mb_strpos', 'mb_strrchr', 'mb_strrichr', 'mb_strripos', 'mb_strrpos', 'mb_strstr', 'mb_strtolower', 'mb_strtoupper', 'mb_strwidth', 'mb_substitute_character', 'mb_substr_count', 'mb_substr'), 'MySQL': ('mysql_affected_rows', 'mysql_client_encoding', 'mysql_close', 'mysql_connect', 'mysql_create_db', 'mysql_data_seek', 'mysql_db_name', 'mysql_db_query', 'mysql_drop_db', 'mysql_errno', 'mysql_error', 'mysql_escape_string', 'mysql_fetch_array', 'mysql_fetch_assoc', 'mysql_fetch_field', 'mysql_fetch_lengths', 
'mysql_fetch_object', 'mysql_fetch_row', 'mysql_field_flags', 'mysql_field_len', 'mysql_field_name', 'mysql_field_seek', 'mysql_field_table', 'mysql_field_type', 'mysql_free_result', 'mysql_get_client_info', 'mysql_get_host_info', 'mysql_get_proto_info', 'mysql_get_server_info', 'mysql_info', 'mysql_insert_id', 'mysql_list_dbs', 'mysql_list_fields', 'mysql_list_processes', 'mysql_list_tables', 'mysql_num_fields', 'mysql_num_rows', 'mysql_pconnect', 'mysql_ping', 'mysql_query', 'mysql_real_escape_string', 'mysql_result', 'mysql_select_db', 'mysql_set_charset', 'mysql_stat', 'mysql_tablename', 'mysql_thread_id', 'mysql_unbuffered_query'), 'Mysql_xdevapi': ('expression', 'getSession'), 'Network': ('checkdnsrr', 'closelog', 'dns_check_record', 'dns_get_mx', 'dns_get_record', 'fsockopen', 'gethostbyaddr', 'gethostbyname', 'gethostbynamel', 'gethostname', 'getmxrr', 'getprotobyname', 'getprotobynumber', 'getservbyname', 'getservbyport', 'header_register_callback', 'header_remove', 'header', 'headers_list', 'headers_sent', 'http_response_code', 'inet_ntop', 'inet_pton', 'ip2long', 'long2ip', 'net_get_interfaces', 'openlog', 'pfsockopen', 'setcookie', 'setrawcookie', 'socket_get_status', 'socket_set_blocking', 'socket_set_timeout', 'syslog'), 'OAuth': ('oauth_get_sbs', 'oauth_urlencode'), 'OCI8': ('oci_bind_array_by_name', 'oci_bind_by_name', 'oci_cancel', 'oci_client_version', 'oci_close', 'oci_commit', 'oci_connect', 'oci_define_by_name', 'oci_error', 'oci_execute', 'oci_fetch_all', 'oci_fetch_array', 'oci_fetch_assoc', 'oci_fetch_object', 'oci_fetch_row', 'oci_fetch', 'oci_field_is_null', 'oci_field_name', 'oci_field_precision', 'oci_field_scale', 'oci_field_size', 'oci_field_type_raw', 'oci_field_type', 'oci_free_descriptor', 'oci_free_statement', 'oci_get_implicit_resultset', 'oci_lob_copy', 'oci_lob_is_equal', 'oci_new_collection', 'oci_new_connect', 'oci_new_cursor', 'oci_new_descriptor', 'oci_num_fields', 'oci_num_rows', 'oci_parse', 'oci_password_change', 
'oci_pconnect', 'oci_register_taf_callback', 'oci_result', 'oci_rollback', 'oci_server_version', 'oci_set_action', 'oci_set_call_timeout', 'oci_set_client_identifier', 'oci_set_client_info', 'oci_set_db_operation', 'oci_set_edition', 'oci_set_module_name', 'oci_set_prefetch_lob', 'oci_set_prefetch', 'oci_statement_type', 'oci_unregister_taf_callback'), 'ODBC': ('odbc_autocommit', 'odbc_binmode', 'odbc_close_all', 'odbc_close', 'odbc_columnprivileges', 'odbc_columns', 'odbc_commit', 'odbc_connect', 'odbc_cursor', 'odbc_data_source', 'odbc_do', 'odbc_error', 'odbc_errormsg', 'odbc_exec', 'odbc_execute', 'odbc_fetch_array', 'odbc_fetch_into', 'odbc_fetch_object', 'odbc_fetch_row', 'odbc_field_len', 'odbc_field_name', 'odbc_field_num', 'odbc_field_precision', 'odbc_field_scale', 'odbc_field_type', 'odbc_foreignkeys', 'odbc_free_result', 'odbc_gettypeinfo', 'odbc_longreadlen', 'odbc_next_result', 'odbc_num_fields', 'odbc_num_rows', 'odbc_pconnect', 'odbc_prepare', 'odbc_primarykeys', 'odbc_procedurecolumns', 'odbc_procedures', 'odbc_result_all', 'odbc_result', 'odbc_rollback', 'odbc_setoption', 'odbc_specialcolumns', 'odbc_statistics', 'odbc_tableprivileges', 'odbc_tables'), 'OPcache': ('opcache_compile_file', 'opcache_get_configuration', 'opcache_get_status', 'opcache_invalidate', 'opcache_is_script_cached', 'opcache_reset'), 'OpenAL': ('openal_buffer_create', 'openal_buffer_data', 'openal_buffer_destroy', 'openal_buffer_get', 'openal_buffer_loadwav', 'openal_context_create', 'openal_context_current', 'openal_context_destroy', 'openal_context_process', 'openal_context_suspend', 'openal_device_close', 'openal_device_open', 'openal_listener_get', 'openal_listener_set', 'openal_source_create', 'openal_source_destroy', 'openal_source_get', 'openal_source_pause', 'openal_source_play', 'openal_source_rewind', 'openal_source_set', 'openal_source_stop', 'openal_stream'), 'OpenSSL': ('openssl_cipher_iv_length', 'openssl_cms_decrypt', 'openssl_cms_encrypt', 'openssl_cms_read', 
'openssl_cms_sign', 'openssl_cms_verify', 'openssl_csr_export_to_file', 'openssl_csr_export', 'openssl_csr_get_public_key', 'openssl_csr_get_subject', 'openssl_csr_new', 'openssl_csr_sign', 'openssl_decrypt', 'openssl_dh_compute_key', 'openssl_digest', 'openssl_encrypt', 'openssl_error_string', 'openssl_free_key', 'openssl_get_cert_locations', 'openssl_get_cipher_methods', 'openssl_get_curve_names', 'openssl_get_md_methods', 'openssl_get_privatekey', 'openssl_get_publickey', 'openssl_open', 'openssl_pbkdf2', 'openssl_pkcs12_export_to_file', 'openssl_pkcs12_export', 'openssl_pkcs12_read', 'openssl_pkcs7_decrypt', 'openssl_pkcs7_encrypt', 'openssl_pkcs7_read', 'openssl_pkcs7_sign', 'openssl_pkcs7_verify', 'openssl_pkey_derive', 'openssl_pkey_export_to_file', 'openssl_pkey_export', 'openssl_pkey_free', 'openssl_pkey_get_details', 'openssl_pkey_get_private', 'openssl_pkey_get_public', 'openssl_pkey_new', 'openssl_private_decrypt', 'openssl_private_encrypt', 'openssl_public_decrypt', 'openssl_public_encrypt', 'openssl_random_pseudo_bytes', 'openssl_seal', 'openssl_sign', 'openssl_spki_export_challenge', 'openssl_spki_export', 'openssl_spki_new', 'openssl_spki_verify', 'openssl_verify', 'openssl_x509_check_private_key', 'openssl_x509_checkpurpose', 'openssl_x509_export_to_file', 'openssl_x509_export', 'openssl_x509_fingerprint', 'openssl_x509_free', 'openssl_x509_parse', 'openssl_x509_read', 'openssl_x509_verify'), 'Output Control': ('flush', 'ob_clean', 'ob_end_clean', 'ob_end_flush', 'ob_flush', 'ob_get_clean', 'ob_get_contents', 'ob_get_flush', 'ob_get_length', 'ob_get_level', 'ob_get_status', 'ob_gzhandler', 'ob_implicit_flush', 'ob_list_handlers', 'ob_start', 'output_add_rewrite_var', 'output_reset_rewrite_vars'), 'PCNTL': ('pcntl_alarm', 'pcntl_async_signals', 'pcntl_errno', 'pcntl_exec', 'pcntl_fork', 'pcntl_get_last_error', 'pcntl_getpriority', 'pcntl_setpriority', 'pcntl_signal_dispatch', 'pcntl_signal_get_handler', 'pcntl_signal', 'pcntl_sigprocmask', 
'pcntl_sigtimedwait', 'pcntl_sigwaitinfo', 'pcntl_strerror', 'pcntl_wait', 'pcntl_waitpid', 'pcntl_wexitstatus', 'pcntl_wifexited', 'pcntl_wifsignaled', 'pcntl_wifstopped', 'pcntl_wstopsig', 'pcntl_wtermsig'), 'PCRE': ('preg_filter', 'preg_grep', 'preg_last_error_msg', 'preg_last_error', 'preg_match_all', 'preg_match', 'preg_quote', 'preg_replace_callback_array', 'preg_replace_callback', 'preg_replace', 'preg_split'), 'PHP Options/Info': ('assert_options', 'assert', 'cli_get_process_title', 'cli_set_process_title', 'dl', 'extension_loaded', 'gc_collect_cycles', 'gc_disable', 'gc_enable', 'gc_enabled', 'gc_mem_caches', 'gc_status', 'get_cfg_var', 'get_current_user', 'get_defined_constants', 'get_extension_funcs', 'get_include_path', 'get_included_files', 'get_loaded_extensions', 'get_magic_quotes_gpc', 'get_magic_quotes_runtime', 'get_required_files', 'get_resources', 'getenv', 'getlastmod', 'getmygid', 'getmyinode', 'getmypid', 'getmyuid', 'getopt', 'getrusage', 'ini_alter', 'ini_get_all', 'ini_get', 'ini_restore', 'ini_set', 'memory_get_peak_usage', 'memory_get_usage', 'php_ini_loaded_file', 'php_ini_scanned_files', 'php_sapi_name', 'php_uname', 'phpcredits', 'phpinfo', 'phpversion', 'putenv', 'restore_include_path', 'set_include_path', 'set_time_limit', 'sys_get_temp_dir', 'version_compare', 'zend_thread_id', 'zend_version'), 'POSIX': ('posix_access', 'posix_ctermid', 'posix_errno', 'posix_get_last_error', 'posix_getcwd', 'posix_getegid', 'posix_geteuid', 'posix_getgid', 'posix_getgrgid', 'posix_getgrnam', 'posix_getgroups', 'posix_getlogin', 'posix_getpgid', 'posix_getpgrp', 'posix_getpid', 'posix_getppid', 'posix_getpwnam', 'posix_getpwuid', 'posix_getrlimit', 'posix_getsid', 'posix_getuid', 'posix_initgroups', 'posix_isatty', 'posix_kill', 'posix_mkfifo', 'posix_mknod', 'posix_setegid', 'posix_seteuid', 'posix_setgid', 'posix_setpgid', 'posix_setrlimit', 'posix_setsid', 'posix_setuid', 'posix_strerror', 'posix_times', 'posix_ttyname', 'posix_uname'), 'PS': 
('ps_add_bookmark', 'ps_add_launchlink', 'ps_add_locallink', 'ps_add_note', 'ps_add_pdflink', 'ps_add_weblink', 'ps_arc', 'ps_arcn', 'ps_begin_page', 'ps_begin_pattern', 'ps_begin_template', 'ps_circle', 'ps_clip', 'ps_close_image', 'ps_close', 'ps_closepath_stroke', 'ps_closepath', 'ps_continue_text', 'ps_curveto', 'ps_delete', 'ps_end_page', 'ps_end_pattern', 'ps_end_template', 'ps_fill_stroke', 'ps_fill', 'ps_findfont', 'ps_get_buffer', 'ps_get_parameter', 'ps_get_value', 'ps_hyphenate', 'ps_include_file', 'ps_lineto', 'ps_makespotcolor', 'ps_moveto', 'ps_new', 'ps_open_file', 'ps_open_image_file', 'ps_open_image', 'ps_open_memory_image', 'ps_place_image', 'ps_rect', 'ps_restore', 'ps_rotate', 'ps_save', 'ps_scale', 'ps_set_border_color', 'ps_set_border_dash', 'ps_set_border_style', 'ps_set_info', 'ps_set_parameter', 'ps_set_text_pos', 'ps_set_value', 'ps_setcolor', 'ps_setdash', 'ps_setflat', 'ps_setfont', 'ps_setgray', 'ps_setlinecap', 'ps_setlinejoin', 'ps_setlinewidth', 'ps_setmiterlimit', 'ps_setoverprintmode', 'ps_setpolydash', 'ps_shading_pattern', 'ps_shading', 'ps_shfill', 'ps_show_boxed', 'ps_show_xy2', 'ps_show_xy', 'ps_show2', 'ps_show', 'ps_string_geometry', 'ps_stringwidth', 'ps_stroke', 'ps_symbol_name', 'ps_symbol_width', 'ps_symbol', 'ps_translate'), 'Password Hashing': ('password_algos', 'password_get_info', 'password_hash', 'password_needs_rehash', 'password_verify'), 'PostgreSQL': ('pg_affected_rows', 'pg_cancel_query', 'pg_client_encoding', 'pg_close', 'pg_connect_poll', 'pg_connect', 'pg_connection_busy', 'pg_connection_reset', 'pg_connection_status', 'pg_consume_input', 'pg_convert', 'pg_copy_from', 'pg_copy_to', 'pg_dbname', 'pg_delete', 'pg_end_copy', 'pg_escape_bytea', 'pg_escape_identifier', 'pg_escape_literal', 'pg_escape_string', 'pg_execute', 'pg_fetch_all_columns', 'pg_fetch_all', 'pg_fetch_array', 'pg_fetch_assoc', 'pg_fetch_object', 'pg_fetch_result', 'pg_fetch_row', 'pg_field_is_null', 'pg_field_name', 'pg_field_num', 
'pg_field_prtlen', 'pg_field_size', 'pg_field_table', 'pg_field_type_oid', 'pg_field_type', 'pg_flush', 'pg_free_result', 'pg_get_notify', 'pg_get_pid', 'pg_get_result', 'pg_host', 'pg_insert', 'pg_last_error', 'pg_last_notice', 'pg_last_oid', 'pg_lo_close', 'pg_lo_create', 'pg_lo_export', 'pg_lo_import', 'pg_lo_open', 'pg_lo_read_all', 'pg_lo_read', 'pg_lo_seek', 'pg_lo_tell', 'pg_lo_truncate', 'pg_lo_unlink', 'pg_lo_write', 'pg_meta_data', 'pg_num_fields', 'pg_num_rows', 'pg_options', 'pg_parameter_status', 'pg_pconnect', 'pg_ping', 'pg_port', 'pg_prepare', 'pg_put_line', 'pg_query_params', 'pg_query', 'pg_result_error_field', 'pg_result_error', 'pg_result_seek', 'pg_result_status', 'pg_select', 'pg_send_execute', 'pg_send_prepare', 'pg_send_query_params', 'pg_send_query', 'pg_set_client_encoding', 'pg_set_error_verbosity', 'pg_socket', 'pg_trace', 'pg_transaction_status', 'pg_tty', 'pg_unescape_bytea', 'pg_untrace', 'pg_update', 'pg_version'), 'Program execution': ('escapeshellarg', 'escapeshellcmd', 'exec', 'passthru', 'proc_close', 'proc_get_status', 'proc_nice', 'proc_open', 'proc_terminate', 'shell_exec', 'system'), 'Pspell': ('pspell_add_to_personal', 'pspell_add_to_session', 'pspell_check', 'pspell_clear_session', 'pspell_config_create', 'pspell_config_data_dir', 'pspell_config_dict_dir', 'pspell_config_ignore', 'pspell_config_mode', 'pspell_config_personal', 'pspell_config_repl', 'pspell_config_runtogether', 'pspell_config_save_repl', 'pspell_new_config', 'pspell_new_personal', 'pspell_new', 'pspell_save_wordlist', 'pspell_store_replacement', 'pspell_suggest'), 'RRD': ('rrd_create', 'rrd_error', 'rrd_fetch', 'rrd_first', 'rrd_graph', 'rrd_info', 'rrd_last', 'rrd_lastupdate', 'rrd_restore', 'rrd_tune', 'rrd_update', 'rrd_version', 'rrd_xport', 'rrdc_disconnect'), 'Radius': ('radius_acct_open', 'radius_add_server', 'radius_auth_open', 'radius_close', 'radius_config', 'radius_create_request', 'radius_cvt_addr', 'radius_cvt_int', 'radius_cvt_string', 
'radius_demangle_mppe_key', 'radius_demangle', 'radius_get_attr', 'radius_get_tagged_attr_data', 'radius_get_tagged_attr_tag', 'radius_get_vendor_attr', 'radius_put_addr', 'radius_put_attr', 'radius_put_int', 'radius_put_string', 'radius_put_vendor_addr', 'radius_put_vendor_attr', 'radius_put_vendor_int', 'radius_put_vendor_string', 'radius_request_authenticator', 'radius_salt_encrypt_attr', 'radius_send_request', 'radius_server_secret', 'radius_strerror'), 'Rar': ('rar_wrapper_cache_stats',), 'Readline': ('readline_add_history', 'readline_callback_handler_install', 'readline_callback_handler_remove', 'readline_callback_read_char', 'readline_clear_history', 'readline_completion_function', 'readline_info', 'readline_list_history', 'readline_on_new_line', 'readline_read_history', 'readline_redisplay', 'readline_write_history', 'readline'), 'Recode': ('recode_file', 'recode_string', 'recode'), 'RpmInfo': ('rpmaddtag', 'rpmdbinfo', 'rpmdbsearch', 'rpminfo', 'rpmvercmp'), 'SNMP': ('snmp_get_quick_print', 'snmp_get_valueretrieval', 'snmp_read_mib', 'snmp_set_enum_print', 'snmp_set_oid_numeric_print', 'snmp_set_oid_output_format', 'snmp_set_quick_print', 'snmp_set_valueretrieval', 'snmp2_get', 'snmp2_getnext', 'snmp2_real_walk', 'snmp2_set', 'snmp2_walk', 'snmp3_get', 'snmp3_getnext', 'snmp3_real_walk', 'snmp3_set', 'snmp3_walk', 'snmpget', 'snmpgetnext', 'snmprealwalk', 'snmpset', 'snmpwalk', 'snmpwalkoid'), 'SOAP': ('is_soap_fault', 'use_soap_error_handler'), 'SPL': ('class_implements', 'class_parents', 'class_uses', 'iterator_apply', 'iterator_count', 'iterator_to_array', 'spl_autoload_call', 'spl_autoload_extensions', 'spl_autoload_functions', 'spl_autoload_register', 'spl_autoload_unregister', 'spl_autoload', 'spl_classes', 'spl_object_hash', 'spl_object_id'), 'SQLSRV': ('sqlsrv_begin_transaction', 'sqlsrv_cancel', 'sqlsrv_client_info', 'sqlsrv_close', 'sqlsrv_commit', 'sqlsrv_configure', 'sqlsrv_connect', 'sqlsrv_errors', 'sqlsrv_execute', 'sqlsrv_fetch_array', 
'sqlsrv_fetch_object', 'sqlsrv_fetch', 'sqlsrv_field_metadata', 'sqlsrv_free_stmt', 'sqlsrv_get_config', 'sqlsrv_get_field', 'sqlsrv_has_rows', 'sqlsrv_next_result', 'sqlsrv_num_fields', 'sqlsrv_num_rows', 'sqlsrv_prepare', 'sqlsrv_query', 'sqlsrv_rollback', 'sqlsrv_rows_affected', 'sqlsrv_send_stream_data', 'sqlsrv_server_info'), 'SSH2': ('ssh2_auth_agent', 'ssh2_auth_hostbased_file', 'ssh2_auth_none', 'ssh2_auth_password', 'ssh2_auth_pubkey_file', 'ssh2_connect', 'ssh2_disconnect', 'ssh2_exec', 'ssh2_fetch_stream', 'ssh2_fingerprint', 'ssh2_forward_accept', 'ssh2_forward_listen', 'ssh2_methods_negotiated', 'ssh2_poll', 'ssh2_publickey_add', 'ssh2_publickey_init', 'ssh2_publickey_list', 'ssh2_publickey_remove', 'ssh2_scp_recv', 'ssh2_scp_send', 'ssh2_send_eof', 'ssh2_sftp_chmod', 'ssh2_sftp_lstat', 'ssh2_sftp_mkdir', 'ssh2_sftp_readlink', 'ssh2_sftp_realpath', 'ssh2_sftp_rename', 'ssh2_sftp_rmdir', 'ssh2_sftp_stat', 'ssh2_sftp_symlink', 'ssh2_sftp_unlink', 'ssh2_sftp', 'ssh2_shell', 'ssh2_tunnel'), 'SVN': ('svn_add', 'svn_auth_get_parameter', 'svn_auth_set_parameter', 'svn_blame', 'svn_cat', 'svn_checkout', 'svn_cleanup', 'svn_client_version', 'svn_commit', 'svn_delete', 'svn_diff', 'svn_export', 'svn_fs_abort_txn', 'svn_fs_apply_text', 'svn_fs_begin_txn2', 'svn_fs_change_node_prop', 'svn_fs_check_path', 'svn_fs_contents_changed', 'svn_fs_copy', 'svn_fs_delete', 'svn_fs_dir_entries', 'svn_fs_file_contents', 'svn_fs_file_length', 'svn_fs_is_dir', 'svn_fs_is_file', 'svn_fs_make_dir', 'svn_fs_make_file', 'svn_fs_node_created_rev', 'svn_fs_node_prop', 'svn_fs_props_changed', 'svn_fs_revision_prop', 'svn_fs_revision_root', 'svn_fs_txn_root', 'svn_fs_youngest_rev', 'svn_import', 'svn_log', 'svn_ls', 'svn_mkdir', 'svn_repos_create', 'svn_repos_fs_begin_txn_for_commit', 'svn_repos_fs_commit_txn', 'svn_repos_fs', 'svn_repos_hotcopy', 'svn_repos_open', 'svn_repos_recover', 'svn_revert', 'svn_status', 'svn_update'), 'Scoutapm': ('scoutapm_get_calls', 
'scoutapm_list_instrumented_functions'), 'Seaslog': ('seaslog_get_author', 'seaslog_get_version'), 'Semaphore': ('ftok', 'msg_get_queue', 'msg_queue_exists', 'msg_receive', 'msg_remove_queue', 'msg_send', 'msg_set_queue', 'msg_stat_queue', 'sem_acquire', 'sem_get', 'sem_release', 'sem_remove', 'shm_attach', 'shm_detach', 'shm_get_var', 'shm_has_var', 'shm_put_var', 'shm_remove_var', 'shm_remove'), 'Session': ('session_abort', 'session_cache_expire', 'session_cache_limiter', 'session_commit', 'session_create_id', 'session_decode', 'session_destroy', 'session_encode', 'session_gc', 'session_get_cookie_params', 'session_id', 'session_module_name', 'session_name', 'session_regenerate_id', 'session_register_shutdown', 'session_reset', 'session_save_path', 'session_set_cookie_params', 'session_set_save_handler', 'session_start', 'session_status', 'session_unset', 'session_write_close'), 'Shared Memory': ('shmop_close', 'shmop_delete', 'shmop_open', 'shmop_read', 'shmop_size', 'shmop_write'), 'SimpleXML': ('simplexml_import_dom', 'simplexml_load_file', 'simplexml_load_string'), 'Socket': ('socket_accept', 'socket_addrinfo_bind', 'socket_addrinfo_connect', 'socket_addrinfo_explain', 'socket_addrinfo_lookup', 'socket_bind', 'socket_clear_error', 'socket_close', 'socket_cmsg_space', 'socket_connect', 'socket_create_listen', 'socket_create_pair', 'socket_create', 'socket_export_stream', 'socket_get_option', 'socket_getopt', 'socket_getpeername', 'socket_getsockname', 'socket_import_stream', 'socket_last_error', 'socket_listen', 'socket_read', 'socket_recv', 'socket_recvfrom', 'socket_recvmsg', 'socket_select', 'socket_send', 'socket_sendmsg', 'socket_sendto', 'socket_set_block', 'socket_set_nonblock', 'socket_set_option', 'socket_setopt', 'socket_shutdown', 'socket_strerror', 'socket_write', 'socket_wsaprotocol_info_export', 'socket_wsaprotocol_info_import', 'socket_wsaprotocol_info_release'), 'Sodium': ('sodium_add', 'sodium_base642bin', 'sodium_bin2base64', 
'sodium_bin2hex', 'sodium_compare', 'sodium_crypto_aead_aes256gcm_decrypt', 'sodium_crypto_aead_aes256gcm_encrypt', 'sodium_crypto_aead_aes256gcm_is_available', 'sodium_crypto_aead_aes256gcm_keygen', 'sodium_crypto_aead_chacha20poly1305_decrypt', 'sodium_crypto_aead_chacha20poly1305_encrypt', 'sodium_crypto_aead_chacha20poly1305_ietf_decrypt', 'sodium_crypto_aead_chacha20poly1305_ietf_encrypt', 'sodium_crypto_aead_chacha20poly1305_ietf_keygen', 'sodium_crypto_aead_chacha20poly1305_keygen', 'sodium_crypto_aead_xchacha20poly1305_ietf_decrypt', 'sodium_crypto_aead_xchacha20poly1305_ietf_encrypt', 'sodium_crypto_aead_xchacha20poly1305_ietf_keygen', 'sodium_crypto_auth_keygen', 'sodium_crypto_auth_verify', 'sodium_crypto_auth', 'sodium_crypto_box_keypair_from_secretkey_and_publickey', 'sodium_crypto_box_keypair', 'sodium_crypto_box_open', 'sodium_crypto_box_publickey_from_secretkey', 'sodium_crypto_box_publickey', 'sodium_crypto_box_seal_open', 'sodium_crypto_box_seal', 'sodium_crypto_box_secretkey', 'sodium_crypto_box_seed_keypair', 'sodium_crypto_box', 'sodium_crypto_generichash_final', 'sodium_crypto_generichash_init', 'sodium_crypto_generichash_keygen', 'sodium_crypto_generichash_update', 'sodium_crypto_generichash', 'sodium_crypto_kdf_derive_from_key', 'sodium_crypto_kdf_keygen', 'sodium_crypto_kx_client_session_keys', 'sodium_crypto_kx_keypair', 'sodium_crypto_kx_publickey', 'sodium_crypto_kx_secretkey', 'sodium_crypto_kx_seed_keypair', 'sodium_crypto_kx_server_session_keys', 'sodium_crypto_pwhash_scryptsalsa208sha256_str_verify', 'sodium_crypto_pwhash_scryptsalsa208sha256_str', 'sodium_crypto_pwhash_scryptsalsa208sha256', 'sodium_crypto_pwhash_str_needs_rehash', 'sodium_crypto_pwhash_str_verify', 'sodium_crypto_pwhash_str', 'sodium_crypto_pwhash', 'sodium_crypto_scalarmult_base', 'sodium_crypto_scalarmult', 'sodium_crypto_secretbox_keygen', 'sodium_crypto_secretbox_open', 'sodium_crypto_secretbox', 'sodium_crypto_secretstream_xchacha20poly1305_init_pull', 
'sodium_crypto_secretstream_xchacha20poly1305_init_push', 'sodium_crypto_secretstream_xchacha20poly1305_keygen', 'sodium_crypto_secretstream_xchacha20poly1305_pull', 'sodium_crypto_secretstream_xchacha20poly1305_push', 'sodium_crypto_secretstream_xchacha20poly1305_rekey', 'sodium_crypto_shorthash_keygen', 'sodium_crypto_shorthash', 'sodium_crypto_sign_detached', 'sodium_crypto_sign_ed25519_pk_to_curve25519', 'sodium_crypto_sign_ed25519_sk_to_curve25519', 'sodium_crypto_sign_keypair_from_secretkey_and_publickey', 'sodium_crypto_sign_keypair', 'sodium_crypto_sign_open', 'sodium_crypto_sign_publickey_from_secretkey', 'sodium_crypto_sign_publickey', 'sodium_crypto_sign_secretkey', 'sodium_crypto_sign_seed_keypair', 'sodium_crypto_sign_verify_detached', 'sodium_crypto_sign', 'sodium_crypto_stream_keygen', 'sodium_crypto_stream_xor', 'sodium_crypto_stream', 'sodium_hex2bin', 'sodium_increment', 'sodium_memcmp', 'sodium_memzero', 'sodium_pad', 'sodium_unpad'), 'Solr': ('solr_get_version',), 'Stomp': ('stomp_connect_error', 'stomp_version'), 'Stream': ('stream_bucket_append', 'stream_bucket_make_writeable', 'stream_bucket_new', 'stream_bucket_prepend', 'stream_context_create', 'stream_context_get_default', 'stream_context_get_options', 'stream_context_get_params', 'stream_context_set_default', 'stream_context_set_option', 'stream_context_set_params', 'stream_copy_to_stream', 'stream_filter_append', 'stream_filter_prepend', 'stream_filter_register', 'stream_filter_remove', 'stream_get_contents', 'stream_get_filters', 'stream_get_line', 'stream_get_meta_data', 'stream_get_transports', 'stream_get_wrappers', 'stream_is_local', 'stream_isatty', 'stream_notification_callback', 'stream_register_wrapper', 'stream_resolve_include_path', 'stream_select', 'stream_set_blocking', 'stream_set_chunk_size', 'stream_set_read_buffer', 'stream_set_timeout', 'stream_set_write_buffer', 'stream_socket_accept', 'stream_socket_client', 'stream_socket_enable_crypto', 'stream_socket_get_name', 
'stream_socket_pair', 'stream_socket_recvfrom', 'stream_socket_sendto', 'stream_socket_server', 'stream_socket_shutdown', 'stream_supports_lock', 'stream_wrapper_register', 'stream_wrapper_restore', 'stream_wrapper_unregister'), 'String': ('addcslashes', 'addslashes', 'bin2hex', 'chop', 'chr', 'chunk_split', 'convert_cyr_string', 'convert_uudecode', 'convert_uuencode', 'count_chars', 'crc32', 'crypt', 'echo', 'explode', 'fprintf', 'get_html_translation_table', 'hebrev', 'hebrevc', 'hex2bin', 'html_entity_decode', 'htmlentities', 'htmlspecialchars_decode', 'htmlspecialchars', 'implode', 'join', 'lcfirst', 'levenshtein', 'localeconv', 'ltrim', 'md5_file', 'md5', 'metaphone', 'money_format', 'nl_langinfo', 'nl2br', 'number_format', 'ord', 'parse_str', 'print', 'printf', 'quoted_printable_decode', 'quoted_printable_encode', 'quotemeta', 'rtrim', 'setlocale', 'sha1_file', 'sha1', 'similar_text', 'soundex', 'sprintf', 'sscanf', 'str_contains', 'str_ends_with', 'str_getcsv', 'str_ireplace', 'str_pad', 'str_repeat', 'str_replace', 'str_rot13', 'str_shuffle', 'str_split', 'str_starts_with', 'str_word_count', 'strcasecmp', 'strchr', 'strcmp', 'strcoll', 'strcspn', 'strip_tags', 'stripcslashes', 'stripos', 'stripslashes', 'stristr', 'strlen', 'strnatcasecmp', 'strnatcmp', 'strncasecmp', 'strncmp', 'strpbrk', 'strpos', 'strrchr', 'strrev', 'strripos', 'strrpos', 'strspn', 'strstr', 'strtok', 'strtolower', 'strtoupper', 'strtr', 'substr_compare', 'substr_count', 'substr_replace', 'substr', 'trim', 'ucfirst', 'ucwords', 'vfprintf', 'vprintf', 'vsprintf', 'wordwrap'), 'Swoole': ('swoole_async_dns_lookup', 'swoole_async_read', 'swoole_async_readfile', 'swoole_async_set', 'swoole_async_write', 'swoole_async_writefile', 'swoole_clear_error', 'swoole_client_select', 'swoole_cpu_num', 'swoole_errno', 'swoole_error_log', 'swoole_event_add', 'swoole_event_defer', 'swoole_event_del', 'swoole_event_exit', 'swoole_event_set', 'swoole_event_wait', 'swoole_event_write', 
'swoole_get_local_ip', 'swoole_last_error', 'swoole_load_module', 'swoole_select', 'swoole_set_process_name', 'swoole_strerror', 'swoole_timer_after', 'swoole_timer_exists', 'swoole_timer_tick', 'swoole_version'), 'TCP': ('tcpwrap_check',), 'Taint': ('is_tainted', 'taint', 'untaint'), 'Tidy': ('ob_tidyhandler', 'tidy_access_count', 'tidy_config_count', 'tidy_error_count', 'tidy_get_output', 'tidy_warning_count'), 'Tokenizer': ('token_get_all', 'token_name'), 'Trader': ('trader_acos', 'trader_ad', 'trader_add', 'trader_adosc', 'trader_adx', 'trader_adxr', 'trader_apo', 'trader_aroon', 'trader_aroonosc', 'trader_asin', 'trader_atan', 'trader_atr', 'trader_avgprice', 'trader_bbands', 'trader_beta', 'trader_bop', 'trader_cci', 'trader_cdl2crows', 'trader_cdl3blackcrows', 'trader_cdl3inside', 'trader_cdl3linestrike', 'trader_cdl3outside', 'trader_cdl3starsinsouth', 'trader_cdl3whitesoldiers', 'trader_cdlabandonedbaby', 'trader_cdladvanceblock', 'trader_cdlbelthold', 'trader_cdlbreakaway', 'trader_cdlclosingmarubozu', 'trader_cdlconcealbabyswall', 'trader_cdlcounterattack', 'trader_cdldarkcloudcover', 'trader_cdldoji', 'trader_cdldojistar', 'trader_cdldragonflydoji', 'trader_cdlengulfing', 'trader_cdleveningdojistar', 'trader_cdleveningstar', 'trader_cdlgapsidesidewhite', 'trader_cdlgravestonedoji', 'trader_cdlhammer', 'trader_cdlhangingman', 'trader_cdlharami', 'trader_cdlharamicross', 'trader_cdlhighwave', 'trader_cdlhikkake', 'trader_cdlhikkakemod', 'trader_cdlhomingpigeon', 'trader_cdlidentical3crows', 'trader_cdlinneck', 'trader_cdlinvertedhammer', 'trader_cdlkicking', 'trader_cdlkickingbylength', 'trader_cdlladderbottom', 'trader_cdllongleggeddoji', 'trader_cdllongline', 'trader_cdlmarubozu', 'trader_cdlmatchinglow', 'trader_cdlmathold', 'trader_cdlmorningdojistar', 'trader_cdlmorningstar', 'trader_cdlonneck', 'trader_cdlpiercing', 'trader_cdlrickshawman', 'trader_cdlrisefall3methods', 'trader_cdlseparatinglines', 'trader_cdlshootingstar', 'trader_cdlshortline', 
'trader_cdlspinningtop', 'trader_cdlstalledpattern', 'trader_cdlsticksandwich', 'trader_cdltakuri', 'trader_cdltasukigap', 'trader_cdlthrusting', 'trader_cdltristar', 'trader_cdlunique3river', 'trader_cdlupsidegap2crows', 'trader_cdlxsidegap3methods', 'trader_ceil', 'trader_cmo', 'trader_correl', 'trader_cos', 'trader_cosh', 'trader_dema', 'trader_div', 'trader_dx', 'trader_ema', 'trader_errno', 'trader_exp', 'trader_floor', 'trader_get_compat', 'trader_get_unstable_period', 'trader_ht_dcperiod', 'trader_ht_dcphase', 'trader_ht_phasor', 'trader_ht_sine', 'trader_ht_trendline', 'trader_ht_trendmode', 'trader_kama', 'trader_linearreg_angle', 'trader_linearreg_intercept', 'trader_linearreg_slope', 'trader_linearreg', 'trader_ln', 'trader_log10', 'trader_ma', 'trader_macd', 'trader_macdext', 'trader_macdfix', 'trader_mama', 'trader_mavp', 'trader_max', 'trader_maxindex', 'trader_medprice', 'trader_mfi', 'trader_midpoint', 'trader_midprice', 'trader_min', 'trader_minindex', 'trader_minmax', 'trader_minmaxindex', 'trader_minus_di', 'trader_minus_dm', 'trader_mom', 'trader_mult', 'trader_natr', 'trader_obv', 'trader_plus_di', 'trader_plus_dm', 'trader_ppo', 'trader_roc', 'trader_rocp', 'trader_rocr100', 'trader_rocr', 'trader_rsi', 'trader_sar', 'trader_sarext', 'trader_set_compat', 'trader_set_unstable_period', 'trader_sin', 'trader_sinh', 'trader_sma', 'trader_sqrt', 'trader_stddev', 'trader_stoch', 'trader_stochf', 'trader_stochrsi', 'trader_sub', 'trader_sum', 'trader_t3', 'trader_tan', 'trader_tanh', 'trader_tema', 'trader_trange', 'trader_trima', 'trader_trix', 'trader_tsf', 'trader_typprice', 'trader_ultosc', 'trader_var', 'trader_wclprice', 'trader_willr', 'trader_wma'), 'URL': ('base64_decode', 'base64_encode', 'get_headers', 'get_meta_tags', 'http_build_query', 'parse_url', 'rawurldecode', 'rawurlencode', 'urldecode', 'urlencode'), 'Uopz': ('uopz_add_function', 'uopz_allow_exit', 'uopz_backup', 'uopz_compose', 'uopz_copy', 'uopz_del_function', 'uopz_delete', 
'uopz_extend', 'uopz_flags', 'uopz_function', 'uopz_get_exit_status', 'uopz_get_hook', 'uopz_get_mock', 'uopz_get_property', 'uopz_get_return', 'uopz_get_static', 'uopz_implement', 'uopz_overload', 'uopz_redefine', 'uopz_rename', 'uopz_restore', 'uopz_set_hook', 'uopz_set_mock', 'uopz_set_property', 'uopz_set_return', 'uopz_set_static', 'uopz_undefine', 'uopz_unset_hook', 'uopz_unset_mock', 'uopz_unset_return'), 'Variable handling': ('boolval', 'debug_zval_dump', 'doubleval', 'empty', 'floatval', 'get_debug_type', 'get_defined_vars', 'get_resource_id', 'get_resource_type', 'gettype', 'intval', 'is_array', 'is_bool', 'is_callable', 'is_countable', 'is_double', 'is_float', 'is_int', 'is_integer', 'is_iterable', 'is_long', 'is_null', 'is_numeric', 'is_object', 'is_real', 'is_resource', 'is_scalar', 'is_string', 'isset', 'print_r', 'serialize', 'settype', 'strval', 'unserialize', 'unset', 'var_dump', 'var_export'), 'WDDX': ('wddx_add_vars', 'wddx_deserialize', 'wddx_packet_end', 'wddx_packet_start', 'wddx_serialize_value', 'wddx_serialize_vars'), 'WinCache': ('wincache_fcache_fileinfo', 'wincache_fcache_meminfo', 'wincache_lock', 'wincache_ocache_fileinfo', 'wincache_ocache_meminfo', 'wincache_refresh_if_changed', 'wincache_rplist_fileinfo', 'wincache_rplist_meminfo', 'wincache_scache_info', 'wincache_scache_meminfo', 'wincache_ucache_add', 'wincache_ucache_cas', 'wincache_ucache_clear', 'wincache_ucache_dec', 'wincache_ucache_delete', 'wincache_ucache_exists', 'wincache_ucache_get', 'wincache_ucache_inc', 'wincache_ucache_info', 'wincache_ucache_meminfo', 'wincache_ucache_set', 'wincache_unlock'), 'XML Parser': ('utf8_decode', 'utf8_encode', 'xml_error_string', 'xml_get_current_byte_index', 'xml_get_current_column_number', 'xml_get_current_line_number', 'xml_get_error_code', 'xml_parse_into_struct', 'xml_parse', 'xml_parser_create_ns', 'xml_parser_create', 'xml_parser_free', 'xml_parser_get_option', 'xml_parser_set_option', 'xml_set_character_data_handler', 
'xml_set_default_handler', 'xml_set_element_handler', 'xml_set_end_namespace_decl_handler', 'xml_set_external_entity_ref_handler', 'xml_set_notation_decl_handler', 'xml_set_object', 'xml_set_processing_instruction_handler', 'xml_set_start_namespace_decl_handler', 'xml_set_unparsed_entity_decl_handler'), 'XML-RPC': ('xmlrpc_decode_request', 'xmlrpc_decode', 'xmlrpc_encode_request', 'xmlrpc_encode', 'xmlrpc_get_type', 'xmlrpc_is_fault', 'xmlrpc_parse_method_descriptions', 'xmlrpc_server_add_introspection_data', 'xmlrpc_server_call_method', 'xmlrpc_server_create', 'xmlrpc_server_destroy', 'xmlrpc_server_register_introspection_callback', 'xmlrpc_server_register_method', 'xmlrpc_set_type'), 'Xhprof': ('xhprof_disable', 'xhprof_enable', 'xhprof_sample_disable', 'xhprof_sample_enable'), 'YAZ': ('yaz_addinfo', 'yaz_ccl_conf', 'yaz_ccl_parse', 'yaz_close', 'yaz_connect', 'yaz_database', 'yaz_element', 'yaz_errno', 'yaz_error', 'yaz_es_result', 'yaz_es', 'yaz_get_option', 'yaz_hits', 'yaz_itemorder', 'yaz_present', 'yaz_range', 'yaz_record', 'yaz_scan_result', 'yaz_scan', 'yaz_schema', 'yaz_search', 'yaz_set_option', 'yaz_sort', 'yaz_syntax', 'yaz_wait'), 'Yaml': ('yaml_emit_file', 'yaml_emit', 'yaml_parse_file', 'yaml_parse_url', 'yaml_parse'), 'Zip': ('zip_close', 'zip_entry_close', 'zip_entry_compressedsize', 'zip_entry_compressionmethod', 'zip_entry_filesize', 'zip_entry_name', 'zip_entry_open', 'zip_entry_read', 'zip_open', 'zip_read'), 'Zlib': ('deflate_add', 'deflate_init', 'gzclose', 'gzcompress', 'gzdecode', 'gzdeflate', 'gzencode', 'gzeof', 'gzfile', 'gzgetc', 'gzgets', 'gzgetss', 'gzinflate', 'gzopen', 'gzpassthru', 'gzputs', 'gzread', 'gzrewind', 'gzseek', 'gztell', 'gzuncompress', 'gzwrite', 'inflate_add', 'inflate_get_read_len', 'inflate_get_status', 'inflate_init', 'readgzfile', 'zlib_decode', 'zlib_encode', 'zlib_get_coding_type'), 'ZooKeeper': ('zookeeper_dispatch',), 'cURL': ('curl_close', 'curl_copy_handle', 'curl_errno', 'curl_error', 'curl_escape', 
'curl_exec', 'curl_file_create', 'curl_getinfo', 'curl_init', 'curl_multi_add_handle', 'curl_multi_close', 'curl_multi_errno', 'curl_multi_exec', 'curl_multi_getcontent', 'curl_multi_info_read', 'curl_multi_init', 'curl_multi_remove_handle', 'curl_multi_select', 'curl_multi_setopt', 'curl_multi_strerror', 'curl_pause', 'curl_reset', 'curl_setopt_array', 'curl_setopt', 'curl_share_close', 'curl_share_errno', 'curl_share_init', 'curl_share_setopt', 'curl_share_strerror', 'curl_strerror', 'curl_unescape', 'curl_version'), 'dBase': ('dbase_add_record', 'dbase_close', 'dbase_create', 'dbase_delete_record', 'dbase_get_header_info', 'dbase_get_record_with_names', 'dbase_get_record', 'dbase_numfields', 'dbase_numrecords', 'dbase_open', 'dbase_pack', 'dbase_replace_record'), 'iconv': ('iconv_get_encoding', 'iconv_mime_decode_headers', 'iconv_mime_decode', 'iconv_mime_encode', 'iconv_set_encoding', 'iconv_strlen', 'iconv_strpos', 'iconv_strrpos', 'iconv_substr', 'iconv', 'ob_iconv_handler'), 'intl': ('intl_error_name', 'intl_get_error_code', 'intl_get_error_message', 'intl_is_failure'), 'libxml': ('libxml_clear_errors', 'libxml_disable_entity_loader', 'libxml_get_errors', 'libxml_get_last_error', 'libxml_set_external_entity_loader', 'libxml_set_streams_context', 'libxml_use_internal_errors'), 'mqseries': ('mqseries_back', 'mqseries_begin', 'mqseries_close', 'mqseries_cmit', 'mqseries_conn', 'mqseries_connx', 'mqseries_disc', 'mqseries_get', 'mqseries_inq', 'mqseries_open', 'mqseries_put1', 'mqseries_put', 'mqseries_set', 'mqseries_strerror'), 'phpdbg': ('phpdbg_break_file', 'phpdbg_break_function', 'phpdbg_break_method', 'phpdbg_break_next', 'phpdbg_clear', 'phpdbg_color', 'phpdbg_end_oplog', 'phpdbg_exec', 'phpdbg_get_executable', 'phpdbg_prompt', 'phpdbg_start_oplog'), 'runkit7': ('runkit7_constant_add', 'runkit7_constant_redefine', 'runkit7_constant_remove', 'runkit7_function_add', 'runkit7_function_copy', 'runkit7_function_redefine', 'runkit7_function_remove', 
'runkit7_function_rename', 'runkit7_import', 'runkit7_method_add', 'runkit7_method_copy', 'runkit7_method_redefine', 'runkit7_method_remove', 'runkit7_method_rename', 'runkit7_object_id', 'runkit7_superglobals', 'runkit7_zval_inspect'), 'ssdeep': ('ssdeep_fuzzy_compare', 'ssdeep_fuzzy_hash_filename', 'ssdeep_fuzzy_hash'), 'var_representation': ('var_representation',), 'win32service': ('win32_continue_service', 'win32_create_service', 'win32_delete_service', 'win32_get_last_control_message', 'win32_pause_service', 'win32_query_service_status', 'win32_send_custom_control', 'win32_set_service_exit_code', 'win32_set_service_exit_mode', 'win32_set_service_status', 'win32_start_service_ctrl_dispatcher', 'win32_start_service', 'win32_stop_service'), 'xattr': ('xattr_get', 'xattr_list', 'xattr_remove', 'xattr_set', 'xattr_supported'), 'xdiff': ('xdiff_file_bdiff_size', 'xdiff_file_bdiff', 'xdiff_file_bpatch', 'xdiff_file_diff_binary', 'xdiff_file_diff', 'xdiff_file_merge3', 'xdiff_file_patch_binary', 'xdiff_file_patch', 'xdiff_file_rabdiff', 'xdiff_string_bdiff_size', 'xdiff_string_bdiff', 'xdiff_string_bpatch', 'xdiff_string_diff_binary', 'xdiff_string_diff', 'xdiff_string_merge3', 'xdiff_string_patch_binary', 'xdiff_string_patch', 'xdiff_string_rabdiff')} if __name__ == '__main__': # pragma: no cover import glob import os import pprint import re import shutil import tarfile from urllib.request import urlretrieve PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz' PHP_MANUAL_DIR = './php-chunked-xhtml/' PHP_REFERENCE_GLOB = 'ref.*' PHP_FUNCTION_RE = r'<a href="function\..*?\.html">(.*?)</a>' PHP_MODULE_RE = '<title>(.*?) 
Functions</title>' def get_php_functions(): function_re = re.compile(PHP_FUNCTION_RE) module_re = re.compile(PHP_MODULE_RE) modules = {} for file in get_php_references(): module = '' with open(file) as f: for line in f: if not module: search = module_re.search(line) if search: module = search.group(1) modules[module] = [] elif 'href="function.' in line: for match in function_re.finditer(line): fn = match.group(1) if '»' not in fn and '«' not in fn and \ '::' not in fn and '\\' not in fn and \ fn not in modules[module]: modules[module].append(fn) if module: # These are dummy manual pages, not actual functions if module == 'Filesystem': modules[module].remove('delete') if not modules[module]: del modules[module] for key in modules: modules[key] = tuple(modules[key]) return modules def get_php_references(): download = urlretrieve(PHP_MANUAL_URL) with tarfile.open(download[0]) as tar: tar.extractall() yield from glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB)) os.remove(download[0]) def regenerate(filename, modules): with open(filename) as fp: content = fp.read() header = content[:content.find('MODULES = {')] footer = content[content.find("if __name__ == '__main__':"):] with open(filename, 'w') as fp: fp.write(header) fp.write('MODULES = %s\n\n' % pprint.pformat(modules)) fp.write(footer) def run(): print('>> Downloading Function Index') modules = get_php_functions() total = sum(len(v) for v in modules.values()) print('%d functions found' % total) regenerate(__file__, modules) shutil.rmtree(PHP_MANUAL_DIR) run()
107,874
Python
31.433854
87
0.438335
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/smalltalk.py
""" pygments.lexers.smalltalk ~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Smalltalk and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation __all__ = ['SmalltalkLexer', 'NewspeakLexer'] class SmalltalkLexer(RegexLexer): """ For Smalltalk syntax. Contributed by Stefan Matthias Aust. Rewritten by Nils Winter. .. versionadded:: 0.10 """ name = 'Smalltalk' url = 'http://www.smalltalk.org/' filenames = ['*.st'] aliases = ['smalltalk', 'squeak', 'st'] mimetypes = ['text/x-smalltalk'] tokens = { 'root': [ (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), include('squeak fileout'), include('whitespaces'), include('method definition'), (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), include('objects'), (r'\^|\:=|\_', Operator), # temporaries (r'[\]({}.;!]', Text), ], 'method definition': [ # Not perfect can't allow whitespaces at the beginning and the # without breaking everything (r'([a-zA-Z]+\w*:)(\s*)(\w+)', bygroups(Name.Function, Text, Name.Variable)), (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', bygroups(Name.Function, Text, Name.Variable, Text)), ], 'blockvariables': [ include('whitespaces'), (r'(:)(\s*)(\w+)', bygroups(Operator, Text, Name.Variable)), (r'\|', Operator, '#pop'), default('#pop'), # else pop ], 'literals': [ (r"'(''|[^'])*'", String, 'afterobject'), (r'\$.', String.Char, 'afterobject'), (r'#\(', String.Symbol, 'parenth'), (r'\)', Text, 'afterobject'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), ], '_parenth_helper': [ include('whitespaces'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), # literals (r"'(''|[^'])*'", String), (r'\$.', String.Char), (r'#*\(', String.Symbol, 
'inner_parenth'), ], 'parenth': [ # This state is a bit tricky since # we can't just pop this state (r'\)', String.Symbol, ('root', 'afterobject')), include('_parenth_helper'), ], 'inner_parenth': [ (r'\)', String.Symbol, '#pop'), include('_parenth_helper'), ], 'whitespaces': [ # skip whitespace and comments (r'\s+', Text), (r'"(""|[^"])*"', Comment), ], 'objects': [ (r'\[', Text, 'blockvariables'), (r'\]', Text, 'afterobject'), (r'\b(self|super|true|false|nil|thisContext)\b', Name.Builtin.Pseudo, 'afterobject'), (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', String.Symbol, 'afterobject'), include('literals'), ], 'afterobject': [ (r'! !$', Keyword, '#pop'), # squeak chunk delimiter include('whitespaces'), (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', Name.Builtin, '#pop'), (r'\b(new\b(?!:))', Name.Builtin), (r'\:=|\_', Operator, '#pop'), (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), (r'\b[a-zA-Z]+\w*', Name.Function), (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), (r'\.', Punctuation, '#pop'), (r';', Punctuation), (r'[\])}]', Text), (r'[\[({]', Text, '#pop'), ], 'squeak fileout': [ # Squeak fileout format (optional) (r'^"(""|[^"])*"!', Keyword), (r"^'(''|[^'])*'!", Keyword), (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", bygroups(Keyword, Name.Class, Keyword, String, Keyword)), (r'^(\w+)( subclass: )(#\w+)' r'(\s+instanceVariableNames: )(.*?)' r'(\s+classVariableNames: )(.*?)' r'(\s+poolDictionaries: )(.*?)' r'(\s+category: )(.*?)(!)', bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, String, Keyword, String, Keyword, String, Keyword)), (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', bygroups(Name.Class, Keyword, String, Keyword)), (r'(!\n)(\].*)(! 
!)$', bygroups(Keyword, Text, Keyword)), (r'! !$', Keyword), ], } class NewspeakLexer(RegexLexer): """ For Newspeak syntax. .. versionadded:: 1.1 """ name = 'Newspeak' url = 'http://newspeaklanguage.org/' filenames = ['*.ns2'] aliases = ['newspeak', ] mimetypes = ['text/x-newspeak'] tokens = { 'root': [ (r'\b(Newsqueak2)\b', Keyword.Declaration), (r"'[^']*'", String), (r'\b(class)(\s+)(\w+)(\s*)', bygroups(Keyword.Declaration, Text, Name.Class, Text)), (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', Keyword), (r'(\w+\:)(\s*)([a-zA-Z_]\w+)', bygroups(Name.Function, Text, Name.Variable)), (r'(\w+)(\s*)(=)', bygroups(Name.Attribute, Text, Operator)), (r'<\w+>', Comment.Special), include('expressionstat'), include('whitespace') ], 'expressionstat': [ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'\d+', Number.Integer), (r':\w+', Name.Variable), (r'(\w+)(::)', bygroups(Name.Variable, Operator)), (r'\w+:', Name.Function), (r'\w+', Name.Variable), (r'\(|\)', Punctuation), (r'\[|\]', Punctuation), (r'\{|\}', Punctuation), (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), (r'\.|;', Punctuation), include('whitespace'), include('literals'), ], 'literals': [ (r'\$.', String), (r"'[^']*'", String), (r"#'[^']*'", String.Symbol), (r"#\w+:?", String.Symbol), (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) ], 'whitespace': [ (r'\s+', Text), (r'"[^"]*"', Comment) ], }
7,206
Python
35.583756
88
0.425618
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/foxpro.py
""" pygments.lexers.foxpro ~~~~~~~~~~~~~~~~~~~~~~ Simple lexer for Microsoft Visual FoxPro source code. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \ Name, String __all__ = ['FoxProLexer'] class FoxProLexer(RegexLexer): """Lexer for Microsoft Visual FoxPro language. FoxPro syntax allows to shorten all keywords and function names to 4 characters. Shortened forms are not recognized by this lexer. .. versionadded:: 1.6 """ name = 'FoxPro' aliases = ['foxpro', 'vfp', 'clipper', 'xbase'] filenames = ['*.PRG', '*.prg'] mimetype = [] flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ (r';\s*\n', Punctuation), # consume newline (r'(^|\n)\s*', Text, 'newline'), # Square brackets may be used for array indices # and for string literal. Look for arrays # before matching string literals. (r'(?<=\w)\[[0-9, ]+\]', Text), (r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String), (r'(^\s*\*|&&|&amp;&amp;).*?\n', Comment.Single), (r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|' r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|' r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|' r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|' r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|' r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|' r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|' r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|' r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|' r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|' r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|' r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|' r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|' r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|' r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|' 
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|' r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|' r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|' r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|' r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|' r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|' r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|' r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|' r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|' r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|' r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|' r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|' r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|' r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|' r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|' r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|' r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|' r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|' r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|' r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|' r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|' r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|' r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|' r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|' r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|' r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|' r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|' r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|' r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|' r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|' r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|' r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|' r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|' r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|' r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|' 
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|' r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|' r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|' r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|' r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|' r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|' r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|' r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|' r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|' r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|' r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|' r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|' r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|' r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|' r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|' r'YEAR)(?=\s*\()', Name.Function), (r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|' r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|' r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|' r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|' r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|' r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|' r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|' r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|' r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|' r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|' r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|' r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|' r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo), (r'THISFORMSET|THISFORM|THIS', Name.Builtin), (r'Application|CheckBox|Collection|Column|ComboBox|' r'CommandButton|CommandGroup|Container|Control|CursorAdapter|' r'Cursor|Custom|DataEnvironment|DataObject|EditBox|' r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|' r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|' 
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|' r'Project|Relation|ReportListener|Separator|Servers|Server|' r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|' r'XMLAdapter|XMLField|XMLTable', Name.Class), (r'm\.[a-z_]\w*', Name.Variable), (r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word), (r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|' r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|' r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|' r'AllowCellSelection|AllowDelete|AllowHeaderSizing|' r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|' r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|' r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|' r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|' r'AutoCompSource|AutoCompTable|AutoHideScrollBar|' r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|' r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|' r'BaseClass|BatchUpdateCount|BindControls|BorderColor|' r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|' r'BreakOnError|BufferModeOverride|BufferMode|' r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|' r'Centered|Century|ChildAlias|ChildOrder|ChildTable|' r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|' r'ColorScheme|ColorSource|ColumnCount|ColumnLines|' r'ColumnOrder|Columns|ColumnWidths|CommandClauses|' r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|' r'ContinuousScroll|ControlBox|ControlCount|Controls|' r'ControlSource|ConversionFunc|Count|CurrentControl|' r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|' r'CursorSchema|CursorSource|CursorStatus|Curvature|' r'Database|DataSessionID|DataSession|DataSourceType|' r'DataSource|DataType|DateFormat|DateMark|Debug|' r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|' r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|' r'DeleteCmd|DeleteMark|Description|Desktop|' r'Details|DisabledBackColor|DisabledForeColor|' 
r'DisabledItemBackColor|DisabledItemForeColor|' r'DisabledPicture|DisableEncode|DisplayCount|' r'DisplayValue|Dockable|Docked|DockPosition|' r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|' r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|' r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|' r'DynamicFontItalic|DynamicFontStrikethru|' r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|' r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|' r'DynamicLineHeight|EditorOptions|Enabled|' r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|' r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|' r'FetchMemoDataSource|FetchMemo|FetchSize|' r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|' r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|' r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|' r'FontExtend|FontName|FontOutline|FontShadow|FontSize|' r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|' r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|' r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|' r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|' r'HelpContextID|HideSelection|HighlightBackColor|' r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|' r'HighlightRow|Highlight|HomeDir|Hours|HostName|' r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|' r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|' r'InsertCmdDataSource|InsertCmdRefreshCmd|' r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|' r'InsertCmd|Instancing|IntegralHeight|' r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|' r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|' r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|' r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|' r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|' r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|' r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|' 
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|' r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|' r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|' r'MemoWindow|Message|MinButton|MinHeight|MinWidth|' r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|' r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|' r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|' r'NumberOfElements|Object|OLEClass|OLEDragMode|' r'OLEDragPicture|OLEDropEffects|OLEDropHasData|' r'OLEDropMode|OLEDropTextInsertion|OLELCID|' r'OLERequestPendingTimeout|OLEServerBusyRaiseError|' r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|' r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|' r'OutputPageCount|OutputType|PageCount|PageHeight|' r'PageNo|PageOrder|Pages|PageTotal|PageWidth|' r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|' r'Parent|Partition|PasswordChar|PictureMargin|' r'PicturePosition|PictureSpacing|PictureSelectionDisplay|' r'PictureVal|Picture|Prepared|' r'PolyPoints|PreserveWhiteSpace|PreviewContainer|' r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|' r'ProjectHookLibrary|ProjectHook|QuietMode|' r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|' r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|' r'RecordSource|RefreshAlias|' r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|' r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|' r'RelativeColumn|RelativeRow|ReleaseType|Resizable|' r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|' r'Rotation|RowColChange|RowHeight|RowSourceType|' r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|' r'Seconds|SelectCmd|SelectedID|' r'SelectedItemBackColor|SelectedItemForeColor|Selected|' r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|' r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|' r'ServerClass|ServerHelpFile|ServerName|' r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|' r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|' 
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|' r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|' r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|' r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|' r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|' r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|' r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|' r'UpdateCmdDataSourceType|UpdateCmdDataSource|' r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|' r'UpdateCmdRefreshKeyFieldList|UpdateCmd|' r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|' r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|' r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|' r'VersionCompany|VersionCopyright|VersionDescription|' r'VersionNumber|VersionProduct|VersionTrademarks|Version|' r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|' r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|' r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|' r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|' r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|' r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|' r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|' r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|' r'XSDtype|ZoomBox)', Name.Attribute), (r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|' r'AddProperty|AddTableSchema|AddToSCC|Add|' r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|' r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|' r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|' r'Close|Cls|CursorAttach|CursorDetach|CursorFill|' r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|' r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|' r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|' r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|' r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|' 
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|' r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|' r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|' r'RemoveItem|RemoveListItem|RemoveObject|Remove|' r'Render|Requery|RequestData|ResetToDefault|Reset|Run|' r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|' r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|' r'SupportsListenerType|TextHeight|TextWidth|ToCursor|' r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|' r'WriteExpression|WriteMethod|ZOrder)', Name.Function), (r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|' r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|' r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|' r'AfterCursorUpdate|AfterDelete|AfterInsert|' r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|' r'AfterRowColChange|BeforeBand|BeforeCursorAttach|' r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|' r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|' r'BeforeInsert|BeforeDock|BeforeOpenTables|' r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|' r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|' r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|' r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|' r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|' r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|' r'dbc_AfterDropOffline|dbc_AfterDropTable|' r'dbc_AfterModifyConnection|dbc_AfterModifyProc|' r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|' r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|' r'dbc_AfterRenameTable|dbc_AfterRenameView|' r'dbc_AfterValidateData|dbc_BeforeAddTable|' r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|' r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|' r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|' r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|' r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|' r'dbc_BeforeDropOffline|dbc_BeforeDropTable|' 
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|' r'dbc_BeforeModifyTable|dbc_BeforeModifyView|' r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|' r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|' r'dbc_BeforeRenameView|dbc_BeforeValidateData|' r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|' r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|' r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|' r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|' r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|' r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|' r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|' r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|' r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|' r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|' r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|' r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|' r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function), (r'\s+', Text), # everything else is not colored (r'.', Text), ], 'newline': [ (r'\*.*?$', Comment.Single, '#pop'), (r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|' r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|' r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|' r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|' r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|' r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|' r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|' r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|' r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|' r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|' r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|' r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|' r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|' r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|' 
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|' r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|' r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|' r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|' r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|' r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|' r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|' r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|' r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|' r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|' r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|' r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|' r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|' r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|' r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|' r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|' r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|' r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|' r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|' r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|' r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|' r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|' r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|' r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|' r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|' r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|' r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|' r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|' r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|' r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|' r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|' r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|' r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|' r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|' r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|' r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|' 
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|' r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|' r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|' r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|' r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|' r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|' r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|' r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|' r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|' r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|' r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|' r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|' r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|' r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|' r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|' r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|' r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|' r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|' r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|' r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|' r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|' r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|' r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|' r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)', Keyword.Reserved, '#pop'), (r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)', Comment.Preproc, '#pop'), (r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'), (r'.', Text, '#pop'), ], }
26,212
Python
60.245327
80
0.657752
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/fantom.py
""" pygments.lexers.fantom ~~~~~~~~~~~~~~~~~~~~~~ Lexer for the Fantom language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from string import Template from pygments.lexer import RegexLexer, include, bygroups, using, \ this, default, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Literal, Whitespace __all__ = ['FantomLexer'] class FantomLexer(RegexLexer): """ For Fantom source code. .. versionadded:: 1.5 """ name = 'Fantom' aliases = ['fan'] filenames = ['*.fan'] mimetypes = ['application/x-fantom'] # often used regexes def s(str): return Template(str).substitute( dict( pod=r'[\"\w\.]+', eos=r'\n|;', id=r'[a-zA-Z_]\w*', # all chars which can be part of type definition. Starts with # either letter, or [ (maps), or | (funcs) type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?', ) ) tokens = { 'comments': [ (r'(?s)/\*.*?\*/', Comment.Multiline), # Multiline (r'//.*?$', Comment.Single), # Single line # TODO: highlight references in fandocs (r'\*\*.*?$', Comment.Special), # Fandoc (r'#.*$', Comment.Single) # Shell-style ], 'literals': [ (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration with dot (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), # Float/Decimal (r'\b-?0x[0-9a-fA-F_]+', Number.Hex), # Hex (r'\b-?[\d_]+', Number.Integer), # Int (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), # Char (r'"', Punctuation, 'insideStr'), # Opening quote (r'`', Punctuation, 'insideUri'), # Opening accent (r'\b(true|false|null)\b', Keyword.Constant), # Bool & null (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', # DSL bygroups(Name.Namespace, Punctuation, Name.Class, Punctuation, String, Punctuation)), (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', # Type/slot literal bygroups(Name.Namespace, Punctuation, Name.Class, Punctuation, Name.Function)), (r'\[,\]', Literal), # Empty list (s(r'($type)(\[,\])'), # Typed empty 
list bygroups(using(this, state='inType'), Literal)), (r'\[:\]', Literal), # Empty Map (s(r'($type)(\[:\])'), bygroups(using(this, state='inType'), Literal)), ], 'insideStr': [ (r'\\\\', String.Escape), # Escaped backslash (r'\\"', String.Escape), # Escaped " (r'\\`', String.Escape), # Escaped ` (r'\$\w+', String.Interpol), # Subst var (r'\$\{.*?\}', String.Interpol), # Subst expr (r'"', Punctuation, '#pop'), # Closing quot (r'.', String) # String content ], 'insideUri': [ # TODO: remove copy/paste str/uri (r'\\\\', String.Escape), # Escaped backslash (r'\\"', String.Escape), # Escaped " (r'\\`', String.Escape), # Escaped ` (r'\$\w+', String.Interpol), # Subst var (r'\$\{.*?\}', String.Interpol), # Subst expr (r'`', Punctuation, '#pop'), # Closing tick (r'.', String.Backtick) # URI content ], 'protectionKeywords': [ (r'\b(public|protected|private|internal)\b', Keyword), ], 'typeKeywords': [ (r'\b(abstract|final|const|native|facet|enum)\b', Keyword), ], 'methodKeywords': [ (r'\b(abstract|native|once|override|static|virtual|final)\b', Keyword), ], 'fieldKeywords': [ (r'\b(abstract|const|final|native|override|static|virtual|' r'readonly)\b', Keyword) ], 'otherKeywords': [ (words(( 'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while', 'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue', 'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'), Keyword), (r'\b(it|this|super)\b', Name.Builtin.Pseudo), ], 'operators': [ (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator) ], 'inType': [ (r'[\[\]|\->:?]', Punctuation), (s(r'$id'), Name.Class), default('#pop'), ], 'root': [ include('comments'), include('protectionKeywords'), include('typeKeywords'), include('methodKeywords'), include('fieldKeywords'), include('literals'), include('otherKeywords'), include('operators'), (r'using\b', Keyword.Namespace, 'using'), # Using stmt (r'@\w+', Name.Decorator, 'facet'), # Symbol (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Whitespace, 
Name.Class), 'inheritance'), # Inheritance list # Type var := val (s(r'($type)([ \t]+)($id)(\s*)(:=)'), bygroups(using(this, state='inType'), Whitespace, Name.Variable, Whitespace, Operator)), # var := val (s(r'($id)(\s*)(:=)'), bygroups(Name.Variable, Whitespace, Operator)), # .someId( or ->someId( ### (s(r'(\.|(?:\->))($id)(\s*)(\()'), bygroups(Operator, Name.Function, Whitespace, Punctuation), 'insideParen'), # .someId or ->someId (s(r'(\.|(?:\->))($id)'), bygroups(Operator, Name.Function)), # new makeXXX ( (r'(new)(\s+)(make\w*)(\s*)(\()', bygroups(Keyword, Whitespace, Name.Function, Whitespace, Punctuation), 'insideMethodDeclArgs'), # Type name ( (s(r'($type)([ \t]+)' # Return type and whitespace r'($id)(\s*)(\()'), # method name + open brace bygroups(using(this, state='inType'), Whitespace, Name.Function, Whitespace, Punctuation), 'insideMethodDeclArgs'), # ArgType argName, (s(r'($type)(\s+)($id)(\s*)(,)'), bygroups(using(this, state='inType'), Whitespace, Name.Variable, Whitespace, Punctuation)), # ArgType argName) # Covered in 'insideParen' state # ArgType argName -> ArgType| (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'), bygroups(using(this, state='inType'), Whitespace, Name.Variable, Whitespace, Punctuation, Whitespace, using(this, state='inType'), Punctuation)), # ArgType argName| (s(r'($type)(\s+)($id)(\s*)(\|)'), bygroups(using(this, state='inType'), Whitespace, Name.Variable, Whitespace, Punctuation)), # Type var (s(r'($type)([ \t]+)($id)'), bygroups(using(this, state='inType'), Whitespace, Name.Variable)), (r'\(', Punctuation, 'insideParen'), (r'\{', Punctuation, 'insideBrace'), (r'\s+', Whitespace), (r'.', Text) ], 'insideParen': [ (r'\)', Punctuation, '#pop'), include('root'), ], 'insideMethodDeclArgs': [ (r'\)', Punctuation, '#pop'), (s(r'($type)(\s+)($id)(\s*)(\))'), bygroups(using(this, state='inType'), Whitespace, Name.Variable, Whitespace, Punctuation), '#pop'), include('root'), ], 'insideBrace': [ (r'\}', Punctuation, '#pop'), 
include('root'), ], 'inheritance': [ (r'\s+', Whitespace), # Whitespace (r':|,', Punctuation), (r'(?:(\w+)(::))?(\w+)', bygroups(Name.Namespace, Punctuation, Name.Class)), (r'\{', Punctuation, '#pop') ], 'using': [ (r'[ \t]+', Whitespace), # consume whitespaces (r'(\[)(\w+)(\])', bygroups(Punctuation, Comment.Special, Punctuation)), # ffi (r'(\")?([\w.]+)(\")?', bygroups(Punctuation, Name.Namespace, Punctuation)), # podname (r'::', Punctuation, 'usingClass'), default('#pop') ], 'usingClass': [ (r'[ \t]+', Whitespace), # consume whitespaces (r'(as)(\s+)(\w+)', bygroups(Keyword.Declaration, Whitespace, Name.Class), '#pop:2'), (r'[\w$]+', Name.Class), default('#pop:2') # jump out to root state ], 'facet': [ (r'\s+', Whitespace), (r'\{', Punctuation, 'facetFields'), default('#pop') ], 'facetFields': [ include('comments'), include('literals'), include('operators'), (r'\s+', Whitespace), (r'(\s*)(\w+)(\s*)(=)', bygroups(Whitespace, Name, Whitespace, Operator)), (r'\}', Punctuation, '#pop'), (r'\s+', Whitespace), (r'.', Text) ], }
10,197
Python
39.468254
87
0.419143
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/kuin.py
""" pygments.lexers.kuin ~~~~~~~~~~~~~~~~~~~~ Lexers for the Kuin language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, using, this, bygroups, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['KuinLexer'] class KuinLexer(RegexLexer): """ For Kuin source code. .. versionadded:: 2.9 """ name = 'Kuin' url = 'https://github.com/kuina/Kuin' aliases = ['kuin'] filenames = ['*.kn'] tokens = { 'root': [ include('statement'), ], 'statement': [ # Whitespace / Comment include('whitespace'), # Block-statement (r'(\+?)([ \t]*)(\*?)([ \t]*)(\bfunc)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword,Whitespace, Keyword, Whitespace, Keyword, using(this), Name.Function), 'func_'), (r'\b(class)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Class), 'class_'), (r'\b(enum)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Constant), 'enum_'), (r'\b(block)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'block_'), (r'\b(ifdef)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'ifdef_'), (r'\b(if)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'if_'), (r'\b(switch)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'switch_'), (r'\b(while)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'while_'), (r'\b(for)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'for_'), (r'\b(foreach)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 
'foreach_'), (r'\b(try)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'try_'), # Line-statement (r'\b(do)\b', Keyword, 'do'), (r'(\+?[ \t]*\bvar)\b', Keyword, 'var'), (r'\b(const)\b', Keyword, 'const'), (r'\b(ret)\b', Keyword, 'ret'), (r'\b(throw)\b', Keyword, 'throw'), (r'\b(alias)\b', Keyword, 'alias'), (r'\b(assert)\b', Keyword, 'assert'), (r'\|', Text, 'continued_line'), (r'[ \t]*\n', Whitespace), ], # Whitespace / Comment 'whitespace': [ (r'^([ \t]*)(;.*)', bygroups(Comment.Single, Whitespace)), (r'[ \t]+(?![; \t])', Whitespace), (r'\{', Comment.Multiline, 'multiline_comment'), ], 'multiline_comment': [ (r'\{', Comment.Multiline, 'multiline_comment'), (r'(?:\s*;.*|[^{}\n]+)', Comment.Multiline), (r'\n', Comment.Multiline), (r'\}', Comment.Multiline, '#pop'), ], # Block-statement 'func_': [ include('expr'), (r'\n', Whitespace, 'func'), ], 'func': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(func)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), ], 'class_': [ include('expr'), (r'\n', Whitespace, 'class'), ], 'class': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(class)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), ], 'enum_': [ include('expr'), (r'\n', Whitespace, 'enum'), ], 'enum': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(enum)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), include('expr'), (r'\n', Whitespace), ], 'block_': [ include('expr'), (r'\n', Whitespace, 'block'), ], 'block': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(block)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), ], 'ifdef_': [ include('expr'), (r'\n', Whitespace, 'ifdef'), ], 'ifdef': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(ifdef)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), (words(('rls', 'dbg'), prefix=r'\b', suffix=r'\b'), Keyword.Constant, 'ifdef_sp'), include('statement'), include('break'), include('skip'), 
], 'ifdef_sp': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'if_': [ include('expr'), (r'\n', Whitespace, 'if'), ], 'if': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(if)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), (words(('elif', 'else'), prefix=r'\b', suffix=r'\b'), Keyword, 'if_sp'), include('statement'), include('break'), include('skip'), ], 'if_sp': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'switch_': [ include('expr'), (r'\n', Whitespace, 'switch'), ], 'switch': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(switch)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), (words(('case', 'default', 'to'), prefix=r'\b', suffix=r'\b'), Keyword, 'switch_sp'), include('statement'), include('break'), include('skip'), ], 'switch_sp': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'while_': [ include('expr'), (r'\n', Whitespace, 'while'), ], 'while': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(while)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), ], 'for_': [ include('expr'), (r'\n', Whitespace, 'for'), ], 'for': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(for)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), ], 'foreach_': [ include('expr'), (r'\n', Whitespace, 'foreach'), ], 'foreach': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(foreach)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), ], 'try_': [ include('expr'), (r'\n', Whitespace, 'try'), ], 'try': [ (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(try)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), (words(('catch', 'finally', 'to'), prefix=r'\b', suffix=r'\b'), Keyword, 'try_sp'), include('statement'), include('break'), include('skip'), ], 'try_sp': [ include('expr'), (r'\n', Whitespace, '#pop'), ], # Line-statement 'break': [ (r'\b(break)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Other)), 
], 'skip': [ (r'\b(skip)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Other)), ], 'alias': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'assert': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'const': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'do': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'ret': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'throw': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'var': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'continued_line': [ include('expr'), (r'\n', Whitespace, '#pop'), ], 'expr': [ # Whitespace / Comment include('whitespace'), # Punctuation (r'\(', Punctuation,), (r'\)', Punctuation,), (r'\[', Punctuation,), (r'\]', Punctuation,), (r',', Punctuation), # Keyword (words(( 'true', 'false', 'null', 'inf' ), prefix=r'\b', suffix=r'\b'), Keyword.Constant), (words(( 'me' ), prefix=r'\b', suffix=r'\b'), Keyword), (words(( 'bit16', 'bit32', 'bit64', 'bit8', 'bool', 'char', 'class', 'dict', 'enum', 'float', 'func', 'int', 'list', 'queue', 'stack' ), prefix=r'\b', suffix=r'\b'), Keyword.Type), # Number (r'\b[0-9]\.[0-9]+(?!\.)(:?e[\+-][0-9]+)?\b', Number.Float), (r'\b2#[01]+(?:b(?:8|16|32|64))?\b', Number.Bin), (r'\b8#[0-7]+(?:b(?:8|16|32|64))?\b', Number.Oct), (r'\b16#[0-9A-F]+(?:b(?:8|16|32|64))?\b', Number.Hex), (r'\b[0-9]+(?:b(?:8|16|32|64))?\b', Number.Decimal), # String / Char (r'"', String.Double, 'string'), (r"'(?:\\.|.)+?'", String.Char), # Operator (r'(?:\.|\$(?:>|<)?)', Operator), (r'(?:\^)', Operator), (r'(?:\+|-|!|##?)', Operator), (r'(?:\*|/|%)', Operator), (r'(?:~)', Operator), (r'(?:(?:=|<>)(?:&|\$)?|<=?|>=?)', Operator), (r'(?:&)', Operator), (r'(?:\|)', Operator), (r'(?:\?)', Operator), (r'(?::(?::|\+|-|\*|/|%|\^|~)?)', Operator), # Identifier (r"\b([a-zA-Z_][0-9a-zA-Z_]*)(?=@)\b", Name), (r"(@)?\b([a-zA-Z_][0-9a-zA-Z_]*)\b", bygroups(Name.Other, Name.Variable)), ], # String 'string': [ (r'(?:\\[^{\n]|[^"\\])+', String.Double), (r'\\\{', 
String.Double, 'toStrInString'), (r'"', String.Double, '#pop'), ], 'toStrInString': [ include('expr'), (r'\}', String.Double, '#pop'), ], }
11,406
Python
33.152695
101
0.376995
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/webmisc.py
""" pygments.lexers.webmisc ~~~~~~~~~~~~~~~~~~~~~~~ Lexers for misc. web stuff. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ default, using from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Literal, Whitespace from pygments.lexers.css import _indentation, _starts_block from pygments.lexers.html import HtmlLexer from pygments.lexers.javascript import JavascriptLexer from pygments.lexers.ruby import RubyLexer __all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer'] class DuelLexer(RegexLexer): """ Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks. .. versionadded:: 1.4 """ name = 'Duel' url = 'http://duelengine.org/' aliases = ['duel', 'jbst', 'jsonml+bst'] filenames = ['*.duel', '*.jbst'] mimetypes = ['text/x-duel', 'text/x-jbst'] flags = re.DOTALL tokens = { 'root': [ (r'(<%[@=#!:]?)(.*?)(%>)', bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)), (r'(<%\$)(.*?)(:)(.*?)(%>)', bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)), (r'(<%--)(.*?)(--%>)', bygroups(Name.Tag, Comment.Multiline, Name.Tag)), (r'(<script.*?>)(.*?)(</script>)', bygroups(using(HtmlLexer), using(JavascriptLexer), using(HtmlLexer))), (r'(.+?)(?=<)', using(HtmlLexer)), (r'.+', using(HtmlLexer)), ], } class XQueryLexer(ExtendedRegexLexer): """ An XQuery lexer, parsing a stream and outputting the tokens needed to highlight xquery code. .. 
versionadded:: 1.4 """ name = 'XQuery' url = 'https://www.w3.org/XML/Query/' aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm'] filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'] mimetypes = ['text/xquery', 'application/xquery'] xquery_parse_state = [] # FIX UNICODE LATER # ncnamestartchar = ( # r"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|" # r"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|" # r"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|" # r"[\u10000-\uEFFFF]" # ) ncnamestartchar = r"(?:[A-Z]|_|[a-z])" # FIX UNICODE LATER # ncnamechar = ncnamestartchar + (r"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|" # r"[\u203F-\u2040]") ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])" ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar) pitarget_namestartchar = r"(?:[A-KN-WYZ]|_|:|[a-kn-wyz])" pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])" pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar) prefixedname = "%s:%s" % (ncname, ncname) unprefixedname = ncname qname = "(?:%s|%s)" % (prefixedname, unprefixedname) entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)' charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)' stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")' stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')" # FIX UNICODE LATER # elementcontentchar = (r'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' # r'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_\'`|~]' # quotattrcontentchar = (r'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|' # r'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]') quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%()*+,\-./:;=?@\[\\\]^_\'`|~]' # aposattrcontentchar = (r'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' # r'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_`|~]' # CHAR elements - fix the above 
elementcontentchar, quotattrcontentchar, # aposattrcontentchar # x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] flags = re.DOTALL | re.MULTILINE def punctuation_root_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) # transition to root always - don't pop off stack ctx.stack = ['root'] ctx.pos = match.end() def operator_root_callback(lexer, match, ctx): yield match.start(), Operator, match.group(1) # transition to root always - don't pop off stack ctx.stack = ['root'] ctx.pos = match.end() def popstate_tag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) if lexer.xquery_parse_state: ctx.stack.append(lexer.xquery_parse_state.pop()) ctx.pos = match.end() def popstate_xmlcomment_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append(lexer.xquery_parse_state.pop()) ctx.pos = match.end() def popstate_kindtest_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) next_state = lexer.xquery_parse_state.pop() if next_state == 'occurrenceindicator': if re.match("[?*+]+", match.group(2)): yield match.start(), Punctuation, match.group(2) ctx.stack.append('operator') ctx.pos = match.end() else: ctx.stack.append('operator') ctx.pos = match.end(1) else: ctx.stack.append(next_state) ctx.pos = match.end(1) def popstate_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) # if we have run out of our state stack, pop whatever is on the pygments # state stack if len(lexer.xquery_parse_state) == 0: ctx.stack.pop() if not ctx.stack: # make sure we have at least the root state on invalid inputs ctx.stack = ['root'] elif len(ctx.stack) > 1: ctx.stack.append(lexer.xquery_parse_state.pop()) else: # i don't know if i'll need this, but in case, default back to root ctx.stack = ['root'] ctx.pos = match.end() def pushstate_element_content_starttag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) 
lexer.xquery_parse_state.append('element_content') ctx.stack.append('start_tag') ctx.pos = match.end() def pushstate_cdata_section_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('cdata_section') lexer.xquery_parse_state.append(ctx.state.pop) ctx.pos = match.end() def pushstate_starttag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) lexer.xquery_parse_state.append(ctx.state.pop) ctx.stack.append('start_tag') ctx.pos = match.end() def pushstate_operator_order_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) ctx.stack = ['root'] lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_operator_map_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) ctx.stack = ['root'] lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_operator_root_validate(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) ctx.stack = ['root'] lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_operator_root_validate_withmode(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Keyword, match.group(3) ctx.stack = ['root'] lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_operator_processing_instruction_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('processing_instruction') lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_element_content_processing_instruction_callback(lexer, match, ctx): yield match.start(), 
String.Doc, match.group(1) ctx.stack.append('processing_instruction') lexer.xquery_parse_state.append('element_content') ctx.pos = match.end() def pushstate_element_content_cdata_section_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('cdata_section') lexer.xquery_parse_state.append('element_content') ctx.pos = match.end() def pushstate_operator_cdata_section_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('cdata_section') lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_element_content_xmlcomment_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('xml_comment') lexer.xquery_parse_state.append('element_content') ctx.pos = match.end() def pushstate_operator_xmlcomment_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('xml_comment') lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_kindtest_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('kindtest') ctx.stack.append('kindtest') ctx.pos = match.end() def pushstate_operator_kindtestforpi_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('operator') ctx.stack.append('kindtestforpi') ctx.pos = match.end() def pushstate_operator_kindtest_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('operator') ctx.stack.append('kindtest') ctx.pos = match.end() def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx): yield 
match.start(), Name.Tag, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('occurrenceindicator') ctx.stack.append('kindtest') ctx.pos = match.end() def pushstate_operator_starttag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) lexer.xquery_parse_state.append('operator') ctx.stack.append('start_tag') ctx.pos = match.end() def pushstate_operator_root_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) lexer.xquery_parse_state.append('operator') ctx.stack = ['root'] ctx.pos = match.end() def pushstate_operator_root_construct_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Whitespace, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('operator') ctx.stack = ['root'] ctx.pos = match.end() def pushstate_root_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) cur_state = ctx.stack.pop() lexer.xquery_parse_state.append(cur_state) ctx.stack = ['root'] ctx.pos = match.end() def pushstate_operator_attribute_callback(lexer, match, ctx): yield match.start(), Name.Attribute, match.group(1) ctx.stack.append('operator') ctx.pos = match.end() tokens = { 'comment': [ # xquery comments (r'[^:()]+', Comment), (r'\(:', Comment, '#push'), (r':\)', Comment, '#pop'), (r'[:()]', Comment), ], 'whitespace': [ (r'\s+', Whitespace), ], 'operator': [ include('whitespace'), (r'(\})', popstate_callback), (r'\(:', Comment, 'comment'), (r'(\{)', pushstate_root_callback), (r'then|else|external|at|div|except', Keyword, 'root'), (r'order by', Keyword, 'root'), (r'group by', Keyword, 'root'), (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'), (r'and|or', Operator.Word, 'root'), (r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)', Operator.Word, 'root'), (r'return|satisfies|to|union|where|count|preserve\s+strip', Keyword, 'root'), 
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=|!)', operator_root_callback), (r'(::|:|;|\[|//|/|,)', punctuation_root_callback), (r'(castable|cast)(\s+)(as)\b', bygroups(Keyword, Whitespace, Keyword), 'singletype'), (r'(instance)(\s+)(of)\b', bygroups(Keyword, Whitespace, Keyword), 'itemtype'), (r'(treat)(\s+)(as)\b', bygroups(Keyword, Whitespace, Keyword), 'itemtype'), (r'(case)(\s+)(' + stringdouble + ')', bygroups(Keyword, Whitespace, String.Double), 'itemtype'), (r'(case)(\s+)(' + stringsingle + ')', bygroups(Keyword, Whitespace, String.Single), 'itemtype'), (r'(case|as)\b', Keyword, 'itemtype'), (r'(\))(\s*)(as)', bygroups(Punctuation, Whitespace, Keyword), 'itemtype'), (r'\$', Name.Variable, 'varname'), (r'(for|let|previous|next)(\s+)(\$)', bygroups(Keyword, Whitespace, Name.Variable), 'varname'), (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)', bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Name.Variable), 'varname'), # (r'\)|\?|\]', Punctuation, '#push'), (r'\)|\?|\]', Punctuation), (r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Whitespace, Keyword)), (r'ascending|descending|default', Keyword, '#push'), (r'(allowing)(\s+)(empty)', bygroups(Keyword, Whitespace, Keyword)), (r'external', Keyword), (r'(start|when|end)', Keyword, 'root'), (r'(only)(\s+)(end)', bygroups(Keyword, Whitespace, Keyword), 'root'), (r'collation', Keyword, 'uritooperator'), # eXist specific XQUF (r'(into|following|preceding|with)', Keyword, 'root'), # support for current context on rhs of Simple Map Operator (r'\.', Operator), # finally catch all string literals and stay in operator state (stringdouble, String.Double), (stringsingle, String.Single), (r'(catch)(\s*)', bygroups(Keyword, Whitespace), 'root'), ], 'uritooperator': [ (stringdouble, String.Double, '#pop'), (stringsingle, String.Single, '#pop'), ], 'namespacedecl': [ include('whitespace'), (r'\(:', Comment, 'comment'), (r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Whitespace, 
String.Double)), (r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Whitespace, String.Single)), (stringdouble, String.Double), (stringsingle, String.Single), (r',', Punctuation), (r'=', Operator), (r';', Punctuation, 'root'), (ncname, Name.Namespace), ], 'namespacekeyword': [ include('whitespace'), (r'\(:', Comment, 'comment'), (stringdouble, String.Double, 'namespacedecl'), (stringsingle, String.Single, 'namespacedecl'), (r'inherit|no-inherit', Keyword, 'root'), (r'namespace', Keyword, 'namespacedecl'), (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)), (r'preserve|no-preserve', Keyword), (r',', Punctuation), ], 'annotationname': [ (r'\(:', Comment, 'comment'), (qname, Name.Decorator), (r'(\()(' + stringdouble + ')', bygroups(Punctuation, String.Double)), (r'(\()(' + stringsingle + ')', bygroups(Punctuation, String.Single)), (r'(\,)(\s+)(' + stringdouble + ')', bygroups(Punctuation, Text, String.Double)), (r'(\,)(\s+)(' + stringsingle + ')', bygroups(Punctuation, Text, String.Single)), (r'\)', Punctuation), (r'(\s+)(\%)', bygroups(Text, Name.Decorator), 'annotationname'), (r'(\s+)(variable)(\s+)(\$)', bygroups(Text, Keyword.Declaration, Text, Name.Variable), 'varname'), (r'(\s+)(function)(\s+)', bygroups(Text, Keyword.Declaration, Text), 'root') ], 'varname': [ (r'\(:', Comment, 'comment'), (r'(' + qname + r')(\()?', bygroups(Name, Punctuation), 'operator'), ], 'singletype': [ include('whitespace'), (r'\(:', Comment, 'comment'), (ncname + r'(:\*)', Name.Variable, 'operator'), (qname, Name.Variable, 'operator'), ], 'itemtype': [ include('whitespace'), (r'\(:', Comment, 'comment'), (r'\$', Name.Variable, 'varname'), (r'(void)(\s*)(\()(\s*)(\))', bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'), (r'(element|attribute|schema-element|schema-attribute|comment|text|' r'node|binary|document-node|empty-sequence)(\s*)(\()', pushstate_occurrenceindicator_kindtest_callback), # Marklogic specific type? 
(r'(processing-instruction)(\s*)(\()', bygroups(Keyword, Text, Punctuation), ('occurrenceindicator', 'kindtestforpi')), (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])', bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'occurrenceindicator'), (r'(\(\#)(\s*)', bygroups(Punctuation, Text), 'pragma'), (r';', Punctuation, '#pop'), (r'then|else', Keyword, '#pop'), (r'(at)(\s+)(' + stringdouble + ')', bygroups(Keyword, Text, String.Double), 'namespacedecl'), (r'(at)(\s+)(' + stringsingle + ')', bygroups(Keyword, Text, String.Single), 'namespacedecl'), (r'except|intersect|in|is|return|satisfies|to|union|where|count', Keyword, 'root'), (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'), (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'), (r'external|at', Keyword, 'root'), (r'(stable)(\s+)(order)(\s+)(by)', bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'), (r'(castable|cast)(\s+)(as)', bygroups(Keyword, Text, Keyword), 'singletype'), (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)), (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)), (r'(case)(\s+)(' + stringdouble + ')', bygroups(Keyword, Text, String.Double), 'itemtype'), (r'(case)(\s+)(' + stringsingle + ')', bygroups(Keyword, Text, String.Single), 'itemtype'), (r'case|as', Keyword, 'itemtype'), (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), (ncname + r':\*', Keyword.Type, 'operator'), (r'(function|map|array)(\()', bygroups(Keyword.Type, Punctuation)), (qname, Keyword.Type, 'occurrenceindicator'), ], 'kindtest': [ (r'\(:', Comment, 'comment'), (r'\{', Punctuation, 'root'), (r'(\))([*+?]?)', popstate_kindtest_callback), (r'\*', Name, 'closekindtest'), (qname, Name, 'closekindtest'), (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback), ], 'kindtestforpi': [ (r'\(:', Comment, 'comment'), (r'\)', Punctuation, '#pop'), (ncname, Name.Variable), (stringdouble, String.Double), (stringsingle, String.Single), ], 'closekindtest': [ (r'\(:', 
Comment, 'comment'), (r'(\))', popstate_callback), (r',', Punctuation), (r'(\{)', pushstate_operator_root_callback), (r'\?', Punctuation), ], 'xml_comment': [ (r'(-->)', popstate_xmlcomment_callback), (r'[^-]{1,2}', Literal), (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]', Literal), ], 'processing_instruction': [ (r'\s+', Text, 'processing_instruction_content'), (r'\?>', String.Doc, '#pop'), (pitarget, Name), ], 'processing_instruction_content': [ (r'\?>', String.Doc, '#pop'), (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]', Literal), ], 'cdata_section': [ (r']]>', String.Doc, '#pop'), (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]', Literal), ], 'start_tag': [ include('whitespace'), (r'(/>)', popstate_tag_callback), (r'>', Name.Tag, 'element_content'), (r'"', Punctuation, 'quot_attribute_content'), (r"'", Punctuation, 'apos_attribute_content'), (r'=', Operator), (qname, Name.Tag), ], 'quot_attribute_content': [ (r'"', Punctuation, 'start_tag'), (r'(\{)', pushstate_root_callback), (r'""', Name.Attribute), (quotattrcontentchar, Name.Attribute), (entityref, Name.Attribute), (charref, Name.Attribute), (r'\{\{|\}\}', Name.Attribute), ], 'apos_attribute_content': [ (r"'", Punctuation, 'start_tag'), (r'\{', Punctuation, 'root'), (r"''", Name.Attribute), (aposattrcontentchar, Name.Attribute), (entityref, Name.Attribute), (charref, Name.Attribute), (r'\{\{|\}\}', Name.Attribute), ], 'element_content': [ (r'</', Name.Tag, 'end_tag'), (r'(\{)', pushstate_root_callback), (r'(<!--)', pushstate_element_content_xmlcomment_callback), (r'(<\?)', pushstate_element_content_processing_instruction_callback), (r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback), (r'(<)', pushstate_element_content_starttag_callback), (elementcontentchar, Literal), (entityref, Literal), (charref, Literal), (r'\{\{|\}\}', Literal), ], 'end_tag': [ include('whitespace'), (r'(>)', popstate_tag_callback), (qname, Name.Tag), ], 
'xmlspace_decl': [ include('whitespace'), (r'\(:', Comment, 'comment'), (r'preserve|strip', Keyword, '#pop'), ], 'declareordering': [ (r'\(:', Comment, 'comment'), include('whitespace'), (r'ordered|unordered', Keyword, '#pop'), ], 'xqueryversion': [ include('whitespace'), (r'\(:', Comment, 'comment'), (stringdouble, String.Double), (stringsingle, String.Single), (r'encoding', Keyword), (r';', Punctuation, '#pop'), ], 'pragma': [ (qname, Name.Variable, 'pragmacontents'), ], 'pragmacontents': [ (r'#\)', Punctuation, 'operator'), (r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]', Literal), (r'(\s+)', Whitespace), ], 'occurrenceindicator': [ include('whitespace'), (r'\(:', Comment, 'comment'), (r'\*|\?|\+', Operator, 'operator'), (r':=', Operator, 'root'), default('operator'), ], 'option': [ include('whitespace'), (qname, Name.Variable, '#pop'), ], 'qname_braren': [ include('whitespace'), (r'(\{)', pushstate_operator_root_callback), (r'(\()', Punctuation, 'root'), ], 'element_qname': [ (qname, Name.Variable, 'root'), ], 'attribute_qname': [ (qname, Name.Variable, 'root'), ], 'root': [ include('whitespace'), (r'\(:', Comment, 'comment'), # handle operator state # order on numbers matters - handle most complex first (r'\d+(\.\d*)?[eE][+-]?\d+', Number.Float, 'operator'), (r'(\.\d+)[eE][+-]?\d+', Number.Float, 'operator'), (r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'), (r'(\d+)', Number.Integer, 'operator'), (r'(\.\.|\.|\))', Punctuation, 'operator'), (r'(declare)(\s+)(construction)', bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'operator'), (r'(declare)(\s+)(default)(\s+)(order)', bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'), (r'(declare)(\s+)(context)(\s+)(item)', bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'), (ncname + r':\*', Name, 'operator'), (r'\*:'+ncname, Name.Tag, 'operator'), (r'\*', Name.Tag, 'operator'), (stringdouble, 
String.Double, 'operator'), (stringsingle, String.Single, 'operator'), (r'(\}|\])', popstate_callback), # NAMESPACE DECL (r'(declare)(\s+)(default)(\s+)(collation)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Keyword.Declaration)), (r'(module|declare)(\s+)(namespace)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration), 'namespacedecl'), (r'(declare)(\s+)(base-uri)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration), 'namespacedecl'), # NAMESPACE KEYWORD (r'(declare)(\s+)(default)(\s+)(element|function)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Keyword.Declaration), 'namespacekeyword'), (r'(import)(\s+)(schema|module)', bygroups(Keyword.Pseudo, Whitespace, Keyword.Pseudo), 'namespacekeyword'), (r'(declare)(\s+)(copy-namespaces)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration), 'namespacekeyword'), # VARNAMEs (r'(for|let|some|every)(\s+)(\$)', bygroups(Keyword, Whitespace, Name.Variable), 'varname'), (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)', bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Name.Variable), 'varname'), (r'\$', Name.Variable, 'varname'), (r'(declare)(\s+)(variable)(\s+)(\$)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Variable), 'varname'), # ANNOTATED GLOBAL VARIABLES AND FUNCTIONS (r'(declare)(\s+)(\%)', bygroups(Keyword.Declaration, Whitespace, Name.Decorator), 'annotationname'), # ITEMTYPE (r'(\))(\s+)(as)', bygroups(Operator, Whitespace, Keyword), 'itemtype'), (r'(element|attribute|schema-element|schema-attribute|comment|' r'text|node|document-node|empty-sequence)(\s+)(\()', pushstate_operator_kindtest_callback), (r'(processing-instruction)(\s+)(\()', pushstate_operator_kindtestforpi_callback), (r'(<!--)', pushstate_operator_xmlcomment_callback), (r'(<\?)', pushstate_operator_processing_instruction_callback), (r'(<!\[CDATA\[)', 
pushstate_operator_cdata_section_callback), # (r'</', Name.Tag, 'end_tag'), (r'(<)', pushstate_operator_starttag_callback), (r'(declare)(\s+)(boundary-space)', bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'xmlspace_decl'), (r'(validate)(\s+)(lax|strict)', pushstate_operator_root_validate_withmode), (r'(validate)(\s*)(\{)', pushstate_operator_root_validate), (r'(typeswitch)(\s*)(\()', bygroups(Keyword, Whitespace, Punctuation)), (r'(switch)(\s*)(\()', bygroups(Keyword, Whitespace, Punctuation)), (r'(element|attribute|namespace)(\s*)(\{)', pushstate_operator_root_construct_callback), (r'(document|text|processing-instruction|comment)(\s*)(\{)', pushstate_operator_root_construct_callback), # ATTRIBUTE (r'(attribute)(\s+)(?=' + qname + r')', bygroups(Keyword, Whitespace), 'attribute_qname'), # ELEMENT (r'(element)(\s+)(?=' + qname + r')', bygroups(Keyword, Whitespace), 'element_qname'), # PROCESSING_INSTRUCTION (r'(processing-instruction|namespace)(\s+)(' + ncname + r')(\s*)(\{)', bygroups(Keyword, Whitespace, Name.Variable, Whitespace, Punctuation), 'operator'), (r'(declare|define)(\s+)(function)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration)), (r'(\{|\[)', pushstate_operator_root_callback), (r'(unordered|ordered)(\s*)(\{)', pushstate_operator_order_callback), (r'(map|array)(\s*)(\{)', pushstate_operator_map_callback), (r'(declare)(\s+)(ordering)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration), 'declareordering'), (r'(xquery)(\s+)(version)', bygroups(Keyword.Pseudo, Whitespace, Keyword.Pseudo), 'xqueryversion'), (r'(\(#)(\s*)', bygroups(Punctuation, Whitespace), 'pragma'), # sometimes return can occur in root state (r'return', Keyword), (r'(declare)(\s+)(option)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration), 'option'), # URI LITERALS - single and double quoted (r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'), (r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'), 
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)', bygroups(Keyword, Punctuation)), (r'(descendant|following-sibling|following|parent|preceding-sibling' r'|preceding|self)(::)', bygroups(Keyword, Punctuation)), (r'(if)(\s*)(\()', bygroups(Keyword, Whitespace, Punctuation)), (r'then|else', Keyword), # eXist specific XQUF (r'(update)(\s*)(insert|delete|replace|value|rename)', bygroups(Keyword, Whitespace, Keyword)), (r'(into|following|preceding|with)', Keyword), # Marklogic specific (r'(try)(\s*)', bygroups(Keyword, Whitespace), 'root'), (r'(catch)(\s*)(\()(\$)', bygroups(Keyword, Whitespace, Punctuation, Name.Variable), 'varname'), (r'(@'+qname+')', Name.Attribute, 'operator'), (r'(@'+ncname+')', Name.Attribute, 'operator'), (r'@\*:'+ncname, Name.Attribute, 'operator'), (r'@\*', Name.Attribute, 'operator'), (r'(@)', Name.Attribute, 'operator'), (r'//|/|\+|-|;|,|\(|\)', Punctuation), # STANDALONE QNAMES (qname + r'(?=\s*\{)', Name.Tag, 'qname_braren'), (qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'), (r'(' + qname + ')(#)([0-9]+)', bygroups(Name.Function, Keyword.Type, Number.Integer)), (qname, Name.Tag, 'operator'), ] } class QmlLexer(RegexLexer): """ For QML files. .. versionadded:: 1.6 """ # QML is based on javascript, so much of this is taken from the # JavascriptLexer above. 
name = 'QML' url = 'https://doc.qt.io/qt-6/qmlapplications.html' aliases = ['qml', 'qbs'] filenames = ['*.qml', '*.qbs'] mimetypes = ['application/x-qml', 'application/x-qt.qbs+qml'] # pasted from JavascriptLexer, with some additions flags = re.DOTALL | re.MULTILINE tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'<!--', Comment), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), # QML insertions (r'\bid\s*:\s*[A-Za-z][\w.]*', Keyword.Declaration, 'slashstartsregex'), (r'\b[A-Za-z][\w.]*\s*:', Keyword, 'slashstartsregex'), # the rest from JavascriptLexer (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|void|' r'this)\b', Keyword, 'slashstartsregex'), (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'), (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|' r'extends|final|float|goto|implements|import|int|interface|long|native|' r'package|private|protected|public|short|static|super|synchronized|throws|' r'transient|volatile)\b', Keyword.Reserved), (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|' r'window)\b', Name.Builtin), (r'[$a-zA-Z_]\w*', Name.Other), 
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double), (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single), ] } class CirruLexer(RegexLexer): r""" * using ``()`` for expressions, but restricted in a same line * using ``""`` for strings, with ``\`` for escaping chars * using ``$`` as folding operator * using ``,`` as unfolding operator * using indentations for nested blocks .. versionadded:: 2.0 """ name = 'Cirru' url = 'http://cirru.org/' aliases = ['cirru'] filenames = ['*.cirru'] mimetypes = ['text/x-cirru'] flags = re.MULTILINE tokens = { 'string': [ (r'[^"\\\n]+', String), (r'\\', String.Escape, 'escape'), (r'"', String, '#pop'), ], 'escape': [ (r'.', String.Escape, '#pop'), ], 'function': [ (r'\,', Operator, '#pop'), (r'[^\s"()]+', Name.Function, '#pop'), (r'\)', Operator, '#pop'), (r'(?=\n)', Text, '#pop'), (r'\(', Operator, '#push'), (r'"', String, ('#pop', 'string')), (r'[ ]+', Text.Whitespace), ], 'line': [ (r'(?<!\w)\$(?!\w)', Operator, 'function'), (r'\(', Operator, 'function'), (r'\)', Operator), (r'\n', Text, '#pop'), (r'"', String, 'string'), (r'[ ]+', Text.Whitespace), (r'[+-]?[\d.]+\b', Number), (r'[^\s"()]+', Name.Variable) ], 'root': [ (r'^\n+', Text.Whitespace), default(('line', 'function')), ] } class SlimLexer(ExtendedRegexLexer): """ For Slim markup. .. 
versionadded:: 2.0 """ name = 'Slim' aliases = ['slim'] filenames = ['*.slim'] mimetypes = ['text/x-slim'] flags = re.IGNORECASE _dot = r'(?: \|\n(?=.* \|)|.)' tokens = { 'root': [ (r'[ \t]*\n', Text), (r'[ \t]*', _indentation), ], 'css': [ (r'\.[\w:-]+', Name.Class, 'tag'), (r'\#[\w:-]+', Name.Function, 'tag'), ], 'eval-or-plain': [ (r'([ \t]*==?)(.*\n)', bygroups(Punctuation, using(RubyLexer)), 'root'), (r'[ \t]+[\w:-]+(?==)', Name.Attribute, 'html-attributes'), default('plain'), ], 'content': [ include('css'), (r'[\w:-]+:[ \t]*\n', Text, 'plain'), (r'(-)(.*\n)', bygroups(Punctuation, using(RubyLexer)), '#pop'), (r'\|' + _dot + r'*\n', _starts_block(Text, 'plain'), '#pop'), (r'/' + _dot + r'*\n', _starts_block(Comment.Preproc, 'slim-comment-block'), '#pop'), (r'[\w:-]+', Name.Tag, 'tag'), include('eval-or-plain'), ], 'tag': [ include('css'), (r'[<>]{1,2}(?=[ \t=])', Punctuation), (r'[ \t]+\n', Punctuation, '#pop:2'), include('eval-or-plain'), ], 'plain': [ (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), (r'(#\{)(.*?)(\})', bygroups(String.Interpol, using(RubyLexer), String.Interpol)), (r'\n', Text, 'root'), ], 'html-attributes': [ (r'=', Punctuation), (r'"[^"]+"', using(RubyLexer), 'tag'), (r'\'[^\']+\'', using(RubyLexer), 'tag'), (r'\w+', Text, 'tag'), ], 'slim-comment-block': [ (_dot + '+', Comment.Preproc), (r'\n', Text, 'root'), ], }
40,549
Python
39.108803
110
0.508397
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/solidity.py
""" pygments.lexers.solidity ~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Solidity. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, include, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['SolidityLexer'] class SolidityLexer(RegexLexer): """ For Solidity source code. .. versionadded:: 2.5 """ name = 'Solidity' aliases = ['solidity'] filenames = ['*.sol'] mimetypes = [] datatype = ( r'\b(address|bool|(?:(?:bytes|hash|int|string|uint)(?:8|16|24|32|40|48|56|64' r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208' r'|216|224|232|240|248|256)?))\b' ) tokens = { 'root': [ include('whitespace'), include('comments'), (r'\bpragma\s+solidity\b', Keyword, 'pragma'), (r'\b(contract)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword, Whitespace, Name.Entity)), (datatype + r'(\s+)((?:external|public|internal|private)\s+)?' 
+ r'([a-zA-Z_]\w*)', bygroups(Keyword.Type, Whitespace, Keyword, Name.Variable)), (r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Type, Whitespace, Name.Variable)), (r'\b(msg|block|tx)\.([A-Za-z_][a-zA-Z0-9_]*)\b', Keyword), (words(( 'block', 'break', 'constant', 'constructor', 'continue', 'contract', 'do', 'else', 'external', 'false', 'for', 'function', 'if', 'import', 'inherited', 'internal', 'is', 'library', 'mapping', 'memory', 'modifier', 'msg', 'new', 'payable', 'private', 'public', 'require', 'return', 'returns', 'struct', 'suicide', 'throw', 'this', 'true', 'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'), Keyword.Type), (words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin), (datatype, Keyword.Type), include('constants'), (r'[a-zA-Z_]\w*', Text), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[.;{}(),\[\]]', Punctuation) ], 'comments': [ (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline), (r'/(\\\n)?[*][\w\W]*', Comment.Multiline) ], 'constants': [ (r'("(\\"|.)*?")', String.Double), (r"('(\\'|.)*?')", String.Single), (r'\b0[xX][0-9a-fA-F]+\b', Number.Hex), (r'\b\d+\b', Number.Decimal), ], 'pragma': [ include('whitespace'), include('comments'), (r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)', bygroups(Operator, Whitespace, Keyword)), (r';', Punctuation, '#pop') ], 'whitespace': [ (r'\s+', Whitespace), (r'\n', Whitespace) ] }
3,127
Python
34.545454
85
0.464663
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/whiley.py
""" pygments.lexers.whiley ~~~~~~~~~~~~~~~~~~~~~~ Lexers for the Whiley language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Comment, Keyword, Name, Number, Operator, \ Punctuation, String, Text __all__ = ['WhileyLexer'] class WhileyLexer(RegexLexer): """ Lexer for the Whiley programming language. .. versionadded:: 2.2 """ name = 'Whiley' url = 'http://whiley.org/' filenames = ['*.whiley'] aliases = ['whiley'] mimetypes = ['text/x-whiley'] # See the language specification: # http://whiley.org/download/WhileyLanguageSpec.pdf tokens = { 'root': [ # Whitespace (r'\s+', Text), # Comments (r'//.*', Comment.Single), # don't parse empty comment as doc comment (r'/\*\*/', Comment.Multiline), (r'(?s)/\*\*.*?\*/', String.Doc), (r'(?s)/\*.*?\*/', Comment.Multiline), # Keywords (words(( 'if', 'else', 'while', 'for', 'do', 'return', 'switch', 'case', 'default', 'break', 'continue', 'requires', 'ensures', 'where', 'assert', 'assume', 'all', 'no', 'some', 'in', 'is', 'new', 'throw', 'try', 'catch', 'debug', 'skip', 'fail', 'finite', 'total'), suffix=r'\b'), Keyword.Reserved), (words(( 'function', 'method', 'public', 'private', 'protected', 'export', 'native'), suffix=r'\b'), Keyword.Declaration), # "constant" & "type" are not keywords unless used in declarations (r'(constant|type)(\s+)([a-zA-Z_]\w*)(\s+)(is)\b', bygroups(Keyword.Declaration, Text, Name, Text, Keyword.Reserved)), (r'(true|false|null)\b', Keyword.Constant), (r'(bool|byte|int|real|any|void)\b', Keyword.Type), # "from" is not a keyword unless used with import (r'(import)(\s+)(\*)([^\S\n]+)(from)\b', bygroups(Keyword.Namespace, Text, Punctuation, Text, Keyword.Namespace)), (r'(import)(\s+)([a-zA-Z_]\w*)([^\S\n]+)(from)\b', bygroups(Keyword.Namespace, Text, Name, Text, Keyword.Namespace)), (r'(package|import)\b', Keyword.Namespace), # standard library: 
https://github.com/Whiley/WhileyLibs/ (words(( # types defined in whiley.lang.Int 'i8', 'i16', 'i32', 'i64', 'u8', 'u16', 'u32', 'u64', 'uint', 'nat', # whiley.lang.Any 'toString'), suffix=r'\b'), Name.Builtin), # byte literal (r'[01]+b', Number.Bin), # decimal literal (r'[0-9]+\.[0-9]+', Number.Float), # match "1." but not ranges like "3..5" (r'[0-9]+\.(?!\.)', Number.Float), # integer literal (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), # character literal (r"""'[^\\]'""", String.Char), (r"""(')(\\['"\\btnfr])(')""", bygroups(String.Char, String.Escape, String.Char)), # string literal (r'"', String, 'string'), # operators and punctuation (r'[{}()\[\],.;]', Punctuation), (r'[+\-*/%&|<>^!~@=:?' # unicode operators r'\u2200\u2203\u2205\u2282\u2286\u2283\u2287' r'\u222A\u2229\u2264\u2265\u2208\u2227\u2228' r']', Operator), # identifier (r'[a-zA-Z_]\w*', Name), ], 'string': [ (r'"', String, '#pop'), (r'\\[btnfr]', String.Escape), (r'\\u[0-9a-fA-F]{4}', String.Escape), (r'\\.', String), (r'[^\\"]+', String), ], }
4,018
Python
33.350427
86
0.46665
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/text.py
""" pygments.lexers.text ~~~~~~~~~~~~~~~~~~~~ Lexers for non-source code file types. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexers.configs import ApacheConfLexer, NginxConfLexer, \ SquidConfLexer, LighttpdConfLexer, IniLexer, RegeditLexer, PropertiesLexer, \ UnixConfigLexer from pygments.lexers.console import PyPyLogLexer from pygments.lexers.textedit import VimLexer from pygments.lexers.markup import BBCodeLexer, MoinWikiLexer, RstLexer, \ TexLexer, GroffLexer from pygments.lexers.installers import DebianControlLexer, SourcesListLexer from pygments.lexers.make import MakefileLexer, BaseMakefileLexer, CMakeLexer from pygments.lexers.haxe import HxmlLexer from pygments.lexers.sgf import SmartGameFormatLexer from pygments.lexers.diff import DiffLexer, DarcsPatchLexer from pygments.lexers.data import YamlLexer from pygments.lexers.textfmts import IrcLogsLexer, GettextLexer, HttpLexer __all__ = []
1,029
Python
37.148147
81
0.790087
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/c_like.py
""" pygments.lexers.c_like ~~~~~~~~~~~~~~~~~~~~~~ Lexers for other C-like languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \ default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace from pygments.lexers.c_cpp import CLexer, CppLexer from pygments.lexers import _mql_builtins __all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer', 'CudaLexer', 'SwigLexer', 'MqlLexer', 'ArduinoLexer', 'CharmciLexer', 'OmgIdlLexer'] class PikeLexer(CppLexer): """ For `Pike <http://pike.lysator.liu.se/>`_ source code. .. versionadded:: 2.0 """ name = 'Pike' aliases = ['pike'] filenames = ['*.pike', '*.pmod'] mimetypes = ['text/x-pike'] tokens = { 'statements': [ (words(( 'catch', 'new', 'private', 'protected', 'public', 'gauge', 'throw', 'throws', 'class', 'interface', 'implement', 'abstract', 'extends', 'from', 'this', 'super', 'constant', 'final', 'static', 'import', 'use', 'extern', 'inline', 'proto', 'break', 'continue', 'if', 'else', 'for', 'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null', '__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__', '__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__', '__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__', '__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'), Keyword), (r'(bool|int|long|float|short|double|char|string|object|void|mapping|' r'array|multiset|program|function|lambda|mixed|' r'[a-z_][a-z0-9_]*_t)\b', Keyword.Type), (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'), (r'[~!%^&*+=|?:<>/@-]', Operator), inherit, ], 'classname': [ (r'[a-zA-Z_]\w*', Name.Class, '#pop'), # template specification (r'\s*(?=>)', Whitespace, '#pop'), ], } class NesCLexer(CLexer): 
""" For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor directives. .. versionadded:: 2.0 """ name = 'nesC' aliases = ['nesc'] filenames = ['*.nc'] mimetypes = ['text/x-nescsrc'] tokens = { 'statements': [ (words(( 'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component', 'components', 'configuration', 'event', 'extends', 'generic', 'implementation', 'includes', 'interface', 'module', 'new', 'norace', 'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'), Keyword), (words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t', 'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t', 'nx_uint64_t'), suffix=r'\b'), Keyword.Type), inherit, ], } class ClayLexer(RegexLexer): """ For `Clay <http://claylabs.com/clay/>`_ source. .. versionadded:: 2.0 """ name = 'Clay' filenames = ['*.clay'] aliases = ['clay'] mimetypes = ['text/x-clay'] tokens = { 'root': [ (r'\s+', Whitespace), (r'//.*?$', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'\b(public|private|import|as|record|variant|instance' r'|define|overload|default|external|alias' r'|rvalue|ref|forward|inline|noinline|forceinline' r'|enum|var|and|or|not|if|else|goto|return|while' r'|switch|case|break|continue|for|in|true|false|try|catch|throw' r'|finally|onerror|staticassert|eval|when|newtype' r'|__FILE__|__LINE__|__COLUMN__|__ARG__' r')\b', Keyword), (r'[~!%^&*+=|:<>/-]', Operator), (r'[#(){}\[\],;.]', Punctuation), (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), (r'\d+[LlUu]*', Number.Integer), (r'\b(true|false)\b', Name.Builtin), (r'(?i)[a-z_?][\w?]*', Name), (r'"""', String, 'tdqs'), (r'"', String, 'dqs'), ], 'strings': [ (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape), (r'[^\\"]+', String), ], 'nl': [ (r'\n', String), ], 'dqs': [ (r'"', String, '#pop'), include('strings'), ], 'tdqs': [ (r'"""', String, '#pop'), include('strings'), include('nl'), ], } class ECLexer(CLexer): """ For eC source code with preprocessor directives. .. 
versionadded:: 1.5 """ name = 'eC' aliases = ['ec'] filenames = ['*.ec', '*.eh'] mimetypes = ['text/x-echdr', 'text/x-ecsrc'] tokens = { 'statements': [ (words(( 'virtual', 'class', 'private', 'public', 'property', 'import', 'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get', 'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass', '__on_register_module', 'namespace', 'using', 'typed_object', 'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers', 'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset', 'class_default_property', 'property_category', 'class_data', 'class_property', 'thisclass', 'dbtable', 'dbindex', 'database_open', 'dbfield'), suffix=r'\b'), Keyword), (words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte', 'unichar', 'int64'), suffix=r'\b'), Keyword.Type), (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'), (r'(null|value|this)\b', Name.Builtin), inherit, ] } class ValaLexer(RegexLexer): """ For Vala source code with preprocessor directives. .. 
versionadded:: 1.1 """ name = 'Vala' aliases = ['vala', 'vapi'] filenames = ['*.vala', '*.vapi'] mimetypes = ['text/x-vala'] tokens = { 'whitespace': [ (r'^\s*#if\s+0', Comment.Preproc, 'if0'), (r'\n', Whitespace), (r'\s+', Whitespace), (r'\\\n', Text), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), ], 'statements': [ (r'[L@]?"', String, 'string'), (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'(?s)""".*?"""', String), # verbatim strings (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])', bygroups(Punctuation, Name.Decorator, Punctuation)), # TODO: "correctly" parse complex code attributes (r'(\[)(CCode|(?:Integer|Floating)Type)', bygroups(Punctuation, Name.Decorator)), (r'[()\[\],.]', Punctuation), (words(( 'as', 'base', 'break', 'case', 'catch', 'construct', 'continue', 'default', 'delete', 'do', 'else', 'enum', 'finally', 'for', 'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params', 'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try', 'typeof', 'while', 'yield'), suffix=r'\b'), Keyword), (words(( 'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern', 'inline', 'internal', 'override', 'owned', 'private', 'protected', 'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned', 'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'), Keyword.Declaration), (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'namespace'), (r'(class|errordomain|interface|struct)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'class'), (r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)), # void is an actual keyword, others are in glib-2.0.vapi (words(( 'void', 'bool', 
'char', 'double', 'float', 'int', 'int8', 'int16', 'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string', 'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64', 'ulong', 'unichar', 'ushort'), suffix=r'\b'), Keyword.Type), (r'(true|false|null)\b', Name.Builtin), (r'[a-zA-Z_]\w*', Name), ], 'root': [ include('whitespace'), default('statement'), ], 'statement': [ include('whitespace'), include('statements'), ('[{}]', Punctuation), (';', Punctuation, '#pop'), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], 'if0': [ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'), (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'), (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'), (r'.*?\n', Comment), ], 'class': [ (r'[a-zA-Z_]\w*', Name.Class, '#pop') ], 'namespace': [ (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop') ], } class CudaLexer(CLexer): """ For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_ source. .. 
versionadded:: 1.6 """ name = 'CUDA' filenames = ['*.cu', '*.cuh'] aliases = ['cuda', 'cu'] mimetypes = ['text/x-cuda'] function_qualifiers = {'__device__', '__global__', '__host__', '__noinline__', '__forceinline__'} variable_qualifiers = {'__device__', '__constant__', '__shared__', '__restrict__'} vector_types = {'char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3', 'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2', 'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1', 'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1', 'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4', 'ulong4', 'longlong1', 'ulonglong1', 'longlong2', 'ulonglong2', 'float1', 'float2', 'float3', 'float4', 'double1', 'double2', 'dim3'} variables = {'gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'} functions = {'__threadfence_block', '__threadfence', '__threadfence_system', '__syncthreads', '__syncthreads_count', '__syncthreads_and', '__syncthreads_or'} execution_confs = {'<<<', '>>>'} def get_tokens_unprocessed(self, text, stack=('root',)): for index, token, value in CLexer.get_tokens_unprocessed(self, text, stack): if token is Name: if value in self.variable_qualifiers: token = Keyword.Type elif value in self.vector_types: token = Keyword.Type elif value in self.variables: token = Name.Builtin elif value in self.execution_confs: token = Keyword.Pseudo elif value in self.function_qualifiers: token = Keyword.Reserved elif value in self.functions: token = Name.Function yield index, token, value class SwigLexer(CppLexer): """ For `SWIG <http://www.swig.org/>`_ source code. .. 
versionadded:: 2.0 """ name = 'SWIG' aliases = ['swig'] filenames = ['*.swg', '*.i'] mimetypes = ['text/swig'] priority = 0.04 # Lower than C/C++ and Objective C/C++ tokens = { 'root': [ # Match it here so it won't be matched as a function in the rest of root (r'\$\**\&?\w+', Name), inherit ], 'statements': [ # SWIG directives (r'(%[a-z_][a-z0-9_]*)', Name.Function), # Special variables (r'\$\**\&?\w+', Name), # Stringification / additional preprocessor directives (r'##*[a-zA-Z_]\w*', Comment.Preproc), inherit, ], } # This is a far from complete set of SWIG directives swig_directives = { # Most common directives '%apply', '%define', '%director', '%enddef', '%exception', '%extend', '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include', '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma', '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap', # Less common directives '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear', '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum', '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor', '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor', '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments', '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv', '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception', '%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar', '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend', '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall', '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof', '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', '%warnfilter'} def analyse_text(text): rv = 0 # Search for SWIG directives, which are conventionally at the beginning of # a line. The probability of them being within a line is low, so let another # lexer win in this case. 
matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M) for m in matches: if m in SwigLexer.swig_directives: rv = 0.98 break else: rv = 0.91 # Fraction higher than MatlabLexer return rv class MqlLexer(CppLexer): """ For `MQL4 <http://docs.mql4.com/>`_ and `MQL5 <http://www.mql5.com/en/docs>`_ source code. .. versionadded:: 2.0 """ name = 'MQL' aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5'] filenames = ['*.mq4', '*.mq5', '*.mqh'] mimetypes = ['text/x-mql'] tokens = { 'statements': [ (words(_mql_builtins.keywords, suffix=r'\b'), Keyword), (words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type), (words(_mql_builtins.types, suffix=r'\b'), Name.Function), (words(_mql_builtins.constants, suffix=r'\b'), Name.Constant), (words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'), Name.Constant), inherit, ], } class ArduinoLexer(CppLexer): """ For `Arduino(tm) <https://arduino.cc/>`_ source. This is an extension of the CppLexer, as the Arduino® Language is a superset of C++ .. versionadded:: 2.1 """ name = 'Arduino' aliases = ['arduino'] filenames = ['*.ino'] mimetypes = ['text/x-arduino'] # Language sketch main structure functions structure = {'setup', 'loop'} # Language operators operators = {'not', 'or', 'and', 'xor'} # Language 'variables' variables = { 'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL', 'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET', 'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH', 'LOW', 'INPUT', 'OUTPUT', 'INPUT_PULLUP', 'LED_BUILTIN', 'true', 'false', 'void', 'boolean', 'char', 'unsigned char', 'byte', 'int', 'unsigned int', 'word', 'long', 'unsigned long', 'short', 'float', 'double', 'string', 'String', 'array', 'static', 'volatile', 'const', 'boolean', 'byte', 'word', 'string', 'String', 'array', 'int', 'float', 'private', 'char', 'virtual', 'operator', 'sizeof', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int8_t', 'int16_t', 'int32_t', 'int64_t', 
'dynamic_cast', 'typedef', 'const_cast', 'const', 'struct', 'static_cast', 'union', 'unsigned', 'long', 'volatile', 'static', 'protected', 'bool', 'public', 'friend', 'auto', 'void', 'enum', 'extern', 'class', 'short', 'reinterpret_cast', 'double', 'register', 'explicit', 'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary', 'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short', 'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong', 'atomic_llong', 'atomic_ullong', 'PROGMEM'} # Language shipped functions and class ( ) functions = { 'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer', 'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall', 'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient', 'GSMScanner', 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer', 'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet', 'Console', 'GSMBand', 'Esplora', 'Stepper', 'Process', 'WiFiUDP', 'GSM_SMS', 'Mailbox', 'USBHost', 'Firmata', 'PImage', 'Client', 'Server', 'GSMPIN', 'FileIO', 'Bridge', 'Serial', 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File', 'Task', 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD', 'runShellCommandAsynchronously', 'analogWriteResolution', 'retrieveCallingNumber', 'printFirmwareVersion', 'analogReadResolution', 'sendDigitalPortPair', 'noListenOnLocalhost', 'readJoystickButton', 'setFirmwareVersion', 'readJoystickSwitch', 'scrollDisplayRight', 'getVoiceCallStatus', 'scrollDisplayLeft', 'writeMicroseconds', 'delayMicroseconds', 'beginTransmission', 'getSignalStrength', 'runAsynchronously', 'getAsynchronously', 'listenOnLocalhost', 'getCurrentCarrier', 'readAccelerometer', 'messageAvailable', 'sendDigitalPorts', 'lineFollowConfig', 'countryNameWrite', 'runShellCommand', 'readStringUntil', 'rewindDirectory', 'readTemperature', 'setClockDivider', 'readLightSensor', 'endTransmission', 'analogReference', 
'detachInterrupt', 'countryNameRead', 'attachInterrupt', 'encryptionType', 'readBytesUntil', 'robotNameWrite', 'readMicrophone', 'robotNameRead', 'cityNameWrite', 'userNameWrite', 'readJoystickY', 'readJoystickX', 'mouseReleased', 'openNextFile', 'scanNetworks', 'noInterrupts', 'digitalWrite', 'beginSpeaker', 'mousePressed', 'isActionDone', 'mouseDragged', 'displayLogos', 'noAutoscroll', 'addParameter', 'remoteNumber', 'getModifiers', 'keyboardRead', 'userNameRead', 'waitContinue', 'processInput', 'parseCommand', 'printVersion', 'readNetworks', 'writeMessage', 'blinkVersion', 'cityNameRead', 'readMessage', 'setDataMode', 'parsePacket', 'isListening', 'setBitOrder', 'beginPacket', 'isDirectory', 'motorsWrite', 'drawCompass', 'digitalRead', 'clearScreen', 'serialEvent', 'rightToLeft', 'setTextSize', 'leftToRight', 'requestFrom', 'keyReleased', 'compassRead', 'analogWrite', 'interrupts', 'WiFiServer', 'disconnect', 'playMelody', 'parseFloat', 'autoscroll', 'getPINUsed', 'setPINUsed', 'setTimeout', 'sendAnalog', 'readSlider', 'analogRead', 'beginWrite', 'createChar', 'motorsStop', 'keyPressed', 'tempoWrite', 'readButton', 'subnetMask', 'debugPrint', 'macAddress', 'writeGreen', 'randomSeed', 'attachGPRS', 'readString', 'sendString', 'remotePort', 'releaseAll', 'mouseMoved', 'background', 'getXChange', 'getYChange', 'answerCall', 'getResult', 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON', 'getButton', 'available', 'connected', 'findUntil', 'readBytes', 'exitValue', 'readGreen', 'writeBlue', 'startLoop', 'IPAddress', 'isPressed', 'sendSysex', 'pauseMode', 'gatewayIP', 'setCursor', 'getOemKey', 'tuneWrite', 'noDisplay', 'loadImage', 'switchPIN', 'onRequest', 'onReceive', 'changePIN', 'playFile', 'noBuffer', 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT', 'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB', 'highByte', 'writeRed', 'setSpeed', 'readBlue', 'noStroke', 'remoteIP', 'transfer', 'shutdown', 'hangCall', 'beginSMS', 'endWrite', 
'attached', 'maintain', 'noCursor', 'checkReg', 'checkPUK', 'shiftOut', 'isValid', 'shiftIn', 'pulseIn', 'connect', 'println', 'localIP', 'pinMode', 'getIMEI', 'display', 'noBlink', 'process', 'getBand', 'running', 'beginSD', 'drawBMP', 'lowByte', 'setBand', 'release', 'bitRead', 'prepare', 'pointTo', 'readRed', 'setMode', 'noFill', 'remove', 'listen', 'stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer', 'height', 'bitSet', 'circle', 'config', 'cursor', 'random', 'IRread', 'setDNS', 'endSMS', 'getKey', 'micros', 'millis', 'begin', 'print', 'write', 'ready', 'flush', 'width', 'isPIN', 'blink', 'clear', 'press', 'mkdir', 'rmdir', 'close', 'point', 'yield', 'image', 'BSSID', 'click', 'delay', 'read', 'text', 'move', 'peek', 'beep', 'rect', 'line', 'open', 'seek', 'fill', 'size', 'turn', 'stop', 'home', 'find', 'step', 'tone', 'sqrt', 'RSSI', 'SSID', 'end', 'bit', 'tan', 'cos', 'sin', 'pow', 'map', 'abs', 'max', 'min', 'get', 'run', 'put', 'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit', 'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase', 'isHexadecimalDigit'} # do not highlight suppress_highlight = { 'namespace', 'template', 'mutable', 'using', 'asm', 'typeid', 'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept', 'static_assert', 'thread_local', 'restrict'} def get_tokens_unprocessed(self, text, stack=('root',)): for index, token, value in CppLexer.get_tokens_unprocessed(self, text, stack): if value in self.structure: yield index, Name.Builtin, value elif value in self.operators: yield index, Operator, value elif value in self.variables: yield index, Keyword.Reserved, value elif value in self.suppress_highlight: yield index, Name, value elif value in self.functions: yield index, Name.Function, value else: yield index, token, value class CharmciLexer(CppLexer): """ For `Charm++ <https://charm.cs.illinois.edu>`_ interface files (.ci). .. 
versionadded:: 2.4 """ name = 'Charmci' aliases = ['charmci'] filenames = ['*.ci'] mimetypes = [] tokens = { 'keywords': [ (r'(module)(\s+)', bygroups(Keyword, Text), 'classname'), (words(('mainmodule', 'mainchare', 'chare', 'array', 'group', 'nodegroup', 'message', 'conditional')), Keyword), (words(('entry', 'aggregate', 'threaded', 'sync', 'exclusive', 'nokeep', 'notrace', 'immediate', 'expedited', 'inline', 'local', 'python', 'accel', 'readwrite', 'writeonly', 'accelblock', 'memcritical', 'packed', 'varsize', 'initproc', 'initnode', 'initcall', 'stacksize', 'createhere', 'createhome', 'reductiontarget', 'iget', 'nocopy', 'mutable', 'migratable', 'readonly')), Keyword), inherit, ], } class OmgIdlLexer(CLexer): """ Lexer for Object Management Group Interface Definition Language. .. versionadded:: 2.9 """ name = 'OMG Interface Definition Language' url = 'https://www.omg.org/spec/IDL/About-IDL/' aliases = ['omg-idl'] filenames = ['*.idl', '*.pidl'] mimetypes = [] scoped_name = r'((::)?\w+)+' tokens = { 'values': [ (words(('true', 'false'), prefix=r'(?i)', suffix=r'\b'), Number), (r'([Ll]?)(")', bygroups(String.Affix, String.Double), 'string'), (r'([Ll]?)(\')(\\[^\']+)(\')', bygroups(String.Affix, String.Char, String.Escape, String.Char)), (r'([Ll]?)(\')(\\\')(\')', bygroups(String.Affix, String.Char, String.Escape, String.Char)), (r'([Ll]?)(\'.\')', bygroups(String.Affix, String.Char)), (r'[+-]?\d+(\.\d*)?[Ee][+-]?\d+', Number.Float), (r'[+-]?(\d+\.\d*)|(\d*\.\d+)([Ee][+-]?\d+)?', Number.Float), (r'(?i)[+-]?0x[0-9a-f]+', Number.Hex), (r'[+-]?[1-9]\d*', Number.Integer), (r'[+-]?0[0-7]*', Number.Oct), (r'[\+\-\*\/%^&\|~]', Operator), (words(('<<', '>>')), Operator), (scoped_name, Name), (r'[{};:,<>\[\]]', Punctuation), ], 'annotation_params': [ include('whitespace'), (r'\(', Punctuation, '#push'), include('values'), (r'=', Punctuation), (r'\)', Punctuation, '#pop'), ], 'annotation_params_maybe': [ (r'\(', Punctuation, 'annotation_params'), include('whitespace'), 
default('#pop'), ], 'annotation_appl': [ (r'@' + scoped_name, Name.Decorator, 'annotation_params_maybe'), ], 'enum': [ include('whitespace'), (r'[{,]', Punctuation), (r'\w+', Name.Constant), include('annotation_appl'), (r'\}', Punctuation, '#pop'), ], 'root': [ include('whitespace'), (words(( 'typedef', 'const', 'in', 'out', 'inout', 'local', ), prefix=r'(?i)', suffix=r'\b'), Keyword.Declaration), (words(( 'void', 'any', 'native', 'bitfield', 'unsigned', 'boolean', 'char', 'wchar', 'octet', 'short', 'long', 'int8', 'uint8', 'int16', 'int32', 'int64', 'uint16', 'uint32', 'uint64', 'float', 'double', 'fixed', 'sequence', 'string', 'wstring', 'map', ), prefix=r'(?i)', suffix=r'\b'), Keyword.Type), (words(( '@annotation', 'struct', 'union', 'bitset', 'interface', 'exception', 'valuetype', 'eventtype', 'component', ), prefix=r'(?i)', suffix=r'(\s+)(\w+)'), bygroups(Keyword, Whitespace, Name.Class)), (words(( 'abstract', 'alias', 'attribute', 'case', 'connector', 'consumes', 'context', 'custom', 'default', 'emits', 'factory', 'finder', 'getraises', 'home', 'import', 'manages', 'mirrorport', 'multiple', 'Object', 'oneway', 'primarykey', 'private', 'port', 'porttype', 'provides', 'public', 'publishes', 'raises', 'readonly', 'setraises', 'supports', 'switch', 'truncatable', 'typeid', 'typename', 'typeprefix', 'uses', 'ValueBase', ), prefix=r'(?i)', suffix=r'\b'), Keyword), (r'(?i)(enum|bitmask)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Class), 'enum'), (r'(?i)(module)(\s+)(\w+)', bygroups(Keyword.Namespace, Whitespace, Name.Namespace)), (r'(\w+)(\s*)(=)', bygroups(Name.Constant, Whitespace, Operator)), (r'[\(\)]', Punctuation), include('values'), include('annotation_appl'), ], }
29,203
Python
42.784108
97
0.506763
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/installers.py
""" pygments.lexers.installers ~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for installer/packager DSLs and formats. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, this, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Punctuation, Generic, Number, Whitespace __all__ = ['NSISLexer', 'RPMSpecLexer', 'SourcesListLexer', 'DebianControlLexer'] class NSISLexer(RegexLexer): """ For NSIS scripts. .. versionadded:: 1.6 """ name = 'NSIS' url = 'http://nsis.sourceforge.net/' aliases = ['nsis', 'nsi', 'nsh'] filenames = ['*.nsi', '*.nsh'] mimetypes = ['text/x-nsis'] flags = re.IGNORECASE tokens = { 'root': [ (r'([;#].*)(\n)', bygroups(Comment, Whitespace)), (r"'.*?'", String.Single), (r'"', String.Double, 'str_double'), (r'`', String.Backtick, 'str_backtick'), include('macro'), include('interpol'), include('basic'), (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), (r'/[a-z_]\w*', Name.Attribute), (r'\s+', Whitespace), (r'[\w.]+', Text), ], 'basic': [ (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', bygroups(Whitespace, Keyword, Whitespace, Name.Function)), (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', bygroups(Keyword.Namespace, Punctuation, Name.Function)), (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), (r'(\b[ULS]|\B)([!<>=]?=|\<\>?|\>)\B', Operator), (r'[|+-]', Operator), (r'\\', Punctuation), (r'\b(Abort|Add(?:BrandingImage|Size)|' r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' r'ComponentText|CopyFiles|CRCCheck|' r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' 
r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' r'InstDirError|LabelAddress|TempFileName)|' r'Goto|HideWindow|Icon|' r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' r'IsWindow|LangString(?:UP)?|' r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' r'Return|RMDir|SearchPath|Section(?:Divider|End|' r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' r'Silent|StaticBkColor)|' r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' r'XPStyle)\b', Keyword), (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' r'HK(CC|CR|CU|DD|LM|PD|U)|' r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' r'YESNO(?:CANCEL)?)|SET|SHCTX|' r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' r'true|try|user|zlib)\b', Name.Constant), ], 'macro': [ (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' r'delfilefile|echo(?:message)?|else|endif|error|execute|' r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' r'warning)\b', Comment.Preproc), ], 'interpol': [ (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', Name.Builtin), (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), (r'\$[a-z_]\w*', Name.Variable), ], 'str_double': [ (r'"', String.Double, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'[^"]+', String.Double), ], 'str_backtick': [ (r'`', String.Double, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'[^`]+', String.Double), ], } class RPMSpecLexer(RegexLexer): """ For RPM ``.spec`` files. .. 
versionadded:: 1.6 """ name = 'RPMSpec' aliases = ['spec'] filenames = ['*.spec'] mimetypes = ['text/x-rpm-spec'] _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' 'post[a-z]*|trigger[a-z]*|files)') tokens = { 'root': [ (r'#.*$', Comment), include('basic'), ], 'description': [ (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\s+', Whitespace), (r'.', Text), ], 'changelog': [ (r'\*.*$', Generic.Subheading), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\s+', Whitespace), (r'.', Text), ], 'string': [ (r'"', String.Double, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), include('interpol'), (r'.', String.Double), ], 'basic': [ include('macro'), (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', bygroups(Generic.Heading, Punctuation, using(this))), (r'^%description', Name.Decorator, 'description'), (r'^%changelog', Name.Decorator, 'changelog'), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', Keyword), include('interpol'), (r"'.*?'", String.Single), (r'"', String.Double, 'string'), (r'\s+', Whitespace), (r'.', Text), ], 'macro': [ (r'%define.*$', Comment.Preproc), (r'%\{\!\?.*%define.*\}', Comment.Preproc), (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', bygroups(Comment.Preproc, Text)), ], 'interpol': [ (r'%\{?__[a-z_]+\}?', Name.Function), (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), (r'%\{\?\w+\}', Name.Variable), (r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), (r'%\{[a-zA-Z]\w+\}', Keyword.Constant), ] } class SourcesListLexer(RegexLexer): """ Lexer that highlights debian sources.list files. .. 
versionadded:: 0.7 """ name = 'Debian Sourcelist' aliases = ['debsources', 'sourceslist', 'sources.list'] filenames = ['sources.list'] mimetype = ['application/x-debian-sourceslist'] tokens = { 'root': [ (r'\s+', Whitespace), (r'#.*?$', Comment), (r'^(deb(?:-src)?)(\s+)', bygroups(Keyword, Whitespace), 'distribution') ], 'distribution': [ (r'#.*?$', Comment, '#pop'), (r'\$\(ARCH\)', Name.Variable), (r'[^\s$[]+', String), (r'\[', String.Other, 'escaped-distribution'), (r'\$', String), (r'\s+', Whitespace, 'components') ], 'escaped-distribution': [ (r'\]', String.Other, '#pop'), (r'\$\(ARCH\)', Name.Variable), (r'[^\]$]+', String.Other), (r'\$', String.Other) ], 'components': [ (r'#.*?$', Comment, '#pop:2'), (r'$', Text, '#pop:2'), (r'\s+', Whitespace), (r'\S+', Keyword.Pseudo), ] } def analyse_text(text): for line in text.splitlines(): line = line.strip() if line.startswith('deb ') or line.startswith('deb-src '): return True class DebianControlLexer(RegexLexer): """ Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs. .. 
versionadded:: 0.9 """ name = 'Debian Control file' url = 'https://www.debian.org/doc/debian-policy/ch-controlfields.html' aliases = ['debcontrol', 'control'] filenames = ['control'] tokens = { 'root': [ (r'^(Description)', Keyword, 'description'), (r'^(Maintainer|Uploaders)(:\s*)', bygroups(Keyword, Text), 'maintainer'), (r'^((?:Build-|Pre-)?Depends(?:-Indep|-Arch)?)(:\s*)', bygroups(Keyword, Text), 'depends'), (r'^(Recommends|Suggests|Enhances)(:\s*)', bygroups(Keyword, Text), 'depends'), (r'^((?:Python-)?Version)(:\s*)(\S+)$', bygroups(Keyword, Text, Number)), (r'^((?:Installed-)?Size)(:\s*)(\S+)$', bygroups(Keyword, Text, Number)), (r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$', bygroups(Keyword, Text, Number)), (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$', bygroups(Keyword, Whitespace, String)), ], 'maintainer': [ (r'<[^>]+>$', Generic.Strong, '#pop'), (r'<[^>]+>', Generic.Strong), (r',\n?', Text), (r'[^,<]+$', Text, '#pop'), (r'[^,<]+', Text), ], 'description': [ (r'(.*)(Homepage)(: )(\S+)', bygroups(Text, String, Name, Name.Class)), (r':.*\n', Generic.Strong), (r' .*\n', Text), default('#pop'), ], 'depends': [ (r'(\$)(\{)(\w+\s*:\s*\w+)(\})', bygroups(Operator, Text, Name.Entity, Text)), (r'\(', Text, 'depend_vers'), (r'\|', Operator), (r',\n', Text), (r'\n', Text, '#pop'), (r'[,\s]', Text), (r'[+.a-zA-Z0-9-]+', Name.Function), (r'\[.*?\]', Name.Entity), ], 'depend_vers': [ (r'\)', Text, '#pop'), (r'([><=]+)(\s*)([^)]+)', bygroups(Operator, Text, Number)), ] }
13,178
Python
39.179878
85
0.492563
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/hdl.py
""" pygments.lexers.hdl ~~~~~~~~~~~~~~~~~~~ Lexers for hardware descriptor languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, bygroups, include, using, this, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer'] class VerilogLexer(RegexLexer): """ For verilog source code with preprocessor directives. .. versionadded:: 1.4 """ name = 'verilog' aliases = ['verilog', 'v'] filenames = ['*.v'] mimetypes = ['text/x-verilog'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' tokens = { 'root': [ (r'^\s*`define', Comment.Preproc, 'macro'), (r'\s+', Whitespace), (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'[{}#@]', Punctuation), (r'L?"', String, 'string'), (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex), (r'([0-9]+)|(\'b)[01]+', Number.Bin), (r'([0-9]+)|(\'d)[0-9]+', Number.Integer), (r'([0-9]+)|(\'o)[0-7]+', Number.Oct), (r'\'[01xz]', Number), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[()\[\],.;\']', Punctuation), (r'`[a-zA-Z_]\w*', Name.Constant), (r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text)), (r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text), 'import'), (words(( 'always', 'always_comb', 'always_ff', 'always_latch', 'and', 'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign', 'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase', 
'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive', 'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for', 'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0', 'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large', 'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge', 'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed', 'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1', 'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return', 'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed', 'small', 'specify', 'specparam', 'strength', 'string', 'strong0', 'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1', 'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait', 'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'), Keyword), (words(( 'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype', 'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected', 'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate', 'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames', 'nounconnected_drive', 'protect', 'protected', 'remove_gatenames', 'remove_netnames', 'resetall', 'timescale', 'unconnected_drive', 'undef'), prefix=r'`', suffix=r'\b'), Comment.Preproc), (words(( 'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose', 'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite', 'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log', 'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale', 'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset', 'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope', 'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb', 'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'), prefix=r'\$', 
suffix=r'\b'), Name.Builtin), (words(( 'byte', 'shortint', 'int', 'longint', 'integer', 'time', 'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand', 'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor' 'shortreal', 'real', 'realtime'), suffix=r'\b'), Keyword.Type), (r'[a-zA-Z_]\w*:(?!:)', Name.Label), (r'\$?[a-zA-Z_]\w*', Name), (r'\\(\S+)', Name), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation (r'\\', String), # stray backslash ], 'macro': [ (r'[^/\n]+', Comment.Preproc), (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'//.*?\n', Comment.Single, '#pop'), (r'/', Comment.Preproc), (r'(?<=\\)\n', Comment.Preproc), (r'\n', Whitespace, '#pop'), ], 'import': [ (r'[\w:]+\*?', Name.Namespace, '#pop') ] } def analyse_text(text): """Verilog code will use one of reg/wire/assign for sure, and that is not common elsewhere.""" result = 0 if 'reg' in text: result += 0.1 if 'wire' in text: result += 0.1 if 'assign' in text: result += 0.1 return result class SystemVerilogLexer(RegexLexer): """ Extends verilog lexer to recognise all SystemVerilog keywords from IEEE 1800-2009 standard. .. 
versionadded:: 1.5 """ name = 'systemverilog' aliases = ['systemverilog', 'sv'] filenames = ['*.sv', '*.svh'] mimetypes = ['text/x-systemverilog'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' tokens = { 'root': [ (r'^(\s*)(`define)', bygroups(Whitespace, Comment.Preproc), 'macro'), (r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace)), (r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'import'), (r'\s+', Whitespace), (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'[{}#@]', Punctuation), (r'L?"', String, 'string'), (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'([1-9][_0-9]*)?\s*\'[sS]?[bB]\s*[xXzZ?01][_xXzZ?01]*', Number.Bin), (r'([1-9][_0-9]*)?\s*\'[sS]?[oO]\s*[xXzZ?0-7][_xXzZ?0-7]*', Number.Oct), (r'([1-9][_0-9]*)?\s*\'[sS]?[dD]\s*[xXzZ?0-9][_xXzZ?0-9]*', Number.Integer), (r'([1-9][_0-9]*)?\s*\'[sS]?[hH]\s*[xXzZ?0-9a-fA-F][_xXzZ?0-9a-fA-F]*', Number.Hex), (r'\'[01xXzZ]', Number), (r'[0-9][_0-9]*', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (words(('inside', 'dist'), suffix=r'\b'), Operator.Word), (r'[()\[\],.;\'$]', Punctuation), (r'`[a-zA-Z_]\w*', Name.Constant), (words(( 'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch', 'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins', 'binsof', 'break', 'buf', 'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cell', 'checker', 'clocking', 'cmos', 'config', 'constraint', 'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design', 'disable', 'do', 'edge', 'else', 'end', 'endcase', 'endchecker', 'endclocking', 'endconfig', 'endfunction', 'endgenerate', 'endgroup', 
'endinterface', 'endmodule', 'endpackage', 'endprimitive', 'endprogram', 'endproperty', 'endsequence', 'endspecify', 'endtable', 'endtask', 'enum', 'eventually', 'expect', 'export', 'extern', 'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function', 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone', 'ignore_bins', 'illegal_bins', 'implies', 'implements', 'import', 'incdir', 'include', 'initial', 'inout', 'input', 'instance', 'interconnect', 'interface', 'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library', 'local', 'localparam', 'macromodule', 'matches', 'medium', 'modport', 'module', 'nand', 'negedge', 'nettype', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled', 'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter', 'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected', 'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent', 'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'ref', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime', 's_until', 's_until_with', 'scalared', 'sequence', 'showcancelled', 'small', 'soft', 'solve', 'specify', 'specparam', 'static', 'strong', 'strong0', 'strong1', 'struct', 'super', 'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'typedef', 'union', 'unique', 'unique0', 'until', 'until_with', 'untyped', 'use', 'vectored', 'virtual', 'wait', 'wait_order', 'weak', 'weak0', 'weak1', 'while', 'wildcard', 'with', 'within', 'xnor', 'xor'), suffix=r'\b'), Keyword), (r'(class)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Whitespace, Name.Class)), (r'(extends)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Whitespace, Name.Class)), 
(r'(endclass\b)(?:(\s*)(:)(\s*)([a-zA-Z_]\w*))?', bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Class)), (words(( # Variable types 'bit', 'byte', 'chandle', 'const', 'event', 'int', 'integer', 'logic', 'longint', 'real', 'realtime', 'reg', 'shortint', 'shortreal', 'signed', 'string', 'time', 'type', 'unsigned', 'var', 'void', # Net types 'supply0', 'supply1', 'tri', 'triand', 'trior', 'trireg', 'tri0', 'tri1', 'uwire', 'wand', 'wire', 'wor'), suffix=r'\b'), Keyword.Type), (words(( '`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype', '`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif', '`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma', '`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'), suffix=r'\b'), Comment.Preproc), (words(( # Simulation control tasks (20.2) '$exit', '$finish', '$stop', # Simulation time functions (20.3) '$realtime', '$stime', '$time', # Timescale tasks (20.4) '$printtimescale', '$timeformat', # Conversion functions '$bitstoreal', '$bitstoshortreal', '$cast', '$itor', '$realtobits', '$rtoi', '$shortrealtobits', '$signed', '$unsigned', # Data query functions (20.6) '$bits', '$isunbounded', '$typename', # Array query functions (20.7) '$dimensions', '$high', '$increment', '$left', '$low', '$right', '$size', '$unpacked_dimensions', # Math functions (20.8) '$acos', '$acosh', '$asin', '$asinh', '$atan', '$atan2', '$atanh', '$ceil', '$clog2', '$cos', '$cosh', '$exp', '$floor', '$hypot', '$ln', '$log10', '$pow', '$sin', '$sinh', '$sqrt', '$tan', '$tanh', # Bit vector system functions (20.9) '$countbits', '$countones', '$isunknown', '$onehot', '$onehot0', # Severity tasks (20.10) '$info', '$error', '$fatal', '$warning', # Assertion control tasks (20.12) '$assertcontrol', '$assertfailoff', '$assertfailon', '$assertkill', '$assertnonvacuouson', '$assertoff', '$asserton', '$assertpassoff', '$assertpasson', '$assertvacuousoff', # 
Sampled value system functions (20.13) '$changed', '$changed_gclk', '$changing_gclk', '$falling_gclk', '$fell', '$fell_gclk', '$future_gclk', '$past', '$past_gclk', '$rising_gclk', '$rose', '$rose_gclk', '$sampled', '$stable', '$stable_gclk', '$steady_gclk', # Coverage control functions (20.14) '$coverage_control', '$coverage_get', '$coverage_get_max', '$coverage_merge', '$coverage_save', '$get_coverage', '$load_coverage_db', '$set_coverage_db_name', # Probabilistic distribution functions (20.15) '$dist_chi_square', '$dist_erlang', '$dist_exponential', '$dist_normal', '$dist_poisson', '$dist_t', '$dist_uniform', '$random', # Stochastic analysis tasks and functions (20.16) '$q_add', '$q_exam', '$q_full', '$q_initialize', '$q_remove', # PLA modeling tasks (20.17) '$async$and$array', '$async$and$plane', '$async$nand$array', '$async$nand$plane', '$async$nor$array', '$async$nor$plane', '$async$or$array', '$async$or$plane', '$sync$and$array', '$sync$and$plane', '$sync$nand$array', '$sync$nand$plane', '$sync$nor$array', '$sync$nor$plane', '$sync$or$array', '$sync$or$plane', # Miscellaneous tasks and functions (20.18) '$system', # Display tasks (21.2) '$display', '$displayb', '$displayh', '$displayo', '$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff', '$monitoron', '$strobe', '$strobeb', '$strobeh', '$strobeo', '$write', '$writeb', '$writeh', '$writeo', # File I/O tasks and functions (21.3) '$fclose', '$fdisplay', '$fdisplayb', '$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', '$fgets', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', '$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh', '$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo', '$rewind', '$sformat', '$sformatf', '$sscanf', '$swrite', '$swriteb', '$swriteh', '$swriteo', '$ungetc', # Memory load tasks (21.4) '$readmemb', '$readmemh', # Memory dump tasks (21.5) '$writememb', '$writememh', # Command line input (21.6) 
'$test$plusargs', '$value$plusargs', # VCD tasks (21.7) '$dumpall', '$dumpfile', '$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports', '$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff', '$dumpportson', '$dumpvars', ), suffix=r'\b'), Name.Builtin), (r'[a-zA-Z_]\w*:(?!:)', Name.Label), (r'\$?[a-zA-Z_]\w*', Name), (r'\\(\S+)', Name), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation (r'\\', String), # stray backslash ], 'macro': [ (r'[^/\n]+', Comment.Preproc), (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'//.*?$', Comment.Single, '#pop'), (r'/', Comment.Preproc), (r'(?<=\\)\n', Comment.Preproc), (r'\n', Whitespace, '#pop'), ], 'import': [ (r'[\w:]+\*?', Name.Namespace, '#pop') ] } class VhdlLexer(RegexLexer): """ For VHDL source code. .. versionadded:: 1.5 """ name = 'vhdl' aliases = ['vhdl'] filenames = ['*.vhdl', '*.vhd'] mimetypes = ['text/x-vhdl'] flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ (r'\s+', Whitespace), (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation (r'--.*?$', Comment.Single), (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char), (r'[~!%^&*+=|?:<>/-]', Operator), (r"'[a-z_]\w*", Name.Attribute), (r'[()\[\],.;\']', Punctuation), (r'"[^\n\\"]*"', String), (r'(library)(\s+)([a-z_]\w*)', bygroups(Keyword, Whitespace, Name.Namespace)), (r'(use)(\s+)(entity)', bygroups(Keyword, Whitespace, Keyword)), (r'(use)(\s+)([a-z_][\w.]*\.)(all)', bygroups(Keyword, Whitespace, Name.Namespace, Keyword)), (r'(use)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Whitespace, Name.Namespace)), (r'(std|ieee)(\.[a-z_]\w*)', bygroups(Name.Namespace, Name.Namespace)), (words(('std', 'ieee', 'work'), suffix=r'\b'), Name.Namespace), (r'(entity|component)(\s+)([a-z_]\w*)', bygroups(Keyword, Whitespace, Name.Class)), 
(r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)' r'(of)(\s+)([a-z_]\w*)(\s+)(is)', bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword, Whitespace, Name.Class, Whitespace, Keyword)), (r'([a-z_]\w*)(:)(\s+)(process|for)', bygroups(Name.Class, Operator, Whitespace, Keyword)), (r'(end)(\s+)', bygroups(using(this), Whitespace), 'endblock'), include('types'), include('keywords'), include('numbers'), (r'[a-z_]\w*', Name), ], 'endblock': [ include('keywords'), (r'[a-z_]\w*', Name.Class), (r'\s+', Whitespace), (r';', Punctuation, '#pop'), ], 'types': [ (words(( 'boolean', 'bit', 'character', 'severity_level', 'integer', 'time', 'delay_length', 'natural', 'positive', 'string', 'bit_vector', 'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector', 'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'), Keyword.Type), ], 'keywords': [ (words(( 'abs', 'access', 'after', 'alias', 'all', 'and', 'architecture', 'array', 'assert', 'attribute', 'begin', 'block', 'body', 'buffer', 'bus', 'case', 'component', 'configuration', 'constant', 'disconnect', 'downto', 'else', 'elsif', 'end', 'entity', 'exit', 'file', 'for', 'function', 'generate', 'generic', 'group', 'guarded', 'if', 'impure', 'in', 'inertial', 'inout', 'is', 'label', 'library', 'linkage', 'literal', 'loop', 'map', 'mod', 'nand', 'new', 'next', 'nor', 'not', 'null', 'of', 'on', 'open', 'or', 'others', 'out', 'package', 'port', 'postponed', 'procedure', 'process', 'pure', 'range', 'record', 'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select', 'severity', 'signal', 'shared', 'sla', 'sll', 'sra', 'srl', 'subtype', 'then', 'to', 'transport', 'type', 'units', 'until', 'use', 'variable', 'wait', 'when', 'while', 'with', 'xnor', 'xor'), suffix=r'\b'), Keyword), ], 'numbers': [ (r'\d{1,2}#[0-9a-f_]+#?', Number.Integer), (r'\d+', Number.Integer), (r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float), (r'X"[0-9a-f_]+"', Number.Hex), (r'O"[0-7_]+"', Number.Oct), (r'B"[01_]+"', 
Number.Bin), ], }
22,520
Python
47.328326
100
0.452043
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/graphics.py
""" pygments.lexers.graphics ~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for computer graphics and plotting related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words, include, bygroups, using, \ this, default from pygments.token import Text, Comment, Operator, Keyword, Name, \ Number, Punctuation, String, Whitespace __all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer', 'PovrayLexer', 'HLSLShaderLexer'] class GLShaderLexer(RegexLexer): """ GLSL (OpenGL Shader) lexer. .. versionadded:: 1.1 """ name = 'GLSL' aliases = ['glsl'] filenames = ['*.vert', '*.frag', '*.geo'] mimetypes = ['text/x-glslsrc'] tokens = { 'root': [ (r'^#.*$', Comment.Preproc), (r'//.*$', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?', Operator), (r'[?:]', Operator), # quick hack for ternary (r'\bdefined\b', Operator), (r'[;{}(),\[\]]', Punctuation), # FIXME when e is present, no decimal point needed (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float), (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float), (r'0[xX][0-9a-fA-F]*', Number.Hex), (r'0[0-7]*', Number.Oct), (r'[1-9][0-9]*', Number.Integer), (words(( # Storage qualifiers 'attribute', 'const', 'uniform', 'varying', 'buffer', 'shared', 'in', 'out', # Layout qualifiers 'layout', # Interpolation qualifiers 'flat', 'smooth', 'noperspective', # Auxiliary qualifiers 'centroid', 'sample', 'patch', # Parameter qualifiers. 
Some double as Storage qualifiers 'inout', # Precision qualifiers 'lowp', 'mediump', 'highp', 'precision', # Invariance qualifiers 'invariant', # Precise qualifiers 'precise', # Memory qualifiers 'coherent', 'volatile', 'restrict', 'readonly', 'writeonly', # Statements 'break', 'continue', 'do', 'for', 'while', 'switch', 'case', 'default', 'if', 'else', 'subroutine', 'discard', 'return', 'struct'), prefix=r'\b', suffix=r'\b'), Keyword), (words(( # Boolean values 'true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Constant), (words(( # Miscellaneous types 'void', 'atomic_uint', # Floating-point scalars and vectors 'float', 'vec2', 'vec3', 'vec4', 'double', 'dvec2', 'dvec3', 'dvec4', # Integer scalars and vectors 'int', 'ivec2', 'ivec3', 'ivec4', 'uint', 'uvec2', 'uvec3', 'uvec4', # Boolean scalars and vectors 'bool', 'bvec2', 'bvec3', 'bvec4', # Matrices 'mat2', 'mat3', 'mat4', 'dmat2', 'dmat3', 'dmat4', 'mat2x2', 'mat2x3', 'mat2x4', 'dmat2x2', 'dmat2x3', 'dmat2x4', 'mat3x2', 'mat3x3', 'mat3x4', 'dmat3x2', 'dmat3x3', 'dmat3x4', 'mat4x2', 'mat4x3', 'mat4x4', 'dmat4x2', 'dmat4x3', 'dmat4x4', # Floating-point samplers 'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube', 'sampler1DArray', 'sampler2DArray', 'samplerCubeArray', 'sampler2DRect', 'samplerBuffer', 'sampler2DMS', 'sampler2DMSArray', # Shadow samplers 'sampler1DShadow', 'sampler2DShadow', 'samplerCubeShadow', 'sampler1DArrayShadow', 'sampler2DArrayShadow', 'samplerCubeArrayShadow', 'sampler2DRectShadow', # Signed integer samplers 'isampler1D', 'isampler2D', 'isampler3D', 'isamplerCube', 'isampler1DArray', 'isampler2DArray', 'isamplerCubeArray', 'isampler2DRect', 'isamplerBuffer', 'isampler2DMS', 'isampler2DMSArray', # Unsigned integer samplers 'usampler1D', 'usampler2D', 'usampler3D', 'usamplerCube', 'usampler1DArray', 'usampler2DArray', 'usamplerCubeArray', 'usampler2DRect', 'usamplerBuffer', 'usampler2DMS', 'usampler2DMSArray', # Floating-point image types 'image1D', 'image2D', 'image3D', 'imageCube', 
'image1DArray', 'image2DArray', 'imageCubeArray', 'image2DRect', 'imageBuffer', 'image2DMS', 'image2DMSArray', # Signed integer image types 'iimage1D', 'iimage2D', 'iimage3D', 'iimageCube', 'iimage1DArray', 'iimage2DArray', 'iimageCubeArray', 'iimage2DRect', 'iimageBuffer', 'iimage2DMS', 'iimage2DMSArray', # Unsigned integer image types 'uimage1D', 'uimage2D', 'uimage3D', 'uimageCube', 'uimage1DArray', 'uimage2DArray', 'uimageCubeArray', 'uimage2DRect', 'uimageBuffer', 'uimage2DMS', 'uimage2DMSArray'), prefix=r'\b', suffix=r'\b'), Keyword.Type), (words(( # Reserved for future use. 'common', 'partition', 'active', 'asm', 'class', 'union', 'enum', 'typedef', 'template', 'this', 'resource', 'goto', 'inline', 'noinline', 'public', 'static', 'extern', 'external', 'interface', 'long', 'short', 'half', 'fixed', 'unsigned', 'superp', 'input', 'output', 'hvec2', 'hvec3', 'hvec4', 'fvec2', 'fvec3', 'fvec4', 'sampler3DRect', 'filter', 'sizeof', 'cast', 'namespace', 'using'), prefix=r'\b', suffix=r'\b'), Keyword.Reserved), # All names beginning with "gl_" are reserved. (r'gl_\w*', Name.Builtin), (r'[a-zA-Z_]\w*', Name), (r'\.', Punctuation), (r'\s+', Whitespace), ], } class HLSLShaderLexer(RegexLexer): """ HLSL (Microsoft Direct3D Shader) lexer. .. 
versionadded:: 2.3 """ name = 'HLSL' aliases = ['hlsl'] filenames = ['*.hlsl', '*.hlsli'] mimetypes = ['text/x-hlsl'] tokens = { 'root': [ (r'^#.*$', Comment.Preproc), (r'//.*$', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?', Operator), (r'[?:]', Operator), # quick hack for ternary (r'\bdefined\b', Operator), (r'[;{}(),.\[\]]', Punctuation), # FIXME when e is present, no decimal point needed (r'[+-]?\d*\.\d+([eE][-+]?\d+)?f?', Number.Float), (r'[+-]?\d+\.\d*([eE][-+]?\d+)?f?', Number.Float), (r'0[xX][0-9a-fA-F]*', Number.Hex), (r'0[0-7]*', Number.Oct), (r'[1-9][0-9]*', Number.Integer), (r'"', String, 'string'), (words(( 'asm','asm_fragment','break','case','cbuffer','centroid','class', 'column_major','compile','compile_fragment','const','continue', 'default','discard','do','else','export','extern','for','fxgroup', 'globallycoherent','groupshared','if','in','inline','inout', 'interface','line','lineadj','linear','namespace','nointerpolation', 'noperspective','NULL','out','packoffset','pass','pixelfragment', 'point','precise','return','register','row_major','sample', 'sampler','shared','stateblock','stateblock_state','static', 'struct','switch','tbuffer','technique','technique10', 'technique11','texture','typedef','triangle','triangleadj', 'uniform','vertexfragment','volatile','while'), prefix=r'\b', suffix=r'\b'), Keyword), (words(('true','false'), prefix=r'\b', suffix=r'\b'), Keyword.Constant), (words(( 'auto','catch','char','const_cast','delete','dynamic_cast','enum', 'explicit','friend','goto','long','mutable','new','operator', 'private','protected','public','reinterpret_cast','short','signed', 'sizeof','static_cast','template','this','throw','try','typename', 'union','unsigned','using','virtual'), prefix=r'\b', suffix=r'\b'), Keyword.Reserved), (words(( 'dword','matrix','snorm','string','unorm','unsigned','void','vector', 'BlendState','Buffer','ByteAddressBuffer','ComputeShader', 
'DepthStencilState','DepthStencilView','DomainShader', 'GeometryShader','HullShader','InputPatch','LineStream', 'OutputPatch','PixelShader','PointStream','RasterizerState', 'RenderTargetView','RasterizerOrderedBuffer', 'RasterizerOrderedByteAddressBuffer', 'RasterizerOrderedStructuredBuffer','RasterizerOrderedTexture1D', 'RasterizerOrderedTexture1DArray','RasterizerOrderedTexture2D', 'RasterizerOrderedTexture2DArray','RasterizerOrderedTexture3D', 'RWBuffer','RWByteAddressBuffer','RWStructuredBuffer', 'RWTexture1D','RWTexture1DArray','RWTexture2D','RWTexture2DArray', 'RWTexture3D','SamplerState','SamplerComparisonState', 'StructuredBuffer','Texture1D','Texture1DArray','Texture2D', 'Texture2DArray','Texture2DMS','Texture2DMSArray','Texture3D', 'TextureCube','TextureCubeArray','TriangleStream','VertexShader'), prefix=r'\b', suffix=r'\b'), Keyword.Type), (words(( 'bool','double','float','int','half','min16float','min10float', 'min16int','min12int','min16uint','uint'), prefix=r'\b', suffix=r'([1-4](x[1-4])?)?\b'), Keyword.Type), # vector and matrix types (words(( 'abort','abs','acos','all','AllMemoryBarrier', 'AllMemoryBarrierWithGroupSync','any','AppendStructuredBuffer', 'asdouble','asfloat','asin','asint','asuint','asuint','atan', 'atan2','ceil','CheckAccessFullyMapped','clamp','clip', 'CompileShader','ConsumeStructuredBuffer','cos','cosh','countbits', 'cross','D3DCOLORtoUBYTE4','ddx','ddx_coarse','ddx_fine','ddy', 'ddy_coarse','ddy_fine','degrees','determinant', 'DeviceMemoryBarrier','DeviceMemoryBarrierWithGroupSync','distance', 'dot','dst','errorf','EvaluateAttributeAtCentroid', 'EvaluateAttributeAtSample','EvaluateAttributeSnapped','exp', 'exp2','f16tof32','f32tof16','faceforward','firstbithigh', 'firstbitlow','floor','fma','fmod','frac','frexp','fwidth', 'GetRenderTargetSampleCount','GetRenderTargetSamplePosition', 'GlobalOrderedCountIncrement','GroupMemoryBarrier', 'GroupMemoryBarrierWithGroupSync','InterlockedAdd','InterlockedAnd', 
'InterlockedCompareExchange','InterlockedCompareStore', 'InterlockedExchange','InterlockedMax','InterlockedMin', 'InterlockedOr','InterlockedXor','isfinite','isinf','isnan', 'ldexp','length','lerp','lit','log','log10','log2','mad','max', 'min','modf','msad4','mul','noise','normalize','pow','printf', 'Process2DQuadTessFactorsAvg','Process2DQuadTessFactorsMax', 'Process2DQuadTessFactorsMin','ProcessIsolineTessFactors', 'ProcessQuadTessFactorsAvg','ProcessQuadTessFactorsMax', 'ProcessQuadTessFactorsMin','ProcessTriTessFactorsAvg', 'ProcessTriTessFactorsMax','ProcessTriTessFactorsMin', 'QuadReadLaneAt','QuadSwapX','QuadSwapY','radians','rcp', 'reflect','refract','reversebits','round','rsqrt','saturate', 'sign','sin','sincos','sinh','smoothstep','sqrt','step','tan', 'tanh','tex1D','tex1D','tex1Dbias','tex1Dgrad','tex1Dlod', 'tex1Dproj','tex2D','tex2D','tex2Dbias','tex2Dgrad','tex2Dlod', 'tex2Dproj','tex3D','tex3D','tex3Dbias','tex3Dgrad','tex3Dlod', 'tex3Dproj','texCUBE','texCUBE','texCUBEbias','texCUBEgrad', 'texCUBElod','texCUBEproj','transpose','trunc','WaveAllBitAnd', 'WaveAllMax','WaveAllMin','WaveAllBitOr','WaveAllBitXor', 'WaveAllEqual','WaveAllProduct','WaveAllSum','WaveAllTrue', 'WaveAnyTrue','WaveBallot','WaveGetLaneCount','WaveGetLaneIndex', 'WaveGetOrderedIndex','WaveIsHelperLane','WaveOnce', 'WavePrefixProduct','WavePrefixSum','WaveReadFirstLane', 'WaveReadLaneAt'), prefix=r'\b', suffix=r'\b'), Name.Builtin), # built-in functions (words(( 'SV_ClipDistance','SV_ClipDistance0','SV_ClipDistance1', 'SV_Culldistance','SV_CullDistance0','SV_CullDistance1', 'SV_Coverage','SV_Depth','SV_DepthGreaterEqual', 'SV_DepthLessEqual','SV_DispatchThreadID','SV_DomainLocation', 'SV_GroupID','SV_GroupIndex','SV_GroupThreadID','SV_GSInstanceID', 'SV_InnerCoverage','SV_InsideTessFactor','SV_InstanceID', 'SV_IsFrontFace','SV_OutputControlPointID','SV_Position', 'SV_PrimitiveID','SV_RenderTargetArrayIndex','SV_SampleIndex', 'SV_StencilRef','SV_TessFactor','SV_VertexID', 
'SV_ViewportArrayIndex'), prefix=r'\b', suffix=r'\b'), Name.Decorator), # system-value semantics (r'\bSV_Target[0-7]?\b', Name.Decorator), (words(( 'allow_uav_condition','branch','call','domain','earlydepthstencil', 'fastopt','flatten','forcecase','instance','loop','maxtessfactor', 'numthreads','outputcontrolpoints','outputtopology','partitioning', 'patchconstantfunc','unroll'), prefix=r'\b', suffix=r'\b'), Name.Decorator), # attributes (r'[a-zA-Z_]\w*', Name), (r'\\$', Comment.Preproc), # backslash at end of line -- usually macro continuation (r'\s+', Whitespace), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], } class PostScriptLexer(RegexLexer): """ Lexer for PostScript files. .. versionadded:: 1.4 """ name = 'PostScript' url = 'https://en.wikipedia.org/wiki/PostScript' aliases = ['postscript', 'postscr'] filenames = ['*.ps', '*.eps'] mimetypes = ['application/postscript'] delimiter = r'()<>\[\]{}/%\s' delimiter_end = r'(?=[%s])' % delimiter valid_name_chars = r'[^%s]' % delimiter valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) tokens = { 'root': [ # All comment types (r'^%!.+$', Comment.Preproc), (r'%%.*$', Comment.Special), (r'(^%.*\n){2,}', Comment.Multiline), (r'%.*$', Comment.Single), # String literals are awkward; enter separate state. (r'\(', String, 'stringliteral'), (r'[{}<>\[\]]', Punctuation), # Numbers (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex), # Slight abuse: use Oct to signify any explicit base system (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)' r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct), (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' 
+ delimiter_end, Number.Float), (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer), # References (r'\/%s' % valid_name, Name.Variable), # Names (valid_name, Name.Function), # Anything else is executed # These keywords taken from # <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf> # Is there an authoritative list anywhere that doesn't involve # trawling documentation? (r'(false|true)' + delimiter_end, Keyword.Constant), # Conditionals / flow control (r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)' + delimiter_end, Keyword.Reserved), (words(( 'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin', 'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat', 'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix', 'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix', 'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end', 'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get', 'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv', 'idtransform', 'index', 'invertmatrix', 'itransform', 'length', 'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto', 'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print', 'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat', 'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run', 'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray', 'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix', 'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt', 'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror', 'transform', 'translate', 'truncate', 'typecheck', 'undefined', 'undefinedfilename', 'undefinedresult'), suffix=delimiter_end), Name.Builtin), (r'\s+', Whitespace), ], 'stringliteral': [ (r'[^()\\]+', String), (r'\\', String.Escape, 'escape'), (r'\(', String, '#push'), (r'\)', String, '#pop'), ], 'escape': [ (r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'), default('#pop'), ], } class 
AsymptoteLexer(RegexLexer): """ For Asymptote source code. .. versionadded:: 1.2 """ name = 'Asymptote' url = 'http://asymptote.sf.net/' aliases = ['asymptote', 'asy'] filenames = ['*.asy'] mimetypes = ['text/x-asymptote'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' tokens = { 'whitespace': [ (r'\n', Whitespace), (r'\s+', Whitespace), (r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), ], 'statements': [ # simple string (TeX friendly) (r'"(\\\\|\\[^\\]|[^"\\])*"', String), # C style string (with character escapes) (r"'", String, 'string'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[()\[\],.]', Punctuation), (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' r'return|break|continue|struct|typedef|new|access|import|' r'unravel|from|include|quote|static|public|private|restricted|' r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), # Since an asy-type-name can be also an asy-function-name, # in the following we test if the string " [a-zA-Z]" follows # the Keyword.Type. # Of course it is not perfect ! 
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' r'path3|pen|picture|point|position|projection|real|revolution|' r'scaleT|scientific|segment|side|slice|splitface|string|surface|' r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' r'transformation|tree|triangle|trilinear|triple|vector|' r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type), # Now the asy-type-name which are not asy-function-name # except yours ! # Perhaps useless (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' r'picture|position|real|revolution|slice|splitface|ticksgridT|' r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), (r'[a-zA-Z_]\w*:(?!:)', Name.Label), (r'[a-zA-Z_]\w*', Name), ], 'root': [ include('whitespace'), # functions (r'((?:[\w*\s])+?(?:\s|\*))' # return arguments r'([a-zA-Z_]\w*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')(\{)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation), 'function'), # function declarations (r'((?:[\w*\s])+?(?:\s|\*))' # return arguments r'([a-zA-Z_]\w*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')(;)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation)), default('statement'), ], 'statement': [ include('whitespace'), include('statements'), ('[{}]', Punctuation), (';', Punctuation, '#pop'), ], 'function': [ include('whitespace'), include('statements'), (';', Punctuation), (r'\{', Punctuation, '#push'), (r'\}', Punctuation, '#pop'), ], 'string': [ (r"'", String, '#pop'), (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'\n', String), (r"[^\\'\n]+", String), # all other characters 
(r'\\\n', String), (r'\\n', String), # line continuation (r'\\', String), # stray backslash ], } def get_tokens_unprocessed(self, text): from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name and value in ASYFUNCNAME: token = Name.Function elif token is Name and value in ASYVARNAME: token = Name.Variable yield index, token, value def _shortened(word): dpos = word.find('$') return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b' for i in range(len(word), dpos, -1)) def _shortened_many(*words): return '|'.join(map(_shortened, words)) class GnuplotLexer(RegexLexer): """ For Gnuplot plotting scripts. .. versionadded:: 0.11 """ name = 'Gnuplot' url = 'http://gnuplot.info/' aliases = ['gnuplot'] filenames = ['*.plot', '*.plt'] mimetypes = ['text/x-gnuplot'] tokens = { 'root': [ include('whitespace'), (_shortened('bi$nd'), Keyword, 'bind'), (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'), (_shortened('f$it'), Keyword, 'fit'), (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'), (r'else\b', Keyword), (_shortened('pa$use'), Keyword, 'pause'), (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'), (_shortened('sa$ve'), Keyword, 'save'), (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')), (_shortened_many('sh$ow', 'uns$et'), Keyword, ('noargs', 'optionarg')), (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear', 'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int', 'pwd$', 're$read', 'res$et', 'scr$eendump', 'she$ll', 'sy$stem', 'up$date'), Keyword, 'genericargs'), (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump', 'she$ll', 'test$'), Keyword, 'noargs'), (r'([a-zA-Z_]\w*)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator), 'genericargs'), (r'([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)', bygroups(Name.Function, Whitespace, Operator), 'genericargs'), (r'@[a-zA-Z_]\w*', Name.Constant), # macros (r';', Keyword), ], 'comment': [ 
(r'[^\\\n]', Comment), (r'\\\n', Comment), (r'\\', Comment), # don't add the newline to the Comment token default('#pop'), ], 'whitespace': [ ('#', Comment, 'comment'), (r'[ \t\v\f]+', Whitespace), ], 'noargs': [ include('whitespace'), # semicolon and newline end the argument list (r';', Punctuation, '#pop'), (r'\n', Whitespace, '#pop'), ], 'dqstring': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash (r'\n', Whitespace, '#pop'), # newline ends the string too ], 'sqstring': [ (r"''", String), # escaped single quote (r"'", String, '#pop'), (r"[^\\'\n]+", String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # normal backslash (r'\n', Whitespace, '#pop'), # newline ends the string too ], 'genericargs': [ include('noargs'), (r'"', String, 'dqstring'), (r"'", String, 'sqstring'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), (r'(\d+\.\d*|\.\d+)', Number.Float), (r'-?\d+', Number.Integer), ('[,.~!%^&*+=|?:<>/-]', Operator), (r'[{}()\[\]]', Punctuation), (r'(eq|ne)\b', Operator.Word), (r'([a-zA-Z_]\w*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), (r'[a-zA-Z_]\w*', Name), (r'@[a-zA-Z_]\w*', Name.Constant), # macros (r'(\\)(\n)', bygroups(Text, Whitespace)), ], 'optionarg': [ include('whitespace'), (_shortened_many( "a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der", "box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta", "data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign", "fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid", "hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle", "la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale", "mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin", "rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot", "mxt$ics", "nomxt$ics", "mx2t$ics", 
"nomx2t$ics", "myt$ics", "nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics", "mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput", "pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot", "poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze", "st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs", "ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le", "v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta", "yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel", "yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs", "x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs", "zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs", "x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs", "noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs", "xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs", "noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs", "cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange", "y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange", "vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis", "zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'), ], 'bind': [ ('!', Keyword, '#pop'), (_shortened('all$windows'), Name.Builtin), include('genericargs'), ], 'quit': [ (r'gnuplot\b', Keyword), include('noargs'), ], 'fit': [ (r'via\b', Name.Builtin), include('plot'), ], 'if': [ (r'\)', Punctuation, '#pop'), include('genericargs'), ], 'pause': [ (r'(mouse|any|button1|button2|button3)\b', Name.Builtin), (_shortened('key$press'), Name.Builtin), include('genericargs'), ], 'plot': [ (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', 'mat$rix', 's$mooth', 'thru$', 't$itle', 'not$itle', 'u$sing', 'w$ith'), Name.Builtin), include('genericargs'), ], 'save': [ (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), Name.Builtin), include('genericargs'), ], } class 
PovrayLexer(RegexLexer): """ For Persistence of Vision Raytracer files. .. versionadded:: 0.11 """ name = 'POVRay' url = 'http://www.povray.org/' aliases = ['pov'] filenames = ['*.pov', '*.inc'] mimetypes = ['text/x-povray'] tokens = { 'root': [ (r'/\*[\w\W]*?\*/', Comment.Multiline), (r'//.*$', Comment.Single), (r'(?s)"(?:\\.|[^"\\])+"', String.Double), (words(( 'break', 'case', 'debug', 'declare', 'default', 'define', 'else', 'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef', 'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render', 'statistics', 'switch', 'undef', 'version', 'warning', 'while', 'write'), prefix=r'#', suffix=r'\b'), Comment.Preproc), (words(( 'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout', 'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle', 'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma', 'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation', 'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples', 'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size', 'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map', 'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock', 'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat', 'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh', 'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping', 'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction', 'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity', 'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent', 'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false', 'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor', 'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif', 
'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold', 'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex', 'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate', 'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard', 'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like', 'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map', 'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level', 'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar', 'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves', 'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open', 'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3', 'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment', 'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm', 'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color', 'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow', 'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red', 'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt', 'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave', 'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere', 'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral', 'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str', 'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys', 't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3', 'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold', 'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit', 'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type', 'ultra_wide_angle', 'up', 
'use_color', 'use_colour', 'use_index', 'u_steps', 'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength', 'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light', 'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width', 'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'), Keyword), (words(( 'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference', 'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe', 'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism', 'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid', 'text', 'torus', 'triangle', 'union'), suffix=r'\b'), Name.Builtin), (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo), (r'[a-zA-Z_]\w*', Name), (r'[0-9]*\.[0-9]+', Number.Float), (r'[0-9]+', Number.Integer), (r'[\[\](){}<>;,]', Punctuation), (r'[-+*/=.|&]|<=|>=|!=', Operator), (r'"(\\\\|\\[^\\]|[^"\\])*"', String), (r'\s+', Whitespace), ] } def analyse_text(text): """POVRAY is similar to JSON/C, but the combination of camera and light_source is probably not very likely elsewhere. HLSL or GLSL are similar (GLSL even has #version), but they miss #declare, and light_source/camera are not keywords anywhere else -- it's fair to assume though that any POVRAY scene must have a camera and lightsource.""" result = 0 if '#version' in text: result += 0.05 if '#declare' in text: result += 0.05 if 'camera' in text: result += 0.05 if 'light_source' in text: result += 0.1 return result
38,931
Python
47.786967
103
0.485783
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/lisp.py
""" pygments.lexers.lisp ~~~~~~~~~~~~~~~~~~~~ Lexers for Lispy languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, words, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Literal, Error, Whitespace from pygments.lexers.python import PythonLexer from pygments.lexers._scheme_builtins import scheme_keywords, scheme_builtins __all__ = ['SchemeLexer', 'CommonLispLexer', 'HyLexer', 'RacketLexer', 'NewLispLexer', 'EmacsLispLexer', 'ShenLexer', 'CPSALexer', 'XtlangLexer', 'FennelLexer'] class SchemeLexer(RegexLexer): """ A Scheme lexer. This parser is checked with pastes from the LISP pastebin at http://paste.lisp.org/ to cover as much syntax as possible. It supports the full Scheme syntax as defined in R5RS. .. versionadded:: 0.6 """ name = 'Scheme' url = 'http://www.scheme-reports.org/' aliases = ['scheme', 'scm'] filenames = ['*.scm', '*.ss'] mimetypes = ['text/x-scheme', 'application/x-scheme'] flags = re.DOTALL | re.MULTILINE # valid names for identifiers # well, names can only not consist fully of numbers # but this should be good enough for now valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+' # Use within verbose regexes token_end = r''' (?= \s # whitespace | ; # comment | \#[;|!] # fancy comments | [)\]] # end delimiters | $ # end of file ) ''' # Recognizing builtins. def get_tokens_unprocessed(self, text): for index, token, value in super().get_tokens_unprocessed(text): if token is Name.Function or token is Name.Variable: if value in scheme_keywords: yield index, Keyword, value elif value in scheme_builtins: yield index, Name.Builtin, value else: yield index, token, value else: yield index, token, value # Scheme has funky syntactic rules for numbers. These are all # valid number literals: 5.0e55|14, 14/13, -1+5j, +1@5, #b110, # #o#Iinf.0-nan.0i. 
This is adapted from the formal grammar given # in http://www.r6rs.org/final/r6rs.pdf, section 4.2.1. Take a # deep breath ... # It would be simpler if we could just not bother about invalid # numbers like #b35. But we cannot parse 'abcdef' without #x as a # number. number_rules = {} for base in (2, 8, 10, 16): if base == 2: digit = r'[01]' radix = r'( \#[bB] )' elif base == 8: digit = r'[0-7]' radix = r'( \#[oO] )' elif base == 10: digit = r'[0-9]' radix = r'( (\#[dD])? )' elif base == 16: digit = r'[0-9a-fA-F]' radix = r'( \#[xX] )' # Radix, optional exactness indicator. prefix = rf''' ( {radix} (\#[iIeE])? | \#[iIeE] {radix} ) ''' # Simple unsigned number or fraction. ureal = rf''' ( {digit}+ ( / {digit}+ )? ) ''' # Add decimal numbers. if base == 10: decimal = r''' ( # Decimal part ( [0-9]+ ([.][0-9]*)? | [.][0-9]+ ) # Optional exponent ( [eEsSfFdDlL] [+-]? [0-9]+ )? # Optional mantissa width ( \|[0-9]+ )? ) ''' ureal = rf''' ( {decimal} (?!/) | {ureal} ) ''' naninf = r'(nan.0|inf.0)' real = rf''' ( [+-] {naninf} # Sign mandatory | [+-]? {ureal} # Sign optional ) ''' complex_ = rf''' ( {real}? [+-] ({naninf}|{ureal})? i | {real} (@ {real})? ) ''' num = rf'''(?x) ( {prefix} {complex_} ) # Need to ensure we have a full token. 1+ is not a # number followed by something else, but a function # name. {token_end} ''' number_rules[base] = num # If you have a headache now, say thanks to RnRS editors. # Doing it this way is simpler than splitting the number(10) # regex in a floating-point and a no-floating-point version. def decimal_cb(self, match): if '.' in match.group(): token_type = Number.Float # includes [+-](inf|nan).0 else: token_type = Number.Integer yield match.start(), token_type, match.group() # -- # The 'scheme-root' state parses as many expressions as needed, always # delegating to the 'scheme-value' state. The latter parses one complete # expression and immediately pops back. This is needed for the LilyPondLexer. 
# When LilyPond encounters a #, it starts parsing embedded Scheme code, and # returns to normal syntax after one expression. We implement this # by letting the LilyPondLexer subclass the SchemeLexer. When it finds # the #, the LilyPondLexer goes to the 'value' state, which then pops back # to LilyPondLexer. The 'root' state of the SchemeLexer merely delegates the # work to 'scheme-root'; this is so that LilyPondLexer can inherit # 'scheme-root' and redefine 'root'. tokens = { 'root': [ default('scheme-root'), ], 'scheme-root': [ default('value'), ], 'value': [ # the comments # and going to the end of the line (r';.*?$', Comment.Single), # multi-line comment (r'#\|', Comment.Multiline, 'multiline-comment'), # commented form (entire sexpr following) (r'#;[([]', Comment, 'commented-form'), # commented datum (r'#;', Comment, 'commented-datum'), # signifies that the program text that follows is written with the # lexical and datum syntax described in r6rs (r'#!r6rs', Comment), # whitespaces - usually not relevant (r'\s+', Whitespace), # numbers (number_rules[2], Number.Bin, '#pop'), (number_rules[8], Number.Oct, '#pop'), (number_rules[10], decimal_cb, '#pop'), (number_rules[16], Number.Hex, '#pop'), # strings, symbols, keywords and characters (r'"', String, 'string'), (r"'" + valid_name, String.Symbol, "#pop"), (r'#:' + valid_name, Keyword.Declaration, '#pop'), (r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char, "#pop"), # constants (r'(#t|#f)', Name.Constant, '#pop'), # special operators (r"('|#|`|,@|,|\.)", Operator), # first variable in a quoted string like # '(this is syntactic sugar) (r"(?<='\()" + valid_name, Name.Variable, '#pop'), (r"(?<=#\()" + valid_name, Name.Variable, '#pop'), # Functions -- note that this also catches variables # defined in let/let*, but there is little that can # be done about it. (r'(?<=\()' + valid_name, Name.Function, '#pop'), # find the remaining variables (valid_name, Name.Variable, '#pop'), # the famous parentheses! 
# Push scheme-root to enter a state that will parse as many things # as needed in the parentheses. (r'[([]', Punctuation, 'scheme-root'), # Pop one 'value', one 'scheme-root', and yet another 'value', so # we get back to a state parsing expressions as needed in the # enclosing context. (r'[)\]]', Punctuation, '#pop:3'), ], 'multiline-comment': [ (r'#\|', Comment.Multiline, '#push'), (r'\|#', Comment.Multiline, '#pop'), (r'[^|#]+', Comment.Multiline), (r'[|#]', Comment.Multiline), ], 'commented-form': [ (r'[([]', Comment, '#push'), (r'[)\]]', Comment, '#pop'), (r'[^()[\]]+', Comment), ], 'commented-datum': [ (rf'(?x).*?{token_end}', Comment, '#pop'), ], 'string': [ # Pops back from 'string', and pops 'value' as well. ('"', String, '#pop:2'), # Hex escape sequences, R6RS-style. (r'\\x[0-9a-fA-F]+;', String.Escape), # We try R6RS style first, but fall back to Guile-style. (r'\\x[0-9a-fA-F]{2}', String.Escape), # Other special escape sequences implemented by Guile. (r'\\u[0-9a-fA-F]{4}', String.Escape), (r'\\U[0-9a-fA-F]{6}', String.Escape), # Escape sequences are not overly standardized. Recognizing # a single character after the backslash should be good enough. # NB: we have DOTALL. (r'\\.', String.Escape), # The rest (r'[^\\"]+', String), ] } class CommonLispLexer(RegexLexer): """ A Common Lisp lexer. .. versionadded:: 0.9 """ name = 'Common Lisp' url = 'https://lisp-lang.org/' aliases = ['common-lisp', 'cl', 'lisp'] filenames = ['*.cl', '*.lisp'] mimetypes = ['text/x-common-lisp'] flags = re.IGNORECASE | re.MULTILINE # couple of useful regexes # characters that are not macro-characters and can be used to begin a symbol nonmacro = r'\\.|[\w!$%&*+-/<=>?@\[\]^{}~]' constituent = nonmacro + '|[#.:]' terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters # symbol token, reverse-engineered from hyperspec # Take a deep breath... 
    symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)

    def __init__(self, **options):
        # Deferred import: the builtin tables are large, so they are only
        # loaded when a CommonLispLexer is actually instantiated.
        from pygments.lexers._cl_builtins import BUILTIN_FUNCTIONS, \
            SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
            BUILTIN_TYPES, BUILTIN_CLASSES
        self.builtin_function = BUILTIN_FUNCTIONS
        self.special_forms = SPECIAL_FORMS
        self.macros = MACROS
        self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
        self.declarations = DECLARATIONS
        self.builtin_types = BUILTIN_TYPES
        self.builtin_classes = BUILTIN_CLASSES
        RegexLexer.__init__(self, **options)

    # Re-tags plain Name.Variable tokens that match a known builtin
    # function / special form / macro / type / class from _cl_builtins.
    def get_tokens_unprocessed(self, text):
        stack = ['root']
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Variable:
                if value in self.builtin_function:
                    yield index, Name.Builtin, value
                    continue
                if value in self.special_forms:
                    yield index, Keyword, value
                    continue
                if value in self.macros:
                    yield index, Name.Builtin, value
                    continue
                if value in self.lambda_list_keywords:
                    yield index, Keyword, value
                    continue
                if value in self.declarations:
                    yield index, Keyword, value
                    continue
                if value in self.builtin_types:
                    yield index, Keyword.Type, value
                    continue
                if value in self.builtin_classes:
                    yield index, Name.Class, value
                    continue
            yield index, token, value

    tokens = {
        'root': [
            default('body'),
        ],
        'multiline-comment': [
            (r'#\|', Comment.Multiline, '#push'),  # (cf. Hyperspec 2.4.8.19)
            (r'\|#', Comment.Multiline, '#pop'),
            (r'[^|#]+', Comment.Multiline),
            (r'[|#]', Comment.Multiline),
        ],
        'commented-form': [
            (r'\(', Comment.Preproc, '#push'),
            (r'\)', Comment.Preproc, '#pop'),
            (r'[^()]+', Comment.Preproc),
        ],
        'body': [
            # whitespace
            (r'\s+', Whitespace),
            # single-line comment
            (r';.*$', Comment.Single),
            # multi-line comment
            (r'#\|', Comment.Multiline, 'multiline-comment'),
            # encoding comment (?)
            (r'#\d*Y.*$', Comment.Special),
            # strings and characters
            (r'"(\\.|\\\n|[^"\\])*"', String),
            # quoting
            (r":" + symbol, String.Symbol),
            (r"::" + symbol, String.Symbol),
            (r":#" + symbol, String.Symbol),
            (r"'" + symbol, String.Symbol),
            (r"'", Operator),
            (r"`", Operator),
            # decimal numbers
            (r'[-+]?\d+\.?' + terminated, Number.Integer),
            (r'[-+]?\d+/\d+' + terminated, Number),
            (r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
             terminated, Number.Float),
            # sharpsign strings and characters
            (r"#\\." + terminated, String.Char),
            (r"#\\" + symbol, String.Char),
            # vector
            (r'#\(', Operator, 'body'),
            # bitstring
            (r'#\d*\*[01]*', Literal.Other),
            # uninterned symbol
            (r'#:' + symbol, String.Symbol),
            # read-time and load-time evaluation
            (r'#[.,]', Operator),
            # function shorthand
            (r'#\'', Name.Function),
            # binary rational
            (r'#b[+-]?[01]+(/[01]+)?', Number.Bin),
            # octal rational
            (r'#o[+-]?[0-7]+(/[0-7]+)?', Number.Oct),
            # hex rational
            (r'#x[+-]?[0-9a-f]+(/[0-9a-f]+)?', Number.Hex),
            # radix rational
            (r'#\d+r[+-]?[0-9a-z]+(/[0-9a-z]+)?', Number),
            # complex
            (r'(#c)(\()', bygroups(Number, Punctuation), 'body'),
            # array
            (r'(#\d+a)(\()', bygroups(Literal.Other, Punctuation), 'body'),
            # structure
            (r'(#s)(\()', bygroups(Literal.Other, Punctuation), 'body'),
            # path
            (r'#p?"(\\.|[^"])*"', Literal.Other),
            # reference
            (r'#\d+=', Operator),
            (r'#\d+#', Operator),
            # read-time comment
            (r'#+nil' + terminated + r'\s*\(', Comment.Preproc, 'commented-form'),
            # read-time conditional
            (r'#[+-]', Operator),
            # special operators that should have been parsed already
            (r'(,@|,|\.)', Operator),
            # special constants
            (r'(t|nil)' + terminated, Name.Constant),
            # functions and variables
            (r'\*' + symbol + r'\*', Name.Variable.Global),
            (symbol, Name.Variable),
            # parentheses
            (r'\(', Punctuation, 'body'),
            (r'\)', Punctuation, '#pop'),
        ],
    }


class HyLexer(RegexLexer):
    """
    Lexer for Hy source code.

    .. versionadded:: 2.0
    """
    name = 'Hy'
    url = 'http://hylang.org/'
    aliases = ['hylang']
    filenames = ['*.hy']
    mimetypes = ['text/x-hy', 'application/x-hy']

    special_forms = (
        'cond', 'for', '->', '->>', 'car', 'cdr', 'first', 'rest', 'let',
        'when', 'unless', 'import', 'do', 'progn', 'get', 'slice', 'assoc',
        'with-decorator', ',', 'list_comp', 'kwapply', '~', 'is', 'in',
        'is-not', 'not-in', 'quasiquote', 'unquote', 'unquote-splice',
        'quote', '|', '<<=', '>>=', 'foreach', 'while', 'eval-and-compile',
        'eval-when-compile'
    )

    declarations = (
        'def', 'defn', 'defun', 'defmacro', 'defclass', 'lambda', 'fn', 'setv'
    )

    hy_builtins = ()

    hy_core = (
        'cycle', 'dec', 'distinct', 'drop', 'even?', 'filter', 'inc',
        'instance?', 'iterable?', 'iterate', 'iterator?', 'neg?', 'none?',
        'nth', 'numeric?', 'odd?', 'pos?', 'remove', 'repeat', 'repeatedly',
        'take', 'take_nth', 'take_while', 'zero?'
    )

    builtins = hy_builtins + hy_core

    # valid names for identifiers
    # well, names can only not consist fully of numbers
    # but this should be good enough for now
    valid_name = r'(?!#)[\w!$%*+<=>?/.#:-]+'

    # Helper evaluated at class-definition time while building `tokens`
    # below -- deliberately not a method (no `self`).
    def _multi_escape(entries):
        return words(entries, suffix=' ')

    tokens = {
        'root': [
            # the comments - always starting with semicolon
            # and going to the end of the line
            (r';.*$', Comment.Single),
            # whitespaces - usually not relevant
            (r',+', Text),
            (r'\s+', Whitespace),
            # numbers
            (r'-?\d+\.\d+', Number.Float),
            (r'-?\d+', Number.Integer),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            # strings, symbols and characters
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r"'" + valid_name, String.Symbol),
            (r"\\(.|[a-z]+)", String.Char),
            (r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
            (r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
            # keywords
            (r'::?' + valid_name, String.Symbol),
            # special operators
            (r'~@|[`\'#^~&@]', Operator),
            include('py-keywords'),
            include('py-builtins'),
            # highlight the special forms
            (_multi_escape(special_forms), Keyword),
            # Technically, only the special forms are 'keywords'. The problem
            # is that only treating them as keywords means that things like
            # 'defn' and 'ns' need to be highlighted as builtins. This is ugly
            # and weird for most styles. So, as a compromise we're going to
            # highlight them as Keyword.Declarations.
            (_multi_escape(declarations), Keyword.Declaration),
            # highlight the builtins
            (_multi_escape(builtins), Name.Builtin),
            # the remaining functions
            (r'(?<=\()' + valid_name, Name.Function),
            # find the remaining variables
            (valid_name, Name.Variable),
            # Hy accepts vector notation
            (r'(\[|\])', Punctuation),
            # Hy accepts map notation
            (r'(\{|\})', Punctuation),
            # the famous parentheses!
            (r'(\(|\))', Punctuation),
        ],
        # Hy embeds Python keyword/builtin highlighting wholesale.
        'py-keywords': PythonLexer.tokens['keywords'],
        'py-builtins': PythonLexer.tokens['builtins'],
    }

    # Returns None implicitly (treated as 0) when neither marker is found.
    def analyse_text(text):
        if '(import ' in text or '(defn ' in text:
            return 0.9


class RacketLexer(RegexLexer):
    """ Lexer for Racket source code (formerly known as PLT Scheme). ..
versionadded:: 1.6 """ name = 'Racket' url = 'http://racket-lang.org/' aliases = ['racket', 'rkt'] filenames = ['*.rkt', '*.rktd', '*.rktl'] mimetypes = ['text/x-racket', 'application/x-racket'] # Generated by example.rkt _keywords = ( '#%app', '#%datum', '#%declare', '#%expression', '#%module-begin', '#%plain-app', '#%plain-lambda', '#%plain-module-begin', '#%printing-module-begin', '#%provide', '#%require', '#%stratified-body', '#%top', '#%top-interaction', '#%variable-reference', '->', '->*', '->*m', '->d', '->dm', '->i', '->m', '...', ':do-in', '==', '=>', '_', 'absent', 'abstract', 'all-defined-out', 'all-from-out', 'and', 'any', 'augment', 'augment*', 'augment-final', 'augment-final*', 'augride', 'augride*', 'begin', 'begin-for-syntax', 'begin0', 'case', 'case->', 'case->m', 'case-lambda', 'class', 'class*', 'class-field-accessor', 'class-field-mutator', 'class/c', 'class/derived', 'combine-in', 'combine-out', 'command-line', 'compound-unit', 'compound-unit/infer', 'cond', 'cons/dc', 'contract', 'contract-out', 'contract-struct', 'contracted', 'define', 'define-compound-unit', 'define-compound-unit/infer', 'define-contract-struct', 'define-custom-hash-types', 'define-custom-set-types', 'define-for-syntax', 'define-local-member-name', 'define-logger', 'define-match-expander', 'define-member-name', 'define-module-boundary-contract', 'define-namespace-anchor', 'define-opt/c', 'define-sequence-syntax', 'define-serializable-class', 'define-serializable-class*', 'define-signature', 'define-signature-form', 'define-struct', 'define-struct/contract', 'define-struct/derived', 'define-syntax', 'define-syntax-rule', 'define-syntaxes', 'define-unit', 'define-unit-binding', 'define-unit-from-context', 'define-unit/contract', 'define-unit/new-import-export', 'define-unit/s', 'define-values', 'define-values-for-export', 'define-values-for-syntax', 'define-values/invoke-unit', 'define-values/invoke-unit/infer', 'define/augment', 'define/augment-final', 'define/augride', 
'define/contract', 'define/final-prop', 'define/match', 'define/overment', 'define/override', 'define/override-final', 'define/private', 'define/public', 'define/public-final', 'define/pubment', 'define/subexpression-pos-prop', 'define/subexpression-pos-prop/name', 'delay', 'delay/idle', 'delay/name', 'delay/strict', 'delay/sync', 'delay/thread', 'do', 'else', 'except', 'except-in', 'except-out', 'export', 'extends', 'failure-cont', 'false', 'false/c', 'field', 'field-bound?', 'file', 'flat-murec-contract', 'flat-rec-contract', 'for', 'for*', 'for*/and', 'for*/async', 'for*/first', 'for*/fold', 'for*/fold/derived', 'for*/hash', 'for*/hasheq', 'for*/hasheqv', 'for*/last', 'for*/list', 'for*/lists', 'for*/mutable-set', 'for*/mutable-seteq', 'for*/mutable-seteqv', 'for*/or', 'for*/product', 'for*/set', 'for*/seteq', 'for*/seteqv', 'for*/stream', 'for*/sum', 'for*/vector', 'for*/weak-set', 'for*/weak-seteq', 'for*/weak-seteqv', 'for-label', 'for-meta', 'for-syntax', 'for-template', 'for/and', 'for/async', 'for/first', 'for/fold', 'for/fold/derived', 'for/hash', 'for/hasheq', 'for/hasheqv', 'for/last', 'for/list', 'for/lists', 'for/mutable-set', 'for/mutable-seteq', 'for/mutable-seteqv', 'for/or', 'for/product', 'for/set', 'for/seteq', 'for/seteqv', 'for/stream', 'for/sum', 'for/vector', 'for/weak-set', 'for/weak-seteq', 'for/weak-seteqv', 'gen:custom-write', 'gen:dict', 'gen:equal+hash', 'gen:set', 'gen:stream', 'generic', 'get-field', 'hash/dc', 'if', 'implies', 'import', 'include', 'include-at/relative-to', 'include-at/relative-to/reader', 'include/reader', 'inherit', 'inherit-field', 'inherit/inner', 'inherit/super', 'init', 'init-depend', 'init-field', 'init-rest', 'inner', 'inspect', 'instantiate', 'interface', 'interface*', 'invariant-assertion', 'invoke-unit', 'invoke-unit/infer', 'lambda', 'lazy', 'let', 'let*', 'let*-values', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc', 'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes', 'letrec-syntaxes+values', 
'letrec-values', 'lib', 'link', 'local', 'local-require', 'log-debug', 'log-error', 'log-fatal', 'log-info', 'log-warning', 'match', 'match*', 'match*/derived', 'match-define', 'match-define-values', 'match-lambda', 'match-lambda*', 'match-lambda**', 'match-let', 'match-let*', 'match-let*-values', 'match-let-values', 'match-letrec', 'match-letrec-values', 'match/derived', 'match/values', 'member-name-key', 'mixin', 'module', 'module*', 'module+', 'nand', 'new', 'nor', 'object-contract', 'object/c', 'only', 'only-in', 'only-meta-in', 'open', 'opt/c', 'or', 'overment', 'overment*', 'override', 'override*', 'override-final', 'override-final*', 'parameterize', 'parameterize*', 'parameterize-break', 'parametric->/c', 'place', 'place*', 'place/context', 'planet', 'prefix', 'prefix-in', 'prefix-out', 'private', 'private*', 'prompt-tag/c', 'protect-out', 'provide', 'provide-signature-elements', 'provide/contract', 'public', 'public*', 'public-final', 'public-final*', 'pubment', 'pubment*', 'quasiquote', 'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax', 'quote-syntax/prune', 'recontract-out', 'recursive-contract', 'relative-in', 'rename', 'rename-in', 'rename-inner', 'rename-out', 'rename-super', 'require', 'send', 'send*', 'send+', 'send-generic', 'send/apply', 'send/keyword-apply', 'set!', 'set!-values', 'set-field!', 'shared', 'stream', 'stream*', 'stream-cons', 'struct', 'struct*', 'struct-copy', 'struct-field-index', 'struct-out', 'struct/c', 'struct/ctc', 'struct/dc', 'submod', 'super', 'super-instantiate', 'super-make-object', 'super-new', 'syntax', 'syntax-case', 'syntax-case*', 'syntax-id-rules', 'syntax-rules', 'syntax/loc', 'tag', 'this', 'this%', 'thunk', 'thunk*', 'time', 'unconstrained-domain->', 'unit', 'unit-from-context', 'unit/c', 'unit/new-import-export', 'unit/s', 'unless', 'unquote', 'unquote-splicing', 'unsyntax', 'unsyntax-splicing', 'values/drop', 'when', 'with-continuation-mark', 'with-contract', 'with-contract-continuation-mark', 
'with-handlers', 'with-handlers*', 'with-method', 'with-syntax', 'λ' ) # Generated by example.rkt _builtins = ( '*', '*list/c', '+', '-', '/', '<', '</c', '<=', '<=/c', '=', '=/c', '>', '>/c', '>=', '>=/c', 'abort-current-continuation', 'abs', 'absolute-path?', 'acos', 'add-between', 'add1', 'alarm-evt', 'always-evt', 'and/c', 'andmap', 'angle', 'any/c', 'append', 'append*', 'append-map', 'apply', 'argmax', 'argmin', 'arithmetic-shift', 'arity-at-least', 'arity-at-least-value', 'arity-at-least?', 'arity-checking-wrapper', 'arity-includes?', 'arity=?', 'arrow-contract-info', 'arrow-contract-info-accepts-arglist', 'arrow-contract-info-chaperone-procedure', 'arrow-contract-info-check-first-order', 'arrow-contract-info?', 'asin', 'assf', 'assoc', 'assq', 'assv', 'atan', 'bad-number-of-results', 'banner', 'base->-doms/c', 'base->-rngs/c', 'base->?', 'between/c', 'bitwise-and', 'bitwise-bit-field', 'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor', 'blame-add-car-context', 'blame-add-cdr-context', 'blame-add-context', 'blame-add-missing-party', 'blame-add-nth-arg-context', 'blame-add-range-context', 'blame-add-unknown-context', 'blame-context', 'blame-contract', 'blame-fmt->-string', 'blame-missing-party?', 'blame-negative', 'blame-original?', 'blame-positive', 'blame-replace-negative', 'blame-source', 'blame-swap', 'blame-swapped?', 'blame-update', 'blame-value', 'blame?', 'boolean=?', 'boolean?', 'bound-identifier=?', 'box', 'box-cas!', 'box-immutable', 'box-immutable/c', 'box/c', 'box?', 'break-enabled', 'break-parameterization?', 'break-thread', 'build-chaperone-contract-property', 'build-compound-type-name', 'build-contract-property', 'build-flat-contract-property', 'build-list', 'build-path', 'build-path/convention-type', 'build-string', 'build-vector', 'byte-pregexp', 'byte-pregexp?', 'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes', 'bytes->immutable-bytes', 'bytes->list', 'bytes->path', 'bytes->path-element', 'bytes->string/latin-1', 
'bytes->string/locale', 'bytes->string/utf-8', 'bytes-append', 'bytes-append*', 'bytes-close-converter', 'bytes-convert', 'bytes-convert-end', 'bytes-converter?', 'bytes-copy', 'bytes-copy!', 'bytes-environment-variable-name?', 'bytes-fill!', 'bytes-join', 'bytes-length', 'bytes-no-nuls?', 'bytes-open-converter', 'bytes-ref', 'bytes-set!', 'bytes-utf-8-index', 'bytes-utf-8-length', 'bytes-utf-8-ref', 'bytes<?', 'bytes=?', 'bytes>?', 'bytes?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-in-nested-thread', 'call-with-atomic-output-file', 'call-with-break-parameterization', 'call-with-composable-continuation', 'call-with-continuation-barrier', 'call-with-continuation-prompt', 'call-with-current-continuation', 'call-with-default-reading-parameterization', 'call-with-escape-continuation', 'call-with-exception-handler', 'call-with-file-lock/timeout', 'call-with-immediate-continuation-mark', 'call-with-input-bytes', 'call-with-input-file', 'call-with-input-file*', 'call-with-input-string', 'call-with-output-bytes', 'call-with-output-file', 'call-with-output-file*', 'call-with-output-string', 'call-with-parameterization', 'call-with-semaphore', 'call-with-semaphore/enable-break', 'call-with-values', 'call/cc', 'call/ec', 'car', 'cartesian-product', 'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', 'ceiling', 'channel-get', 'channel-put', 'channel-put-evt', 'channel-put-evt?', 'channel-try-get', 'channel/c', 'channel?', 'chaperone-box', 'chaperone-channel', 'chaperone-continuation-mark-key', 'chaperone-contract-property?', 'chaperone-contract?', 'chaperone-evt', 'chaperone-hash', 'chaperone-hash-set', 'chaperone-of?', 'chaperone-procedure', 'chaperone-procedure*', 'chaperone-prompt-tag', 'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector', 'chaperone?', 'char->integer', 
'char-alphabetic?', 'char-blank?', 'char-ci<=?', 'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase', 'char-foldcase', 'char-general-category', 'char-graphic?', 'char-in', 'char-in/c', 'char-iso-control?', 'char-lower-case?', 'char-numeric?', 'char-punctuation?', 'char-ready?', 'char-symbolic?', 'char-title-case?', 'char-titlecase', 'char-upcase', 'char-upper-case?', 'char-utf-8-length', 'char-whitespace?', 'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', 'char?', 'check-duplicate-identifier', 'check-duplicates', 'checked-procedure-check-and-extract', 'choice-evt', 'class->interface', 'class-info', 'class-seal', 'class-unseal', 'class?', 'cleanse-path', 'close-input-port', 'close-output-port', 'coerce-chaperone-contract', 'coerce-chaperone-contracts', 'coerce-contract', 'coerce-contract/f', 'coerce-contracts', 'coerce-flat-contract', 'coerce-flat-contracts', 'collect-garbage', 'collection-file-path', 'collection-path', 'combinations', 'compile', 'compile-allow-set!-undefined', 'compile-context-preservation-enabled', 'compile-enforce-module-constants', 'compile-syntax', 'compiled-expression-recompile', 'compiled-expression?', 'compiled-module-expression?', 'complete-path?', 'complex?', 'compose', 'compose1', 'conjoin', 'conjugate', 'cons', 'cons/c', 'cons?', 'const', 'continuation-mark-key/c', 'continuation-mark-key?', 'continuation-mark-set->context', 'continuation-mark-set->list', 'continuation-mark-set->list*', 'continuation-mark-set-first', 'continuation-mark-set?', 'continuation-marks', 'continuation-prompt-available?', 'continuation-prompt-tag?', 'continuation?', 'contract-continuation-mark-key', 'contract-custom-write-property-proc', 'contract-exercise', 'contract-first-order', 'contract-first-order-passes?', 'contract-late-neg-projection', 'contract-name', 'contract-proc', 'contract-projection', 'contract-property?', 'contract-random-generate', 'contract-random-generate-fail', 'contract-random-generate-fail?', 
'contract-random-generate-get-current-environment', 'contract-random-generate-stash', 'contract-random-generate/choose', 'contract-stronger?', 'contract-struct-exercise', 'contract-struct-generate', 'contract-struct-late-neg-projection', 'contract-struct-list-contract?', 'contract-val-first-projection', 'contract?', 'convert-stream', 'copy-directory/files', 'copy-file', 'copy-port', 'cos', 'cosh', 'count', 'current-blame-format', 'current-break-parameterization', 'current-code-inspector', 'current-command-line-arguments', 'current-compile', 'current-compiled-file-roots', 'current-continuation-marks', 'current-contract-region', 'current-custodian', 'current-directory', 'current-directory-for-user', 'current-drive', 'current-environment-variables', 'current-error-port', 'current-eval', 'current-evt-pseudo-random-generator', 'current-force-delete-permissions', 'current-future', 'current-gc-milliseconds', 'current-get-interaction-input-port', 'current-inexact-milliseconds', 'current-input-port', 'current-inspector', 'current-library-collection-links', 'current-library-collection-paths', 'current-load', 'current-load-extension', 'current-load-relative-directory', 'current-load/use-compiled', 'current-locale', 'current-logger', 'current-memory-use', 'current-milliseconds', 'current-module-declare-name', 'current-module-declare-source', 'current-module-name-resolver', 'current-module-path-for-load', 'current-namespace', 'current-output-port', 'current-parameterization', 'current-plumber', 'current-preserved-thread-cell-values', 'current-print', 'current-process-milliseconds', 'current-prompt-read', 'current-pseudo-random-generator', 'current-read-interaction', 'current-reader-guard', 'current-readtable', 'current-seconds', 'current-security-guard', 'current-subprocess-custodian-mode', 'current-thread', 'current-thread-group', 'current-thread-initial-stack-size', 'current-write-relative-directory', 'curry', 'curryr', 'custodian-box-value', 'custodian-box?', 
'custodian-limit-memory', 'custodian-managed-list', 'custodian-memory-accounting-available?', 'custodian-require-memory', 'custodian-shutdown-all', 'custodian?', 'custom-print-quotable-accessor', 'custom-print-quotable?', 'custom-write-accessor', 'custom-write-property-proc', 'custom-write?', 'date', 'date*', 'date*-nanosecond', 'date*-time-zone-name', 'date*?', 'date-day', 'date-dst?', 'date-hour', 'date-minute', 'date-month', 'date-second', 'date-time-zone-offset', 'date-week-day', 'date-year', 'date-year-day', 'date?', 'datum->syntax', 'datum-intern-literal', 'default-continuation-prompt-tag', 'degrees->radians', 'delete-directory', 'delete-directory/files', 'delete-file', 'denominator', 'dict->list', 'dict-can-functional-set?', 'dict-can-remove-keys?', 'dict-clear', 'dict-clear!', 'dict-copy', 'dict-count', 'dict-empty?', 'dict-for-each', 'dict-has-key?', 'dict-implements/c', 'dict-implements?', 'dict-iter-contract', 'dict-iterate-first', 'dict-iterate-key', 'dict-iterate-next', 'dict-iterate-value', 'dict-key-contract', 'dict-keys', 'dict-map', 'dict-mutable?', 'dict-ref', 'dict-ref!', 'dict-remove', 'dict-remove!', 'dict-set', 'dict-set!', 'dict-set*', 'dict-set*!', 'dict-update', 'dict-update!', 'dict-value-contract', 'dict-values', 'dict?', 'directory-exists?', 'directory-list', 'disjoin', 'display', 'display-lines', 'display-lines-to-file', 'display-to-file', 'displayln', 'double-flonum?', 'drop', 'drop-common-prefix', 'drop-right', 'dropf', 'dropf-right', 'dump-memory-stats', 'dup-input-port', 'dup-output-port', 'dynamic->*', 'dynamic-get-field', 'dynamic-object/c', 'dynamic-place', 'dynamic-place*', 'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-send', 'dynamic-set-field!', 'dynamic-wind', 'eighth', 'empty', 'empty-sequence', 'empty-stream', 'empty?', 'environment-variables-copy', 'environment-variables-names', 'environment-variables-ref', 'environment-variables-set!', 'environment-variables?', 'eof', 'eof-evt', 'eof-object?', 
'ephemeron-value', 'ephemeron?', 'eprintf', 'eq-contract-val', 'eq-contract?', 'eq-hash-code', 'eq?', 'equal-contract-val', 'equal-contract?', 'equal-hash-code', 'equal-secondary-hash-code', 'equal<%>', 'equal?', 'equal?/recur', 'eqv-hash-code', 'eqv?', 'error', 'error-display-handler', 'error-escape-handler', 'error-print-context-length', 'error-print-source-location', 'error-print-width', 'error-value->string-handler', 'eval', 'eval-jit-enabled', 'eval-syntax', 'even?', 'evt/c', 'evt?', 'exact->inexact', 'exact-ceiling', 'exact-floor', 'exact-integer?', 'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact-round', 'exact-truncate', 'exact?', 'executable-yield-handler', 'exit', 'exit-handler', 'exn', 'exn-continuation-marks', 'exn-message', 'exn:break', 'exn:break-continuation', 'exn:break:hang-up', 'exn:break:hang-up?', 'exn:break:terminate', 'exn:break:terminate?', 'exn:break?', 'exn:fail', 'exn:fail:contract', 'exn:fail:contract:arity', 'exn:fail:contract:arity?', 'exn:fail:contract:blame', 'exn:fail:contract:blame-object', 'exn:fail:contract:blame?', 'exn:fail:contract:continuation', 'exn:fail:contract:continuation?', 'exn:fail:contract:divide-by-zero', 'exn:fail:contract:divide-by-zero?', 'exn:fail:contract:non-fixnum-result', 'exn:fail:contract:non-fixnum-result?', 'exn:fail:contract:variable', 'exn:fail:contract:variable-id', 'exn:fail:contract:variable?', 'exn:fail:contract?', 'exn:fail:filesystem', 'exn:fail:filesystem:errno', 'exn:fail:filesystem:errno-errno', 'exn:fail:filesystem:errno?', 'exn:fail:filesystem:exists', 'exn:fail:filesystem:exists?', 'exn:fail:filesystem:missing-module', 'exn:fail:filesystem:missing-module-path', 'exn:fail:filesystem:missing-module?', 'exn:fail:filesystem:version', 'exn:fail:filesystem:version?', 'exn:fail:filesystem?', 'exn:fail:network', 'exn:fail:network:errno', 'exn:fail:network:errno-errno', 'exn:fail:network:errno?', 'exn:fail:network?', 'exn:fail:object', 'exn:fail:object?', 'exn:fail:out-of-memory', 
'exn:fail:out-of-memory?', 'exn:fail:read', 'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?', 'exn:fail:read:non-char', 'exn:fail:read:non-char?', 'exn:fail:read?', 'exn:fail:syntax', 'exn:fail:syntax-exprs', 'exn:fail:syntax:missing-module', 'exn:fail:syntax:missing-module-path', 'exn:fail:syntax:missing-module?', 'exn:fail:syntax:unbound', 'exn:fail:syntax:unbound?', 'exn:fail:syntax?', 'exn:fail:unsupported', 'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?', 'exn:fail?', 'exn:misc:match?', 'exn:missing-module-accessor', 'exn:missing-module?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?', 'exp', 'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once', 'expand-syntax-to-top-form', 'expand-to-top-form', 'expand-user-path', 'explode-path', 'expt', 'externalizable<%>', 'failure-result/c', 'false?', 'field-names', 'fifth', 'file->bytes', 'file->bytes-lines', 'file->lines', 'file->list', 'file->string', 'file->value', 'file-exists?', 'file-name-from-path', 'file-or-directory-identity', 'file-or-directory-modify-seconds', 'file-or-directory-permissions', 'file-position', 'file-position*', 'file-size', 'file-stream-buffer-mode', 'file-stream-port?', 'file-truncate', 'filename-extension', 'filesystem-change-evt', 'filesystem-change-evt-cancel', 'filesystem-change-evt?', 'filesystem-root-list', 'filter', 'filter-map', 'filter-not', 'filter-read-input-port', 'find-executable-path', 'find-files', 'find-library-collection-links', 'find-library-collection-paths', 'find-relative-path', 'find-system-path', 'findf', 'first', 'first-or/c', 'fixnum?', 'flat-contract', 'flat-contract-predicate', 'flat-contract-property?', 'flat-contract?', 'flat-named-contract', 'flatten', 'floating-point-bytes->real', 'flonum?', 'floor', 'flush-output', 'fold-files', 'foldl', 'foldr', 'for-each', 'force', 'format', 'fourth', 'fprintf', 'free-identifier=?', 'free-label-identifier=?', 'free-template-identifier=?', 'free-transformer-identifier=?', 
'fsemaphore-count', 'fsemaphore-post', 'fsemaphore-try-wait?', 'fsemaphore-wait', 'fsemaphore?', 'future', 'future?', 'futures-enabled?', 'gcd', 'generate-member-key', 'generate-temporaries', 'generic-set?', 'generic?', 'gensym', 'get-output-bytes', 'get-output-string', 'get-preference', 'get/build-late-neg-projection', 'get/build-val-first-projection', 'getenv', 'global-port-print-handler', 'group-by', 'group-execute-bit', 'group-read-bit', 'group-write-bit', 'guard-evt', 'handle-evt', 'handle-evt?', 'has-blame?', 'has-contract?', 'hash', 'hash->list', 'hash-clear', 'hash-clear!', 'hash-copy', 'hash-copy-clear', 'hash-count', 'hash-empty?', 'hash-eq?', 'hash-equal?', 'hash-eqv?', 'hash-for-each', 'hash-has-key?', 'hash-iterate-first', 'hash-iterate-key', 'hash-iterate-key+value', 'hash-iterate-next', 'hash-iterate-pair', 'hash-iterate-value', 'hash-keys', 'hash-map', 'hash-placeholder?', 'hash-ref', 'hash-ref!', 'hash-remove', 'hash-remove!', 'hash-set', 'hash-set!', 'hash-set*', 'hash-set*!', 'hash-update', 'hash-update!', 'hash-values', 'hash-weak?', 'hash/c', 'hash?', 'hasheq', 'hasheqv', 'identifier-binding', 'identifier-binding-symbol', 'identifier-label-binding', 'identifier-prune-lexical-context', 'identifier-prune-to-source-module', 'identifier-remove-from-definition-context', 'identifier-template-binding', 'identifier-transformer-binding', 'identifier?', 'identity', 'if/c', 'imag-part', 'immutable?', 'impersonate-box', 'impersonate-channel', 'impersonate-continuation-mark-key', 'impersonate-hash', 'impersonate-hash-set', 'impersonate-procedure', 'impersonate-procedure*', 'impersonate-prompt-tag', 'impersonate-struct', 'impersonate-vector', 'impersonator-contract?', 'impersonator-ephemeron', 'impersonator-of?', 'impersonator-prop:application-mark', 'impersonator-prop:blame', 'impersonator-prop:contracted', 'impersonator-property-accessor-procedure?', 'impersonator-property?', 'impersonator?', 'implementation?', 'implementation?/c', 'in-bytes', 
'in-bytes-lines', 'in-combinations', 'in-cycle', 'in-dict', 'in-dict-keys', 'in-dict-pairs', 'in-dict-values', 'in-directory', 'in-hash', 'in-hash-keys', 'in-hash-pairs', 'in-hash-values', 'in-immutable-hash', 'in-immutable-hash-keys', 'in-immutable-hash-pairs', 'in-immutable-hash-values', 'in-immutable-set', 'in-indexed', 'in-input-port-bytes', 'in-input-port-chars', 'in-lines', 'in-list', 'in-mlist', 'in-mutable-hash', 'in-mutable-hash-keys', 'in-mutable-hash-pairs', 'in-mutable-hash-values', 'in-mutable-set', 'in-naturals', 'in-parallel', 'in-permutations', 'in-port', 'in-producer', 'in-range', 'in-sequences', 'in-set', 'in-slice', 'in-stream', 'in-string', 'in-syntax', 'in-value', 'in-values*-sequence', 'in-values-sequence', 'in-vector', 'in-weak-hash', 'in-weak-hash-keys', 'in-weak-hash-pairs', 'in-weak-hash-values', 'in-weak-set', 'inexact->exact', 'inexact-real?', 'inexact?', 'infinite?', 'input-port-append', 'input-port?', 'inspector?', 'instanceof/c', 'integer->char', 'integer->integer-bytes', 'integer-bytes->integer', 'integer-in', 'integer-length', 'integer-sqrt', 'integer-sqrt/remainder', 'integer?', 'interface->method-names', 'interface-extension?', 'interface?', 'internal-definition-context-binding-identifiers', 'internal-definition-context-introduce', 'internal-definition-context-seal', 'internal-definition-context?', 'is-a?', 'is-a?/c', 'keyword->string', 'keyword-apply', 'keyword<?', 'keyword?', 'keywords-match', 'kill-thread', 'last', 'last-pair', 'lcm', 'length', 'liberal-define-context?', 'link-exists?', 'list', 'list*', 'list*of', 'list->bytes', 'list->mutable-set', 'list->mutable-seteq', 'list->mutable-seteqv', 'list->set', 'list->seteq', 'list->seteqv', 'list->string', 'list->vector', 'list->weak-set', 'list->weak-seteq', 'list->weak-seteqv', 'list-contract?', 'list-prefix?', 'list-ref', 'list-set', 'list-tail', 'list-update', 'list/c', 'list?', 'listen-port-number?', 'listof', 'load', 'load-extension', 'load-on-demand-enabled', 
'load-relative', 'load-relative-extension', 'load/cd', 'load/use-compiled', 'local-expand', 'local-expand/capture-lifts', 'local-transformer-expand', 'local-transformer-expand/capture-lifts', 'locale-string-encoding', 'log', 'log-all-levels', 'log-level-evt', 'log-level?', 'log-max-level', 'log-message', 'log-receiver?', 'logger-name', 'logger?', 'magnitude', 'make-arity-at-least', 'make-base-empty-namespace', 'make-base-namespace', 'make-bytes', 'make-channel', 'make-chaperone-contract', 'make-continuation-mark-key', 'make-continuation-prompt-tag', 'make-contract', 'make-custodian', 'make-custodian-box', 'make-custom-hash', 'make-custom-hash-types', 'make-custom-set', 'make-custom-set-types', 'make-date', 'make-date*', 'make-derived-parameter', 'make-directory', 'make-directory*', 'make-do-sequence', 'make-empty-namespace', 'make-environment-variables', 'make-ephemeron', 'make-exn', 'make-exn:break', 'make-exn:break:hang-up', 'make-exn:break:terminate', 'make-exn:fail', 'make-exn:fail:contract', 'make-exn:fail:contract:arity', 'make-exn:fail:contract:blame', 'make-exn:fail:contract:continuation', 'make-exn:fail:contract:divide-by-zero', 'make-exn:fail:contract:non-fixnum-result', 'make-exn:fail:contract:variable', 'make-exn:fail:filesystem', 'make-exn:fail:filesystem:errno', 'make-exn:fail:filesystem:exists', 'make-exn:fail:filesystem:missing-module', 'make-exn:fail:filesystem:version', 'make-exn:fail:network', 'make-exn:fail:network:errno', 'make-exn:fail:object', 'make-exn:fail:out-of-memory', 'make-exn:fail:read', 'make-exn:fail:read:eof', 'make-exn:fail:read:non-char', 'make-exn:fail:syntax', 'make-exn:fail:syntax:missing-module', 'make-exn:fail:syntax:unbound', 'make-exn:fail:unsupported', 'make-exn:fail:user', 'make-file-or-directory-link', 'make-flat-contract', 'make-fsemaphore', 'make-generic', 'make-handle-get-preference-locked', 'make-hash', 'make-hash-placeholder', 'make-hasheq', 'make-hasheq-placeholder', 'make-hasheqv', 'make-hasheqv-placeholder', 
'make-immutable-custom-hash', 'make-immutable-hash', 'make-immutable-hasheq', 'make-immutable-hasheqv', 'make-impersonator-property', 'make-input-port', 'make-input-port/read-to-peek', 'make-inspector', 'make-keyword-procedure', 'make-known-char-range-list', 'make-limited-input-port', 'make-list', 'make-lock-file-name', 'make-log-receiver', 'make-logger', 'make-mixin-contract', 'make-mutable-custom-set', 'make-none/c', 'make-object', 'make-output-port', 'make-parameter', 'make-parent-directory*', 'make-phantom-bytes', 'make-pipe', 'make-pipe-with-specials', 'make-placeholder', 'make-plumber', 'make-polar', 'make-prefab-struct', 'make-primitive-class', 'make-proj-contract', 'make-pseudo-random-generator', 'make-reader-graph', 'make-readtable', 'make-rectangular', 'make-rename-transformer', 'make-resolved-module-path', 'make-security-guard', 'make-semaphore', 'make-set!-transformer', 'make-shared-bytes', 'make-sibling-inspector', 'make-special-comment', 'make-srcloc', 'make-string', 'make-struct-field-accessor', 'make-struct-field-mutator', 'make-struct-type', 'make-struct-type-property', 'make-syntax-delta-introducer', 'make-syntax-introducer', 'make-temporary-file', 'make-tentative-pretty-print-output-port', 'make-thread-cell', 'make-thread-group', 'make-vector', 'make-weak-box', 'make-weak-custom-hash', 'make-weak-custom-set', 'make-weak-hash', 'make-weak-hasheq', 'make-weak-hasheqv', 'make-will-executor', 'map', 'match-equality-test', 'matches-arity-exactly?', 'max', 'mcar', 'mcdr', 'mcons', 'member', 'member-name-key-hash-code', 'member-name-key=?', 'member-name-key?', 'memf', 'memq', 'memv', 'merge-input', 'method-in-interface?', 'min', 'mixin-contract', 'module->exports', 'module->imports', 'module->language-info', 'module->namespace', 'module-compiled-cross-phase-persistent?', 'module-compiled-exports', 'module-compiled-imports', 'module-compiled-language-info', 'module-compiled-name', 'module-compiled-submodules', 'module-declared?', 
'module-path-index-join', 'module-path-index-resolve', 'module-path-index-split', 'module-path-index-submodule', 'module-path-index?', 'module-path?', 'module-predefined?', 'module-provide-protected?', 'modulo', 'mpair?', 'mutable-set', 'mutable-seteq', 'mutable-seteqv', 'n->th', 'nack-guard-evt', 'namespace-anchor->empty-namespace', 'namespace-anchor->namespace', 'namespace-anchor?', 'namespace-attach-module', 'namespace-attach-module-declaration', 'namespace-base-phase', 'namespace-mapped-symbols', 'namespace-module-identifier', 'namespace-module-registry', 'namespace-require', 'namespace-require/constant', 'namespace-require/copy', 'namespace-require/expansion-time', 'namespace-set-variable-value!', 'namespace-symbol->identifier', 'namespace-syntax-introduce', 'namespace-undefine-variable!', 'namespace-unprotect-module', 'namespace-variable-value', 'namespace?', 'nan?', 'natural-number/c', 'negate', 'negative?', 'never-evt', 'new-∀/c', 'new-∃/c', 'newline', 'ninth', 'non-empty-listof', 'non-empty-string?', 'none/c', 'normal-case-path', 'normalize-arity', 'normalize-path', 'normalized-arity?', 'not', 'not/c', 'null', 'null?', 'number->string', 'number?', 'numerator', 'object%', 'object->vector', 'object-info', 'object-interface', 'object-method-arity-includes?', 'object-name', 'object-or-false=?', 'object=?', 'object?', 'odd?', 'one-of/c', 'open-input-bytes', 'open-input-file', 'open-input-output-file', 'open-input-string', 'open-output-bytes', 'open-output-file', 'open-output-nowhere', 'open-output-string', 'or/c', 'order-of-magnitude', 'ormap', 'other-execute-bit', 'other-read-bit', 'other-write-bit', 'output-port?', 'pair?', 'parameter-procedure=?', 'parameter/c', 'parameter?', 'parameterization?', 'parse-command-line', 'partition', 'path->bytes', 'path->complete-path', 'path->directory-path', 'path->string', 'path-add-suffix', 'path-convention-type', 'path-element->bytes', 'path-element->string', 'path-element?', 'path-for-some-system?', 
'path-list-string->path-list', 'path-only', 'path-replace-suffix', 'path-string?', 'path<?', 'path?', 'pathlist-closure', 'peek-byte', 'peek-byte-or-special', 'peek-bytes', 'peek-bytes!', 'peek-bytes!-evt', 'peek-bytes-avail!', 'peek-bytes-avail!*', 'peek-bytes-avail!-evt', 'peek-bytes-avail!/enable-break', 'peek-bytes-evt', 'peek-char', 'peek-char-or-special', 'peek-string', 'peek-string!', 'peek-string!-evt', 'peek-string-evt', 'peeking-input-port', 'permutations', 'phantom-bytes?', 'pi', 'pi.f', 'pipe-content-length', 'place-break', 'place-channel', 'place-channel-get', 'place-channel-put', 'place-channel-put/get', 'place-channel?', 'place-dead-evt', 'place-enabled?', 'place-kill', 'place-location?', 'place-message-allowed?', 'place-sleep', 'place-wait', 'place?', 'placeholder-get', 'placeholder-set!', 'placeholder?', 'plumber-add-flush!', 'plumber-flush-all', 'plumber-flush-handle-remove!', 'plumber-flush-handle?', 'plumber?', 'poll-guard-evt', 'port->bytes', 'port->bytes-lines', 'port->lines', 'port->list', 'port->string', 'port-closed-evt', 'port-closed?', 'port-commit-peeked', 'port-count-lines!', 'port-count-lines-enabled', 'port-counts-lines?', 'port-display-handler', 'port-file-identity', 'port-file-unlock', 'port-next-location', 'port-number?', 'port-print-handler', 'port-progress-evt', 'port-provides-progress-evts?', 'port-read-handler', 'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?', 'port-writes-special?', 'port?', 'positive?', 'predicate/c', 'prefab-key->struct-type', 'prefab-key?', 'prefab-struct-key', 'preferences-lock-file-mode', 'pregexp', 'pregexp?', 'pretty-display', 'pretty-format', 'pretty-print', 'pretty-print-.-symbol-without-bars', 'pretty-print-abbreviate-read-macros', 'pretty-print-columns', 'pretty-print-current-style-table', 'pretty-print-depth', 'pretty-print-exact-as-decimal', 'pretty-print-extend-style-table', 'pretty-print-handler', 'pretty-print-newline', 'pretty-print-post-print-hook', 
'pretty-print-pre-print-hook', 'pretty-print-print-hook', 'pretty-print-print-line', 'pretty-print-remap-stylable', 'pretty-print-show-inexactness', 'pretty-print-size-hook', 'pretty-print-style-table?', 'pretty-printing', 'pretty-write', 'primitive-closure?', 'primitive-result-arity', 'primitive?', 'print', 'print-as-expression', 'print-boolean-long-form', 'print-box', 'print-graph', 'print-hash-table', 'print-mpair-curly-braces', 'print-pair-curly-braces', 'print-reader-abbreviations', 'print-struct', 'print-syntax-width', 'print-unreadable', 'print-vector-length', 'printable/c', 'printable<%>', 'printf', 'println', 'procedure->method', 'procedure-arity', 'procedure-arity-includes/c', 'procedure-arity-includes?', 'procedure-arity?', 'procedure-closure-contents-eq?', 'procedure-extract-target', 'procedure-keywords', 'procedure-reduce-arity', 'procedure-reduce-keyword-arity', 'procedure-rename', 'procedure-result-arity', 'procedure-specialize', 'procedure-struct-type?', 'procedure?', 'process', 'process*', 'process*/ports', 'process/ports', 'processor-count', 'progress-evt?', 'promise-forced?', 'promise-running?', 'promise/c', 'promise/name?', 'promise?', 'prop:arity-string', 'prop:arrow-contract', 'prop:arrow-contract-get-info', 'prop:arrow-contract?', 'prop:blame', 'prop:chaperone-contract', 'prop:checked-procedure', 'prop:contract', 'prop:contracted', 'prop:custom-print-quotable', 'prop:custom-write', 'prop:dict', 'prop:dict/contract', 'prop:equal+hash', 'prop:evt', 'prop:exn:missing-module', 'prop:exn:srclocs', 'prop:expansion-contexts', 'prop:flat-contract', 'prop:impersonator-of', 'prop:input-port', 'prop:liberal-define-context', 'prop:object-name', 'prop:opt-chaperone-contract', 'prop:opt-chaperone-contract-get-test', 'prop:opt-chaperone-contract?', 'prop:orc-contract', 'prop:orc-contract-get-subcontracts', 'prop:orc-contract?', 'prop:output-port', 'prop:place-location', 'prop:procedure', 'prop:recursive-contract', 'prop:recursive-contract-unroll', 
'prop:recursive-contract?', 'prop:rename-transformer', 'prop:sequence', 'prop:set!-transformer', 'prop:stream', 'proper-subset?', 'pseudo-random-generator->vector', 'pseudo-random-generator-vector?', 'pseudo-random-generator?', 'put-preferences', 'putenv', 'quotient', 'quotient/remainder', 'radians->degrees', 'raise', 'raise-argument-error', 'raise-arguments-error', 'raise-arity-error', 'raise-blame-error', 'raise-contract-error', 'raise-mismatch-error', 'raise-not-cons-blame-error', 'raise-range-error', 'raise-result-error', 'raise-syntax-error', 'raise-type-error', 'raise-user-error', 'random', 'random-seed', 'range', 'rational?', 'rationalize', 'read', 'read-accept-bar-quote', 'read-accept-box', 'read-accept-compiled', 'read-accept-dot', 'read-accept-graph', 'read-accept-infix-dot', 'read-accept-lang', 'read-accept-quasiquote', 'read-accept-reader', 'read-byte', 'read-byte-or-special', 'read-bytes', 'read-bytes!', 'read-bytes!-evt', 'read-bytes-avail!', 'read-bytes-avail!*', 'read-bytes-avail!-evt', 'read-bytes-avail!/enable-break', 'read-bytes-evt', 'read-bytes-line', 'read-bytes-line-evt', 'read-case-sensitive', 'read-cdot', 'read-char', 'read-char-or-special', 'read-curly-brace-as-paren', 'read-curly-brace-with-tag', 'read-decimal-as-inexact', 'read-eval-print-loop', 'read-language', 'read-line', 'read-line-evt', 'read-on-demand-source', 'read-square-bracket-as-paren', 'read-square-bracket-with-tag', 'read-string', 'read-string!', 'read-string!-evt', 'read-string-evt', 'read-syntax', 'read-syntax/recursive', 'read/recursive', 'readtable-mapping', 'readtable?', 'real->decimal-string', 'real->double-flonum', 'real->floating-point-bytes', 'real->single-flonum', 'real-in', 'real-part', 'real?', 'reencode-input-port', 'reencode-output-port', 'regexp', 'regexp-match', 'regexp-match*', 'regexp-match-evt', 'regexp-match-exact?', 'regexp-match-peek', 'regexp-match-peek-immediate', 'regexp-match-peek-positions', 'regexp-match-peek-positions*', 
'regexp-match-peek-positions-immediate', 'regexp-match-peek-positions-immediate/end', 'regexp-match-peek-positions/end', 'regexp-match-positions', 'regexp-match-positions*', 'regexp-match-positions/end', 'regexp-match/end', 'regexp-match?', 'regexp-max-lookbehind', 'regexp-quote', 'regexp-replace', 'regexp-replace*', 'regexp-replace-quote', 'regexp-replaces', 'regexp-split', 'regexp-try-match', 'regexp?', 'relative-path?', 'relocate-input-port', 'relocate-output-port', 'remainder', 'remf', 'remf*', 'remove', 'remove*', 'remove-duplicates', 'remq', 'remq*', 'remv', 'remv*', 'rename-contract', 'rename-file-or-directory', 'rename-transformer-target', 'rename-transformer?', 'replace-evt', 'reroot-path', 'resolve-path', 'resolved-module-path-name', 'resolved-module-path?', 'rest', 'reverse', 'round', 'second', 'seconds->date', 'security-guard?', 'semaphore-peek-evt', 'semaphore-peek-evt?', 'semaphore-post', 'semaphore-try-wait?', 'semaphore-wait', 'semaphore-wait/enable-break', 'semaphore?', 'sequence->list', 'sequence->stream', 'sequence-add-between', 'sequence-andmap', 'sequence-append', 'sequence-count', 'sequence-filter', 'sequence-fold', 'sequence-for-each', 'sequence-generate', 'sequence-generate*', 'sequence-length', 'sequence-map', 'sequence-ormap', 'sequence-ref', 'sequence-tail', 'sequence/c', 'sequence?', 'set', 'set!-transformer-procedure', 'set!-transformer?', 'set->list', 'set->stream', 'set-add', 'set-add!', 'set-box!', 'set-clear', 'set-clear!', 'set-copy', 'set-copy-clear', 'set-count', 'set-empty?', 'set-eq?', 'set-equal?', 'set-eqv?', 'set-first', 'set-for-each', 'set-implements/c', 'set-implements?', 'set-intersect', 'set-intersect!', 'set-map', 'set-mcar!', 'set-mcdr!', 'set-member?', 'set-mutable?', 'set-phantom-bytes!', 'set-port-next-location!', 'set-remove', 'set-remove!', 'set-rest', 'set-some-basic-contracts!', 'set-subtract', 'set-subtract!', 'set-symmetric-difference', 'set-symmetric-difference!', 'set-union', 'set-union!', 'set-weak?', 
'set/c', 'set=?', 'set?', 'seteq', 'seteqv', 'seventh', 'sgn', 'shared-bytes', 'shell-execute', 'shrink-path-wrt', 'shuffle', 'simple-form-path', 'simplify-path', 'sin', 'single-flonum?', 'sinh', 'sixth', 'skip-projection-wrapper?', 'sleep', 'some-system-path->string', 'sort', 'special-comment-value', 'special-comment?', 'special-filter-input-port', 'split-at', 'split-at-right', 'split-common-prefix', 'split-path', 'splitf-at', 'splitf-at-right', 'sqr', 'sqrt', 'srcloc', 'srcloc->string', 'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source', 'srcloc-span', 'srcloc?', 'stop-after', 'stop-before', 'stream->list', 'stream-add-between', 'stream-andmap', 'stream-append', 'stream-count', 'stream-empty?', 'stream-filter', 'stream-first', 'stream-fold', 'stream-for-each', 'stream-length', 'stream-map', 'stream-ormap', 'stream-ref', 'stream-rest', 'stream-tail', 'stream/c', 'stream?', 'string', 'string->bytes/latin-1', 'string->bytes/locale', 'string->bytes/utf-8', 'string->immutable-string', 'string->keyword', 'string->list', 'string->number', 'string->path', 'string->path-element', 'string->some-system-path', 'string->symbol', 'string->uninterned-symbol', 'string->unreadable-symbol', 'string-append', 'string-append*', 'string-ci<=?', 'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?', 'string-contains?', 'string-copy', 'string-copy!', 'string-downcase', 'string-environment-variable-name?', 'string-fill!', 'string-foldcase', 'string-join', 'string-len/c', 'string-length', 'string-locale-ci<?', 'string-locale-ci=?', 'string-locale-ci>?', 'string-locale-downcase', 'string-locale-upcase', 'string-locale<?', 'string-locale=?', 'string-locale>?', 'string-no-nuls?', 'string-normalize-nfc', 'string-normalize-nfd', 'string-normalize-nfkc', 'string-normalize-nfkd', 'string-normalize-spaces', 'string-port?', 'string-prefix?', 'string-ref', 'string-replace', 'string-set!', 'string-split', 'string-suffix?', 'string-titlecase', 'string-trim', 'string-upcase', 
'string-utf-8-length', 'string<=?', 'string<?', 'string=?', 'string>=?', 'string>?', 'string?', 'struct->vector', 'struct-accessor-procedure?', 'struct-constructor-procedure?', 'struct-info', 'struct-mutator-procedure?', 'struct-predicate-procedure?', 'struct-type-info', 'struct-type-make-constructor', 'struct-type-make-predicate', 'struct-type-property-accessor-procedure?', 'struct-type-property/c', 'struct-type-property?', 'struct-type?', 'struct:arity-at-least', 'struct:arrow-contract-info', 'struct:date', 'struct:date*', 'struct:exn', 'struct:exn:break', 'struct:exn:break:hang-up', 'struct:exn:break:terminate', 'struct:exn:fail', 'struct:exn:fail:contract', 'struct:exn:fail:contract:arity', 'struct:exn:fail:contract:blame', 'struct:exn:fail:contract:continuation', 'struct:exn:fail:contract:divide-by-zero', 'struct:exn:fail:contract:non-fixnum-result', 'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem', 'struct:exn:fail:filesystem:errno', 'struct:exn:fail:filesystem:exists', 'struct:exn:fail:filesystem:missing-module', 'struct:exn:fail:filesystem:version', 'struct:exn:fail:network', 'struct:exn:fail:network:errno', 'struct:exn:fail:object', 'struct:exn:fail:out-of-memory', 'struct:exn:fail:read', 'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char', 'struct:exn:fail:syntax', 'struct:exn:fail:syntax:missing-module', 'struct:exn:fail:syntax:unbound', 'struct:exn:fail:unsupported', 'struct:exn:fail:user', 'struct:srcloc', 'struct:wrapped-extra-arg-arrow', 'struct?', 'sub1', 'subbytes', 'subclass?', 'subclass?/c', 'subprocess', 'subprocess-group-enabled', 'subprocess-kill', 'subprocess-pid', 'subprocess-status', 'subprocess-wait', 'subprocess?', 'subset?', 'substring', 'suggest/c', 'symbol->string', 'symbol-interned?', 'symbol-unreadable?', 'symbol<?', 'symbol=?', 'symbol?', 'symbols', 'sync', 'sync/enable-break', 'sync/timeout', 'sync/timeout/enable-break', 'syntax->datum', 'syntax->list', 'syntax-arm', 'syntax-column', 'syntax-debug-info', 
'syntax-disarm', 'syntax-e', 'syntax-line', 'syntax-local-bind-syntaxes', 'syntax-local-certifier', 'syntax-local-context', 'syntax-local-expand-expression', 'syntax-local-get-shadower', 'syntax-local-identifier-as-binding', 'syntax-local-introduce', 'syntax-local-lift-context', 'syntax-local-lift-expression', 'syntax-local-lift-module', 'syntax-local-lift-module-end-declaration', 'syntax-local-lift-provide', 'syntax-local-lift-require', 'syntax-local-lift-values-expression', 'syntax-local-make-definition-context', 'syntax-local-make-delta-introducer', 'syntax-local-module-defined-identifiers', 'syntax-local-module-exports', 'syntax-local-module-required-identifiers', 'syntax-local-name', 'syntax-local-phase-level', 'syntax-local-submodules', 'syntax-local-transforming-module-provides?', 'syntax-local-value', 'syntax-local-value/immediate', 'syntax-original?', 'syntax-position', 'syntax-property', 'syntax-property-preserved?', 'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm', 'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source', 'syntax-source-module', 'syntax-span', 'syntax-taint', 'syntax-tainted?', 'syntax-track-origin', 'syntax-transforming-module-expression?', 'syntax-transforming-with-lifts?', 'syntax-transforming?', 'syntax/c', 'syntax?', 'system', 'system*', 'system*/exit-code', 'system-big-endian?', 'system-idle-evt', 'system-language+country', 'system-library-subpath', 'system-path-convention-type', 'system-type', 'system/exit-code', 'tail-marks-match?', 'take', 'take-common-prefix', 'take-right', 'takef', 'takef-right', 'tan', 'tanh', 'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt', 'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses', 'tcp-close', 'tcp-connect', 'tcp-connect/enable-break', 'tcp-listen', 'tcp-listener?', 'tcp-port?', 'tentative-pretty-print-port-cancel', 'tentative-pretty-print-port-transfer', 'tenth', 'terminal-port?', 'the-unsupplied-arg', 'third', 'thread', 'thread-cell-ref', 'thread-cell-set!', 
'thread-cell-values?', 'thread-cell?', 'thread-dead-evt', 'thread-dead?', 'thread-group?', 'thread-receive', 'thread-receive-evt', 'thread-resume', 'thread-resume-evt', 'thread-rewind-receive', 'thread-running?', 'thread-send', 'thread-suspend', 'thread-suspend-evt', 'thread-try-receive', 'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply', 'touch', 'transplant-input-port', 'transplant-output-port', 'true', 'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?', 'udp-close', 'udp-connect!', 'udp-connected?', 'udp-multicast-interface', 'udp-multicast-join-group!', 'udp-multicast-leave-group!', 'udp-multicast-loopback?', 'udp-multicast-set-interface!', 'udp-multicast-set-loopback!', 'udp-multicast-set-ttl!', 'udp-multicast-ttl', 'udp-open-socket', 'udp-receive!', 'udp-receive!*', 'udp-receive!-evt', 'udp-receive!/enable-break', 'udp-receive-ready-evt', 'udp-send', 'udp-send*', 'udp-send-evt', 'udp-send-ready-evt', 'udp-send-to', 'udp-send-to*', 'udp-send-to-evt', 'udp-send-to/enable-break', 'udp-send/enable-break', 'udp?', 'unbox', 'uncaught-exception-handler', 'unit?', 'unspecified-dom', 'unsupplied-arg?', 'use-collection-link-paths', 'use-compiled-file-paths', 'use-user-specific-search-paths', 'user-execute-bit', 'user-read-bit', 'user-write-bit', 'value-blame', 'value-contract', 'values', 'variable-reference->empty-namespace', 'variable-reference->module-base-phase', 'variable-reference->module-declaration-inspector', 'variable-reference->module-path-index', 'variable-reference->module-source', 'variable-reference->namespace', 'variable-reference->phase', 'variable-reference->resolved-module-path', 'variable-reference-constant?', 'variable-reference?', 'vector', 'vector->immutable-vector', 'vector->list', 'vector->pseudo-random-generator', 'vector->pseudo-random-generator!', 'vector->values', 'vector-append', 'vector-argmax', 'vector-argmin', 'vector-copy', 'vector-copy!', 'vector-count', 'vector-drop', 'vector-drop-right', 'vector-fill!', 
'vector-filter', 'vector-filter-not', 'vector-immutable', 'vector-immutable/c', 'vector-immutableof', 'vector-length', 'vector-map', 'vector-map!', 'vector-member', 'vector-memq', 'vector-memv', 'vector-ref', 'vector-set!', 'vector-set*!', 'vector-set-performance-stats!', 'vector-split-at', 'vector-split-at-right', 'vector-take', 'vector-take-right', 'vector/c', 'vector?', 'vectorof', 'version', 'void', 'void?', 'weak-box-value', 'weak-box?', 'weak-set', 'weak-seteq', 'weak-seteqv', 'will-execute', 'will-executor?', 'will-register', 'will-try-execute', 'with-input-from-bytes', 'with-input-from-file', 'with-input-from-string', 'with-output-to-bytes', 'with-output-to-file', 'with-output-to-string', 'would-be-future', 'wrap-evt', 'wrapped-extra-arg-arrow', 'wrapped-extra-arg-arrow-extra-neg-party-argument', 'wrapped-extra-arg-arrow-real-func', 'wrapped-extra-arg-arrow?', 'writable<%>', 'write', 'write-byte', 'write-bytes', 'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt', 'write-bytes-avail/enable-break', 'write-char', 'write-special', 'write-special-avail*', 'write-special-evt', 'write-string', 'write-to-file', 'writeln', 'xor', 'zero?', '~.a', '~.s', '~.v', '~a', '~e', '~r', '~s', '~v' ) _opening_parenthesis = r'[([{]' _closing_parenthesis = r'[)\]}]' _delimiters = r'()[\]{}",\'`;\s' _symbol = r'(?:\|[^|]*\||\\[\w\W]|[^|\\%s]+)+' % _delimiters _exact_decimal_prefix = r'(?:#e)?(?:#d)?(?:#e)?' 
_exponent = r'(?:[defls][-+]?\d+)' _inexact_simple_no_hashes = r'(?:\d+(?:/\d+|\.\d*)?|\.\d+)' _inexact_simple = (r'(?:%s|(?:\d+#+(?:\.#*|/\d+#*)?|\.\d+#+|' r'\d+(?:\.\d*#+|/\d+#+)))' % _inexact_simple_no_hashes) _inexact_normal_no_hashes = r'(?:%s%s?)' % (_inexact_simple_no_hashes, _exponent) _inexact_normal = r'(?:%s%s?)' % (_inexact_simple, _exponent) _inexact_special = r'(?:(?:inf|nan)\.[0f])' _inexact_real = r'(?:[-+]?%s|[-+]%s)' % (_inexact_normal, _inexact_special) _inexact_unsigned = r'(?:%s|%s)' % (_inexact_normal, _inexact_special) tokens = { 'root': [ (_closing_parenthesis, Error), (r'(?!\Z)', Text, 'unquoted-datum') ], 'datum': [ (r'(?s)#;|#![ /]([^\\\n]|\\.)*', Comment), (r';[^\n\r\x85\u2028\u2029]*', Comment.Single), (r'#\|', Comment.Multiline, 'block-comment'), # Whitespaces (r'(?u)\s+', Whitespace), # Numbers: Keep in mind Racket reader hash prefixes, which # can denote the base or the type. These don't map neatly # onto Pygments token types; some judgment calls here. # #d or no prefix (r'(?i)%s[-+]?\d+(?=[%s])' % (_exact_decimal_prefix, _delimiters), Number.Integer, '#pop'), (r'(?i)%s[-+]?(\d+(\.\d*)?|\.\d+)([deflst][-+]?\d+)?(?=[%s])' % (_exact_decimal_prefix, _delimiters), Number.Float, '#pop'), (r'(?i)%s[-+]?(%s([-+]%s?i)?|[-+]%s?i)(?=[%s])' % (_exact_decimal_prefix, _inexact_normal_no_hashes, _inexact_normal_no_hashes, _inexact_normal_no_hashes, _delimiters), Number, '#pop'), # Inexact without explicit #i (r'(?i)(#d)?(%s([-+]%s?i)?|[-+]%s?i|%s@%s)(?=[%s])' % (_inexact_real, _inexact_unsigned, _inexact_unsigned, _inexact_real, _inexact_real, _delimiters), Number.Float, '#pop'), # The remaining extflonums (r'(?i)(([-+]?%st[-+]?\d+)|[-+](inf|nan)\.t)(?=[%s])' % (_inexact_simple, _delimiters), Number.Float, '#pop'), # #b (r'(?iu)(#[ei])?#b%s' % _symbol, Number.Bin, '#pop'), # #o (r'(?iu)(#[ei])?#o%s' % _symbol, Number.Oct, '#pop'), # #x (r'(?iu)(#[ei])?#x%s' % _symbol, Number.Hex, '#pop'), # #i is always inexact, i.e. 
float (r'(?iu)(#d)?#i%s' % _symbol, Number.Float, '#pop'), # Strings and characters (r'#?"', String.Double, ('#pop', 'string')), (r'#<<(.+)\n(^(?!\1$).*$\n)*^\1$', String.Heredoc, '#pop'), (r'#\\(u[\da-fA-F]{1,4}|U[\da-fA-F]{1,8})', String.Char, '#pop'), (r'(?is)#\\([0-7]{3}|[a-z]+|.)', String.Char, '#pop'), (r'(?s)#[pr]x#?"(\\?.)*?"', String.Regex, '#pop'), # Constants (r'#(true|false|[tTfF])', Name.Constant, '#pop'), # Keyword argument names (e.g. #:keyword) (r'#:%s' % _symbol, Keyword.Declaration, '#pop'), # Reader extensions (r'(#lang |#!)(\S+)', bygroups(Keyword.Namespace, Name.Namespace)), (r'#reader', Keyword.Namespace, 'quoted-datum'), # Other syntax (r"(?i)\.(?=[%s])|#c[is]|#['`]|#,@?" % _delimiters, Operator), (r"'|#[s&]|#hash(eqv?)?|#\d*(?=%s)" % _opening_parenthesis, Operator, ('#pop', 'quoted-datum')) ], 'datum*': [ (r'`|,@?', Operator), (_symbol, String.Symbol, '#pop'), (r'[|\\]', Error), default('#pop') ], 'list': [ (_closing_parenthesis, Punctuation, '#pop') ], 'unquoted-datum': [ include('datum'), (r'quote(?=[%s])' % _delimiters, Keyword, ('#pop', 'quoted-datum')), (r'`', Operator, ('#pop', 'quasiquoted-datum')), (r'quasiquote(?=[%s])' % _delimiters, Keyword, ('#pop', 'quasiquoted-datum')), (_opening_parenthesis, Punctuation, ('#pop', 'unquoted-list')), (words(_keywords, suffix='(?=[%s])' % _delimiters), Keyword, '#pop'), (words(_builtins, suffix='(?=[%s])' % _delimiters), Name.Builtin, '#pop'), (_symbol, Name, '#pop'), include('datum*') ], 'unquoted-list': [ include('list'), (r'(?!\Z)', Text, 'unquoted-datum') ], 'quasiquoted-datum': [ include('datum'), (r',@?', Operator, ('#pop', 'unquoted-datum')), (r'unquote(-splicing)?(?=[%s])' % _delimiters, Keyword, ('#pop', 'unquoted-datum')), (_opening_parenthesis, Punctuation, ('#pop', 'quasiquoted-list')), include('datum*') ], 'quasiquoted-list': [ include('list'), (r'(?!\Z)', Text, 'quasiquoted-datum') ], 'quoted-datum': [ include('datum'), (_opening_parenthesis, Punctuation, ('#pop', 'quoted-list')), 
include('datum*')
        ],
        # Datum inside a quote context: recurse for nested lists.
        'quoted-list': [
            include('list'),
            (r'(?!\Z)', Text, 'quoted-datum')
        ],
        # Racket block comments #| ... |# nest; '#push' tracks the depth.
        'block-comment': [
            (r'#\|', Comment.Multiline, '#push'),
            (r'\|#', Comment.Multiline, '#pop'),
            (r'[^#|]+|.', Comment.Multiline)
        ],
        # Double-quoted string bodies, with octal/hex/unicode escapes.
        'string': [
            (r'"', String.Double, '#pop'),
            (r'(?s)\\([0-7]{1,3}|x[\da-fA-F]{1,2}|u[\da-fA-F]{1,4}|'
             r'U[\da-fA-F]{1,8}|.)', String.Escape),
            (r'[^\\"]+', String.Double)
        ]
    }


class NewLispLexer(RegexLexer):
    """
    For newLISP source code (version 10.3.0).

    .. versionadded:: 1.5
    """

    name = 'NewLisp'
    url = 'http://www.newlisp.org/'
    aliases = ['newlisp']
    filenames = ['*.lsp', '*.nl', '*.kif']
    mimetypes = ['text/x-newlisp', 'application/x-newlisp']

    # newLISP identifiers are case-insensitive; rules may span lines.
    flags = re.IGNORECASE | re.MULTILINE

    # list of built-in functions for newLISP version 10.3
    builtins = (
        '^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+',
        '++', '<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0',
        '$1', '$10', '$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4',
        '$5', '$6', '$7', '$8', '$9', '$args', '$idx', '$it', '$main-args',
        'abort', 'abs', 'acos', 'acosh', 'add', 'address', 'amb', 'and',
        'append-file', 'append', 'apply', 'args', 'array-list', 'array?',
        'array', 'asin', 'asinh', 'assoc', 'atan', 'atan2', 'atanh',
        'atom?', 'base64-dec', 'base64-enc', 'bayes-query', 'bayes-train',
        'begin', 'beta', 'betai', 'bind', 'binomial', 'bits', 'callback',
        'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class',
        'clean', 'close', 'command-event', 'cond', 'cons', 'constant',
        'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count',
        'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry',
        'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec',
        'def-new', 'default', 'define-macro', 'define', 'delete-file',
        'delete-url', 'delete', 'destroy', 'det', 'device', 'difference',
        'directory?', 'directory', 'div', 'do-until', 'do-while', 'doargs',
        'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup', 'empty?',
        'encrypt', 'ends-with', 'env', 'erf', 'error-event', 'eval-string',
        'eval', 'exec', 'exists', 'exit', 'exp', 'expand', 'explode',
        'extend', 'factor', 'fft', 'file-info', 'file?', 'filter',
        'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor',
        'flt', 'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai',
        'gammaln', 'gcd', 'get-char', 'get-float', 'get-int', 'get-long',
        'get-string', 'get-url', 'global?', 'global', 'if-not', 'if',
        'ifft', 'import', 'inc', 'index', 'inf?', 'int', 'integer?',
        'integer', 'intersect', 'invert', 'irr', 'join', 'lambda-macro',
        'lambda?', 'lambda', 'last-error', 'last', 'legal?', 'length',
        'let', 'letex', 'letn', 'list?', 'list', 'load', 'local', 'log',
        'lookup', 'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir',
        'map', 'mat', 'match', 'max', 'member', 'min', 'mod', 'module',
        'mul', 'multiply', 'NaN?', 'net-accept', 'net-close',
        'net-connect', 'net-error', 'net-eval', 'net-interface', 'net-ipv',
        'net-listen', 'net-local', 'net-lookup', 'net-packet', 'net-peek',
        'net-peer', 'net-ping', 'net-receive-from', 'net-receive-udp',
        'net-receive', 'net-select', 'net-send-to', 'net-send-udp',
        'net-send', 'net-service', 'net-sessions', 'new', 'nil?', 'nil',
        'normal', 'not', 'now', 'nper', 'npv', 'nth', 'null?', 'number?',
        'open', 'or', 'ostype', 'pack', 'parse-date', 'parse', 'peek',
        'pipe', 'pmt', 'pop-assoc', 'pop', 'post-url', 'pow', 'prefix',
        'pretty-print', 'primitive?', 'print', 'println', 'prob-chi2',
        'prob-z', 'process', 'prompt-event', 'protected?', 'push',
        'put-url', 'pv', 'quote?', 'quote', 'rand', 'random', 'randomize',
        'read', 'read-char', 'read-expr', 'read-file', 'read-key',
        'read-line', 'read-utf8', 'reader-event', 'real-path', 'receive',
        'ref-all', 'ref', 'regex-comp', 'regex', 'remove-dir',
        'rename-file', 'replace', 'reset', 'rest', 'reverse', 'rotate',
        'round', 'save', 'search', 'seed', 'seek', 'select', 'self',
        'semaphore', 'send', 'sequence', 'series', 'set-locale',
        'set-ref-all', 'set-ref', 'set', 'setf', 'setq', 'sgn', 'share',
        'signal', 'silent', 'sin', 'sinh', 'sleep', 'slice', 'sort',
        'source', 'spawn', 'sqrt', 'starts-with', 'string?', 'string',
        'sub', 'swap', 'sym', 'symbol?', 'symbols', 'sync', 'sys-error',
        'sys-info', 'tan', 'tanh', 'term', 'throw-error', 'throw',
        'time-of-day', 'time', 'timer', 'title-case', 'trace-highlight',
        'trace', 'transpose', 'Tree', 'trim', 'true?', 'true', 'unicode',
        'unify', 'unique', 'unless', 'unpack', 'until', 'upper-case',
        'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while', 'write',
        'write-char', 'write-file', 'write-line', 'xfer-event',
        'xml-error', 'xml-parse', 'xml-type-tags', 'zero?',
    )

    # valid names: either a run of symbol characters or one or more
    # bracketed [..] segments
    valid_name = r'([\w!$%&*+.,/<=>?@^~|-])+|(\[.*?\])+'

    tokens = {
        'root': [
            # shebang
            (r'#!(.*?)$', Comment.Preproc),
            # comments starting with semicolon
            (r';.*$', Comment.Single),
            # comments starting with #
            (r'#.*$', Comment.Single),
            # whitespace
            (r'\s+', Whitespace),
            # strings, symbols and characters
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # braces
            (r'\{', String, "bracestring"),
            # [text] ... [/text] delimited strings
            # NOTE(review): the trailing '*' binds only to '\]', so this
            # matches '[text' followed by zero or more ']' rather than the
            # literal tag '[text]' — verify against upstream intent.
            (r'\[text\]*', String, "tagstring"),
            # 'special' operators...
            (r"('|:)", Operator),
            # highlight the builtins
            (words(builtins, suffix=r'\b'), Keyword),
            # the remaining functions (name immediately after an open paren)
            (r'(?<=\()' + valid_name, Name.Variable),
            # the remaining variables
            (valid_name, String.Symbol),
            # parentheses
            (r'(\(|\))', Punctuation),
        ],

        # braced strings... (braces nest, tracked via #push/#pop)
        'bracestring': [
            (r'\{', String, "#push"),
            (r'\}', String, "#pop"),
            ('[^{}]+', String),
        ],

        # tagged [text]...[/text] delimited strings...
        'tagstring': [
            (r'(?s)(.*?)(\[/text\])', String, '#pop'),
        ],
    }


class EmacsLispLexer(RegexLexer):
    """
    An ELisp lexer, parsing a stream and outputting the tokens
    needed to highlight elisp code.

    ..
versionadded:: 2.1 """ name = 'EmacsLisp' aliases = ['emacs-lisp', 'elisp', 'emacs'] filenames = ['*.el'] mimetypes = ['text/x-elisp', 'application/x-elisp'] flags = re.MULTILINE # couple of useful regexes # characters that are not macro-characters and can be used to begin a symbol nonmacro = r'\\.|[\w!$%&*+-/<=>?@^{}~|]' constituent = nonmacro + '|[#.:]' terminated = r'(?=[ "()\]\'\n,;`])' # whitespace or terminating macro characters # symbol token, reverse-engineered from hyperspec # Take a deep breath... symbol = r'((?:%s)(?:%s)*)' % (nonmacro, constituent) macros = { 'atomic-change-group', 'case', 'block', 'cl-block', 'cl-callf', 'cl-callf2', 'cl-case', 'cl-decf', 'cl-declaim', 'cl-declare', 'cl-define-compiler-macro', 'cl-defmacro', 'cl-defstruct', 'cl-defsubst', 'cl-deftype', 'cl-defun', 'cl-destructuring-bind', 'cl-do', 'cl-do*', 'cl-do-all-symbols', 'cl-do-symbols', 'cl-dolist', 'cl-dotimes', 'cl-ecase', 'cl-etypecase', 'eval-when', 'cl-eval-when', 'cl-flet', 'cl-flet*', 'cl-function', 'cl-incf', 'cl-labels', 'cl-letf', 'cl-letf*', 'cl-load-time-value', 'cl-locally', 'cl-loop', 'cl-macrolet', 'cl-multiple-value-bind', 'cl-multiple-value-setq', 'cl-progv', 'cl-psetf', 'cl-psetq', 'cl-pushnew', 'cl-remf', 'cl-return', 'cl-return-from', 'cl-rotatef', 'cl-shiftf', 'cl-symbol-macrolet', 'cl-tagbody', 'cl-the', 'cl-typecase', 'combine-after-change-calls', 'condition-case-unless-debug', 'decf', 'declaim', 'declare', 'declare-function', 'def-edebug-spec', 'defadvice', 'defclass', 'defcustom', 'defface', 'defgeneric', 'defgroup', 'define-advice', 'define-alternatives', 'define-compiler-macro', 'define-derived-mode', 'define-generic-mode', 'define-global-minor-mode', 'define-globalized-minor-mode', 'define-minor-mode', 'define-modify-macro', 'define-obsolete-face-alias', 'define-obsolete-function-alias', 'define-obsolete-variable-alias', 'define-setf-expander', 'define-skeleton', 'defmacro', 'defmethod', 'defsetf', 'defstruct', 'defsubst', 'deftheme', 'deftype', 
'defun', 'defvar-local', 'delay-mode-hooks', 'destructuring-bind', 'do', 'do*', 'do-all-symbols', 'do-symbols', 'dolist', 'dont-compile', 'dotimes', 'dotimes-with-progress-reporter', 'ecase', 'ert-deftest', 'etypecase', 'eval-and-compile', 'eval-when-compile', 'flet', 'ignore-errors', 'incf', 'labels', 'lambda', 'letrec', 'lexical-let', 'lexical-let*', 'loop', 'multiple-value-bind', 'multiple-value-setq', 'noreturn', 'oref', 'oref-default', 'oset', 'oset-default', 'pcase', 'pcase-defmacro', 'pcase-dolist', 'pcase-exhaustive', 'pcase-let', 'pcase-let*', 'pop', 'psetf', 'psetq', 'push', 'pushnew', 'remf', 'return', 'rotatef', 'rx', 'save-match-data', 'save-selected-window', 'save-window-excursion', 'setf', 'setq-local', 'shiftf', 'track-mouse', 'typecase', 'unless', 'use-package', 'when', 'while-no-input', 'with-case-table', 'with-category-table', 'with-coding-priority', 'with-current-buffer', 'with-demoted-errors', 'with-eval-after-load', 'with-file-modes', 'with-local-quit', 'with-output-to-string', 'with-output-to-temp-buffer', 'with-parsed-tramp-file-name', 'with-selected-frame', 'with-selected-window', 'with-silent-modifications', 'with-slots', 'with-syntax-table', 'with-temp-buffer', 'with-temp-file', 'with-temp-message', 'with-timeout', 'with-tramp-connection-property', 'with-tramp-file-property', 'with-tramp-progress-reporter', 'with-wrapper-hook', 'load-time-value', 'locally', 'macrolet', 'progv', 'return-from', } special_forms = { 'and', 'catch', 'cond', 'condition-case', 'defconst', 'defvar', 'function', 'if', 'interactive', 'let', 'let*', 'or', 'prog1', 'prog2', 'progn', 'quote', 'save-current-buffer', 'save-excursion', 'save-restriction', 'setq', 'setq-default', 'subr-arity', 'unwind-protect', 'while', } builtin_function = { '%', '*', '+', '-', '/', '/=', '1+', '1-', '<', '<=', '=', '>', '>=', 'Snarf-documentation', 'abort-recursive-edit', 'abs', 'accept-process-output', 'access-file', 'accessible-keymaps', 'acos', 'active-minibuffer-window', 
'add-face-text-property', 'add-name-to-file', 'add-text-properties', 'all-completions', 'append', 'apply', 'apropos-internal', 'aref', 'arrayp', 'aset', 'ash', 'asin', 'assoc', 'assoc-string', 'assq', 'atan', 'atom', 'autoload', 'autoload-do-load', 'backtrace', 'backtrace--locals', 'backtrace-debug', 'backtrace-eval', 'backtrace-frame', 'backward-char', 'backward-prefix-chars', 'barf-if-buffer-read-only', 'base64-decode-region', 'base64-decode-string', 'base64-encode-region', 'base64-encode-string', 'beginning-of-line', 'bidi-find-overridden-directionality', 'bidi-resolved-levels', 'bitmap-spec-p', 'bobp', 'bolp', 'bool-vector', 'bool-vector-count-consecutive', 'bool-vector-count-population', 'bool-vector-exclusive-or', 'bool-vector-intersection', 'bool-vector-not', 'bool-vector-p', 'bool-vector-set-difference', 'bool-vector-subsetp', 'bool-vector-union', 'boundp', 'buffer-base-buffer', 'buffer-chars-modified-tick', 'buffer-enable-undo', 'buffer-file-name', 'buffer-has-markers-at', 'buffer-list', 'buffer-live-p', 'buffer-local-value', 'buffer-local-variables', 'buffer-modified-p', 'buffer-modified-tick', 'buffer-name', 'buffer-size', 'buffer-string', 'buffer-substring', 'buffer-substring-no-properties', 'buffer-swap-text', 'bufferp', 'bury-buffer-internal', 'byte-code', 'byte-code-function-p', 'byte-to-position', 'byte-to-string', 'byteorder', 'call-interactively', 'call-last-kbd-macro', 'call-process', 'call-process-region', 'cancel-kbd-macro-events', 'capitalize', 'capitalize-region', 'capitalize-word', 'car', 'car-less-than-car', 'car-safe', 'case-table-p', 'category-docstring', 'category-set-mnemonics', 'category-table', 'category-table-p', 'ccl-execute', 'ccl-execute-on-string', 'ccl-program-p', 'cdr', 'cdr-safe', 'ceiling', 'char-after', 'char-before', 'char-category-set', 'char-charset', 'char-equal', 'char-or-string-p', 'char-resolve-modifiers', 'char-syntax', 'char-table-extra-slot', 'char-table-p', 'char-table-parent', 'char-table-range', 
'char-table-subtype', 'char-to-string', 'char-width', 'characterp', 'charset-after', 'charset-id-internal', 'charset-plist', 'charset-priority-list', 'charsetp', 'check-coding-system', 'check-coding-systems-region', 'clear-buffer-auto-save-failure', 'clear-charset-maps', 'clear-face-cache', 'clear-font-cache', 'clear-image-cache', 'clear-string', 'clear-this-command-keys', 'close-font', 'clrhash', 'coding-system-aliases', 'coding-system-base', 'coding-system-eol-type', 'coding-system-p', 'coding-system-plist', 'coding-system-priority-list', 'coding-system-put', 'color-distance', 'color-gray-p', 'color-supported-p', 'combine-after-change-execute', 'command-error-default-function', 'command-remapping', 'commandp', 'compare-buffer-substrings', 'compare-strings', 'compare-window-configurations', 'completing-read', 'compose-region-internal', 'compose-string-internal', 'composition-get-gstring', 'compute-motion', 'concat', 'cons', 'consp', 'constrain-to-field', 'continue-process', 'controlling-tty-p', 'coordinates-in-window-p', 'copy-alist', 'copy-category-table', 'copy-file', 'copy-hash-table', 'copy-keymap', 'copy-marker', 'copy-sequence', 'copy-syntax-table', 'copysign', 'cos', 'current-active-maps', 'current-bidi-paragraph-direction', 'current-buffer', 'current-case-table', 'current-column', 'current-global-map', 'current-idle-time', 'current-indentation', 'current-input-mode', 'current-local-map', 'current-message', 'current-minor-mode-maps', 'current-time', 'current-time-string', 'current-time-zone', 'current-window-configuration', 'cygwin-convert-file-name-from-windows', 'cygwin-convert-file-name-to-windows', 'daemon-initialized', 'daemonp', 'dbus--init-bus', 'dbus-get-unique-name', 'dbus-message-internal', 'debug-timer-check', 'declare-equiv-charset', 'decode-big5-char', 'decode-char', 'decode-coding-region', 'decode-coding-string', 'decode-sjis-char', 'decode-time', 'default-boundp', 'default-file-modes', 'default-printer-name', 'default-toplevel-value', 
'default-value', 'define-category', 'define-charset-alias', 'define-charset-internal', 'define-coding-system-alias', 'define-coding-system-internal', 'define-fringe-bitmap', 'define-hash-table-test', 'define-key', 'define-prefix-command', 'delete', 'delete-all-overlays', 'delete-and-extract-region', 'delete-char', 'delete-directory-internal', 'delete-field', 'delete-file', 'delete-frame', 'delete-other-windows-internal', 'delete-overlay', 'delete-process', 'delete-region', 'delete-terminal', 'delete-window-internal', 'delq', 'describe-buffer-bindings', 'describe-vector', 'destroy-fringe-bitmap', 'detect-coding-region', 'detect-coding-string', 'ding', 'directory-file-name', 'directory-files', 'directory-files-and-attributes', 'discard-input', 'display-supports-face-attributes-p', 'do-auto-save', 'documentation', 'documentation-property', 'downcase', 'downcase-region', 'downcase-word', 'draw-string', 'dump-colors', 'dump-emacs', 'dump-face', 'dump-frame-glyph-matrix', 'dump-glyph-matrix', 'dump-glyph-row', 'dump-redisplay-history', 'dump-tool-bar-row', 'elt', 'emacs-pid', 'encode-big5-char', 'encode-char', 'encode-coding-region', 'encode-coding-string', 'encode-sjis-char', 'encode-time', 'end-kbd-macro', 'end-of-line', 'eobp', 'eolp', 'eq', 'eql', 'equal', 'equal-including-properties', 'erase-buffer', 'error-message-string', 'eval', 'eval-buffer', 'eval-region', 'event-convert-list', 'execute-kbd-macro', 'exit-recursive-edit', 'exp', 'expand-file-name', 'expt', 'external-debugging-output', 'face-attribute-relative-p', 'face-attributes-as-vector', 'face-font', 'fboundp', 'fceiling', 'fetch-bytecode', 'ffloor', 'field-beginning', 'field-end', 'field-string', 'field-string-no-properties', 'file-accessible-directory-p', 'file-acl', 'file-attributes', 'file-attributes-lessp', 'file-directory-p', 'file-executable-p', 'file-exists-p', 'file-locked-p', 'file-modes', 'file-name-absolute-p', 'file-name-all-completions', 'file-name-as-directory', 'file-name-completion', 
'file-name-directory', 'file-name-nondirectory', 'file-newer-than-file-p', 'file-readable-p', 'file-regular-p', 'file-selinux-context', 'file-symlink-p', 'file-system-info', 'file-system-info', 'file-writable-p', 'fillarray', 'find-charset-region', 'find-charset-string', 'find-coding-systems-region-internal', 'find-composition-internal', 'find-file-name-handler', 'find-font', 'find-operation-coding-system', 'float', 'float-time', 'floatp', 'floor', 'fmakunbound', 'following-char', 'font-at', 'font-drive-otf', 'font-face-attributes', 'font-family-list', 'font-get', 'font-get-glyphs', 'font-get-system-font', 'font-get-system-normal-font', 'font-info', 'font-match-p', 'font-otf-alternates', 'font-put', 'font-shape-gstring', 'font-spec', 'font-variation-glyphs', 'font-xlfd-name', 'fontp', 'fontset-font', 'fontset-info', 'fontset-list', 'fontset-list-all', 'force-mode-line-update', 'force-window-update', 'format', 'format-mode-line', 'format-network-address', 'format-time-string', 'forward-char', 'forward-comment', 'forward-line', 'forward-word', 'frame-border-width', 'frame-bottom-divider-width', 'frame-can-run-window-configuration-change-hook', 'frame-char-height', 'frame-char-width', 'frame-face-alist', 'frame-first-window', 'frame-focus', 'frame-font-cache', 'frame-fringe-width', 'frame-list', 'frame-live-p', 'frame-or-buffer-changed-p', 'frame-parameter', 'frame-parameters', 'frame-pixel-height', 'frame-pixel-width', 'frame-pointer-visible-p', 'frame-right-divider-width', 'frame-root-window', 'frame-scroll-bar-height', 'frame-scroll-bar-width', 'frame-selected-window', 'frame-terminal', 'frame-text-cols', 'frame-text-height', 'frame-text-lines', 'frame-text-width', 'frame-total-cols', 'frame-total-lines', 'frame-visible-p', 'framep', 'frexp', 'fringe-bitmaps-at-pos', 'fround', 'fset', 'ftruncate', 'funcall', 'funcall-interactively', 'function-equal', 'functionp', 'gap-position', 'gap-size', 'garbage-collect', 'gc-status', 'generate-new-buffer-name', 'get', 
'get-buffer', 'get-buffer-create', 'get-buffer-process', 'get-buffer-window', 'get-byte', 'get-char-property', 'get-char-property-and-overlay', 'get-file-buffer', 'get-file-char', 'get-internal-run-time', 'get-load-suffixes', 'get-pos-property', 'get-process', 'get-screen-color', 'get-text-property', 'get-unicode-property-internal', 'get-unused-category', 'get-unused-iso-final-char', 'getenv-internal', 'gethash', 'gfile-add-watch', 'gfile-rm-watch', 'global-key-binding', 'gnutls-available-p', 'gnutls-boot', 'gnutls-bye', 'gnutls-deinit', 'gnutls-error-fatalp', 'gnutls-error-string', 'gnutls-errorp', 'gnutls-get-initstage', 'gnutls-peer-status', 'gnutls-peer-status-warning-describe', 'goto-char', 'gpm-mouse-start', 'gpm-mouse-stop', 'group-gid', 'group-real-gid', 'handle-save-session', 'handle-switch-frame', 'hash-table-count', 'hash-table-p', 'hash-table-rehash-size', 'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test', 'hash-table-weakness', 'iconify-frame', 'identity', 'image-flush', 'image-mask-p', 'image-metadata', 'image-size', 'imagemagick-types', 'imagep', 'indent-to', 'indirect-function', 'indirect-variable', 'init-image-library', 'inotify-add-watch', 'inotify-rm-watch', 'input-pending-p', 'insert', 'insert-and-inherit', 'insert-before-markers', 'insert-before-markers-and-inherit', 'insert-buffer-substring', 'insert-byte', 'insert-char', 'insert-file-contents', 'insert-startup-screen', 'int86', 'integer-or-marker-p', 'integerp', 'interactive-form', 'intern', 'intern-soft', 'internal--track-mouse', 'internal-char-font', 'internal-complete-buffer', 'internal-copy-lisp-face', 'internal-default-process-filter', 'internal-default-process-sentinel', 'internal-describe-syntax-value', 'internal-event-symbol-parse-modifiers', 'internal-face-x-get-resource', 'internal-get-lisp-face-attribute', 'internal-lisp-face-attribute-values', 'internal-lisp-face-empty-p', 'internal-lisp-face-equal-p', 'internal-lisp-face-p', 'internal-make-lisp-face', 
'internal-make-var-non-special', 'internal-merge-in-global-face', 'internal-set-alternative-font-family-alist', 'internal-set-alternative-font-registry-alist', 'internal-set-font-selection-order', 'internal-set-lisp-face-attribute', 'internal-set-lisp-face-attribute-from-resource', 'internal-show-cursor', 'internal-show-cursor-p', 'interrupt-process', 'invisible-p', 'invocation-directory', 'invocation-name', 'isnan', 'iso-charset', 'key-binding', 'key-description', 'keyboard-coding-system', 'keymap-parent', 'keymap-prompt', 'keymapp', 'keywordp', 'kill-all-local-variables', 'kill-buffer', 'kill-emacs', 'kill-local-variable', 'kill-process', 'last-nonminibuffer-frame', 'lax-plist-get', 'lax-plist-put', 'ldexp', 'length', 'libxml-parse-html-region', 'libxml-parse-xml-region', 'line-beginning-position', 'line-end-position', 'line-pixel-height', 'list', 'list-fonts', 'list-system-processes', 'listp', 'load', 'load-average', 'local-key-binding', 'local-variable-if-set-p', 'local-variable-p', 'locale-info', 'locate-file-internal', 'lock-buffer', 'log', 'logand', 'logb', 'logior', 'lognot', 'logxor', 'looking-at', 'lookup-image', 'lookup-image-map', 'lookup-key', 'lower-frame', 'lsh', 'macroexpand', 'make-bool-vector', 'make-byte-code', 'make-category-set', 'make-category-table', 'make-char', 'make-char-table', 'make-directory-internal', 'make-frame-invisible', 'make-frame-visible', 'make-hash-table', 'make-indirect-buffer', 'make-keymap', 'make-list', 'make-local-variable', 'make-marker', 'make-network-process', 'make-overlay', 'make-serial-process', 'make-sparse-keymap', 'make-string', 'make-symbol', 'make-symbolic-link', 'make-temp-name', 'make-terminal-frame', 'make-variable-buffer-local', 'make-variable-frame-local', 'make-vector', 'makunbound', 'map-char-table', 'map-charset-chars', 'map-keymap', 'map-keymap-internal', 'mapatoms', 'mapc', 'mapcar', 'mapconcat', 'maphash', 'mark-marker', 'marker-buffer', 'marker-insertion-type', 'marker-position', 'markerp', 
'match-beginning', 'match-data', 'match-end', 'matching-paren', 'max', 'max-char', 'md5', 'member', 'memory-info', 'memory-limit', 'memory-use-counts', 'memq', 'memql', 'menu-bar-menu-at-x-y', 'menu-or-popup-active-p', 'menu-or-popup-active-p', 'merge-face-attribute', 'message', 'message-box', 'message-or-box', 'min', 'minibuffer-completion-contents', 'minibuffer-contents', 'minibuffer-contents-no-properties', 'minibuffer-depth', 'minibuffer-prompt', 'minibuffer-prompt-end', 'minibuffer-selected-window', 'minibuffer-window', 'minibufferp', 'minor-mode-key-binding', 'mod', 'modify-category-entry', 'modify-frame-parameters', 'modify-syntax-entry', 'mouse-pixel-position', 'mouse-position', 'move-overlay', 'move-point-visually', 'move-to-column', 'move-to-window-line', 'msdos-downcase-filename', 'msdos-long-file-names', 'msdos-memget', 'msdos-memput', 'msdos-mouse-disable', 'msdos-mouse-enable', 'msdos-mouse-init', 'msdos-mouse-p', 'msdos-remember-default-colors', 'msdos-set-keyboard', 'msdos-set-mouse-buttons', 'multibyte-char-to-unibyte', 'multibyte-string-p', 'narrow-to-region', 'natnump', 'nconc', 'network-interface-info', 'network-interface-list', 'new-fontset', 'newline-cache-check', 'next-char-property-change', 'next-frame', 'next-overlay-change', 'next-property-change', 'next-read-file-uses-dialog-p', 'next-single-char-property-change', 'next-single-property-change', 'next-window', 'nlistp', 'nreverse', 'nth', 'nthcdr', 'null', 'number-or-marker-p', 'number-to-string', 'numberp', 'open-dribble-file', 'open-font', 'open-termscript', 'optimize-char-table', 'other-buffer', 'other-window-for-scrolling', 'overlay-buffer', 'overlay-end', 'overlay-get', 'overlay-lists', 'overlay-properties', 'overlay-put', 'overlay-recenter', 'overlay-start', 'overlayp', 'overlays-at', 'overlays-in', 'parse-partial-sexp', 'play-sound-internal', 'plist-get', 'plist-member', 'plist-put', 'point', 'point-marker', 'point-max', 'point-max-marker', 'point-min', 'point-min-marker', 
'pos-visible-in-window-p', 'position-bytes', 'posix-looking-at', 'posix-search-backward', 'posix-search-forward', 'posix-string-match', 'posn-at-point', 'posn-at-x-y', 'preceding-char', 'prefix-numeric-value', 'previous-char-property-change', 'previous-frame', 'previous-overlay-change', 'previous-property-change', 'previous-single-char-property-change', 'previous-single-property-change', 'previous-window', 'prin1', 'prin1-to-string', 'princ', 'print', 'process-attributes', 'process-buffer', 'process-coding-system', 'process-command', 'process-connection', 'process-contact', 'process-datagram-address', 'process-exit-status', 'process-filter', 'process-filter-multibyte-p', 'process-id', 'process-inherit-coding-system-flag', 'process-list', 'process-mark', 'process-name', 'process-plist', 'process-query-on-exit-flag', 'process-running-child-p', 'process-send-eof', 'process-send-region', 'process-send-string', 'process-sentinel', 'process-status', 'process-tty-name', 'process-type', 'processp', 'profiler-cpu-log', 'profiler-cpu-running-p', 'profiler-cpu-start', 'profiler-cpu-stop', 'profiler-memory-log', 'profiler-memory-running-p', 'profiler-memory-start', 'profiler-memory-stop', 'propertize', 'purecopy', 'put', 'put-text-property', 'put-unicode-property-internal', 'puthash', 'query-font', 'query-fontset', 'quit-process', 'raise-frame', 'random', 'rassoc', 'rassq', 're-search-backward', 're-search-forward', 'read', 'read-buffer', 'read-char', 'read-char-exclusive', 'read-coding-system', 'read-command', 'read-event', 'read-from-minibuffer', 'read-from-string', 'read-function', 'read-key-sequence', 'read-key-sequence-vector', 'read-no-blanks-input', 'read-non-nil-coding-system', 'read-string', 'read-variable', 'recent-auto-save-p', 'recent-doskeys', 'recent-keys', 'recenter', 'recursion-depth', 'recursive-edit', 'redirect-debugging-output', 'redirect-frame-focus', 'redisplay', 'redraw-display', 'redraw-frame', 'regexp-quote', 'region-beginning', 'region-end', 
'register-ccl-program', 'register-code-conversion-map', 'remhash', 'remove-list-of-text-properties', 'remove-text-properties', 'rename-buffer', 'rename-file', 'replace-match', 'reset-this-command-lengths', 'resize-mini-window-internal', 'restore-buffer-modified-p', 'resume-tty', 'reverse', 'round', 'run-hook-with-args', 'run-hook-with-args-until-failure', 'run-hook-with-args-until-success', 'run-hook-wrapped', 'run-hooks', 'run-window-configuration-change-hook', 'run-window-scroll-functions', 'safe-length', 'scan-lists', 'scan-sexps', 'scroll-down', 'scroll-left', 'scroll-other-window', 'scroll-right', 'scroll-up', 'search-backward', 'search-forward', 'secure-hash', 'select-frame', 'select-window', 'selected-frame', 'selected-window', 'self-insert-command', 'send-string-to-terminal', 'sequencep', 'serial-process-configure', 'set', 'set-buffer', 'set-buffer-auto-saved', 'set-buffer-major-mode', 'set-buffer-modified-p', 'set-buffer-multibyte', 'set-case-table', 'set-category-table', 'set-char-table-extra-slot', 'set-char-table-parent', 'set-char-table-range', 'set-charset-plist', 'set-charset-priority', 'set-coding-system-priority', 'set-cursor-size', 'set-default', 'set-default-file-modes', 'set-default-toplevel-value', 'set-file-acl', 'set-file-modes', 'set-file-selinux-context', 'set-file-times', 'set-fontset-font', 'set-frame-height', 'set-frame-position', 'set-frame-selected-window', 'set-frame-size', 'set-frame-width', 'set-fringe-bitmap-face', 'set-input-interrupt-mode', 'set-input-meta-mode', 'set-input-mode', 'set-keyboard-coding-system-internal', 'set-keymap-parent', 'set-marker', 'set-marker-insertion-type', 'set-match-data', 'set-message-beep', 'set-minibuffer-window', 'set-mouse-pixel-position', 'set-mouse-position', 'set-network-process-option', 'set-output-flow-control', 'set-process-buffer', 'set-process-coding-system', 'set-process-datagram-address', 'set-process-filter', 'set-process-filter-multibyte', 'set-process-inherit-coding-system-flag', 
'set-process-plist', 'set-process-query-on-exit-flag', 'set-process-sentinel', 'set-process-window-size', 'set-quit-char', 'set-safe-terminal-coding-system-internal', 'set-screen-color', 'set-standard-case-table', 'set-syntax-table', 'set-terminal-coding-system-internal', 'set-terminal-local-value', 'set-terminal-parameter', 'set-text-properties', 'set-time-zone-rule', 'set-visited-file-modtime', 'set-window-buffer', 'set-window-combination-limit', 'set-window-configuration', 'set-window-dedicated-p', 'set-window-display-table', 'set-window-fringes', 'set-window-hscroll', 'set-window-margins', 'set-window-new-normal', 'set-window-new-pixel', 'set-window-new-total', 'set-window-next-buffers', 'set-window-parameter', 'set-window-point', 'set-window-prev-buffers', 'set-window-redisplay-end-trigger', 'set-window-scroll-bars', 'set-window-start', 'set-window-vscroll', 'setcar', 'setcdr', 'setplist', 'show-face-resources', 'signal', 'signal-process', 'sin', 'single-key-description', 'skip-chars-backward', 'skip-chars-forward', 'skip-syntax-backward', 'skip-syntax-forward', 'sleep-for', 'sort', 'sort-charsets', 'special-variable-p', 'split-char', 'split-window-internal', 'sqrt', 'standard-case-table', 'standard-category-table', 'standard-syntax-table', 'start-kbd-macro', 'start-process', 'stop-process', 'store-kbd-macro-event', 'string', 'string=', 'string<', 'string>', 'string-as-multibyte', 'string-as-unibyte', 'string-bytes', 'string-collate-equalp', 'string-collate-lessp', 'string-equal', 'string-greaterp', 'string-lessp', 'string-make-multibyte', 'string-make-unibyte', 'string-match', 'string-to-char', 'string-to-multibyte', 'string-to-number', 'string-to-syntax', 'string-to-unibyte', 'string-width', 'stringp', 'subr-name', 'subrp', 'subst-char-in-region', 'substitute-command-keys', 'substitute-in-file-name', 'substring', 'substring-no-properties', 'suspend-emacs', 'suspend-tty', 'suspicious-object', 'sxhash', 'symbol-function', 'symbol-name', 'symbol-plist', 
'symbol-value', 'symbolp', 'syntax-table', 'syntax-table-p', 'system-groups', 'system-move-file-to-trash', 'system-name', 'system-users', 'tan', 'terminal-coding-system', 'terminal-list', 'terminal-live-p', 'terminal-local-value', 'terminal-name', 'terminal-parameter', 'terminal-parameters', 'terpri', 'test-completion', 'text-char-description', 'text-properties-at', 'text-property-any', 'text-property-not-all', 'this-command-keys', 'this-command-keys-vector', 'this-single-command-keys', 'this-single-command-raw-keys', 'time-add', 'time-less-p', 'time-subtract', 'tool-bar-get-system-style', 'tool-bar-height', 'tool-bar-pixel-width', 'top-level', 'trace-redisplay', 'trace-to-stderr', 'translate-region-internal', 'transpose-regions', 'truncate', 'try-completion', 'tty-display-color-cells', 'tty-display-color-p', 'tty-no-underline', 'tty-suppress-bold-inverse-default-colors', 'tty-top-frame', 'tty-type', 'type-of', 'undo-boundary', 'unencodable-char-position', 'unhandled-file-name-directory', 'unibyte-char-to-multibyte', 'unibyte-string', 'unicode-property-table-internal', 'unify-charset', 'unintern', 'unix-sync', 'unlock-buffer', 'upcase', 'upcase-initials', 'upcase-initials-region', 'upcase-region', 'upcase-word', 'use-global-map', 'use-local-map', 'user-full-name', 'user-login-name', 'user-real-login-name', 'user-real-uid', 'user-uid', 'variable-binding-locus', 'vconcat', 'vector', 'vector-or-char-table-p', 'vectorp', 'verify-visited-file-modtime', 'vertical-motion', 'visible-frame-list', 'visited-file-modtime', 'w16-get-clipboard-data', 'w16-selection-exists-p', 'w16-set-clipboard-data', 'w32-battery-status', 'w32-default-color-map', 'w32-define-rgb-color', 'w32-display-monitor-attributes-list', 'w32-frame-menu-bar-size', 'w32-frame-rect', 'w32-get-clipboard-data', 'w32-get-codepage-charset', 'w32-get-console-codepage', 'w32-get-console-output-codepage', 'w32-get-current-locale-id', 'w32-get-default-locale-id', 'w32-get-keyboard-layout', 'w32-get-locale-info', 
'w32-get-valid-codepages', 'w32-get-valid-keyboard-layouts', 'w32-get-valid-locale-ids', 'w32-has-winsock', 'w32-long-file-name', 'w32-reconstruct-hot-key', 'w32-register-hot-key', 'w32-registered-hot-keys', 'w32-selection-exists-p', 'w32-send-sys-command', 'w32-set-clipboard-data', 'w32-set-console-codepage', 'w32-set-console-output-codepage', 'w32-set-current-locale', 'w32-set-keyboard-layout', 'w32-set-process-priority', 'w32-shell-execute', 'w32-short-file-name', 'w32-toggle-lock-key', 'w32-unload-winsock', 'w32-unregister-hot-key', 'w32-window-exists-p', 'w32notify-add-watch', 'w32notify-rm-watch', 'waiting-for-user-input-p', 'where-is-internal', 'widen', 'widget-apply', 'widget-get', 'widget-put', 'window-absolute-pixel-edges', 'window-at', 'window-body-height', 'window-body-width', 'window-bottom-divider-width', 'window-buffer', 'window-combination-limit', 'window-configuration-frame', 'window-configuration-p', 'window-dedicated-p', 'window-display-table', 'window-edges', 'window-end', 'window-frame', 'window-fringes', 'window-header-line-height', 'window-hscroll', 'window-inside-absolute-pixel-edges', 'window-inside-edges', 'window-inside-pixel-edges', 'window-left-child', 'window-left-column', 'window-line-height', 'window-list', 'window-list-1', 'window-live-p', 'window-margins', 'window-minibuffer-p', 'window-mode-line-height', 'window-new-normal', 'window-new-pixel', 'window-new-total', 'window-next-buffers', 'window-next-sibling', 'window-normal-size', 'window-old-point', 'window-parameter', 'window-parameters', 'window-parent', 'window-pixel-edges', 'window-pixel-height', 'window-pixel-left', 'window-pixel-top', 'window-pixel-width', 'window-point', 'window-prev-buffers', 'window-prev-sibling', 'window-redisplay-end-trigger', 'window-resize-apply', 'window-resize-apply-total', 'window-right-divider-width', 'window-scroll-bar-height', 'window-scroll-bar-width', 'window-scroll-bars', 'window-start', 'window-system', 'window-text-height', 
'window-text-pixel-size', 'window-text-width', 'window-top-child', 'window-top-line', 'window-total-height', 'window-total-width', 'window-use-time', 'window-valid-p', 'window-vscroll', 'windowp', 'write-char', 'write-region', 'x-backspace-delete-keys-p', 'x-change-window-property', 'x-change-window-property', 'x-close-connection', 'x-close-connection', 'x-create-frame', 'x-create-frame', 'x-delete-window-property', 'x-delete-window-property', 'x-disown-selection-internal', 'x-display-backing-store', 'x-display-backing-store', 'x-display-color-cells', 'x-display-color-cells', 'x-display-grayscale-p', 'x-display-grayscale-p', 'x-display-list', 'x-display-list', 'x-display-mm-height', 'x-display-mm-height', 'x-display-mm-width', 'x-display-mm-width', 'x-display-monitor-attributes-list', 'x-display-pixel-height', 'x-display-pixel-height', 'x-display-pixel-width', 'x-display-pixel-width', 'x-display-planes', 'x-display-planes', 'x-display-save-under', 'x-display-save-under', 'x-display-screens', 'x-display-screens', 'x-display-visual-class', 'x-display-visual-class', 'x-family-fonts', 'x-file-dialog', 'x-file-dialog', 'x-file-dialog', 'x-focus-frame', 'x-frame-geometry', 'x-frame-geometry', 'x-get-atom-name', 'x-get-resource', 'x-get-selection-internal', 'x-hide-tip', 'x-hide-tip', 'x-list-fonts', 'x-load-color-file', 'x-menu-bar-open-internal', 'x-menu-bar-open-internal', 'x-open-connection', 'x-open-connection', 'x-own-selection-internal', 'x-parse-geometry', 'x-popup-dialog', 'x-popup-menu', 'x-register-dnd-atom', 'x-select-font', 'x-select-font', 'x-selection-exists-p', 'x-selection-owner-p', 'x-send-client-message', 'x-server-max-request-size', 'x-server-max-request-size', 'x-server-vendor', 'x-server-vendor', 'x-server-version', 'x-server-version', 'x-show-tip', 'x-show-tip', 'x-synchronize', 'x-synchronize', 'x-uses-old-gtk-dialog', 'x-window-property', 'x-window-property', 'x-wm-set-size-hint', 'xw-color-defined-p', 'xw-color-defined-p', 'xw-color-values', 
'xw-color-values', 'xw-display-color-p', 'xw-display-color-p', 'yes-or-no-p', 'zlib-available-p', 'zlib-decompress-region', 'forward-point', } builtin_function_highlighted = { 'defvaralias', 'provide', 'require', 'with-no-warnings', 'define-widget', 'with-electric-help', 'throw', 'defalias', 'featurep' } lambda_list_keywords = { '&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional', '&rest', '&whole', } error_keywords = { 'cl-assert', 'cl-check-type', 'error', 'signal', 'user-error', 'warn', } def get_tokens_unprocessed(self, text): stack = ['root'] for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack): if token is Name.Variable: if value in EmacsLispLexer.builtin_function: yield index, Name.Function, value continue if value in EmacsLispLexer.special_forms: yield index, Keyword, value continue if value in EmacsLispLexer.error_keywords: yield index, Name.Exception, value continue if value in EmacsLispLexer.builtin_function_highlighted: yield index, Name.Builtin, value continue if value in EmacsLispLexer.macros: yield index, Name.Builtin, value continue if value in EmacsLispLexer.lambda_list_keywords: yield index, Keyword.Pseudo, value continue yield index, token, value tokens = { 'root': [ default('body'), ], 'body': [ # whitespace (r'\s+', Whitespace), # single-line comment (r';.*$', Comment.Single), # strings and characters (r'"', String, 'string'), (r'\?([^\\]|\\.)', String.Char), # quoting (r":" + symbol, Name.Builtin), (r"::" + symbol, String.Symbol), (r"'" + symbol, String.Symbol), (r"'", Operator), (r"`", Operator), # decimal numbers (r'[-+]?\d+\.?' 
+ terminated, Number.Integer), (r'[-+]?\d+/\d+' + terminated, Number), (r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' + terminated, Number.Float), # vectors (r'\[|\]', Punctuation), # uninterned symbol (r'#:' + symbol, String.Symbol), # read syntax for char tables (r'#\^\^?', Operator), # function shorthand (r'#\'', Name.Function), # binary rational (r'#[bB][+-]?[01]+(/[01]+)?', Number.Bin), # octal rational (r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct), # hex rational (r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex), # radix rational (r'#\d+r[+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number), # reference (r'#\d+=', Operator), (r'#\d+#', Operator), # special operators that should have been parsed already (r'(,@|,|\.|:)', Operator), # special constants (r'(t|nil)' + terminated, Name.Constant), # functions and variables (r'\*' + symbol + r'\*', Name.Variable.Global), (symbol, Name.Variable), # parentheses (r'#\(', Operator, 'body'), (r'\(', Punctuation, 'body'), (r'\)', Punctuation, '#pop'), ], 'string': [ (r'[^"\\`]+', String), (r'`%s\'' % symbol, String.Symbol), (r'`', String), (r'\\.', String), (r'\\\n', String), (r'"', String, '#pop'), ], } class ShenLexer(RegexLexer): """ Lexer for Shen source code. .. 
versionadded:: 2.1 """ name = 'Shen' url = 'http://shenlanguage.org/' aliases = ['shen'] filenames = ['*.shen'] mimetypes = ['text/x-shen', 'application/x-shen'] DECLARATIONS = ( 'datatype', 'define', 'defmacro', 'defprolog', 'defcc', 'synonyms', 'declare', 'package', 'type', 'function', ) SPECIAL_FORMS = ( 'lambda', 'get', 'let', 'if', 'cases', 'cond', 'put', 'time', 'freeze', 'value', 'load', '$', 'protect', 'or', 'and', 'not', 'do', 'output', 'prolog?', 'trap-error', 'error', 'make-string', '/.', 'set', '@p', '@s', '@v', ) BUILTINS = ( '==', '=', '*', '+', '-', '/', '<', '>', '>=', '<=', '<-address', '<-vector', 'abort', 'absvector', 'absvector?', 'address->', 'adjoin', 'append', 'arity', 'assoc', 'bind', 'boolean?', 'bound?', 'call', 'cd', 'close', 'cn', 'compile', 'concat', 'cons', 'cons?', 'cut', 'destroy', 'difference', 'element?', 'empty?', 'enable-type-theory', 'error-to-string', 'eval', 'eval-kl', 'exception', 'explode', 'external', 'fail', 'fail-if', 'file', 'findall', 'fix', 'fst', 'fwhen', 'gensym', 'get-time', 'hash', 'hd', 'hdstr', 'hdv', 'head', 'identical', 'implementation', 'in', 'include', 'include-all-but', 'inferences', 'input', 'input+', 'integer?', 'intern', 'intersection', 'is', 'kill', 'language', 'length', 'limit', 'lineread', 'loaded', 'macro', 'macroexpand', 'map', 'mapcan', 'maxinferences', 'mode', 'n->string', 'nl', 'nth', 'null', 'number?', 'occurrences', 'occurs-check', 'open', 'os', 'out', 'port', 'porters', 'pos', 'pr', 'preclude', 'preclude-all-but', 'print', 'profile', 'profile-results', 'ps', 'quit', 'read', 'read+', 'read-byte', 'read-file', 'read-file-as-bytelist', 'read-file-as-string', 'read-from-string', 'release', 'remove', 'return', 'reverse', 'run', 'save', 'set', 'simple-error', 'snd', 'specialise', 'spy', 'step', 'stinput', 'stoutput', 'str', 'string->n', 'string->symbol', 'string?', 'subst', 'symbol?', 'systemf', 'tail', 'tc', 'tc?', 'thaw', 'tl', 'tlstr', 'tlv', 'track', 'tuple?', 'undefmacro', 'unify', 'unify!', 
'union', 'unprofile', 'unspecialise', 'untrack', 'variable?', 'vector', 'vector->', 'vector?', 'verified', 'version', 'warn', 'when', 'write-byte', 'write-to-file', 'y-or-n?', ) BUILTINS_ANYWHERE = ('where', 'skip', '>>', '_', '!', '<e>', '<!>') MAPPINGS = {s: Keyword for s in DECLARATIONS} MAPPINGS.update((s, Name.Builtin) for s in BUILTINS) MAPPINGS.update((s, Keyword) for s in SPECIAL_FORMS) valid_symbol_chars = r'[\w!$%*+,<=>?/.\'@&#:-]' valid_name = '%s+' % valid_symbol_chars symbol_name = r'[a-z!$%%*+,<=>?/.\'@&#_-]%s*' % valid_symbol_chars variable = r'[A-Z]%s*' % valid_symbol_chars tokens = { 'string': [ (r'"', String, '#pop'), (r'c#\d{1,3};', String.Escape), (r'~[ARS%]', String.Interpol), (r'(?s).', String), ], 'root': [ (r'(?s)\\\*.*?\*\\', Comment.Multiline), # \* ... *\ (r'\\\\.*', Comment.Single), # \\ ... (r'\s+', Whitespace), (r'_{5,}', Punctuation), (r'={5,}', Punctuation), (r'(;|:=|\||--?>|<--?)', Punctuation), (r'(:-|:|\{|\})', Literal), (r'[+-]*\d*\.\d+(e[+-]?\d+)?', Number.Float), (r'[+-]*\d+', Number.Integer), (r'"', String, 'string'), (variable, Name.Variable), (r'(true|false|<>|\[\])', Keyword.Pseudo), (symbol_name, Literal), (r'(\[|\]|\(|\))', Punctuation), ], } def get_tokens_unprocessed(self, text): tokens = RegexLexer.get_tokens_unprocessed(self, text) tokens = self._process_symbols(tokens) tokens = self._process_declarations(tokens) return tokens def _relevant(self, token): return token not in (Text, Whitespace, Comment.Single, Comment.Multiline) def _process_declarations(self, tokens): opening_paren = False for index, token, value in tokens: yield index, token, value if self._relevant(token): if opening_paren and token == Keyword and value in self.DECLARATIONS: declaration = value yield from self._process_declaration(declaration, tokens) opening_paren = value == '(' and token == Punctuation def _process_symbols(self, tokens): opening_paren = False for index, token, value in tokens: if opening_paren and token in (Literal, Name.Variable): 
token = self.MAPPINGS.get(value, Name.Function) elif token == Literal and value in self.BUILTINS_ANYWHERE: token = Name.Builtin opening_paren = value == '(' and token == Punctuation yield index, token, value def _process_declaration(self, declaration, tokens): for index, token, value in tokens: if self._relevant(token): break yield index, token, value if declaration == 'datatype': prev_was_colon = False token = Keyword.Type if token == Literal else token yield index, token, value for index, token, value in tokens: if prev_was_colon and token == Literal: token = Keyword.Type yield index, token, value if self._relevant(token): prev_was_colon = token == Literal and value == ':' elif declaration == 'package': token = Name.Namespace if token == Literal else token yield index, token, value elif declaration == 'define': token = Name.Function if token == Literal else token yield index, token, value for index, token, value in tokens: if self._relevant(token): break yield index, token, value if value == '{' and token == Literal: yield index, Punctuation, value for index, token, value in self._process_signature(tokens): yield index, token, value else: yield index, token, value else: token = Name.Function if token == Literal else token yield index, token, value return def _process_signature(self, tokens): for index, token, value in tokens: if token == Literal and value == '}': yield index, Punctuation, value return elif token in (Literal, Name.Function): token = Name.Variable if value.istitle() else Keyword.Type yield index, token, value class CPSALexer(RegexLexer): """ A CPSA lexer based on the CPSA language as of version 2.2.12 .. versionadded:: 2.1 """ name = 'CPSA' aliases = ['cpsa'] filenames = ['*.cpsa'] mimetypes = [] # list of known keywords and builtins taken form vim 6.4 scheme.vim # syntax file. 
_keywords = ( 'herald', 'vars', 'defmacro', 'include', 'defprotocol', 'defrole', 'defskeleton', 'defstrand', 'deflistener', 'non-orig', 'uniq-orig', 'pen-non-orig', 'precedes', 'trace', 'send', 'recv', 'name', 'text', 'skey', 'akey', 'data', 'mesg', ) _builtins = ( 'cat', 'enc', 'hash', 'privk', 'pubk', 'invk', 'ltk', 'gen', 'exp', ) # valid names for identifiers # well, names can only not consist fully of numbers # but this should be good enough for now valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+' tokens = { 'root': [ # the comments - always starting with semicolon # and going to the end of the line (r';.*$', Comment.Single), # whitespaces - usually not relevant (r'\s+', Whitespace), # numbers (r'-?\d+\.\d+', Number.Float), (r'-?\d+', Number.Integer), # support for uncommon kinds of numbers - # have to figure out what the characters mean # (r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number), # strings, symbols and characters (r'"(\\\\|\\[^\\]|[^"\\])*"', String), (r"'" + valid_name, String.Symbol), (r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char), # constants (r'(#t|#f)', Name.Constant), # special operators (r"('|#|`|,@|,|\.)", Operator), # highlight the keywords (words(_keywords, suffix=r'\b'), Keyword), # first variable in a quoted string like # '(this is syntactic sugar) (r"(?<='\()" + valid_name, Name.Variable), (r"(?<=#\()" + valid_name, Name.Variable), # highlight the builtins (words(_builtins, prefix=r'(?<=\()', suffix=r'\b'), Name.Builtin), # the remaining functions (r'(?<=\()' + valid_name, Name.Function), # find the remaining variables (valid_name, Name.Variable), # the famous parentheses! (r'(\(|\))', Punctuation), (r'(\[|\])', Punctuation), ], } class XtlangLexer(RegexLexer): """An xtlang lexer for the Extempore programming environment. This is a mixture of Scheme and xtlang, really. Keyword lists are taken from the Extempore Emacs mode (https://github.com/extemporelang/extempore-emacs-mode) .. 
versionadded:: 2.2 """ name = 'xtlang' url = 'http://extempore.moso.com.au' aliases = ['extempore'] filenames = ['*.xtm'] mimetypes = [] common_keywords = ( 'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'let', 'begin', 'set!', 'map', 'for-each', ) scheme_keywords = ( 'do', 'delay', 'quasiquote', 'unquote', 'unquote-splicing', 'eval', 'case', 'let*', 'letrec', 'quote', ) xtlang_bind_keywords = ( 'bind-func', 'bind-val', 'bind-lib', 'bind-type', 'bind-alias', 'bind-poly', 'bind-dylib', 'bind-lib-func', 'bind-lib-val', ) xtlang_keywords = ( 'letz', 'memzone', 'cast', 'convert', 'dotimes', 'doloop', ) common_functions = ( '*', '+', '-', '/', '<', '<=', '=', '>', '>=', '%', 'abs', 'acos', 'angle', 'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan', 'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'car', 'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', 'ceiling', 'cons', 'cos', 'floor', 'length', 'list', 'log', 'max', 'member', 'min', 'modulo', 'not', 'reverse', 'round', 'sin', 'sqrt', 'substring', 'tan', 'println', 'random', 'null?', 'callback', 'now', ) scheme_functions = ( 'call-with-current-continuation', 'call-with-input-file', 'call-with-output-file', 'call-with-values', 'call/cc', 'char->integer', 'char-alphabetic?', 'char-ci<=?', 'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase', 'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase', 'char-upper-case?', 'char-whitespace?', 'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', 'char?', 'close-input-port', 'close-output-port', 'complex?', 'current-input-port', 'current-output-port', 'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?', 'equal?', 'eqv?', 'even?', 'exact->inexact', 'exact?', 'exp', 'expt', 'force', 'gcd', 'imag-part', 'inexact->exact', 'inexact?', 'input-port?', 
'integer->char', 'integer?', 'interaction-environment', 'lcm', 'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?', 'load', 'magnitude', 'make-polar', 'make-rectangular', 'make-string', 'make-vector', 'memq', 'memv', 'negative?', 'newline', 'null-environment', 'number->string', 'number?', 'numerator', 'odd?', 'open-input-file', 'open-output-file', 'output-port?', 'pair?', 'peek-char', 'port?', 'positive?', 'procedure?', 'quotient', 'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?', 'remainder', 'scheme-report-environment', 'set-car!', 'set-cdr!', 'string', 'string->list', 'string->number', 'string->symbol', 'string-append', 'string-ci<=?', 'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?', 'string-copy', 'string-fill!', 'string-length', 'string-ref', 'string-set!', 'string<=?', 'string<?', 'string=?', 'string>=?', 'string>?', 'string?', 'symbol->string', 'symbol?', 'transcript-off', 'transcript-on', 'truncate', 'values', 'vector', 'vector->list', 'vector-fill!', 'vector-length', 'vector?', 'with-input-from-file', 'with-output-to-file', 'write', 'write-char', 'zero?', ) xtlang_functions = ( 'toString', 'afill!', 'pfill!', 'tfill!', 'tbind', 'vfill!', 'array-fill!', 'pointer-fill!', 'tuple-fill!', 'vector-fill!', 'free', 'array', 'tuple', 'list', '~', 'cset!', 'cref', '&', 'bor', 'ang-names', '<<', '>>', 'nil', 'printf', 'sprintf', 'null', 'now', 'pset!', 'pref-ptr', 'vset!', 'vref', 'aset!', 'aref', 'aref-ptr', 'tset!', 'tref', 'tref-ptr', 'salloc', 'halloc', 'zalloc', 'alloc', 'schedule', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sqrt', 'expt', 'floor', 'ceiling', 'truncate', 'round', 'llvm_printf', 'push_zone', 'pop_zone', 'memzone', 'callback', 'llvm_sprintf', 'make-array', 'array-set!', 'array-ref', 'array-ref-ptr', 'pointer-set!', 'pointer-ref', 'pointer-ref-ptr', 'stack-alloc', 'heap-alloc', 'zone-alloc', 'make-tuple', 'tuple-set!', 'tuple-ref', 'tuple-ref-ptr', 'closure-set!', 'closure-ref', 'pref', 
'pdref', 'impc_null', 'bitcast', 'void', 'ifret', 'ret->', 'clrun->', 'make-env-zone', 'make-env', '<>', 'dtof', 'ftod', 'i1tof', 'i1tod', 'i1toi8', 'i1toi32', 'i1toi64', 'i8tof', 'i8tod', 'i8toi1', 'i8toi32', 'i8toi64', 'i32tof', 'i32tod', 'i32toi1', 'i32toi8', 'i32toi64', 'i64tof', 'i64tod', 'i64toi1', 'i64toi8', 'i64toi32', ) # valid names for Scheme identifiers (names cannot consist fully # of numbers, but this should be good enough for now) valid_scheme_name = r'[\w!$%&*+,/:<=>?@^~|-]+' # valid characters in xtlang names & types valid_xtlang_name = r'[\w.!-]+' valid_xtlang_type = r'[]{}[\w<>,*/|!-]+' tokens = { # keep track of when we're exiting the xtlang form 'xtlang': [ (r'\(', Punctuation, '#push'), (r'\)', Punctuation, '#pop'), (r'(?<=bind-func\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-val\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-type\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-alias\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-poly\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-lib\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-dylib\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-lib-func\s)' + valid_xtlang_name, Name.Function), (r'(?<=bind-lib-val\s)' + valid_xtlang_name, Name.Function), # type annotations (r':' + valid_xtlang_type, Keyword.Type), # types (r'(<' + valid_xtlang_type + r'>|\|' + valid_xtlang_type + r'\||/' + valid_xtlang_type + r'/|' + valid_xtlang_type + r'\*)\**', Keyword.Type), # keywords (words(xtlang_keywords, prefix=r'(?<=\()'), Keyword), # builtins (words(xtlang_functions, prefix=r'(?<=\()'), Name.Function), include('common'), # variables (valid_xtlang_name, Name.Variable), ], 'scheme': [ # quoted symbols (r"'" + valid_scheme_name, String.Symbol), # char literals (r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char), # special operators (r"('|#|`|,@|,|\.)", Operator), # keywords (words(scheme_keywords, prefix=r'(?<=\()'), Keyword), # builtins (words(scheme_functions, 
prefix=r'(?<=\()'), Name.Function), include('common'), # variables (valid_scheme_name, Name.Variable), ], # common to both xtlang and Scheme 'common': [ # comments (r';.*$', Comment.Single), # whitespaces - usually not relevant (r'\s+', Whitespace), # numbers (r'-?\d+\.\d+', Number.Float), (r'-?\d+', Number.Integer), # binary/oct/hex literals (r'(#b|#o|#x)[\d.]+', Number), # strings (r'"(\\\\|\\[^\\]|[^"\\])*"', String), # true/false constants (r'(#t|#f)', Name.Constant), # keywords (words(common_keywords, prefix=r'(?<=\()'), Keyword), # builtins (words(common_functions, prefix=r'(?<=\()'), Name.Function), # the famous parentheses! (r'(\(|\))', Punctuation), ], 'root': [ # go into xtlang mode (words(xtlang_bind_keywords, prefix=r'(?<=\()', suffix=r'\b'), Keyword, 'xtlang'), include('scheme') ], } class FennelLexer(RegexLexer): """A lexer for the Fennel programming language. Fennel compiles to Lua, so all the Lua builtins are recognized as well as the special forms that are particular to the Fennel compiler. .. versionadded:: 2.3 """ name = 'Fennel' url = 'https://fennel-lang.org' aliases = ['fennel', 'fnl'] filenames = ['*.fnl'] # this list is current as of Fennel version 0.10.0. 
special_forms = ( '#', '%', '*', '+', '-', '->', '->>', '-?>', '-?>>', '.', '..', '/', '//', ':', '<', '<=', '=', '>', '>=', '?.', '^', 'accumulate', 'and', 'band', 'bnot', 'bor', 'bxor', 'collect', 'comment', 'do', 'doc', 'doto', 'each', 'eval-compiler', 'for', 'hashfn', 'icollect', 'if', 'import-macros', 'include', 'length', 'let', 'lshift', 'lua', 'macrodebug', 'match', 'not', 'not=', 'or', 'partial', 'pick-args', 'pick-values', 'quote', 'require-macros', 'rshift', 'set', 'set-forcibly!', 'tset', 'values', 'when', 'while', 'with-open', '~=' ) declarations = ( 'fn', 'global', 'lambda', 'local', 'macro', 'macros', 'var', 'λ' ) builtins = ( '_G', '_VERSION', 'arg', 'assert', 'bit32', 'collectgarbage', 'coroutine', 'debug', 'dofile', 'error', 'getfenv', 'getmetatable', 'io', 'ipairs', 'load', 'loadfile', 'loadstring', 'math', 'next', 'os', 'package', 'pairs', 'pcall', 'print', 'rawequal', 'rawget', 'rawlen', 'rawset', 'require', 'select', 'setfenv', 'setmetatable', 'string', 'table', 'tonumber', 'tostring', 'type', 'unpack', 'xpcall' ) # based on the scheme definition, but disallowing leading digits and # commas, and @ is not allowed. valid_name = r'[a-zA-Z_!$%&*+/:<=>?^~|-][\w!$%&*+/:<=>?^~|\.-]*' tokens = { 'root': [ # the only comment form is a semicolon; goes to the end of the line (r';.*$', Comment.Single), (r',+', Text), (r'\s+', Whitespace), (r'-?\d+\.\d+', Number.Float), (r'-?\d+', Number.Integer), (r'"(\\\\|\\[^\\]|[^"\\])*"', String), (r'(true|false|nil)', Name.Constant), # these are technically strings, but it's worth visually # distinguishing them because their intent is different # from regular strings. (r':' + valid_name, String.Symbol), # special forms are keywords (words(special_forms, suffix=' '), Keyword), # these are ... even more special! 
(words(declarations, suffix=' '), Keyword.Declaration), # lua standard library are builtins (words(builtins, suffix=' '), Name.Builtin), # special-case the vararg symbol (r'\.\.\.', Name.Variable), # regular identifiers (valid_name, Name.Variable), # all your normal paired delimiters for your programming enjoyment (r'(\(|\))', Punctuation), (r'(\[|\])', Punctuation), (r'(\{|\})', Punctuation), # the # symbol is shorthand for a lambda function (r'#', Punctuation), ] }
144,030
Python
49.733005
89
0.562841
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/devicetree.py
""" pygments.lexers.devicetree ~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Devicetree language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, include, default, words from pygments.token import Comment, Keyword, Name, Number, Operator, \ Punctuation, String, Text, Whitespace __all__ = ['DevicetreeLexer'] class DevicetreeLexer(RegexLexer): """ Lexer for Devicetree files. .. versionadded:: 2.7 """ name = 'Devicetree' url = 'https://www.devicetree.org/' aliases = ['devicetree', 'dts'] filenames = ['*.dts', '*.dtsi'] mimetypes = ['text/x-c'] #: optional Whitespace or /*...*/ style comment _ws = r'\s*(?:/[*][^*/]*?[*]/\s*)*' tokens = { 'macro': [ # Include preprocessor directives (C style): (r'(#include)(' + _ws + r')([^\n]+)', bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)), # Define preprocessor directives (C style): (r'(#define)(' + _ws + r')([^\n]+)', bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc)), # devicetree style with file: (r'(/[^*/{]+/)(' + _ws + r')("[^\n{]+")', bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)), # devicetree style with property: (r'(/[^*/{]+/)(' + _ws + r')([^\n;{]*)([;]?)', bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc, Punctuation)), ], 'whitespace': [ (r'\n', Whitespace), (r'\s+', Whitespace), (r'\\\n', Text), # line continuation (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline), # Open until EOF, so no ending delimiter (r'/(\\\n)?[*][\w\W]*', Comment.Multiline), ], 'statements': [ (r'(L?)(")', bygroups(String.Affix, String), 'string'), (r'0x[0-9a-fA-F]+', Number.Hex), (r'\d+', Number.Integer), (r'([^\s{}/*]*)(\s*)(:)', bygroups(Name.Label, Text, Punctuation), '#pop'), (words(('compatible', 'model', 'phandle', 'status', '#address-cells', '#size-cells', 'reg', 'virtual-reg', 'ranges', 'dma-ranges', 'device_type', 
'name'), suffix=r'\b'), Keyword.Reserved), (r'([~!%^&*+=|?:<>/#-])', Operator), (r'[()\[\]{},.]', Punctuation), (r'[a-zA-Z_][\w-]*(?=(?:\s*,\s*[a-zA-Z_][\w-]*|(?:' + _ws + r'))*\s*[=;])', Name), (r'[a-zA-Z_]\w*', Name.Attribute), ], 'root': [ include('whitespace'), include('macro'), # Nodes (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)', bygroups(Name.Function, Operator, Number.Integer, Comment.Multiline, Punctuation), 'node'), default('statement'), ], 'statement': [ include('whitespace'), include('statements'), (';', Punctuation, '#pop'), ], 'node': [ include('whitespace'), include('macro'), (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)', bygroups(Name.Function, Operator, Number.Integer, Comment.Multiline, Punctuation), '#push'), include('statements'), (r'\};', Punctuation, '#pop'), (';', Punctuation), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], }
4,020
Python
35.554545
89
0.451493
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/eiffel.py
""" pygments.lexers.eiffel ~~~~~~~~~~~~~~~~~~~~~~ Lexer for the Eiffel language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, words, bygroups from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ Punctuation, Whitespace __all__ = ['EiffelLexer'] class EiffelLexer(RegexLexer): """ For Eiffel source code. .. versionadded:: 2.0 """ name = 'Eiffel' url = 'http://www.eiffel.com' aliases = ['eiffel'] filenames = ['*.e'] mimetypes = ['text/x-eiffel'] tokens = { 'root': [ (r'[^\S\n]+', Whitespace), (r'--.*?$', Comment.Single), (r'[^\S\n]+', Whitespace), # Please note that keyword and operator are case insensitive. (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant), (r'(?i)(not|xor|implies|or)\b', Operator.Word), (r'(?i)(and)(?:(\s+)(then))?\b', bygroups(Operator.Word, Whitespace, Operator.Word)), (r'(?i)(or)(?:(\s+)(else))?\b', bygroups(Operator.Word, Whitespace, Operator.Word)), (words(( 'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached', 'attribute', 'check', 'class', 'convert', 'create', 'debug', 'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure', 'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if', 'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none', 'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename', 'require', 'rescue', 'retry', 'select', 'separate', 'then', 'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'), Keyword.Reserved), (r'"\[([^\]%]|%(.|\n)|\][^"])*?\]"', String), (r'"([^"%\n]|%.)*?"', String), include('numbers'), (r"'([^'%]|%'|%%)'", String.Char), (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator), (r"([{}():;,.])", Punctuation), (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name), (r'([A-Z][A-Z0-9_]*)', Name.Class), (r'\n+', Whitespace), ], 'numbers': [ (r'0[xX][a-fA-F0-9]+', Number.Hex), 
(r'0[bB][01]+', Number.Bin), (r'0[cC][0-7]+', Number.Oct), (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float), (r'[0-9]+', Number.Integer), ], }
2,690
Python
37.442857
89
0.453903
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/iolang.py
""" pygments.lexers.iolang ~~~~~~~~~~~~~~~~~~~~~~ Lexers for the Io language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ Whitespace __all__ = ['IoLexer'] class IoLexer(RegexLexer): """ For Io (a small, prototype-based programming language) source. .. versionadded:: 0.10 """ name = 'Io' url = 'http://iolanguage.com/' filenames = ['*.io'] aliases = ['io'] mimetypes = ['text/x-iosrc'] tokens = { 'root': [ (r'\n', Whitespace), (r'\s+', Whitespace), # Comments (r'//(.*?)$', Comment.Single), (r'#(.*?)$', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'/\+', Comment.Multiline, 'nestedcomment'), # DoubleQuotedString (r'"(\\\\|\\[^\\]|[^"\\])*"', String), # Operators (r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}', Operator), # keywords (r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b', Keyword), # constants (r'(nil|false|true)\b', Name.Constant), # names (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b', Name.Builtin), (r'[a-zA-Z_]\w*', Name), # numbers (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'\d+', Number.Integer) ], 'nestedcomment': [ (r'[^+/]+', Comment.Multiline), (r'/\+', Comment.Multiline, '#push'), (r'\+/', Comment.Multiline, '#pop'), (r'[+/]', Comment.Multiline), ] }
1,906
Python
29.269841
78
0.447009
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/verification.py
""" pygments.lexers.verification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Intermediate Verification Languages (IVLs). :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, words from pygments.token import Comment, Operator, Keyword, Name, Number, \ Punctuation, Text, Generic __all__ = ['BoogieLexer', 'SilverLexer'] class BoogieLexer(RegexLexer): """ For Boogie source code. .. versionadded:: 2.1 """ name = 'Boogie' url = 'https://boogie-docs.readthedocs.io/en/latest/' aliases = ['boogie'] filenames = ['*.bpl'] tokens = { 'root': [ # Whitespace and Comments (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//[/!](.*?)\n', Comment.Doc), (r'//(.*?)\n', Comment.Single), (r'/\*', Comment.Multiline, 'comment'), (words(( 'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function', 'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires', 'then', 'var', 'while'), suffix=r'\b'), Keyword), (words(('const',), suffix=r'\b'), Keyword.Reserved), (words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type), include('numbers'), (r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator), (r'\{.*?\}', Generic.Emph), #triggers (r"([{}():;,.])", Punctuation), # Identifier (r'[a-zA-Z_]\w*', Name), ], 'comment': [ (r'[^*/]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'numbers': [ (r'[0-9]+', Number.Integer), ], } class SilverLexer(RegexLexer): """ For Silver source code. .. 
versionadded:: 2.2 """ name = 'Silver' aliases = ['silver'] filenames = ['*.sil', '*.vpr'] tokens = { 'root': [ # Whitespace and Comments (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//[/!](.*?)\n', Comment.Doc), (r'//(.*?)\n', Comment.Single), (r'/\*', Comment.Multiline, 'comment'), (words(( 'result', 'true', 'false', 'null', 'method', 'function', 'predicate', 'program', 'domain', 'axiom', 'var', 'returns', 'field', 'define', 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert', 'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh', 'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection', 'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists', 'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique', 'apply', 'package', 'folding', 'label', 'forperm'), suffix=r'\b'), Keyword), (words(('requires', 'ensures', 'invariant'), suffix=r'\b'), Name.Decorator), (words(('Int', 'Perm', 'Bool', 'Ref', 'Rational'), suffix=r'\b'), Keyword.Type), include('numbers'), (r'[!%&*+=|?:<>/\-\[\]]', Operator), (r'\{.*?\}', Generic.Emph), #triggers (r'([{}():;,.])', Punctuation), # Identifier (r'[\w$]\w*', Name), ], 'comment': [ (r'[^*/]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'numbers': [ (r'[0-9]+', Number.Integer), ], }
3,885
Python
32.791304
92
0.440412
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/modula2.py
""" pygments.lexers.modula2 ~~~~~~~~~~~~~~~~~~~~~~~ Multi-Dialect Lexer for Modula-2. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include from pygments.util import get_bool_opt, get_list_opt from pygments.token import Text, Comment, Operator, Keyword, Name, \ String, Number, Punctuation, Error __all__ = ['Modula2Lexer'] # Multi-Dialect Modula-2 Lexer class Modula2Lexer(RegexLexer): """ For Modula-2 source code. The Modula-2 lexer supports several dialects. By default, it operates in fallback mode, recognising the *combined* literals, punctuation symbols and operators of all supported dialects, and the *combined* reserved words and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not differentiating between library defined identifiers. To select a specific dialect, a dialect option may be passed or a dialect tag may be embedded into a source file. Dialect Options: `m2pim` Select PIM Modula-2 dialect. `m2iso` Select ISO Modula-2 dialect. `m2r10` Select Modula-2 R10 dialect. `objm2` Select Objective Modula-2 dialect. The PIM and ISO dialect options may be qualified with a language extension. Language Extensions: `+aglet` Select Aglet Modula-2 extensions, available with m2iso. `+gm2` Select GNU Modula-2 extensions, available with m2pim. `+p1` Select p1 Modula-2 extensions, available with m2iso. `+xds` Select XDS Modula-2 extensions, available with m2iso. Passing a Dialect Option via Unix Commandline Interface Dialect options may be passed to the lexer using the `dialect` key. Only one such option should be passed. If multiple dialect options are passed, the first valid option is used, any subsequent options are ignored. 
Examples: `$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input` Use ISO dialect to render input to HTML output `$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input` Use ISO dialect with p1 extensions to render input to RTF output Embedding a Dialect Option within a source file A dialect option may be embedded in a source file in form of a dialect tag, a specially formatted comment that specifies a dialect option. Dialect Tag EBNF:: dialectTag : OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ; dialectOption : 'm2pim' | 'm2iso' | 'm2r10' | 'objm2' | 'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ; Prefix : '!' ; OpeningCommentDelim : '(*' ; ClosingCommentDelim : '*)' ; No whitespace is permitted between the tokens of a dialect tag. In the event that a source file contains multiple dialect tags, the first tag that contains a valid dialect option will be used and any subsequent dialect tags will be ignored. Ideally, a dialect tag should be placed at the beginning of a source file. An embedded dialect tag overrides a dialect option set via command line. Examples: ``(*!m2r10*) DEFINITION MODULE Foobar; ...`` Use Modula2 R10 dialect to render this source file. ``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...`` Use PIM dialect with GNU extensions to render this source file. Algol Publication Mode: In Algol publication mode, source text is rendered for publication of algorithms in scientific papers and academic texts, following the format of the Revised Algol-60 Language Report. It is activated by passing one of two corresponding styles as an option: `algol` render reserved words lowercase underline boldface and builtins lowercase boldface italic `algol_nu` render reserved words lowercase boldface (no underlining) and builtins lowercase boldface italic The lexer automatically performs the required lowercase conversion when this mode is activated. 
Example: ``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input`` Render input file in Algol publication mode to LaTeX output. Rendering Mode of First Class ADT Identifiers: The rendering of standard library first class ADT identifiers is controlled by option flag "treat_stdlib_adts_as_builtins". When this option is turned on, standard library ADT identifiers are rendered as builtins. When it is turned off, they are rendered as ordinary library identifiers. `treat_stdlib_adts_as_builtins` (default: On) The option is useful for dialects that support ADTs as first class objects and provide ADTs in the standard library that would otherwise be built-in. At present, only Modula-2 R10 supports library ADTs as first class objects and therefore, no ADT identifiers are defined for any other dialects. Example: ``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...`` Render standard library ADTs as ordinary library types. .. versionadded:: 1.3 .. versionchanged:: 2.1 Added multi-dialect support. 
""" name = 'Modula-2' url = 'http://www.modula2.org/' aliases = ['modula2', 'm2'] filenames = ['*.def', '*.mod'] mimetypes = ['text/x-modula2'] flags = re.MULTILINE | re.DOTALL tokens = { 'whitespace': [ (r'\n+', Text), # blank lines (r'\s+', Text), # whitespace ], 'dialecttags': [ # PIM Dialect Tag (r'\(\*!m2pim\*\)', Comment.Special), # ISO Dialect Tag (r'\(\*!m2iso\*\)', Comment.Special), # M2R10 Dialect Tag (r'\(\*!m2r10\*\)', Comment.Special), # ObjM2 Dialect Tag (r'\(\*!objm2\*\)', Comment.Special), # Aglet Extensions Dialect Tag (r'\(\*!m2iso\+aglet\*\)', Comment.Special), # GNU Extensions Dialect Tag (r'\(\*!m2pim\+gm2\*\)', Comment.Special), # p1 Extensions Dialect Tag (r'\(\*!m2iso\+p1\*\)', Comment.Special), # XDS Extensions Dialect Tag (r'\(\*!m2iso\+xds\*\)', Comment.Special), ], 'identifiers': [ (r'([a-zA-Z_$][\w$]*)', Name), ], 'prefixed_number_literals': [ # # Base-2, whole number (r'0b[01]+(\'[01]+)*', Number.Bin), # # Base-16, whole number (r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex), ], 'plain_number_literals': [ # # Base-10, real number with exponent (r'[0-9]+(\'[0-9]+)*' # integral part r'\.[0-9]+(\'[0-9]+)*' # fractional part r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent Number.Float), # # Base-10, real number without exponent (r'[0-9]+(\'[0-9]+)*' # integral part r'\.[0-9]+(\'[0-9]+)*', # fractional part Number.Float), # # Base-10, whole number (r'[0-9]+(\'[0-9]+)*', Number.Integer), ], 'suffixed_number_literals': [ # # Base-8, whole number (r'[0-7]+B', Number.Oct), # # Base-8, character code (r'[0-7]+C', Number.Oct), # # Base-16, number (r'[0-9A-F]+H', Number.Hex), ], 'string_literals': [ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double), (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single), ], 'digraph_operators': [ # Dot Product Operator (r'\*\.', Operator), # Array Concatenation Operator (r'\+>', Operator), # M2R10 + ObjM2 # Inequality Operator (r'<>', Operator), # ISO + PIM # Less-Or-Equal, Subset (r'<=', Operator), # Greater-Or-Equal, Superset (r'>=', 
Operator), # Identity Operator (r'==', Operator), # M2R10 + ObjM2 # Type Conversion Operator (r'::', Operator), # M2R10 + ObjM2 # Assignment Symbol (r':=', Operator), # Postfix Increment Mutator (r'\+\+', Operator), # M2R10 + ObjM2 # Postfix Decrement Mutator (r'--', Operator), # M2R10 + ObjM2 ], 'unigraph_operators': [ # Arithmetic Operators (r'[+-]', Operator), (r'[*/]', Operator), # ISO 80000-2 compliant Set Difference Operator (r'\\', Operator), # M2R10 + ObjM2 # Relational Operators (r'[=#<>]', Operator), # Dereferencing Operator (r'\^', Operator), # Dereferencing Operator Synonym (r'@', Operator), # ISO # Logical AND Operator Synonym (r'&', Operator), # PIM + ISO # Logical NOT Operator Synonym (r'~', Operator), # PIM + ISO # Smalltalk Message Prefix (r'`', Operator), # ObjM2 ], 'digraph_punctuation': [ # Range Constructor (r'\.\.', Punctuation), # Opening Chevron Bracket (r'<<', Punctuation), # M2R10 + ISO # Closing Chevron Bracket (r'>>', Punctuation), # M2R10 + ISO # Blueprint Punctuation (r'->', Punctuation), # M2R10 + ISO # Distinguish |# and # in M2 R10 (r'\|#', Punctuation), # Distinguish ## and # in M2 R10 (r'##', Punctuation), # Distinguish |* and * in M2 R10 (r'\|\*', Punctuation), ], 'unigraph_punctuation': [ # Common Punctuation (r'[()\[\]{},.:;|]', Punctuation), # Case Label Separator Synonym (r'!', Punctuation), # ISO # Blueprint Punctuation (r'\?', Punctuation), # M2R10 + ObjM2 ], 'comments': [ # Single Line Comment (r'^//.*?\n', Comment.Single), # M2R10 + ObjM2 # Block Comment (r'\(\*([^$].*?)\*\)', Comment.Multiline), # Template Block Comment (r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2 ], 'pragmas': [ # ISO Style Pragmas (r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2 # Pascal Style Pragmas (r'\(\*\$.*?\*\)', Comment.Preproc), # PIM ], 'root': [ include('whitespace'), include('dialecttags'), include('pragmas'), include('comments'), include('identifiers'), include('suffixed_number_literals'), # PIM + ISO 
include('prefixed_number_literals'), # M2R10 + ObjM2 include('plain_number_literals'), include('string_literals'), include('digraph_punctuation'), include('digraph_operators'), include('unigraph_punctuation'), include('unigraph_operators'), ] } # C o m m o n D a t a s e t s # Common Reserved Words Dataset common_reserved_words = ( # 37 common reserved words 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', ) # Common Builtins Dataset common_builtins = ( # 16 common builtins 'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL', 'TRUE', ) # Common Pseudo-Module Builtins Dataset common_pseudo_builtins = ( # 4 common pseudo builtins 'ADDRESS', 'BYTE', 'WORD', 'ADR' ) # P I M M o d u l a - 2 D a t a s e t s # Lexemes to Mark as Error Tokens for PIM Modula-2 pim_lexemes_to_reject = ( '!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->', '<<', '>>', '|#', '##', ) # PIM Modula-2 Additional Reserved Words Dataset pim_additional_reserved_words = ( # 3 additional reserved words 'EXPORT', 'QUALIFIED', 'WITH', ) # PIM Modula-2 Additional Builtins Dataset pim_additional_builtins = ( # 16 additional builtins 'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL', ) # PIM Modula-2 Additional Pseudo-Module Builtins Dataset pim_additional_pseudo_builtins = ( # 5 additional pseudo builtins 'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER', ) # I S O M o d u l a - 2 D a t a s e t s # Lexemes to Mark as Error Tokens for ISO Modula-2 iso_lexemes_to_reject = ( '`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->', '<<', '>>', '|#', '##', ) 
# ISO Modula-2 Additional Reserved Words Dataset iso_additional_reserved_words = ( # 9 additional reserved words (ISO 10514-1) 'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED', 'REM', 'RETRY', 'WITH', # 10 additional reserved words (ISO 10514-2 & ISO 10514-3) 'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY', 'REVEAL', 'TRACED', 'UNSAFEGUARDED', ) # ISO Modula-2 Additional Builtins Dataset iso_additional_builtins = ( # 26 additional builtins (ISO 10514-1) 'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH', 'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE', 'TRUNC', 'UNINTERRUBTIBLE', 'VAL', # 5 additional builtins (ISO 10514-2 & ISO 10514-3) 'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF', ) # ISO Modula-2 Additional Pseudo-Module Builtins Dataset iso_additional_pseudo_builtins = ( # 14 additional builtins (SYSTEM) 'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC', 'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR', 'ROTATE', 'SHIFT', 'CAST', 'TSIZE', # 13 additional builtins (COROUTINES) 'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER', 'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN', 'NEWCOROUTINE', 'PROT', 'TRANSFER', # 9 additional builtins (EXCEPTIONS) 'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber', 'ExceptionSource', 'GetMessage', 'IsCurrentSource', 'IsExceptionalExecution', 'RAISE', # 3 additional builtins (TERMINATION) 'TERMINATION', 'IsTerminating', 'HasHalted', # 4 additional builtins (M2EXCEPTION) 'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception', 'indexException', 'rangeException', 'caseSelectException', 'invalidLocation', 'functionException', 'wholeValueException', 'wholeDivException', 'realValueException', 'realDivException', 'complexValueException', 'complexDivException', 'protException', 'sysException', 'coException', 'exException', ) # M o 
d u l a - 2 R 1 0 D a t a s e t s # Lexemes to Mark as Error Tokens for Modula-2 R10 m2r10_lexemes_to_reject = ( '!', '`', '@', '$', '%', '&', '<>', ) # Modula-2 R10 reserved words in addition to the common set m2r10_additional_reserved_words = ( # 12 additional reserved words 'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE', 'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN', # 2 additional reserved words with symbolic assembly option 'ASM', 'REG', ) # Modula-2 R10 builtins in addition to the common set m2r10_additional_builtins = ( # 26 additional builtins 'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD', 'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT', 'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'WRITE', 'WRITEF', ) # Modula-2 R10 Additional Pseudo-Module Builtins Dataset m2r10_additional_pseudo_builtins = ( # 13 additional builtins (TPROPERTIES) 'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL', 'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION', 'TMAXEXP', 'TMINEXP', # 4 additional builtins (CONVERSION) 'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL', # 35 additional builtins (UNSAFE) 'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC', 'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC', 'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR', 'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT', 'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC', # 11 additional builtins (ATOMIC) 'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND', 'BWNAND', 'BWOR', 'BWXOR', # 7 additional builtins (COMPILER) 'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT', 'HASH', # 5 additional builtins (ASSEMBLER) 'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE', ) # O b j e c t i v e M o d u l a - 2 D a t a s e t s # Lexemes to Mark as Error Tokens for Objective Modula-2 
objm2_lexemes_to_reject = ( '!', '$', '%', '&', '<>', ) # Objective Modula-2 Extensions # reserved words in addition to Modula-2 R10 objm2_additional_reserved_words = ( # 16 additional reserved words 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', 'SUPER', 'TRY', ) # Objective Modula-2 Extensions # builtins in addition to Modula-2 R10 objm2_additional_builtins = ( # 3 additional builtins 'OBJECT', 'NO', 'YES', ) # Objective Modula-2 Extensions # pseudo-module builtins in addition to Modula-2 R10 objm2_additional_pseudo_builtins = ( # None ) # A g l e t M o d u l a - 2 D a t a s e t s # Aglet Extensions # reserved words in addition to ISO Modula-2 aglet_additional_reserved_words = ( # None ) # Aglet Extensions # builtins in addition to ISO Modula-2 aglet_additional_builtins = ( # 9 additional builtins 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', 'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32', ) # Aglet Modula-2 Extensions # pseudo-module builtins in addition to ISO Modula-2 aglet_additional_pseudo_builtins = ( # None ) # G N U M o d u l a - 2 D a t a s e t s # GNU Extensions # reserved words in addition to PIM Modula-2 gm2_additional_reserved_words = ( # 10 additional reserved words 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', ) # GNU Extensions # builtins in addition to PIM Modula-2 gm2_additional_builtins = ( # 21 additional builtins 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', ) # GNU Extensions # pseudo-module builtins in addition to PIM Modula-2 gm2_additional_pseudo_builtins = ( # None ) # p 1 M o d u l a - 2 D a t a s e t s # p1 Extensions # reserved words 
in addition to ISO Modula-2 p1_additional_reserved_words = ( # None ) # p1 Extensions # builtins in addition to ISO Modula-2 p1_additional_builtins = ( # None ) # p1 Modula-2 Extensions # pseudo-module builtins in addition to ISO Modula-2 p1_additional_pseudo_builtins = ( # 1 additional builtin 'BCD', ) # X D S M o d u l a - 2 D a t a s e t s # XDS Extensions # reserved words in addition to ISO Modula-2 xds_additional_reserved_words = ( # 1 additional reserved word 'SEQ', ) # XDS Extensions # builtins in addition to ISO Modula-2 xds_additional_builtins = ( # 9 additional builtins 'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN', 'LONGCARD', 'SHORTCARD', 'SHORTINT', ) # XDS Modula-2 Extensions # pseudo-module builtins in addition to ISO Modula-2 xds_additional_pseudo_builtins = ( # 22 additional builtins (SYSTEM) 'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8', 'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE', 'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void' # 3 additional builtins (COMPILER) 'COMPILER', 'OPTION', 'EQUATION' ) # P I M S t a n d a r d L i b r a r y D a t a s e t s # PIM Modula-2 Standard Library Modules Dataset pim_stdlib_module_identifiers = ( 'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage', ) # PIM Modula-2 Standard Library Types Dataset pim_stdlib_type_identifiers = ( 'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission', 'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand', 'DirectoryCommand', ) # PIM Modula-2 Standard Library Procedures Dataset pim_stdlib_proc_identifiers = ( 'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn', 'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite', 'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset', 'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar', 'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName', 'ReadMedium', 'LookupMedium', 'OpenInput', 
'OpenOutput', 'CloseInput', 'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd', 'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd', 'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp', 'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE', ) # PIM Modula-2 Standard Library Variables Dataset pim_stdlib_var_identifiers = ( 'Done', 'termCH', 'in', 'out' ) # PIM Modula-2 Standard Library Constants Dataset pim_stdlib_const_identifiers = ( 'EOL', ) # I S O S t a n d a r d L i b r a r y D a t a s e t s # ISO Modula-2 Standard Library Modules Dataset iso_stdlib_module_identifiers = ( # TO DO ) # ISO Modula-2 Standard Library Types Dataset iso_stdlib_type_identifiers = ( # TO DO ) # ISO Modula-2 Standard Library Procedures Dataset iso_stdlib_proc_identifiers = ( # TO DO ) # ISO Modula-2 Standard Library Variables Dataset iso_stdlib_var_identifiers = ( # TO DO ) # ISO Modula-2 Standard Library Constants Dataset iso_stdlib_const_identifiers = ( # TO DO ) # M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s # Modula-2 R10 Standard Library ADTs Dataset m2r10_stdlib_adt_identifiers = ( 'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET', 'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD', 'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT', 'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64', 'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8', 'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8', 'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32', 'INT64', 'INT128', 'STRING', 'UNISTRING', ) # Modula-2 R10 Standard Library Blueprints Dataset m2r10_stdlib_blueprint_identifiers = ( 'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar', 'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal', 'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray', 
'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet', 'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet', 'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension', 'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath', ) # Modula-2 R10 Standard Library Modules Dataset m2r10_stdlib_module_identifiers = ( 'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO', 'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO', 'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath', 'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath', 'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport', ) # Modula-2 R10 Standard Library Types Dataset m2r10_stdlib_type_identifiers = ( 'File', 'Status', # TO BE COMPLETED ) # Modula-2 R10 Standard Library Procedures Dataset m2r10_stdlib_proc_identifiers = ( 'ALLOCATE', 'DEALLOCATE', 'SIZE', # TO BE COMPLETED ) # Modula-2 R10 Standard Library Variables Dataset m2r10_stdlib_var_identifiers = ( 'stdIn', 'stdOut', 'stdErr', ) # Modula-2 R10 Standard Library Constants Dataset m2r10_stdlib_const_identifiers = ( 'pi', 'tau', ) # D i a l e c t s # Dialect modes dialects = ( 'unknown', 'm2pim', 'm2iso', 'm2r10', 'objm2', 'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds', ) # D a t a b a s e s # Lexemes to Mark as Errors Database lexemes_to_reject_db = { # Lexemes to reject for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Lexemes to reject for PIM Modula-2 'm2pim': ( pim_lexemes_to_reject, ), # Lexemes to reject for ISO Modula-2 'm2iso': ( iso_lexemes_to_reject, ), # Lexemes to reject for Modula-2 R10 'm2r10': ( m2r10_lexemes_to_reject, ), # Lexemes to reject for Objective Modula-2 'objm2': ( objm2_lexemes_to_reject, ), # Lexemes to reject for Aglet Modula-2 'm2iso+aglet': ( iso_lexemes_to_reject, ), # Lexemes to reject for GNU Modula-2 'm2pim+gm2': ( pim_lexemes_to_reject, ), # Lexemes to reject for p1 Modula-2 'm2iso+p1': ( iso_lexemes_to_reject, ), # Lexemes to reject for XDS 
Modula-2 'm2iso+xds': ( iso_lexemes_to_reject, ), } # Reserved Words Database reserved_words_db = { # Reserved words for unknown dialect 'unknown': ( common_reserved_words, pim_additional_reserved_words, iso_additional_reserved_words, m2r10_additional_reserved_words, ), # Reserved words for PIM Modula-2 'm2pim': ( common_reserved_words, pim_additional_reserved_words, ), # Reserved words for Modula-2 R10 'm2iso': ( common_reserved_words, iso_additional_reserved_words, ), # Reserved words for ISO Modula-2 'm2r10': ( common_reserved_words, m2r10_additional_reserved_words, ), # Reserved words for Objective Modula-2 'objm2': ( common_reserved_words, m2r10_additional_reserved_words, objm2_additional_reserved_words, ), # Reserved words for Aglet Modula-2 Extensions 'm2iso+aglet': ( common_reserved_words, iso_additional_reserved_words, aglet_additional_reserved_words, ), # Reserved words for GNU Modula-2 Extensions 'm2pim+gm2': ( common_reserved_words, pim_additional_reserved_words, gm2_additional_reserved_words, ), # Reserved words for p1 Modula-2 Extensions 'm2iso+p1': ( common_reserved_words, iso_additional_reserved_words, p1_additional_reserved_words, ), # Reserved words for XDS Modula-2 Extensions 'm2iso+xds': ( common_reserved_words, iso_additional_reserved_words, xds_additional_reserved_words, ), } # Builtins Database builtins_db = { # Builtins for unknown dialect 'unknown': ( common_builtins, pim_additional_builtins, iso_additional_builtins, m2r10_additional_builtins, ), # Builtins for PIM Modula-2 'm2pim': ( common_builtins, pim_additional_builtins, ), # Builtins for ISO Modula-2 'm2iso': ( common_builtins, iso_additional_builtins, ), # Builtins for ISO Modula-2 'm2r10': ( common_builtins, m2r10_additional_builtins, ), # Builtins for Objective Modula-2 'objm2': ( common_builtins, m2r10_additional_builtins, objm2_additional_builtins, ), # Builtins for Aglet Modula-2 Extensions 'm2iso+aglet': ( common_builtins, iso_additional_builtins, aglet_additional_builtins, ), 
# Builtins for GNU Modula-2 Extensions 'm2pim+gm2': ( common_builtins, pim_additional_builtins, gm2_additional_builtins, ), # Builtins for p1 Modula-2 Extensions 'm2iso+p1': ( common_builtins, iso_additional_builtins, p1_additional_builtins, ), # Builtins for XDS Modula-2 Extensions 'm2iso+xds': ( common_builtins, iso_additional_builtins, xds_additional_builtins, ), } # Pseudo-Module Builtins Database pseudo_builtins_db = { # Builtins for unknown dialect 'unknown': ( common_pseudo_builtins, pim_additional_pseudo_builtins, iso_additional_pseudo_builtins, m2r10_additional_pseudo_builtins, ), # Builtins for PIM Modula-2 'm2pim': ( common_pseudo_builtins, pim_additional_pseudo_builtins, ), # Builtins for ISO Modula-2 'm2iso': ( common_pseudo_builtins, iso_additional_pseudo_builtins, ), # Builtins for ISO Modula-2 'm2r10': ( common_pseudo_builtins, m2r10_additional_pseudo_builtins, ), # Builtins for Objective Modula-2 'objm2': ( common_pseudo_builtins, m2r10_additional_pseudo_builtins, objm2_additional_pseudo_builtins, ), # Builtins for Aglet Modula-2 Extensions 'm2iso+aglet': ( common_pseudo_builtins, iso_additional_pseudo_builtins, aglet_additional_pseudo_builtins, ), # Builtins for GNU Modula-2 Extensions 'm2pim+gm2': ( common_pseudo_builtins, pim_additional_pseudo_builtins, gm2_additional_pseudo_builtins, ), # Builtins for p1 Modula-2 Extensions 'm2iso+p1': ( common_pseudo_builtins, iso_additional_pseudo_builtins, p1_additional_pseudo_builtins, ), # Builtins for XDS Modula-2 Extensions 'm2iso+xds': ( common_pseudo_builtins, iso_additional_pseudo_builtins, xds_additional_pseudo_builtins, ), } # Standard Library ADTs Database stdlib_adts_db = { # Empty entry for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Standard Library ADTs for PIM Modula-2 'm2pim': ( # No first class library types ), # Standard Library ADTs for ISO Modula-2 'm2iso': ( # No first class library types ), # Standard Library ADTs for Modula-2 R10 'm2r10': ( m2r10_stdlib_adt_identifiers, ), # 
Standard Library ADTs for Objective Modula-2 'objm2': ( m2r10_stdlib_adt_identifiers, ), # Standard Library ADTs for Aglet Modula-2 'm2iso+aglet': ( # No first class library types ), # Standard Library ADTs for GNU Modula-2 'm2pim+gm2': ( # No first class library types ), # Standard Library ADTs for p1 Modula-2 'm2iso+p1': ( # No first class library types ), # Standard Library ADTs for XDS Modula-2 'm2iso+xds': ( # No first class library types ), } # Standard Library Modules Database stdlib_modules_db = { # Empty entry for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Standard Library Modules for PIM Modula-2 'm2pim': ( pim_stdlib_module_identifiers, ), # Standard Library Modules for ISO Modula-2 'm2iso': ( iso_stdlib_module_identifiers, ), # Standard Library Modules for Modula-2 R10 'm2r10': ( m2r10_stdlib_blueprint_identifiers, m2r10_stdlib_module_identifiers, m2r10_stdlib_adt_identifiers, ), # Standard Library Modules for Objective Modula-2 'objm2': ( m2r10_stdlib_blueprint_identifiers, m2r10_stdlib_module_identifiers, ), # Standard Library Modules for Aglet Modula-2 'm2iso+aglet': ( iso_stdlib_module_identifiers, ), # Standard Library Modules for GNU Modula-2 'm2pim+gm2': ( pim_stdlib_module_identifiers, ), # Standard Library Modules for p1 Modula-2 'm2iso+p1': ( iso_stdlib_module_identifiers, ), # Standard Library Modules for XDS Modula-2 'm2iso+xds': ( iso_stdlib_module_identifiers, ), } # Standard Library Types Database stdlib_types_db = { # Empty entry for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Standard Library Types for PIM Modula-2 'm2pim': ( pim_stdlib_type_identifiers, ), # Standard Library Types for ISO Modula-2 'm2iso': ( iso_stdlib_type_identifiers, ), # Standard Library Types for Modula-2 R10 'm2r10': ( m2r10_stdlib_type_identifiers, ), # Standard Library Types for Objective Modula-2 'objm2': ( m2r10_stdlib_type_identifiers, ), # Standard Library Types for Aglet Modula-2 'm2iso+aglet': ( iso_stdlib_type_identifiers, ), # 
Standard Library Types for GNU Modula-2 'm2pim+gm2': ( pim_stdlib_type_identifiers, ), # Standard Library Types for p1 Modula-2 'm2iso+p1': ( iso_stdlib_type_identifiers, ), # Standard Library Types for XDS Modula-2 'm2iso+xds': ( iso_stdlib_type_identifiers, ), } # Standard Library Procedures Database stdlib_procedures_db = { # Empty entry for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Standard Library Procedures for PIM Modula-2 'm2pim': ( pim_stdlib_proc_identifiers, ), # Standard Library Procedures for ISO Modula-2 'm2iso': ( iso_stdlib_proc_identifiers, ), # Standard Library Procedures for Modula-2 R10 'm2r10': ( m2r10_stdlib_proc_identifiers, ), # Standard Library Procedures for Objective Modula-2 'objm2': ( m2r10_stdlib_proc_identifiers, ), # Standard Library Procedures for Aglet Modula-2 'm2iso+aglet': ( iso_stdlib_proc_identifiers, ), # Standard Library Procedures for GNU Modula-2 'm2pim+gm2': ( pim_stdlib_proc_identifiers, ), # Standard Library Procedures for p1 Modula-2 'm2iso+p1': ( iso_stdlib_proc_identifiers, ), # Standard Library Procedures for XDS Modula-2 'm2iso+xds': ( iso_stdlib_proc_identifiers, ), } # Standard Library Variables Database stdlib_variables_db = { # Empty entry for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Standard Library Variables for PIM Modula-2 'm2pim': ( pim_stdlib_var_identifiers, ), # Standard Library Variables for ISO Modula-2 'm2iso': ( iso_stdlib_var_identifiers, ), # Standard Library Variables for Modula-2 R10 'm2r10': ( m2r10_stdlib_var_identifiers, ), # Standard Library Variables for Objective Modula-2 'objm2': ( m2r10_stdlib_var_identifiers, ), # Standard Library Variables for Aglet Modula-2 'm2iso+aglet': ( iso_stdlib_var_identifiers, ), # Standard Library Variables for GNU Modula-2 'm2pim+gm2': ( pim_stdlib_var_identifiers, ), # Standard Library Variables for p1 Modula-2 'm2iso+p1': ( iso_stdlib_var_identifiers, ), # Standard Library Variables for XDS Modula-2 'm2iso+xds': ( 
iso_stdlib_var_identifiers, ), } # Standard Library Constants Database stdlib_constants_db = { # Empty entry for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Standard Library Constants for PIM Modula-2 'm2pim': ( pim_stdlib_const_identifiers, ), # Standard Library Constants for ISO Modula-2 'm2iso': ( iso_stdlib_const_identifiers, ), # Standard Library Constants for Modula-2 R10 'm2r10': ( m2r10_stdlib_const_identifiers, ), # Standard Library Constants for Objective Modula-2 'objm2': ( m2r10_stdlib_const_identifiers, ), # Standard Library Constants for Aglet Modula-2 'm2iso+aglet': ( iso_stdlib_const_identifiers, ), # Standard Library Constants for GNU Modula-2 'm2pim+gm2': ( pim_stdlib_const_identifiers, ), # Standard Library Constants for p1 Modula-2 'm2iso+p1': ( iso_stdlib_const_identifiers, ), # Standard Library Constants for XDS Modula-2 'm2iso+xds': ( iso_stdlib_const_identifiers, ), } # M e t h o d s # initialise a lexer instance def __init__(self, **options): # # check dialect options # dialects = get_list_opt(options, 'dialect', []) # for dialect_option in dialects: if dialect_option in self.dialects[1:-1]: # valid dialect option found self.set_dialect(dialect_option) break # # Fallback Mode (DEFAULT) else: # no valid dialect option self.set_dialect('unknown') # self.dialect_set_by_tag = False # # check style options # styles = get_list_opt(options, 'style', []) # # use lowercase mode for Algol style if 'algol' in styles or 'algol_nu' in styles: self.algol_publication_mode = True else: self.algol_publication_mode = False # # Check option flags # self.treat_stdlib_adts_as_builtins = get_bool_opt( options, 'treat_stdlib_adts_as_builtins', True) # # call superclass initialiser RegexLexer.__init__(self, **options) # Set lexer to a specified dialect def set_dialect(self, dialect_id): # # if __debug__: # print 'entered set_dialect with arg: ', dialect_id # # check dialect name against known dialects if dialect_id not in self.dialects: dialect = 
'unknown' # default else: dialect = dialect_id # # compose lexemes to reject set lexemes_to_reject_set = set() # add each list of reject lexemes for this dialect for list in self.lexemes_to_reject_db[dialect]: lexemes_to_reject_set.update(set(list)) # # compose reserved words set reswords_set = set() # add each list of reserved words for this dialect for list in self.reserved_words_db[dialect]: reswords_set.update(set(list)) # # compose builtins set builtins_set = set() # add each list of builtins for this dialect excluding reserved words for list in self.builtins_db[dialect]: builtins_set.update(set(list).difference(reswords_set)) # # compose pseudo-builtins set pseudo_builtins_set = set() # add each list of builtins for this dialect excluding reserved words for list in self.pseudo_builtins_db[dialect]: pseudo_builtins_set.update(set(list).difference(reswords_set)) # # compose ADTs set adts_set = set() # add each list of ADTs for this dialect excluding reserved words for list in self.stdlib_adts_db[dialect]: adts_set.update(set(list).difference(reswords_set)) # # compose modules set modules_set = set() # add each list of builtins for this dialect excluding builtins for list in self.stdlib_modules_db[dialect]: modules_set.update(set(list).difference(builtins_set)) # # compose types set types_set = set() # add each list of types for this dialect excluding builtins for list in self.stdlib_types_db[dialect]: types_set.update(set(list).difference(builtins_set)) # # compose procedures set procedures_set = set() # add each list of procedures for this dialect excluding builtins for list in self.stdlib_procedures_db[dialect]: procedures_set.update(set(list).difference(builtins_set)) # # compose variables set variables_set = set() # add each list of variables for this dialect excluding builtins for list in self.stdlib_variables_db[dialect]: variables_set.update(set(list).difference(builtins_set)) # # compose constants set constants_set = set() # add each list of constants 
for this dialect excluding builtins for list in self.stdlib_constants_db[dialect]: constants_set.update(set(list).difference(builtins_set)) # # update lexer state self.dialect = dialect self.lexemes_to_reject = lexemes_to_reject_set self.reserved_words = reswords_set self.builtins = builtins_set self.pseudo_builtins = pseudo_builtins_set self.adts = adts_set self.modules = modules_set self.types = types_set self.procedures = procedures_set self.variables = variables_set self.constants = constants_set # # if __debug__: # print 'exiting set_dialect' # print ' self.dialect: ', self.dialect # print ' self.lexemes_to_reject: ', self.lexemes_to_reject # print ' self.reserved_words: ', self.reserved_words # print ' self.builtins: ', self.builtins # print ' self.pseudo_builtins: ', self.pseudo_builtins # print ' self.adts: ', self.adts # print ' self.modules: ', self.modules # print ' self.types: ', self.types # print ' self.procedures: ', self.procedures # print ' self.variables: ', self.variables # print ' self.types: ', self.types # print ' self.constants: ', self.constants # Extracts a dialect name from a dialect tag comment string and checks # the extracted name against known dialects. If a match is found, the # matching name is returned, otherwise dialect id 'unknown' is returned def get_dialect_from_dialect_tag(self, dialect_tag): # # if __debug__: # print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag # # constants left_tag_delim = '(*!' 
right_tag_delim = '*)' left_tag_delim_len = len(left_tag_delim) right_tag_delim_len = len(right_tag_delim) indicator_start = left_tag_delim_len indicator_end = -(right_tag_delim_len) # # check comment string for dialect indicator if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \ and dialect_tag.startswith(left_tag_delim) \ and dialect_tag.endswith(right_tag_delim): # # if __debug__: # print 'dialect tag found' # # extract dialect indicator indicator = dialect_tag[indicator_start:indicator_end] # # if __debug__: # print 'extracted: ', indicator # # check against known dialects for index in range(1, len(self.dialects)): # # if __debug__: # print 'dialects[', index, ']: ', self.dialects[index] # if indicator == self.dialects[index]: # # if __debug__: # print 'matching dialect found' # # indicator matches known dialect return indicator else: # indicator does not match any dialect return 'unknown' # default else: # invalid indicator string return 'unknown' # default # intercept the token stream, modify token attributes and return them def get_tokens_unprocessed(self, text): for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): # # check for dialect tag if dialect has not been set by tag if not self.dialect_set_by_tag and token == Comment.Special: indicated_dialect = self.get_dialect_from_dialect_tag(value) if indicated_dialect != 'unknown': # token is a dialect indicator # reset reserved words and builtins self.set_dialect(indicated_dialect) self.dialect_set_by_tag = True # # check for reserved words, predefined and stdlib identifiers if token is Name: if value in self.reserved_words: token = Keyword.Reserved if self.algol_publication_mode: value = value.lower() # elif value in self.builtins: token = Name.Builtin if self.algol_publication_mode: value = value.lower() # elif value in self.pseudo_builtins: token = Name.Builtin.Pseudo if self.algol_publication_mode: value = value.lower() # elif value in self.adts: if not 
self.treat_stdlib_adts_as_builtins: token = Name.Namespace else: token = Name.Builtin.Pseudo if self.algol_publication_mode: value = value.lower() # elif value in self.modules: token = Name.Namespace # elif value in self.types: token = Name.Class # elif value in self.procedures: token = Name.Function # elif value in self.variables: token = Name.Variable # elif value in self.constants: token = Name.Constant # elif token in Number: # # mark prefix number literals as error for PIM and ISO dialects if self.dialect not in ('unknown', 'm2r10', 'objm2'): if "'" in value or value[0:2] in ('0b', '0x', '0u'): token = Error # elif self.dialect in ('m2r10', 'objm2'): # mark base-8 number literals as errors for M2 R10 and ObjM2 if token is Number.Oct: token = Error # mark suffix base-16 literals as errors for M2 R10 and ObjM2 elif token is Number.Hex and 'H' in value: token = Error # mark real numbers with E as errors for M2 R10 and ObjM2 elif token is Number.Float and 'E' in value: token = Error # elif token in Comment: # # mark single line comment as error for PIM and ISO dialects if token is Comment.Single: if self.dialect not in ('unknown', 'm2r10', 'objm2'): token = Error # if token is Comment.Preproc: # mark ISO pragma as error for PIM dialects if value.startswith('<*') and \ self.dialect.startswith('m2pim'): token = Error # mark PIM pragma as comment for other dialects elif value.startswith('(*$') and \ self.dialect != 'unknown' and \ not self.dialect.startswith('m2pim'): token = Comment.Multiline # else: # token is neither Name nor Comment # # mark lexemes matching the dialect's error token set as errors if value in self.lexemes_to_reject: token = Error # # substitute lexemes when in Algol mode if self.algol_publication_mode: if value == '#': value = '≠' elif value == '<=': value = '≤' elif value == '>=': value = '≥' elif value == '==': value = '≡' elif value == '*.': value = '•' # return result yield index, token, value def analyse_text(text): """It's Pascal-like, but 
does not use FUNCTION -- uses PROCEDURE instead.""" # Check if this looks like Pascal, if not, bail out early if not ('(*' in text and '*)' in text and ':=' in text): return result = 0 # Procedure is in Modula2 if re.search(r'\bPROCEDURE\b', text): result += 0.6 # FUNCTION is only valid in Pascal, but not in Modula2 if re.search(r'\bFUNCTION\b', text): result = 0.0 return result
53,063
Python
32.563567
84
0.541526
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_mysql_builtins.py
""" pygments.lexers._mysql_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Self-updating data files for the MySQL lexer. Run with `python -I` to update. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ MYSQL_CONSTANTS = ( 'false', 'null', 'true', 'unknown', ) # At this time, no easily-parsed, definitive list of data types # has been found in the MySQL source code or documentation. (The # `sql/sql_yacc.yy` file is definitive but is difficult to parse.) # Therefore these types are currently maintained manually. # # Some words in this list -- like "long", "national", "precision", # and "varying" -- appear to only occur in combination with other # data type keywords. Therefore they are included as separate words # even though they do not naturally occur in syntax separately. # # This list is also used to strip data types out of the list of # MySQL keywords, which is automatically updated later in the file. # MYSQL_DATATYPES = ( # Numeric data types 'bigint', 'bit', 'bool', 'boolean', 'dec', 'decimal', 'double', 'fixed', 'float', 'float4', 'float8', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer', 'mediumint', 'middleint', 'numeric', 'precision', 'real', 'serial', 'smallint', 'tinyint', # Date and time data types 'date', 'datetime', 'time', 'timestamp', 'year', # String data types 'binary', 'blob', 'char', 'enum', 'long', 'longblob', 'longtext', 'mediumblob', 'mediumtext', 'national', 'nchar', 'nvarchar', 'set', 'text', 'tinyblob', 'tinytext', 'varbinary', 'varchar', 'varcharacter', 'varying', # Spatial data types 'geometry', 'geometrycollection', 'linestring', 'multilinestring', 'multipoint', 'multipolygon', 'point', 'polygon', # JSON data types 'json', ) # Everything below this line is auto-generated from the MySQL source code. # Run this file in Python and it will update itself. 
# ----------------------------------------------------------------------------- MYSQL_FUNCTIONS = ( 'abs', 'acos', 'adddate', 'addtime', 'aes_decrypt', 'aes_encrypt', 'any_value', 'asin', 'atan', 'atan2', 'benchmark', 'bin', 'bin_to_uuid', 'bit_and', 'bit_count', 'bit_length', 'bit_or', 'bit_xor', 'can_access_column', 'can_access_database', 'can_access_event', 'can_access_resource_group', 'can_access_routine', 'can_access_table', 'can_access_trigger', 'can_access_user', 'can_access_view', 'cast', 'ceil', 'ceiling', 'char_length', 'character_length', 'coercibility', 'compress', 'concat', 'concat_ws', 'connection_id', 'conv', 'convert_cpu_id_mask', 'convert_interval_to_user_interval', 'convert_tz', 'cos', 'cot', 'count', 'crc32', 'curdate', 'current_role', 'curtime', 'date_add', 'date_format', 'date_sub', 'datediff', 'dayname', 'dayofmonth', 'dayofweek', 'dayofyear', 'degrees', 'elt', 'exp', 'export_set', 'extract', 'extractvalue', 'field', 'find_in_set', 'floor', 'format_bytes', 'format_pico_time', 'found_rows', 'from_base64', 'from_days', 'from_unixtime', 'get_dd_column_privileges', 'get_dd_create_options', 'get_dd_index_private_data', 'get_dd_index_sub_part_length', 'get_dd_property_key_value', 'get_dd_schema_options', 'get_dd_tablespace_private_data', 'get_lock', 'greatest', 'group_concat', 'gtid_subset', 'gtid_subtract', 'hex', 'icu_version', 'ifnull', 'inet6_aton', 'inet6_ntoa', 'inet_aton', 'inet_ntoa', 'instr', 'internal_auto_increment', 'internal_avg_row_length', 'internal_check_time', 'internal_checksum', 'internal_data_free', 'internal_data_length', 'internal_dd_char_length', 'internal_get_comment_or_error', 'internal_get_dd_column_extra', 'internal_get_enabled_role_json', 'internal_get_hostname', 'internal_get_mandatory_roles_json', 'internal_get_partition_nodegroup', 'internal_get_username', 'internal_get_view_warning_or_error', 'internal_index_column_cardinality', 'internal_index_length', 'internal_is_enabled_role', 'internal_is_mandatory_role', 
'internal_keys_disabled', 'internal_max_data_length', 'internal_table_rows', 'internal_tablespace_autoextend_size', 'internal_tablespace_data_free', 'internal_tablespace_extent_size', 'internal_tablespace_extra', 'internal_tablespace_free_extents', 'internal_tablespace_id', 'internal_tablespace_initial_size', 'internal_tablespace_logfile_group_name', 'internal_tablespace_logfile_group_number', 'internal_tablespace_maximum_size', 'internal_tablespace_row_format', 'internal_tablespace_status', 'internal_tablespace_total_extents', 'internal_tablespace_type', 'internal_tablespace_version', 'internal_update_time', 'is_free_lock', 'is_ipv4', 'is_ipv4_compat', 'is_ipv4_mapped', 'is_ipv6', 'is_used_lock', 'is_uuid', 'is_visible_dd_object', 'isnull', 'json_array', 'json_array_append', 'json_array_insert', 'json_arrayagg', 'json_contains', 'json_contains_path', 'json_depth', 'json_extract', 'json_insert', 'json_keys', 'json_length', 'json_merge', 'json_merge_patch', 'json_merge_preserve', 'json_object', 'json_objectagg', 'json_overlaps', 'json_pretty', 'json_quote', 'json_remove', 'json_replace', 'json_schema_valid', 'json_schema_validation_report', 'json_search', 'json_set', 'json_storage_free', 'json_storage_size', 'json_type', 'json_unquote', 'json_valid', 'last_day', 'last_insert_id', 'lcase', 'least', 'length', 'like_range_max', 'like_range_min', 'ln', 'load_file', 'locate', 'log', 'log10', 'log2', 'lower', 'lpad', 'ltrim', 'make_set', 'makedate', 'maketime', 'master_pos_wait', 'max', 'mbrcontains', 'mbrcoveredby', 'mbrcovers', 'mbrdisjoint', 'mbrequals', 'mbrintersects', 'mbroverlaps', 'mbrtouches', 'mbrwithin', 'md5', 'mid', 'min', 'monthname', 'name_const', 'now', 'nullif', 'oct', 'octet_length', 'ord', 'period_add', 'period_diff', 'pi', 'position', 'pow', 'power', 'ps_current_thread_id', 'ps_thread_id', 'quote', 'radians', 'rand', 'random_bytes', 'regexp_instr', 'regexp_like', 'regexp_replace', 'regexp_substr', 'release_all_locks', 'release_lock', 
'remove_dd_property_key', 'reverse', 'roles_graphml', 'round', 'rpad', 'rtrim', 'sec_to_time', 'session_user', 'sha', 'sha1', 'sha2', 'sign', 'sin', 'sleep', 'soundex', 'source_pos_wait', 'space', 'sqrt', 'st_area', 'st_asbinary', 'st_asgeojson', 'st_astext', 'st_aswkb', 'st_aswkt', 'st_buffer', 'st_buffer_strategy', 'st_centroid', 'st_collect', 'st_contains', 'st_convexhull', 'st_crosses', 'st_difference', 'st_dimension', 'st_disjoint', 'st_distance', 'st_distance_sphere', 'st_endpoint', 'st_envelope', 'st_equals', 'st_exteriorring', 'st_frechetdistance', 'st_geohash', 'st_geomcollfromtext', 'st_geomcollfromtxt', 'st_geomcollfromwkb', 'st_geometrycollectionfromtext', 'st_geometrycollectionfromwkb', 'st_geometryfromtext', 'st_geometryfromwkb', 'st_geometryn', 'st_geometrytype', 'st_geomfromgeojson', 'st_geomfromtext', 'st_geomfromwkb', 'st_hausdorffdistance', 'st_interiorringn', 'st_intersection', 'st_intersects', 'st_isclosed', 'st_isempty', 'st_issimple', 'st_isvalid', 'st_latfromgeohash', 'st_latitude', 'st_length', 'st_linefromtext', 'st_linefromwkb', 'st_lineinterpolatepoint', 'st_lineinterpolatepoints', 'st_linestringfromtext', 'st_linestringfromwkb', 'st_longfromgeohash', 'st_longitude', 'st_makeenvelope', 'st_mlinefromtext', 'st_mlinefromwkb', 'st_mpointfromtext', 'st_mpointfromwkb', 'st_mpolyfromtext', 'st_mpolyfromwkb', 'st_multilinestringfromtext', 'st_multilinestringfromwkb', 'st_multipointfromtext', 'st_multipointfromwkb', 'st_multipolygonfromtext', 'st_multipolygonfromwkb', 'st_numgeometries', 'st_numinteriorring', 'st_numinteriorrings', 'st_numpoints', 'st_overlaps', 'st_pointatdistance', 'st_pointfromgeohash', 'st_pointfromtext', 'st_pointfromwkb', 'st_pointn', 'st_polyfromtext', 'st_polyfromwkb', 'st_polygonfromtext', 'st_polygonfromwkb', 'st_simplify', 'st_srid', 'st_startpoint', 'st_swapxy', 'st_symdifference', 'st_touches', 'st_transform', 'st_union', 'st_validate', 'st_within', 'st_x', 'st_y', 'statement_digest', 'statement_digest_text', 'std', 
'stddev', 'stddev_pop', 'stddev_samp', 'str_to_date', 'strcmp', 'subdate', 'substr', 'substring', 'substring_index', 'subtime', 'sum', 'sysdate', 'system_user', 'tan', 'time_format', 'time_to_sec', 'timediff', 'to_base64', 'to_days', 'to_seconds', 'trim', 'ucase', 'uncompress', 'uncompressed_length', 'unhex', 'unix_timestamp', 'updatexml', 'upper', 'uuid', 'uuid_short', 'uuid_to_bin', 'validate_password_strength', 'var_pop', 'var_samp', 'variance', 'version', 'wait_for_executed_gtid_set', 'wait_until_sql_thread_after_gtids', 'weekday', 'weekofyear', 'yearweek', ) MYSQL_OPTIMIZER_HINTS = ( 'bka', 'bnl', 'derived_condition_pushdown', 'dupsweedout', 'firstmatch', 'group_index', 'hash_join', 'index', 'index_merge', 'intoexists', 'join_fixed_order', 'join_index', 'join_order', 'join_prefix', 'join_suffix', 'loosescan', 'materialization', 'max_execution_time', 'merge', 'mrr', 'no_bka', 'no_bnl', 'no_derived_condition_pushdown', 'no_group_index', 'no_hash_join', 'no_icp', 'no_index', 'no_index_merge', 'no_join_index', 'no_merge', 'no_mrr', 'no_order_index', 'no_range_optimization', 'no_semijoin', 'no_skip_scan', 'order_index', 'qb_name', 'resource_group', 'semijoin', 'set_var', 'skip_scan', 'subquery', ) MYSQL_KEYWORDS = ( 'accessible', 'account', 'action', 'active', 'add', 'admin', 'after', 'against', 'aggregate', 'algorithm', 'all', 'alter', 'always', 'analyze', 'and', 'any', 'array', 'as', 'asc', 'ascii', 'asensitive', 'assign_gtids_to_anonymous_transactions', 'at', 'attribute', 'authentication', 'auto_increment', 'autoextend_size', 'avg', 'avg_row_length', 'backup', 'before', 'begin', 'between', 'binlog', 'block', 'both', 'btree', 'buckets', 'by', 'byte', 'cache', 'call', 'cascade', 'cascaded', 'case', 'catalog_name', 'chain', 'challenge_response', 'change', 'changed', 'channel', 'character', 'charset', 'check', 'checksum', 'cipher', 'class_origin', 'client', 'clone', 'close', 'coalesce', 'code', 'collate', 'collation', 'column', 'column_format', 'column_name', 
'columns', 'comment', 'commit', 'committed', 'compact', 'completion', 'component', 'compressed', 'compression', 'concurrent', 'condition', 'connection', 'consistent', 'constraint', 'constraint_catalog', 'constraint_name', 'constraint_schema', 'contains', 'context', 'continue', 'convert', 'cpu', 'create', 'cross', 'cube', 'cume_dist', 'current', 'current_date', 'current_time', 'current_timestamp', 'current_user', 'cursor', 'cursor_name', 'data', 'database', 'databases', 'datafile', 'day', 'day_hour', 'day_microsecond', 'day_minute', 'day_second', 'deallocate', 'declare', 'default', 'default_auth', 'definer', 'definition', 'delay_key_write', 'delayed', 'delete', 'dense_rank', 'desc', 'describe', 'description', 'deterministic', 'diagnostics', 'directory', 'disable', 'discard', 'disk', 'distinct', 'distinctrow', 'div', 'do', 'drop', 'dual', 'dumpfile', 'duplicate', 'dynamic', 'each', 'else', 'elseif', 'empty', 'enable', 'enclosed', 'encryption', 'end', 'ends', 'enforced', 'engine', 'engine_attribute', 'engines', 'error', 'errors', 'escape', 'escaped', 'event', 'events', 'every', 'except', 'exchange', 'exclude', 'execute', 'exists', 'exit', 'expansion', 'expire', 'explain', 'export', 'extended', 'extent_size', 'factor', 'failed_login_attempts', 'false', 'fast', 'faults', 'fetch', 'fields', 'file', 'file_block_size', 'filter', 'finish', 'first', 'first_value', 'flush', 'following', 'follows', 'for', 'force', 'foreign', 'format', 'found', 'from', 'full', 'fulltext', 'function', 'general', 'generated', 'geomcollection', 'get', 'get_format', 'get_master_public_key', 'get_source_public_key', 'global', 'grant', 'grants', 'group', 'group_replication', 'grouping', 'groups', 'gtid_only', 'handler', 'hash', 'having', 'help', 'high_priority', 'histogram', 'history', 'host', 'hosts', 'hour', 'hour_microsecond', 'hour_minute', 'hour_second', 'identified', 'if', 'ignore', 'ignore_server_ids', 'import', 'in', 'inactive', 'index', 'indexes', 'infile', 'initial', 'initial_size', 
'initiate', 'inner', 'inout', 'insensitive', 'insert', 'insert_method', 'install', 'instance', 'interval', 'into', 'invisible', 'invoker', 'io', 'io_after_gtids', 'io_before_gtids', 'io_thread', 'ipc', 'is', 'isolation', 'issuer', 'iterate', 'join', 'json_table', 'json_value', 'key', 'key_block_size', 'keyring', 'keys', 'kill', 'lag', 'language', 'last', 'last_value', 'lateral', 'lead', 'leading', 'leave', 'leaves', 'left', 'less', 'level', 'like', 'limit', 'linear', 'lines', 'list', 'load', 'local', 'localtime', 'localtimestamp', 'lock', 'locked', 'locks', 'logfile', 'logs', 'loop', 'low_priority', 'master', 'master_auto_position', 'master_bind', 'master_compression_algorithms', 'master_connect_retry', 'master_delay', 'master_heartbeat_period', 'master_host', 'master_log_file', 'master_log_pos', 'master_password', 'master_port', 'master_public_key_path', 'master_retry_count', 'master_ssl', 'master_ssl_ca', 'master_ssl_capath', 'master_ssl_cert', 'master_ssl_cipher', 'master_ssl_crl', 'master_ssl_crlpath', 'master_ssl_key', 'master_ssl_verify_server_cert', 'master_tls_ciphersuites', 'master_tls_version', 'master_user', 'master_zstd_compression_level', 'match', 'max_connections_per_hour', 'max_queries_per_hour', 'max_rows', 'max_size', 'max_updates_per_hour', 'max_user_connections', 'maxvalue', 'medium', 'member', 'memory', 'merge', 'message_text', 'microsecond', 'migrate', 'min_rows', 'minute', 'minute_microsecond', 'minute_second', 'mod', 'mode', 'modifies', 'modify', 'month', 'mutex', 'mysql_errno', 'name', 'names', 'natural', 'ndb', 'ndbcluster', 'nested', 'network_namespace', 'never', 'new', 'next', 'no', 'no_wait', 'no_write_to_binlog', 'nodegroup', 'none', 'not', 'nowait', 'nth_value', 'ntile', 'null', 'nulls', 'number', 'of', 'off', 'offset', 'oj', 'old', 'on', 'one', 'only', 'open', 'optimize', 'optimizer_costs', 'option', 'optional', 'optionally', 'options', 'or', 'order', 'ordinality', 'organization', 'others', 'out', 'outer', 'outfile', 'over', 'owner', 
'pack_keys', 'page', 'parser', 'partial', 'partition', 'partitioning', 'partitions', 'password', 'password_lock_time', 'path', 'percent_rank', 'persist', 'persist_only', 'phase', 'plugin', 'plugin_dir', 'plugins', 'port', 'precedes', 'preceding', 'prepare', 'preserve', 'prev', 'primary', 'privilege_checks_user', 'privileges', 'procedure', 'process', 'processlist', 'profile', 'profiles', 'proxy', 'purge', 'quarter', 'query', 'quick', 'random', 'range', 'rank', 'read', 'read_only', 'read_write', 'reads', 'rebuild', 'recover', 'recursive', 'redo_buffer_size', 'redundant', 'reference', 'references', 'regexp', 'registration', 'relay', 'relay_log_file', 'relay_log_pos', 'relay_thread', 'relaylog', 'release', 'reload', 'remove', 'rename', 'reorganize', 'repair', 'repeat', 'repeatable', 'replace', 'replica', 'replicas', 'replicate_do_db', 'replicate_do_table', 'replicate_ignore_db', 'replicate_ignore_table', 'replicate_rewrite_db', 'replicate_wild_do_table', 'replicate_wild_ignore_table', 'replication', 'require', 'require_row_format', 'require_table_primary_key_check', 'reset', 'resignal', 'resource', 'respect', 'restart', 'restore', 'restrict', 'resume', 'retain', 'return', 'returned_sqlstate', 'returning', 'returns', 'reuse', 'reverse', 'revoke', 'right', 'rlike', 'role', 'rollback', 'rollup', 'rotate', 'routine', 'row', 'row_count', 'row_format', 'row_number', 'rows', 'rtree', 'savepoint', 'schedule', 'schema', 'schema_name', 'schemas', 'second', 'second_microsecond', 'secondary', 'secondary_engine', 'secondary_engine_attribute', 'secondary_load', 'secondary_unload', 'security', 'select', 'sensitive', 'separator', 'serializable', 'server', 'session', 'share', 'show', 'shutdown', 'signal', 'signed', 'simple', 'skip', 'slave', 'slow', 'snapshot', 'socket', 'some', 'soname', 'sounds', 'source', 'source_auto_position', 'source_bind', 'source_compression_algorithms', 'source_connect_retry', 'source_connection_auto_failover', 'source_delay', 'source_heartbeat_period', 
'source_host', 'source_log_file', 'source_log_pos', 'source_password', 'source_port', 'source_public_key_path', 'source_retry_count', 'source_ssl', 'source_ssl_ca', 'source_ssl_capath', 'source_ssl_cert', 'source_ssl_cipher', 'source_ssl_crl', 'source_ssl_crlpath', 'source_ssl_key', 'source_ssl_verify_server_cert', 'source_tls_ciphersuites', 'source_tls_version', 'source_user', 'source_zstd_compression_level', 'spatial', 'specific', 'sql', 'sql_after_gtids', 'sql_after_mts_gaps', 'sql_before_gtids', 'sql_big_result', 'sql_buffer_result', 'sql_calc_found_rows', 'sql_no_cache', 'sql_small_result', 'sql_thread', 'sql_tsi_day', 'sql_tsi_hour', 'sql_tsi_minute', 'sql_tsi_month', 'sql_tsi_quarter', 'sql_tsi_second', 'sql_tsi_week', 'sql_tsi_year', 'sqlexception', 'sqlstate', 'sqlwarning', 'srid', 'ssl', 'stacked', 'start', 'starting', 'starts', 'stats_auto_recalc', 'stats_persistent', 'stats_sample_pages', 'status', 'stop', 'storage', 'stored', 'straight_join', 'stream', 'string', 'subclass_origin', 'subject', 'subpartition', 'subpartitions', 'super', 'suspend', 'swaps', 'switches', 'system', 'table', 'table_checksum', 'table_name', 'tables', 'tablespace', 'temporary', 'temptable', 'terminated', 'than', 'then', 'thread_priority', 'ties', 'timestampadd', 'timestampdiff', 'tls', 'to', 'trailing', 'transaction', 'trigger', 'triggers', 'true', 'truncate', 'type', 'types', 'unbounded', 'uncommitted', 'undefined', 'undo', 'undo_buffer_size', 'undofile', 'unicode', 'uninstall', 'union', 'unique', 'unknown', 'unlock', 'unregister', 'unsigned', 'until', 'update', 'upgrade', 'usage', 'use', 'use_frm', 'user', 'user_resources', 'using', 'utc_date', 'utc_time', 'utc_timestamp', 'validation', 'value', 'values', 'variables', 'vcpu', 'view', 'virtual', 'visible', 'wait', 'warnings', 'week', 'weight_string', 'when', 'where', 'while', 'window', 'with', 'without', 'work', 'wrapper', 'write', 'x509', 'xa', 'xid', 'xml', 'xor', 'year_month', 'zerofill', 'zone', ) if __name__ == '__main__': 
# pragma: no cover import re from urllib.request import urlopen from pygments.util import format_lines # MySQL source code SOURCE_URL = 'https://github.com/mysql/mysql-server/raw/8.0' LEX_URL = SOURCE_URL + '/sql/lex.h' ITEM_CREATE_URL = SOURCE_URL + '/sql/item_create.cc' def update_myself(): # Pull content from lex.h. lex_file = urlopen(LEX_URL).read().decode('utf8', errors='ignore') keywords = parse_lex_keywords(lex_file) functions = parse_lex_functions(lex_file) optimizer_hints = parse_lex_optimizer_hints(lex_file) # Parse content in item_create.cc. item_create_file = urlopen(ITEM_CREATE_URL).read().decode('utf8', errors='ignore') functions.update(parse_item_create_functions(item_create_file)) # Remove data types from the set of keywords. keywords -= set(MYSQL_DATATYPES) update_content('MYSQL_FUNCTIONS', tuple(sorted(functions))) update_content('MYSQL_KEYWORDS', tuple(sorted(keywords))) update_content('MYSQL_OPTIMIZER_HINTS', tuple(sorted(optimizer_hints))) def parse_lex_keywords(f): """Parse keywords in lex.h.""" results = set() for m in re.finditer(r'{SYM(?:_HK)?\("(?P<keyword>[a-z0-9_]+)",', f, flags=re.I): results.add(m.group('keyword').lower()) if not results: raise ValueError('No keywords found') return results def parse_lex_optimizer_hints(f): """Parse optimizer hints in lex.h.""" results = set() for m in re.finditer(r'{SYM_H\("(?P<keyword>[a-z0-9_]+)",', f, flags=re.I): results.add(m.group('keyword').lower()) if not results: raise ValueError('No optimizer hints found') return results def parse_lex_functions(f): """Parse MySQL function names from lex.h.""" results = set() for m in re.finditer(r'{SYM_FN?\("(?P<function>[a-z0-9_]+)",', f, flags=re.I): results.add(m.group('function').lower()) if not results: raise ValueError('No lex functions found') return results def parse_item_create_functions(f): """Parse MySQL function names from item_create.cc.""" results = set() for m in re.finditer(r'{"(?P<function>[^"]+?)",\s*SQL_F[^(]+?\(', f, flags=re.I): 
results.add(m.group('function').lower()) if not results: raise ValueError('No item_create functions found') return results def update_content(field_name, content): """Overwrite this file with content parsed from MySQL's source code.""" with open(__file__) as f: data = f.read() # Line to start/end inserting re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % field_name, re.M | re.S) m = re_match.search(data) if not m: raise ValueError('Could not find an existing definition for %s' % field_name) new_block = format_lines(field_name, content) data = data[:m.start()] + new_block + data[m.end():] with open(__file__, 'w', newline='\n') as f: f.write(data) update_myself()
25,806
Python
18.316617
90
0.534837
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/php.py
""" pygments.lexers.php ~~~~~~~~~~~~~~~~~~~ Lexers for PHP and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import Lexer, RegexLexer, include, bygroups, default, \ using, this, words, do_insertions, line_re from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Other, Generic from pygments.util import get_bool_opt, get_list_opt, shebang_matches __all__ = ['ZephirLexer', 'PsyshConsoleLexer', 'PhpLexer'] class ZephirLexer(RegexLexer): """ For Zephir language source code. Zephir is a compiled high level language aimed to the creation of C-extensions for PHP. .. versionadded:: 2.0 """ name = 'Zephir' url = 'http://zephir-lang.com/' aliases = ['zephir'] filenames = ['*.zep'] zephir_keywords = ['fetch', 'echo', 'isset', 'empty'] zephir_type = ['bit', 'bits', 'string'] flags = re.DOTALL | re.MULTILINE tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), (r'/', Operator, '#pop'), default('#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'^(?=\s|/)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|' r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|' r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|' r'empty)\b', Keyword, 'slashstartsregex'), (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'), 
(r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|' r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|' r'float|unsigned|private|protected|public|short|static|self|throws|reverse|' r'transient|volatile|readonly)\b', Keyword.Reserved), (r'(true|false|null|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|' r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|' r'window)\b', Name.Builtin), (r'[$a-zA-Z_][\w\\]*', Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double), (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single), ] } class PsyshConsoleLexer(Lexer): """ For PsySH console output, such as: .. sourcecode:: psysh >>> $greeting = function($name): string { ... return "Hello, {$name}"; ... }; => Closure($name): string {#2371 …3} >>> $greeting('World') => "Hello, World" .. versionadded:: 2.7 """ name = 'PsySH console session for PHP' url = 'https://psysh.org/' aliases = ['psysh'] def __init__(self, **options): options['startinline'] = True Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): phplexer = PhpLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(text): line = match.group() if line.startswith('>>> ') or line.startswith('... '): insertions.append((len(curcode), [(0, Generic.Prompt, line[:4])])) curcode += line[4:] elif line.rstrip() == '...': insertions.append((len(curcode), [(0, Generic.Prompt, '...')])) curcode += line[3:] else: if curcode: yield from do_insertions( insertions, phplexer.get_tokens_unprocessed(curcode)) curcode = '' insertions = [] yield match.start(), Generic.Output, line if curcode: yield from do_insertions(insertions, phplexer.get_tokens_unprocessed(curcode)) class PhpLexer(RegexLexer): """ For PHP source code. For PHP embedded in HTML, use the `HtmlPhpLexer`. 
Additional options accepted: `startinline` If given and ``True`` the lexer starts highlighting with php code (i.e.: no starting ``<?php`` required). The default is ``False``. `funcnamehighlighting` If given and ``True``, highlight builtin function names (default: ``True``). `disabledmodules` If given, must be a list of module names whose function names should not be highlighted. By default all modules are highlighted except the special ``'unknown'`` module that includes functions that are known to php but are undocumented. To get a list of allowed modules have a look into the `_php_builtins` module: .. sourcecode:: pycon >>> from pygments.lexers._php_builtins import MODULES >>> MODULES.keys() ['PHP Options/Info', 'Zip', 'dba', ...] In fact the names of those modules match the module names from the php documentation. """ name = 'PHP' url = 'https://www.php.net/' aliases = ['php', 'php3', 'php4', 'php5'] filenames = ['*.php', '*.php[345]', '*.inc'] mimetypes = ['text/x-php'] # Note that a backslash is included, PHP uses a backslash as a namespace # separator. _ident_inner = r'(?:[\\_a-z]|[^\x00-\x7f])(?:[\\\w]|[^\x00-\x7f])*' # But not inside strings. 
_ident_nons = r'(?:[_a-z]|[^\x00-\x7f])(?:\w|[^\x00-\x7f])*' flags = re.IGNORECASE | re.DOTALL | re.MULTILINE tokens = { 'root': [ (r'<\?(php)?', Comment.Preproc, 'php'), (r'[^<]+', Other), (r'<', Other) ], 'php': [ (r'\?>', Comment.Preproc, '#pop'), (r'(<<<)([\'"]?)(' + _ident_nons + r')(\2\n.*?\n\s*)(\3)(;?)(\n)', bygroups(String, String, String.Delimiter, String, String.Delimiter, Punctuation, Text)), (r'\s+', Text), (r'#.*?\n', Comment.Single), (r'//.*?\n', Comment.Single), # put the empty comment here, it is otherwise seen as # the start of a docstring (r'/\*\*/', Comment.Multiline), (r'/\*\*.*?\*/', String.Doc), (r'/\*.*?\*/', Comment.Multiline), (r'(->|::)(\s*)(' + _ident_nons + ')', bygroups(Operator, Text, Name.Attribute)), (r'[~!%^&*+=|:.<>/@-]+', Operator), (r'\?', Operator), # don't add to the charclass above! (r'[\[\]{}();,]+', Punctuation), (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)), (r'(function)(\s+)(&?)(\s*)', bygroups(Keyword, Text, Operator, Text), 'functionname'), (r'(const)(\s+)(' + _ident_inner + ')', bygroups(Keyword, Text, Name.Constant)), (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|' r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|' r'FALSE|print|for|require|continue|foreach|require_once|' r'declare|return|default|static|do|switch|die|stdClass|' r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|' r'virtual|endfor|include_once|while|endforeach|global|' r'endif|list|endswitch|new|endwhile|not|' r'array|E_ALL|NULL|final|php_user_filter|interface|' r'implements|public|private|protected|abstract|clone|try|' r'catch|throw|this|use|namespace|trait|yield|' r'finally|match)\b', Keyword), (r'(true|false|null)\b', Keyword.Constant), include('magicconstants'), (r'\$\{\$+' + _ident_inner + r'\}', Name.Variable), (r'\$+' + _ident_inner, Name.Variable), (_ident_inner, Name.Other), (r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float), 
(r'\d+e[+-]?[0-9]+', Number.Float), (r'0[0-7]+', Number.Oct), (r'0x[a-f0-9]+', Number.Hex), (r'\d+', Number.Integer), (r'0b[01]+', Number.Bin), (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single), (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick), (r'"', String.Double, 'string'), ], 'magicfuncs': [ # source: http://php.net/manual/en/language.oop5.magic.php (words(( '__construct', '__destruct', '__call', '__callStatic', '__get', '__set', '__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke', '__set_state', '__clone', '__debugInfo',), suffix=r'\b'), Name.Function.Magic), ], 'magicconstants': [ # source: http://php.net/manual/en/language.constants.predefined.php (words(( '__LINE__', '__FILE__', '__DIR__', '__FUNCTION__', '__CLASS__', '__TRAIT__', '__METHOD__', '__NAMESPACE__',), suffix=r'\b'), Name.Constant), ], 'classname': [ (_ident_inner, Name.Class, '#pop') ], 'functionname': [ include('magicfuncs'), (_ident_inner, Name.Function, '#pop'), default('#pop') ], 'string': [ (r'"', String.Double, '#pop'), (r'[^{$"\\]+', String.Double), (r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape), (r'\$' + _ident_nons + r'(\[\S+?\]|->' + _ident_nons + ')?', String.Interpol), (r'(\{\$\{)(.*?)(\}\})', bygroups(String.Interpol, using(this, _startinline=True), String.Interpol)), (r'(\{)(\$.*?)(\})', bygroups(String.Interpol, using(this, _startinline=True), String.Interpol)), (r'(\$\{)(\S+)(\})', bygroups(String.Interpol, Name.Variable, String.Interpol)), (r'[${\\]', String.Double) ], } def __init__(self, **options): self.funcnamehighlighting = get_bool_opt( options, 'funcnamehighlighting', True) self.disabledmodules = get_list_opt( options, 'disabledmodules', ['unknown']) self.startinline = get_bool_opt(options, 'startinline', False) # private option argument for the lexer itself if '_startinline' in options: self.startinline = options.pop('_startinline') # collect activated functions in a set self._functions = set() if self.funcnamehighlighting: from 
pygments.lexers._php_builtins import MODULES for key, value in MODULES.items(): if key not in self.disabledmodules: self._functions.update(value) RegexLexer.__init__(self, **options) def get_tokens_unprocessed(self, text): stack = ['root'] if self.startinline: stack.append('php') for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text, stack): if token is Name.Other: if value in self._functions: yield index, Name.Builtin, value continue yield index, token, value def analyse_text(text): if shebang_matches(text, r'php'): return True rv = 0.0 if re.search(r'<\?(?!xml)', text): rv += 0.3 return rv
12,503
Python
38.075
89
0.497481
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/javascript.py
""" pygments.lexers.javascript ~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for JavaScript and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import bygroups, combined, default, do_insertions, include, \ inherit, Lexer, RegexLexer, this, using, words, line_re from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Other, Generic, Whitespace from pygments.util import get_bool_opt import pygments.unistring as uni __all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer', 'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer', 'CoffeeScriptLexer', 'MaskLexer', 'EarlGreyLexer', 'JuttleLexer', 'NodeConsoleLexer'] JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + ']|\\\\u[a-fA-F0-9]{4})') JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Mn', 'Mc', 'Nd', 'Pc') + '\u200c\u200d]|\\\\u[a-fA-F0-9]{4})') JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*' class JavascriptLexer(RegexLexer): """ For JavaScript source code. """ name = 'JavaScript' url = 'https://www.ecma-international.org/publications-and-standards/standards/ecma-262/' aliases = ['javascript', 'js'] filenames = ['*.js', '*.jsm', '*.mjs', '*.cjs'] mimetypes = ['application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript'] flags = re.DOTALL | re.MULTILINE tokens = { 'commentsandwhitespace': [ (r'\s+', Whitespace), (r'<!--', Comment), (r'//.*?$', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gimuysd]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop') ], 'badregex': [ (r'\n', Whitespace, '#pop') ], 'root': [ (r'\A#! 
?/.*?$', Comment.Hashbang), # recognized by node.js (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), # Numeric literals (r'0[bB][01]+n?', Number.Bin), (r'0[oO]?[0-7]+n?', Number.Oct), # Browsers support "0o7" and "07" (< ES5) notations (r'0[xX][0-9a-fA-F]+n?', Number.Hex), (r'[0-9]+n', Number.Integer), # Javascript BigInt requires an "n" postfix # Javascript doesn't have actual integer literals, so every other # numeric literal is handled by the regex below (including "normal") # integers (r'(\.[0-9]+|[0-9]+\.[0-9]*|[0-9]+)([eE][-+]?[0-9]+)?', Number.Float), (r'\.\.\.|=>', Punctuation), (r'\+\+|--|~|\?\?=?|\?|:|\\(?=\n)|' r'(<<|>>>?|==?|!=?|(?:\*\*|\|\||&&|[-<>+*%&|^/]))=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(typeof|instanceof|in|void|delete|new)\b', Operator.Word, 'slashstartsregex'), # Match stuff like: constructor (r'\b(constructor|from|as)\b', Keyword.Reserved), (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|' r'throw|try|catch|finally|yield|await|async|this|of|static|export|' r'import|debugger|extends|super)\b', Keyword, 'slashstartsregex'), (r'(var|let|const|with|function|class)\b', Keyword.Declaration, 'slashstartsregex'), (r'(abstract|boolean|byte|char|double|enum|final|float|goto|' r'implements|int|interface|long|native|package|private|protected|' r'public|short|synchronized|throws|transient|volatile)\b', Keyword.Reserved), (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|BigInt|Function|Math|ArrayBuffer|' r'Number|Object|RegExp|String|Promise|Proxy|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'eval|isFinite|isNaN|parseFloat|parseInt|DataView|' r'document|window|globalThis|global|Symbol|Intl|' r'WeakSet|WeakMap|Set|Map|Reflect|JSON|Atomics|' r'Int(?:8|16|32)Array|BigInt64Array|Float32Array|Float64Array|' r'Uint8ClampedArray|Uint(?:8|16|32)Array|BigUint64Array)\b', 
Name.Builtin), (r'((?:Eval|Internal|Range|Reference|Syntax|Type|URI)?Error)\b', Name.Exception), # Match stuff like: super(argument, list) (r'(super)(\s*)(\([\w,?.$\s]+\s*\))', bygroups(Keyword, Whitespace), 'slashstartsregex'), # Match stuff like: function() {...} (r'([a-zA-Z_?.$][\w?.$]*)(?=\(\) \{)', Name.Other, 'slashstartsregex'), (JS_IDENT, Name.Other), (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double), (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single), (r'`', String.Backtick, 'interp'), ], 'interp': [ (r'`', String.Backtick, '#pop'), (r'\\.', String.Backtick), (r'\$\{', String.Interpol, 'interp-inside'), (r'\$', String.Backtick), (r'[^`\\$]+', String.Backtick), ], 'interp-inside': [ # TODO: should this include single-line comments and allow nesting strings? (r'\}', String.Interpol, '#pop'), include('root'), ], } class TypeScriptLexer(JavascriptLexer): """ For TypeScript source code. .. versionadded:: 1.6 """ name = 'TypeScript' url = 'https://www.typescriptlang.org/' aliases = ['typescript', 'ts'] filenames = ['*.ts'] mimetypes = ['application/x-typescript', 'text/x-typescript'] # Higher priority than the TypoScriptLexer, as TypeScript is far more # common these days priority = 0.5 tokens = { 'root': [ (r'(abstract|implements|private|protected|public|readonly)\b', Keyword, 'slashstartsregex'), (r'(enum|interface|override)\b', Keyword.Declaration, 'slashstartsregex'), (r'\b(declare|type)\b', Keyword.Reserved), # Match variable type keywords (r'\b(string|boolean|number)\b', Keyword.Type), # Match stuff like: module name {...} (r'\b(module)(\s*)([\w?.$]+)(\s*)', bygroups(Keyword.Reserved, Whitespace, Name.Other, Whitespace), 'slashstartsregex'), # Match stuff like: (function: return type) (r'([\w?.$]+)(\s*)(:)(\s*)([\w?.$]+)', bygroups(Name.Other, Whitespace, Operator, Whitespace, Keyword.Type)), # Match stuff like: Decorators (r'@' + JS_IDENT, Keyword.Declaration), inherit, ], } class KalLexer(RegexLexer): """ For Kal source code. .. 
versionadded:: 2.0 """ name = 'Kal' url = 'http://rzimmerman.github.io/kal' aliases = ['kal'] filenames = ['*.kal'] mimetypes = ['text/kal', 'application/kal'] flags = re.DOTALL tokens = { 'commentsandwhitespace': [ (r'\s+', Whitespace), (r'###[^#].*?###', Comment.Multiline), (r'(#(?!##[^#]).*?)(\n)', bygroups(Comment.Single, Whitespace)), ], 'functiondef': [ (r'([$a-zA-Z_][\w$]*)(\s*)', bygroups(Name.Function, Whitespace), '#pop'), include('commentsandwhitespace'), ], 'classdef': [ (r'\b(inherits)(\s+)(from)\b', bygroups(Keyword, Whitespace, Keyword)), (r'([$a-zA-Z_][\w$]*)(?=\s*\n)', Name.Class, '#pop'), (r'[$a-zA-Z_][\w$]*\b', Name.Class), include('commentsandwhitespace'), ], 'listcomprehension': [ (r'\]', Punctuation, '#pop'), (r'\b(property|value)\b', Keyword), include('root'), ], 'waitfor': [ (r'\n', Whitespace, '#pop'), (r'\bfrom\b', Keyword), include('root'), ], 'root': [ include('commentsandwhitespace'), (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gimuysd]+\b|\B)', String.Regex), (r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?', Operator), (r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|' r'doesnt\s+exist)\b', Operator.Word), (r'(\([^()]+\))?(\s*)(>)', bygroups(Name.Function, Whitespace, Punctuation)), (r'[{(]', Punctuation), (r'\[', Punctuation, 'listcomprehension'), (r'[})\].,]', Punctuation), (r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'), (r'\bclass\b', Keyword.Declaration, 'classdef'), (r'\b(safe(?=\s))?(\s*)(wait(?=\s))(\s+)(for)\b', bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword), 'waitfor'), (r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance), (r'(?<![.$])(run)(\s+)(in)(\s+)(parallel)\b', bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)), (r'(?<![.$])(for)(\s+)(parallel|series)?\b', bygroups(Keyword, Whitespace, Keyword)), (r'(?<![.$])(except)(\s+)(when)?\b', bygroups(Keyword, Whitespace, Keyword)), (r'(?<![.$])(fail)(\s+)(with)?\b', bygroups(Keyword, Whitespace, Keyword)), 
(r'(?<![.$])(inherits)(\s+)(from)?\b', bygroups(Keyword, Whitespace, Keyword)), (r'(?<![.$])(for)(\s+)(parallel|series)?\b', bygroups(Keyword, Whitespace, Keyword)), (words(( 'in', 'of', 'while', 'until', 'break', 'return', 'continue', 'when', 'if', 'unless', 'else', 'otherwise', 'throw', 'raise', 'try', 'catch', 'finally', 'new', 'delete', 'typeof', 'instanceof', 'super'), prefix=r'(?<![.$])', suffix=r'\b'), Keyword), (words(( 'true', 'false', 'yes', 'no', 'on', 'off', 'null', 'nothing', 'none', 'NaN', 'Infinity', 'undefined'), prefix=r'(?<![.$])', suffix=r'\b'), Keyword.Constant), (words(( 'Array', 'Boolean', 'Date', 'Error', 'Function', 'Math', 'Number', 'Object', 'RegExp', 'String', 'decodeURI', 'decodeURIComponent', 'encodeURI', 'encodeURIComponent', 'eval', 'isFinite', 'isNaN', 'isSafeInteger', 'parseFloat', 'parseInt', 'document', 'window', 'globalThis', 'Symbol', 'print'), suffix=r'\b'), Name.Builtin), (r'([$a-zA-Z_][\w.$]*)(\s*)(:|[+\-*/]?\=)?\b', bygroups(Name.Variable, Whitespace, Operator)), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), ('"""', String, 'tdqs'), ("'''", String, 'tsqs'), ('"', String, 'dqs'), ("'", String, 'sqs'), ], 'strings': [ (r'[^#\\\'"]+', String), # note that all kal strings are multi-line. 
# hashmarks, quotes and backslashes must be parsed one at a time ], 'interpoling_string': [ (r'\}', String.Interpol, "#pop"), include('root') ], 'dqs': [ (r'"', String, '#pop'), (r'\\.|\'', String), # double-quoted string don't need ' escapes (r'#\{', String.Interpol, "interpoling_string"), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r'#|\\.|"', String), # single quoted strings don't need " escapses include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), (r'\\.|\'|"', String), # no need to escape quotes in triple-string (r'#\{', String.Interpol, "interpoling_string"), include('strings'), ], 'tsqs': [ (r"'''", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class LiveScriptLexer(RegexLexer): """ For LiveScript source code. .. versionadded:: 1.6 """ name = 'LiveScript' url = 'https://livescript.net/' aliases = ['livescript', 'live-script'] filenames = ['*.ls'] mimetypes = ['text/livescript'] flags = re.DOTALL tokens = { 'commentsandwhitespace': [ (r'\s+', Whitespace), (r'/\*.*?\*/', Comment.Multiline), (r'(#.*?)(\n)', bygroups(Comment.Single, Whitespace)), ], 'multilineregex': [ include('commentsandwhitespace'), (r'//([gimuysd]+\b|\B)', String.Regex, '#pop'), (r'/', String.Regex), (r'[^/#]+', String.Regex) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'//', String.Regex, ('#pop', 'multilineregex')), (r'/(?! 
)(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gimuysd]+\b|\B)', String.Regex, '#pop'), (r'/', Operator, '#pop'), default('#pop'), ], 'root': [ (r'\A(?=\s|/)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|' r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function), (r'\+\+|&&|(?<![.$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|' r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|' r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|' r'[+*`%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(?<![.$])(for|own|in|of|while|until|loop|break|' r'return|continue|switch|when|then|if|unless|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|super|' r'extends|this|class|by|const|var|to|til)\b', Keyword, 'slashstartsregex'), (r'(?<![.$])(true|false|yes|no|on|off|' r'null|NaN|Infinity|undefined|void)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|' r'Number|Object|RegExp|String|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|' r'globalThis|Symbol|Symbol|BigInt)\b', Name.Builtin), (r'([$a-zA-Z_][\w.\-:$]*)(\s*)([:=])(\s+)', bygroups(Name.Variable, Whitespace, Operator, Whitespace), 'slashstartsregex'), (r'(@[$a-zA-Z_][\w.\-:$]*)(\s*)([:=])(\s+)', bygroups(Name.Variable.Instance, Whitespace, Operator, Whitespace), 'slashstartsregex'), (r'@', Name.Other, 'slashstartsregex'), (r'@?[$a-zA-Z_][\w-]*', Name.Other, 'slashstartsregex'), (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float), (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer), ('"""', String, 'tdqs'), ("'''", String, 'tsqs'), ('"', String, 'dqs'), ("'", String, 'sqs'), (r'\\\S+', String), (r'<\[.*?\]>', String), ], 'strings': [ (r'[^#\\\'"]+', String), # note that all coffee script strings are multi-line. 
# hashmarks, quotes and backslashes must be parsed one at a time ], 'interpoling_string': [ (r'\}', String.Interpol, "#pop"), include('root') ], 'dqs': [ (r'"', String, '#pop'), (r'\\.|\'', String), # double-quoted string don't need ' escapes (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r'#|\\.|"', String), # single quoted strings don't need " escapses include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), (r'\\.|\'|"', String), # no need to escape quotes in triple-string (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings'), ], 'tsqs': [ (r"'''", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class DartLexer(RegexLexer): """ For Dart source code. .. versionadded:: 1.5 """ name = 'Dart' url = 'http://dart.dev/' aliases = ['dart'] filenames = ['*.dart'] mimetypes = ['text/x-dart'] flags = re.MULTILINE | re.DOTALL tokens = { 'root': [ include('string_literal'), (r'#!(.*?)$', Comment.Preproc), (r'\b(import|export)\b', Keyword, 'import_decl'), (r'\b(library|source|part of|part)\b', Keyword), (r'[^\S\n]+', Whitespace), (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)), (r'/\*.*?\*/', Comment.Multiline), (r'\b(class|extension|mixin)\b(\s+)', bygroups(Keyword.Declaration, Whitespace), 'class'), (r'\b(as|assert|break|case|catch|const|continue|default|do|else|finally|' r'for|if|in|is|new|rethrow|return|super|switch|this|throw|try|while)\b', Keyword), (r'\b(abstract|async|await|const|covariant|extends|external|factory|final|' r'get|implements|late|native|on|operator|required|set|static|sync|typedef|' r'var|with|yield)\b', Keyword.Declaration), (r'\b(bool|double|dynamic|int|num|Function|Never|Null|Object|String|void)\b', Keyword.Type), (r'\b(false|null|true)\b', Keyword.Constant), (r'[~!%^&*+=|?:<>/-]|as\b', Operator), (r'@[a-zA-Z_$]\w*', Name.Decorator), (r'[a-zA-Z_$]\w*:', Name.Label), 
(r'[a-zA-Z_$]\w*', Name), (r'[(){}\[\],.;]', Punctuation), (r'0[xX][0-9a-fA-F]+', Number.Hex), # DIGIT+ (‘.’ DIGIT*)? EXPONENT? (r'\d+(\.\d*)?([eE][+-]?\d+)?', Number), (r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT? (r'\n', Whitespace) # pseudo-keyword negate intentionally left out ], 'class': [ (r'[a-zA-Z_$]\w*', Name.Class, '#pop') ], 'import_decl': [ include('string_literal'), (r'\s+', Whitespace), (r'\b(as|deferred|show|hide)\b', Keyword), (r'[a-zA-Z_$]\w*', Name), (r'\,', Punctuation), (r'\;', Punctuation, '#pop') ], 'string_literal': [ # Raw strings. (r'r"""([\w\W]*?)"""', String.Double), (r"r'''([\w\W]*?)'''", String.Single), (r'r"(.*?)"', String.Double), (r"r'(.*?)'", String.Single), # Normal Strings. (r'"""', String.Double, 'string_double_multiline'), (r"'''", String.Single, 'string_single_multiline'), (r'"', String.Double, 'string_double'), (r"'", String.Single, 'string_single') ], 'string_common': [ (r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z'\"$\\])", String.Escape), (r'(\$)([a-zA-Z_]\w*)', bygroups(String.Interpol, Name)), (r'(\$\{)(.*?)(\})', bygroups(String.Interpol, using(this), String.Interpol)) ], 'string_double': [ (r'"', String.Double, '#pop'), (r'[^"$\\\n]+', String.Double), include('string_common'), (r'\$+', String.Double) ], 'string_double_multiline': [ (r'"""', String.Double, '#pop'), (r'[^"$\\]+', String.Double), include('string_common'), (r'(\$|\")+', String.Double) ], 'string_single': [ (r"'", String.Single, '#pop'), (r"[^'$\\\n]+", String.Single), include('string_common'), (r'\$+', String.Single) ], 'string_single_multiline': [ (r"'''", String.Single, '#pop'), (r'[^\'$\\]+', String.Single), include('string_common'), (r'(\$|\')+', String.Single) ] } class LassoLexer(RegexLexer): """ For Lasso source code, covering both Lasso 9 syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in HTML, use the `LassoHtmlLexer`. 
Additional options accepted: `builtinshighlighting` If given and ``True``, highlight builtin types, traits, methods, and members (default: ``True``). `requiredelimiters` If given and ``True``, only highlight code between delimiters as Lasso (default: ``False``). .. versionadded:: 1.6 """ name = 'Lasso' aliases = ['lasso', 'lassoscript'] filenames = ['*.lasso', '*.lasso[89]'] alias_filenames = ['*.incl', '*.inc', '*.las'] mimetypes = ['text/x-lasso'] flags = re.IGNORECASE | re.DOTALL | re.MULTILINE tokens = { 'root': [ (r'^#![ \S]+lasso9\b', Comment.Preproc, 'lasso'), (r'(?=\[|<)', Other, 'delimiters'), (r'\s+', Whitespace), default(('delimiters', 'lassofile')), ], 'delimiters': [ (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'), (r'\[noprocess\]', Comment.Preproc, 'noprocess'), (r'\[', Comment.Preproc, 'squarebrackets'), (r'<\?(lasso(script)?|=)', Comment.Preproc, 'anglebrackets'), (r'<(!--.*?-->)?', Other), (r'[^[<]+', Other), ], 'nosquarebrackets': [ (r'\[noprocess\]', Comment.Preproc, 'noprocess'), (r'\[', Other), (r'<\?(lasso(script)?|=)', Comment.Preproc, 'anglebrackets'), (r'<(!--.*?-->)?', Other), (r'[^[<]+', Other), ], 'noprocess': [ (r'\[/noprocess\]', Comment.Preproc, '#pop'), (r'\[', Other), (r'[^[]', Other), ], 'squarebrackets': [ (r'\]', Comment.Preproc, '#pop'), include('lasso'), ], 'anglebrackets': [ (r'\?>', Comment.Preproc, '#pop'), include('lasso'), ], 'lassofile': [ (r'\]|\?>', Comment.Preproc, '#pop'), include('lasso'), ], 'whitespacecomments': [ (r'\s+', Whitespace), (r'(//.*?)(\s*)$', bygroups(Comment.Single, Whitespace)), (r'/\*\*!.*?\*/', String.Doc), (r'/\*.*?\*/', Comment.Multiline), ], 'lasso': [ # whitespace/comments include('whitespacecomments'), # literals (r'\d*\.\d+(e[+-]?\d+)?', Number.Float), (r'0x[\da-f]+', Number.Hex), (r'\d+', Number.Integer), (r'(infinity|NaN)\b', Number), (r"'", String.Single, 'singlestring'), (r'"', String.Double, 'doublestring'), (r'`[^`]*`', String.Backtick), # names (r'\$[a-z_][\w.]*', 
Name.Variable), (r'#([a-z_][\w.]*|\d+\b)', Name.Variable.Instance), (r"(\.)(\s*)('[a-z_][\w.]*')", bygroups(Name.Builtin.Pseudo, Whitespace, Name.Variable.Class)), (r"(self)(\s*)(->)(\s*)('[a-z_][\w.]*')", bygroups(Name.Builtin.Pseudo, Whitespace, Operator, Whitespace, Name.Variable.Class)), (r'(\.\.?)(\s*)([a-z_][\w.]*(=(?!=))?)', bygroups(Name.Builtin.Pseudo, Whitespace, Name.Other.Member)), (r'(->\\?|&)(\s*)([a-z_][\w.]*(=(?!=))?)', bygroups(Operator, Whitespace, Name.Other.Member)), (r'(?<!->)(self|inherited|currentcapture|givenblock)\b', Name.Builtin.Pseudo), (r'-(?!infinity)[a-z_][\w.]*', Name.Attribute), (r'(::)(\s*)([a-z_][\w.]*)', bygroups(Punctuation, Whitespace, Name.Label)), (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|' r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|' r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|' r'Error_InvalidDatabase|Error_InvalidPassword|' r'Error_InvalidUsername|Error_ModuleNotFound|' r'Error_NoError|Error_NoPermission|Error_OutOfMemory|' r'Error_ReqColumnMissing|Error_ReqFieldMissing|' r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|' r'Error_UpdateError)\b', Name.Exception), # definitions (r'(define)(\s+)([a-z_][\w.]*)(\s*)(=>)(\s*)(type|trait|thread)\b', bygroups(Keyword.Declaration, Whitespace, Name.Class, Whitespace, Operator, Whitespace, Keyword)), (r'(define)(\s+)([a-z_][\w.]*)(\s*)(->)(\s*)([a-z_][\w.]*=?|[-+*/%])', bygroups(Keyword.Declaration, Whitespace, Name.Class, Whitespace, Operator, Whitespace, Name.Function), 'signature'), (r'(define)(\s+)([a-z_][\w.]*)', bygroups(Keyword.Declaration, Whitespace, Name.Function), 'signature'), (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])' r'(?=\s*\())', bygroups(Keyword, Whitespace, Name.Function), 'signature'), (r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Whitespace, Name.Function)), # keywords (r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant), 
(r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration), (r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|' r'null|boolean|bytes|keyword|list|locale|queue|set|stack|' r'staticarray)\b', Keyword.Type), (r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Whitespace, Keyword)), (r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Whitespace, Name)), (r'require\b', Keyword, 'requiresection'), (r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)), (r'(/?)(Cache|Database_Names|Database_SchemaNames|' r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|' r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|' r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|' r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|Link_FirstRecord|' r'Link_LastGroup|Link_LastRecord|Link_NextGroup|Link_NextRecord|' r'Link_PrevGroup|Link_PrevRecord|Log|Loop|Output_None|Portal|' r'Private|Protect|Records|Referer|Referrer|Repeating|ResultSet|' r'Rows|Search_Args|Search_Arguments|Select|Sort_Args|' r'Sort_Arguments|Thread_Atomic|Value_List|While|Abort|Case|Else|' r'Fail_If|Fail_IfNot|Fail|If_Empty|If_False|If_Null|If_True|' r'Loop_Abort|Loop_Continue|Loop_Count|Params|Params_Up|Return|' r'Return_Value|Run_Children|SOAP_DefineTag|SOAP_LastRequest|' r'SOAP_LastResponse|Tag_Name|ascending|average|by|define|' r'descending|do|equals|frozen|group|handle_failure|import|in|into|' r'join|let|match|max|min|on|order|parent|protected|provide|public|' r'require|returnhome|skip|split_thread|sum|take|thread|to|trait|' r'type|where|with|yield|yieldhome)\b', bygroups(Punctuation, Keyword)), # other (r',', Punctuation, 'commamember'), (r'(and|or|not)\b', Operator.Word), (r'([a-z_][\w.]*)(\s*)(::)(\s*)([a-z_][\w.]*)?(\s*=(?!=))', bygroups(Name, Whitespace, Punctuation, Whitespace, Name.Label, Operator)), (r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)), (r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b', bygroups(Operator, Operator.Word)), 
(r':=|[-+*/%=<>&|!?\\]+', Operator), (r'[{}():;,@^]', Punctuation), ], 'singlestring': [ (r"'", String.Single, '#pop'), (r"[^'\\]+", String.Single), include('escape'), (r"\\", String.Single), ], 'doublestring': [ (r'"', String.Double, '#pop'), (r'[^"\\]+', String.Double), include('escape'), (r'\\', String.Double), ], 'escape': [ (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:\n\r]+:|' r'[abefnrtv?"\'\\]|$)', String.Escape), ], 'signature': [ (r'=>', Operator, '#pop'), (r'\)', Punctuation, '#pop'), (r'[(,]', Punctuation, 'parameter'), include('lasso'), ], 'parameter': [ (r'\)', Punctuation, '#pop'), (r'-?[a-z_][\w.]*', Name.Attribute, '#pop'), (r'\.\.\.', Name.Builtin.Pseudo), include('lasso'), ], 'requiresection': [ (r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'), (r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name), (r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'), (r'(::)(\s*)([a-z_][\w.]*)', bygroups(Punctuation, Whitespace, Name.Label)), (r',', Punctuation), include('whitespacecomments'), ], 'requiresignature': [ (r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'), (r'\)', Punctuation, '#pop:2'), (r'-?[a-z_][\w.]*', Name.Attribute), (r'(::)(\s*)([a-z_][\w.]*)', bygroups(Punctuation, Whitespace, Name.Label)), (r'\.\.\.', Name.Builtin.Pseudo), (r'[(,]', Punctuation), include('whitespacecomments'), ], 'commamember': [ (r'(([a-z_][\w.]*=?|[-+*/%])' r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))', Name.Function, 'signature'), include('whitespacecomments'), default('#pop'), ], } def __init__(self, **options): self.builtinshighlighting = get_bool_opt( options, 'builtinshighlighting', True) self.requiredelimiters = get_bool_opt( options, 'requiredelimiters', False) self._builtins = set() self._members = set() if self.builtinshighlighting: from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS for key, value in BUILTINS.items(): self._builtins.update(value) for key, value in MEMBERS.items(): 
self._members.update(value) RegexLexer.__init__(self, **options) def get_tokens_unprocessed(self, text): stack = ['root'] if self.requiredelimiters: stack.append('delimiters') for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text, stack): if (token is Name.Other and value.lower() in self._builtins or token is Name.Other.Member and value.lower().rstrip('=') in self._members): yield index, Name.Builtin, value continue yield index, token, value def analyse_text(text): rv = 0.0 if 'bin/lasso9' in text: rv += 0.8 if re.search(r'<\?lasso', text, re.I): rv += 0.4 if re.search(r'local\(', text, re.I): rv += 0.4 return rv class ObjectiveJLexer(RegexLexer): """ For Objective-J source code with preprocessor directives. .. versionadded:: 1.3 """ name = 'Objective-J' aliases = ['objective-j', 'objectivej', 'obj-j', 'objj'] filenames = ['*.j'] mimetypes = ['text/x-objective-j'] #: optional Comment or Whitespace _ws = r'(?:\s|//[^\n]*\n|/[*](?:[^*]|[*][^/])*[*]/)*' flags = re.DOTALL | re.MULTILINE tokens = { 'root': [ include('whitespace'), # function definition (r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)', bygroups(using(this), using(this, state='function_signature'), using(this))), # class definition (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Whitespace), 'classname'), (r'(@class|@protocol)(\s*)', bygroups(Keyword, Whitespace), 'forward_classname'), (r'(\s*)(@end)(\s*)', bygroups(Whitespace, Keyword, Whitespace)), include('statements'), ('[{()}]', Punctuation), (';', Punctuation), ], 'whitespace': [ (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")', bygroups(Comment.Preproc, Whitespace, String.Double)), (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)', bygroups(Comment.Preproc, Whitespace, String.Double)), (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")', bygroups(Comment.Preproc, Whitespace, String.Double)), (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)', bygroups(Comment.Preproc, Whitespace, String.Double)), (r'#if\s+0', 
Comment.Preproc, 'if0'), (r'#', Comment.Preproc, 'macro'), (r'\s+', Whitespace), (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'<!--', Comment), ], 'slashstartsregex': [ include('whitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop'), ], 'badregex': [ (r'\n', Whitespace, '#pop'), ], 'statements': [ (r'(L|@)?"', String, 'string'), (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double), (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(for|in|while|do|break|return|continue|switch|case|default|if|' r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|' r'prototype|__proto__)\b', Keyword, 'slashstartsregex'), (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'), (r'(@selector|@private|@protected|@public|@encode|' r'@synchronized|@try|@throw|@catch|@finally|@end|@property|' r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword), (r'(int|long|float|short|double|char|unsigned|signed|void|' r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b', Keyword.Type), (r'(self|super)\b', Name.Builtin), (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant), (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant), (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|' 
r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|' r'SQRT2)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|' r'Number|Object|RegExp|String|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|' r'window|globalThis|Symbol)\b', Name.Builtin), (r'([$a-zA-Z_]\w*)(' + _ws + r')(?=\()', bygroups(Name.Function, using(this))), (r'[$a-zA-Z_]\w*', Name), ], 'classname': [ # interface definition that inherits (r'([a-zA-Z_]\w*)(' + _ws + r':' + _ws + r')([a-zA-Z_]\w*)?', bygroups(Name.Class, using(this), Name.Class), '#pop'), # interface definition for a category (r'([a-zA-Z_]\w*)(' + _ws + r'\()([a-zA-Z_]\w*)(\))', bygroups(Name.Class, using(this), Name.Label, Text), '#pop'), # simple interface / implementation (r'([a-zA-Z_]\w*)', Name.Class, '#pop'), ], 'forward_classname': [ (r'([a-zA-Z_]\w*)(\s*)(,)(\s*)', bygroups(Name.Class, Whitespace, Text, Whitespace), '#push'), (r'([a-zA-Z_]\w*)(\s*)(;?)', bygroups(Name.Class, Whitespace, Text), '#pop'), ], 'function_signature': [ include('whitespace'), # start of a selector w/ parameters (r'(\(' + _ws + r')' # open paren r'([a-zA-Z_]\w+)' # return type r'(' + _ws + r'\)' + _ws + r')' # close paren r'([$a-zA-Z_]\w+' + _ws + r':)', # function name bygroups(using(this), Keyword.Type, using(this), Name.Function), 'function_parameters'), # no-param function (r'(\(' + _ws + r')' # open paren r'([a-zA-Z_]\w+)' # return type r'(' + _ws + r'\)' + _ws + r')' # close paren r'([$a-zA-Z_]\w+)', # function name bygroups(using(this), Keyword.Type, using(this), Name.Function), "#pop"), # no return type given, start of a selector w/ parameters (r'([$a-zA-Z_]\w+' + _ws + r':)', # function name bygroups(Name.Function), 'function_parameters'), # no return type given, no-param function (r'([$a-zA-Z_]\w+)', # function name bygroups(Name.Function), "#pop"), default('#pop'), ], 'function_parameters': [ include('whitespace'), # parameters (r'(\(' + _ws 
+ ')' # open paren r'([^)]+)' # type r'(' + _ws + r'\)' + _ws + r')' # close paren r'([$a-zA-Z_]\w+)', # param name bygroups(using(this), Keyword.Type, using(this), Text)), # one piece of a selector name (r'([$a-zA-Z_]\w+' + _ws + r':)', # function name Name.Function), # smallest possible selector piece (r'(:)', Name.Function), # var args (r'(,' + _ws + r'\.\.\.)', using(this)), # param name (r'([$a-zA-Z_]\w+)', Text), ], 'expression': [ (r'([$a-zA-Z_]\w*)(\()', bygroups(Name.Function, Punctuation)), (r'(\))', Punctuation, "#pop"), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation (r'\\', String), # stray backslash ], 'macro': [ (r'[^/\n]+', Comment.Preproc), (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace), '#pop'), (r'/', Comment.Preproc), (r'(?<=\\)\n', Whitespace), (r'\n', Whitespace, '#pop'), ], 'if0': [ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'), (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'), (r'(.*?)(\n)', bygroups(Comment, Whitespace)), ] } def analyse_text(text): if re.search(r'^\s*@import\s+[<"]', text, re.MULTILINE): # special directive found in most Objective-J files return True return False class CoffeeScriptLexer(RegexLexer): """ For CoffeeScript source code. .. 
versionadded:: 1.3 """ name = 'CoffeeScript' url = 'http://coffeescript.org' aliases = ['coffeescript', 'coffee-script', 'coffee'] filenames = ['*.coffee'] mimetypes = ['text/coffeescript'] _operator_re = ( r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|' r'\|\||\\(?=\n)|' r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&|\^/])=?') flags = re.DOTALL tokens = { 'commentsandwhitespace': [ (r'\s+', Whitespace), (r'###[^#].*?###', Comment.Multiline), (r'(#(?!##[^#]).*?)(\n)', bygroups(Comment.Single, Whitespace)), ], 'multilineregex': [ (r'[^/#]+', String.Regex), (r'///([gimuysd]+\b|\B)', String.Regex, '#pop'), (r'#\{', String.Interpol, 'interpoling_string'), (r'[/#]', String.Regex), ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'///', String.Regex, ('#pop', 'multilineregex')), (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gimuysd]+\b|\B)', String.Regex, '#pop'), # This isn't really guarding against mishighlighting well-formed # code, just the ability to infinite-loop between root and # slashstartsregex. 
(r'/', Operator, '#pop'), default('#pop'), ], 'root': [ include('commentsandwhitespace'), (r'\A(?=\s|/)', Text, 'slashstartsregex'), (_operator_re, Operator, 'slashstartsregex'), (r'(?:\([^()]*\))?\s*[=-]>', Name.Function, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(?<![.$])(for|own|in|of|while|until|' r'loop|break|return|continue|' r'switch|when|then|if|unless|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|super|' r'extends|this|class|by)\b', Keyword, 'slashstartsregex'), (r'(?<![.$])(true|false|yes|no|on|off|null|' r'NaN|Infinity|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|' r'Number|Object|RegExp|String|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|globalThis|Symbol)\b', Name.Builtin), (r'([$a-zA-Z_][\w.:$]*)(\s*)([:=])(\s+)', bygroups(Name.Variable, Whitespace, Operator, Whitespace), 'slashstartsregex'), (r'(@[$a-zA-Z_][\w.:$]*)(\s*)([:=])(\s+)', bygroups(Name.Variable.Instance, Whitespace, Operator, Whitespace), 'slashstartsregex'), (r'@', Name.Other, 'slashstartsregex'), (r'@?[$a-zA-Z_][\w$]*', Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), ('"""', String, 'tdqs'), ("'''", String, 'tsqs'), ('"', String, 'dqs'), ("'", String, 'sqs'), ], 'strings': [ (r'[^#\\\'"]+', String), # note that all coffee script strings are multi-line. 
# hashmarks, quotes and backslashes must be parsed one at a time ], 'interpoling_string': [ (r'\}', String.Interpol, "#pop"), include('root') ], 'dqs': [ (r'"', String, '#pop'), (r'\\.|\'', String), # double-quoted string don't need ' escapes (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r'#|\\.|"', String), # single quoted strings don't need " escapses include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), (r'\\.|\'|"', String), # no need to escape quotes in triple-string (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings'), ], 'tsqs': [ (r"'''", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class MaskLexer(RegexLexer): """ For Mask markup. .. versionadded:: 2.0 """ name = 'Mask' url = 'https://github.com/atmajs/MaskJS' aliases = ['mask'] filenames = ['*.mask'] mimetypes = ['text/x-mask'] flags = re.MULTILINE | re.IGNORECASE | re.DOTALL tokens = { 'root': [ (r'\s+', Whitespace), (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)), (r'/\*.*?\*/', Comment.Multiline), (r'[{};>]', Punctuation), (r"'''", String, 'string-trpl-single'), (r'"""', String, 'string-trpl-double'), (r"'", String, 'string-single'), (r'"', String, 'string-double'), (r'([\w-]+)', Name.Tag, 'node'), (r'([^.#;{>\s]+)', Name.Class, 'node'), (r'(#[\w-]+)', Name.Function, 'node'), (r'(\.[\w-]+)', Name.Variable.Class, 'node') ], 'string-base': [ (r'\\.', String.Escape), (r'~\[', String.Interpol, 'interpolation'), (r'.', String.Single), ], 'string-single': [ (r"'", String.Single, '#pop'), include('string-base') ], 'string-double': [ (r'"', String.Single, '#pop'), include('string-base') ], 'string-trpl-single': [ (r"'''", String.Single, '#pop'), include('string-base') ], 'string-trpl-double': [ (r'"""', String.Single, '#pop'), include('string-base') ], 'interpolation': [ (r'\]', String.Interpol, '#pop'), (r'(\s*)(:)', 
bygroups(Whitespace, String.Interpol), 'expression'), (r'(\s*)(\w+)(:)', bygroups(Whitespace, Name.Other, Punctuation)), (r'[^\]]+', String.Interpol) ], 'expression': [ (r'[^\]]+', using(JavascriptLexer), '#pop') ], 'node': [ (r'\s+', Whitespace), (r'\.', Name.Variable.Class, 'node-class'), (r'\#', Name.Function, 'node-id'), (r'(style)([ \t]*)(=)', bygroups(Name.Attribute, Whitespace, Operator), 'node-attr-style-value'), (r'([\w:-]+)([ \t]*)(=)', bygroups(Name.Attribute, Whitespace, Operator), 'node-attr-value'), (r'[\w:-]+', Name.Attribute), (r'[>{;]', Punctuation, '#pop') ], 'node-class': [ (r'[\w-]+', Name.Variable.Class), (r'~\[', String.Interpol, 'interpolation'), default('#pop') ], 'node-id': [ (r'[\w-]+', Name.Function), (r'~\[', String.Interpol, 'interpolation'), default('#pop') ], 'node-attr-value': [ (r'\s+', Whitespace), (r'\w+', Name.Variable, '#pop'), (r"'", String, 'string-single-pop2'), (r'"', String, 'string-double-pop2'), default('#pop') ], 'node-attr-style-value': [ (r'\s+', Whitespace), (r"'", String.Single, 'css-single-end'), (r'"', String.Single, 'css-double-end'), include('node-attr-value') ], 'css-base': [ (r'\s+', Whitespace), (r";", Punctuation), (r"[\w\-]+\s*:", Name.Builtin) ], 'css-single-end': [ include('css-base'), (r"'", String.Single, '#pop:2'), (r"[^;']+", Name.Entity) ], 'css-double-end': [ include('css-base'), (r'"', String.Single, '#pop:2'), (r'[^;"]+', Name.Entity) ], 'string-single-pop2': [ (r"'", String.Single, '#pop:2'), include('string-base') ], 'string-double-pop2': [ (r'"', String.Single, '#pop:2'), include('string-base') ], } class EarlGreyLexer(RegexLexer): """ For Earl-Grey source code. .. 
versionadded: 2.1 """ name = 'Earl Grey' aliases = ['earl-grey', 'earlgrey', 'eg'] filenames = ['*.eg'] mimetypes = ['text/x-earl-grey'] tokens = { 'root': [ (r'\n', Whitespace), include('control'), (r'[^\S\n]+', Text), (r'(;;.*)(\n)', bygroups(Comment, Whitespace)), (r'[\[\]{}:(),;]', Punctuation), (r'(\\)(\n)', bygroups(String.Escape, Whitespace)), (r'\\', Text), include('errors'), (words(( 'with', 'where', 'when', 'and', 'not', 'or', 'in', 'as', 'of', 'is'), prefix=r'(?<=\s|\[)', suffix=r'(?![\w$\-])'), Operator.Word), (r'[*@]?->', Name.Function), (r'[+\-*/~^<>%&|?!@#.]*=', Operator.Word), (r'\.{2,3}', Operator.Word), # Range Operator (r'([+*/~^<>&|?!]+)|([#\-](?=\s))|@@+(?=\s)|=+', Operator), (r'(?<![\w$\-])(var|let)(?:[^\w$])', Keyword.Declaration), include('keywords'), include('builtins'), include('assignment'), (r'''(?x) (?:()([a-zA-Z$_](?:[\w$\-]*[\w$])?)| (?<=[\s{\[(])(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?)) (?=.*%)''', bygroups(Punctuation, Name.Tag, Punctuation, Name.Class.Start), 'dbs'), (r'[rR]?`', String.Backtick, 'bt'), (r'[rR]?```', String.Backtick, 'tbt'), (r'(?<=[\s\[{(,;])\.([a-zA-Z$_](?:[\w$\-]*[\w$])?)' r'(?=[\s\]}),;])', String.Symbol), include('nested'), (r'(?:[rR]|[rR]\.[gmi]{1,3})?"', String, combined('stringescape', 'dqs')), (r'(?:[rR]|[rR]\.[gmi]{1,3})?\'', String, combined('stringescape', 'sqs')), (r'"""', String, combined('stringescape', 'tdqs')), include('tuple'), include('import_paths'), include('name'), include('numbers'), ], 'dbs': [ (r'(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?)(?=[.\[\s])', bygroups(Punctuation, Name.Class.DBS)), (r'(\[)([\^#][a-zA-Z$_](?:[\w$\-]*[\w$])?)(\])', bygroups(Punctuation, Name.Entity.DBS, Punctuation)), (r'\s+', Whitespace), (r'%', Operator.DBS, '#pop'), ], 'import_paths': [ (r'(?<=[\s:;,])(\.{1,3}(?:[\w\-]*/)*)(\w(?:[\w\-]*\w)*)(?=[\s;,])', bygroups(Text.Whitespace, Text)), ], 'assignment': [ (r'(\.)?([a-zA-Z$_](?:[\w$\-]*[\w$])?)' r'(?=\s+[+\-*/~^<>%&|?!@#.]*\=\s)', bygroups(Punctuation, Name.Variable)) ], 
'errors': [ (words(('Error', 'TypeError', 'ReferenceError'), prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'), Name.Exception), (r'''(?x) (?<![\w$]) E\.[\w$](?:[\w$\-]*[\w$])? (?:\.[\w$](?:[\w$\-]*[\w$])?)* (?=[({\[?!\s])''', Name.Exception), ], 'control': [ (r'''(?x) ([a-zA-Z$_](?:[\w$-]*[\w$])?) (?!\n)\s+ (?!and|as|each\*|each|in|is|mod|of|or|when|where|with) (?=(?:[+\-*/~^<>%&|?!@#.])?[a-zA-Z$_](?:[\w$-]*[\w$])?)''', Keyword.Control), (r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(?!\n)(\s+)(?=[\'"\d{\[(])', bygroups(Keyword.Control, Whitespace)), (r'''(?x) (?: (?<=[%=])| (?<=[=\-]>)| (?<=with|each|with)| (?<=each\*|where) )(\s+) ([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''', bygroups(Whitespace, Keyword.Control, Punctuation)), (r'''(?x) (?<![+\-*/~^<>%&|?!@#.])(\s+) ([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''', bygroups(Whitespace, Keyword.Control, Punctuation)), ], 'nested': [ (r'''(?x) (?<=[\w$\]})])(\.) ([a-zA-Z$_](?:[\w$-]*[\w$])?) (?=\s+with(?:\s|\n))''', bygroups(Punctuation, Name.Function)), (r'''(?x) (?<!\s)(\.) ([a-zA-Z$_](?:[\w$-]*[\w$])?) (?=[}\]).,;:\s])''', bygroups(Punctuation, Name.Field)), (r'''(?x) (?<=[\w$\]})])(\.) ([a-zA-Z$_](?:[\w$-]*[\w$])?) 
(?=[\[{(:])''', bygroups(Punctuation, Name.Function)), ], 'keywords': [ (words(( 'each', 'each*', 'mod', 'await', 'break', 'chain', 'continue', 'elif', 'expr-value', 'if', 'match', 'return', 'yield', 'pass', 'else', 'require', 'var', 'let', 'async', 'method', 'gen'), prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'), Keyword.Pseudo), (words(('this', 'self', '@'), prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'), Keyword.Constant), (words(( 'Function', 'Object', 'Array', 'String', 'Number', 'Boolean', 'ErrorFactory', 'ENode', 'Promise'), prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'), Keyword.Type), ], 'builtins': [ (words(( 'send', 'object', 'keys', 'items', 'enumerate', 'zip', 'product', 'neighbours', 'predicate', 'equal', 'nequal', 'contains', 'repr', 'clone', 'range', 'getChecker', 'get-checker', 'getProperty', 'get-property', 'getProjector', 'get-projector', 'consume', 'take', 'promisify', 'spawn', 'constructor'), prefix=r'(?<![\w\-#.])', suffix=r'(?![\w\-.])'), Name.Builtin), (words(( 'true', 'false', 'null', 'undefined'), prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'), Name.Constant), ], 'name': [ (r'@([a-zA-Z$_](?:[\w$-]*[\w$])?)', Name.Variable.Instance), (r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(\+\+|\-\-)?', bygroups(Name.Symbol, Operator.Word)) ], 'tuple': [ (r'#[a-zA-Z_][\w\-]*(?=[\s{(,;])', Name.Namespace) ], 'interpoling_string': [ (r'\}', String.Interpol, '#pop'), include('root') ], 'stringescape': [ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) ], 'strings': [ (r'[^\\\'"]', String), (r'[\'"\\]', String), (r'\n', String) # All strings are multiline in EG ], 'dqs': [ (r'"', String, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r"\\\\|\\'|\\\n", String.Escape), (r'\{', String.Interpol, 'interpoling_string'), include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), include('strings'), ], 'bt': [ (r'`', String.Backtick, '#pop'), 
(r'(?<!`)\n', String.Backtick), (r'\^=?', String.Escape), (r'.+', String.Backtick), ], 'tbt': [ (r'```', String.Backtick, '#pop'), (r'\n', String.Backtick), (r'\^=?', String.Escape), (r'[^`]+', String.Backtick), ], 'numbers': [ (r'\d+\.(?!\.)\d*([eE][+-]?[0-9]+)?', Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'8r[0-7]+', Number.Oct), (r'2r[01]+', Number.Bin), (r'16r[a-fA-F0-9]+', Number.Hex), (r'([3-79]|[12][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?', Number.Radix), (r'\d+', Number.Integer) ], } class JuttleLexer(RegexLexer): """ For Juttle source code. .. versionadded:: 2.2 """ name = 'Juttle' url = 'http://juttle.github.io/' aliases = ['juttle'] filenames = ['*.juttle'] mimetypes = ['application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle'] flags = re.DOTALL | re.MULTILINE tokens = { 'commentsandwhitespace': [ (r'\s+', Whitespace), (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gimuysd]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'^(?=\s|/)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r':\d{2}:\d{2}:\d{2}(\.\d*)?:', String.Moment), (r':(now|beginning|end|forever|yesterday|today|tomorrow|' r'(\d+(\.\d*)?|\.\d+)(ms|[smhdwMy])?):', String.Moment), (r':\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d*)?)?' r'(Z|[+-]\d{2}:\d{2}|[+-]\d{4})?:', String.Moment), (r':((\d+(\.\d*)?|\.\d+)[ ]+)?(millisecond|second|minute|hour|' r'day|week|month|year)[s]?' 
             r'(([ ]+and[ ]+(\d+[ ]+)?(millisecond|second|minute|hour|'
             r'day|week|month|year)[s]?)'
             r'|[ ]+(ago|from[ ]+now))*:', String.Moment),
            # After an operator or opening punctuation, a following `/` starts
            # a regex literal — hence the 'slashstartsregex' state pushes.
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(import|return|continue|if|else)\b', Keyword, 'slashstartsregex'),
            (r'(var|const|function|reducer|sub|input)\b', Keyword.Declaration,
             'slashstartsregex'),
            # Juttle flowgraph processor names are reserved words.
            (r'(batch|emit|filter|head|join|keep|pace|pass|put|read|reduce|remove|'
             r'sequence|skip|sort|split|tail|unbatch|uniq|view|write)\b',
             Keyword.Reserved),
            (r'(true|false|null|Infinity)\b', Keyword.Constant),
            (r'(Array|Date|Juttle|Math|Number|Object|RegExp|String)\b',
             Name.Builtin),
            (JS_IDENT, Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ]

    }


class NodeConsoleLexer(Lexer):
    """
    For parsing within an interactive Node.js REPL, such as:

    .. sourcecode:: nodejsrepl

        > let a = 3
        undefined

        > a
        3
        > let b = '4'
        undefined

        > b
        '4'

        > b == a
        false

    .. versionadded:: 2.10
    """
    name = 'Node.js REPL console session'
    aliases = ['nodejsrepl', ]
    mimetypes = ['text/x-nodejsrepl', ]

    def get_tokens_unprocessed(self, text):
        # Split the session into prompt lines (lexed as JavaScript, with the
        # prompt marker emitted via do_insertions) and plain output lines.
        jslexer = JavascriptLexer(**self.options)

        # Accumulated JS source of the current statement, and the prompt
        # tokens to re-insert at the recorded offsets within it.
        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()

            if line.startswith('> '):
                # Primary prompt: emit '>' as Generic.Prompt plus the space,
                # and buffer the rest as JS code.
                insertions.append((len(curcode),
                    [(0, Generic.Prompt, line[:1]),
                     (1, Whitespace, line[1:2])]))

                curcode += line[2:]
            elif line.startswith('...'):
                # node does a nested ... thing depending on depth
                code = line.lstrip('.')
                lead = len(line) - len(code)

                insertions.append((len(curcode),
                    [(0, Generic.Prompt, line[:lead])]))

                curcode += code
            else:
                # Non-prompt line: flush any buffered statement first, then
                # lex this output line as JS too (node echoes JS values).
                if curcode:
                    yield from do_insertions(insertions,
                        jslexer.get_tokens_unprocessed(curcode))

                    curcode = ''
                    insertions = []

                yield from do_insertions([],
                    jslexer.get_tokens_unprocessed(line))

        # Flush a trailing statement that was not followed by output.
        if curcode:
            yield from do_insertions(insertions,
                jslexer.get_tokens_unprocessed(curcode))
62,851
Python
38.554437
97
0.437734
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/sql.py
""" pygments.lexers.sql ~~~~~~~~~~~~~~~~~~~ Lexers for various SQL dialects and related interactive sessions. Postgres specific lexers: `PostgresLexer` A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL lexer are: - keywords and data types list parsed from the PG docs (run the `_postgres_builtins` module to update them); - Content of $-strings parsed using a specific lexer, e.g. the content of a PL/Python function is parsed using the Python lexer; - parse PG specific constructs: E-strings, $-strings, U&-strings, different operators and punctuation. `PlPgsqlLexer` A lexer for the PL/pgSQL language. Adds a few specific construct on top of the PG SQL lexer (such as <<label>>). `PostgresConsoleLexer` A lexer to highlight an interactive psql session: - identifies the prompt and does its best to detect the end of command in multiline statement where not all the lines are prefixed by a prompt, telling them apart from the output; - highlights errors in the output and notification levels; - handles psql backslash commands. The ``tests/examplefiles`` contains a few test files with data to be parsed by these lexers. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" import re from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words from pygments.token import Punctuation, Whitespace, Text, Comment, Operator, \ Keyword, Name, String, Number, Generic, Literal from pygments.lexers import get_lexer_by_name, ClassNotFound from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \ PSEUDO_TYPES, PLPGSQL_KEYWORDS from pygments.lexers._mysql_builtins import \ MYSQL_CONSTANTS, \ MYSQL_DATATYPES, \ MYSQL_FUNCTIONS, \ MYSQL_KEYWORDS, \ MYSQL_OPTIMIZER_HINTS from pygments.lexers import _tsql_builtins __all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer', 'SqlLexer', 'TransactSqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'RqlLexer'] line_re = re.compile('.*?\n') sqlite_prompt_re = re.compile(r'^(?:sqlite| ...)>(?= )') language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE) do_re = re.compile(r'\bDO\b', re.IGNORECASE) # Regular expressions for analyse_text() name_between_bracket_re = re.compile(r'\[[a-zA-Z_]\w*\]') name_between_backtick_re = re.compile(r'`[a-zA-Z_]\w*`') tsql_go_re = re.compile(r'\bgo\b', re.IGNORECASE) tsql_declare_re = re.compile(r'\bdeclare\s+@', re.IGNORECASE) tsql_variable_re = re.compile(r'@[a-zA-Z_]\w*\b') def language_callback(lexer, match): """Parse the content of a $-string using a lexer The lexer is chosen looking for a nearby LANGUAGE or assumed as plpgsql if inside a DO statement and no LANGUAGE has been found. 
""" lx = None m = language_re.match(lexer.text[match.end():match.end()+100]) if m is not None: lx = lexer._get_lexer(m.group(1)) else: m = list(language_re.finditer( lexer.text[max(0, match.start()-100):match.start()])) if m: lx = lexer._get_lexer(m[-1].group(1)) else: m = list(do_re.finditer( lexer.text[max(0, match.start()-25):match.start()])) if m: lx = lexer._get_lexer('plpgsql') # 1 = $, 2 = delimiter, 3 = $ yield (match.start(1), String, match.group(1)) yield (match.start(2), String.Delimiter, match.group(2)) yield (match.start(3), String, match.group(3)) # 4 = string contents if lx: yield from lx.get_tokens_unprocessed(match.group(4)) else: yield (match.start(4), String, match.group(4)) # 5 = $, 6 = delimiter, 7 = $ yield (match.start(5), String, match.group(5)) yield (match.start(6), String.Delimiter, match.group(6)) yield (match.start(7), String, match.group(7)) class PostgresBase: """Base class for Postgres-related lexers. This is implemented as a mixin to avoid the Lexer metaclass kicking in. this way the different lexer don't have a common Lexer ancestor. If they had, _tokens could be created on this ancestor and not updated for the other classes, resulting e.g. in PL/pgSQL parsed as SQL. This shortcoming seem to suggest that regexp lexers are not really subclassable. """ def get_tokens_unprocessed(self, text, *args): # Have a copy of the entire text to be used by `language_callback`. 
self.text = text yield from super().get_tokens_unprocessed(text, *args) def _get_lexer(self, lang): if lang.lower() == 'sql': return get_lexer_by_name('postgresql', **self.options) tries = [lang] if lang.startswith('pl'): tries.append(lang[2:]) if lang.endswith('u'): tries.append(lang[:-1]) if lang.startswith('pl') and lang.endswith('u'): tries.append(lang[2:-1]) for lx in tries: try: return get_lexer_by_name(lx, **self.options) except ClassNotFound: pass else: # TODO: better logging # print >>sys.stderr, "language not found:", lang return None class PostgresLexer(PostgresBase, RegexLexer): """ Lexer for the PostgreSQL dialect of SQL. .. versionadded:: 1.5 """ name = 'PostgreSQL SQL dialect' aliases = ['postgresql', 'postgres'] mimetypes = ['text/x-postgresql'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Whitespace), (r'--.*\n?', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (r'(' + '|'.join(s.replace(" ", r"\s+") for s in DATATYPES + PSEUDO_TYPES) + r')\b', Name.Builtin), (words(KEYWORDS, suffix=r'\b'), Keyword), (r'[+*/<>=~!@#%^&|`?-]+', Operator), (r'::', Operator), # cast (r'\$\d+', Name.Variable), (r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float), (r'[0-9]+', Number.Integer), (r"((?:E|U&)?)(')", bygroups(String.Affix, String.Single), 'string'), # quoted identifier (r'((?:U&)?)(")', bygroups(String.Affix, String.Name), 'quoted-ident'), (r'(?s)(\$)([^$]*)(\$)(.*?)(\$)(\2)(\$)', language_callback), (r'[a-z_]\w*', Name), # psql variable in SQL (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable), (r'[;:()\[\]{},.]', Punctuation), ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ], 'string': [ (r"[^']+", String.Single), (r"''", String.Single), (r"'", String.Single, '#pop'), ], 'quoted-ident': [ (r'[^"]+', String.Name), (r'""', String.Name), (r'"', String.Name, '#pop'), ], } class PlPgsqlLexer(PostgresBase, 
RegexLexer): """ Handle the extra syntax in Pl/pgSQL language. .. versionadded:: 1.5 """ name = 'PL/pgSQL' aliases = ['plpgsql'] mimetypes = ['text/x-plpgsql'] flags = re.IGNORECASE tokens = {k: l[:] for (k, l) in PostgresLexer.tokens.items()} # extend the keywords list for i, pattern in enumerate(tokens['root']): if pattern[1] == Keyword: tokens['root'][i] = ( words(KEYWORDS + PLPGSQL_KEYWORDS, suffix=r'\b'), Keyword) del i break else: assert 0, "SQL keywords not found" # Add specific PL/pgSQL rules (before the SQL ones) tokens['root'][:0] = [ (r'\%[a-z]\w*\b', Name.Builtin), # actually, a datatype (r':=', Operator), (r'\<\<[a-z]\w*\>\>', Name.Label), (r'\#[a-z]\w*\b', Keyword.Pseudo), # #variable_conflict ] class PsqlRegexLexer(PostgresBase, RegexLexer): """ Extend the PostgresLexer adding support specific for psql commands. This is not a complete psql lexer yet as it lacks prompt support and output rendering. """ name = 'PostgreSQL console - regexp based lexer' aliases = [] # not public flags = re.IGNORECASE tokens = {k: l[:] for (k, l) in PostgresLexer.tokens.items()} tokens['root'].append( (r'\\[^\s]+', Keyword.Pseudo, 'psql-command')) tokens['psql-command'] = [ (r'\n', Text, 'root'), (r'\s+', Whitespace), (r'\\[^\s]+', Keyword.Pseudo), (r""":(['"]?)[a-z]\w*\b\1""", Name.Variable), (r"'(''|[^'])*'", String.Single), (r"`([^`])*`", String.Backtick), (r"[^\s]+", String.Symbol), ] re_prompt = re.compile(r'^(\S.*?)??[=\-\(\$\'\"][#>]') re_psql_command = re.compile(r'\s*\\') re_end_command = re.compile(r';\s*(--.*?)?$') re_psql_command = re.compile(r'(\s*)(\\.+?)(\s+)$') re_error = re.compile(r'(ERROR|FATAL):') re_message = re.compile( r'((?:DEBUG|INFO|NOTICE|WARNING|ERROR|' r'FATAL|HINT|DETAIL|CONTEXT|LINE [0-9]+):)(.*?\n)') class lookahead: """Wrap an iterator and allow pushing back an item.""" def __init__(self, x): self.iter = iter(x) self._nextitem = None def __iter__(self): return self def send(self, i): self._nextitem = i return i def __next__(self): if 
self._nextitem is not None: ni = self._nextitem self._nextitem = None return ni return next(self.iter) next = __next__ class PostgresConsoleLexer(Lexer): """ Lexer for psql sessions. .. versionadded:: 1.5 """ name = 'PostgreSQL console (psql)' aliases = ['psql', 'postgresql-console', 'postgres-console'] mimetypes = ['text/x-postgresql-psql'] def get_tokens_unprocessed(self, data): sql = PsqlRegexLexer(**self.options) lines = lookahead(line_re.findall(data)) # prompt-output cycle while 1: # consume the lines of the command: start with an optional prompt # and continue until the end of command is detected curcode = '' insertions = [] for line in lines: # Identify a shell prompt in case of psql commandline example if line.startswith('$') and not curcode: lexer = get_lexer_by_name('console', **self.options) yield from lexer.get_tokens_unprocessed(line) break # Identify a psql prompt mprompt = re_prompt.match(line) if mprompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, mprompt.group())])) curcode += line[len(mprompt.group()):] else: curcode += line # Check if this is the end of the command # TODO: better handle multiline comments at the end with # a lexer with an external state? if re_psql_command.match(curcode) \ or re_end_command.search(curcode): break # Emit the combined stream of command and prompt(s) yield from do_insertions(insertions, sql.get_tokens_unprocessed(curcode)) # Emit the output lines out_token = Generic.Output for line in lines: mprompt = re_prompt.match(line) if mprompt is not None: # push the line back to have it processed by the prompt lines.send(line) break mmsg = re_message.match(line) if mmsg is not None: if mmsg.group(1).startswith("ERROR") \ or mmsg.group(1).startswith("FATAL"): out_token = Generic.Error yield (mmsg.start(1), Generic.Strong, mmsg.group(1)) yield (mmsg.start(2), out_token, mmsg.group(2)) else: yield (0, out_token, line) else: return class SqlLexer(RegexLexer): """ Lexer for Structured Query Language. 
Currently, this lexer does not recognize any special syntax except ANSI SQL. """ name = 'SQL' aliases = ['sql'] filenames = ['*.sql'] mimetypes = ['text/x-sql'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Whitespace), (r'--.*\n?', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (words(( 'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', 'ALIAS', 'ALL', 'ALLOCATE', 'ALTER', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARE', 'AS', 'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT', 'ASYMMETRIC', 'AT', 'ATOMIC', 'AUTHORIZATION', 'AVG', 'BACKWARD', 'BEFORE', 'BEGIN', 'BETWEEN', 'BITVAR', 'BIT_LENGTH', 'BOTH', 'BREADTH', 'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CATALOG_NAME', 'CHAIN', 'CHARACTERISTICS', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG', 'CHARACTER_SET_NAME', 'CHARACTER_SET_SCHEMA', 'CHAR_LENGTH', 'CHECK', 'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE', 'CLUSTER', 'COALESCE', 'COBOL', 'COLLATE', 'COLLATION', 'COLLATION_CATALOG', 'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLUMN', 'COLUMN_NAME', 'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPLETION', 'CONDITION_NUMBER', 'CONNECT', 'CONNECTION', 'CONNECTION_NAME', 'CONSTRAINT', 'CONSTRAINTS', 'CONSTRAINT_CATALOG', 'CONSTRAINT_NAME', 'CONSTRAINT_SCHEMA', 'CONSTRUCTOR', 'CONTAINS', 'CONTINUE', 'CONVERSION', 'CONVERT', 'COPY', 'CORRESPONDING', 'COUNT', 'CREATE', 'CREATEDB', 'CREATEUSER', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_DATE', 'CURRENT_PATH', 'CURRENT_ROLE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE', 'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY', 'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED', 'DEFINED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DEREF', 'DESC', 'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR', 'DETERMINISTIC', 
'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH', 'DISTINCT', 'DO', 'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE', 'EACH', 'ELSE', 'ELSIF', 'ENCODING', 'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY', 'EXCEPTION', 'EXCEPT', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING', 'EXISTS', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FOR', 'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE', 'FREEZE', 'FROM', 'FULL', 'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED', 'GROUP', 'GROUPING', 'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY', 'IF', 'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMEDIATELY', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT', 'IN', 'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR', 'INFIX', 'INHERITS', 'INITIALIZE', 'INITIALLY', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INTERSECT', 'INTO', 'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'ITERATE', 'JOIN', 'KEY', 'KEY_MEMBER', 'KEY_TYPE', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST', 'LATERAL', 'LEADING', 'LEFT', 'LENGTH', 'LESS', 'LEVEL', 'LIKE', 'LIMIT', 'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION', 'LOCATOR', 'LOCK', 'LOWER', 'MAP', 'MATCH', 'MAX', 'MAXVALUE', 'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH', 'MESSAGE_TEXT', 'METHOD', 'MIN', 'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES', 'MODIFY', 'MONTH', 'MORE', 'MOVE', 'MUMPS', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', 'NCLOB', 'NEW', 'NEXT', 'NO', 'NOCREATEDB', 'NOCREATEUSER', 'NONE', 'NOT', 'NOTHING', 'NOTIFY', 'NOTNULL', 'NULL', 'NULLABLE', 'NULLIF', 'OBJECT', 'OCTET_LENGTH', 'OF', 'OFF', 'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY', 'OPEN', 'OPERATION', 'OPERATOR', 'OPTION', 'OPTIONS', 'OR', 'ORDER', 'ORDINALITY', 'OUT', 'OUTER', 'OUTPUT', 'OVERLAPS', 'OVERLAY', 'OVERRIDING', 'OWNER', 'PAD', 'PARAMETER', 'PARAMETERS', 'PARAMETER_MODE', 'PARAMETER_NAME', 'PARAMETER_ORDINAL_POSITION', 
'PARAMETER_SPECIFIC_CATALOG', 'PARAMETER_SPECIFIC_NAME', 'PARAMETER_SPECIFIC_SCHEMA', 'PARTIAL', 'PASCAL', 'PENDANT', 'PERIOD', 'PLACING', 'PLI', 'POSITION', 'POSTFIX', 'PRECEEDS', 'PRECISION', 'PREFIX', 'PREORDER', 'PREPARE', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'PUBLIC', 'READ', 'READS', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES', 'REFERENCING', 'REINDEX', 'RELATIVE', 'RENAME', 'REPEATABLE', 'REPLACE', 'RESET', 'RESTART', 'RESTRICT', 'RESULT', 'RETURN', 'RETURNED_LENGTH', 'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE', 'RETURNS', 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINE_CATALOG', 'ROUTINE_NAME', 'ROUTINE_SCHEMA', 'ROW', 'ROWS', 'ROW_COUNT', 'RULE', 'SAVE_POINT', 'SCALE', 'SCHEMA', 'SCHEMA_NAME', 'SCOPE', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SELF', 'SENSITIVE', 'SERIALIZABLE', 'SERVER_NAME', 'SESSION', 'SESSION_USER', 'SET', 'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SIZE', 'SOME', 'SOURCE', 'SPACE', 'SPECIFIC', 'SPECIFICTYPE', 'SPECIFIC_NAME', 'SQL', 'SQLCODE', 'SQLERROR', 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNINIG', 'STABLE', 'START', 'STATE', 'STATEMENT', 'STATIC', 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STRICT', 'STRUCTURE', 'STYPE', 'SUBCLASS_ORIGIN', 'SUBLIST', 'SUBSTRING', 'SUCCEEDS', 'SUM', 'SYMMETRIC', 'SYSID', 'SYSTEM', 'SYSTEM_USER', 'TABLE', 'TABLE_NAME', ' TEMP', 'TEMPLATE', 'TEMPORARY', 'TERMINATE', 'THAN', 'THEN', 'TIME', 'TIMESTAMP', 'TIMEZONE_HOUR', 'TIMEZONE_MINUTE', 'TO', 'TOAST', 'TRAILING', 'TRANSACTION', 'TRANSACTIONS_COMMITTED', 'TRANSACTIONS_ROLLED_BACK', 'TRANSACTION_ACTIVE', 'TRANSFORM', 'TRANSFORMS', 'TRANSLATE', 'TRANSLATION', 'TREAT', 'TRIGGER', 'TRIGGER_CATALOG', 'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', 'UNCOMMITTED', 'UNDER', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', 'UNNAMED', 'UNNEST', 'UNTIL', 'UPDATE', 'UPPER', 'USAGE', 'USER', 'USER_DEFINED_TYPE_CATALOG', 'USER_DEFINED_TYPE_NAME', 
'USER_DEFINED_TYPE_SCHEMA', 'USING', 'VACUUM', 'VALID', 'VALIDATOR', 'VALUES', 'VARIABLE', 'VERBOSE', 'VERSION', 'VERSIONS', 'VERSIONING', 'VIEW', 'VOLATILE', 'WHEN', 'WHENEVER', 'WHERE', 'WITH', 'WITHOUT', 'WORK', 'WRITE', 'YEAR', 'ZONE'), suffix=r'\b'), Keyword), (words(( 'ARRAY', 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'CHARACTER', 'DATE', 'DEC', 'DECIMAL', 'FLOAT', 'INT', 'INTEGER', 'INTERVAL', 'NUMBER', 'NUMERIC', 'REAL', 'SERIAL', 'SMALLINT', 'VARCHAR', 'VARYING', 'INT8', 'SERIAL8', 'TEXT'), suffix=r'\b'), Name.Builtin), (r'[+*/<>=~!@#%^&|`?-]', Operator), (r'[0-9]+', Number.Integer), # TODO: Backslash escapes? (r"'(''|[^'])*'", String.Single), (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL (r'[a-z_][\w$]*', Name), # allow $s in strings for Oracle (r'[;:()\[\],.]', Punctuation) ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } def analyse_text(self, text): return class TransactSqlLexer(RegexLexer): """ Transact-SQL (T-SQL) is Microsoft's and Sybase's proprietary extension to SQL. The list of keywords includes ODBC and keywords reserved for future use.. 
""" name = 'Transact-SQL' aliases = ['tsql', 't-sql'] filenames = ['*.sql'] mimetypes = ['text/x-tsql'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Whitespace), (r'--.*?$\n?', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (words(_tsql_builtins.OPERATORS), Operator), (words(_tsql_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word), (words(_tsql_builtins.TYPES, suffix=r'\b'), Name.Class), (words(_tsql_builtins.FUNCTIONS, suffix=r'\b'), Name.Function), (r'(goto)(\s+)(\w+\b)', bygroups(Keyword, Whitespace, Name.Label)), (words(_tsql_builtins.KEYWORDS, suffix=r'\b'), Keyword), (r'(\[)([^]]+)(\])', bygroups(Operator, Name, Operator)), (r'0x[0-9a-f]+', Number.Hex), # Float variant 1, for example: 1., 1.e2, 1.2e3 (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float), # Float variant 2, for example: .1, .1e2 (r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), # Float variant 3, for example: 123e45 (r'[0-9]+e[+-]?[0-9]+', Number.Float), (r'[0-9]+', Number.Integer), (r"'(''|[^'])*'", String.Single), (r'"(""|[^"])*"', String.Symbol), (r'[;(),.]', Punctuation), # Below we use \w even for the first "real" character because # tokens starting with a digit have already been recognized # as Number above. (r'@@\w+', Name.Builtin), (r'@\w+', Name.Variable), (r'(\w+)(:)', bygroups(Name.Label, Punctuation)), (r'#?#?\w+', Name), # names for temp tables and anything else (r'\?', Name.Variable.Magic), # parameter for prepared statements ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), (r'[^/*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } def analyse_text(text): rating = 0 if tsql_declare_re.search(text): # Found T-SQL variable declaration. 
rating = 1.0 else: name_between_backtick_count = len( name_between_backtick_re.findall(text)) name_between_bracket_count = len( name_between_bracket_re.findall(text)) # We need to check if there are any names using # backticks or brackets, as otherwise both are 0 # and 0 >= 2 * 0, so we would always assume it's true dialect_name_count = name_between_backtick_count + name_between_bracket_count if dialect_name_count >= 1 and \ name_between_bracket_count >= 2 * name_between_backtick_count: # Found at least twice as many [name] as `name`. rating += 0.5 elif name_between_bracket_count > name_between_backtick_count: rating += 0.2 elif name_between_bracket_count > 0: rating += 0.1 if tsql_variable_re.search(text) is not None: rating += 0.1 if tsql_go_re.search(text) is not None: rating += 0.1 return rating class MySqlLexer(RegexLexer): """The Oracle MySQL lexer. This lexer does not attempt to maintain strict compatibility with MariaDB syntax or keywords. Although MySQL and MariaDB's common code history suggests there may be significant overlap between the two, compatibility between the two is not a target for this lexer. """ name = 'MySQL' aliases = ['mysql'] mimetypes = ['text/x-mysql'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Whitespace), # Comments (r'(?:#|--\s+).*', Comment.Single), (r'/\*\+', Comment.Special, 'optimizer-hints'), (r'/\*', Comment.Multiline, 'multiline-comment'), # Hexadecimal literals (r"x'([0-9a-f]{2})+'", Number.Hex), # MySQL requires paired hex characters in this form. 
(r'0x[0-9a-f]+', Number.Hex), # Binary literals (r"b'[01]+'", Number.Bin), (r'0b[01]+', Number.Bin), # Numeric literals (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float), # Mandatory integer, optional fraction and exponent (r'[0-9]*\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), # Mandatory fraction, optional integer and exponent (r'[0-9]+e[+-]?[0-9]+', Number.Float), # Exponents with integer significands are still floats (r'[0-9]+(?=[^0-9a-z$_\u0080-\uffff])', Number.Integer), # Integers that are not in a schema object name # Date literals (r"\{\s*d\s*(?P<quote>['\"])\s*\d{2}(\d{2})?.?\d{2}.?\d{2}\s*(?P=quote)\s*\}", Literal.Date), # Time literals (r"\{\s*t\s*(?P<quote>['\"])\s*(?:\d+\s+)?\d{1,2}.?\d{1,2}.?\d{1,2}(\.\d*)?\s*(?P=quote)\s*\}", Literal.Date), # Timestamp literals ( r"\{\s*ts\s*(?P<quote>['\"])\s*" r"\d{2}(?:\d{2})?.?\d{2}.?\d{2}" # Date part r"\s+" # Whitespace between date and time r"\d{1,2}.?\d{1,2}.?\d{1,2}(\.\d*)?" # Time part r"\s*(?P=quote)\s*\}", Literal.Date ), # String literals (r"'", String.Single, 'single-quoted-string'), (r'"', String.Double, 'double-quoted-string'), # Variables (r'@@(?:global\.|persist\.|persist_only\.|session\.)?[a-z_]+', Name.Variable), (r'@[a-z0-9_$.]+', Name.Variable), (r"@'", Name.Variable, 'single-quoted-variable'), (r'@"', Name.Variable, 'double-quoted-variable'), (r"@`", Name.Variable, 'backtick-quoted-variable'), (r'\?', Name.Variable), # For demonstrating prepared statements # Operators (r'[!%&*+/:<=>^|~-]+', Operator), # Exceptions; these words tokenize differently in different contexts. (r'\b(set)(?!\s*\()', Keyword), (r'\b(character)(\s+)(set)\b', bygroups(Keyword, Whitespace, Keyword)), # In all other known cases, "SET" is tokenized by MYSQL_DATATYPES. 
(words(MYSQL_CONSTANTS, prefix=r'\b', suffix=r'\b'), Name.Constant), (words(MYSQL_DATATYPES, prefix=r'\b', suffix=r'\b'), Keyword.Type), (words(MYSQL_KEYWORDS, prefix=r'\b', suffix=r'\b'), Keyword), (words(MYSQL_FUNCTIONS, prefix=r'\b', suffix=r'\b(\s*)(\()'), bygroups(Name.Function, Whitespace, Punctuation)), # Schema object names # # Note: Although the first regex supports unquoted all-numeric # identifiers, this will not be a problem in practice because # numeric literals have already been handled above. # ('[0-9a-z$_\u0080-\uffff]+', Name), (r'`', Name.Quoted, 'schema-object-name'), # Punctuation (r'[(),.;]', Punctuation), ], # Multiline comment substates # --------------------------- 'optimizer-hints': [ (r'[^*a-z]+', Comment.Special), (r'\*/', Comment.Special, '#pop'), (words(MYSQL_OPTIMIZER_HINTS, suffix=r'\b'), Comment.Preproc), ('[a-z]+', Comment.Special), (r'\*', Comment.Special), ], 'multiline-comment': [ (r'[^*]+', Comment.Multiline), (r'\*/', Comment.Multiline, '#pop'), (r'\*', Comment.Multiline), ], # String substates # ---------------- 'single-quoted-string': [ (r"[^'\\]+", String.Single), (r"''", String.Escape), (r"""\\[0'"bnrtZ\\%_]""", String.Escape), (r"'", String.Single, '#pop'), ], 'double-quoted-string': [ (r'[^"\\]+', String.Double), (r'""', String.Escape), (r"""\\[0'"bnrtZ\\%_]""", String.Escape), (r'"', String.Double, '#pop'), ], # Variable substates # ------------------ 'single-quoted-variable': [ (r"[^']+", Name.Variable), (r"''", Name.Variable), (r"'", Name.Variable, '#pop'), ], 'double-quoted-variable': [ (r'[^"]+', Name.Variable), (r'""', Name.Variable), (r'"', Name.Variable, '#pop'), ], 'backtick-quoted-variable': [ (r'[^`]+', Name.Variable), (r'``', Name.Variable), (r'`', Name.Variable, '#pop'), ], # Schema object name substates # ---------------------------- # # "Name.Quoted" and "Name.Quoted.Escape" are non-standard but # formatters will style them as "Name" by default but add # additional styles based on the token name. 
This gives users # flexibility to add custom styles as desired. # 'schema-object-name': [ (r'[^`]+', Name.Quoted), (r'``', Name.Quoted.Escape), (r'`', Name.Quoted, '#pop'), ], } def analyse_text(text): rating = 0 name_between_backtick_count = len( name_between_backtick_re.findall(text)) name_between_bracket_count = len( name_between_bracket_re.findall(text)) # Same logic as above in the TSQL analysis dialect_name_count = name_between_backtick_count + name_between_bracket_count if dialect_name_count >= 1 and \ name_between_backtick_count >= 2 * name_between_bracket_count: # Found at least twice as many `name` as [name]. rating += 0.5 elif name_between_backtick_count > name_between_bracket_count: rating += 0.2 elif name_between_backtick_count > 0: rating += 0.1 return rating class SqliteConsoleLexer(Lexer): """ Lexer for example sessions using sqlite3. .. versionadded:: 0.11 """ name = 'sqlite3con' aliases = ['sqlite3'] filenames = ['*.sqlite3-console'] mimetypes = ['text/x-sqlite3-console'] def get_tokens_unprocessed(self, data): sql = SqlLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(data): line = match.group() prompt_match = sqlite_prompt_re.match(line) if prompt_match is not None: insertions.append((len(curcode), [(0, Generic.Prompt, line[:7])])) insertions.append((len(curcode), [(7, Whitespace, ' ')])) curcode += line[8:] else: if curcode: yield from do_insertions(insertions, sql.get_tokens_unprocessed(curcode)) curcode = '' insertions = [] if line.startswith('SQL error: '): yield (match.start(), Generic.Traceback, line) else: yield (match.start(), Generic.Output, line) if curcode: yield from do_insertions(insertions, sql.get_tokens_unprocessed(curcode)) class RqlLexer(RegexLexer): """ Lexer for Relation Query Language. .. 
versionadded:: 2.0 """ name = 'RQL' url = 'http://www.logilab.org/project/rql' aliases = ['rql'] filenames = ['*.rql'] mimetypes = ['text/x-rql'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Whitespace), (r'(DELETE|SET|INSERT|UNION|DISTINCT|WITH|WHERE|BEING|OR' r'|AND|NOT|GROUPBY|HAVING|ORDERBY|ASC|DESC|LIMIT|OFFSET' r'|TODAY|NOW|TRUE|FALSE|NULL|EXISTS)\b', Keyword), (r'[+*/<>=%-]', Operator), (r'(Any|is|instance_of|CWEType|CWRelation)\b', Name.Builtin), (r'[0-9]+', Number.Integer), (r'[A-Z_]\w*\??', Name), (r"'(''|[^'])*'", String.Single), (r'"(""|[^"])*"', String.Single), (r'[;:()\[\],.]', Punctuation) ], }
34,151
Python
39.705602
117
0.515768
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/comal.py
""" pygments.lexers.comal ~~~~~~~~~~~~~~~~~~~~~ Lexer for COMAL-80. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, words from pygments.token import Comment, Whitespace, Operator, Keyword, String, \ Number, Name, Punctuation __all__ = ["Comal80Lexer"] class Comal80Lexer(RegexLexer): """ For COMAL-80 source code. """ name = 'COMAL-80' url = 'https://en.wikipedia.org/wiki/COMAL' aliases = ['comal', 'comal80'] filenames = ['*.cml', '*.comal'] flags = re.IGNORECASE # # COMAL allows for some strange characters in names which we list here so # keywords and word operators will not be recognized at the start of an # identifier. # _suffix = r"\b(?!['\[\]←£\\])" _identifier = r"[a-z]['\[\]←£\\\w]*" tokens = { 'root': [ (r'//.*\n', Comment.Single), (r'\s+', Whitespace), (r':[=+-]|\<\>|[-+*/^↑<>=]', Operator), (r'(and +then|or +else)' + _suffix, Operator.Word), (words([ 'and', 'bitand', 'bitor', 'bitxor', 'div', 'in', 'mod', 'not', 'or'], suffix=_suffix,), Operator.Word), (words([ 'append', 'at', 'case', 'chain', 'close', 'copy', 'create', 'cursor', 'data', 'delete', 'dir', 'do', 'elif', 'else', 'end', 'endcase', 'endif', 'endfor', 'endloop', 'endtrap', 'endwhile', 'exec', 'exit', 'file', 'for', 'goto', 'handler', 'if', 'input', 'let', 'loop', 'mount', 'null', 'of', 'open', 'otherwise', 'output', 'page', 'pass', 'poke', 'print', 'random', 'read', 'repeat', 'report', 'return', 'rename', 'restore', 'select', 'step', 'stop', 'sys', 'then', 'to', 'trap', 'unit', 'unit$', 'until', 'using', 'when', 'while', 'write', 'zone'], suffix=_suffix), Keyword.Reserved), (words([ 'closed', 'dim', 'endfunc', 'endproc', 'external', 'func', 'import', 'proc', 'ref', 'use'], suffix=_suffix), Keyword.Declaration), (words([ 'abs', 'atn', 'chr$', 'cos', 'eod', 'eof', 'err', 'errfile', 'errtext', 'esc', 'exp', 'int', 'key$', 'len', 'log', 'ord', 'peek', 'randomize', 'rnd', 'sgn', 'sin', 
'spc$', 'sqr', 'status$', 'str$', 'tab', 'tan', 'time', 'val'], suffix=_suffix), Name.Builtin), (words(['false', 'pi', 'true'], suffix=_suffix), Keyword.Constant), (r'"', String, 'string'), (_identifier + r":(?=[ \n/])", Name.Label), (_identifier + r"[$#]?", Name), (r'%[01]+', Number.Bin), (r'\$[0-9a-f]+', Number.Hex), (r'\d*\.\d*(e[-+]?\d+)?', Number.Float), (r'\d+', Number.Integer), (r'[(),:;]', Punctuation), ], 'string': [ (r'[^"]+', String), (r'"[0-9]*"', String.Escape), (r'"', String, '#pop'), ], }
3,148
Python
37.876543
89
0.456163
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ooc.py
""" pygments.lexers.ooc ~~~~~~~~~~~~~~~~~~~ Lexers for the Ooc language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation __all__ = ['OocLexer'] class OocLexer(RegexLexer): """ For Ooc source code .. versionadded:: 1.2 """ name = 'Ooc' url = 'http://ooc-lang.org/' aliases = ['ooc'] filenames = ['*.ooc'] mimetypes = ['text/x-ooc'] tokens = { 'root': [ (words(( 'class', 'interface', 'implement', 'abstract', 'extends', 'from', 'this', 'super', 'new', 'const', 'final', 'static', 'import', 'use', 'extern', 'inline', 'proto', 'break', 'continue', 'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null'), prefix=r'\b', suffix=r'\b'), Keyword), (r'include\b', Keyword, 'include'), (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)', bygroups(Keyword, Text, Keyword, Text, Name.Class)), (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)', bygroups(Keyword, Text, Name.Function)), (r'\bfunc\b', Keyword), # Note: %= and ^= not listed on http://ooc-lang.org/syntax (r'//.*', Comment), (r'(?s)/\*.*?\*/', Comment.Multiline), (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|' r'&&?|\|\|?|\^=?)', Operator), (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text, Name.Function)), (r'[A-Z][A-Z0-9_]+', Name.Constant), (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class), (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()', bygroups(Name.Function, Text)), (r'[a-z]\w*', Name.Variable), # : introduces types (r'[:(){}\[\];,]', Punctuation), (r'0x[0-9a-fA-F]+', Number.Hex), (r'0c[0-9]+', Number.Oct), (r'0b[01]+', Number.Bin), (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float), (r'[0-9_]+', Number.Decimal), (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"', String.Double), 
(r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'@', Punctuation), # pointer dereference (r'\.', Punctuation), # imports or chain operator (r'\\[ \t\n]', Text), (r'[ \t]+', Text), ], 'include': [ (r'[\w/]+', Name), (r',', Punctuation), (r'[ \t]', Text), (r'[;\n]', Text, '#pop'), ], }
2,982
Python
33.686046
81
0.400402
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_sourcemod_builtins.py
""" pygments.lexers._sourcemod_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This file contains the names of SourceMod functions. Do not edit the FUNCTIONS list by hand. Run with `python -I` to regenerate. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ FUNCTIONS = ( 'OnEntityCreated', 'OnEntityDestroyed', 'OnGetGameDescription', 'OnLevelInit', 'SDKHook', 'SDKHookEx', 'SDKUnhook', 'SDKHooks_TakeDamage', 'SDKHooks_DropWeapon', 'TopMenuHandler', 'CreateTopMenu', 'LoadTopMenuConfig', 'AddToTopMenu', 'GetTopMenuInfoString', 'GetTopMenuObjName', 'RemoveFromTopMenu', 'DisplayTopMenu', 'DisplayTopMenuCategory', 'FindTopMenuCategory', 'SetTopMenuTitleCaching', 'OnAdminMenuCreated', 'OnAdminMenuReady', 'GetAdminTopMenu', 'AddTargetsToMenu', 'AddTargetsToMenu2', 'RedisplayAdminMenu', 'TEHook', 'AddTempEntHook', 'RemoveTempEntHook', 'TE_Start', 'TE_IsValidProp', 'TE_WriteNum', 'TE_ReadNum', 'TE_WriteFloat', 'TE_ReadFloat', 'TE_WriteVector', 'TE_ReadVector', 'TE_WriteAngles', 'TE_WriteFloatArray', 'TE_Send', 'TE_WriteEncodedEnt', 'TE_SendToAll', 'TE_SendToClient', 'CreateKeyValues', 'KvSetString', 'KvSetNum', 'KvSetUInt64', 'KvSetFloat', 'KvSetColor', 'KvSetVector', 'KvGetString', 'KvGetNum', 'KvGetFloat', 'KvGetColor', 'KvGetUInt64', 'KvGetVector', 'KvJumpToKey', 'KvJumpToKeySymbol', 'KvGotoFirstSubKey', 'KvGotoNextKey', 'KvSavePosition', 'KvDeleteKey', 'KvDeleteThis', 'KvGoBack', 'KvRewind', 'KvGetSectionName', 'KvSetSectionName', 'KvGetDataType', 'KeyValuesToFile', 'FileToKeyValues', 'StringToKeyValues', 'KvSetEscapeSequences', 'KvNodesInStack', 'KvCopySubkeys', 'KvFindKeyById', 'KvGetNameSymbol', 'KvGetSectionSymbol', 'TE_SetupSparks', 'TE_SetupSmoke', 'TE_SetupDust', 'TE_SetupMuzzleFlash', 'TE_SetupMetalSparks', 'TE_SetupEnergySplash', 'TE_SetupArmorRicochet', 'TE_SetupGlowSprite', 'TE_SetupExplosion', 'TE_SetupBloodSprite', 'TE_SetupBeamRingPoint', 'TE_SetupBeamPoints', 'TE_SetupBeamLaser', 'TE_SetupBeamRing', 
'TE_SetupBeamFollow', 'HookEvent', 'HookEventEx', 'UnhookEvent', 'CreateEvent', 'FireEvent', 'CancelCreatedEvent', 'GetEventBool', 'SetEventBool', 'GetEventInt', 'SetEventInt', 'GetEventFloat', 'SetEventFloat', 'GetEventString', 'SetEventString', 'GetEventName', 'SetEventBroadcast', 'GetUserMessageType', 'GetUserMessageId', 'GetUserMessageName', 'StartMessage', 'StartMessageEx', 'EndMessage', 'MsgHook', 'MsgPostHook', 'HookUserMessage', 'UnhookUserMessage', 'StartMessageAll', 'StartMessageOne', 'InactivateClient', 'ReconnectClient', 'GetMaxEntities', 'GetEntityCount', 'IsValidEntity', 'IsValidEdict', 'IsEntNetworkable', 'CreateEdict', 'RemoveEdict', 'GetEdictFlags', 'SetEdictFlags', 'GetEdictClassname', 'GetEntityNetClass', 'ChangeEdictState', 'GetEntData', 'SetEntData', 'GetEntDataFloat', 'SetEntDataFloat', 'GetEntDataEnt2', 'SetEntDataEnt2', 'GetEntDataVector', 'SetEntDataVector', 'GetEntDataString', 'SetEntDataString', 'FindSendPropOffs', 'FindSendPropInfo', 'FindDataMapOffs', 'FindDataMapInfo', 'GetEntSendPropOffs', 'GetEntProp', 'SetEntProp', 'GetEntPropFloat', 'SetEntPropFloat', 'GetEntPropEnt', 'SetEntPropEnt', 'GetEntPropVector', 'SetEntPropVector', 'GetEntPropString', 'SetEntPropString', 'GetEntPropArraySize', 'GetEntDataArray', 'SetEntDataArray', 'GetEntityAddress', 'GetEntityClassname', 'float', 'FloatMul', 'FloatDiv', 'FloatAdd', 'FloatSub', 'FloatFraction', 'RoundToZero', 'RoundToCeil', 'RoundToFloor', 'RoundToNearest', 'FloatCompare', 'SquareRoot', 'Pow', 'Exponential', 'Logarithm', 'Sine', 'Cosine', 'Tangent', 'FloatAbs', 'ArcTangent', 'ArcCosine', 'ArcSine', 'ArcTangent2', 'RoundFloat', 'operator%', 'DegToRad', 'RadToDeg', 'GetURandomInt', 'GetURandomFloat', 'SetURandomSeed', 'SetURandomSeedSimple', 'RemovePlayerItem', 'GivePlayerItem', 'GetPlayerWeaponSlot', 'IgniteEntity', 'ExtinguishEntity', 'TeleportEntity', 'ForcePlayerSuicide', 'SlapPlayer', 'FindEntityByClassname', 'GetClientEyeAngles', 'CreateEntityByName', 'DispatchSpawn', 
'DispatchKeyValue', 'DispatchKeyValueFloat', 'DispatchKeyValueVector', 'GetClientAimTarget', 'GetTeamCount', 'GetTeamName', 'GetTeamScore', 'SetTeamScore', 'GetTeamClientCount', 'SetEntityModel', 'GetPlayerDecalFile', 'GetPlayerJingleFile', 'GetServerNetStats', 'EquipPlayerWeapon', 'ActivateEntity', 'SetClientInfo', 'GivePlayerAmmo', 'SetClientListeningFlags', 'GetClientListeningFlags', 'SetListenOverride', 'GetListenOverride', 'IsClientMuted', 'TR_GetPointContents', 'TR_GetPointContentsEnt', 'TR_TraceRay', 'TR_TraceHull', 'TR_TraceRayFilter', 'TR_TraceHullFilter', 'TR_TraceRayEx', 'TR_TraceHullEx', 'TR_TraceRayFilterEx', 'TR_TraceHullFilterEx', 'TR_GetFraction', 'TR_GetEndPosition', 'TR_GetEntityIndex', 'TR_DidHit', 'TR_GetHitGroup', 'TR_GetPlaneNormal', 'TR_PointOutsideWorld', 'SortIntegers', 'SortFloats', 'SortStrings', 'SortFunc1D', 'SortCustom1D', 'SortCustom2D', 'SortADTArray', 'SortFuncADTArray', 'SortADTArrayCustom', 'CompileRegex', 'MatchRegex', 'GetRegexSubString', 'SimpleRegexMatch', 'TF2_GetPlayerClass', 'TF2_SetPlayerClass', 'TF2_RemoveWeaponSlot', 'TF2_RemoveAllWeapons', 'TF2_IsPlayerInCondition', 'TF2_GetObjectType', 'TF2_GetObjectMode', 'NominateMap', 'RemoveNominationByMap', 'RemoveNominationByOwner', 'GetExcludeMapList', 'GetNominatedMapList', 'CanMapChooserStartVote', 'InitiateMapChooserVote', 'HasEndOfMapVoteFinished', 'EndOfMapVoteEnabled', 'OnNominationRemoved', 'OnMapVoteStarted', 'CreateTimer', 'KillTimer', 'TriggerTimer', 'GetTickedTime', 'GetMapTimeLeft', 'GetMapTimeLimit', 'ExtendMapTimeLimit', 'GetTickInterval', 'OnMapTimeLeftChanged', 'IsServerProcessing', 'CreateDataTimer', 'ByteCountToCells', 'CreateArray', 'ClearArray', 'CloneArray', 'ResizeArray', 'GetArraySize', 'PushArrayCell', 'PushArrayString', 'PushArrayArray', 'GetArrayCell', 'GetArrayString', 'GetArrayArray', 'SetArrayCell', 'SetArrayString', 'SetArrayArray', 'ShiftArrayUp', 'RemoveFromArray', 'SwapArrayItems', 'FindStringInArray', 'FindValueInArray', 'ProcessTargetString', 
'ReplyToTargetError', 'MultiTargetFilter', 'AddMultiTargetFilter', 'RemoveMultiTargetFilter', 'OnBanClient', 'OnBanIdentity', 'OnRemoveBan', 'BanClient', 'BanIdentity', 'RemoveBan', 'CreateTrie', 'SetTrieValue', 'SetTrieArray', 'SetTrieString', 'GetTrieValue', 'GetTrieArray', 'GetTrieString', 'RemoveFromTrie', 'ClearTrie', 'GetTrieSize', 'GetFunctionByName', 'CreateGlobalForward', 'CreateForward', 'GetForwardFunctionCount', 'AddToForward', 'RemoveFromForward', 'RemoveAllFromForward', 'Call_StartForward', 'Call_StartFunction', 'Call_PushCell', 'Call_PushCellRef', 'Call_PushFloat', 'Call_PushFloatRef', 'Call_PushArray', 'Call_PushArrayEx', 'Call_PushString', 'Call_PushStringEx', 'Call_Finish', 'Call_Cancel', 'NativeCall', 'CreateNative', 'ThrowNativeError', 'GetNativeStringLength', 'GetNativeString', 'SetNativeString', 'GetNativeCell', 'GetNativeCellRef', 'SetNativeCellRef', 'GetNativeArray', 'SetNativeArray', 'FormatNativeString', 'RequestFrameCallback', 'RequestFrame', 'OnRebuildAdminCache', 'DumpAdminCache', 'AddCommandOverride', 'GetCommandOverride', 'UnsetCommandOverride', 'CreateAdmGroup', 'FindAdmGroup', 'SetAdmGroupAddFlag', 'GetAdmGroupAddFlag', 'GetAdmGroupAddFlags', 'SetAdmGroupImmuneFrom', 'GetAdmGroupImmuneCount', 'GetAdmGroupImmuneFrom', 'AddAdmGroupCmdOverride', 'GetAdmGroupCmdOverride', 'RegisterAuthIdentType', 'CreateAdmin', 'GetAdminUsername', 'BindAdminIdentity', 'SetAdminFlag', 'GetAdminFlag', 'GetAdminFlags', 'AdminInheritGroup', 'GetAdminGroupCount', 'GetAdminGroup', 'SetAdminPassword', 'GetAdminPassword', 'FindAdminByIdentity', 'RemoveAdmin', 'FlagBitsToBitArray', 'FlagBitArrayToBits', 'FlagArrayToBits', 'FlagBitsToArray', 'FindFlagByName', 'FindFlagByChar', 'FindFlagChar', 'ReadFlagString', 'CanAdminTarget', 'CreateAuthMethod', 'SetAdmGroupImmunityLevel', 'GetAdmGroupImmunityLevel', 'SetAdminImmunityLevel', 'GetAdminImmunityLevel', 'FlagToBit', 'BitToFlag', 'ServerCommand', 'ServerCommandEx', 'InsertServerCommand', 'ServerExecute', 
'ClientCommand', 'FakeClientCommand', 'FakeClientCommandEx', 'PrintToServer', 'PrintToConsole', 'ReplyToCommand', 'GetCmdReplySource', 'SetCmdReplySource', 'IsChatTrigger', 'ShowActivity2', 'ShowActivity', 'ShowActivityEx', 'FormatActivitySource', 'SrvCmd', 'RegServerCmd', 'ConCmd', 'RegConsoleCmd', 'RegAdminCmd', 'GetCmdArgs', 'GetCmdArg', 'GetCmdArgString', 'CreateConVar', 'FindConVar', 'ConVarChanged', 'HookConVarChange', 'UnhookConVarChange', 'GetConVarBool', 'SetConVarBool', 'GetConVarInt', 'SetConVarInt', 'GetConVarFloat', 'SetConVarFloat', 'GetConVarString', 'SetConVarString', 'ResetConVar', 'GetConVarDefault', 'GetConVarFlags', 'SetConVarFlags', 'GetConVarBounds', 'SetConVarBounds', 'GetConVarName', 'QueryClientConVar', 'GetCommandIterator', 'ReadCommandIterator', 'CheckCommandAccess', 'CheckAccess', 'IsValidConVarChar', 'GetCommandFlags', 'SetCommandFlags', 'FindFirstConCommand', 'FindNextConCommand', 'SendConVarValue', 'AddServerTag', 'RemoveServerTag', 'CommandListener', 'AddCommandListener', 'RemoveCommandListener', 'CommandExists', 'OnClientSayCommand', 'OnClientSayCommand_Post', 'TF2_IgnitePlayer', 'TF2_RespawnPlayer', 'TF2_RegeneratePlayer', 'TF2_AddCondition', 'TF2_RemoveCondition', 'TF2_SetPlayerPowerPlay', 'TF2_DisguisePlayer', 'TF2_RemovePlayerDisguise', 'TF2_StunPlayer', 'TF2_MakeBleed', 'TF2_GetClass', 'TF2_CalcIsAttackCritical', 'TF2_OnIsHolidayActive', 'TF2_IsHolidayActive', 'TF2_IsPlayerInDuel', 'TF2_RemoveWearable', 'TF2_OnConditionAdded', 'TF2_OnConditionRemoved', 'TF2_OnWaitingForPlayersStart', 'TF2_OnWaitingForPlayersEnd', 'TF2_OnPlayerTeleport', 'SQL_Connect', 'SQL_DefConnect', 'SQL_ConnectCustom', 'SQLite_UseDatabase', 'SQL_CheckConfig', 'SQL_GetDriver', 'SQL_ReadDriver', 'SQL_GetDriverIdent', 'SQL_GetDriverProduct', 'SQL_SetCharset', 'SQL_GetAffectedRows', 'SQL_GetInsertId', 'SQL_GetError', 'SQL_EscapeString', 'SQL_QuoteString', 'SQL_FastQuery', 'SQL_Query', 'SQL_PrepareQuery', 'SQL_FetchMoreResults', 'SQL_HasResultSet', 
'SQL_GetRowCount', 'SQL_GetFieldCount', 'SQL_FieldNumToName', 'SQL_FieldNameToNum', 'SQL_FetchRow', 'SQL_MoreRows', 'SQL_Rewind', 'SQL_FetchString', 'SQL_FetchFloat', 'SQL_FetchInt', 'SQL_IsFieldNull', 'SQL_FetchSize', 'SQL_BindParamInt', 'SQL_BindParamFloat', 'SQL_BindParamString', 'SQL_Execute', 'SQL_LockDatabase', 'SQL_UnlockDatabase', 'SQLTCallback', 'SQL_IsSameConnection', 'SQL_TConnect', 'SQL_TQuery', 'SQL_CreateTransaction', 'SQL_AddQuery', 'SQLTxnSuccess', 'SQLTxnFailure', 'SQL_ExecuteTransaction', 'CloseHandle', 'CloneHandle', 'MenuHandler', 'CreateMenu', 'DisplayMenu', 'DisplayMenuAtItem', 'AddMenuItem', 'InsertMenuItem', 'RemoveMenuItem', 'RemoveAllMenuItems', 'GetMenuItem', 'GetMenuSelectionPosition', 'GetMenuItemCount', 'SetMenuPagination', 'GetMenuPagination', 'GetMenuStyle', 'SetMenuTitle', 'GetMenuTitle', 'CreatePanelFromMenu', 'GetMenuExitButton', 'SetMenuExitButton', 'GetMenuExitBackButton', 'SetMenuExitBackButton', 'SetMenuNoVoteButton', 'CancelMenu', 'GetMenuOptionFlags', 'SetMenuOptionFlags', 'IsVoteInProgress', 'CancelVote', 'VoteMenu', 'VoteMenuToAll', 'VoteHandler', 'SetVoteResultCallback', 'CheckVoteDelay', 'IsClientInVotePool', 'RedrawClientVoteMenu', 'GetMenuStyleHandle', 'CreatePanel', 'CreateMenuEx', 'GetClientMenu', 'CancelClientMenu', 'GetMaxPageItems', 'GetPanelStyle', 'SetPanelTitle', 'DrawPanelItem', 'DrawPanelText', 'CanPanelDrawFlags', 'SetPanelKeys', 'SendPanelToClient', 'GetPanelTextRemaining', 'GetPanelCurrentKey', 'SetPanelCurrentKey', 'RedrawMenuItem', 'InternalShowMenu', 'GetMenuVoteInfo', 'IsNewVoteAllowed', 'PrefetchSound', 'EmitAmbientSound', 'FadeClientVolume', 'StopSound', 'EmitSound', 'EmitSentence', 'GetDistGainFromSoundLevel', 'AmbientSHook', 'NormalSHook', 'AddAmbientSoundHook', 'AddNormalSoundHook', 'RemoveAmbientSoundHook', 'RemoveNormalSoundHook', 'EmitSoundToClient', 'EmitSoundToAll', 'ATTN_TO_SNDLEVEL', 'GetGameSoundParams', 'EmitGameSound', 'EmitAmbientGameSound', 'EmitGameSoundToClient', 
'EmitGameSoundToAll', 'PrecacheScriptSound', 'strlen', 'StrContains', 'strcmp', 'strncmp', 'StrEqual', 'strcopy', 'Format', 'FormatEx', 'VFormat', 'StringToInt', 'StringToIntEx', 'IntToString', 'StringToFloat', 'StringToFloatEx', 'FloatToString', 'BreakString', 'TrimString', 'SplitString', 'ReplaceString', 'ReplaceStringEx', 'GetCharBytes', 'IsCharAlpha', 'IsCharNumeric', 'IsCharSpace', 'IsCharMB', 'IsCharUpper', 'IsCharLower', 'StripQuotes', 'CharToUpper', 'CharToLower', 'FindCharInString', 'StrCat', 'ExplodeString', 'ImplodeStrings', 'GetVectorLength', 'GetVectorDistance', 'GetVectorDotProduct', 'GetVectorCrossProduct', 'NormalizeVector', 'GetAngleVectors', 'GetVectorAngles', 'GetVectorVectors', 'AddVectors', 'SubtractVectors', 'ScaleVector', 'NegateVector', 'MakeVectorFromPoints', 'BaseComm_IsClientGagged', 'BaseComm_IsClientMuted', 'BaseComm_SetClientGag', 'BaseComm_SetClientMute', 'FormatUserLogText', 'FindPluginByFile', 'FindTarget', 'AcceptEntityInput', 'SetVariantBool', 'SetVariantString', 'SetVariantInt', 'SetVariantFloat', 'SetVariantVector3D', 'SetVariantPosVector3D', 'SetVariantColor', 'SetVariantEntity', 'GameRules_GetProp', 'GameRules_SetProp', 'GameRules_GetPropFloat', 'GameRules_SetPropFloat', 'GameRules_GetPropEnt', 'GameRules_SetPropEnt', 'GameRules_GetPropVector', 'GameRules_SetPropVector', 'GameRules_GetPropString', 'GameRules_SetPropString', 'GameRules_GetRoundState', 'OnClientConnect', 'OnClientConnected', 'OnClientPutInServer', 'OnClientDisconnect', 'OnClientDisconnect_Post', 'OnClientCommand', 'OnClientSettingsChanged', 'OnClientAuthorized', 'OnClientPreAdminCheck', 'OnClientPostAdminFilter', 'OnClientPostAdminCheck', 'GetMaxClients', 'GetMaxHumanPlayers', 'GetClientCount', 'GetClientName', 'GetClientIP', 'GetClientAuthString', 'GetClientAuthId', 'GetSteamAccountID', 'GetClientUserId', 'IsClientConnected', 'IsClientInGame', 'IsClientInKickQueue', 'IsClientAuthorized', 'IsFakeClient', 'IsClientSourceTV', 'IsClientReplay', 'IsClientObserver', 
'IsPlayerAlive', 'GetClientInfo', 'GetClientTeam', 'SetUserAdmin', 'GetUserAdmin', 'AddUserFlags', 'RemoveUserFlags', 'SetUserFlagBits', 'GetUserFlagBits', 'CanUserTarget', 'RunAdminCacheChecks', 'NotifyPostAdminCheck', 'CreateFakeClient', 'SetFakeClientConVar', 'GetClientHealth', 'GetClientModel', 'GetClientWeapon', 'GetClientMaxs', 'GetClientMins', 'GetClientAbsAngles', 'GetClientAbsOrigin', 'GetClientArmor', 'GetClientDeaths', 'GetClientFrags', 'GetClientDataRate', 'IsClientTimingOut', 'GetClientTime', 'GetClientLatency', 'GetClientAvgLatency', 'GetClientAvgLoss', 'GetClientAvgChoke', 'GetClientAvgData', 'GetClientAvgPackets', 'GetClientOfUserId', 'KickClient', 'KickClientEx', 'ChangeClientTeam', 'GetClientSerial', 'GetClientFromSerial', 'FindStringTable', 'GetNumStringTables', 'GetStringTableNumStrings', 'GetStringTableMaxStrings', 'GetStringTableName', 'FindStringIndex', 'ReadStringTable', 'GetStringTableDataLength', 'GetStringTableData', 'SetStringTableData', 'AddToStringTable', 'LockStringTables', 'AddFileToDownloadsTable', 'GetEntityFlags', 'SetEntityFlags', 'GetEntityMoveType', 'SetEntityMoveType', 'GetEntityRenderMode', 'SetEntityRenderMode', 'GetEntityRenderFx', 'SetEntityRenderFx', 'SetEntityRenderColor', 'GetEntityGravity', 'SetEntityGravity', 'SetEntityHealth', 'GetClientButtons', 'EntityOutput', 'HookEntityOutput', 'UnhookEntityOutput', 'HookSingleEntityOutput', 'UnhookSingleEntityOutput', 'SMC_CreateParser', 'SMC_ParseFile', 'SMC_GetErrorString', 'SMC_ParseStart', 'SMC_SetParseStart', 'SMC_ParseEnd', 'SMC_SetParseEnd', 'SMC_NewSection', 'SMC_KeyValue', 'SMC_EndSection', 'SMC_SetReaders', 'SMC_RawLine', 'SMC_SetRawLine', 'BfWriteBool', 'BfWriteByte', 'BfWriteChar', 'BfWriteShort', 'BfWriteWord', 'BfWriteNum', 'BfWriteFloat', 'BfWriteString', 'BfWriteEntity', 'BfWriteAngle', 'BfWriteCoord', 'BfWriteVecCoord', 'BfWriteVecNormal', 'BfWriteAngles', 'BfReadBool', 'BfReadByte', 'BfReadChar', 'BfReadShort', 'BfReadWord', 'BfReadNum', 'BfReadFloat', 
'BfReadString', 'BfReadEntity', 'BfReadAngle', 'BfReadCoord', 'BfReadVecCoord', 'BfReadVecNormal', 'BfReadAngles', 'BfGetNumBytesLeft', 'CreateProfiler', 'StartProfiling', 'StopProfiling', 'GetProfilerTime', 'OnPluginStart', 'AskPluginLoad2', 'OnPluginEnd', 'OnPluginPauseChange', 'OnGameFrame', 'OnMapStart', 'OnMapEnd', 'OnConfigsExecuted', 'OnAutoConfigsBuffered', 'OnAllPluginsLoaded', 'GetMyHandle', 'GetPluginIterator', 'MorePlugins', 'ReadPlugin', 'GetPluginStatus', 'GetPluginFilename', 'IsPluginDebugging', 'GetPluginInfo', 'FindPluginByNumber', 'SetFailState', 'ThrowError', 'GetTime', 'FormatTime', 'LoadGameConfigFile', 'GameConfGetOffset', 'GameConfGetKeyValue', 'GameConfGetAddress', 'GetSysTickCount', 'AutoExecConfig', 'RegPluginLibrary', 'LibraryExists', 'GetExtensionFileStatus', 'OnLibraryAdded', 'OnLibraryRemoved', 'ReadMapList', 'SetMapListCompatBind', 'OnClientFloodCheck', 'OnClientFloodResult', 'CanTestFeatures', 'GetFeatureStatus', 'RequireFeature', 'LoadFromAddress', 'StoreToAddress', 'CreateStack', 'PushStackCell', 'PushStackString', 'PushStackArray', 'PopStackCell', 'PopStackString', 'PopStackArray', 'IsStackEmpty', 'PopStack', 'OnPlayerRunCmd', 'BuildPath', 'OpenDirectory', 'ReadDirEntry', 'OpenFile', 'DeleteFile', 'ReadFileLine', 'ReadFile', 'ReadFileString', 'WriteFile', 'WriteFileString', 'WriteFileLine', 'ReadFileCell', 'WriteFileCell', 'IsEndOfFile', 'FileSeek', 'FilePosition', 'FileExists', 'RenameFile', 'DirExists', 'FileSize', 'FlushFile', 'RemoveDir', 'CreateDirectory', 'GetFileTime', 'LogToOpenFile', 'LogToOpenFileEx', 'PbReadInt', 'PbReadFloat', 'PbReadBool', 'PbReadString', 'PbReadColor', 'PbReadAngle', 'PbReadVector', 'PbReadVector2D', 'PbGetRepeatedFieldCount', 'PbSetInt', 'PbSetFloat', 'PbSetBool', 'PbSetString', 'PbSetColor', 'PbSetAngle', 'PbSetVector', 'PbSetVector2D', 'PbAddInt', 'PbAddFloat', 'PbAddBool', 'PbAddString', 'PbAddColor', 'PbAddAngle', 'PbAddVector', 'PbAddVector2D', 'PbRemoveRepeatedFieldValue', 'PbReadMessage', 
'PbReadRepeatedMessage', 'PbAddMessage', 'SetNextMap', 'GetNextMap', 'ForceChangeLevel', 'GetMapHistorySize', 'GetMapHistory', 'GeoipCode2', 'GeoipCode3', 'GeoipCountry', 'MarkNativeAsOptional', 'RegClientCookie', 'FindClientCookie', 'SetClientCookie', 'GetClientCookie', 'SetAuthIdCookie', 'AreClientCookiesCached', 'OnClientCookiesCached', 'CookieMenuHandler', 'SetCookiePrefabMenu', 'SetCookieMenuItem', 'ShowCookieMenu', 'GetCookieIterator', 'ReadCookieIterator', 'GetCookieAccess', 'GetClientCookieTime', 'LoadTranslations', 'SetGlobalTransTarget', 'GetClientLanguage', 'GetServerLanguage', 'GetLanguageCount', 'GetLanguageInfo', 'SetClientLanguage', 'GetLanguageByCode', 'GetLanguageByName', 'CS_OnBuyCommand', 'CS_OnCSWeaponDrop', 'CS_OnGetWeaponPrice', 'CS_OnTerminateRound', 'CS_RespawnPlayer', 'CS_SwitchTeam', 'CS_DropWeapon', 'CS_TerminateRound', 'CS_GetTranslatedWeaponAlias', 'CS_GetWeaponPrice', 'CS_GetClientClanTag', 'CS_SetClientClanTag', 'CS_GetTeamScore', 'CS_SetTeamScore', 'CS_GetMVPCount', 'CS_SetMVPCount', 'CS_GetClientContributionScore', 'CS_SetClientContributionScore', 'CS_GetClientAssists', 'CS_SetClientAssists', 'CS_AliasToWeaponID', 'CS_WeaponIDToAlias', 'CS_IsValidWeaponID', 'CS_UpdateClientModel', 'LogToGame', 'SetRandomSeed', 'GetRandomFloat', 'GetRandomInt', 'IsMapValid', 'IsDedicatedServer', 'GetEngineTime', 'GetGameTime', 'GetGameTickCount', 'GetGameDescription', 'GetGameFolderName', 'GetCurrentMap', 'PrecacheModel', 'PrecacheSentenceFile', 'PrecacheDecal', 'PrecacheGeneric', 'IsModelPrecached', 'IsDecalPrecached', 'IsGenericPrecached', 'PrecacheSound', 'IsSoundPrecached', 'CreateDialog', 'GetEngineVersion', 'PrintToChat', 'PrintToChatAll', 'PrintCenterText', 'PrintCenterTextAll', 'PrintHintText', 'PrintHintTextToAll', 'ShowVGUIPanel', 'CreateHudSynchronizer', 'SetHudTextParams', 'SetHudTextParamsEx', 'ShowSyncHudText', 'ClearSyncHud', 'ShowHudText', 'ShowMOTDPanel', 'DisplayAskConnectBox', 'EntIndexToEntRef', 'EntRefToEntIndex', 
'MakeCompatEntRef', 'SetClientViewEntity', 'SetLightStyle', 'GetClientEyePosition', 'CreateDataPack', 'WritePackCell', 'WritePackFloat', 'WritePackString', 'ReadPackCell', 'ReadPackFloat', 'ReadPackString', 'ResetPack', 'GetPackPosition', 'SetPackPosition', 'IsPackReadable', 'LogMessage', 'LogToFile', 'LogToFileEx', 'LogAction', 'LogError', 'OnLogAction', 'GameLogHook', 'AddGameLogHook', 'RemoveGameLogHook', 'FindTeamByName', 'StartPrepSDKCall', 'PrepSDKCall_SetVirtual', 'PrepSDKCall_SetSignature', 'PrepSDKCall_SetAddress', 'PrepSDKCall_SetFromConf', 'PrepSDKCall_SetReturnInfo', 'PrepSDKCall_AddParameter', 'EndPrepSDKCall', 'SDKCall', 'GetPlayerResourceEntity', ) if __name__ == '__main__': # pragma: no cover import re from urllib.request import FancyURLopener from pygments.util import format_lines class Opener(FancyURLopener): version = 'Mozilla/5.0 (Pygments Sourcemod Builtins Update)' opener = Opener() def get_version(): f = opener.open('http://docs.sourcemod.net/api/index.php') r = re.compile(r'SourceMod v\.<b>([\d\.]+(?:-\w+)?)</td>') for line in f: m = r.search(line.decode()) if m is not None: return m.groups()[0] raise ValueError('No version in api docs') def get_sm_functions(): f = opener.open('http://docs.sourcemod.net/api/SMfuncs.js') r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);') functions = [] for line in f: m = r.match(line.decode()) if m is not None: functions.append(m.groups()[0]) return functions def regenerate(filename, natives): with open(filename) as fp: content = fp.read() header = content[:content.find('FUNCTIONS = (')] footer = content[content.find("if __name__ == '__main__':")-1:] with open(filename, 'w') as fp: fp.write(header) fp.write(format_lines('FUNCTIONS', natives)) fp.write('\n\n' + footer) def run(): version = get_version() print('> Downloading function index for SourceMod %s' % version) functions = get_sm_functions() print('> %d functions found:' % len(functions)) functionlist = [] for 
full_function_name in functions: print('>> %s' % full_function_name) functionlist.append(full_function_name) regenerate(__file__, functionlist) run()
26,745
Python
22.217014
85
0.634549
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/mips.py
""" pygments.lexers.mips ~~~~~~~~~~~~~~~~~~~~ Lexers for MIPS assembly. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words from pygments.token import Whitespace, Comment, String, Keyword, Name, Text __all__ = ["MIPSLexer"] class MIPSLexer(RegexLexer): """ A MIPS Assembly Lexer. Based on the Emacs major mode by hlissner: https://github.com/hlissner/emacs-mips-mode """ name = 'MIPS' aliases = ['mips'] # TODO: add '*.s' and '*.asm', which will require designing an analyse_text # method for this lexer and refactoring those from Gas and Nasm in order to # have relatively reliable detection filenames = ['*.mips', '*.MIPS'] keywords = [ # Arithmetic insturctions "add", "sub", "subu", "addi", "subi", "addu", "addiu", # Multiplication/division "mul", "mult", "multu", "mulu", "madd", "maddu", "msub", "msubu", "div", "divu", # Bitwise operations "and", "or", "nor", "xor", "andi", "ori", "xori", "clo", "clz", # Shifts "sll", "srl", "sllv", "srlv", "sra", "srav", # Comparisons "slt", "sltu", "slti", "sltiu", # Move data "mfhi", "mthi", "mflo", "mtlo", "movn", "movz", "movf", "movt", # Jump "j", "jal", "jalr", "jr", # branch "bc1f", "bc1t", "beq", "bgez", "bgezal", "bgtz", "blez", "bltzal", "bltz", "bne", # Load "lui", "lb", "lbu", "lh", "lhu", "lw", "lwcl", "lwl", "lwr", # Store "sb", "sh", "sw", "swl", "swr", # coproc: swc1 sdc1 # Concurrent load/store "ll", "sc", # Trap handling "teq", "teqi", "tne", "tneqi", "tge", "tgeu", "tgei", "tgeiu", "tlt", "tltu", "tlti", "tltiu", # Exception / Interrupt "eret", "break", "bop", "syscall", # --- Floats ----------------------------------------------------- # Arithmetic "add.s", "add.d", "sub.s", "sub.d", "mul.s", "mul.d", "div.s", "div.d", "neg.d", "neg.s", # Comparison "c.e.d", "c.e.s", "c.le.d", "c.le.s", "c.lt.s", "c.lt.d", # "c.gt.s", "c.gt.d", "madd.s", "madd.d", "msub.s", "msub.d", # Move Floats "mov.d", "move.s", "movf.d", 
"movf.s", "movt.d", "movt.s", "movn.d", "movn.s", "movnzd", "movz.s", "movz.d", # Conversion "cvt.d.s", "cvt.d.w", "cvt.s.d", "cvt.s.w", "cvt.w.d", "cvt.w.s", "trunc.w.d", "trunc.w.s", # Math "abs.s", "abs.d", "sqrt.s", "sqrt.d", "ceil.w.d", "ceil.w.s", "floor.w.d", "floor.w.s", "round.w.d", "round.w.s", ] pseudoinstructions = [ # Arithmetic & logical "rem", "remu", "mulo", "mulou", "abs", "neg", "negu", "not", "rol", "ror", # branches "b", "beqz", "bge", "bgeu", "bgt", "bgtu", "ble", "bleu", "blt", "bltu", "bnez", # loads "la", "li", "ld", "ulh", "ulhu", "ulw", # Store "sd", "ush", "usw", # move "move", # coproc: "mfc1.d", # comparisons "sgt", "sgtu", "sge", "sgeu", "sle", "sleu", "sne", "seq", # --- Floats ----------------------------------------------------- # load-store "l.d", "l.s", "s.d", "s.s", ] directives = [ ".align", ".ascii", ".asciiz", ".byte", ".data", ".double", ".extern", ".float", ".globl", ".half", ".kdata", ".ktext", ".space", ".text", ".word", ] deprecated = [ "beql", "bnel", "bgtzl", "bgezl", "bltzl", "blezl", "bltzall", "bgezall", ] tokens = { 'root': [ (r'\s+', Whitespace), (r'#.*', Comment), (r'"', String, 'string'), (r'-?[0-9]+?', Keyword.Constant), (r'\w*:', Name.Function), (words(deprecated, suffix=r'\b'), Keyword.Pseudo), # need warning face (words(pseudoinstructions, suffix=r'\b'), Name.Variable), (words(keywords, suffix=r'\b'), Keyword), (r'[slm][ftwd]c[0-9]([.]d)?', Keyword), (r'\$(f?[0-2][0-9]|f?3[01]|[ft]?[0-9]|[vk][01]|a[0-3]|s[0-7]|[gsf]p|ra|at|zero)', Keyword.Type), (words(directives, suffix=r'\b'), Name.Entity), # Preprocessor? (r':|,|;|\{|\}|=>|@|\$|=', Name.Builtin), (r'\w+', Text), (r'.', Text), ], 'string': [ (r'\\.', String.Escape), (r'"', String, '#pop'), (r'[^\\"]+', String), ], }
4,604
Python
34.697674
93
0.445482
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/usd.py
""" pygments.lexers.usd ~~~~~~~~~~~~~~~~~~~ The module that parses Pixar's Universal Scene Description file format. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups from pygments.lexer import words as words_ from pygments.lexers._usd_builtins import COMMON_ATTRIBUTES, KEYWORDS, \ OPERATORS, SPECIAL_NAMES, TYPES from pygments.token import Comment, Keyword, Name, Number, Operator, \ Punctuation, String, Text, Whitespace __all__ = ["UsdLexer"] def _keywords(words, type_): return [(words_(words, prefix=r"\b", suffix=r"\b"), type_)] _TYPE = r"(\w+(?:\[\])?)" _BASE_ATTRIBUTE = r"(\w+(?:\:\w+)*)(?:(\.)(timeSamples))?" _WHITESPACE = r"([ \t]+)" class UsdLexer(RegexLexer): """ A lexer that parses Pixar's Universal Scene Description file format. .. versionadded:: 2.6 """ name = "USD" url = 'https://graphics.pixar.com/usd/release/index.html' aliases = ["usd", "usda"] filenames = ["*.usd", "*.usda"] tokens = { "root": [ (r"(custom){_WHITESPACE}(uniform)(\s+){}(\s+){}(\s*)(=)".format( _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE), bygroups(Keyword.Token, Whitespace, Keyword.Token, Whitespace, Keyword.Type, Whitespace, Name.Attribute, Text, Name.Keyword.Tokens, Whitespace, Operator)), (r"(custom){_WHITESPACE}{}(\s+){}(\s*)(=)".format( _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE), bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace, Name.Attribute, Text, Name.Keyword.Tokens, Whitespace, Operator)), (r"(uniform){_WHITESPACE}{}(\s+){}(\s*)(=)".format( _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE), bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace, Name.Attribute, Text, Name.Keyword.Tokens, Whitespace, Operator)), (r"{}{_WHITESPACE}{}(\s*)(=)".format( _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE), bygroups(Keyword.Type, Whitespace, Name.Attribute, Text, Name.Keyword.Tokens, Whitespace, Operator)), ] + _keywords(KEYWORDS, Keyword.Tokens) + 
_keywords(SPECIAL_NAMES, Name.Builtins) + _keywords(COMMON_ATTRIBUTES, Name.Attribute) + [(r"\b\w+:[\w:]+\b", Name.Attribute)] + _keywords(OPERATORS, Operator) + # more attributes [(type_ + r"\[\]", Keyword.Type) for type_ in TYPES] + _keywords(TYPES, Keyword.Type) + [ (r"[(){}\[\]]", Punctuation), ("#.*?$", Comment.Single), (",", Punctuation), (";", Punctuation), # ";"s are allowed to combine separate metadata lines ("=", Operator), (r"[-]*([0-9]*[.])?[0-9]+(?:e[+-]*\d+)?", Number), (r"'''(?:.|\n)*?'''", String), (r'"""(?:.|\n)*?"""', String), (r"'.*?'", String), (r'".*?"', String), (r"<(\.\./)*([\w/]+|[\w/]+\.\w+[\w:]*)>", Name.Namespace), (r"@.*?@", String.Interpol), (r'\(.*"[.\\n]*".*\)', String.Doc), (r"\A#usda .+$", Comment.Hashbang), (r"\s+", Whitespace), (r"\w+", Text), (r"[_:.]+", Punctuation), ], }
3,513
Python
37.615384
86
0.51466
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_vbscript_builtins.py
""" pygments.lexers._vbscript_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These are manually translated lists from http://www.indusoft.com/pdf/VBScript%20Reference.pdf. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ KEYWORDS = [ 'ByRef', 'ByVal', # dim: special rule 'call', 'case', 'class', # const: special rule 'do', 'each', 'else', 'elseif', 'end', 'erase', 'execute', 'function', 'exit', 'for', 'function', 'GetRef', 'global', 'if', 'let', 'loop', 'next', 'new', # option: special rule 'private', 'public', 'redim', 'select', 'set', 'sub', 'then', 'wend', 'while', 'with', ] BUILTIN_FUNCTIONS = [ 'Abs', 'Array', 'Asc', 'Atn', 'CBool', 'CByte', 'CCur', 'CDate', 'CDbl', 'Chr', 'CInt', 'CLng', 'Cos', 'CreateObject', 'CSng', 'CStr', 'Date', 'DateAdd', 'DateDiff', 'DatePart', 'DateSerial', 'DateValue', 'Day', 'Eval', 'Exp', 'Filter', 'Fix', 'FormatCurrency', 'FormatDateTime', 'FormatNumber', 'FormatPercent', 'GetObject', 'GetLocale', 'Hex', 'Hour', 'InStr', 'inStrRev', 'Int', 'IsArray', 'IsDate', 'IsEmpty', 'IsNull', 'IsNumeric', 'IsObject', 'Join', 'LBound', 'LCase', 'Left', 'Len', 'LoadPicture', 'Log', 'LTrim', 'Mid', 'Minute', 'Month', 'MonthName', 'MsgBox', 'Now', 'Oct', 'Randomize', 'RegExp', 'Replace', 'RGB', 'Right', 'Rnd', 'Round', 'RTrim', 'ScriptEngine', 'ScriptEngineBuildVersion', 'ScriptEngineMajorVersion', 'ScriptEngineMinorVersion', 'Second', 'SetLocale', 'Sgn', 'Space', 'Split', 'Sqr', 'StrComp', 'String', 'StrReverse', 'Tan', 'Time', 'Timer', 'TimeSerial', 'TimeValue', 'Trim', 'TypeName', 'UBound', 'UCase', 'VarType', 'Weekday', 'WeekdayName', 'Year', ] BUILTIN_VARIABLES = [ 'Debug', 'Dictionary', 'Drive', 'Drives', 'Err', 'File', 'Files', 'FileSystemObject', 'Folder', 'Folders', 'Match', 'Matches', 'RegExp', 'Submatches', 'TextStream', ] OPERATORS = [ '+', '-', '*', '/', '\\', '^', '|', '<', '<=', '>', '>=', '=', '<>', '&', '$', ] OPERATOR_WORDS = [ 'mod', 'and', 'or', 'xor', 'eqv', 'imp', 
'is', 'not', ] BUILTIN_CONSTANTS = [ 'False', 'True', 'vbAbort', 'vbAbortRetryIgnore', 'vbApplicationModal', 'vbArray', 'vbBinaryCompare', 'vbBlack', 'vbBlue', 'vbBoole', 'vbByte', 'vbCancel', 'vbCr', 'vbCritical', 'vbCrLf', 'vbCurrency', 'vbCyan', 'vbDataObject', 'vbDate', 'vbDefaultButton1', 'vbDefaultButton2', 'vbDefaultButton3', 'vbDefaultButton4', 'vbDouble', 'vbEmpty', 'vbError', 'vbExclamation', 'vbFalse', 'vbFirstFullWeek', 'vbFirstJan1', 'vbFormFeed', 'vbFriday', 'vbGeneralDate', 'vbGreen', 'vbIgnore', 'vbInformation', 'vbInteger', 'vbLf', 'vbLong', 'vbLongDate', 'vbLongTime', 'vbMagenta', 'vbMonday', 'vbMsgBoxHelpButton', 'vbMsgBoxRight', 'vbMsgBoxRtlReading', 'vbMsgBoxSetForeground', 'vbNewLine', 'vbNo', 'vbNull', 'vbNullChar', 'vbNullString', 'vbObject', 'vbObjectError', 'vbOK', 'vbOKCancel', 'vbOKOnly', 'vbQuestion', 'vbRed', 'vbRetry', 'vbRetryCancel', 'vbSaturday', 'vbShortDate', 'vbShortTime', 'vbSingle', 'vbString', 'vbSunday', 'vbSystemModal', 'vbTab', 'vbTextCompare', 'vbThursday', 'vbTrue', 'vbTuesday', 'vbUseDefault', 'vbUseSystem', 'vbUseSystem', 'vbVariant', 'vbVerticalTab', 'vbWednesday', 'vbWhite', 'vbYellow', 'vbYes', 'vbYesNo', 'vbYesNoCancel', ]
4,225
Python
14.092857
70
0.47929
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/objective.py
""" pygments.lexers.objective ~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Objective-C family languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, this, words, \ inherit, default from pygments.token import Text, Keyword, Name, String, Operator, \ Number, Punctuation, Literal, Comment from pygments.lexers.c_cpp import CLexer, CppLexer __all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer'] def objective(baselexer): """ Generate a subclass of baselexer that accepts the Objective-C syntax extensions. """ # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here, # since that's quite common in ordinary C/C++ files. It's OK to match # JavaDoc/Doxygen keywords that only apply to Objective-C, mind. # # The upshot of this is that we CANNOT match @class or @interface _oc_keywords = re.compile(r'@(?:end|implementation|protocol)') # Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : ) # (note the identifier is *optional* when there is a ':'!) _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+' r'(?:[a-zA-Z_]\w*\s*\]|' r'(?:[a-zA-Z_]\w*)?:)') class GeneratedObjectiveCVariant(baselexer): """ Implements Objective-C syntax on top of an existing C family lexer. 
""" tokens = { 'statements': [ (r'@"', String, 'string'), (r'@(YES|NO)', Number), (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'@0[0-7]+[Ll]?', Number.Oct), (r'@\d+[Ll]?', Number.Integer), (r'@\(', Literal, 'literal_number'), (r'@\[', Literal, 'literal_array'), (r'@\{', Literal, 'literal_dictionary'), (words(( '@selector', '@private', '@protected', '@public', '@encode', '@synchronized', '@try', '@throw', '@catch', '@finally', '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer', '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong', 'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic', 'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in', 'out', 'inout', 'release', 'class', '@dynamic', '@optional', '@required', '@autoreleasepool', '@import'), suffix=r'\b'), Keyword), (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL', 'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'), Keyword.Type), (r'@(true|false|YES|NO)\n', Name.Builtin), (r'(YES|NO|nil|self|super)\b', Name.Builtin), # Carbon types (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type), # Carbon built-ins (r'(TRUE|FALSE)\b', Name.Builtin), (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), ('#pop', 'oc_classname')), (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text), ('#pop', 'oc_forward_classname')), # @ can also prefix other expressions like @{...} or @(...) 
(r'@', Punctuation), inherit, ], 'oc_classname': [ # interface definition that inherits (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)', bygroups(Name.Class, Text, Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')), (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', bygroups(Name.Class, Text, Name.Class), '#pop'), # interface definition for a category (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)', bygroups(Name.Class, Text, Name.Label, Text, Punctuation), ('#pop', 'oc_ivars')), (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))', bygroups(Name.Class, Text, Name.Label), '#pop'), # simple interface / implementation (r'([a-zA-Z$_][\w$]*)(\s*)(\{)', bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')), (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop') ], 'oc_forward_classname': [ (r'([a-zA-Z$_][\w$]*)(\s*,\s*)', bygroups(Name.Class, Text), 'oc_forward_classname'), (r'([a-zA-Z$_][\w$]*)(\s*;?)', bygroups(Name.Class, Text), '#pop') ], 'oc_ivars': [ include('whitespace'), include('statements'), (';', Punctuation), (r'\{', Punctuation, '#push'), (r'\}', Punctuation, '#pop'), ], 'root': [ # methods (r'^([-+])(\s*)' # method marker r'(\(.*?\))?(\s*)' # return type r'([a-zA-Z$_][\w$]*:?)', # begin of method name bygroups(Punctuation, Text, using(this), Text, Name.Function), 'method'), inherit, ], 'method': [ include('whitespace'), # TODO unsure if ellipses are allowed elsewhere, see # discussion in Issue 789 (r',', Punctuation), (r'\.\.\.', Punctuation), (r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)', bygroups(using(this), Text, Name.Variable)), (r'[a-zA-Z$_][\w$]*:', Name.Function), (';', Punctuation, '#pop'), (r'\{', Punctuation, 'function'), default('#pop'), ], 'literal_number': [ (r'\(', Punctuation, 'literal_number_inner'), (r'\)', Literal, '#pop'), include('statement'), ], 'literal_number_inner': [ (r'\(', Punctuation, '#push'), (r'\)', Punctuation, '#pop'), include('statement'), ], 'literal_array': [ (r'\[', Punctuation, 'literal_array_inner'), 
(r'\]', Literal, '#pop'), include('statement'), ], 'literal_array_inner': [ (r'\[', Punctuation, '#push'), (r'\]', Punctuation, '#pop'), include('statement'), ], 'literal_dictionary': [ (r'\}', Literal, '#pop'), include('statement'), ], } def analyse_text(text): if _oc_keywords.search(text): return 1.0 elif '@"' in text: # strings return 0.8 elif re.search('@[0-9]+', text): return 0.7 elif _oc_message.search(text): return 0.8 return 0 def get_tokens_unprocessed(self, text, stack=('root',)): from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ COCOA_PROTOCOLS, COCOA_PRIMITIVES for index, token, value in \ baselexer.get_tokens_unprocessed(self, text, stack): if token is Name or token is Name.Class: if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ or value in COCOA_PRIMITIVES: token = Name.Builtin.Pseudo yield index, token, value return GeneratedObjectiveCVariant class ObjectiveCLexer(objective(CLexer)): """ For Objective-C source code with preprocessor directives. """ name = 'Objective-C' url = 'https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ProgrammingWithObjectiveC/Introduction/Introduction.html' aliases = ['objective-c', 'objectivec', 'obj-c', 'objc'] filenames = ['*.m', '*.h'] mimetypes = ['text/x-objective-c'] priority = 0.05 # Lower than C class ObjectiveCppLexer(objective(CppLexer)): """ For Objective-C++ source code with preprocessor directives. """ name = 'Objective-C++' aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++'] filenames = ['*.mm', '*.hh'] mimetypes = ['text/x-objective-c++'] priority = 0.05 # Lower than C++ class LogosLexer(ObjectiveCppLexer): """ For Logos + Objective-C source code with preprocessor directives. .. 
versionadded:: 1.6 """ name = 'Logos' aliases = ['logos'] filenames = ['*.x', '*.xi', '*.xm', '*.xmi'] mimetypes = ['text/x-logos'] priority = 0.25 tokens = { 'statements': [ (r'(%orig|%log)\b', Keyword), (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))', bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), (r'(%init)\b(\()', bygroups(Keyword, Punctuation), 'logos_init_directive'), (r'(%init)(?=\s*;)', bygroups(Keyword)), (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', bygroups(Keyword, Text, Name.Class), '#pop'), (r'(%subclass)(\s+)', bygroups(Keyword, Text), ('#pop', 'logos_classname')), inherit, ], 'logos_init_directive': [ (r'\s+', Text), (',', Punctuation, ('logos_init_directive', '#pop')), (r'([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)', bygroups(Name.Class, Text, Punctuation, Text, Text)), (r'([a-zA-Z$_][\w$]*)', Name.Class), (r'\)', Punctuation, '#pop'), ], 'logos_classname': [ (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', bygroups(Name.Class, Text, Name.Class), '#pop'), (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop') ], 'root': [ (r'(%subclass)(\s+)', bygroups(Keyword, Text), 'logos_classname'), (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', bygroups(Keyword, Text, Name.Class)), (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)', bygroups(Keyword, Text, Name.Variable, Text, String, Text)), (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation), 'function'), (r'(%new)(\s*)(\()(.*?)(\))', bygroups(Keyword, Text, Keyword, String, Keyword)), (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), inherit, ], } _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()') def analyse_text(text): if LogosLexer._logos_keywords.search(text): return 1.0 return 0 class SwiftLexer(RegexLexer): """ For Swift source. .. 
versionadded:: 2.0 """ name = 'Swift' url = 'https://www.swift.org/' filenames = ['*.swift'] aliases = ['swift'] mimetypes = ['text/x-swift'] tokens = { 'root': [ # Whitespace and Comments (r'\n', Text), (r'\s+', Text), (r'//', Comment.Single, 'comment-single'), (r'/\*', Comment.Multiline, 'comment-multi'), (r'#(if|elseif|else|endif|available)\b', Comment.Preproc, 'preproc'), # Keywords include('keywords'), # Global Types (words(( 'Array', 'AutoreleasingUnsafeMutablePointer', 'BidirectionalReverseView', 'Bit', 'Bool', 'CFunctionPointer', 'COpaquePointer', 'CVaListPointer', 'Character', 'ClosedInterval', 'CollectionOfOne', 'ContiguousArray', 'Dictionary', 'DictionaryGenerator', 'DictionaryIndex', 'Double', 'EmptyCollection', 'EmptyGenerator', 'EnumerateGenerator', 'EnumerateSequence', 'FilterCollectionView', 'FilterCollectionViewIndex', 'FilterGenerator', 'FilterSequenceView', 'Float', 'Float80', 'FloatingPointClassification', 'GeneratorOf', 'GeneratorOfOne', 'GeneratorSequence', 'HalfOpenInterval', 'HeapBuffer', 'HeapBufferStorage', 'ImplicitlyUnwrappedOptional', 'IndexingGenerator', 'Int', 'Int16', 'Int32', 'Int64', 'Int8', 'LazyBidirectionalCollection', 'LazyForwardCollection', 'LazyRandomAccessCollection', 'LazySequence', 'MapCollectionView', 'MapSequenceGenerator', 'MapSequenceView', 'MirrorDisposition', 'ObjectIdentifier', 'OnHeap', 'Optional', 'PermutationGenerator', 'QuickLookObject', 'RandomAccessReverseView', 'Range', 'RangeGenerator', 'RawByte', 'Repeat', 'ReverseBidirectionalIndex', 'ReverseRandomAccessIndex', 'SequenceOf', 'SinkOf', 'Slice', 'StaticString', 'StrideThrough', 'StrideThroughGenerator', 'StrideTo', 'StrideToGenerator', 'String', 'UInt', 'UInt16', 'UInt32', 'UInt64', 'UInt8', 'UTF16', 'UTF32', 'UTF8', 'UnicodeDecodingResult', 'UnicodeScalar', 'Unmanaged', 'UnsafeBufferPointer', 'UnsafeBufferPointerGenerator', 'UnsafeMutableBufferPointer', 'UnsafeMutablePointer', 'UnsafePointer', 'Zip2', 'ZipGenerator2', # Protocols 'AbsoluteValuable', 
'AnyObject', 'ArrayLiteralConvertible', 'BidirectionalIndexType', 'BitwiseOperationsType', 'BooleanLiteralConvertible', 'BooleanType', 'CVarArgType', 'CollectionType', 'Comparable', 'DebugPrintable', 'DictionaryLiteralConvertible', 'Equatable', 'ExtendedGraphemeClusterLiteralConvertible', 'ExtensibleCollectionType', 'FloatLiteralConvertible', 'FloatingPointType', 'ForwardIndexType', 'GeneratorType', 'Hashable', 'IntegerArithmeticType', 'IntegerLiteralConvertible', 'IntegerType', 'IntervalType', 'MirrorType', 'MutableCollectionType', 'MutableSliceable', 'NilLiteralConvertible', 'OutputStreamType', 'Printable', 'RandomAccessIndexType', 'RangeReplaceableCollectionType', 'RawOptionSetType', 'RawRepresentable', 'Reflectable', 'SequenceType', 'SignedIntegerType', 'SignedNumberType', 'SinkType', 'Sliceable', 'Streamable', 'Strideable', 'StringInterpolationConvertible', 'StringLiteralConvertible', 'UnicodeCodecType', 'UnicodeScalarLiteralConvertible', 'UnsignedIntegerType', '_ArrayBufferType', '_BidirectionalIndexType', '_CocoaStringType', '_CollectionType', '_Comparable', '_ExtensibleCollectionType', '_ForwardIndexType', '_Incrementable', '_IntegerArithmeticType', '_IntegerType', '_ObjectiveCBridgeable', '_RandomAccessIndexType', '_RawOptionSetType', '_SequenceType', '_Sequence_Type', '_SignedIntegerType', '_SignedNumberType', '_Sliceable', '_Strideable', '_SwiftNSArrayRequiredOverridesType', '_SwiftNSArrayType', '_SwiftNSCopyingType', '_SwiftNSDictionaryRequiredOverridesType', '_SwiftNSDictionaryType', '_SwiftNSEnumeratorType', '_SwiftNSFastEnumerationType', '_SwiftNSStringRequiredOverridesType', '_SwiftNSStringType', '_UnsignedIntegerType', # Variables 'C_ARGC', 'C_ARGV', 'Process', # Typealiases 'Any', 'AnyClass', 'BooleanLiteralType', 'CBool', 'CChar', 'CChar16', 'CChar32', 'CDouble', 'CFloat', 'CInt', 'CLong', 'CLongLong', 'CShort', 'CSignedChar', 'CUnsignedInt', 'CUnsignedLong', 'CUnsignedShort', 'CWideChar', 'ExtendedGraphemeClusterType', 'Float32', 'Float64', 
'FloatLiteralType', 'IntMax', 'IntegerLiteralType', 'StringLiteralType', 'UIntMax', 'UWord', 'UnicodeScalarType', 'Void', 'Word', # Foundation/Cocoa 'NSErrorPointer', 'NSObjectProtocol', 'Selector'), suffix=r'\b'), Name.Builtin), # Functions (words(( 'abs', 'advance', 'alignof', 'alignofValue', 'assert', 'assertionFailure', 'contains', 'count', 'countElements', 'debugPrint', 'debugPrintln', 'distance', 'dropFirst', 'dropLast', 'dump', 'enumerate', 'equal', 'extend', 'fatalError', 'filter', 'find', 'first', 'getVaList', 'indices', 'insert', 'isEmpty', 'join', 'last', 'lazy', 'lexicographicalCompare', 'map', 'max', 'maxElement', 'min', 'minElement', 'numericCast', 'overlaps', 'partition', 'precondition', 'preconditionFailure', 'prefix', 'print', 'println', 'reduce', 'reflect', 'removeAll', 'removeAtIndex', 'removeLast', 'removeRange', 'reverse', 'sizeof', 'sizeofValue', 'sort', 'sorted', 'splice', 'split', 'startsWith', 'stride', 'strideof', 'strideofValue', 'suffix', 'swap', 'toDebugString', 'toString', 'transcode', 'underestimateCount', 'unsafeAddressOf', 'unsafeBitCast', 'unsafeDowncast', 'withExtendedLifetime', 'withUnsafeMutablePointer', 'withUnsafeMutablePointers', 'withUnsafePointer', 'withUnsafePointers', 'withVaList'), suffix=r'\b'), Name.Builtin.Pseudo), # Implicit Block Variables (r'\$\d+', Name.Variable), # Binary Literal (r'0b[01_]+', Number.Bin), # Octal Literal (r'0o[0-7_]+', Number.Oct), # Hexadecimal Literal (r'0x[0-9a-fA-F_]+', Number.Hex), # Decimal Literal (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float), (r'[0-9][0-9_]*', Number.Integer), # String Literal (r'"', String, 'string'), # Operators and Punctuation (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation), (r'[/=\-+!*%<>&|^?~]+', Operator), # Identifier (r'[a-zA-Z_]\w*', Name) ], 'keywords': [ (words(( 'as', 'async', 'await', 'break', 'case', 'catch', 'continue', 'default', 'defer', 'do', 'else', 'fallthrough', 'for', 'guard', 'if', 
'in', 'is', 'repeat', 'return', '#selector', 'switch', 'throw', 'try', 'where', 'while'), suffix=r'\b'), Keyword), (r'@availability\([^)]+\)', Keyword.Reserved), (words(( 'associativity', 'convenience', 'dynamic', 'didSet', 'final', 'get', 'indirect', 'infix', 'inout', 'lazy', 'left', 'mutating', 'none', 'nonmutating', 'optional', 'override', 'postfix', 'precedence', 'prefix', 'Protocol', 'required', 'rethrows', 'right', 'set', 'throws', 'Type', 'unowned', 'weak', 'willSet', '@availability', '@autoclosure', '@noreturn', '@NSApplicationMain', '@NSCopying', '@NSManaged', '@objc', '@UIApplicationMain', '@IBAction', '@IBDesignable', '@IBInspectable', '@IBOutlet'), suffix=r'\b'), Keyword.Reserved), (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__' r'|__FILE__|__FUNCTION__|__LINE__|_' r'|#(?:file|line|column|function))\b', Keyword.Constant), (r'import\b', Keyword.Declaration, 'module'), (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Text, Name.Class)), (r'(func)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Text, Name.Function)), (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Text, Name.Variable)), (words(( 'actor', 'associatedtype', 'class', 'deinit', 'enum', 'extension', 'func', 'import', 'init', 'internal', 'let', 'operator', 'private', 'protocol', 'public', 'static', 'struct', 'subscript', 'typealias', 'var'), suffix=r'\b'), Keyword.Declaration) ], 'comment': [ (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):', Comment.Special) ], # Nested 'comment-single': [ (r'\n', Text, '#pop'), include('comment'), (r'[^\n]', Comment.Single) ], 'comment-multi': [ include('comment'), (r'[^*/]', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline) ], 'module': [ (r'\n', Text, '#pop'), (r'[a-zA-Z_]\w*', Name.Class), include('root') ], 'preproc': [ (r'\n', Text, '#pop'), include('keywords'), (r'[A-Za-z]\w*', Comment.Preproc), 
include('root') ], 'string': [ (r'\\\(', String.Interpol, 'string-intp'), (r'"', String, '#pop'), (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), (r'[^\\"]+', String), (r'\\', String) ], 'string-intp': [ (r'\(', String.Interpol, '#push'), (r'\)', String.Interpol, '#pop'), include('root') ] } def get_tokens_unprocessed(self, text): from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ COCOA_PROTOCOLS, COCOA_PRIMITIVES for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name or token is Name.Class: if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ or value in COCOA_PRIMITIVES: token = Name.Builtin.Pseudo yield index, token, value
22,961
Python
44.379447
143
0.487609
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/macaulay2.py
""" pygments.lexers.macaulay2 ~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Macaulay2. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words from pygments.token import Comment, Keyword, Name, String, Text __all__ = ['Macaulay2Lexer'] # Auto-generated for Macaulay2-1.21. Do not modify this file manually. M2KEYWORDS = ( "and", "break", "catch", "continue", "do", "elapsedTime", "elapsedTiming", "else", "for", "from", "global", "if", "in", "list", "local", "new", "not", "of", "or", "return", "shield", "SPACE", "step", "symbol", "then", "threadVariable", "throw", "time", "timing", "to", "try", "when", "while", "xor" ) M2DATATYPES = ( "Adjacent", "AffineVariety", "Analyzer", "ANCHOR", "AngleBarList", "Array", "AssociativeExpression", "Bag", "BasicList", "BettiTally", "BinaryOperation", "BLOCKQUOTE", "BODY", "BOLD", "Boolean", "BR", "CacheFunction", "CacheTable", "CC", "CDATA", "ChainComplex", "ChainComplexMap", "CODE", "CoherentSheaf", "Command", "COMMENT", "CompiledFunction", "CompiledFunctionBody", "CompiledFunctionClosure", "ComplexField", "Constant", "Database", "DD", "Descent", "Describe", "Dictionary", "DIV", "Divide", "DL", "DocumentTag", "DT", "Eliminate", "EM", "EngineRing", "Equation", "ExampleItem", "Expression", "File", "FilePosition", "FractionField", "Function", "FunctionApplication", "FunctionBody", "FunctionClosure", "GaloisField", "GeneralOrderedMonoid", "GlobalDictionary", "GradedModule", "GradedModuleMap", "GroebnerBasis", "GroebnerBasisOptions", "HashTable", "HEAD", "HEADER1", "HEADER2", "HEADER3", "HEADER4", "HEADER5", "HEADER6", "HeaderType", "Holder", "HR", "HREF", "HTML", "Hybrid", "Hypertext", "HypertextContainer", "HypertextParagraph", "Ideal", "IMG", "ImmutableType", "IndeterminateNumber", "IndexedVariable", "IndexedVariableTable", "InexactField", "InexactFieldFamily", "InexactNumber", "InfiniteNumber", "IntermediateMarkUpType", "ITALIC", "Iterator", 
"Keyword", "LABEL", "LATER", "LI", "LINK", "List", "LITERAL", "LocalDictionary", "LowerBound", "Manipulator", "MapExpression", "MarkUpType", "Matrix", "MatrixExpression", "MENU", "META", "MethodFunction", "MethodFunctionBinary", "MethodFunctionSingle", "MethodFunctionWithOptions", "Minus", "Module", "Monoid", "MonoidElement", "MonomialIdeal", "MultigradedBettiTally", "MutableHashTable", "MutableList", "MutableMatrix", "Net", "NetFile", "NonAssociativeProduct", "Nothing", "Number", "NumberedVerticalList", "OL", "OneExpression", "Option", "OptionTable", "OrderedMonoid", "Package", "PARA", "Parenthesize", "Parser", "Partition", "PolynomialRing", "Power", "PRE", "Product", "ProductOrder", "Program", "ProgramRun", "ProjectiveHilbertPolynomial", "ProjectiveVariety", "Pseudocode", "QQ", "QuotientRing", "RealField", "Resolution", "Ring", "RingElement", "RingFamily", "RingMap", "RowExpression", "RR", "RRi", "SCRIPT", "ScriptedFunctor", "SelfInitializingType", "Sequence", "Set", "SheafExpression", "SheafOfRings", "SMALL", "SPAN", "SparseMonomialVectorExpression", "SparseVectorExpression", "String", "STRONG", "STYLE", "SUB", "Subscript", "SUBSECTION", "Sum", "SumOfTwists", "SUP", "Superscript", "Symbol", "SymbolBody", "TABLE", "Table", "Tally", "Task", "TD", "TestInput", "TEX", "TH", "Thing", "Time", "TITLE", "TO", "TO2", "TOH", "TR", "TT", "Type", "UL", "URL", "Variety", "Vector", "VectorExpression", "VerticalList", "VirtualTally", "VisibleList", "WrapperType", "ZeroExpression", "ZZ" ) M2FUNCTIONS = ( "about", "abs", "accumulate", "acos", "acosh", "acot", "acoth", "addCancelTask", "addDependencyTask", "addEndFunction", "addHook", "addStartFunction", "addStartTask", "adjoint", "agm", "alarm", "all", "ambient", "analyticSpread", "ancestor", "ancestors", "andP", "ann", "annihilator", "antipode", "any", "append", "applicationDirectory", "apply", "applyKeys", "applyPairs", "applyTable", "applyValues", "apropos", "arXiv", "ascii", "asin", "asinh", "ass", "assert", 
"associatedGradedRing", "associatedPrimes", "atan", "atan2", "atanh", "atEndOfFile", "autoload", "baseFilename", "baseName", "baseRing", "basis", "beginDocumentation", "benchmark", "BesselJ", "BesselY", "Beta", "betti", "between", "binomial", "borel", "cacheValue", "cancelTask", "capture", "ceiling", "centerString", "chainComplex", "changeBase", "char", "characters", "charAnalyzer", "check", "checkDegrees", "chi", "class", "clean", "clearEcho", "code", "codim", "coefficient", "coefficientRing", "coefficients", "cohomology", "coimage", "coker", "cokernel", "collectGarbage", "columnAdd", "columnate", "columnMult", "columnPermute", "columnRankProfile", "columnSwap", "combine", "commandInterpreter", "commonest", "commonRing", "comodule", "complement", "complete", "components", "compose", "compositions", "compress", "concatenate", "conductor", "cone", "conjugate", "connectionCount", "constParser", "content", "contract", "conwayPolynomial", "copy", "copyDirectory", "copyFile", "cos", "cosh", "cot", "cotangentSheaf", "coth", "cover", "coverMap", "cpuTime", "createTask", "csc", "csch", "currentColumnNumber", "currentDirectory", "currentPosition", "currentRowNumber", "currentTime", "deadParser", "debug", "debugError", "decompose", "deepSplice", "default", "degree", "degreeGroup", "degreeLength", "degrees", "degreesMonoid", "degreesRing", "delete", "demark", "denominator", "depth", "describe", "det", "determinant", "diagonalMatrix", "diameter", "dictionary", "diff", "difference", "Digamma", "dim", "directSum", "disassemble", "discriminant", "dismiss", "distinguished", "divideByVariable", "doc", "document", "drop", "dual", "eagonNorthcott", "echoOff", "echoOn", "eigenvalues", "eigenvectors", "eint", "elements", "eliminate", "End", "endPackage", "entries", "erase", "erf", "erfc", "error", "euler", "eulers", "even", "EXAMPLE", "examples", "exec", "exp", "expectedReesIdeal", "expm1", "exponents", "export", "exportFrom", "exportMutable", "expression", "extend", "exteriorPower", 
"factor", "Fano", "fileExecutable", "fileExists", "fileLength", "fileMode", "fileReadable", "fileTime", "fileWritable", "fillMatrix", "findFiles", "findHeft", "findProgram", "findSynonyms", "first", "firstkey", "fittingIdeal", "flagLookup", "flatten", "flattenRing", "flip", "floor", "fold", "forceGB", "fork", "format", "formation", "frac", "fraction", "frames", "fromDividedPowers", "fromDual", "functionBody", "futureParser", "Gamma", "gb", "gbRemove", "gbSnapshot", "gcd", "gcdCoefficients", "gcdLLL", "GCstats", "genera", "generateAssertions", "generator", "generators", "genericMatrix", "genericSkewMatrix", "genericSymmetricMatrix", "gens", "genus", "get", "getc", "getChangeMatrix", "getenv", "getGlobalSymbol", "getNetFile", "getNonUnit", "getPrimeWithRootOfUnity", "getSymbol", "getWWW", "GF", "globalAssign", "globalAssignFunction", "globalAssignment", "globalReleaseFunction", "gradedModule", "gradedModuleMap", "gramm", "graphIdeal", "graphRing", "Grassmannian", "groebnerBasis", "groupID", "hash", "hashTable", "heft", "height", "hermite", "hilbertFunction", "hilbertPolynomial", "hilbertSeries", "hold", "Hom", "homogenize", "homology", "homomorphism", "hooks", "horizontalJoin", "html", "httpHeaders", "hypertext", "icFracP", "icFractions", "icMap", "icPIdeal", "ideal", "idealizer", "identity", "image", "imaginaryPart", "importFrom", "independentSets", "index", "indices", "inducedMap", "inducesWellDefinedMap", "info", "input", "insert", "installAssignmentMethod", "installedPackages", "installHilbertFunction", "installMethod", "installMinprimes", "installPackage", "instance", "instances", "integralClosure", "integrate", "intersect", "intersectInP", "intersection", "interval", "inverse", "inverseErf", "inversePermutation", "inverseRegularizedBeta", "inverseRegularizedGamma", "inverseSystem", "irreducibleCharacteristicSeries", "irreducibleDecomposition", "isAffineRing", "isANumber", "isBorel", "isc", "isCanceled", "isCommutative", "isConstant", "isDirectory", 
"isDirectSum", "isEmpty", "isField", "isFinite", "isFinitePrimeField", "isFreeModule", "isGlobalSymbol", "isHomogeneous", "isIdeal", "isInfinite", "isInjective", "isInputFile", "isIsomorphic", "isIsomorphism", "isLinearType", "isListener", "isLLL", "isMember", "isModule", "isMonomialIdeal", "isNormal", "isOpen", "isOutputFile", "isPolynomialRing", "isPrimary", "isPrime", "isPrimitive", "isPseudoprime", "isQuotientModule", "isQuotientOf", "isQuotientRing", "isReady", "isReal", "isReduction", "isRegularFile", "isRing", "isSkewCommutative", "isSorted", "isSquareFree", "isStandardGradedPolynomialRing", "isSubmodule", "isSubquotient", "isSubset", "isSupportedInZeroLocus", "isSurjective", "isTable", "isUnit", "isWellDefined", "isWeylAlgebra", "iterator", "jacobian", "jacobianDual", "join", "ker", "kernel", "kernelLLL", "kernelOfLocalization", "keys", "kill", "koszul", "last", "lcm", "leadCoefficient", "leadComponent", "leadMonomial", "leadTerm", "left", "length", "letterParser", "lift", "liftable", "limitFiles", "limitProcesses", "lines", "linkFile", "listForm", "listSymbols", "LLL", "lngamma", "load", "loadPackage", "localDictionaries", "localize", "locate", "log", "log1p", "lookup", "lookupCount", "LUdecomposition", "M2CODE", "makeDirectory", "makeDocumentTag", "makePackageIndex", "makeS2", "map", "markedGB", "match", "mathML", "matrix", "max", "maxPosition", "member", "memoize", "memoizeClear", "memoizeValues", "merge", "mergePairs", "method", "methodOptions", "methods", "midpoint", "min", "mingens", "mingle", "minimalBetti", "minimalPresentation", "minimalPrimes", "minimalReduction", "minimizeFilename", "minors", "minPosition", "minPres", "minprimes", "minus", "mkdir", "mod", "module", "modulo", "monoid", "monomialCurveIdeal", "monomialIdeal", "monomials", "monomialSubideal", "moveFile", "multidegree", "multidoc", "multigraded", "multiplicity", "mutable", "mutableIdentity", "mutableMatrix", "nanosleep", "needs", "needsPackage", "net", "netList", "newClass", 
"newCoordinateSystem", "newNetFile", "newPackage", "newRing", "next", "nextkey", "nextPrime", "NNParser", "nonspaceAnalyzer", "norm", "normalCone", "notImplemented", "nullhomotopy", "nullParser", "nullSpace", "number", "numcols", "numColumns", "numerator", "numeric", "numericInterval", "numgens", "numRows", "numrows", "odd", "oeis", "ofClass", "on", "openDatabase", "openDatabaseOut", "openFiles", "openIn", "openInOut", "openListener", "openOut", "openOutAppend", "optionalSignParser", "options", "optP", "orP", "override", "pack", "package", "packageTemplate", "pad", "pager", "pairs", "parent", "part", "partition", "partitions", "parts", "pdim", "peek", "permanents", "permutations", "pfaffians", "pivots", "plus", "poincare", "poincareN", "polarize", "poly", "position", "positions", "power", "powermod", "precision", "preimage", "prepend", "presentation", "pretty", "primaryComponent", "primaryDecomposition", "print", "printerr", "printString", "processID", "product", "profile", "Proj", "projectiveHilbertPolynomial", "promote", "protect", "prune", "pseudocode", "pseudoRemainder", "pushForward", "QQParser", "QRDecomposition", "quotient", "quotientRemainder", "radical", "radicalContainment", "random", "randomKRationalPoint", "randomMutableMatrix", "rank", "read", "readDirectory", "readlink", "readPackage", "realPart", "realpath", "recursionDepth", "reducedRowEchelonForm", "reduceHilbert", "reductionNumber", "reesAlgebra", "reesAlgebraIdeal", "reesIdeal", "regex", "regexQuote", "registerFinalizer", "regSeqInIdeal", "regularity", "regularizedBeta", "regularizedGamma", "relations", "relativizeFilename", "remainder", "remove", "removeDirectory", "removeFile", "removeLowestDimension", "reorganize", "replace", "res", "reshape", "resolution", "resultant", "reverse", "right", "ring", "ringFromFractions", "roots", "rotate", "round", "rowAdd", "rowMult", "rowPermute", "rowRankProfile", "rowSwap", "rsort", "run", "runHooks", "runLengthEncode", "runProgram", "same", "saturate", 
"scan", "scanKeys", "scanLines", "scanPairs", "scanValues", "schedule", "schreyerOrder", "Schubert", "searchPath", "sec", "sech", "seeParsing", "select", "selectInSubring", "selectVariables", "separate", "separateRegexp", "sequence", "serialNumber", "set", "setEcho", "setGroupID", "setIOExclusive", "setIOSynchronized", "setIOUnSynchronized", "setRandomSeed", "setup", "setupEmacs", "sheaf", "sheafHom", "show", "showHtml", "showTex", "simpleDocFrob", "sin", "singularLocus", "sinh", "size", "size2", "sleep", "smithNormalForm", "solve", "someTerms", "sort", "sortColumns", "source", "span", "Spec", "specialFiber", "specialFiberIdeal", "splice", "splitWWW", "sqrt", "stack", "stacksProject", "standardForm", "standardPairs", "stashValue", "status", "style", "sub", "sublists", "submatrix", "submatrixByDegrees", "subquotient", "subsets", "substitute", "substring", "subtable", "sum", "super", "support", "SVD", "switch", "sylvesterMatrix", "symbolBody", "symlinkDirectory", "symlinkFile", "symmetricAlgebra", "symmetricAlgebraIdeal", "symmetricKernel", "symmetricPower", "synonym", "SYNOPSIS", "syz", "syzygyScheme", "table", "take", "tally", "tan", "tangentCone", "tangentSheaf", "tanh", "target", "taskResult", "temporaryFileName", "tensor", "tensorAssociativity", "terminalParser", "terms", "TEST", "testHunekeQuestion", "tests", "tex", "texMath", "times", "toAbsolutePath", "toCC", "toDividedPowers", "toDual", "toExternalString", "toField", "toList", "toLower", "top", "topCoefficients", "topComponents", "toRR", "toRRi", "toSequence", "toString", "toUpper", "trace", "transpose", "trim", "truncate", "truncateOutput", "tutorial", "ultimate", "unbag", "uncurry", "undocumented", "uniform", "uninstallAllPackages", "uninstallPackage", "unique", "uniquePermutations", "unsequence", "unstack", "urlEncode", "use", "userSymbols", "utf8", "utf8check", "utf8substring", "validate", "value", "values", "variety", "vars", "vector", "versalEmbedding", "wait", "wedgeProduct", "weightRange", "whichGm", 
"width", "wikipedia", "wrap", "youngest", "zero", "zeta", "ZZParser" ) M2CONSTANTS = ( "AbstractToricVarieties", "Acknowledgement", "AdditionalPaths", "AdjointIdeal", "AfterEval", "AfterNoPrint", "AfterPrint", "AInfinity", "AlgebraicSplines", "Algorithm", "Alignment", "AllCodimensions", "allowableThreads", "AnalyzeSheafOnP1", "applicationDirectorySuffix", "argument", "Ascending", "AssociativeAlgebras", "Authors", "AuxiliaryFiles", "backtrace", "Bareiss", "BaseFunction", "baseRings", "BaseRow", "BasisElementLimit", "Bayer", "BeforePrint", "BeginningMacaulay2", "Benchmark", "Bertini", "BettiCharacters", "BGG", "BIBasis", "Binary", "Binomial", "BinomialEdgeIdeals", "Binomials", "BKZ", "blockMatrixForm", "Body", "BoijSoederberg", "Book3264Examples", "BooleanGB", "Boxes", "Browse", "Bruns", "cache", "CacheExampleOutput", "CallLimit", "CannedExample", "CatalanConstant", "Caveat", "Center", "Certification", "ChainComplexExtras", "ChainComplexOperations", "ChangeMatrix", "CharacteristicClasses", "CheckDocumentation", "Chordal", "Classic", "clearAll", "clearOutput", "close", "closeIn", "closeOut", "ClosestFit", "Code", "CodimensionLimit", "CodingTheory", "CoefficientRing", "Cofactor", "CohenEngine", "CohenTopLevel", "CohomCalg", "CoincidentRootLoci", "commandLine", "compactMatrixForm", "Complement", "CompleteIntersection", "CompleteIntersectionResolutions", "Complexes", "ConductorElement", "Configuration", "ConformalBlocks", "Consequences", "Constants", "Contributors", "ConvexInterface", "ConwayPolynomials", "copyright", "Core", "CorrespondenceScrolls", "CotangentSchubert", "Cremona", "currentFileDirectory", "currentFileName", "currentLayout", "currentPackage", "Cyclotomic", "Date", "dd", "DebuggingMode", "debuggingMode", "debugLevel", "DecomposableSparseSystems", "Decompose", "Default", "defaultPrecision", "Degree", "DegreeGroup", "DegreeLift", "DegreeLimit", "DegreeMap", "DegreeOrder", "DegreeRank", "Degrees", "Dense", "Density", "Depth", "Descending", "Description", 
"DeterminantalRepresentations", "DGAlgebras", "dictionaryPath", "DiffAlg", "Dispatch", "DivideConquer", "DividedPowers", "Divisor", "Dmodules", "docExample", "docTemplate", "Down", "Dynamic", "EagonResolution", "EdgeIdeals", "edit", "EigenSolver", "EisenbudHunekeVasconcelos", "Elimination", "EliminationMatrices", "EllipticCurves", "EllipticIntegrals", "Email", "end", "endl", "Engine", "engineDebugLevel", "EngineTests", "EnumerationCurves", "environment", "EquivariantGB", "errorDepth", "EulerConstant", "Example", "ExampleFiles", "ExampleSystems", "Exclude", "exit", "Ext", "ExteriorIdeals", "ExteriorModules", "false", "FastMinors", "FastNonminimal", "FGLM", "fileDictionaries", "fileExitHooks", "FileName", "FindOne", "FiniteFittingIdeals", "First", "FirstPackage", "FlatMonoid", "Flexible", "flush", "FollowLinks", "ForeignFunctions", "FormalGroupLaws", "Format", "FourierMotzkin", "FourTiTwo", "fpLLL", "FrobeniusThresholds", "FunctionFieldDesingularization", "GBDegrees", "gbTrace", "GenerateAssertions", "Generic", "GenericInitialIdeal", "GeometricDecomposability", "gfanInterface", "Givens", "GKMVarieties", "GLex", "Global", "GlobalAssignHook", "globalAssignmentHooks", "GlobalHookStore", "GlobalReleaseHook", "Gorenstein", "GradedLieAlgebras", "GraphicalModels", "GraphicalModelsMLE", "Graphics", "Graphs", "GRevLex", "GroebnerStrata", "GroebnerWalk", "GroupLex", "GroupRevLex", "GTZ", "Hadamard", "handleInterrupts", "HardDegreeLimit", "Heading", "Headline", "Heft", "Height", "help", "Hermite", "Hermitian", "HH", "hh", "HigherCIOperators", "HighestWeights", "Hilbert", "HodgeIntegrals", "homeDirectory", "HomePage", "Homogeneous", "Homogeneous2", "HomotopyLieAlgebra", "HorizontalSpace", "HyperplaneArrangements", "id", "IgnoreExampleErrors", "ii", "incomparable", "Increment", "indeterminate", "Index", "indexComponents", "infinity", "InfoDirSection", "infoHelp", "Inhomogeneous", "Inputs", "InstallPrefix", "IntegralClosure", "interpreterDepth", "Intersection", "InvariantRing", 
"InverseMethod", "Inverses", "InverseSystems", "Invertible", "InvolutiveBases", "Isomorphism", "Item", "Iterate", "Jacobian", "Jets", "Join", "JSON", "Jupyter", "K3Carpets", "K3Surfaces", "Keep", "KeepFiles", "KeepZeroes", "Key", "Keywords", "Kronecker", "KustinMiller", "lastMatch", "LatticePolytopes", "Layout", "Left", "LengthLimit", "Lex", "LexIdeals", "Licenses", "LieTypes", "Limit", "Linear", "LinearAlgebra", "LinearTruncations", "lineNumber", "listLocalSymbols", "listUserSymbols", "LLLBases", "loadDepth", "LoadDocumentation", "loadedFiles", "loadedPackages", "Local", "LocalRings", "LongPolynomial", "M0nbar", "Macaulay2Doc", "MakeDocumentation", "MakeHTML", "MakeInfo", "MakeLinks", "MakePDF", "MapleInterface", "Markov", "Matroids", "maxAllowableThreads", "maxExponent", "MaximalRank", "MaxReductionCount", "MCMApproximations", "MergeTeX", "minExponent", "MinimalGenerators", "MinimalMatrix", "minimalPresentationMap", "minimalPresentationMapInv", "MinimalPrimes", "Minimize", "MinimumVersion", "Miura", "MixedMultiplicity", "ModuleDeformations", "MonodromySolver", "Monomial", "MonomialAlgebras", "MonomialIntegerPrograms", "MonomialOrbits", "MonomialOrder", "Monomials", "MonomialSize", "MultiGradedRationalMap", "MultiplicitySequence", "MultiplierIdeals", "MultiplierIdealsDim2", "MultiprojectiveVarieties", "NAGtypes", "Name", "Nauty", "NautyGraphs", "NCAlgebra", "NCLex", "NewFromMethod", "newline", "NewMethod", "NewOfFromMethod", "NewOfMethod", "nil", "Node", "NoetherianOperators", "NoetherNormalization", "NonminimalComplexes", "NoPrint", "Normaliz", "NormalToricVarieties", "notify", "NTL", "null", "nullaryMethods", "NumericalAlgebraicGeometry", "NumericalCertification", "NumericalImplicitization", "NumericalLinearAlgebra", "NumericalSchubertCalculus", "NumericSolutions", "OldPolyhedra", "OldToricVectorBundles", "OnlineLookup", "OO", "oo", "ooo", "oooo", "OpenMath", "operatorAttributes", "OptionalComponentsPresent", "Options", "Order", "order", "OutputDictionary", 
"Outputs", "PackageCitations", "PackageDictionary", "PackageExports", "PackageImports", "PackageTemplate", "PairLimit", "PairsRemaining", "Parametrization", "Parsing", "path", "PencilsOfQuadrics", "Permanents", "PHCpack", "PhylogeneticTrees", "pi", "PieriMaps", "PlaneCurveSingularities", "Points", "Polyhedra", "Polymake", "Posets", "Position", "PositivityToricBundles", "POSIX", "Postfix", "Pre", "Precision", "Prefix", "prefixDirectory", "prefixPath", "PrimaryDecomposition", "PrimaryTag", "PrimitiveElement", "Print", "printingAccuracy", "printingLeadLimit", "printingPrecision", "printingSeparator", "printingTimeLimit", "printingTrailLimit", "printWidth", "Probability", "profileSummary", "programPaths", "Projective", "Prune", "PruneComplex", "pruningMap", "PseudomonomialPrimaryDecomposition", "Pullback", "PushForward", "Python", "QthPower", "Quasidegrees", "QuaternaryQuartics", "QuillenSuslin", "quit", "Quotient", "Radical", "RadicalCodim1", "RaiseError", "RandomCanonicalCurves", "RandomComplexes", "RandomCurves", "RandomCurvesOverVerySmallFiniteFields", "RandomGenus14Curves", "RandomIdeals", "RandomMonomialIdeals", "RandomObjects", "RandomPlaneCurves", "RandomPoints", "RandomSpaceCurves", "Range", "RationalMaps", "RationalPoints", "RationalPoints2", "ReactionNetworks", "RealFP", "RealQP", "RealQP1", "RealRoots", "RealRR", "RealXD", "recursionLimit", "Reduce", "ReesAlgebra", "References", "ReflexivePolytopesDB", "Regularity", "RelativeCanonicalResolution", "Reload", "RemakeAllDocumentation", "RerunExamples", "ResidualIntersections", "ResLengthThree", "ResolutionsOfStanleyReisnerRings", "restart", "Result", "Resultants", "returnCode", "Reverse", "RevLex", "Right", "rootPath", "rootURI", "RunDirectory", "RunExamples", "RunExternalM2", "Saturation", "Schubert2", "SchurComplexes", "SchurFunctors", "SchurRings", "scriptCommandLine", "SCSCP", "SectionRing", "SeeAlso", "SegreClasses", "SemidefiniteProgramming", "Seminormalization", "SeparateExec", "Serialization", 
"sheafExt", "ShimoyamaYokoyama", "showClassStructure", "showStructure", "showUserStructure", "SimpleDoc", "SimplicialComplexes", "SimplicialDecomposability", "SimplicialPosets", "SimplifyFractions", "SizeLimit", "SkewCommutative", "SlackIdeals", "SLnEquivariantMatrices", "SLPexpressions", "Sort", "SortStrategy", "SourceCode", "SourceRing", "SpaceCurves", "SparseResultants", "SpechtModule", "SpecialFanoFourfolds", "SpectralSequences", "SRdeformations", "Standard", "StartWithOneMinor", "StatePolytope", "StatGraphs", "stderr", "stdio", "StopBeforeComputation", "stopIfError", "StopIteration", "StopWithMinimalGenerators", "Strategy", "Strict", "StronglyStableIdeals", "Style", "SubalgebraBases", "Subnodes", "SubringLimit", "subscript", "Sugarless", "SumsOfSquares", "SuperLinearAlgebra", "superscript", "SVDComplexes", "SwitchingFields", "SymbolicPowers", "SymmetricPolynomials", "Synopsis", "Syzygies", "SyzygyLimit", "SyzygyMatrix", "SyzygyRows", "TangentCone", "TateOnProducts", "TensorComplexes", "Test", "testExample", "TestIdeals", "TeXmacs", "Text", "ThinSincereQuivers", "ThreadedGB", "Threshold", "Topcom", "topLevelMode", "Tor", "TorAlgebra", "Toric", "ToricInvariants", "ToricTopology", "ToricVectorBundles", "Torsion", "TotalPairs", "Tree", "TriangularSets", "Triangulations", "Tries", "Trim", "Triplets", "Tropical", "true", "Truncate", "Truncations", "TSpreadIdeals", "TypicalValue", "typicalValues", "Undo", "Unique", "Units", "Unmixed", "Up", "UpdateOnly", "UpperTriangular", "Usage", "UseCachedExampleOutput", "UseHilbertFunction", "UserMode", "UseSyzygies", "Variable", "VariableBaseName", "Variables", "Vasconcelos", "VectorFields", "VectorGraphics", "Verbose", "Verbosity", "Verify", "VersalDeformations", "Version", "version", "VerticalSpace", "viewHelp", "VirtualResolutions", "Visualize", "WebApp", "Weights", "WeylAlgebra", "WeylGroups", "WhitneyStratifications", "Wrap", "XML" ) class Macaulay2Lexer(RegexLexer): """Lexer for Macaulay2, a software system for research in 
algebraic geometry.""" name = 'Macaulay2' url = 'https://faculty.math.illinois.edu/Macaulay2/' aliases = ['macaulay2'] filenames = ['*.m2'] tokens = { 'root': [ (r'--.*$', Comment.Single), (r'-\*', Comment.Multiline, 'block comment'), (r'"', String, 'quote string'), (r'///', String, 'slash string'), (words(M2KEYWORDS, prefix=r'\b', suffix=r'\b'), Keyword), (words(M2DATATYPES, prefix=r'\b', suffix=r'\b'), Name.Builtin), (words(M2FUNCTIONS, prefix=r'\b', suffix=r'\b'), Name.Function), (words(M2CONSTANTS, prefix=r'\b', suffix=r'\b'), Name.Constant), (r'\s+', Text.Whitespace), (r'.', Text) ], 'block comment' : [ (r'[^*-]+', Comment.Multiline), (r'\*-', Comment.Multiline, '#pop'), (r'[*-]', Comment.Multiline) ], 'quote string' : [ (r'[^\\"]+', String), (r'"', String, '#pop'), (r'\\"?', String), ], 'slash string' : [ (r'[^/]+', String), (r'(//)+(?!/)', String), (r'/(//)+(?!/)', String, '#pop'), (r'/', String) ] }
31,914
Python
17.341954
84
0.551482
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_postgres_builtins.py
""" pygments.lexers._postgres_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Self-updating data files for PostgreSQL lexer. Run with `python -I` to update itself. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # Autogenerated: please edit them if you like wasting your time. KEYWORDS = ( 'ABORT', 'ABSOLUTE', 'ACCESS', 'ACTION', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', 'ALL', 'ALSO', 'ALTER', 'ALWAYS', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT', 'ASYMMETRIC', 'AT', 'ATOMIC', 'ATTACH', 'ATTRIBUTE', 'AUTHORIZATION', 'BACKWARD', 'BEFORE', 'BEGIN', 'BETWEEN', 'BIGINT', 'BINARY', 'BIT', 'BOOLEAN', 'BOTH', 'BREADTH', 'BY', 'CACHE', 'CALL', 'CALLED', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CHAIN', 'CHAR', 'CHARACTER', 'CHARACTERISTICS', 'CHECK', 'CHECKPOINT', 'CLASS', 'CLOSE', 'CLUSTER', 'COALESCE', 'COLLATE', 'COLLATION', 'COLUMN', 'COLUMNS', 'COMMENT', 'COMMENTS', 'COMMIT', 'COMMITTED', 'COMPRESSION', 'CONCURRENTLY', 'CONFIGURATION', 'CONFLICT', 'CONNECTION', 'CONSTRAINT', 'CONSTRAINTS', 'CONTENT', 'CONTINUE', 'CONVERSION', 'COPY', 'COST', 'CREATE', 'CROSS', 'CSV', 'CUBE', 'CURRENT', 'CURRENT_CATALOG', 'CURRENT_DATE', 'CURRENT_ROLE', 'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'CYCLE', 'DATA', 'DATABASE', 'DAY', 'DEALLOCATE', 'DEC', 'DECIMAL', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DEPENDS', 'DEPTH', 'DESC', 'DETACH', 'DICTIONARY', 'DISABLE', 'DISCARD', 'DISTINCT', 'DO', 'DOCUMENT', 'DOMAIN', 'DOUBLE', 'DROP', 'EACH', 'ELSE', 'ENABLE', 'ENCODING', 'ENCRYPTED', 'END', 'ENUM', 'ESCAPE', 'EVENT', 'EXCEPT', 'EXCLUDE', 'EXCLUDING', 'EXCLUSIVE', 'EXECUTE', 'EXISTS', 'EXPLAIN', 'EXPRESSION', 'EXTENSION', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FAMILY', 'FETCH', 'FILTER', 'FINALIZE', 'FIRST', 'FLOAT', 'FOLLOWING', 'FOR', 'FORCE', 'FOREIGN', 'FORWARD', 
'FREEZE', 'FROM', 'FULL', 'FUNCTION', 'FUNCTIONS', 'GENERATED', 'GLOBAL', 'GRANT', 'GRANTED', 'GREATEST', 'GROUP', 'GROUPING', 'GROUPS', 'HANDLER', 'HAVING', 'HEADER', 'HOLD', 'HOUR', 'IDENTITY', 'IF', 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLICIT', 'IMPORT', 'IN', 'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDEX', 'INDEXES', 'INHERIT', 'INHERITS', 'INITIALLY', 'INLINE', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT', 'INTERVAL', 'INTO', 'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'JOIN', 'KEY', 'LABEL', 'LANGUAGE', 'LARGE', 'LAST', 'LATERAL', 'LEADING', 'LEAKPROOF', 'LEAST', 'LEFT', 'LEVEL', 'LIKE', 'LIMIT', 'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION', 'LOCK', 'LOCKED', 'LOGGED', 'MAPPING', 'MATCH', 'MATERIALIZED', 'MAXVALUE', 'METHOD', 'MINUTE', 'MINVALUE', 'MODE', 'MONTH', 'MOVE', 'NAME', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', 'NEW', 'NEXT', 'NFC', 'NFD', 'NFKC', 'NFKD', 'NO', 'NONE', 'NORMALIZE', 'NORMALIZED', 'NOT', 'NOTHING', 'NOTIFY', 'NOTNULL', 'NOWAIT', 'NULL', 'NULLIF', 'NULLS', 'NUMERIC', 'OBJECT', 'OF', 'OFF', 'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY', 'OPERATOR', 'OPTION', 'OPTIONS', 'OR', 'ORDER', 'ORDINALITY', 'OTHERS', 'OUT', 'OUTER', 'OVER', 'OVERLAPS', 'OVERLAY', 'OVERRIDING', 'OWNED', 'OWNER', 'PARALLEL', 'PARSER', 'PARTIAL', 'PARTITION', 'PASSING', 'PASSWORD', 'PLACING', 'PLANS', 'POLICY', 'POSITION', 'PRECEDING', 'PRECISION', 'PREPARE', 'PREPARED', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'PROCEDURES', 'PROGRAM', 'PUBLICATION', 'QUOTE', 'RANGE', 'READ', 'REAL', 'REASSIGN', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES', 'REFERENCING', 'REFRESH', 'REINDEX', 'RELATIVE', 'RELEASE', 'RENAME', 'REPEATABLE', 'REPLACE', 'REPLICA', 'RESET', 'RESTART', 'RESTRICT', 'RETURN', 'RETURNING', 'RETURNS', 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINES', 'ROW', 'ROWS', 'RULE', 'SAVEPOINT', 'SCHEMA', 'SCHEMAS', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 
'SELECT', 'SEQUENCE', 'SEQUENCES', 'SERIALIZABLE', 'SERVER', 'SESSION', 'SESSION_USER', 'SET', 'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SKIP', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SQL', 'STABLE', 'STANDALONE', 'START', 'STATEMENT', 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STORED', 'STRICT', 'STRIP', 'SUBSCRIPTION', 'SUBSTRING', 'SUPPORT', 'SYMMETRIC', 'SYSID', 'SYSTEM', 'TABLE', 'TABLES', 'TABLESAMPLE', 'TABLESPACE', 'TEMP', 'TEMPLATE', 'TEMPORARY', 'TEXT', 'THEN', 'TIES', 'TIME', 'TIMESTAMP', 'TO', 'TRAILING', 'TRANSACTION', 'TRANSFORM', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', 'TYPES', 'UESCAPE', 'UNBOUNDED', 'UNCOMMITTED', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', 'UNLOGGED', 'UNTIL', 'UPDATE', 'USER', 'USING', 'VACUUM', 'VALID', 'VALIDATE', 'VALIDATOR', 'VALUE', 'VALUES', 'VARCHAR', 'VARIADIC', 'VARYING', 'VERBOSE', 'VERSION', 'VIEW', 'VIEWS', 'VOLATILE', 'WHEN', 'WHERE', 'WHITESPACE', 'WINDOW', 'WITH', 'WITHIN', 'WITHOUT', 'WORK', 'WRAPPER', 'WRITE', 'XML', 'XMLATTRIBUTES', 'XMLCONCAT', 'XMLELEMENT', 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES', 'ZONE', ) DATATYPES = ( 'bigint', 'bigserial', 'bit', 'bit varying', 'bool', 'boolean', 'box', 'bytea', 'char', 'character', 'character varying', 'cidr', 'circle', 'date', 'decimal', 'double precision', 'float4', 'float8', 'inet', 'int', 'int2', 'int4', 'int8', 'integer', 'interval', 'json', 'jsonb', 'line', 'lseg', 'macaddr', 'macaddr8', 'money', 'numeric', 'path', 'pg_lsn', 'pg_snapshot', 'point', 'polygon', 'real', 'serial', 'serial2', 'serial4', 'serial8', 'smallint', 'smallserial', 'text', 'time', 'timestamp', 'timestamptz', 'timetz', 'tsquery', 'tsvector', 'txid_snapshot', 'uuid', 'varbit', 'varchar', 'with time zone', 'without time zone', 'xml', ) PSEUDO_TYPES = ( 'any', 'anyarray', 'anycompatible', 'anycompatiblearray', 'anycompatiblemultirange', 'anycompatiblenonarray', 
'anycompatiblerange', 'anyelement', 'anyenum', 'anymultirange', 'anynonarray', 'anyrange', 'cstring', 'event_trigger', 'fdw_handler', 'index_am_handler', 'internal', 'language_handler', 'pg_ddl_command', 'record', 'table_am_handler', 'trigger', 'tsm_handler', 'unknown', 'void', ) # Remove 'trigger' from types PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS)))) PLPGSQL_KEYWORDS = ( 'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT', 'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE', 'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE', ) if __name__ == '__main__': # pragma: no cover import re from urllib.request import urlopen from pygments.util import format_lines # One man's constant is another man's variable. SOURCE_URL = 'https://github.com/postgres/postgres/raw/master' KEYWORDS_URL = SOURCE_URL + '/src/include/parser/kwlist.h' DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml' def update_myself(): content = urlopen(DATATYPES_URL).read().decode('utf-8', errors='ignore') data_file = list(content.splitlines()) datatypes = parse_datatypes(data_file) pseudos = parse_pseudos(data_file) content = urlopen(KEYWORDS_URL).read().decode('utf-8', errors='ignore') keywords = parse_keywords(content) update_consts(__file__, 'DATATYPES', datatypes) update_consts(__file__, 'PSEUDO_TYPES', pseudos) update_consts(__file__, 'KEYWORDS', keywords) def parse_keywords(f): kw = [] for m in re.finditer(r'PG_KEYWORD\("(.+?)"', f): kw.append(m.group(1).upper()) if not kw: raise ValueError('no keyword found') kw.sort() return kw def parse_datatypes(f): dt = set() for line in f: if '<sect1' in line: break if '<entry><type>' not in line: continue # Parse a string such as # time [ (<replaceable>p</replaceable>) ] [ without time zone ] # into types "time" and "without time zone" # remove all the tags line = re.sub("<replaceable>[^<]+</replaceable>", "", line) line = re.sub("<[^>]+>", "", line) # Drop the parts containing braces for tmp 
in [t for tmp in line.split('[') for t in tmp.split(']') if "(" not in t]: for t in tmp.split(','): t = t.strip() if not t: continue dt.add(" ".join(t.split())) dt = list(dt) dt.sort() return dt def parse_pseudos(f): dt = [] re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">') re_entry = re.compile(r'\s*<entry><type>(.+?)</type></entry>') re_end = re.compile(r'\s*</table>') f = iter(f) for line in f: if re_start.match(line) is not None: break else: raise ValueError('pseudo datatypes table not found') for line in f: m = re_entry.match(line) if m is not None: dt.append(m.group(1)) if re_end.match(line) is not None: break else: raise ValueError('end of pseudo datatypes table not found') if not dt: raise ValueError('pseudo datatypes not found') dt.sort() return dt def update_consts(filename, constname, content): with open(filename) as f: data = f.read() # Line to start/end inserting re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S) m = re_match.search(data) if not m: raise ValueError('Could not find existing definition for %s' % (constname,)) new_block = format_lines(constname, content) data = data[:m.start()] + new_block + data[m.end():] with open(filename, 'w', newline='\n') as f: f.write(data) update_myself()
12,316
Python
16.981022
82
0.484898
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/nimrod.py
""" pygments.lexers.nimrod ~~~~~~~~~~~~~~~~~~~~~~ Lexer for the Nim language (formerly known as Nimrod). :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, default, bygroups from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Error __all__ = ['NimrodLexer'] class NimrodLexer(RegexLexer): """ For Nim source code. .. versionadded:: 1.5 """ name = 'Nimrod' url = 'http://nim-lang.org/' aliases = ['nimrod', 'nim'] filenames = ['*.nim', '*.nimrod'] mimetypes = ['text/x-nim'] flags = re.MULTILINE | re.IGNORECASE def underscorize(words): newWords = [] new = [] for word in words: for ch in word: new.append(ch) new.append("_?") newWords.append(''.join(new)) new = [] return "|".join(newWords) keywords = [ 'addr', 'and', 'as', 'asm', 'bind', 'block', 'break', 'case', 'cast', 'concept', 'const', 'continue', 'converter', 'defer', 'discard', 'distinct', 'div', 'do', 'elif', 'else', 'end', 'enum', 'except', 'export', 'finally', 'for', 'if', 'in', 'yield', 'interface', 'is', 'isnot', 'iterator', 'let', 'mixin', 'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'static', 'try', 'tuple', 'type', 'using', 'when', 'while', 'xor' ] keywordsPseudo = [ 'nil', 'true', 'false' ] opWords = [ 'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in', 'notin', 'is', 'isnot' ] types = [ 'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64', 'bool', 'char', 'range', 'array', 'seq', 'set', 'string' ] tokens = { 'root': [ # Comments (r'##\[', String.Doc, 'doccomment'), (r'##.*$', String.Doc), (r'#\[', Comment.Multiline, 'comment'), (r'#.*$', Comment), # Pragmas (r'\{\.', String.Other, 'pragma'), # Operators (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator), (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;', Punctuation), # Case statement branch 
(r'(\n\s*)(of)(\s)', bygroups(Text.Whitespace, Keyword, Text.Whitespace), 'casebranch'), # Strings (r'(?:[\w]+)"', String, 'rdqs'), (r'"""', String.Double, 'tdqs'), ('"', String, 'dqs'), # Char ("'", String.Char, 'chars'), # Keywords (r'(%s)\b' % underscorize(opWords), Operator.Word), (r'(proc|func|method|macro|template)(\s)(?![(\[\]])', bygroups(Keyword, Text.Whitespace), 'funcname'), (r'(%s)\b' % underscorize(keywords), Keyword), (r'(%s)\b' % underscorize(['from', 'import', 'include', 'export']), Keyword.Namespace), (r'(v_?a_?r)\b', Keyword.Declaration), (r'(%s)\b' % underscorize(types), Name.Builtin), (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), # Identifiers (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), # Numbers (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))', Number.Float, ('float-suffix', 'float-number')), (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'), (r'0b[01][01_]*', Number.Bin, 'int-suffix'), (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), # Whitespace (r'\s+', Text.Whitespace), (r'.+$', Error), ], 'chars': [ (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape), (r"'", String.Char, '#pop'), (r".", String.Char) ], 'strings': [ (r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol), (r'[^\\\'"$\n]+', String), # quotes, dollars and backslashes must be parsed one at a time (r'[\'"\\]', String), # unhandled string formatting sign (r'\$', String) # newlines are an error (use "nl" state) ], 'doccomment': [ (r'[^\]#]+', String.Doc), (r'##\[', String.Doc, '#push'), (r'\]##', String.Doc, '#pop'), (r'[\]#]', String.Doc), ], 'comment': [ (r'[^\]#]+', Comment.Multiline), (r'#\[', Comment.Multiline, '#push'), (r'\]#', Comment.Multiline, '#pop'), (r'[\]#]', Comment.Multiline), ], 'dqs': [ (r'\\([\\abcefnrtvl"\']|\n|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape), (r'"', String, '#pop'), include('strings') ], 'rdqs': [ (r'"(?!")', String, '#pop'), (r'""', String.Escape), include('strings') ], 'tdqs': [ (r'"""', 
String.Double, '#pop'), include('strings'), (r'\n', String.Double) ], 'funcname': [ (r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'), (r'`.+`', Name.Function, '#pop') ], 'nl': [ (r'\n', String) ], 'float-number': [ (r'\.(?!\.)[0-9_]*[f]*', Number.Float), (r'e[+-]?[0-9][0-9_]*', Number.Float), default('#pop') ], 'float-suffix': [ (r'\'f(32|64)', Number.Float), default('#pop') ], 'int-suffix': [ (r'\'i(32|64)', Number.Integer.Long), (r'\'i(8|16)', Number.Integer), default('#pop') ], 'casebranch': [ (r',', Punctuation), (r'[\n ]+', Text.Whitespace), (r':', Operator, '#pop'), (r'\w+|[^:]', Name.Label), ], 'pragma': [ (r'[:,]', Text), (r'[\n ]+', Text.Whitespace), (r'\.\}', String.Other, '#pop'), (r'\w+|\W+|[^.}]', String.Other), ], }
6,416
Python
30.925373
80
0.403211
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/pony.py
""" pygments.lexers.pony ~~~~~~~~~~~~~~~~~~~~ Lexers for Pony and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation __all__ = ['PonyLexer'] class PonyLexer(RegexLexer): """ For Pony source code. .. versionadded:: 2.4 """ name = 'Pony' aliases = ['pony'] filenames = ['*.pony'] _caps = r'(iso|trn|ref|val|box|tag)' tokens = { 'root': [ (r'\n', Text), (r'[^\S\n]+', Text), (r'//.*\n', Comment.Single), (r'/\*', Comment.Multiline, 'nested_comment'), (r'"""(?:.|\n)*?"""', String.Doc), (r'"', String, 'string'), (r'\'.*\'', String.Char), (r'=>|[]{}:().~;,|&!^?[]', Punctuation), (words(( 'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt', 'not', 'or'), suffix=r'\b'), Operator.Word), (r'!=|==|<<|>>|[-+/*%=<>]', Operator), (words(( 'box', 'break', 'compile_error', 'compile_intrinsic', 'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error', 'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match', 'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then', 'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where', 'while', 'with', '#any', '#read', '#send', '#share'), suffix=r'\b'), Keyword), (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)', bygroups(Keyword, Text), 'typename'), (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'), (words(( 'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128', 'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64', 'Bool', 'Pointer', 'None', 'Any', 'Array', 'String', 'Iterator'), suffix=r'\b'), Name.Builtin.Type), (r'_?[A-Z]\w*', Name.Type), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'\d+', Number.Integer), (r'(true|false)\b', Name.Builtin), (r'_\d*', Name), (r'_?[a-z][\w\']*', Name) ], 'typename': [ (_caps + 
r'?((?:\s)*)(_?[A-Z]\w*)', bygroups(Keyword, Text, Name.Class), '#pop') ], 'methodname': [ (_caps + r'?((?:\s)*)(_?[a-z]\w*)', bygroups(Keyword, Text, Name.Function), '#pop') ], 'nested_comment': [ (r'[^*/]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline) ], 'string': [ (r'"', String, '#pop'), (r'\\"', String), (r'[^\\"]+', String) ] }
3,244
Python
33.521276
78
0.408138
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/fortran.py
""" pygments.lexers.fortran ~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Fortran languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, bygroups, include, words, using, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic __all__ = ['FortranLexer', 'FortranFixedLexer'] class FortranLexer(RegexLexer): """ Lexer for FORTRAN 90 code. .. versionadded:: 0.10 """ name = 'Fortran' url = 'https://fortran-lang.org/' aliases = ['fortran', 'f90'] filenames = ['*.f03', '*.f90', '*.F03', '*.F90'] mimetypes = ['text/x-fortran'] flags = re.IGNORECASE | re.MULTILINE # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION # Operators: **, *, +, -, /, <, >, <=, >=, ==, /= # Logical (?): NOT, AND, OR, EQV, NEQV # Builtins: # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html tokens = { 'root': [ (r'^#.*\n', Comment.Preproc), (r'!.*\n', Comment), include('strings'), include('core'), (r'[a-z][\w$]*', Name), include('nums'), (r'[\s]+', Text.Whitespace), ], 'core': [ # Statements (r'\b(DO)(\s+)(CONCURRENT)\b', bygroups(Keyword, Text.Whitespace, Keyword)), (r'\b(GO)(\s*)(TO)\b', bygroups(Keyword, Text.Whitespace, Keyword)), (words(( 'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE', 'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND', 'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE', 'CODIMENSION', 'COMMON', 'CONTIGUOUS', 'CONTAINS', 'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE', 'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END', 'ENDASSOCIATE', 'ENDBLOCK', 'ENDDO', 'ENDENUM', 'ENDFORALL', 'ENDFUNCTION', 'ENDIF', 'ENDINTERFACE', 'ENDMODULE', 'ENDPROGRAM', 'ENDSELECT', 'ENDSUBMODULE', 'ENDSUBROUTINE', 'ENDTYPE', 'ENDWHERE', 'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'ERROR STOP', 'EXIT', 
'EXTENDS', 'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT', 'FUNCTION', 'GENERIC', 'IF', 'IMAGES', 'IMPLICIT', 'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE', 'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY', 'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'ONLY', 'OPEN', 'OPTIONAL', 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE', 'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ', 'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE', 'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES', 'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE', 'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'), Keyword), # Data Types (words(( 'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER', 'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG', 'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T', 'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T', 'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T', 'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE', 'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX', 'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'), prefix=r'\b', suffix=r'\s*\b'), Keyword.Type), # Operators (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator), (r'(::)', Keyword.Declaration), (r'[()\[\],:&%;.]', Punctuation), # Intrinsics (words(( 'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL', 'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog', 'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH', 'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref', 'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0', 'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE', 'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp', 'Char', 'ChDir', 'ChMod', 
'CLog', 'Cmplx', 'Command_Argument_Count', 'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift', 'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated', 'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer', 'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc', 'C_Sizeof', 'C_New_Line', 'C_Carriage_Return', 'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin', 'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY', 'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF', 'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax', 'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH', 'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime', 'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime', 'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of', 'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush', 'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell', 'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument', 'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog', 'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs', 'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits', 'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr', 'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index', 'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC', 'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End', 'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound', 'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk', 'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat', 'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent', 'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc', 'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits', 'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images', 'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product', 
'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real', 'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift', 'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind', 'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape', 'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH', 'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand', 'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock', 'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer', 'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask', 'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp', 'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'), Name.Builtin), # Booleans (r'\.(true|false)\.', Name.Builtin), # Comparing Operators (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word), ], 'strings': [ (r'"(\\[0-7]+|\\[^0-7]|[^"\\])*"', String.Double), (r"'(\\[0-7]+|\\[^0-7]|[^'\\])*'", String.Single), ], 'nums': [ (r'\d+(?![.e])(_([1-9]|[a-z]\w*))?', Number.Integer), (r'[+-]?\d*\.\d+([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float), (r'[+-]?\d+\.\d*([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float), (r'[+-]?\d+(\.\d*)?[ed][-+]?\d+(_([1-9]|[a-z]\w*))?', Number.Float), ], } class FortranFixedLexer(RegexLexer): """ Lexer for fixed format Fortran. .. 
versionadded:: 2.1 """ name = 'FortranFixed' aliases = ['fortranfixed'] filenames = ['*.f', '*.F'] flags = re.IGNORECASE def _lex_fortran(self, match, ctx=None): """Lex a line just as free form fortran without line break.""" lexer = FortranLexer() text = match.group(0) + "\n" for index, token, value in lexer.get_tokens_unprocessed(text): value = value.replace('\n', '') if value != '': yield index, token, value tokens = { 'root': [ (r'[C*].*\n', Comment), (r'#.*\n', Comment.Preproc), (r' {0,4}!.*\n', Comment), (r'(.{5})', Name.Label, 'cont-char'), (r'.*\n', using(FortranLexer)), ], 'cont-char': [ (' ', Text, 'code'), ('0', Comment, 'code'), ('.', Generic.Strong, 'code'), ], 'code': [ (r'(.{66})(.*)(\n)', bygroups(_lex_fortran, Comment, Text.Whitespace), 'root'), (r'(.*)(\n)', bygroups(_lex_fortran, Text.Whitespace), 'root'), default('root'), ] }
10,336
Python
47.303738
89
0.469234
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/amdgpu.py
""" pygments.lexers.amdgpu ~~~~~~~~~~~~~~~~~~~~~~ Lexers for the AMDGPU ISA assembly. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words from pygments.token import Name, Text, Keyword, Whitespace, Number, Comment import re __all__ = ['AMDGPULexer'] class AMDGPULexer(RegexLexer): """ For AMD GPU assembly. .. versionadded:: 2.8 """ name = 'AMDGPU' aliases = ['amdgpu'] filenames = ['*.isa'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Whitespace), (r'[\r\n]+', Text), (r'(([a-z_0-9])*:([a-z_0-9])*)', Name.Attribute), (r'(\[|\]|\(|\)|,|\:|\&)', Text), (r'([;#]|//).*?\n', Comment.Single), (r'((s_)?(ds|buffer|flat|image)_[a-z0-9_]+)', Keyword.Reserved), (r'(_lo|_hi)', Name.Variable), (r'(vmcnt|lgkmcnt|expcnt)', Name.Attribute), (words(( 'op', 'vaddr', 'vdata', 'soffset', 'srsrc', 'format', 'offset', 'offen', 'idxen', 'glc', 'dlc', 'slc', 'tfe', 'lds', 'lit', 'unorm'), suffix=r'\b'), Name.Attribute), (r'(label_[a-z0-9]+)', Keyword), (r'(_L[0-9]*)', Name.Variable), (r'(s|v)_[a-z0-9_]+', Keyword), (r'(v[0-9.]+|vcc|exec|v)', Name.Variable), (r's[0-9.]+|s', Name.Variable), (r'[0-9]+\.[^0-9]+', Number.Float), (r'(0[xX][a-z0-9]+)|([0-9]+)', Number.Integer) ] }
1,603
Python
28.703703
78
0.462882
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/parasail.py
""" pygments.lexers.parasail ~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for ParaSail. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Literal __all__ = ['ParaSailLexer'] class ParaSailLexer(RegexLexer): """ For ParaSail source code. .. versionadded:: 2.1 """ name = 'ParaSail' url = 'http://www.parasail-lang.org' aliases = ['parasail'] filenames = ['*.psi', '*.psl'] mimetypes = ['text/x-parasail'] flags = re.MULTILINE tokens = { 'root': [ (r'[^\S\n]+', Text), (r'//.*?\n', Comment.Single), (r'\b(and|or|xor)=', Operator.Word), (r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|' r'(is|not)\s+null)\b', Operator.Word), # Keywords (r'\b(abs|abstract|all|block|class|concurrent|const|continue|' r'each|end|exit|extends|exports|forward|func|global|implements|' r'import|in|interface|is|lambda|locked|new|not|null|of|op|' r'optional|private|queued|ref|return|reverse|separate|some|' r'type|until|var|with|' # Control flow r'if|then|else|elsif|case|for|while|loop)\b', Keyword.Reserved), (r'(abstract\s+)?(interface|class|op|func|type)', Keyword.Declaration), # Literals (r'"[^"]*"', String), (r'\\[\'ntrf"0]', String.Escape), (r'#[a-zA-Z]\w*', Literal), # Enumeration include('numbers'), (r"'[^']'", String.Char), (r'[a-zA-Z]\w*', Name), # Operators and Punctuation (r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|' r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\|=|\||/=|\+|-|\*|/|' r'\.\.|<\.\.|\.\.<|<\.\.<)', Operator), (r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)', Punctuation), (r'\n+', Text), ], 'numbers': [ (r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex), # any base (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), # C-like hex (r'0[bB][01][01_]*', Number.Bin), # C-like bin (r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*', # float exp Number.Float), (r'\d[0-9_]*\.\d[0-9_]*', Number.Float), # float 
(r'\d[0-9_]*', Number.Integer), # integer ], }
2,720
Python
33.0125
79
0.438603
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ride.py
""" pygments.lexers.ride ~~~~~~~~~~~~~~~~~~~~ Lexer for the Ride programming language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words, include from pygments.token import Comment, Keyword, Name, Number, Punctuation, \ String, Text __all__ = ['RideLexer'] class RideLexer(RegexLexer): """ For `Ride <https://docs.wavesplatform.com/en/ride/about-ride.html>`_ source code. .. versionadded:: 2.6 """ name = 'Ride' aliases = ['ride'] filenames = ['*.ride'] mimetypes = ['text/x-ride'] validName = r'[a-zA-Z_][a-zA-Z0-9_\']*' builtinOps = ( '||', '|', '>=', '>', '==', '!', '=', '<=', '<', '::', ':+', ':', '!=', '/', '.', '=>', '-', '+', '*', '&&', '%', '++', ) globalVariablesName = ( 'NOALG', 'MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', 'SHA3224', 'SHA3256', 'SHA3384', 'SHA3512', 'nil', 'this', 'unit', 'height', 'lastBlock', 'Buy', 'Sell', 'CEILING', 'FLOOR', 'DOWN', 'HALFDOWN', 'HALFEVEN', 'HALFUP', 'UP', ) typesName = ( 'Unit', 'Int', 'Boolean', 'ByteVector', 'String', 'Address', 'Alias', 'Transfer', 'AssetPair', 'DataEntry', 'Order', 'Transaction', 'GenesisTransaction', 'PaymentTransaction', 'ReissueTransaction', 'BurnTransaction', 'MassTransferTransaction', 'ExchangeTransaction', 'TransferTransaction', 'SetAssetScriptTransaction', 'InvokeScriptTransaction', 'IssueTransaction', 'LeaseTransaction', 'LeaseCancelTransaction', 'CreateAliasTransaction', 'SetScriptTransaction', 'SponsorFeeTransaction', 'DataTransaction', 'WriteSet', 'AttachedPayment', 'ScriptTransfer', 'TransferSet', 'ScriptResult', 'Invocation', 'Asset', 'BlockInfo', 'Issue', 'Reissue', 'Burn', 'NoAlg', 'Md5', 'Sha1', 'Sha224', 'Sha256', 'Sha384', 'Sha512', 'Sha3224', 'Sha3256', 'Sha3384', 'Sha3512', 'BinaryEntry', 'BooleanEntry', 'IntegerEntry', 'StringEntry', 'List', 'Ceiling', 'Down', 'Floor', 'HalfDown', 'HalfEven', 'HalfUp', 'Up', ) functionsName = ( 'fraction', 'size', 'toBytes', 'take', 
'drop', 'takeRight', 'dropRight', 'toString', 'isDefined', 'extract', 'throw', 'getElement', 'value', 'cons', 'toUtf8String', 'toInt', 'indexOf', 'lastIndexOf', 'split', 'parseInt', 'parseIntValue', 'keccak256', 'blake2b256', 'sha256', 'sigVerify', 'toBase58String', 'fromBase58String', 'toBase64String', 'fromBase64String', 'transactionById', 'transactionHeightById', 'getInteger', 'getBoolean', 'getBinary', 'getString', 'addressFromPublicKey', 'addressFromString', 'addressFromRecipient', 'assetBalance', 'wavesBalance', 'getIntegerValue', 'getBooleanValue', 'getBinaryValue', 'getStringValue', 'addressFromStringValue', 'assetInfo', 'rsaVerify', 'checkMerkleProof', 'median', 'valueOrElse', 'valueOrErrorMessage', 'contains', 'log', 'pow', 'toBase16String', 'fromBase16String', 'blockInfoByHeight', 'transferTransactionById', ) reservedWords = words(( 'match', 'case', 'else', 'func', 'if', 'let', 'then', '@Callable', '@Verifier', ), suffix=r'\b') tokens = { 'root': [ # Comments (r'#.*', Comment.Single), # Whitespace (r'\s+', Text), # Strings (r'"', String, 'doublequote'), (r'utf8\'', String, 'utf8quote'), (r'base(58|64|16)\'', String, 'singlequote'), # Keywords (reservedWords, Keyword.Reserved), (r'\{-#.*?#-\}', Keyword.Reserved), (r'FOLD<\d+>', Keyword.Reserved), # Types (words(typesName), Keyword.Type), # Main # (specialName, Keyword.Reserved), # Prefix Operators (words(builtinOps, prefix=r'\(', suffix=r'\)'), Name.Function), # Infix Operators (words(builtinOps), Name.Function), (words(globalVariablesName), Name.Function), (words(functionsName), Name.Function), # Numbers include('numbers'), # Variable Names (validName, Name.Variable), # Parens (r'[,()\[\]{}]', Punctuation), ], 'doublequote': [ (r'\\u[0-9a-fA-F]{4}', String.Escape), (r'\\[nrfvb\\"]', String.Escape), (r'[^"]', String), (r'"', String, '#pop'), ], 'utf8quote': [ (r'\\u[0-9a-fA-F]{4}', String.Escape), (r'\\[nrfvb\\\']', String.Escape), (r'[^\']', String), (r'\'', String, '#pop'), ], 'singlequote': [ 
(r'[^\']', String), (r'\'', String, '#pop'), ], 'numbers': [ (r'_?\d+', Number.Integer), ], }
5,056
Python
35.121428
80
0.522547
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/matlab.py
""" pygments.lexers.matlab ~~~~~~~~~~~~~~~~~~~~~~ Lexers for Matlab and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import Lexer, RegexLexer, bygroups, default, words, \ do_insertions, include from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Whitespace from pygments.lexers import _scilab_builtins __all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer'] class MatlabLexer(RegexLexer): """ For Matlab source code. .. versionadded:: 0.10 """ name = 'Matlab' aliases = ['matlab'] filenames = ['*.m'] mimetypes = ['text/matlab'] _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\./|/|\\' tokens = { 'expressions': [ # operators: (_operators, Operator), # numbers (must come before punctuation to handle `.5`; cannot use # `\b` due to e.g. `5. + .5`). The negative lookahead on operators # avoids including the dot in `1./x` (the dot is part of `./`). (r'(?<!\w)((\d+\.\d+)|(\d*\.\d+)|(\d+\.(?!%s)))' r'([eEf][+-]?\d+)?(?!\w)' % _operators, Number.Float), (r'\b\d+[eEf][+-]?[0-9]+\b', Number.Float), (r'\b\d+\b', Number.Integer), # punctuation: (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation), (r'=|:|;', Punctuation), # quote can be transpose, instead of string: # (not great, but handles common cases...) (r'(?<=[\w)\].])\'+', Operator), (r'"(""|[^"])*"', String), (r'(?<![\w)\].])\'', String, 'string'), (r'[a-zA-Z_]\w*', Name), (r'\s+', Whitespace), (r'.', Text), ], 'root': [ # line starting with '!' is sent as a system command. not sure what # label to use... 
(r'^!.*', String.Other), (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), (r'%.*$', Comment), (r'(\s*^\s*)(function)\b', bygroups(Whitespace, Keyword), 'deffunc'), (r'(\s*^\s*)(properties)(\s+)(\()', bygroups(Whitespace, Keyword, Whitespace, Punctuation), ('defprops', 'propattrs')), (r'(\s*^\s*)(properties)\b', bygroups(Whitespace, Keyword), 'defprops'), # from 'iskeyword' on version 9.4 (R2018a): # Check that there is no preceding dot, as keywords are valid field # names. (words(('break', 'case', 'catch', 'classdef', 'continue', 'dynamicprops', 'else', 'elseif', 'end', 'for', 'function', 'global', 'if', 'methods', 'otherwise', 'parfor', 'persistent', 'return', 'spmd', 'switch', 'try', 'while'), prefix=r'(?<!\.)(\s*)(', suffix=r')\b'), bygroups(Whitespace, Keyword)), ( words( [ # See https://mathworks.com/help/matlab/referencelist.html # Below data from 2021-02-10T18:24:08Z # for Matlab release R2020b "BeginInvoke", "COM", "Combine", "CombinedDatastore", "EndInvoke", "Execute", "FactoryGroup", "FactorySetting", "Feval", "FunctionTestCase", "GetCharArray", "GetFullMatrix", "GetVariable", "GetWorkspaceData", "GraphPlot", "H5.close", "H5.garbage_collect", "H5.get_libversion", "H5.open", "H5.set_free_list_limits", "H5A.close", "H5A.create", "H5A.delete", "H5A.get_info", "H5A.get_name", "H5A.get_space", "H5A.get_type", "H5A.iterate", "H5A.open", "H5A.open_by_idx", "H5A.open_by_name", "H5A.read", "H5A.write", "H5D.close", "H5D.create", "H5D.get_access_plist", "H5D.get_create_plist", "H5D.get_offset", "H5D.get_space", "H5D.get_space_status", "H5D.get_storage_size", "H5D.get_type", "H5D.open", "H5D.read", "H5D.set_extent", "H5D.vlen_get_buf_size", "H5D.write", "H5DS.attach_scale", "H5DS.detach_scale", "H5DS.get_label", "H5DS.get_num_scales", "H5DS.get_scale_name", "H5DS.is_scale", "H5DS.iterate_scales", "H5DS.set_label", "H5DS.set_scale", "H5E.clear", "H5E.get_major", "H5E.get_minor", "H5E.walk", "H5F.close", "H5F.create", "H5F.flush", "H5F.get_access_plist", 
"H5F.get_create_plist", "H5F.get_filesize", "H5F.get_freespace", "H5F.get_info", "H5F.get_mdc_config", "H5F.get_mdc_hit_rate", "H5F.get_mdc_size", "H5F.get_name", "H5F.get_obj_count", "H5F.get_obj_ids", "H5F.is_hdf5", "H5F.mount", "H5F.open", "H5F.reopen", "H5F.set_mdc_config", "H5F.unmount", "H5G.close", "H5G.create", "H5G.get_info", "H5G.open", "H5I.dec_ref", "H5I.get_file_id", "H5I.get_name", "H5I.get_ref", "H5I.get_type", "H5I.inc_ref", "H5I.is_valid", "H5L.copy", "H5L.create_external", "H5L.create_hard", "H5L.create_soft", "H5L.delete", "H5L.exists", "H5L.get_info", "H5L.get_name_by_idx", "H5L.get_val", "H5L.iterate", "H5L.iterate_by_name", "H5L.move", "H5L.visit", "H5L.visit_by_name", "H5ML.compare_values", "H5ML.get_constant_names", "H5ML.get_constant_value", "H5ML.get_function_names", "H5ML.get_mem_datatype", "H5O.close", "H5O.copy", "H5O.get_comment", "H5O.get_comment_by_name", "H5O.get_info", "H5O.link", "H5O.open", "H5O.open_by_idx", "H5O.set_comment", "H5O.set_comment_by_name", "H5O.visit", "H5O.visit_by_name", "H5P.all_filters_avail", "H5P.close", "H5P.close_class", "H5P.copy", "H5P.create", "H5P.equal", "H5P.exist", "H5P.fill_value_defined", "H5P.get", "H5P.get_alignment", "H5P.get_alloc_time", "H5P.get_attr_creation_order", "H5P.get_attr_phase_change", "H5P.get_btree_ratios", "H5P.get_char_encoding", "H5P.get_chunk", "H5P.get_chunk_cache", "H5P.get_class", "H5P.get_class_name", "H5P.get_class_parent", "H5P.get_copy_object", "H5P.get_create_intermediate_group", "H5P.get_driver", "H5P.get_edc_check", "H5P.get_external", "H5P.get_external_count", "H5P.get_family_offset", "H5P.get_fapl_core", "H5P.get_fapl_family", "H5P.get_fapl_multi", "H5P.get_fclose_degree", "H5P.get_fill_time", "H5P.get_fill_value", "H5P.get_filter", "H5P.get_filter_by_id", "H5P.get_gc_references", "H5P.get_hyper_vector_size", "H5P.get_istore_k", "H5P.get_layout", "H5P.get_libver_bounds", "H5P.get_link_creation_order", "H5P.get_link_phase_change", "H5P.get_mdc_config", 
"H5P.get_meta_block_size", "H5P.get_multi_type", "H5P.get_nfilters", "H5P.get_nprops", "H5P.get_sieve_buf_size", "H5P.get_size", "H5P.get_sizes", "H5P.get_small_data_block_size", "H5P.get_sym_k", "H5P.get_userblock", "H5P.get_version", "H5P.isa_class", "H5P.iterate", "H5P.modify_filter", "H5P.remove_filter", "H5P.set", "H5P.set_alignment", "H5P.set_alloc_time", "H5P.set_attr_creation_order", "H5P.set_attr_phase_change", "H5P.set_btree_ratios", "H5P.set_char_encoding", "H5P.set_chunk", "H5P.set_chunk_cache", "H5P.set_copy_object", "H5P.set_create_intermediate_group", "H5P.set_deflate", "H5P.set_edc_check", "H5P.set_external", "H5P.set_family_offset", "H5P.set_fapl_core", "H5P.set_fapl_family", "H5P.set_fapl_log", "H5P.set_fapl_multi", "H5P.set_fapl_sec2", "H5P.set_fapl_split", "H5P.set_fapl_stdio", "H5P.set_fclose_degree", "H5P.set_fill_time", "H5P.set_fill_value", "H5P.set_filter", "H5P.set_fletcher32", "H5P.set_gc_references", "H5P.set_hyper_vector_size", "H5P.set_istore_k", "H5P.set_layout", "H5P.set_libver_bounds", "H5P.set_link_creation_order", "H5P.set_link_phase_change", "H5P.set_mdc_config", "H5P.set_meta_block_size", "H5P.set_multi_type", "H5P.set_nbit", "H5P.set_scaleoffset", "H5P.set_shuffle", "H5P.set_sieve_buf_size", "H5P.set_sizes", "H5P.set_small_data_block_size", "H5P.set_sym_k", "H5P.set_userblock", "H5R.create", "H5R.dereference", "H5R.get_name", "H5R.get_obj_type", "H5R.get_region", "H5S.close", "H5S.copy", "H5S.create", "H5S.create_simple", "H5S.extent_copy", "H5S.get_select_bounds", "H5S.get_select_elem_npoints", "H5S.get_select_elem_pointlist", "H5S.get_select_hyper_blocklist", "H5S.get_select_hyper_nblocks", "H5S.get_select_npoints", "H5S.get_select_type", "H5S.get_simple_extent_dims", "H5S.get_simple_extent_ndims", "H5S.get_simple_extent_npoints", "H5S.get_simple_extent_type", "H5S.is_simple", "H5S.offset_simple", "H5S.select_all", "H5S.select_elements", "H5S.select_hyperslab", "H5S.select_none", "H5S.select_valid", "H5S.set_extent_none", 
"H5S.set_extent_simple", "H5T.array_create", "H5T.close", "H5T.commit", "H5T.committed", "H5T.copy", "H5T.create", "H5T.detect_class", "H5T.enum_create", "H5T.enum_insert", "H5T.enum_nameof", "H5T.enum_valueof", "H5T.equal", "H5T.get_array_dims", "H5T.get_array_ndims", "H5T.get_class", "H5T.get_create_plist", "H5T.get_cset", "H5T.get_ebias", "H5T.get_fields", "H5T.get_inpad", "H5T.get_member_class", "H5T.get_member_index", "H5T.get_member_name", "H5T.get_member_offset", "H5T.get_member_type", "H5T.get_member_value", "H5T.get_native_type", "H5T.get_nmembers", "H5T.get_norm", "H5T.get_offset", "H5T.get_order", "H5T.get_pad", "H5T.get_precision", "H5T.get_sign", "H5T.get_size", "H5T.get_strpad", "H5T.get_super", "H5T.get_tag", "H5T.insert", "H5T.is_variable_str", "H5T.lock", "H5T.open", "H5T.pack", "H5T.set_cset", "H5T.set_ebias", "H5T.set_fields", "H5T.set_inpad", "H5T.set_norm", "H5T.set_offset", "H5T.set_order", "H5T.set_pad", "H5T.set_precision", "H5T.set_sign", "H5T.set_size", "H5T.set_strpad", "H5T.set_tag", "H5T.vlen_create", "H5Z.filter_avail", "H5Z.get_filter_info", "Inf", "KeyValueDatastore", "KeyValueStore", "MException", "MException.last", "MaximizeCommandWindow", "MemoizedFunction", "MinimizeCommandWindow", "NET", "NET.Assembly", "NET.GenericClass", "NET.NetException", "NET.addAssembly", "NET.convertArray", "NET.createArray", "NET.createGeneric", "NET.disableAutoRelease", "NET.enableAutoRelease", "NET.invokeGenericMethod", "NET.isNETSupported", "NET.setStaticProperty", "NaN", "NaT", "OperationResult", "PutCharArray", "PutFullMatrix", "PutWorkspaceData", "PythonEnvironment", "Quit", "RandStream", "ReleaseCompatibilityException", "ReleaseCompatibilityResults", "Remove", "RemoveAll", "Setting", "SettingsGroup", "TallDatastore", "Test", "TestResult", "Tiff", "TransformedDatastore", "ValueIterator", "VersionResults", "VideoReader", "VideoWriter", "abs", "accumarray", "acos", "acosd", "acosh", "acot", "acotd", "acoth", "acsc", "acscd", "acsch", 
"actxGetRunningServer", "actxserver", "add", "addCause", "addCorrection", "addFile", "addFolderIncludingChildFiles", "addGroup", "addLabel", "addPath", "addReference", "addSetting", "addShortcut", "addShutdownFile", "addStartupFile", "addStyle", "addToolbarExplorationButtons", "addboundary", "addcats", "addedge", "addevent", "addlistener", "addmulti", "addnode", "addpath", "addpoints", "addpref", "addprop", "addsample", "addsampletocollection", "addtodate", "addts", "addvars", "adjacency", "airy", "align", "alim", "all", "allchild", "alpha", "alphaShape", "alphaSpectrum", "alphaTriangulation", "alphamap", "alphanumericBoundary", "alphanumericsPattern", "amd", "analyzeCodeCompatibility", "ancestor", "angle", "animatedline", "annotation", "ans", "any", "appdesigner", "append", "area", "arguments", "array2table", "array2timetable", "arrayDatastore", "arrayfun", "asFewOfPattern", "asManyOfPattern", "ascii", "asec", "asecd", "asech", "asin", "asind", "asinh", "assert", "assignin", "atan", "atan2", "atan2d", "atand", "atanh", "audiodevinfo", "audiodevreset", "audioinfo", "audioplayer", "audioread", "audiorecorder", "audiowrite", "autumn", "axes", "axis", "axtoolbar", "axtoolbarbtn", "balance", "bandwidth", "bar", "bar3", "bar3h", "barh", "barycentricToCartesian", "base2dec", "batchStartupOptionUsed", "bctree", "beep", "bench", "besselh", "besseli", "besselj", "besselk", "bessely", "beta", "betainc", "betaincinv", "betaln", "between", "bfsearch", "bicg", "bicgstab", "bicgstabl", "biconncomp", "bin2dec", "binary", "binscatter", "bitand", "bitcmp", "bitget", "bitnot", "bitor", "bitset", "bitshift", "bitxor", "blanks", "ble", "blelist", "blkdiag", "bluetooth", "bluetoothlist", "bone", "boundary", "boundaryFacets", "boundaryshape", "boundingbox", "bounds", "box", "boxchart", "brighten", "brush", "bsxfun", "bubblechart", "bubblechart3", "bubblelegend", "bubblelim", "bubblesize", "builddocsearchdb", "builtin", "bvp4c", "bvp5c", "bvpget", "bvpinit", "bvpset", "bvpxtend", 
"caldays", "caldiff", "calendar", "calendarDuration", "calllib", "calmonths", "calquarters", "calweeks", "calyears", "camdolly", "cameratoolbar", "camlight", "camlookat", "camorbit", "campan", "campos", "camproj", "camroll", "camtarget", "camup", "camva", "camzoom", "canUseGPU", "canUseParallelPool", "cart2pol", "cart2sph", "cartesianToBarycentric", "caseInsensitivePattern", "caseSensitivePattern", "cast", "cat", "categorical", "categories", "caxis", "cd", "cdf2rdf", "cdfepoch", "cdfinfo", "cdflib", "cdfread", "ceil", "cell", "cell2mat", "cell2struct", "cell2table", "celldisp", "cellfun", "cellplot", "cellstr", "centrality", "centroid", "cgs", "char", "characterListPattern", "characteristic", "checkcode", "chol", "cholupdate", "choose", "chooseContextMenu", "circshift", "circumcenter", "cla", "clabel", "class", "classUnderlying", "clc", "clear", "clearAllMemoizedCaches", "clearPersonalValue", "clearTemporaryValue", "clearpoints", "clearvars", "clf", "clibArray", "clibConvertArray", "clibIsNull", "clibIsReadOnly", "clibRelease", "clibgen.buildInterface", "clibgen.generateLibraryDefinition", "clipboard", "clock", "clone", "close", "closeFile", "closereq", "cmap2gray", "cmpermute", "cmunique", "codeCompatibilityReport", "colamd", "collapse", "colon", "colorbar", "colorcube", "colormap", "colororder", "colperm", "com.mathworks.engine.MatlabEngine", "com.mathworks.matlab.types.CellStr", "com.mathworks.matlab.types.Complex", "com.mathworks.matlab.types.HandleObject", "com.mathworks.matlab.types.Struct", "combine", "comet", "comet3", "compan", "compass", "complex", "compose", "computer", "comserver", "cond", "condeig", "condensation", "condest", "coneplot", "configureCallback", "configureTerminator", "conj", "conncomp", "containers.Map", "contains", "containsrange", "contour", "contour3", "contourc", "contourf", "contourslice", "contrast", "conv", "conv2", "convertCharsToStrings", "convertContainedStringsToChars", "convertStringsToChars", "convertTo", "convertvars", 
"convexHull", "convhull", "convhulln", "convn", "cool", "copper", "copyHDU", "copyfile", "copygraphics", "copyobj", "corrcoef", "cos", "cosd", "cosh", "cospi", "cot", "cotd", "coth", "count", "countcats", "cov", "cplxpair", "cputime", "createCategory", "createFile", "createImg", "createLabel", "createTbl", "criticalAlpha", "cross", "csc", "cscd", "csch", "ctranspose", "cummax", "cummin", "cumprod", "cumsum", "cumtrapz", "curl", "currentProject", "cylinder", "daspect", "dataTipInteraction", "dataTipTextRow", "datacursormode", "datastore", "datatip", "date", "datenum", "dateshift", "datestr", "datetick", "datetime", "datevec", "day", "days", "dbclear", "dbcont", "dbdown", "dbmex", "dbquit", "dbstack", "dbstatus", "dbstep", "dbstop", "dbtype", "dbup", "dde23", "ddeget", "ddensd", "ddesd", "ddeset", "deblank", "dec2base", "dec2bin", "dec2hex", "decic", "decomposition", "deconv", "deg2rad", "degree", "del2", "delaunay", "delaunayTriangulation", "delaunayn", "delete", "deleteCol", "deleteFile", "deleteHDU", "deleteKey", "deleteRecord", "deleteRows", "delevent", "delimitedTextImportOptions", "delsample", "delsamplefromcollection", "demo", "descriptor", "det", "details", "detectImportOptions", "detrend", "deval", "dfsearch", "diag", "dialog", "diary", "diff", "diffuse", "digitBoundary", "digitsPattern", "digraph", "dir", "disableDefaultInteractivity", "discretize", "disp", "display", "dissect", "distances", "dither", "divergence", "dmperm", "doc", "docsearch", "dos", "dot", "double", "drag", "dragrect", "drawnow", "dsearchn", "duration", "dynamicprops", "echo", "echodemo", "echotcpip", "edgeAttachments", "edgecount", "edges", "edit", "eig", "eigs", "ellipj", "ellipke", "ellipsoid", "empty", "enableDefaultInteractivity", "enableLegacyExplorationModes", "enableNETfromNetworkDrive", "enableservice", "endsWith", "enumeration", "eomday", "eps", "eq", "equilibrate", "erase", "eraseBetween", "erf", "erfc", "erfcinv", "erfcx", "erfinv", "error", "errorbar", "errordlg", "etime", 
"etree", "etreeplot", "eval", "evalc", "evalin", "event.ClassInstanceEvent", "event.DynamicPropertyEvent", "event.EventData", "event.PropertyEvent", "event.hasListener", "event.listener", "event.proplistener", "eventlisteners", "events", "exceltime", "exist", "exit", "exp", "expand", "expint", "expm", "expm1", "export", "export2wsdlg", "exportapp", "exportgraphics", "exportsetupdlg", "extract", "extractAfter", "extractBefore", "extractBetween", "eye", "ezpolar", "faceNormal", "factor", "factorial", "false", "fclose", "fcontour", "feather", "featureEdges", "feof", "ferror", "feval", "fewerbins", "fft", "fft2", "fftn", "fftshift", "fftw", "fgetl", "fgets", "fieldnames", "figure", "figurepalette", "fileDatastore", "fileMode", "fileName", "fileattrib", "filemarker", "fileparts", "fileread", "filesep", "fill", "fill3", "fillmissing", "filloutliers", "filter", "filter2", "fimplicit", "fimplicit3", "find", "findCategory", "findEvent", "findFile", "findLabel", "findall", "findedge", "findfigs", "findgroups", "findnode", "findobj", "findprop", "finish", "fitsdisp", "fitsinfo", "fitsread", "fitswrite", "fix", "fixedWidthImportOptions", "flag", "flintmax", "flip", "flipedge", "fliplr", "flipud", "floor", "flow", "flush", "fmesh", "fminbnd", "fminsearch", "fopen", "format", "fplot", "fplot3", "fprintf", "frame2im", "fread", "freeBoundary", "freqspace", "frewind", "fscanf", "fseek", "fsurf", "ftell", "ftp", "full", "fullfile", "func2str", "function_handle", "functions", "functiontests", "funm", "fwrite", "fzero", "gallery", "gamma", "gammainc", "gammaincinv", "gammaln", "gather", "gca", "gcbf", "gcbo", "gcd", "gcf", "gcmr", "gco", "genpath", "geoaxes", "geobasemap", "geobubble", "geodensityplot", "geolimits", "geoplot", "geoscatter", "geotickformat", "get", "getAColParms", "getAxes", "getBColParms", "getColName", "getColType", "getColorbar", "getConstantValue", "getEqColType", "getFileFormats", "getHDUnum", "getHDUtype", "getHdrSpace", "getImgSize", "getImgType", "getLayout", 
"getLegend", "getMockHistory", "getNumCols", "getNumHDUs", "getNumInputs", "getNumInputsImpl", "getNumOutputs", "getNumOutputsImpl", "getNumRows", "getOpenFiles", "getProfiles", "getPropertyGroupsImpl", "getReport", "getTimeStr", "getVersion", "getabstime", "getappdata", "getaudiodata", "getdatasamples", "getdatasamplesize", "getenv", "getfield", "getframe", "getinterpmethod", "getnext", "getpinstatus", "getpixelposition", "getplayer", "getpoints", "getpref", "getqualitydesc", "getrangefromclass", "getsamples", "getsampleusingtime", "gettimeseriesnames", "gettsafteratevent", "gettsafterevent", "gettsatevent", "gettsbeforeatevent", "gettsbeforeevent", "gettsbetweenevents", "getvaropts", "ginput", "gmres", "gobjects", "gplot", "grabcode", "gradient", "graph", "gray", "grid", "griddata", "griddatan", "griddedInterpolant", "groot", "groupcounts", "groupfilter", "groupsummary", "grouptransform", "gsvd", "gtext", "guidata", "guide", "guihandles", "gunzip", "gzip", "h5create", "h5disp", "h5info", "h5read", "h5readatt", "h5write", "h5writeatt", "hadamard", "handle", "hankel", "hasFactoryValue", "hasFrame", "hasGroup", "hasPersonalValue", "hasSetting", "hasTemporaryValue", "hasdata", "hasnext", "hdfan", "hdfdf24", "hdfdfr8", "hdfh", "hdfhd", "hdfhe", "hdfhx", "hdfinfo", "hdfml", "hdfpt", "hdfread", "hdfv", "hdfvf", "hdfvh", "hdfvs", "head", "heatmap", "height", "help", "helpdlg", "hess", "hex2dec", "hex2num", "hgexport", "hggroup", "hgtransform", "hidden", "highlight", "hilb", "histcounts", "histcounts2", "histogram", "histogram2", "hms", "hold", "holes", "home", "horzcat", "hot", "hour", "hours", "hover", "hsv", "hsv2rgb", "hypot", "i", "ichol", "idealfilter", "idivide", "ifft", "ifft2", "ifftn", "ifftshift", "ilu", "im2double", "im2frame", "im2gray", "im2java", "imag", "image", "imageDatastore", "imagesc", "imapprox", "imfinfo", "imformats", "imgCompress", "import", "importdata", "imread", "imresize", "imshow", "imtile", "imwrite", "inShape", "incenter", "incidence", 
"ind2rgb", "ind2sub", "indegree", "inedges", "infoImpl", "inmem", "inner2outer", "innerjoin", "inpolygon", "input", "inputParser", "inputdlg", "inputname", "insertATbl", "insertAfter", "insertBTbl", "insertBefore", "insertCol", "insertImg", "insertRows", "int16", "int2str", "int32", "int64", "int8", "integral", "integral2", "integral3", "interp1", "interp2", "interp3", "interpft", "interpn", "interpstreamspeed", "intersect", "intmax", "intmin", "inv", "invhilb", "ipermute", "iqr", "isCompressedImg", "isConnected", "isDiscreteStateSpecificationMutableImpl", "isDone", "isDoneImpl", "isInactivePropertyImpl", "isInputComplexityMutableImpl", "isInputDataTypeMutableImpl", "isInputSizeMutableImpl", "isInterior", "isKey", "isLoaded", "isLocked", "isMATLABReleaseOlderThan", "isPartitionable", "isShuffleable", "isStringScalar", "isTunablePropertyDataTypeMutableImpl", "isUnderlyingType", "isa", "isaUnderlying", "isappdata", "isbanded", "isbetween", "iscalendarduration", "iscategorical", "iscategory", "iscell", "iscellstr", "ischange", "ischar", "iscolumn", "iscom", "isdag", "isdatetime", "isdiag", "isdst", "isduration", "isempty", "isenum", "isequal", "isequaln", "isevent", "isfield", "isfile", "isfinite", "isfloat", "isfolder", "isgraphics", "ishandle", "ishermitian", "ishold", "ishole", "isinf", "isinteger", "isinterface", "isinterior", "isisomorphic", "isjava", "iskeyword", "isletter", "islocalmax", "islocalmin", "islogical", "ismac", "ismatrix", "ismember", "ismembertol", "ismethod", "ismissing", "ismultigraph", "isnan", "isnat", "isnumeric", "isobject", "isocaps", "isocolors", "isomorphism", "isonormals", "isordinal", "isosurface", "isoutlier", "ispc", "isplaying", "ispref", "isprime", "isprop", "isprotected", "isreal", "isrecording", "isregular", "isrow", "isscalar", "issimplified", "issorted", "issortedrows", "isspace", "issparse", "isstring", "isstrprop", "isstruct", "isstudent", "issymmetric", "istable", "istall", "istimetable", "istril", "istriu", "isundefined", 
"isunix", "isvalid", "isvarname", "isvector", "isweekend", "j", "javaArray", "javaMethod", "javaMethodEDT", "javaObject", "javaObjectEDT", "javaaddpath", "javachk", "javaclasspath", "javarmpath", "jet", "join", "jsondecode", "jsonencode", "juliandate", "keyboard", "keys", "kron", "labeledge", "labelnode", "lag", "laplacian", "lastwarn", "layout", "lcm", "ldl", "leapseconds", "legend", "legendre", "length", "letterBoundary", "lettersPattern", "lib.pointer", "libfunctions", "libfunctionsview", "libisloaded", "libpointer", "libstruct", "license", "light", "lightangle", "lighting", "lin2mu", "line", "lineBoundary", "lines", "linkaxes", "linkdata", "linkprop", "linsolve", "linspace", "listModifiedFiles", "listRequiredFiles", "listdlg", "listener", "listfonts", "load", "loadObjectImpl", "loadlibrary", "loadobj", "localfunctions", "log", "log10", "log1p", "log2", "logical", "loglog", "logm", "logspace", "lookAheadBoundary", "lookBehindBoundary", "lookfor", "lower", "ls", "lscov", "lsqminnorm", "lsqnonneg", "lsqr", "lu", "magic", "makehgtform", "makima", "mapreduce", "mapreducer", "maskedPattern", "mat2cell", "mat2str", "matches", "matchpairs", "material", "matfile", "matlab.System", "matlab.addons.disableAddon", "matlab.addons.enableAddon", "matlab.addons.install", "matlab.addons.installedAddons", "matlab.addons.isAddonEnabled", "matlab.addons.toolbox.installToolbox", "matlab.addons.toolbox.installedToolboxes", "matlab.addons.toolbox.packageToolbox", "matlab.addons.toolbox.toolboxVersion", "matlab.addons.toolbox.uninstallToolbox", "matlab.addons.uninstall", "matlab.apputil.create", "matlab.apputil.getInstalledAppInfo", "matlab.apputil.install", "matlab.apputil.package", "matlab.apputil.run", "matlab.apputil.uninstall", "matlab.codetools.requiredFilesAndProducts", "matlab.engine.FutureResult", "matlab.engine.MatlabEngine", "matlab.engine.connect_matlab", "matlab.engine.engineName", "matlab.engine.find_matlab", "matlab.engine.isEngineShared", "matlab.engine.shareEngine", 
"matlab.engine.start_matlab", "matlab.exception.JavaException", "matlab.exception.PyException", "matlab.graphics.chartcontainer.ChartContainer", "matlab.graphics.chartcontainer.mixin.Colorbar", "matlab.graphics.chartcontainer.mixin.Legend", "matlab.io.Datastore", "matlab.io.datastore.BlockedFileSet", "matlab.io.datastore.DsFileReader", "matlab.io.datastore.DsFileSet", "matlab.io.datastore.FileSet", "matlab.io.datastore.FileWritable", "matlab.io.datastore.FoldersPropertyProvider", "matlab.io.datastore.HadoopLocationBased", "matlab.io.datastore.Partitionable", "matlab.io.datastore.Shuffleable", "matlab.io.hdf4.sd", "matlab.io.hdfeos.gd", "matlab.io.hdfeos.sw", "matlab.io.saveVariablesToScript", "matlab.lang.OnOffSwitchState", "matlab.lang.correction.AppendArgumentsCorrection", "matlab.lang.correction.ConvertToFunctionNotationCorrection", "matlab.lang.correction.ReplaceIdentifierCorrection", "matlab.lang.makeUniqueStrings", "matlab.lang.makeValidName", "matlab.mex.MexHost", "matlab.mixin.Copyable", "matlab.mixin.CustomDisplay", "matlab.mixin.Heterogeneous", "matlab.mixin.SetGet", "matlab.mixin.SetGetExactNames", "matlab.mixin.util.PropertyGroup", "matlab.mock.AnyArguments", "matlab.mock.InteractionHistory", "matlab.mock.InteractionHistory.forMock", "matlab.mock.MethodCallBehavior", "matlab.mock.PropertyBehavior", "matlab.mock.PropertyGetBehavior", "matlab.mock.PropertySetBehavior", "matlab.mock.TestCase", "matlab.mock.actions.AssignOutputs", "matlab.mock.actions.DoNothing", "matlab.mock.actions.Invoke", "matlab.mock.actions.ReturnStoredValue", "matlab.mock.actions.StoreValue", "matlab.mock.actions.ThrowException", "matlab.mock.constraints.Occurred", "matlab.mock.constraints.WasAccessed", "matlab.mock.constraints.WasCalled", "matlab.mock.constraints.WasSet", "matlab.net.ArrayFormat", "matlab.net.QueryParameter", "matlab.net.URI", "matlab.net.base64decode", "matlab.net.base64encode", "matlab.net.http.AuthInfo", "matlab.net.http.AuthenticationScheme", 
"matlab.net.http.Cookie", "matlab.net.http.CookieInfo", "matlab.net.http.Credentials", "matlab.net.http.Disposition", "matlab.net.http.HTTPException", "matlab.net.http.HTTPOptions", "matlab.net.http.HeaderField", "matlab.net.http.LogRecord", "matlab.net.http.MediaType", "matlab.net.http.Message", "matlab.net.http.MessageBody", "matlab.net.http.MessageType", "matlab.net.http.ProgressMonitor", "matlab.net.http.ProtocolVersion", "matlab.net.http.RequestLine", "matlab.net.http.RequestMessage", "matlab.net.http.RequestMethod", "matlab.net.http.ResponseMessage", "matlab.net.http.StartLine", "matlab.net.http.StatusClass", "matlab.net.http.StatusCode", "matlab.net.http.StatusLine", "matlab.net.http.field.AcceptField", "matlab.net.http.field.AuthenticateField", "matlab.net.http.field.AuthenticationInfoField", "matlab.net.http.field.AuthorizationField", "matlab.net.http.field.ContentDispositionField", "matlab.net.http.field.ContentLengthField", "matlab.net.http.field.ContentLocationField", "matlab.net.http.field.ContentTypeField", "matlab.net.http.field.CookieField", "matlab.net.http.field.DateField", "matlab.net.http.field.GenericField", "matlab.net.http.field.GenericParameterizedField", "matlab.net.http.field.HTTPDateField", "matlab.net.http.field.IntegerField", "matlab.net.http.field.LocationField", "matlab.net.http.field.MediaRangeField", "matlab.net.http.field.SetCookieField", "matlab.net.http.field.URIReferenceField", "matlab.net.http.io.BinaryConsumer", "matlab.net.http.io.ContentConsumer", "matlab.net.http.io.ContentProvider", "matlab.net.http.io.FileConsumer", "matlab.net.http.io.FileProvider", "matlab.net.http.io.FormProvider", "matlab.net.http.io.GenericConsumer", "matlab.net.http.io.GenericProvider", "matlab.net.http.io.ImageConsumer", "matlab.net.http.io.ImageProvider", "matlab.net.http.io.JSONConsumer", "matlab.net.http.io.JSONProvider", "matlab.net.http.io.MultipartConsumer", "matlab.net.http.io.MultipartFormProvider", "matlab.net.http.io.MultipartProvider", 
"matlab.net.http.io.StringConsumer", "matlab.net.http.io.StringProvider", "matlab.perftest.FixedTimeExperiment", "matlab.perftest.FrequentistTimeExperiment", "matlab.perftest.TestCase", "matlab.perftest.TimeExperiment", "matlab.perftest.TimeResult", "matlab.project.Project", "matlab.project.convertDefinitionFiles", "matlab.project.createProject", "matlab.project.deleteProject", "matlab.project.loadProject", "matlab.project.rootProject", "matlab.settings.FactoryGroup.createToolboxGroup", "matlab.settings.SettingsFileUpgrader", "matlab.settings.loadSettingsCompatibilityResults", "matlab.settings.mustBeIntegerScalar", "matlab.settings.mustBeLogicalScalar", "matlab.settings.mustBeNumericScalar", "matlab.settings.mustBeStringScalar", "matlab.settings.reloadFactoryFile", "matlab.system.mixin.FiniteSource", "matlab.tall.blockMovingWindow", "matlab.tall.movingWindow", "matlab.tall.reduce", "matlab.tall.transform", "matlab.test.behavior.Missing", "matlab.ui.componentcontainer.ComponentContainer", "matlab.uitest.TestCase", "matlab.uitest.TestCase.forInteractiveUse", "matlab.uitest.unlock", "matlab.unittest.Test", "matlab.unittest.TestCase", "matlab.unittest.TestResult", "matlab.unittest.TestRunner", "matlab.unittest.TestSuite", "matlab.unittest.constraints.BooleanConstraint", "matlab.unittest.constraints.Constraint", "matlab.unittest.constraints.Tolerance", "matlab.unittest.diagnostics.ConstraintDiagnostic", "matlab.unittest.diagnostics.Diagnostic", "matlab.unittest.fixtures.Fixture", "matlab.unittest.measurement.DefaultMeasurementResult", "matlab.unittest.measurement.MeasurementResult", "matlab.unittest.measurement.chart.ComparisonPlot", "matlab.unittest.plugins.OutputStream", "matlab.unittest.plugins.Parallelizable", "matlab.unittest.plugins.QualifyingPlugin", "matlab.unittest.plugins.TestRunnerPlugin", "matlab.wsdl.createWSDLClient", "matlab.wsdl.setWSDLToolPath", "matlabRelease", "matlabrc", "matlabroot", "max", "maxflow", "maxk", "mean", "median", "memmapfile", 
"memoize", "memory", "mergecats", "mergevars", "mesh", "meshc", "meshgrid", "meshz", "meta.ArrayDimension", "meta.DynamicProperty", "meta.EnumeratedValue", "meta.FixedDimension", "meta.MetaData", "meta.UnrestrictedDimension", "meta.Validation", "meta.abstractDetails", "meta.class", "meta.class.fromName", "meta.event", "meta.method", "meta.package", "meta.package.fromName", "meta.package.getAllPackages", "meta.property", "metaclass", "methods", "methodsview", "mex", "mexext", "mexhost", "mfilename", "mget", "milliseconds", "min", "mink", "minres", "minspantree", "minute", "minutes", "mislocked", "missing", "mkdir", "mkpp", "mldivide", "mlintrpt", "mlock", "mmfileinfo", "mod", "mode", "month", "more", "morebins", "movAbsHDU", "movNamHDU", "movRelHDU", "move", "movefile", "movegui", "movevars", "movie", "movmad", "movmax", "movmean", "movmedian", "movmin", "movprod", "movstd", "movsum", "movvar", "mpower", "mput", "mrdivide", "msgbox", "mtimes", "mu2lin", "multibandread", "multibandwrite", "munlock", "mustBeA", "mustBeFile", "mustBeFinite", "mustBeFloat", "mustBeFolder", "mustBeGreaterThan", "mustBeGreaterThanOrEqual", "mustBeInRange", "mustBeInteger", "mustBeLessThan", "mustBeLessThanOrEqual", "mustBeMember", "mustBeNegative", "mustBeNonNan", "mustBeNonempty", "mustBeNonmissing", "mustBeNonnegative", "mustBeNonpositive", "mustBeNonsparse", "mustBeNonzero", "mustBeNonzeroLengthText", "mustBeNumeric", "mustBeNumericOrLogical", "mustBePositive", "mustBeReal", "mustBeScalarOrEmpty", "mustBeText", "mustBeTextScalar", "mustBeUnderlyingType", "mustBeValidVariableName", "mustBeVector", "namedPattern", "namedargs2cell", "namelengthmax", "nargin", "narginchk", "nargout", "nargoutchk", "native2unicode", "nccreate", "ncdisp", "nchoosek", "ncinfo", "ncread", "ncreadatt", "ncwrite", "ncwriteatt", "ncwriteschema", "ndgrid", "ndims", "nearest", "nearestNeighbor", "nearestvertex", "neighbors", "netcdf.abort", "netcdf.close", "netcdf.copyAtt", "netcdf.create", "netcdf.defDim", 
"netcdf.defGrp", "netcdf.defVar", "netcdf.defVarChunking", "netcdf.defVarDeflate", "netcdf.defVarFill", "netcdf.defVarFletcher32", "netcdf.delAtt", "netcdf.endDef", "netcdf.getAtt", "netcdf.getChunkCache", "netcdf.getConstant", "netcdf.getConstantNames", "netcdf.getVar", "netcdf.inq", "netcdf.inqAtt", "netcdf.inqAttID", "netcdf.inqAttName", "netcdf.inqDim", "netcdf.inqDimID", "netcdf.inqDimIDs", "netcdf.inqFormat", "netcdf.inqGrpName", "netcdf.inqGrpNameFull", "netcdf.inqGrpParent", "netcdf.inqGrps", "netcdf.inqLibVers", "netcdf.inqNcid", "netcdf.inqUnlimDims", "netcdf.inqVar", "netcdf.inqVarChunking", "netcdf.inqVarDeflate", "netcdf.inqVarFill", "netcdf.inqVarFletcher32", "netcdf.inqVarID", "netcdf.inqVarIDs", "netcdf.open", "netcdf.putAtt", "netcdf.putVar", "netcdf.reDef", "netcdf.renameAtt", "netcdf.renameDim", "netcdf.renameVar", "netcdf.setChunkCache", "netcdf.setDefaultFormat", "netcdf.setFill", "netcdf.sync", "newline", "newplot", "nextpow2", "nexttile", "nnz", "nonzeros", "norm", "normalize", "normest", "notify", "now", "nsidedpoly", "nthroot", "nufft", "nufftn", "null", "num2cell", "num2hex", "num2ruler", "num2str", "numArgumentsFromSubscript", "numRegions", "numboundaries", "numedges", "numel", "numnodes", "numpartitions", "numsides", "nzmax", "ode113", "ode15i", "ode15s", "ode23", "ode23s", "ode23t", "ode23tb", "ode45", "odeget", "odeset", "odextend", "onCleanup", "ones", "open", "openDiskFile", "openFile", "openProject", "openfig", "opengl", "openvar", "optimget", "optimset", "optionalPattern", "ordeig", "orderfields", "ordqz", "ordschur", "orient", "orth", "outdegree", "outedges", "outerjoin", "overlaps", "overlapsrange", "pack", "pad", "padecoef", "pagectranspose", "pagemtimes", "pagetranspose", "pan", "panInteraction", "parallelplot", "pareto", "parquetDatastore", "parquetinfo", "parquetread", "parquetwrite", "partition", "parula", "pascal", "patch", "path", "pathsep", "pathtool", "pattern", "pause", "pbaspect", "pcg", "pchip", "pcode", "pcolor", 
"pdepe", "pdeval", "peaks", "perimeter", "perl", "perms", "permute", "pi", "pie", "pie3", "pink", "pinv", "planerot", "play", "playblocking", "plot", "plot3", "plotbrowser", "plotedit", "plotmatrix", "plottools", "plus", "pointLocation", "pol2cart", "polaraxes", "polarbubblechart", "polarhistogram", "polarplot", "polarscatter", "poly", "polyarea", "polybuffer", "polyder", "polyeig", "polyfit", "polyint", "polyshape", "polyval", "polyvalm", "posixtime", "possessivePattern", "pow2", "ppval", "predecessors", "prefdir", "preferences", "press", "preview", "primes", "print", "printdlg", "printopt", "printpreview", "prism", "processInputSpecificationChangeImpl", "processTunedPropertiesImpl", "prod", "profile", "propedit", "properties", "propertyeditor", "psi", "publish", "pwd", "pyargs", "pyenv", "qmr", "qr", "qrdelete", "qrinsert", "qrupdate", "quad2d", "quadgk", "quarter", "questdlg", "quit", "quiver", "quiver3", "qz", "rad2deg", "rand", "randi", "randn", "randperm", "rank", "rat", "rats", "rbbox", "rcond", "read", "readATblHdr", "readBTblHdr", "readCard", "readCol", "readFrame", "readImg", "readKey", "readKeyCmplx", "readKeyDbl", "readKeyLongLong", "readKeyLongStr", "readKeyUnit", "readRecord", "readall", "readcell", "readline", "readlines", "readmatrix", "readstruct", "readtable", "readtimetable", "readvars", "real", "reallog", "realmax", "realmin", "realpow", "realsqrt", "record", "recordblocking", "rectangle", "rectint", "recycle", "reducepatch", "reducevolume", "refresh", "refreshSourceControl", "refreshdata", "regexp", "regexpPattern", "regexpi", "regexprep", "regexptranslate", "regionZoomInteraction", "regions", "registerevent", "regmatlabserver", "rehash", "relationaloperators", "release", "releaseImpl", "reload", "rem", "remove", "removeCategory", "removeFile", "removeGroup", "removeLabel", "removePath", "removeReference", "removeSetting", "removeShortcut", "removeShutdownFile", "removeStartupFile", "removeStyle", "removeToolbarExplorationButtons", 
"removecats", "removets", "removevars", "rename", "renamecats", "renamevars", "rendererinfo", "reordercats", "reordernodes", "repelem", "replace", "replaceBetween", "repmat", "resample", "rescale", "reset", "resetImpl", "reshape", "residue", "restoredefaultpath", "resume", "rethrow", "retime", "reverse", "rgb2gray", "rgb2hsv", "rgb2ind", "rgbplot", "ribbon", "rlim", "rmappdata", "rmboundary", "rmdir", "rmedge", "rmfield", "rmholes", "rmmissing", "rmnode", "rmoutliers", "rmpath", "rmpref", "rmprop", "rmslivers", "rng", "roots", "rosser", "rot90", "rotate", "rotate3d", "rotateInteraction", "round", "rowfun", "rows2vars", "rref", "rsf2csf", "rtickangle", "rtickformat", "rticklabels", "rticks", "ruler2num", "rulerPanInteraction", "run", "runChecks", "runperf", "runtests", "save", "saveObjectImpl", "saveas", "savefig", "saveobj", "savepath", "scale", "scatter", "scatter3", "scatteredInterpolant", "scatterhistogram", "schur", "scroll", "sec", "secd", "sech", "second", "seconds", "semilogx", "semilogy", "sendmail", "serialport", "serialportlist", "set", "setBscale", "setCompressionType", "setDTR", "setHCompScale", "setHCompSmooth", "setProperties", "setRTS", "setTileDim", "setTscale", "setabstime", "setappdata", "setcats", "setdiff", "setenv", "setfield", "setinterpmethod", "setpixelposition", "setpref", "settimeseriesnames", "settings", "setuniformtime", "setup", "setupImpl", "setvaropts", "setvartype", "setxor", "sgtitle", "shading", "sheetnames", "shg", "shiftdim", "shortestpath", "shortestpathtree", "showplottool", "shrinkfaces", "shuffle", "sign", "simplify", "sin", "sind", "single", "sinh", "sinpi", "size", "slice", "smooth3", "smoothdata", "snapnow", "sort", "sortboundaries", "sortregions", "sortrows", "sortx", "sorty", "sound", "soundsc", "spalloc", "sparse", "spaugment", "spconvert", "spdiags", "specular", "speye", "spfun", "sph2cart", "sphere", "spinmap", "spline", "split", "splitapply", "splitlines", "splitvars", "spones", "spparms", "sprand", "sprandn", 
"sprandsym", "sprank", "spreadsheetDatastore", "spreadsheetImportOptions", "spring", "sprintf", "spy", "sqrt", "sqrtm", "squeeze", "ss2tf", "sscanf", "stack", "stackedplot", "stairs", "standardizeMissing", "start", "startat", "startsWith", "startup", "std", "stem", "stem3", "step", "stepImpl", "stlread", "stlwrite", "stop", "str2double", "str2func", "str2num", "strcat", "strcmp", "strcmpi", "stream2", "stream3", "streamline", "streamparticles", "streamribbon", "streamslice", "streamtube", "strfind", "string", "strings", "strip", "strjoin", "strjust", "strlength", "strncmp", "strncmpi", "strrep", "strsplit", "strtok", "strtrim", "struct", "struct2cell", "struct2table", "structfun", "sub2ind", "subgraph", "subplot", "subsasgn", "subscribe", "subsindex", "subspace", "subsref", "substruct", "subtitle", "subtract", "subvolume", "successors", "sum", "summary", "summer", "superclasses", "surf", "surf2patch", "surface", "surfaceArea", "surfc", "surfl", "surfnorm", "svd", "svds", "svdsketch", "swapbytes", "swarmchart", "swarmchart3", "sylvester", "symamd", "symbfact", "symmlq", "symrcm", "synchronize", "sysobjupdate", "system", "table", "table2array", "table2cell", "table2struct", "table2timetable", "tabularTextDatastore", "tail", "tall", "tallrng", "tan", "tand", "tanh", "tar", "tcpclient", "tempdir", "tempname", "testsuite", "tetramesh", "texlabel", "text", "textBoundary", "textscan", "textwrap", "tfqmr", "thetalim", "thetatickformat", "thetaticklabels", "thetaticks", "thingSpeakRead", "thingSpeakWrite", "throw", "throwAsCaller", "tic", "tiledlayout", "time", "timeit", "timeofday", "timer", "timerange", "timerfind", "timerfindall", "timeseries", "timetable", "timetable2table", "timezones", "title", "toc", "todatenum", "toeplitz", "toolboxdir", "topkrows", "toposort", "trace", "transclosure", "transform", "translate", "transpose", "transreduction", "trapz", "treelayout", "treeplot", "triangulation", "tril", "trimesh", "triplot", "trisurf", "triu", "true", "tscollection", 
"tsdata.event", "tsearchn", "turbo", "turningdist", "type", "typecast", "tzoffset", "uialert", "uiaxes", "uibutton", "uibuttongroup", "uicheckbox", "uiconfirm", "uicontextmenu", "uicontrol", "uidatepicker", "uidropdown", "uieditfield", "uifigure", "uigauge", "uigetdir", "uigetfile", "uigetpref", "uigridlayout", "uihtml", "uiimage", "uiknob", "uilabel", "uilamp", "uilistbox", "uimenu", "uint16", "uint32", "uint64", "uint8", "uiopen", "uipanel", "uiprogressdlg", "uipushtool", "uiputfile", "uiradiobutton", "uiresume", "uisave", "uisetcolor", "uisetfont", "uisetpref", "uislider", "uispinner", "uistack", "uistyle", "uiswitch", "uitab", "uitabgroup", "uitable", "uitextarea", "uitogglebutton", "uitoggletool", "uitoolbar", "uitree", "uitreenode", "uiwait", "uminus", "underlyingType", "underlyingValue", "unicode2native", "union", "unique", "uniquetol", "unix", "unloadlibrary", "unmesh", "unmkpp", "unregisterallevents", "unregisterevent", "unstack", "unsubscribe", "untar", "unwrap", "unzip", "update", "updateDependencies", "uplus", "upper", "usejava", "userpath", "validateFunctionSignaturesJSON", "validateInputsImpl", "validatePropertiesImpl", "validateattributes", "validatecolor", "validatestring", "values", "vander", "var", "varargin", "varargout", "varfun", "vartype", "vecnorm", "ver", "verLessThan", "version", "vertcat", "vertexAttachments", "vertexNormal", "view", "viewmtx", "visdiff", "volume", "volumebounds", "voronoi", "voronoiDiagram", "voronoin", "wait", "waitbar", "waitfor", "waitforbuttonpress", "warndlg", "warning", "waterfall", "web", "weboptions", "webread", "websave", "webwrite", "week", "weekday", "what", "which", "whitespaceBoundary", "whitespacePattern", "who", "whos", "width", "wildcardPattern", "wilkinson", "winopen", "winqueryreg", "winter", "withinrange", "withtol", "wordcloud", "write", "writeChecksum", "writeCol", "writeComment", "writeDate", "writeHistory", "writeImg", "writeKey", "writeKeyUnit", "writeVideo", "writeall", "writecell", "writeline", 
"writematrix", "writestruct", "writetable", "writetimetable", "xcorr", "xcov", "xlabel", "xlim", "xline", "xmlread", "xmlwrite", "xor", "xslt", "xtickangle", "xtickformat", "xticklabels", "xticks", "year", "years", "ylabel", "ylim", "yline", "ymd", "ytickangle", "ytickformat", "yticklabels", "yticks", "yyaxis", "yyyymmdd", "zeros", "zip", "zlabel", "zlim", "zoom", "zoomInteraction", "ztickangle", "ztickformat", "zticklabels", "zticks", ], prefix=r"(?<!\.)(", # Exclude field names suffix=r")\b" ), Name.Builtin ), # line continuation with following comment: (r'(\.\.\.)(.*)$', bygroups(Keyword, Comment)), # command form: # "How MATLAB Recognizes Command Syntax" specifies that an operator # is recognized if it is either surrounded by spaces or by no # spaces on both sides (this allows distinguishing `cd ./foo` from # `cd ./ foo`.). Here, the regex checks that the first word in the # line is not followed by <spaces> and then # (equal | open-parenthesis | <operator><space> | <space>). (r'(?:^|(?<=;))(\s*)(\w+)(\s+)(?!=|\(|%s\s|\s)' % _operators, bygroups(Whitespace, Name, Whitespace), 'commandargs'), include('expressions') ], 'blockcomment': [ (r'^\s*%\}', Comment.Multiline, '#pop'), (r'^.*\n', Comment.Multiline), (r'.', Comment.Multiline), ], 'deffunc': [ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', bygroups(Whitespace, Text, Whitespace, Punctuation, Whitespace, Name.Function, Punctuation, Text, Punctuation, Whitespace), '#pop'), # function with no args (r'(\s*)([a-zA-Z_]\w*)', bygroups(Whitespace, Name.Function), '#pop'), ], 'propattrs': [ (r'(\w+)(\s*)(=)(\s*)(\d+)', bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace, Number)), (r'(\w+)(\s*)(=)(\s*)([a-zA-Z]\w*)', bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace, Keyword)), (r',', Punctuation), (r'\)', Punctuation, '#pop'), (r'\s+', Whitespace), (r'.', Text), ], 'defprops': [ (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), (r'%.*$', Comment), (r'(?<!\.)end\b', Keyword, '#pop'), 
include('expressions'), ], 'string': [ (r"[^']*'", String, '#pop'), ], 'commandargs': [ # If an equal sign or other operator is encountered, this # isn't a command. It might be a variable assignment or # comparison operation with multiple spaces before the # equal sign or operator (r"=", Punctuation, '#pop'), (_operators, Operator, '#pop'), (r"[ \t]+", Whitespace), ("'[^']*'", String), (r"[^';\s]+", String), (";", Punctuation, '#pop'), default('#pop'), ] } def analyse_text(text): # function declaration. first_non_comment = next((line for line in text.splitlines() if not re.match(r'^\s*%', text)), '').strip() if (first_non_comment.startswith('function') and '{' not in first_non_comment): return 1. # comment elif re.search(r'^\s*%', text, re.M): return 0.2 # system cmd elif re.search(r'^!\w+', text, re.M): return 0.2 line_re = re.compile('.*?\n') class MatlabSessionLexer(Lexer): """ For Matlab sessions. Modeled after PythonConsoleLexer. Contributed by Ken Schutte <[email protected]>. .. versionadded:: 0.10 """ name = 'Matlab session' aliases = ['matlabsession'] def get_tokens_unprocessed(self, text): mlexer = MatlabLexer(**self.options) curcode = '' insertions = [] continuation = False for match in line_re.finditer(text): line = match.group() if line.startswith('>> '): insertions.append((len(curcode), [(0, Generic.Prompt, line[:3])])) curcode += line[3:] elif line.startswith('>>'): insertions.append((len(curcode), [(0, Generic.Prompt, line[:2])])) curcode += line[2:] elif line.startswith('???'): idx = len(curcode) # without is showing error on same line as before...? 
# line = "\n" + line token = (0, Generic.Traceback, line) insertions.append((idx, [token])) elif continuation and insertions: # line_start is the length of the most recent prompt symbol line_start = len(insertions[-1][-1][-1]) # Set leading spaces with the length of the prompt to be a generic prompt # This keeps code aligned when prompts are removed, say with some Javascript if line.startswith(' '*line_start): insertions.append( (len(curcode), [(0, Generic.Prompt, line[:line_start])])) curcode += line[line_start:] else: curcode += line else: if curcode: yield from do_insertions( insertions, mlexer.get_tokens_unprocessed(curcode)) curcode = '' insertions = [] yield match.start(), Generic.Output, line # Does not allow continuation if a comment is included after the ellipses. # Continues any line that ends with ..., even comments (lines that start with %) if line.strip().endswith('...'): continuation = True else: continuation = False if curcode: # or item: yield from do_insertions( insertions, mlexer.get_tokens_unprocessed(curcode)) class OctaveLexer(RegexLexer): """ For GNU Octave source code. .. versionadded:: 1.5 """ name = 'Octave' url = 'https://www.gnu.org/software/octave/index' aliases = ['octave'] filenames = ['*.m'] mimetypes = ['text/octave'] # These lists are generated automatically. 
# Run the following in bash shell: # # First dump all of the Octave manual into a plain text file: # # $ info octave --subnodes -o octave-manual # # Now grep through it: # for i in \ # "Built-in Function" "Command" "Function File" \ # "Loadable Function" "Mapping Function"; # do # perl -e '@name = qw('"$i"'); # print lc($name[0]),"_kw = [\n"'; # # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ # octave-manual | sort | uniq ; # echo "]" ; # echo; # done # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) builtin_kw = ( "addlistener", "addpath", "addproperty", "all", "and", "any", "argnames", "argv", "assignin", "atexit", "autoload", "available_graphics_toolkits", "beep_on_error", "bitand", "bitmax", "bitor", "bitshift", "bitxor", "cat", "cell", "cellstr", "char", "class", "clc", "columns", "command_line_path", "completion_append_char", "completion_matches", "complex", "confirm_recursive_rmdir", "cputime", "crash_dumps_octave_core", "ctranspose", "cumprod", "cumsum", "debug_on_error", "debug_on_interrupt", "debug_on_warning", "default_save_options", "dellistener", "diag", "diff", "disp", "doc_cache_file", "do_string_escapes", "double", "drawnow", "e", "echo_executing_commands", "eps", "eq", "errno", "errno_list", "error", "eval", "evalin", "exec", "exist", "exit", "eye", "false", "fclear", "fclose", "fcntl", "fdisp", "feof", "ferror", "feval", "fflush", "fgetl", "fgets", "fieldnames", "file_in_loadpath", "file_in_path", "filemarker", "filesep", "find_dir_in_path", "fixed_point_format", "fnmatch", "fopen", "fork", "formula", "fprintf", "fputs", "fread", "freport", "frewind", "fscanf", "fseek", "fskipl", "ftell", "functions", "fwrite", "ge", "genpath", "get", "getegid", "getenv", "geteuid", "getgid", "getpgrp", "getpid", "getppid", "getuid", "glob", "gt", "gui_mode", "history_control", "history_file", "history_size", "history_timestamp_format_string", "home", "horzcat", "hypot", "ifelse", "ignore_function_time_stamp", "inferiorto", 
"info_file", "info_program", "inline", "input", "intmax", "intmin", "ipermute", "is_absolute_filename", "isargout", "isbool", "iscell", "iscellstr", "ischar", "iscomplex", "isempty", "isfield", "isfloat", "isglobal", "ishandle", "isieee", "isindex", "isinteger", "islogical", "ismatrix", "ismethod", "isnull", "isnumeric", "isobject", "isreal", "is_rooted_relative_filename", "issorted", "isstruct", "isvarname", "kbhit", "keyboard", "kill", "lasterr", "lasterror", "lastwarn", "ldivide", "le", "length", "link", "linspace", "logical", "lstat", "lt", "make_absolute_filename", "makeinfo_program", "max_recursion_depth", "merge", "methods", "mfilename", "minus", "mislocked", "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", "munlock", "nargin", "nargout", "native_float_format", "ndims", "ne", "nfields", "nnz", "norm", "not", "numel", "nzmax", "octave_config_info", "octave_core_file_limit", "octave_core_file_name", "octave_core_file_options", "ones", "or", "output_max_field_width", "output_precision", "page_output_immediately", "page_screen_output", "path", "pathsep", "pause", "pclose", "permute", "pi", "pipe", "plus", "popen", "power", "print_empty_dimensions", "printf", "print_struct_array_contents", "prod", "program_invocation_name", "program_name", "putenv", "puts", "pwd", "quit", "rats", "rdivide", "readdir", "readlink", "read_readline_init_file", "realmax", "realmin", "rehash", "rename", "repelems", "re_read_readline_init_file", "reset", "reshape", "resize", "restoredefaultpath", "rethrow", "rmdir", "rmfield", "rmpath", "rows", "save_header_format_string", "save_precision", "saving_history", "scanf", "set", "setenv", "shell_cmd", "sighup_dumps_octave_core", "sigterm_dumps_octave_core", "silent_functions", "single", "size", "size_equal", "sizemax", "sizeof", "sleep", "source", "sparse_auto_mutate", "split_long_rows", "sprintf", "squeeze", "sscanf", "stat", "stderr", "stdin", "stdout", "strcmp", "strcmpi", 
"string_fill_char", "strncmp", "strncmpi", "struct", "struct_levels_to_print", "strvcat", "subsasgn", "subsref", "sum", "sumsq", "superiorto", "suppress_verbose_help_message", "symlink", "system", "tic", "tilde_expand", "times", "tmpfile", "tmpnam", "toc", "toupper", "transpose", "true", "typeinfo", "umask", "uminus", "uname", "undo_string_escapes", "unlink", "uplus", "upper", "usage", "usleep", "vec", "vectorize", "vertcat", "waitpid", "warning", "warranty", "whos_line_format", "yes_or_no", "zeros", "inf", "Inf", "nan", "NaN") command_kw = ("close", "load", "who", "whos") function_kw = ( "accumarray", "accumdim", "acosd", "acotd", "acscd", "addtodate", "allchild", "ancestor", "anova", "arch_fit", "arch_rnd", "arch_test", "area", "arma_rnd", "arrayfun", "ascii", "asctime", "asecd", "asind", "assert", "atand", "autoreg_matrix", "autumn", "axes", "axis", "bar", "barh", "bartlett", "bartlett_test", "beep", "betacdf", "betainv", "betapdf", "betarnd", "bicgstab", "bicubic", "binary", "binocdf", "binoinv", "binopdf", "binornd", "bitcmp", "bitget", "bitset", "blackman", "blanks", "blkdiag", "bone", "box", "brighten", "calendar", "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", "cauchy_rnd", "caxis", "celldisp", "center", "cgs", "chisquare_test_homogeneity", "chisquare_test_independence", "circshift", "cla", "clabel", "clf", "clock", "cloglog", "closereq", "colon", "colorbar", "colormap", "colperm", "comet", "common_size", "commutation_matrix", "compan", "compare_versions", "compass", "computer", "cond", "condest", "contour", "contourc", "contourf", "contrast", "conv", "convhull", "cool", "copper", "copyfile", "cor", "corrcoef", "cor_test", "cosd", "cotd", "cov", "cplxpair", "cross", "cscd", "cstrcat", "csvread", "csvwrite", "ctime", "cumtrapz", "curl", "cut", "cylinder", "date", "datenum", "datestr", "datetick", "datevec", "dblquad", "deal", "deblank", "deconv", "delaunay", "delaunayn", "delete", "demo", "detrend", "diffpara", "diffuse", "dir", "discrete_cdf", 
"discrete_inv", "discrete_pdf", "discrete_rnd", "display", "divergence", "dlmwrite", "dos", "dsearch", "dsearchn", "duplication_matrix", "durbinlevinson", "ellipsoid", "empirical_cdf", "empirical_inv", "empirical_pdf", "empirical_rnd", "eomday", "errorbar", "etime", "etreeplot", "example", "expcdf", "expinv", "expm", "exppdf", "exprnd", "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", "factorial", "fail", "fcdf", "feather", "fftconv", "fftfilt", "fftshift", "figure", "fileattrib", "fileparts", "fill", "findall", "findobj", "findstr", "finv", "flag", "flipdim", "fliplr", "flipud", "fpdf", "fplot", "fractdiff", "freqz", "freqz_plot", "frnd", "fsolve", "f_test_regression", "ftp", "fullfile", "fzero", "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", "gcbf", "gcbo", "gcf", "genvarname", "geocdf", "geoinv", "geopdf", "geornd", "getfield", "ginput", "glpk", "gls", "gplot", "gradient", "graphics_toolkit", "gray", "grid", "griddata", "griddatan", "gtext", "gunzip", "gzip", "hadamard", "hamming", "hankel", "hanning", "hggroup", "hidden", "hilb", "hist", "histc", "hold", "hot", "hotelling_test", "housh", "hsv", "hurst", "hygecdf", "hygeinv", "hygepdf", "hygernd", "idivide", "ifftshift", "image", "imagesc", "imfinfo", "imread", "imshow", "imwrite", "index", "info", "inpolygon", "inputname", "interpft", "interpn", "intersect", "invhilb", "iqr", "isa", "isdefinite", "isdir", "is_duplicate_entry", "isequal", "isequalwithequalnans", "isfigure", "ishermitian", "ishghandle", "is_leap_year", "isletter", "ismac", "ismember", "ispc", "isprime", "isprop", "isscalar", "issquare", "isstrprop", "issymmetric", "isunix", "is_valid_file_id", "isvector", "jet", "kendall", "kolmogorov_smirnov_cdf", "kolmogorov_smirnov_test", "kruskal_wallis_test", "krylov", "kurtosis", "laplace_cdf", "laplace_inv", "laplace_pdf", "laplace_rnd", "legend", "legendre", "license", "line", "linkprop", "list_primes", "loadaudio", "loadobj", "logistic_cdf", 
"logistic_inv", "logistic_pdf", "logistic_rnd", "logit", "loglog", "loglogerr", "logm", "logncdf", "logninv", "lognpdf", "lognrnd", "logspace", "lookfor", "ls_command", "lsqnonneg", "magic", "mahalanobis", "manova", "matlabroot", "mcnemar_test", "mean", "meansq", "median", "menu", "mesh", "meshc", "meshgrid", "meshz", "mexext", "mget", "mkpp", "mode", "moment", "movefile", "mpoles", "mput", "namelengthmax", "nargchk", "nargoutchk", "nbincdf", "nbininv", "nbinpdf", "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", "nonzeros", "normcdf", "normest", "norminv", "normpdf", "normrnd", "now", "nthroot", "null", "ocean", "ols", "onenormest", "optimget", "optimset", "orderfields", "orient", "orth", "pack", "pareto", "parseparams", "pascal", "patch", "pathdef", "pcg", "pchip", "pcolor", "pcr", "peaks", "periodogram", "perl", "perms", "pie", "pink", "planerot", "playaudio", "plot", "plotmatrix", "plotyy", "poisscdf", "poissinv", "poisspdf", "poissrnd", "polar", "poly", "polyaffine", "polyarea", "polyderiv", "polyfit", "polygcd", "polyint", "polyout", "polyreduce", "polyval", "polyvalm", "postpad", "powerset", "ppder", "ppint", "ppjumps", "ppplot", "ppval", "pqpnonneg", "prepad", "primes", "print", "print_usage", "prism", "probit", "qp", "qqplot", "quadcc", "quadgk", "quadl", "quadv", "quiver", "qzhess", "rainbow", "randi", "range", "rank", "ranks", "rat", "reallog", "realpow", "realsqrt", "record", "rectangle_lw", "rectangle_sw", "rectint", "refresh", "refreshdata", "regexptranslate", "repmat", "residue", "ribbon", "rindex", "roots", "rose", "rosser", "rotdim", "rref", "run", "run_count", "rundemos", "run_test", "runtests", "saveas", "saveaudio", "saveobj", "savepath", "scatter", "secd", "semilogx", "semilogxerr", "semilogy", "semilogyerr", "setaudio", "setdiff", "setfield", "setxor", "shading", "shift", "shiftdim", "sign_test", "sinc", "sind", "sinetone", "sinewave", "skewness", "slice", "sombrero", "sortrows", "spaugment", "spconvert", "spdiags", "spearman", 
"spectral_adf", "spectral_xdf", "specular", "speed", "spencer", "speye", "spfun", "sphere", "spinmap", "spline", "spones", "sprand", "sprandn", "sprandsym", "spring", "spstats", "spy", "sqp", "stairs", "statistics", "std", "stdnormal_cdf", "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", "stem", "stft", "strcat", "strchr", "strjust", "strmatch", "strread", "strsplit", "strtok", "strtrim", "strtrunc", "structfun", "studentize", "subplot", "subsindex", "subspace", "substr", "substruct", "summer", "surf", "surface", "surfc", "surfl", "surfnorm", "svds", "swapbytes", "sylvester_matrix", "symvar", "synthesis", "table", "tand", "tar", "tcdf", "tempdir", "tempname", "test", "text", "textread", "textscan", "tinv", "title", "toeplitz", "tpdf", "trace", "trapz", "treelayout", "treeplot", "triangle_lw", "triangle_sw", "tril", "trimesh", "triplequad", "triplot", "trisurf", "triu", "trnd", "tsearchn", "t_test", "t_test_regression", "type", "unidcdf", "unidinv", "unidpdf", "unidrnd", "unifcdf", "unifinv", "unifpdf", "unifrnd", "union", "unique", "unix", "unmkpp", "unpack", "untabify", "untar", "unwrap", "unzip", "u_test", "validatestring", "vander", "var", "var_test", "vech", "ver", "version", "view", "voronoi", "voronoin", "waitforbuttonpress", "wavread", "wavwrite", "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", "welch_test", "what", "white", "whitebg", "wienrnd", "wilcoxon_test", "wilkinson", "winter", "xlabel", "xlim", "ylabel", "yulewalker", "zip", "zlabel", "z_test") loadable_kw = ( "airy", "amd", "balance", "besselh", "besseli", "besselj", "besselk", "bessely", "bitpack", "bsxfun", "builtin", "ccolamd", "cellfun", "cellslices", "chol", "choldelete", "cholinsert", "cholinv", "cholshift", "cholupdate", "colamd", "colloc", "convhulln", "convn", "csymamd", "cummax", "cummin", "daspk", "daspk_options", "dasrt", "dasrt_options", "dassl", "dassl_options", "dbclear", "dbdown", "dbstack", "dbstatus", "dbstop", "dbtype", "dbup", "dbwhere", "det", "dlmread", "dmperm", "dot", 
"eig", "eigs", "endgrent", "endpwent", "etree", "fft", "fftn", "fftw", "filter", "find", "full", "gcd", "getgrent", "getgrgid", "getgrnam", "getpwent", "getpwnam", "getpwuid", "getrusage", "givens", "gmtime", "gnuplot_binary", "hess", "ifft", "ifftn", "inv", "isdebugmode", "issparse", "kron", "localtime", "lookup", "lsode", "lsode_options", "lu", "luinc", "luupdate", "matrix_type", "max", "min", "mktime", "pinv", "qr", "qrdelete", "qrinsert", "qrshift", "qrupdate", "quad", "quad_options", "qz", "rand", "rande", "randg", "randn", "randp", "randperm", "rcond", "regexp", "regexpi", "regexprep", "schur", "setgrent", "setpwent", "sort", "spalloc", "sparse", "spparms", "sprank", "sqrtm", "strfind", "strftime", "strptime", "strrep", "svd", "svd_driver", "syl", "symamd", "symbfact", "symrcm", "time", "tsearch", "typecast", "urlread", "urlwrite") mapping_kw = ( "abs", "acos", "acosh", "acot", "acoth", "acsc", "acsch", "angle", "arg", "asec", "asech", "asin", "asinh", "atan", "atanh", "beta", "betainc", "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", "erfcx", "erfinv", "exp", "finite", "fix", "floor", "fmod", "gamma", "gammainc", "gammaln", "imag", "isalnum", "isalpha", "isascii", "iscntrl", "isdigit", "isfinite", "isgraph", "isinf", "islower", "isna", "isnan", "isprint", "ispunct", "isspace", "isupper", "isxdigit", "lcm", "lgamma", "log", "lower", "mod", "real", "rem", "round", "roundb", "sec", "sech", "sign", "sin", "sinh", "sqrt", "tan", "tanh", "toascii", "tolower", "xor") builtin_consts = ( "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED", "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", "WSTOPSIG", "WTERMSIG", "WUNTRACED") tokens = { 'root': [ (r'%\{\s*\n', Comment.Multiline, 
'percentblockcomment'), (r'#\{\s*\n', Comment.Multiline, 'hashblockcomment'), (r'[%#].*$', Comment), (r'^\s*function\b', Keyword, 'deffunc'), # from 'iskeyword' on hg changeset 8cc154f45e37 (words(( '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else', 'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef', 'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties', 'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods', 'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try', 'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'), Keyword), (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw, suffix=r'\b'), Name.Builtin), (words(builtin_consts, suffix=r'\b'), Name.Constant), # operators in Octave but not Matlab: (r'-=|!=|!|/=|--', Operator), # operators: (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), # operators in Octave but not Matlab requiring escape for re: (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator), # operators requiring escape for re: (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), # punctuation: (r'[\[\](){}:@.,]', Punctuation), (r'=|:|;', Punctuation), (r'"[^"]*"', String), (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), (r'\d+[eEf][+-]?[0-9]+', Number.Float), (r'\d+', Number.Integer), # quote can be transpose, instead of string: # (not great, but handles common cases...) 
(r'(?<=[\w)\].])\'+', Operator), (r'(?<![\w)\].])\'', String, 'string'), (r'[a-zA-Z_]\w*', Name), (r'\s+', Text), (r'.', Text), ], 'percentblockcomment': [ (r'^\s*%\}', Comment.Multiline, '#pop'), (r'^.*\n', Comment.Multiline), (r'.', Comment.Multiline), ], 'hashblockcomment': [ (r'^\s*#\}', Comment.Multiline, '#pop'), (r'^.*\n', Comment.Multiline), (r'.', Comment.Multiline), ], 'string': [ (r"[^']*'", String, '#pop'), ], 'deffunc': [ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', bygroups(Whitespace, Text, Whitespace, Punctuation, Whitespace, Name.Function, Punctuation, Text, Punctuation, Whitespace), '#pop'), # function with no args (r'(\s*)([a-zA-Z_]\w*)', bygroups(Whitespace, Name.Function), '#pop'), ], } def analyse_text(text): """Octave is quite hard to spot, and it looks like Matlab as well.""" return 0 class ScilabLexer(RegexLexer): """ For Scilab source code. .. versionadded:: 1.5 """ name = 'Scilab' url = 'https://www.scilab.org/' aliases = ['scilab'] filenames = ['*.sci', '*.sce', '*.tst'] mimetypes = ['text/scilab'] tokens = { 'root': [ (r'//.*?$', Comment.Single), (r'^\s*function\b', Keyword, 'deffunc'), (words(( '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else', 'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef', 'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties', 'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods', 'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try', 'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'), Keyword), (words(_scilab_builtins.functions_kw + _scilab_builtins.commands_kw + _scilab_builtins.macros_kw, suffix=r'\b'), Name.Builtin), (words(_scilab_builtins.variables_kw, suffix=r'\b'), Name.Constant), # operators: (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), # operators requiring escape for re: (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), # 
punctuation: (r'[\[\](){}@.,=:;]+', Punctuation), (r'"[^"]*"', String), # quote can be transpose, instead of string: # (not great, but handles common cases...) (r'(?<=[\w)\].])\'+', Operator), (r'(?<![\w)\].])\'', String, 'string'), (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), (r'\d+[eEf][+-]?[0-9]+', Number.Float), (r'\d+', Number.Integer), (r'[a-zA-Z_]\w*', Name), (r'\s+', Whitespace), (r'.', Text), ], 'string': [ (r"[^']*'", String, '#pop'), (r'.', String, '#pop'), ], 'deffunc': [ (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', bygroups(Whitespace, Text, Whitespace, Punctuation, Whitespace, Name.Function, Punctuation, Text, Punctuation, Whitespace), '#pop'), # function with no args (r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'), ], } # the following is needed to distinguish Scilab and GAP .tst files def analyse_text(text): score = 0.0 # Scilab comments (don't appear in e.g. GAP code) if re.search(r"^\s*//", text): score += 0.1 if re.search(r"^\s*/\*", text): score += 0.1 return min(score, 1.0)
132,852
Python
39.148988
103
0.328704
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/esoteric.py
""" pygments.lexers.esoteric ~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for esoteric languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, words, bygroups from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ Punctuation, Error, Whitespace __all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer', 'CapDLLexer', 'AheuiLexer'] class BrainfuckLexer(RegexLexer): """ Lexer for the esoteric BrainFuck language. """ name = 'Brainfuck' url = 'http://www.muppetlabs.com/~breadbox/bf/' aliases = ['brainfuck', 'bf'] filenames = ['*.bf', '*.b'] mimetypes = ['application/x-brainfuck'] tokens = { 'common': [ # use different colors for different instruction types (r'[.,]+', Name.Tag), (r'[+-]+', Name.Builtin), (r'[<>]+', Name.Variable), (r'[^.,+\-<>\[\]]+', Comment), ], 'root': [ (r'\[', Keyword, 'loop'), (r'\]', Error), include('common'), ], 'loop': [ (r'\[', Keyword, '#push'), (r'\]', Keyword, '#pop'), include('common'), ] } def analyse_text(text): """It's safe to assume that a program which mostly consists of + - and < > is brainfuck.""" plus_minus_count = 0 greater_less_count = 0 range_to_check = max(256, len(text)) for c in text[:range_to_check]: if c == '+' or c == '-': plus_minus_count += 1 if c == '<' or c == '>': greater_less_count += 1 if plus_minus_count > (0.25 * range_to_check): return 1.0 if greater_less_count > (0.25 * range_to_check): return 1.0 result = 0 if '[-]' in text: result += 0.5 return result class BefungeLexer(RegexLexer): """ Lexer for the esoteric Befunge language. .. 
versionadded:: 0.7 """ name = 'Befunge' url = 'http://en.wikipedia.org/wiki/Befunge' aliases = ['befunge'] filenames = ['*.befunge'] mimetypes = ['application/x-befunge'] tokens = { 'root': [ (r'[0-9a-f]', Number), (r'[+*/%!`-]', Operator), # Traditional math (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives (r'[|_mw]', Keyword), (r'[{}]', Name.Tag), # Befunge-98 stack ops (r'".*?"', String.Double), # Strings don't appear to allow escapes (r'\'.', String.Single), # Single character (r'[#;]', Comment), # Trampoline... depends on direction hit (r'[pg&~=@iotsy]', Keyword), # Misc (r'[()A-Z]', Comment), # Fingerprints (r'\s+', Whitespace), # Whitespace doesn't matter ], } class CAmkESLexer(RegexLexer): """ Basic lexer for the input language for the CAmkES component platform. .. versionadded:: 2.1 """ name = 'CAmkES' url = 'https://sel4.systems/CAmkES/' aliases = ['camkes', 'idl4'] filenames = ['*.camkes', '*.idl4'] tokens = { 'root': [ # C pre-processor directive (r'^(\s*)(#.*)(\n)', bygroups(Whitespace, Comment.Preproc, Whitespace)), # Whitespace, comments (r'\s+', Whitespace), (r'/\*(.|\n)*?\*/', Comment), (r'//.*$', Comment), (r'[\[(){},.;\]]', Punctuation), (r'[~!%^&*+=|?:<>/-]', Operator), (words(('assembly', 'attribute', 'component', 'composition', 'configuration', 'connection', 'connector', 'consumes', 'control', 'dataport', 'Dataport', 'Dataports', 'emits', 'event', 'Event', 'Events', 'export', 'from', 'group', 'hardware', 'has', 'interface', 'Interface', 'maybe', 'procedure', 'Procedure', 'Procedures', 'provides', 'template', 'thread', 'threads', 'to', 'uses', 'with'), suffix=r'\b'), Keyword), (words(('bool', 'boolean', 'Buf', 'char', 'character', 'double', 'float', 'in', 'inout', 'int', 'int16_6', 'int32_t', 'int64_t', 'int8_t', 'integer', 'mutex', 'out', 'real', 'refin', 'semaphore', 'signed', 'string', 'struct', 'uint16_t', 'uint32_t', 'uint64_t', 'uint8_t', 'uintptr_t', 'unsigned', 'void'), 
suffix=r'\b'), Keyword.Type), # Recognised attributes (r'[a-zA-Z_]\w*_(priority|domain|buffer)', Keyword.Reserved), (words(('dma_pool', 'from_access', 'to_access'), suffix=r'\b'), Keyword.Reserved), # CAmkES-level include (r'(import)(\s+)((?:<[^>]*>|"[^"]*");)', bygroups(Comment.Preproc, Whitespace, Comment.Preproc)), # C-level include (r'(include)(\s+)((?:<[^>]*>|"[^"]*");)', bygroups(Comment.Preproc, Whitespace, Comment.Preproc)), # Literals (r'0[xX][\da-fA-F]+', Number.Hex), (r'-?[\d]+', Number), (r'-?[\d]+\.[\d]+', Number.Float), (r'"[^"]*"', String), (r'[Tt]rue|[Ff]alse', Name.Builtin), # Identifiers (r'[a-zA-Z_]\w*', Name), ], } class CapDLLexer(RegexLexer): """ Basic lexer for CapDL. The source of the primary tool that reads such specifications is available at https://github.com/seL4/capdl/tree/master/capDL-tool. Note that this lexer only supports a subset of the grammar. For example, identifiers can shadow type names, but these instances are currently incorrectly highlighted as types. Supporting this would need a stateful lexer that is considered unnecessarily complex for now. .. 
versionadded:: 2.2 """ name = 'CapDL' url = 'https://ssrg.nicta.com.au/publications/nictaabstracts/Kuz_KLW_10.abstract.pml' aliases = ['capdl'] filenames = ['*.cdl'] tokens = { 'root': [ # C pre-processor directive (r'^(\s*)(#.*)(\n)', bygroups(Whitespace, Comment.Preproc, Whitespace)), # Whitespace, comments (r'\s+', Whitespace), (r'/\*(.|\n)*?\*/', Comment), (r'(//|--).*$', Comment), (r'[<>\[(){},:;=\]]', Punctuation), (r'\.\.', Punctuation), (words(('arch', 'arm11', 'caps', 'child_of', 'ia32', 'irq', 'maps', 'objects'), suffix=r'\b'), Keyword), (words(('aep', 'asid_pool', 'cnode', 'ep', 'frame', 'io_device', 'io_ports', 'io_pt', 'notification', 'pd', 'pt', 'tcb', 'ut', 'vcpu'), suffix=r'\b'), Keyword.Type), # Properties (words(('asid', 'addr', 'badge', 'cached', 'dom', 'domainID', 'elf', 'fault_ep', 'G', 'guard', 'guard_size', 'init', 'ip', 'prio', 'sp', 'R', 'RG', 'RX', 'RW', 'RWG', 'RWX', 'W', 'WG', 'WX', 'level', 'masked', 'master_reply', 'paddr', 'ports', 'reply', 'uncached'), suffix=r'\b'), Keyword.Reserved), # Literals (r'0[xX][\da-fA-F]+', Number.Hex), (r'\d+(\.\d+)?(k|M)?', Number), (words(('bits',), suffix=r'\b'), Number), (words(('cspace', 'vspace', 'reply_slot', 'caller_slot', 'ipc_buffer_slot'), suffix=r'\b'), Number), # Identifiers (r'[a-zA-Z_][-@\.\w]*', Name), ], } class RedcodeLexer(RegexLexer): """ A simple Redcode lexer based on ICWS'94. Contributed by Adam Blinkinsop <[email protected]>. .. 
versionadded:: 0.8 """ name = 'Redcode' aliases = ['redcode'] filenames = ['*.cw'] opcodes = ('DAT', 'MOV', 'ADD', 'SUB', 'MUL', 'DIV', 'MOD', 'JMP', 'JMZ', 'JMN', 'DJN', 'CMP', 'SLT', 'SPL', 'ORG', 'EQU', 'END') modifiers = ('A', 'B', 'AB', 'BA', 'F', 'X', 'I') tokens = { 'root': [ # Whitespace: (r'\s+', Whitespace), (r';.*$', Comment.Single), # Lexemes: # Identifiers (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), (r'[A-Za-z_]\w+', Name), # Operators (r'[-+*/%]', Operator), (r'[#$@<>]', Operator), # mode (r'[.,]', Punctuation), # mode # Numbers (r'[-+]?\d+', Number.Integer), ], } class AheuiLexer(RegexLexer): """ Aheui is esoteric language based on Korean alphabets. """ name = 'Aheui' url = 'http://aheui.github.io/' aliases = ['aheui'] filenames = ['*.aheui'] tokens = { 'root': [ ('[' '나-낳냐-냫너-넣녀-녛노-놓뇨-눟뉴-닇' '다-닿댜-댷더-덯뎌-뎧도-돟됴-둫듀-딓' '따-땋땨-떃떠-떻뗘-뗳또-똫뚀-뚷뜌-띟' '라-랗랴-럏러-렇려-렿로-롷료-뤃류-릫' '마-맣먀-먛머-멓며-몋모-뫃묘-뭏뮤-믷' '바-밯뱌-뱧버-벟벼-볗보-봏뵤-붛뷰-빃' '빠-빻뺘-뺳뻐-뻫뼈-뼣뽀-뽛뾰-뿧쀼-삏' '사-샇샤-샿서-섷셔-셯소-솧쇼-숳슈-싛' '싸-쌓쌰-썋써-쎃쎠-쎻쏘-쏳쑈-쑿쓔-씧' '자-잫쟈-쟣저-젛져-졓조-좋죠-줗쥬-즿' '차-챃챠-챻처-첳쳐-쳫초-촣쵸-춯츄-칗' '카-캏캬-컇커-컿켜-켷코-콯쿄-쿻큐-킣' '타-탛탸-턓터-텋텨-톃토-톻툐-퉇튜-틯' '파-팧퍄-퍟퍼-펗펴-폏포-퐇표-풓퓨-픻' '하-핳햐-햫허-헣혀-혛호-홓효-훟휴-힇' ']', Operator), ('.', Comment), ], }
9,976
Python
32.036424
90
0.453087
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/minecraft.py
""" pygments.lexers.minecraft ~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Minecraft related languages. SNBT. A data communication format used in Minecraft. wiki: https://minecraft.fandom.com/wiki/NBT_format MCFunction. The Function file for Minecraft Data packs and Add-ons. official: https://learn.microsoft.com/en-us/minecraft/creator/documents/functionsintroduction wiki: https://minecraft.fandom.com/wiki/Function MCSchema. A kind of data Schema for Minecraft Add-on Development. official: https://learn.microsoft.com/en-us/minecraft/creator/reference/content/schemasreference/ community example: https://www.mcbe-dev.net/addons/data-driven/manifest.html :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, default, include, bygroups from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \ Punctuation, String, Text, Whitespace __all__ = ['SNBTLexer', 'MCFunctionLexer', 'MCSchemaLexer'] class SNBTLexer(RegexLexer): """Lexer for stringified NBT, a data format used in Minecraft .. versionadded:: 2.12.0 """ name = "SNBT" url = "https://minecraft.fandom.com/wiki/NBT_format" aliases = ["snbt"] filenames = ["*.snbt"] mimetypes = ["text/snbt"] tokens = { "root": [ # We only look for the open bracket here since square bracket # is only valid in NBT pathing (which is a mcfunction idea). 
(r"\{", Punctuation, "compound"), (r"[^\{]+", Text), ], "whitespace": [ (r"\s+", Whitespace), ], "operators": [ (r"[,:;]", Punctuation), ], "literals": [ (r"(true|false)", Keyword.Constant), (r"-?\d+[eE]-?\d+", Number.Float), (r"-?\d*\.\d+[fFdD]?", Number.Float), (r"-?\d+[bBsSlLfFdD]?", Number.Integer), # Separate states for both types of strings so they don't entangle (r'"', String.Double, "literals.string_double"), (r"'", String.Single, "literals.string_single"), ], "literals.string_double": [ (r"\\.", String.Escape), (r'[^\\"\n]+', String.Double), (r'"', String.Double, "#pop"), ], "literals.string_single": [ (r"\\.", String.Escape), (r"[^\\'\n]+", String.Single), (r"'", String.Single, "#pop"), ], "compound": [ # this handles the unquoted snbt keys # note: stringified keys still work (r"[A-Z_a-z]+", Name.Attribute), include("operators"), include("whitespace"), include("literals"), (r"\{", Punctuation, "#push"), (r"\[", Punctuation, "list"), (r"\}", Punctuation, "#pop"), ], "list": [ (r"[A-Z_a-z]+", Name.Attribute), include("literals"), include("operators"), include("whitespace"), (r"\[", Punctuation, "#push"), (r"\{", Punctuation, "compound"), (r"\]", Punctuation, "#pop"), ], } class MCFunctionLexer(RegexLexer): """Lexer for the mcfunction scripting language used in Minecraft Modelled somewhat after the `GitHub mcfunction grammar <https://github.com/Arcensoth/language-mcfunction>`_. .. 
versionadded:: 2.12.0 """ name = "MCFunction" url = "https://minecraft.fandom.com/wiki/Commands" aliases = ["mcfunction", "mcf"] filenames = ["*.mcfunction"] mimetypes = ["text/mcfunction"] # Used to denotate the start of a block comment, borrowed from Github's mcfunction _block_comment_prefix = "[>!]" tokens = { "root": [ include("names"), include("comments"), include("literals"), include("whitespace"), include("property"), include("operators"), include("selectors"), ], "names": [ # The start of a command (either beginning of line OR after the run keyword) # We don't encode a list of keywords since mods, plugins, or even pre-processors # may add new commands, so we have a 'close-enough' regex which catches them. (r"^(\s*)([a-z_]+)", bygroups(Whitespace, Name.Builtin)), (r"(?<=run)\s+[a-z_]+", Name.Builtin), # UUID (r"\b[0-9a-fA-F]+(?:-[0-9a-fA-F]+){4}\b", Name.Variable), include("resource-name"), # normal command names and scoreboards # there's no way to know the differences unfortuntely (r"[A-Za-z_][\w.#%$]+", Keyword.Constant), (r"[#%$][\w.#%$]+", Name.Variable.Magic), ], "resource-name": [ # resource names have to be lowercase (r"#?[a-z_][a-z_.-]*:[a-z0-9_./-]+", Name.Function), # similar to above except optional `:`` # a `/` must be present "somewhere" (r"#?[a-z0-9_\.\-]+\/[a-z0-9_\.\-\/]+", Name.Function), ], "whitespace": [ (r"\s+", Whitespace), ], "comments": [ (rf"^\s*(#{_block_comment_prefix})", Comment.Multiline, ("comments.block", "comments.block.emphasized")), (r"#.*$", Comment.Single), ], "comments.block": [ (rf"^\s*#{_block_comment_prefix}", Comment.Multiline, "comments.block.emphasized"), (r"^\s*#", Comment.Multiline, "comments.block.normal"), default("#pop"), ], "comments.block.normal": [ include("comments.block.special"), (r"\S+", Comment.Multiline), (r"\n", Text, "#pop"), include("whitespace"), ], "comments.block.emphasized": [ include("comments.block.special"), (r"\S+", String.Doc), (r"\n", Text, "#pop"), include("whitespace"), ], 
"comments.block.special": [ # Params (r"@\S+", Name.Decorator), include("resource-name"), # Scoreboard player names (r"[#%$][\w.#%$]+", Name.Variable.Magic), ], "operators": [ (r"[\-~%^?!+*<>\\/|&=.]", Operator), ], "literals": [ (r"\.\.", Literal), (r"(true|false)", Keyword.Pseudo), # these are like unquoted strings and appear in many places (r"[A-Za-z_]+", Name.Variable.Class), (r"[0-7]b", Number.Byte), (r"[+-]?\d*\.?\d+([eE]?[+-]?\d+)?[df]?\b", Number.Float), (r"[+-]?\d+\b", Number.Integer), (r'"', String.Double, "literals.string-double"), (r"'", String.Single, "literals.string-single"), ], "literals.string-double": [ (r"\\.", String.Escape), (r'[^\\"\n]+', String.Double), (r'"', String.Double, "#pop"), ], "literals.string-single": [ (r"\\.", String.Escape), (r"[^\\'\n]+", String.Single), (r"'", String.Single, "#pop"), ], "selectors": [ (r"@[a-z]", Name.Variable), ], ## Generic Property Container # There are several, differing instances where the language accepts # specific contained keys or contained key, value pairings. # # Property Maps: # - Starts with either `[` or `{` # - Key separated by `:` or `=` # - Deliminated by `,` # # Property Lists: # - Starts with `[` # - Deliminated by `,` # # For simplicity, these patterns match a generic, nestable structure # which follow a key, value pattern. For normal lists, there's only keys. # This allow some "illegal" structures, but we'll accept those for # sake of simplicity # # Examples: # - `[facing=up, powered=true]` (blockstate) # - `[name="hello world", nbt={key: 1b}]` (selector + nbt) # - `[{"text": "value"}, "literal"]` (json) ## "property": [ # This state gets included in root and also several substates # We do this to shortcut the starting of new properties # within other properties. Lists can have sublists and compounds # and values can start a new property (see the `difficult_1.txt` # snippet). 
(r"\{", Punctuation, ("property.curly", "property.key")), (r"\[", Punctuation, ("property.square", "property.key")), ], "property.curly": [ include("whitespace"), include("property"), (r"\}", Punctuation, "#pop"), ], "property.square": [ include("whitespace"), include("property"), (r"\]", Punctuation, "#pop"), # lists can have sequences of items (r",", Punctuation), ], "property.key": [ include("whitespace"), # resource names (for advancements) # can omit `:` to default `minecraft:` # must check if there is a future equals sign if `:` is in the name (r"#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+(?=\s*\=)", Name.Attribute, "property.delimiter"), (r"#?[a-z_][a-z0-9_\.\-/]+", Name.Attribute, "property.delimiter"), # unquoted NBT key (r"[A-Za-z_\-\+]+", Name.Attribute, "property.delimiter"), # quoted JSON or NBT key (r'"', Name.Attribute, "property.delimiter", "literals.string-double"), (r"'", Name.Attribute, "property.delimiter", "literals.string-single"), # index for a list (r"-?\d+", Number.Integer, "property.delimiter"), default("#pop"), ], "property.key.string-double": [ (r"\\.", String.Escape), (r'[^\\"\n]+', Name.Attribute), (r'"', Name.Attribute, "#pop"), ], "property.key.string-single": [ (r"\\.", String.Escape), (r"[^\\'\n]+", Name.Attribute), (r"'", Name.Attribute, "#pop"), ], "property.delimiter": [ include("whitespace"), (r"[:=]!?", Punctuation, "property.value"), (r",", Punctuation), default("#pop"), ], "property.value": [ include("whitespace"), # unquoted resource names are valid literals here (r"#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+", Name.Tag), (r"#?[a-z_][a-z0-9_\.\-/]+", Name.Tag), include("literals"), include("property"), default("#pop"), ], } class MCSchemaLexer(RegexLexer): """Lexer for Minecraft Add-ons data Schemas, an interface structure standard used in Minecraft .. 
versionadded:: 2.14.0 """ name = 'MCSchema' url = 'https://learn.microsoft.com/en-us/minecraft/creator/reference/content/schemasreference/' aliases = ['mcschema'] filenames = ['*.mcschema'] mimetypes = ['text/mcschema'] tokens = { 'commentsandwhitespace': [ (r'\s+', Whitespace), (r'//.*?$', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gimuysd]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop') ], 'badregex': [ (r'\n', Whitespace, '#pop') ], 'singlestring': [ (r'\\.', String.Escape), (r"'", String.Single, '#pop'), (r"[^\\']+", String.Single), ], 'doublestring': [ (r'\\.', String.Escape), (r'"', String.Double, '#pop'), (r'[^\\"]+', String.Double), ], 'root': [ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), # keywords for optional word and field types (r'(?<=: )opt', Operator.Word), (r'(?<=\s)[\w-]*(?=(\s+"|\n))', Keyword.Declaration), # numeric literals (r'0[bB][01]+', Number.Bin), (r'0[oO]?[0-7]+', Number.Oct), (r'0[xX][0-9a-fA-F]+', Number.Hex), (r'\d+', Number.Integer), (r'(\.\d+|\d+\.\d*|\d+)([eE][-+]?\d+)?', Number.Float), # possible punctuations (r'\.\.\.|=>', Punctuation), (r'\+\+|--|~|\?\?=?|\?|:|\\(?=\n)|' r'(<<|>>>?|==?|!=?|(?:\*\*|\|\||&&|[-<>+*%&|^/]))=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), # strings (r"'", String.Single, 'singlestring'), (r'"', String.Double, 'doublestring'), # title line (r'[\w-]*?(?=:\{?\n)', String.Symbol), # title line with a version code, formatted # `major.minor.patch-prerelease+buildmeta` (r'([\w-]*?)(:)(\d+)(?:(\.)(\d+)(?:(\.)(\d+)(?:(\-)((?:[^\W_]|-)*(?:\.(?:[^\W_]|-)*)*))?(?:(\+)((?:[^\W_]|-)+(?:\.(?:[^\W_]|-)+)*))?)?)?(?=:\{?\n)', bygroups(String.Symbol, Operator, Number.Integer, Operator, Number.Integer, Operator, Number.Integer, Operator, String, Operator, String)), (r'.*\n', 
Text), ] }
13,846
Python
34.055696
300
0.476455
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/perl.py
""" pygments.lexers.perl ~~~~~~~~~~~~~~~~~~~~ Lexers for Perl, Raku and related languages. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ using, this, default, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace from pygments.util import shebang_matches __all__ = ['PerlLexer', 'Perl6Lexer'] class PerlLexer(RegexLexer): """ For Perl source code. """ name = 'Perl' url = 'https://www.perl.org' aliases = ['perl', 'pl'] filenames = ['*.pl', '*.pm', '*.t', '*.perl'] mimetypes = ['text/x-perl', 'application/x-perl'] flags = re.DOTALL | re.MULTILINE # TODO: give this to a perl guy who knows how to parse perl... tokens = { 'balanced-regex': [ (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'), (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'), (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), (r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'), (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'), (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'), (r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'), (r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'), (r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'), (r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'), ], 'root': [ (r'\A\#!.+?$', Comment.Hashbang), (r'\#.*?$', Comment.Single), (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline), (words(( 'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach', 'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then', 'unless', 'until', 'while', 'print', 'new', 'BEGIN', 'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'), Keyword), (r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)', bygroups(Keyword, Whitespace, Name, Whitespace, Punctuation, Whitespace), 'format'), 
(r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word), # common delimiters (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex), (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex), (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex), (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex), (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex), # balanced delimiters (r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'), (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'), (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex, 'balanced-regex'), (r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex, 'balanced-regex'), (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex), (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), (r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*', String.Regex), (r'\s+', Whitespace), (words(( 'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir', 'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect', 'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die', 'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent', 'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl', 'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid', 'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin', 'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp', 'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber', 'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname', 'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime', 'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last', 'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat', 'map', 'mkdir', 'msgctl', 
'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'oct', 'open', 'opendir', 'ord', 'our', 'pack', 'pipe', 'pop', 'pos', 'printf', 'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir', 'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename', 'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir', 'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent', 'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent', 'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown', 'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt', 'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 'sysread', 'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr', 'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie', 'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'), Name.Builtin), (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo), (r'(<<)([\'"]?)([a-zA-Z_]\w*)(\2;?\n.*?\n)(\3)(\n)', bygroups(String, String, String.Delimiter, String, String.Delimiter, Whitespace)), (r'__END__', Comment.Preproc, 'end-part'), (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global), (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global), (r'[$@%#]+', Name.Variable, 'varname'), (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), (r'0b[01]+(_[01]+)*', Number.Bin), (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', Number.Float), (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), (r'\d+(_\d+)*', Number.Integer), (r"'(\\\\|\\[^\\]|[^'\\])*'", String), (r'"(\\\\|\\[^\\]|[^"\\])*"', String), (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick), (r'<([^\s>]+)>', String.Regex), (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'), (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'), (r'(q|qq|qw|qr|qx)\[', 
String.Other, 'sb-string'), (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'), (r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other), (r'(package)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)', bygroups(Keyword, Whitespace, Name.Namespace)), (r'(use|require|no)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)', bygroups(Keyword, Whitespace, Name.Namespace)), (r'(sub)(\s+)', bygroups(Keyword, Whitespace), 'funcname'), (words(( 'no', 'package', 'require', 'use'), suffix=r'\b'), Keyword), (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|' r'!~|&&?|\|\||\.{1,3})', Operator), (r'[-+/*%=<>&^|!\\~]=?', Operator), (r'[()\[\]:;,<>/?{}]', Punctuation), # yes, there's no shortage # of punctuation in Perl! (r'(?=\w)', Name, 'name'), ], 'format': [ (r'\.\n', String.Interpol, '#pop'), (r'[^\n]*\n', String.Interpol), ], 'varname': [ (r'\s+', Whitespace), (r'\{', Punctuation, '#pop'), # hash syntax? (r'\)|,', Punctuation, '#pop'), # argument specifier (r'\w+::', Name.Namespace), (r'[\w:]+', Name.Variable, '#pop'), ], 'name': [ (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*(::)?(?=\s*->)', Name.Namespace, '#pop'), (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*::', Name.Namespace, '#pop'), (r'[\w:]+', Name, '#pop'), (r'[A-Z_]+(?=\W)', Name.Constant, '#pop'), (r'(?=\W)', Text, '#pop'), ], 'funcname': [ (r'[a-zA-Z_]\w*[!?]?', Name.Function), (r'\s+', Whitespace), # argument declaration (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Whitespace)), (r';', Punctuation, '#pop'), (r'.*?\{', Punctuation, '#pop'), ], 'cb-string': [ (r'\\[{}\\]', String.Other), (r'\\', String.Other), (r'\{', String.Other, 'cb-string'), (r'\}', String.Other, '#pop'), (r'[^{}\\]+', String.Other) ], 'rb-string': [ (r'\\[()\\]', String.Other), (r'\\', String.Other), (r'\(', String.Other, 'rb-string'), (r'\)', String.Other, '#pop'), (r'[^()]+', String.Other) ], 'sb-string': [ (r'\\[\[\]\\]', String.Other), (r'\\', String.Other), (r'\[', String.Other, 'sb-string'), (r'\]', String.Other, '#pop'), (r'[^\[\]]+', String.Other) ], 'lt-string': [ (r'\\[<>\\]', String.Other), 
(r'\\', String.Other), (r'\<', String.Other, 'lt-string'), (r'\>', String.Other, '#pop'), (r'[^<>]+', String.Other) ], 'end-part': [ (r'.+', Comment.Preproc, '#pop') ] } def analyse_text(text): if shebang_matches(text, r'perl'): return True result = 0 if re.search(r'(?:my|our)\s+[$@%(]', text): result += 0.9 if ':=' in text: # := is not valid Perl, but it appears in unicon, so we should # become less confident if we think we found Perl with := result /= 2 return result class Perl6Lexer(ExtendedRegexLexer): """ For Raku (a.k.a. Perl 6) source code. .. versionadded:: 2.0 """ name = 'Perl6' url = 'https://www.raku.org' aliases = ['perl6', 'pl6', 'raku'] filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'] mimetypes = ['text/x-perl6', 'application/x-perl6'] flags = re.MULTILINE | re.DOTALL PERL6_IDENTIFIER_RANGE = r"['\w:-]" PERL6_KEYWORDS = ( #Phasers 'BEGIN','CATCH','CHECK','CLOSE','CONTROL','DOC','END','ENTER','FIRST', 'INIT','KEEP','LAST','LEAVE','NEXT','POST','PRE','QUIT','UNDO', #Keywords 'anon','augment','but','class','constant','default','does','else', 'elsif','enum','for','gather','given','grammar','has','if','import', 'is','let','loop','made','make','method','module','multi','my','need', 'orwith','our','proceed','proto','repeat','require','return', 'return-rw','returns','role','rule','state','sub','submethod','subset', 'succeed','supersede','token','try','unit','unless','until','use', 'when','while','with','without', #Traits 'export','native','repr','required','rw','symbol', ) PERL6_BUILTINS = ( 'ACCEPTS','abs','abs2rel','absolute','accept','accessed','acos', 'acosec','acosech','acosh','acotan','acotanh','acquire','act','action', 'actions','add','add_attribute','add_enum_value','add_fallback', 'add_method','add_parent','add_private_method','add_role','add_trustee', 'adverb','after','all','allocate','allof','allowed','alternative-names', 
'annotations','antipair','antipairs','any','anyof','app_lifetime', 'append','arch','archname','args','arity','Array','asec','asech','asin', 'asinh','ASSIGN-KEY','ASSIGN-POS','assuming','ast','at','atan','atan2', 'atanh','AT-KEY','atomic-assign','atomic-dec-fetch','atomic-fetch', 'atomic-fetch-add','atomic-fetch-dec','atomic-fetch-inc', 'atomic-fetch-sub','atomic-inc-fetch','AT-POS','attributes','auth', 'await','backtrace','Bag','BagHash','bail-out','base','basename', 'base-repeating','batch','BIND-KEY','BIND-POS','bind-stderr', 'bind-stdin','bind-stdout','bind-udp','bits','bless','block','Bool', 'bool-only','bounds','break','Bridge','broken','BUILD','build-date', 'bytes','cache','callframe','calling-package','CALL-ME','callsame', 'callwith','can','cancel','candidates','cando','can-ok','canonpath', 'caps','caption','Capture','cas','catdir','categorize','categorize-list', 'catfile','catpath','cause','ceiling','cglobal','changed','Channel', 'chars','chdir','child','child-name','child-typename','chmod','chomp', 'chop','chr','chrs','chunks','cis','classify','classify-list','cleanup', 'clone','close','closed','close-stdin','cmp-ok','code','codes','collate', 'column','comb','combinations','command','comment','compiler','Complex', 'compose','compose_type','composer','condition','config', 'configure_destroy','configure_type_checking','conj','connect', 'constraints','construct','contains','contents','copy','cos','cosec', 'cosech','cosh','cotan','cotanh','count','count-only','cpu-cores', 'cpu-usage','CREATE','create_type','cross','cue','curdir','curupdir','d', 'Date','DateTime','day','daycount','day-of-month','day-of-week', 'day-of-year','days-in-month','declaration','decode','decoder','deepmap', 'default','defined','DEFINITE','delayed','DELETE-KEY','DELETE-POS', 'denominator','desc','DESTROY','destroyers','devnull','diag', 'did-you-mean','die','dies-ok','dir','dirname','dir-sep','DISTROnames', 'do','does','does-ok','done','done-testing','duckmap','dynamic','e', 
'eager','earlier','elems','emit','enclosing','encode','encoder', 'encoding','end','ends-with','enum_from_value','enum_value_list', 'enum_values','enums','eof','EVAL','eval-dies-ok','EVALFILE', 'eval-lives-ok','exception','excludes-max','excludes-min','EXISTS-KEY', 'EXISTS-POS','exit','exitcode','exp','expected','explicitly-manage', 'expmod','extension','f','fail','fails-like','fc','feature','file', 'filename','find_method','find_method_qualified','finish','first','flat', 'flatmap','flip','floor','flunk','flush','fmt','format','formatter', 'freeze','from','from-list','from-loop','from-posix','full', 'full-barrier','get','get_value','getc','gist','got','grab','grabpairs', 'grep','handle','handled','handles','hardware','has_accessor','Hash', 'head','headers','hh-mm-ss','hidden','hides','hour','how','hyper','id', 'illegal','im','in','indent','index','indices','indir','infinite', 'infix','infix:<+>','infix:<->','install_method_cache','Instant', 'instead','Int','int-bounds','interval','in-timezone','invalid-str', 'invert','invocant','IO','IO::Notification.watch-path','is_trusted', 'is_type','isa','is-absolute','isa-ok','is-approx','is-deeply', 'is-hidden','is-initial-thread','is-int','is-lazy','is-leap-year', 'isNaN','isnt','is-prime','is-relative','is-routine','is-setting', 'is-win','item','iterator','join','keep','kept','KERNELnames','key', 'keyof','keys','kill','kv','kxxv','l','lang','last','lastcall','later', 'lazy','lc','leading','level','like','line','lines','link','List', 'listen','live','lives-ok','local','lock','log','log10','lookup','lsb', 'made','MAIN','make','Map','match','max','maxpairs','merge','message', 'method','method_table','methods','migrate','min','minmax','minpairs', 'minute','misplaced','Mix','MixHash','mkdir','mode','modified','month', 'move','mro','msb','multi','multiness','my','name','named','named_names', 'narrow','nativecast','native-descriptor','nativesizeof','new','new_type', 
'new-from-daycount','new-from-pairs','next','nextcallee','next-handle', 'nextsame','nextwith','NFC','NFD','NFKC','NFKD','nl-in','nl-out', 'nodemap','nok','none','norm','not','note','now','nude','Num', 'numerator','Numeric','of','offset','offset-in-hours','offset-in-minutes', 'ok','old','on-close','one','on-switch','open','opened','operation', 'optional','ord','ords','orig','os-error','osname','out-buffer','pack', 'package','package-kind','package-name','packages','pair','pairs', 'pairup','parameter','params','parent','parent-name','parents','parse', 'parse-base','parsefile','parse-names','parts','pass','path','path-sep', 'payload','peer-host','peer-port','periods','perl','permutations','phaser', 'pick','pickpairs','pid','placeholder','plan','plus','polar','poll', 'polymod','pop','pos','positional','posix','postfix','postmatch', 'precomp-ext','precomp-target','pred','prefix','prematch','prepend', 'print','printf','print-nl','print-to','private','private_method_table', 'proc','produce','Promise','prompt','protect','pull-one','push', 'push-all','push-at-least','push-exactly','push-until-lazy','put', 'qualifier-type','quit','r','race','radix','rand','range','Rat','raw', 're','read','readchars','readonly','ready','Real','reallocate','reals', 'reason','rebless','receive','recv','redispatcher','redo','reduce', 'rel2abs','relative','release','rename','repeated','replacement', 'report','reserved','resolve','restore','result','resume','rethrow', 'reverse','right','rindex','rmdir','role','roles_to_compose','rolish', 'roll','rootdir','roots','rotate','rotor','round','roundrobin', 'routine-type','run','rwx','s','samecase','samemark','samewith','say', 'schedule-on','scheduler','scope','sec','sech','second','seek','self', 'send','Set','set_hidden','set_name','set_package','set_rw','set_value', 'SetHash','set-instruments','setup_finalization','shape','share','shell', 'shift','sibling','sigil','sign','signal','signals','signature','sin', 
'sinh','sink','sink-all','skip','skip-at-least','skip-at-least-pull-one', 'skip-one','skip-rest','sleep','sleep-timer','sleep-until','Slip','slurp', 'slurp-rest','slurpy','snap','snapper','so','socket-host','socket-port', 'sort','source','source-package','spawn','SPEC','splice','split', 'splitdir','splitpath','sprintf','spurt','sqrt','squish','srand','stable', 'start','started','starts-with','status','stderr','stdout','Str', 'sub_signature','subbuf','subbuf-rw','subname','subparse','subst', 'subst-mutate','substr','substr-eq','substr-rw','subtest','succ','sum', 'Supply','symlink','t','tail','take','take-rw','tan','tanh','tap', 'target','target-name','tc','tclc','tell','then','throttle','throw', 'throws-like','timezone','tmpdir','to','today','todo','toggle','to-posix', 'total','trailing','trans','tree','trim','trim-leading','trim-trailing', 'truncate','truncated-to','trusts','try_acquire','trying','twigil','type', 'type_captures','typename','uc','udp','uncaught_handler','unimatch', 'uniname','uninames','uniparse','uniprop','uniprops','unique','unival', 'univals','unlike','unlink','unlock','unpack','unpolar','unshift', 'unwrap','updir','USAGE','use-ok','utc','val','value','values','VAR', 'variable','verbose-config','version','VMnames','volume','vow','w','wait', 'warn','watch','watch-path','week','weekday-of-month','week-number', 'week-year','WHAT','when','WHERE','WHEREFORE','WHICH','WHO', 'whole-second','WHY','wordcase','words','workaround','wrap','write', 'write-to','x','yada','year','yield','yyyy-mm-dd','z','zip','zip-latest', ) PERL6_BUILTIN_CLASSES = ( #Booleans 'False','True', #Classes 'Any','Array','Associative','AST','atomicint','Attribute','Backtrace', 'Backtrace::Frame','Bag','Baggy','BagHash','Blob','Block','Bool','Buf', 'Callable','CallFrame','Cancellation','Capture','CArray','Channel','Code', 'compiler','Complex','ComplexStr','Cool','CurrentThreadScheduler', 'Cursor','Date','Dateish','DateTime','Distro','Duration','Encoding', 
'Exception','Failure','FatRat','Grammar','Hash','HyperWhatever','Instant', 'Int','int16','int32','int64','int8','IntStr','IO','IO::ArgFiles', 'IO::CatHandle','IO::Handle','IO::Notification','IO::Path', 'IO::Path::Cygwin','IO::Path::QNX','IO::Path::Unix','IO::Path::Win32', 'IO::Pipe','IO::Socket','IO::Socket::Async','IO::Socket::INET','IO::Spec', 'IO::Spec::Cygwin','IO::Spec::QNX','IO::Spec::Unix','IO::Spec::Win32', 'IO::Special','Iterable','Iterator','Junction','Kernel','Label','List', 'Lock','Lock::Async','long','longlong','Macro','Map','Match', 'Metamodel::AttributeContainer','Metamodel::C3MRO','Metamodel::ClassHOW', 'Metamodel::EnumHOW','Metamodel::Finalization','Metamodel::MethodContainer', 'Metamodel::MROBasedMethodDispatch','Metamodel::MultipleInheritance', 'Metamodel::Naming','Metamodel::Primitives','Metamodel::PrivateMethodContainer', 'Metamodel::RoleContainer','Metamodel::Trusting','Method','Mix','MixHash', 'Mixy','Mu','NFC','NFD','NFKC','NFKD','Nil','Num','num32','num64', 'Numeric','NumStr','ObjAt','Order','Pair','Parameter','Perl','Pod::Block', 'Pod::Block::Code','Pod::Block::Comment','Pod::Block::Declarator', 'Pod::Block::Named','Pod::Block::Para','Pod::Block::Table','Pod::Heading', 'Pod::Item','Pointer','Positional','PositionalBindFailover','Proc', 'Proc::Async','Promise','Proxy','PseudoStash','QuantHash','Range','Rat', 'Rational','RatStr','Real','Regex','Routine','Scalar','Scheduler', 'Semaphore','Seq','Set','SetHash','Setty','Signature','size_t','Slip', 'Stash','Str','StrDistance','Stringy','Sub','Submethod','Supplier', 'Supplier::Preserving','Supply','Systemic','Tap','Telemetry', 'Telemetry::Instrument::Thread','Telemetry::Instrument::Usage', 'Telemetry::Period','Telemetry::Sampler','Thread','ThreadPoolScheduler', 'UInt','uint16','uint32','uint64','uint8','Uni','utf8','Variable', 'Version','VM','Whatever','WhateverCode','WrapHandle' ) PERL6_OPERATORS = ( 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div', 'eq', 'eqv', 'extra', 'ff', 
'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm', 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx', '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^', '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&', 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^', '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^', '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv', '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so', 'not', '<==', '==>', '<<==', '==>>','unicmp', ) # Perl 6 has a *lot* of possible bracketing characters # this list was lifted from STD.pm6 (https://github.com/perl6/std) PERL6_BRACKETS = { '\u0028': '\u0029', '\u003c': '\u003e', '\u005b': '\u005d', '\u007b': '\u007d', '\u00ab': '\u00bb', '\u0f3a': '\u0f3b', '\u0f3c': '\u0f3d', '\u169b': '\u169c', '\u2018': '\u2019', '\u201a': '\u2019', '\u201b': '\u2019', '\u201c': '\u201d', '\u201e': '\u201d', '\u201f': '\u201d', '\u2039': '\u203a', '\u2045': '\u2046', '\u207d': '\u207e', '\u208d': '\u208e', '\u2208': '\u220b', '\u2209': '\u220c', '\u220a': '\u220d', '\u2215': '\u29f5', '\u223c': '\u223d', '\u2243': '\u22cd', '\u2252': '\u2253', '\u2254': '\u2255', '\u2264': '\u2265', '\u2266': '\u2267', '\u2268': '\u2269', '\u226a': '\u226b', '\u226e': '\u226f', '\u2270': '\u2271', '\u2272': '\u2273', '\u2274': '\u2275', '\u2276': '\u2277', '\u2278': '\u2279', '\u227a': '\u227b', '\u227c': '\u227d', '\u227e': '\u227f', '\u2280': '\u2281', '\u2282': '\u2283', '\u2284': '\u2285', '\u2286': '\u2287', '\u2288': '\u2289', '\u228a': '\u228b', '\u228f': '\u2290', '\u2291': '\u2292', '\u2298': '\u29b8', '\u22a2': '\u22a3', '\u22a6': '\u2ade', '\u22a8': '\u2ae4', '\u22a9': '\u2ae3', '\u22ab': '\u2ae5', '\u22b0': '\u22b1', '\u22b2': '\u22b3', '\u22b4': '\u22b5', '\u22b6': '\u22b7', '\u22c9': '\u22ca', '\u22cb': '\u22cc', '\u22d0': '\u22d1', '\u22d6': '\u22d7', '\u22d8': '\u22d9', '\u22da': '\u22db', '\u22dc': '\u22dd', '\u22de': '\u22df', 
'\u22e0': '\u22e1', '\u22e2': '\u22e3', '\u22e4': '\u22e5', '\u22e6': '\u22e7', '\u22e8': '\u22e9', '\u22ea': '\u22eb', '\u22ec': '\u22ed', '\u22f0': '\u22f1', '\u22f2': '\u22fa', '\u22f3': '\u22fb', '\u22f4': '\u22fc', '\u22f6': '\u22fd', '\u22f7': '\u22fe', '\u2308': '\u2309', '\u230a': '\u230b', '\u2329': '\u232a', '\u23b4': '\u23b5', '\u2768': '\u2769', '\u276a': '\u276b', '\u276c': '\u276d', '\u276e': '\u276f', '\u2770': '\u2771', '\u2772': '\u2773', '\u2774': '\u2775', '\u27c3': '\u27c4', '\u27c5': '\u27c6', '\u27d5': '\u27d6', '\u27dd': '\u27de', '\u27e2': '\u27e3', '\u27e4': '\u27e5', '\u27e6': '\u27e7', '\u27e8': '\u27e9', '\u27ea': '\u27eb', '\u2983': '\u2984', '\u2985': '\u2986', '\u2987': '\u2988', '\u2989': '\u298a', '\u298b': '\u298c', '\u298d': '\u298e', '\u298f': '\u2990', '\u2991': '\u2992', '\u2993': '\u2994', '\u2995': '\u2996', '\u2997': '\u2998', '\u29c0': '\u29c1', '\u29c4': '\u29c5', '\u29cf': '\u29d0', '\u29d1': '\u29d2', '\u29d4': '\u29d5', '\u29d8': '\u29d9', '\u29da': '\u29db', '\u29f8': '\u29f9', '\u29fc': '\u29fd', '\u2a2b': '\u2a2c', '\u2a2d': '\u2a2e', '\u2a34': '\u2a35', '\u2a3c': '\u2a3d', '\u2a64': '\u2a65', '\u2a79': '\u2a7a', '\u2a7d': '\u2a7e', '\u2a7f': '\u2a80', '\u2a81': '\u2a82', '\u2a83': '\u2a84', '\u2a8b': '\u2a8c', '\u2a91': '\u2a92', '\u2a93': '\u2a94', '\u2a95': '\u2a96', '\u2a97': '\u2a98', '\u2a99': '\u2a9a', '\u2a9b': '\u2a9c', '\u2aa1': '\u2aa2', '\u2aa6': '\u2aa7', '\u2aa8': '\u2aa9', '\u2aaa': '\u2aab', '\u2aac': '\u2aad', '\u2aaf': '\u2ab0', '\u2ab3': '\u2ab4', '\u2abb': '\u2abc', '\u2abd': '\u2abe', '\u2abf': '\u2ac0', '\u2ac1': '\u2ac2', '\u2ac3': '\u2ac4', '\u2ac5': '\u2ac6', '\u2acd': '\u2ace', '\u2acf': '\u2ad0', '\u2ad1': '\u2ad2', '\u2ad3': '\u2ad4', '\u2ad5': '\u2ad6', '\u2aec': '\u2aed', '\u2af7': '\u2af8', '\u2af9': '\u2afa', '\u2e02': '\u2e03', '\u2e04': '\u2e05', '\u2e09': '\u2e0a', '\u2e0c': '\u2e0d', '\u2e1c': '\u2e1d', '\u2e20': '\u2e21', '\u3008': '\u3009', '\u300a': '\u300b', '\u300c': '\u300d', 
'\u300e': '\u300f', '\u3010': '\u3011', '\u3014': '\u3015', '\u3016': '\u3017', '\u3018': '\u3019', '\u301a': '\u301b', '\u301d': '\u301e', '\ufd3e': '\ufd3f', '\ufe17': '\ufe18', '\ufe35': '\ufe36', '\ufe37': '\ufe38', '\ufe39': '\ufe3a', '\ufe3b': '\ufe3c', '\ufe3d': '\ufe3e', '\ufe3f': '\ufe40', '\ufe41': '\ufe42', '\ufe43': '\ufe44', '\ufe47': '\ufe48', '\ufe59': '\ufe5a', '\ufe5b': '\ufe5c', '\ufe5d': '\ufe5e', '\uff08': '\uff09', '\uff1c': '\uff1e', '\uff3b': '\uff3d', '\uff5b': '\uff5d', '\uff5f': '\uff60', '\uff62': '\uff63', } def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''): if boundary_regex_fragment is None: return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \ suffix + r')\b' else: return r'(?<!' + boundary_regex_fragment + r')' + prefix + r'(' + \ r'|'.join(re.escape(x) for x in words) + r')' + suffix + r'(?!' + \ boundary_regex_fragment + r')' def brackets_callback(token_class): def callback(lexer, match, context): groups = match.groupdict() opening_chars = groups['delimiter'] n_chars = len(opening_chars) adverbs = groups.get('adverbs') closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0]) text = context.text if closer is None: # it's not a mirrored character, which means we # just need to look for the next occurrence end_pos = text.find(opening_chars, match.start('delimiter') + n_chars) else: # we need to look for the corresponding closing character, # keep nesting in mind closing_chars = closer * n_chars nesting_level = 1 search_pos = match.start('delimiter') while nesting_level > 0: next_open_pos = text.find(opening_chars, search_pos + n_chars) next_close_pos = text.find(closing_chars, search_pos + n_chars) if next_close_pos == -1: next_close_pos = len(text) nesting_level = 0 elif next_open_pos != -1 and next_open_pos < next_close_pos: nesting_level += 1 search_pos = next_open_pos else: # next_close_pos < next_open_pos nesting_level -= 1 search_pos = next_close_pos end_pos = next_close_pos if 
end_pos < 0: # if we didn't find a closer, just highlight the # rest of the text in this class end_pos = len(text) if adverbs is not None and re.search(r':to\b', adverbs): heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos] end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + r'\s*$', text[end_pos:], re.MULTILINE) if end_heredoc: end_pos += end_heredoc.end() else: end_pos = len(text) yield match.start(), token_class, text[match.start():end_pos + n_chars] context.pos = end_pos + n_chars return callback def opening_brace_callback(lexer, match, context): stack = context.stack yield match.start(), Text, context.text[match.start():match.end()] context.pos = match.end() # if we encounter an opening brace and we're one level # below a token state, it means we need to increment # the nesting level for braces so we know later when # we should return to the token rules. if len(stack) > 2 and stack[-2] == 'token': context.perl6_token_nesting_level += 1 def closing_brace_callback(lexer, match, context): stack = context.stack yield match.start(), Text, context.text[match.start():match.end()] context.pos = match.end() # if we encounter a free closing brace and we're one level # below a token state, it means we need to check the nesting # level to see if we need to return to the token state. if len(stack) > 2 and stack[-2] == 'token': context.perl6_token_nesting_level -= 1 if context.perl6_token_nesting_level == 0: stack.pop() def embedded_perl6_callback(lexer, match, context): context.perl6_token_nesting_level = 1 yield match.start(), Text, context.text[match.start():match.end()] context.pos = match.end() context.stack.append('root') # If you're modifying these rules, be careful if you need to process '{' or '}' # characters. We have special logic for processing these characters (due to the fact # that you can nest Perl 6 code in regex blocks), so if you need to process one of # them, make sure you also process the corresponding one! 
tokens = { 'common': [ (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', brackets_callback(Comment.Multiline)), (r'#[^\n]*$', Comment.Single), (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline), (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline), (r'^=.*?\n\s*?\n', Comment.Multiline), (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', bygroups(Keyword, Name), 'token-sym-brackets'), (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + r')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', bygroups(Keyword, Name), 'pre-token'), # deal with a special case in the Perl 6 grammar (role q { ... }) (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Whitespace, Name, Whitespace)), (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword), (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'), Name.Builtin), (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin), # copied from PerlLexer (r'[$@%&][.^:?=!~]?' 
+ PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable), (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), (r'::\?\w+', Name.Variable.Global), (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), (r'\$(?:<.*?>)+', Name.Variable), (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])' r'(?P=first_char)*)', brackets_callback(String)), # copied from PerlLexer (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), (r'0b[01]+(_[01]+)*', Number.Bin), (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', Number.Float), (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), (r'\d+(_\d+)*', Number.Integer), (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex), (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex), (r'm\w+(?=\()', Name), (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^\w:\s])' r'(?P=first_char)*)', brackets_callback(String.Regex)), (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', String.Regex), (r'<[^\s=].*?\S>', String), (_build_word_match(PERL6_OPERATORS), Operator), (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name), (r"'(\\\\|\\[^\\]|[^'\\])*'", String), (r'"(\\\\|\\[^\\]|[^"\\])*"', String), ], 'root': [ include('common'), (r'\{', opening_brace_callback), (r'\}', closing_brace_callback), (r'.+?', Text), ], 'pre-token': [ include('common'), (r'\{', Text, ('#pop', 'token')), (r'.+?', Text), ], 'token-sym-brackets': [ (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', brackets_callback(Name), ('#pop', 'pre-token')), default(('#pop', 'pre-token')), ], 'token': [ (r'\}', Text, '#pop'), (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)), # make sure that quotes in character classes aren't treated as strings (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex), # make sure that '#' characters in quotes aren't treated as comments 
(r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex), (r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex), (r'#.*?$', Comment.Single), (r'\{', embedded_perl6_callback), ('.+?', String.Regex), ], } def analyse_text(text): def strip_pod(lines): in_pod = False stripped_lines = [] for line in lines: if re.match(r'^=(?:end|cut)', line): in_pod = False elif re.match(r'^=\w+', line): in_pod = True elif not in_pod: stripped_lines.append(line) return stripped_lines # XXX handle block comments lines = text.splitlines() lines = strip_pod(lines) text = '\n'.join(lines) if shebang_matches(text, r'perl6|rakudo|niecza|pugs'): return True saw_perl_decl = False rating = False # check for my/our/has declarations if re.search(r"(?:my|our|has)\s+(?:" + Perl6Lexer.PERL6_IDENTIFIER_RANGE + r"+\s+)?[$@%&(]", text): rating = 0.8 saw_perl_decl = True for line in lines: line = re.sub('#.*', '', line) if re.match(r'^\s*$', line): continue # match v6; use v6; use v6.0; use v6.0.0; if re.match(r'^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line): return True # match class, module, role, enum, grammar declarations class_decl = re.match(r'^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line) if class_decl: if saw_perl_decl or class_decl.group('scope') is not None: return True rating = 0.05 continue break if ':=' in text: # Same logic as above for PerlLexer rating /= 2 return rating def __init__(self, **options): super().__init__(**options) self.encoding = options.get('encoding', 'utf-8')
39,164
Python
52.358311
109
0.482178
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/webassembly.py
""" pygments.lexers.webassembly ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for the WebAssembly text format. The grammar can be found at https://github.com/WebAssembly/spec/blob/master/interpreter/README.md and https://webassembly.github.io/spec/core/text/. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, words, bygroups, default from pygments.token import Text, Comment, Operator, Keyword, String, Number, Punctuation, Name __all__ = ['WatLexer'] keywords = ( 'module', 'import', 'func', 'funcref', 'start', 'param', 'local', 'type', 'result', 'export', 'memory', 'global', 'mut', 'data', 'table', 'elem', 'if', 'then', 'else', 'end', 'block', 'loop' ) builtins = ( 'unreachable', 'nop', 'block', 'loop', 'if', 'else', 'end', 'br', 'br_if', 'br_table', 'return', 'call', 'call_indirect', 'drop', 'select', 'local.get', 'local.set', 'local.tee', 'global.get', 'global.set', 'i32.load', 'i64.load', 'f32.load', 'f64.load', 'i32.load8_s', 'i32.load8_u', 'i32.load16_s', 'i32.load16_u', 'i64.load8_s', 'i64.load8_u', 'i64.load16_s', 'i64.load16_u', 'i64.load32_s', 'i64.load32_u', 'i32.store', 'i64.store', 'f32.store', 'f64.store', 'i32.store8', 'i32.store16', 'i64.store8', 'i64.store16', 'i64.store32', 'memory.size', 'memory.grow', 'i32.const', 'i64.const', 'f32.const', 'f64.const', 'i32.eqz', 'i32.eq', 'i32.ne', 'i32.lt_s', 'i32.lt_u', 'i32.gt_s', 'i32.gt_u', 'i32.le_s', 'i32.le_u', 'i32.ge_s', 'i32.ge_u', 'i64.eqz', 'i64.eq', 'i64.ne', 'i64.lt_s', 'i64.lt_u', 'i64.gt_s', 'i64.gt_u', 'i64.le_s', 'i64.le_u', 'i64.ge_s', 'i64.ge_u', 'f32.eq', 'f32.ne', 'f32.lt', 'f32.gt', 'f32.le', 'f32.ge', 'f64.eq', 'f64.ne', 'f64.lt', 'f64.gt', 'f64.le', 'f64.ge', 'i32.clz', 'i32.ctz', 'i32.popcnt', 'i32.add', 'i32.sub', 'i32.mul', 'i32.div_s', 'i32.div_u', 'i32.rem_s', 'i32.rem_u', 'i32.and', 'i32.or', 'i32.xor', 'i32.shl', 'i32.shr_s', 'i32.shr_u', 'i32.rotl', 'i32.rotr', 'i64.clz', 'i64.ctz', 
'i64.popcnt', 'i64.add', 'i64.sub', 'i64.mul', 'i64.div_s', 'i64.div_u', 'i64.rem_s', 'i64.rem_u', 'i64.and', 'i64.or', 'i64.xor', 'i64.shl', 'i64.shr_s', 'i64.shr_u', 'i64.rotl', 'i64.rotr', 'f32.abs', 'f32.neg', 'f32.ceil', 'f32.floor', 'f32.trunc', 'f32.nearest', 'f32.sqrt', 'f32.add', 'f32.sub', 'f32.mul', 'f32.div', 'f32.min', 'f32.max', 'f32.copysign', 'f64.abs', 'f64.neg', 'f64.ceil', 'f64.floor', 'f64.trunc', 'f64.nearest', 'f64.sqrt', 'f64.add', 'f64.sub', 'f64.mul', 'f64.div', 'f64.min', 'f64.max', 'f64.copysign', 'i32.wrap_i64', 'i32.trunc_f32_s', 'i32.trunc_f32_u', 'i32.trunc_f64_s', 'i32.trunc_f64_u', 'i64.extend_i32_s', 'i64.extend_i32_u', 'i64.trunc_f32_s', 'i64.trunc_f32_u', 'i64.trunc_f64_s', 'i64.trunc_f64_u', 'f32.convert_i32_s', 'f32.convert_i32_u', 'f32.convert_i64_s', 'f32.convert_i64_u', 'f32.demote_f64', 'f64.convert_i32_s', 'f64.convert_i32_u', 'f64.convert_i64_s', 'f64.convert_i64_u', 'f64.promote_f32', 'i32.reinterpret_f32', 'i64.reinterpret_f64', 'f32.reinterpret_i32', 'f64.reinterpret_i64', ) class WatLexer(RegexLexer): """Lexer for the WebAssembly text format. .. 
versionadded:: 2.9 """ name = 'WebAssembly' url = 'https://webassembly.org/' aliases = ['wast', 'wat'] filenames = ['*.wat', '*.wast'] tokens = { 'root': [ (words(keywords, suffix=r'(?=[^a-z_\.])'), Keyword), (words(builtins), Name.Builtin, 'arguments'), (words(['i32', 'i64', 'f32', 'f64']), Keyword.Type), (r'\$[A-Za-z0-9!#$%&\'*+./:<=>?@\\^_`|~-]+', Name.Variable), # yes, all of the are valid in identifiers (r';;.*?$', Comment.Single), (r'\(;', Comment.Multiline, 'nesting_comment'), (r'[+-]?0x[\dA-Fa-f](_?[\dA-Fa-f])*(.([\dA-Fa-f](_?[\dA-Fa-f])*)?)?([pP][+-]?[\dA-Fa-f](_?[\dA-Fa-f])*)?', Number.Float), (r'[+-]?\d.\d(_?\d)*[eE][+-]?\d(_?\d)*', Number.Float), (r'[+-]?\d.\d(_?\d)*', Number.Float), (r'[+-]?\d.[eE][+-]?\d(_?\d)*', Number.Float), (r'[+-]?(inf|nan:0x[\dA-Fa-f](_?[\dA-Fa-f])*|nan)', Number.Float), (r'[+-]?0x[\dA-Fa-f](_?[\dA-Fa-f])*', Number.Hex), (r'[+-]?\d(_?\d)*', Number.Integer), (r'[\(\)]', Punctuation), (r'"', String.Double, 'string'), (r'\s+', Text), ], 'nesting_comment': [ (r'\(;', Comment.Multiline, '#push'), (r';\)', Comment.Multiline, '#pop'), (r'[^;(]+', Comment.Multiline), (r'[;(]', Comment.Multiline), ], 'string': [ (r'\\[\dA-Fa-f][\dA-Fa-f]', String.Escape), # must have exactly two hex digits (r'\\t', String.Escape), (r'\\n', String.Escape), (r'\\r', String.Escape), (r'\\"', String.Escape), (r"\\'", String.Escape), (r'\\u\{[\dA-Fa-f](_?[\dA-Fa-f])*\}', String.Escape), (r'\\\\', String.Escape), (r'"', String.Double, '#pop'), (r'[^"\\]+', String.Double), ], 'arguments': [ (r'\s+', Text), (r'(offset)(=)(0x[\dA-Fa-f](_?[\dA-Fa-f])*)', bygroups(Keyword, Operator, Number.Hex)), (r'(offset)(=)(\d(_?\d)*)', bygroups(Keyword, Operator, Number.Integer)), (r'(align)(=)(0x[\dA-Fa-f](_?[\dA-Fa-f])*)', bygroups(Keyword, Operator, Number.Hex)), (r'(align)(=)(\d(_?\d)*)', bygroups(Keyword, Operator, Number.Integer)), default('#pop'), ] }
5,699
Python
46.107438
133
0.506229
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/compiled.py
""" pygments.lexers.compiled ~~~~~~~~~~~~~~~~~~~~~~~~ Just export lexer classes previously contained in this module. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexers.jvm import JavaLexer, ScalaLexer from pygments.lexers.c_cpp import CLexer, CppLexer from pygments.lexers.d import DLexer from pygments.lexers.objective import ObjectiveCLexer, \ ObjectiveCppLexer, LogosLexer from pygments.lexers.go import GoLexer from pygments.lexers.rust import RustLexer from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer from pygments.lexers.pascal import DelphiLexer, PortugolLexer, Modula2Lexer from pygments.lexers.ada import AdaLexer from pygments.lexers.business import CobolLexer, CobolFreeformatLexer from pygments.lexers.fortran import FortranLexer from pygments.lexers.prolog import PrologLexer from pygments.lexers.python import CythonLexer from pygments.lexers.graphics import GLShaderLexer from pygments.lexers.ml import OcamlLexer from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer from pygments.lexers.ooc import OocLexer from pygments.lexers.felix import FelixLexer from pygments.lexers.nimrod import NimrodLexer from pygments.lexers.crystal import CrystalLexer __all__ = []
1,407
Python
39.22857
78
0.812367
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/monte.py
""" pygments.lexers.monte ~~~~~~~~~~~~~~~~~~~~~ Lexer for the Monte programming language. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ Punctuation, String, Whitespace from pygments.lexer import RegexLexer, include, words __all__ = ['MonteLexer'] # `var` handled separately # `interface` handled separately _declarations = ['bind', 'def', 'fn', 'object'] _methods = ['method', 'to'] _keywords = [ 'as', 'break', 'catch', 'continue', 'else', 'escape', 'exit', 'exports', 'extends', 'finally', 'for', 'guards', 'if', 'implements', 'import', 'in', 'match', 'meta', 'pass', 'return', 'switch', 'try', 'via', 'when', 'while', ] _operators = [ # Unary '~', '!', # Binary '+', '-', '*', '/', '%', '**', '&', '|', '^', '<<', '>>', # Binary augmented '+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=', # Comparison '==', '!=', '<', '<=', '>', '>=', '<=>', # Patterns and assignment ':=', '?', '=~', '!~', '=>', # Calls and sends '.', '<-', '->', ] _escape_pattern = ( r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' r'\\["\'\\bftnr])') # _char = _escape_chars + [('.', String.Char)] _identifier = r'[_a-zA-Z]\w*' _constants = [ # Void constants 'null', # Bool constants 'false', 'true', # Double constants 'Infinity', 'NaN', # Special objects 'M', 'Ref', 'throw', 'traceln', ] _guards = [ 'Any', 'Binding', 'Bool', 'Bytes', 'Char', 'DeepFrozen', 'Double', 'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless', 'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void', ] _safeScope = [ '_accumulateList', '_accumulateMap', '_auditedBy', '_bind', '_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop', '_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList', '_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc', '_makeProtocolDesc', '_makeSourceSpan', '_makeString', 
'_makeVarSlot', '_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher', '_slotToBinding', '_splitList', '_suchThat', '_switchFailed', '_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser', 'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser', ] class MonteLexer(RegexLexer): """ Lexer for the Monte programming language. .. versionadded:: 2.2 """ name = 'Monte' url = 'https://monte.readthedocs.io/' aliases = ['monte'] filenames = ['*.mt'] tokens = { 'root': [ # Comments (r'#[^\n]*\n', Comment), # Docstrings # Apologies for the non-greedy matcher here. (r'/\*\*.*?\*/', String.Doc), # `var` declarations (r'\bvar\b', Keyword.Declaration, 'var'), # `interface` declarations (r'\binterface\b', Keyword.Declaration, 'interface'), # method declarations (words(_methods, prefix='\\b', suffix='\\b'), Keyword, 'method'), # All other declarations (words(_declarations, prefix='\\b', suffix='\\b'), Keyword.Declaration), # Keywords (words(_keywords, prefix='\\b', suffix='\\b'), Keyword), # Literals ('[+-]?0x[_0-9a-fA-F]+', Number.Hex), (r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float), ('[+-]?[_0-9]+', Number.Integer), ("'", String.Double, 'char'), ('"', String.Double, 'string'), # Quasiliterals ('`', String.Backtick, 'ql'), # Operators (words(_operators), Operator), # Verb operators (_identifier + '=', Operator.Word), # Safe scope constants (words(_constants, prefix='\\b', suffix='\\b'), Keyword.Pseudo), # Safe scope guards (words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type), # All other safe scope names (words(_safeScope, prefix='\\b', suffix='\\b'), Name.Builtin), # Identifiers (_identifier, Name), # Punctuation (r'\(|\)|\{|\}|\[|\]|:|,', Punctuation), # Whitespace (' +', Whitespace), # Definite lexer errors ('=', Error), ], 'char': [ # It is definitely an error to have a char of width == 0. 
("'", Error, 'root'), (_escape_pattern, String.Escape, 'charEnd'), ('.', String.Char, 'charEnd'), ], 'charEnd': [ ("'", String.Char, '#pop:2'), # It is definitely an error to have a char of width > 1. ('.', Error), ], # The state of things coming into an interface. 'interface': [ (' +', Whitespace), (_identifier, Name.Class, '#pop'), include('root'), ], # The state of things coming into a method. 'method': [ (' +', Whitespace), (_identifier, Name.Function, '#pop'), include('root'), ], 'string': [ ('"', String.Double, 'root'), (_escape_pattern, String.Escape), (r'\n', String.Double), ('.', String.Double), ], 'ql': [ ('`', String.Backtick, 'root'), (r'\$' + _escape_pattern, String.Escape), (r'\$\$', String.Escape), (r'@@', String.Escape), (r'\$\{', String.Interpol, 'qlNest'), (r'@\{', String.Interpol, 'qlNest'), (r'\$' + _identifier, Name), ('@' + _identifier, Name), ('.', String.Backtick), ], 'qlNest': [ (r'\}', String.Interpol, '#pop'), include('root'), ], # The state of things immediately following `var`. 'var': [ (' +', Whitespace), (_identifier, Name.Variable, '#pop'), include('root'), ], }
6,290
Python
29.687805
77
0.467568
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/wren.py
""" pygments.lexers.wren ~~~~~~~~~~~~~~~~~~~~ Lexer for Wren. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import include, RegexLexer, words from pygments.token import Whitespace, Punctuation, Keyword, Name, Comment, \ Operator, Number, String, Error __all__ = ['WrenLexer'] class WrenLexer(RegexLexer): """ For Wren source code, version 0.4.0. .. versionadded:: 2.14.0 """ name = 'Wren' url = 'https://wren.io' aliases = ['wren'] filenames = ['*.wren'] flags = re.MULTILINE | re.DOTALL tokens = { 'root': [ # Whitespace. (r'\s+', Whitespace), (r'[,\\\[\]{}]', Punctuation), # Really 'root', not '#push': in 'interpolation', # parentheses inside the interpolation expression are # Punctuation, not String.Interpol. (r'\(', Punctuation, 'root'), (r'\)', Punctuation, '#pop'), # Keywords. (words(( 'as', 'break', 'class', 'construct', 'continue', 'else', 'for', 'foreign', 'if', 'import', 'return', 'static', 'super', 'this', 'var', 'while'), prefix = r'(?<!\.)', suffix = r'\b'), Keyword), (words(( 'true', 'false', 'null'), prefix = r'(?<!\.)', suffix = r'\b'), Keyword.Constant), (words(( 'in', 'is'), prefix = r'(?<!\.)', suffix = r'\b'), Operator.Word), # Comments. (r'/\*', Comment.Multiline, 'comment'), # Multiline, can nest. (r'//.*?$', Comment.Single), # Single line. (r'#.*?(\(.*?\))?$', Comment.Special), # Attribute or shebang. # Names and operators. (r'[!%&*+\-./:<=>?\\^|~]+', Operator), (r'[a-z][a-zA-Z_0-9]*', Name), (r'[A-Z][a-zA-Z_0-9]*', Name.Class), (r'__[a-zA-Z_0-9]*', Name.Variable.Class), (r'_[a-zA-Z_0-9]*', Name.Variable.Instance), # Numbers. (r'0x[0-9a-fA-F]+', Number.Hex), (r'\d+(\.\d+)?([eE][-+]?\d+)?', Number.Float), # Strings. 
(r'""".*?"""', String), # Raw string (r'"', String, 'string'), # Other string ], 'comment': [ (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'([^*/]|\*(?!/)|/(?!\*))+', Comment.Multiline), ], 'string': [ (r'"', String, '#pop'), (r'\\[\\%"0abefnrtv]', String.Escape), # Escape. (r'\\x[a-fA-F0-9]{2}', String.Escape), # Byte escape. (r'\\u[a-fA-F0-9]{4}', String.Escape), # Unicode escape. (r'\\U[a-fA-F0-9]{8}', String.Escape), # Long Unicode escape. (r'%\(', String.Interpol, 'interpolation'), (r'[^\\"%]+', String), # All remaining characters. ], 'interpolation': [ # redefine closing paren to be String.Interpol (r'\)', String.Interpol, '#pop'), include('root'), ], }
3,239
Python
31.4
78
0.442421
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/inferno.py
""" pygments.lexers.inferno ~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Inferno os and all the related stuff. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, default from pygments.token import Punctuation, Comment, Operator, Keyword, \ Name, String, Number, Whitespace __all__ = ['LimboLexer'] class LimboLexer(RegexLexer): """ Lexer for Limbo programming language TODO: - maybe implement better var declaration highlighting - some simple syntax error highlighting .. versionadded:: 2.0 """ name = 'Limbo' url = 'http://www.vitanuova.com/inferno/limbo.html' aliases = ['limbo'] filenames = ['*.b'] mimetypes = ['text/limbo'] tokens = { 'whitespace': [ (r'^(\s*)([a-zA-Z_]\w*:)(\s*\n)', bygroups(Whitespace, Name.Label, Whitespace)), (r'\n', Whitespace), (r'\s+', Whitespace), (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\', String), # stray backslash ], 'statements': [ (r'"', String, 'string'), (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float), (r'16r[0-9a-fA-F]+', Number.Hex), (r'8r[0-7]+', Number.Oct), (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer), (r'[()\[\],.]', Punctuation), (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator), (r'(alt|break|case|continue|cyclic|do|else|exit' r'for|hd|if|implement|import|include|len|load|or' r'pick|return|spawn|tagof|tl|to|while)\b', Keyword), (r'(byte|int|big|real|string|array|chan|list|adt' r'|fn|ref|of|module|self|type)\b', Keyword.Type), (r'(con|iota|nil)\b', Keyword.Constant), (r'[a-zA-Z_]\w*', Name), ], 'statement' : [ include('whitespace'), include('statements'), ('[{}]', Punctuation), (';', 
Punctuation, '#pop'), ], 'root': [ include('whitespace'), default('statement'), ], } def analyse_text(text): # Any limbo module implements something if re.search(r'^implement \w+;', text, re.MULTILINE): return 0.7 # TODO: # - Make lexers for: # - asm sources # - man pages # - mkfiles # - module definitions # - namespace definitions # - shell scripts # - maybe keyfiles and fonts # they all seem to be quite similar to their equivalents # from unix world, so there should not be a lot of problems
3,136
Python
31.340206
81
0.491709
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_julia_builtins.py
""" pygments.lexers._julia_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Julia builtins. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # operators # see https://github.com/JuliaLang/julia/blob/master/src/julia-parser.scm # Julia v1.6.0-rc1 OPERATORS_LIST = [ # other '->', # prec-assignment ':=', '$=', # prec-conditional, prec-lazy-or, prec-lazy-and '?', '||', '&&', # prec-colon ':', # prec-plus '$', # prec-decl '::', ] DOTTED_OPERATORS_LIST = [ # prec-assignment r'=', r'+=', r'-=', r'*=', r'/=', r'//=', r'\=', r'^=', r'÷=', r'%=', r'<<=', r'>>=', r'>>>=', r'|=', r'&=', r'⊻=', r'≔', r'⩴', r"≕'", r'~', # prec-pair '=>', # prec-arrow r'→', r'↔', r'↚', r'↛', r'↞', r'↠', r'↢', r'↣', r'↦', r'↤', r'↮', r'⇎', r'⇍', r'⇏', r'⇐', r'⇒', r'⇔', r'⇴', r'⇶', r'⇷', r'⇸', r'⇹', r'⇺', r'⇻', r'⇼', r'⇽', r'⇾', r'⇿', r'⟵', r'⟶', r'⟷', r'⟹', r'⟺', r'⟻', r'⟼', r'⟽', r'⟾', r'⟿', r'⤀', r'⤁', r'⤂', r'⤃', r'⤄', r'⤅', r'⤆', r'⤇', r'⤌', r'⤍', r'⤎', r'⤏', r'⤐', r'⤑', r'⤔', r'⤕', r'⤖', r'⤗', r'⤘', r'⤝', r'⤞', r'⤟', r'⤠', r'⥄', r'⥅', r'⥆', r'⥇', r'⥈', r'⥊', r'⥋', r'⥎', r'⥐', r'⥒', r'⥓', r'⥖', r'⥗', r'⥚', r'⥛', r'⥞', r'⥟', r'⥢', r'⥤', r'⥦', r'⥧', r'⥨', r'⥩', r'⥪', r'⥫', r'⥬', r'⥭', r'⥰', r'⧴', r'⬱', r'⬰', r'⬲', r'⬳', r'⬴', r'⬵', r'⬶', r'⬷', r'⬸', r'⬹', r'⬺', r'⬻', r'⬼', r'⬽', r'⬾', r'⬿', r'⭀', r'⭁', r'⭂', r'⭃', r'⭄', r'⭇', r'⭈', r'⭉', r'⭊', r'⭋', r'⭌', r'←', r'→', r'⇜', r'⇝', r'↜', r'↝', r'↩', r'↪', r'↫', r'↬', r'↼', r'↽', r'⇀', r'⇁', r'⇄', r'⇆', r'⇇', r'⇉', r'⇋', r'⇌', r'⇚', r'⇛', r'⇠', r'⇢', r'↷', r'↶', r'↺', r'↻', r'-->', r'<--', r'<-->', # prec-comparison r'>', r'<', r'>=', r'≥', r'<=', r'≤', r'==', r'===', r'≡', r'!=', r'≠', r'!==', r'≢', r'∈', r'∉', r'∋', r'∌', r'⊆', r'⊈', r'⊂', r'⊄', r'⊊', r'∝', r'∊', r'∍', r'∥', r'∦', r'∷', r'∺', r'∻', r'∽', r'∾', r'≁', r'≃', r'≂', r'≄', r'≅', r'≆', r'≇', r'≈', r'≉', r'≊', r'≋', r'≌', r'≍', r'≎', r'≐', r'≑', r'≒', r'≓', r'≖', r'≗', r'≘', r'≙', r'≚', r'≛', r'≜', r'≝', r'≞', r'≟', r'≣', r'≦', r'≧', 
r'≨', r'≩', r'≪', r'≫', r'≬', r'≭', r'≮', r'≯', r'≰', r'≱', r'≲', r'≳', r'≴', r'≵', r'≶', r'≷', r'≸', r'≹', r'≺', r'≻', r'≼', r'≽', r'≾', r'≿', r'⊀', r'⊁', r'⊃', r'⊅', r'⊇', r'⊉', r'⊋', r'⊏', r'⊐', r'⊑', r'⊒', r'⊜', r'⊩', r'⊬', r'⊮', r'⊰', r'⊱', r'⊲', r'⊳', r'⊴', r'⊵', r'⊶', r'⊷', r'⋍', r'⋐', r'⋑', r'⋕', r'⋖', r'⋗', r'⋘', r'⋙', r'⋚', r'⋛', r'⋜', r'⋝', r'⋞', r'⋟', r'⋠', r'⋡', r'⋢', r'⋣', r'⋤', r'⋥', r'⋦', r'⋧', r'⋨', r'⋩', r'⋪', r'⋫', r'⋬', r'⋭', r'⋲', r'⋳', r'⋴', r'⋵', r'⋶', r'⋷', r'⋸', r'⋹', r'⋺', r'⋻', r'⋼', r'⋽', r'⋾', r'⋿', r'⟈', r'⟉', r'⟒', r'⦷', r'⧀', r'⧁', r'⧡', r'⧣', r'⧤', r'⧥', r'⩦', r'⩧', r'⩪', r'⩫', r'⩬', r'⩭', r'⩮', r'⩯', r'⩰', r'⩱', r'⩲', r'⩳', r'⩵', r'⩶', r'⩷', r'⩸', r'⩹', r'⩺', r'⩻', r'⩼', r'⩽', r'⩾', r'⩿', r'⪀', r'⪁', r'⪂', r'⪃', r'⪄', r'⪅', r'⪆', r'⪇', r'⪈', r'⪉', r'⪊', r'⪋', r'⪌', r'⪍', r'⪎', r'⪏', r'⪐', r'⪑', r'⪒', r'⪓', r'⪔', r'⪕', r'⪖', r'⪗', r'⪘', r'⪙', r'⪚', r'⪛', r'⪜', r'⪝', r'⪞', r'⪟', r'⪠', r'⪡', r'⪢', r'⪣', r'⪤', r'⪥', r'⪦', r'⪧', r'⪨', r'⪩', r'⪪', r'⪫', r'⪬', r'⪭', r'⪮', r'⪯', r'⪰', r'⪱', r'⪲', r'⪳', r'⪴', r'⪵', r'⪶', r'⪷', r'⪸', r'⪹', r'⪺', r'⪻', r'⪼', r'⪽', r'⪾', r'⪿', r'⫀', r'⫁', r'⫂', r'⫃', r'⫄', r'⫅', r'⫆', r'⫇', r'⫈', r'⫉', r'⫊', r'⫋', r'⫌', r'⫍', r'⫎', r'⫏', r'⫐', r'⫑', r'⫒', r'⫓', r'⫔', r'⫕', r'⫖', r'⫗', r'⫘', r'⫙', r'⫷', r'⫸', r'⫹', r'⫺', r'⊢', r'⊣', r'⟂', r'<:', r'>:', # prec-pipe '<|', '|>', # prec-colon r'…', r'⁝', r'⋮', r'⋱', r'⋰', r'⋯', # prec-plus r'+', r'-', r'¦', r'|', r'⊕', r'⊖', r'⊞', r'⊟', r'++', r'∪', r'∨', r'⊔', r'±', r'∓', r'∔', r'∸', r'≏', r'⊎', r'⊻', r'⊽', r'⋎', r'⋓', r'⧺', r'⧻', r'⨈', r'⨢', r'⨣', r'⨤', r'⨥', r'⨦', r'⨧', r'⨨', r'⨩', r'⨪', r'⨫', r'⨬', r'⨭', r'⨮', r'⨹', r'⨺', r'⩁', r'⩂', r'⩅', r'⩊', r'⩌', r'⩏', r'⩐', r'⩒', r'⩔', r'⩖', r'⩗', r'⩛', r'⩝', r'⩡', r'⩢', r'⩣', # prec-times r'*', r'/', r'⌿', r'÷', r'%', r'&', r'⋅', r'∘', r'×', '\\', r'∩', r'∧', r'⊗', r'⊘', r'⊙', r'⊚', r'⊛', r'⊠', r'⊡', r'⊓', r'∗', r'∙', r'∤', r'⅋', r'≀', r'⊼', r'⋄', r'⋆', r'⋇', r'⋉', r'⋊', r'⋋', r'⋌', r'⋏', r'⋒', r'⟑', r'⦸', r'⦼', r'⦾', 
r'⦿', r'⧶', r'⧷', r'⨇', r'⨰', r'⨱', r'⨲', r'⨳', r'⨴', r'⨵', r'⨶', r'⨷', r'⨸', r'⨻', r'⨼', r'⨽', r'⩀', r'⩃', r'⩄', r'⩋', r'⩍', r'⩎', r'⩑', r'⩓', r'⩕', r'⩘', r'⩚', r'⩜', r'⩞', r'⩟', r'⩠', r'⫛', r'⊍', r'▷', r'⨝', r'⟕', r'⟖', r'⟗', r'⨟', # prec-rational, prec-bitshift '//', '>>', '<<', '>>>', # prec-power r'^', r'↑', r'↓', r'⇵', r'⟰', r'⟱', r'⤈', r'⤉', r'⤊', r'⤋', r'⤒', r'⤓', r'⥉', r'⥌', r'⥍', r'⥏', r'⥑', r'⥔', r'⥕', r'⥘', r'⥙', r'⥜', r'⥝', r'⥠', r'⥡', r'⥣', r'⥥', r'⥮', r'⥯', r'↑', r'↓', # unary-ops, excluding unary-and-binary-ops '!', r'¬', r'√', r'∛', r'∜' ] # Generated with the following in Julia v1.6.0-rc1 ''' #!/usr/bin/env julia import REPL.REPLCompletions res = String["in", "isa", "where"] for kw in collect(x.keyword for x in REPLCompletions.complete_keyword("")) if !(contains(kw, " ") || kw == "struct") push!(res, kw) end end sort!(unique!(setdiff!(res, ["true", "false"]))) foreach(x -> println("\'", x, "\',"), res) ''' KEYWORD_LIST = ( 'baremodule', 'begin', 'break', 'catch', 'ccall', 'const', 'continue', 'do', 'else', 'elseif', 'end', 'export', 'finally', 'for', 'function', 'global', 'if', 'import', 'in', 'isa', 'let', 'local', 'macro', 'module', 'quote', 'return', 'try', 'using', 'where', 'while', ) # Generated with the following in Julia v1.6.0-rc1 ''' #!/usr/bin/env julia import REPL.REPLCompletions res = String[] for compl in filter!(x -> isa(x, REPLCompletions.ModuleCompletion) && (x.parent === Base || x.parent === Core), REPLCompletions.completions("", 0)[1]) try v = eval(Symbol(compl.mod)) if (v isa Type || v isa TypeVar) && (compl.mod != "=>") push!(res, compl.mod) end catch e end end sort!(unique!(res)) foreach(x -> println("\'", x, "\',"), res) ''' BUILTIN_LIST = ( 'AbstractArray', 'AbstractChannel', 'AbstractChar', 'AbstractDict', 'AbstractDisplay', 'AbstractFloat', 'AbstractIrrational', 'AbstractMatch', 'AbstractMatrix', 'AbstractPattern', 'AbstractRange', 'AbstractSet', 'AbstractString', 'AbstractUnitRange', 'AbstractVecOrMat', 'AbstractVector', 
'Any', 'ArgumentError', 'Array', 'AssertionError', 'BigFloat', 'BigInt', 'BitArray', 'BitMatrix', 'BitSet', 'BitVector', 'Bool', 'BoundsError', 'CapturedException', 'CartesianIndex', 'CartesianIndices', 'Cchar', 'Cdouble', 'Cfloat', 'Channel', 'Char', 'Cint', 'Cintmax_t', 'Clong', 'Clonglong', 'Cmd', 'Colon', 'Complex', 'ComplexF16', 'ComplexF32', 'ComplexF64', 'ComposedFunction', 'CompositeException', 'Condition', 'Cptrdiff_t', 'Cshort', 'Csize_t', 'Cssize_t', 'Cstring', 'Cuchar', 'Cuint', 'Cuintmax_t', 'Culong', 'Culonglong', 'Cushort', 'Cvoid', 'Cwchar_t', 'Cwstring', 'DataType', 'DenseArray', 'DenseMatrix', 'DenseVecOrMat', 'DenseVector', 'Dict', 'DimensionMismatch', 'Dims', 'DivideError', 'DomainError', 'EOFError', 'Enum', 'ErrorException', 'Exception', 'ExponentialBackOff', 'Expr', 'Float16', 'Float32', 'Float64', 'Function', 'GlobalRef', 'HTML', 'IO', 'IOBuffer', 'IOContext', 'IOStream', 'IdDict', 'IndexCartesian', 'IndexLinear', 'IndexStyle', 'InexactError', 'InitError', 'Int', 'Int128', 'Int16', 'Int32', 'Int64', 'Int8', 'Integer', 'InterruptException', 'InvalidStateException', 'Irrational', 'KeyError', 'LinRange', 'LineNumberNode', 'LinearIndices', 'LoadError', 'MIME', 'Matrix', 'Method', 'MethodError', 'Missing', 'MissingException', 'Module', 'NTuple', 'NamedTuple', 'Nothing', 'Number', 'OrdinalRange', 'OutOfMemoryError', 'OverflowError', 'Pair', 'PartialQuickSort', 'PermutedDimsArray', 'Pipe', 'ProcessFailedException', 'Ptr', 'QuoteNode', 'Rational', 'RawFD', 'ReadOnlyMemoryError', 'Real', 'ReentrantLock', 'Ref', 'Regex', 'RegexMatch', 'RoundingMode', 'SegmentationFault', 'Set', 'Signed', 'Some', 'StackOverflowError', 'StepRange', 'StepRangeLen', 'StridedArray', 'StridedMatrix', 'StridedVecOrMat', 'StridedVector', 'String', 'StringIndexError', 'SubArray', 'SubString', 'SubstitutionString', 'Symbol', 'SystemError', 'Task', 'TaskFailedException', 'Text', 'TextDisplay', 'Timer', 'Tuple', 'Type', 'TypeError', 'TypeVar', 'UInt', 'UInt128', 'UInt16', 
'UInt32', 'UInt64', 'UInt8', 'UndefInitializer', 'UndefKeywordError', 'UndefRefError', 'UndefVarError', 'Union', 'UnionAll', 'UnitRange', 'Unsigned', 'Val', 'Vararg', 'VecElement', 'VecOrMat', 'Vector', 'VersionNumber', 'WeakKeyDict', 'WeakRef', ) # Generated with the following in Julia v1.6.0-rc1 ''' #!/usr/bin/env julia import REPL.REPLCompletions res = String["true", "false"] for compl in filter!(x -> isa(x, REPLCompletions.ModuleCompletion) && (x.parent === Base || x.parent === Core), REPLCompletions.completions("", 0)[1]) try v = eval(Symbol(compl.mod)) if !(v isa Function || v isa Type || v isa TypeVar || v isa Module || v isa Colon) push!(res, compl.mod) end catch e end end sort!(unique!(res)) foreach(x -> println("\'", x, "\',"), res) ''' LITERAL_LIST = ( 'ARGS', 'C_NULL', 'DEPOT_PATH', 'ENDIAN_BOM', 'ENV', 'Inf', 'Inf16', 'Inf32', 'Inf64', 'InsertionSort', 'LOAD_PATH', 'MergeSort', 'NaN', 'NaN16', 'NaN32', 'NaN64', 'PROGRAM_FILE', 'QuickSort', 'RoundDown', 'RoundFromZero', 'RoundNearest', 'RoundNearestTiesAway', 'RoundNearestTiesUp', 'RoundToZero', 'RoundUp', 'VERSION', 'devnull', 'false', 'im', 'missing', 'nothing', 'pi', 'stderr', 'stdin', 'stdout', 'true', 'undef', 'π', 'ℯ', )
10,708
Python
24.992718
111
0.390082