file_path (string, 20 to 202 chars) | content (string, 9 to 3.85M chars) | size (int64, 9 to 3.85M) | lang (string, 9 classes) | avg_line_length (float64, 3.33 to 100) | max_line_length (int64, 8 to 993) | alphanum_fraction (float64, 0.26 to 0.93)
---|---|---|---|---|---|---
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/textedit.py | """
pygments.lexers.textedit
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for languages related to text processing.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import RegexLexer, bygroups, default, include, this, using
from pygments.lexers.python import PythonLexer
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text, Whitespace
__all__ = ['AwkLexer', 'SedLexer', 'VimLexer']
class AwkLexer(RegexLexer):
"""
For Awk scripts.
.. versionadded:: 1.5
"""
name = 'Awk'
aliases = ['awk', 'gawk', 'mawk', 'nawk']
filenames = ['*.awk']
mimetypes = ['application/x-awk']
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'#.*$', Comment.Single)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'\B', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|\|\||&&|in\b|\$|!?~|'
r'(\*\*|[-<>+*%\^/!=|])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(break|continue|do|while|exit|for|if|else|'
r'return)\b', Keyword, 'slashstartsregex'),
(r'function\b', Keyword.Declaration, 'slashstartsregex'),
(r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|'
r'length|match|split|sprintf|sub|substr|tolower|toupper|close|'
r'fflush|getline|next|nextfile|print|printf|strftime|systime|'
r'delete|system)\b', Keyword.Reserved),
(r'(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|'
r'FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORS|RLENGTH|RS|'
r'RSTART|RT|SUBSEP)\b', Name.Builtin),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
]
}
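# Illustrative usage sketch (not part of the original module): a RegexLexer
# defined through a ``tokens`` table like the one above is normally driven
# through the public Pygments API rather than called directly.
def _demo_awk_usage():
    # Hypothetical helper, never called by the module itself: tokenize a tiny
    # Awk one-liner and return its (token type, text) pairs.
    code = '{ sum += $1 } END { print sum }'
    return list(AwkLexer().get_tokens(code))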
class SedLexer(RegexLexer):
"""
Lexer for Sed script files.
"""
name = 'Sed'
aliases = ['sed', 'gsed', 'ssed']
filenames = ['*.sed', '*.[gs]sed']
mimetypes = ['text/x-sed']
flags = re.MULTILINE
# Match the contents within delimiters such as /<contents>/
_inside_delims = r'((?:(?:\\[^\n]|[^\\])*?\\\n)*?(?:\\.|[^\\])*?)'
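    # For example, in "s/foo/bar/g" the first captured span is "foo" and the
    # second is "bar"; escaped delimiters such as "\/" and backslash-continued
    # lines stay inside the capture. (Illustrative comment, not in the original.)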
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#.*$', Comment.Single),
(r'[0-9]+', Number.Integer),
(r'\$', Operator),
(r'[{};,!]', Punctuation),
(r'[dDFgGhHlnNpPqQxz=]', Keyword),
(r'([berRtTvwW:])([^;\n]*)', bygroups(Keyword, String.Single)),
(r'([aci])((?:.*?\\\n)*(?:.*?[^\\]$))', bygroups(Keyword, String.Double)),
(r'([qQ])([0-9]*)', bygroups(Keyword, Number.Integer)),
(r'(/)' + _inside_delims + r'(/)', bygroups(Punctuation, String.Regex, Punctuation)),
(r'(\\(.))' + _inside_delims + r'(\2)',
bygroups(Punctuation, None, String.Regex, Punctuation)),
(r'(y)(.)' + _inside_delims + r'(\2)' + _inside_delims + r'(\2)',
bygroups(Keyword, Punctuation, String.Single, Punctuation, String.Single, Punctuation)),
(r'(s)(.)' + _inside_delims + r'(\2)' + _inside_delims + r'(\2)((?:[gpeIiMm]|[0-9])*)',
bygroups(Keyword, Punctuation, String.Regex, Punctuation, String.Single, Punctuation,
Keyword))
]
}
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
.. versionadded:: 0.8
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
'_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
_python = r'py(?:t(?:h(?:o(?:n)?)?)?)?'
tokens = {
'root': [
(r'^([ \t:]*)(' + _python + r')([ \t]*)(<<)([ \t]*)(.*)((?:\n|.)*)(\6)',
bygroups(using(this), Keyword, Text, Operator, Text, Text,
using(PythonLexer), Text)),
(r'^([ \t:]*)(' + _python + r')([ \t])(.*)',
bygroups(using(this), Keyword, Text, using(PythonLexer))),
(r'^\s*".*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/[^/\\\n]*(?:\\[\s\S][^/\\\n]*)*/', String.Regex),
(r'"[^"\\\n]*(?:\\[\s\S][^"\\\n]*)*"', String.Double),
(r"'[^\n']*(?:''[^\n']*)*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vim_builtins import auto, command, option
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it, so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w:
return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
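    # Illustrative example (not in the original): given a hypothetical mapping
    # entry ('ab', 'abbreviate'), read as (shortest form, full command),
    # is_in('abbre', mapping) is True because 'ab' is a prefix of 'abbre' and
    # 'abbre' is a prefix of 'abbreviate', while is_in('abroad', mapping) is False.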
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
| 7,609 | Python | 36.487685 | 101 | 0.479827 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/actionscript.py | """
pygments.lexers.actionscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ActionScript and MXML.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, using, this, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ActionScriptLexer', 'ActionScript3Lexer', 'MxmlLexer']
class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
.. versionadded:: 0.9
"""
name = 'ActionScript'
aliases = ['actionscript', 'as']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
flags = re.DOTALL
tokens = {
'root': [
(r'\s+', Whitespace),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\[^\\]|[^/\\\n])*/[gim]*', String.Regex),
(r'[~^*!%&<>|+=:;,/?\\-]+', Operator),
(r'[{}\[\]();.]+', Punctuation),
(words((
'case', 'default', 'for', 'each', 'in', 'while', 'do', 'break',
'return', 'continue', 'if', 'else', 'throw', 'try', 'catch',
'var', 'with', 'new', 'typeof', 'arguments', 'instanceof', 'this',
'switch'), suffix=r'\b'),
Keyword),
(words((
'class', 'public', 'final', 'internal', 'native', 'override', 'private',
'protected', 'static', 'import', 'extends', 'implements', 'interface',
'intrinsic', 'return', 'super', 'dynamic', 'function', 'const', 'get',
'namespace', 'package', 'set'), suffix=r'\b'),
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
Keyword.Constant),
(words((
'Accessibility', 'AccessibilityProperties', 'ActionScriptVersion',
'ActivityEvent', 'AntiAliasType', 'ApplicationDomain', 'AsBroadcaster', 'Array',
'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap', 'BitmapData',
'BitmapDataChannel', 'BitmapFilter', 'BitmapFilterQuality', 'BitmapFilterType',
'BlendMode', 'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities', 'CapsStyle',
'Class', 'Color', 'ColorMatrixFilter', 'ColorTransform', 'ContextMenu',
'ContextMenuBuiltInItems', 'ContextMenuEvent', 'ContextMenuItem',
'ConvolutionFilter', 'CSMSettings', 'DataEvent', 'Date', 'DefinitionError',
'DeleteObjectSample', 'Dictionary', 'DisplacementMapFilter', 'DisplayObject',
'DisplacementMapFilterMode', 'DisplayObjectContainer', 'DropShadowFilter',
'Endian', 'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event', 'EventDispatcher',
'EventPhase', 'ExternalInterface', 'FileFilter', 'FileReference',
'FileReferenceList', 'FocusDirection', 'FocusEvent', 'Font', 'FontStyle', 'FontType',
'FrameLabel', 'FullScreenEvent', 'Function', 'GlowFilter', 'GradientBevelFilter',
'GradientGlowFilter', 'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent',
'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput', 'IDynamicPropertyOutput',
'IDynamicPropertyWriter', 'IEventDispatcher', 'IExternalizable',
'IllegalOperationError', 'IME', 'IMEConversionMode', 'IMEEvent', 'int',
'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError', 'InvokeEvent',
'IOError', 'IOErrorEvent', 'JointStyle', 'Key', 'Keyboard', 'KeyboardEvent', 'KeyLocation',
'LineScaleMode', 'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars', 'LocalConnection',
'Locale', 'Math', 'Matrix', 'MemoryError', 'Microphone', 'MorphShape', 'Mouse', 'MouseEvent',
'MovieClip', 'MovieClipLoader', 'Namespace', 'NetConnection', 'NetStatusEvent',
'NetStream', 'NewObjectSample', 'Number', 'Object', 'ObjectEncoding', 'PixelSnapping',
'Point', 'PrintJob', 'PrintJobOptions', 'PrintJobOrientation', 'ProgressEvent', 'Proxy',
'QName', 'RangeError', 'Rectangle', 'ReferenceError', 'RegExp', 'Responder', 'Sample',
'Scene', 'ScriptTimeoutError', 'Security', 'SecurityDomain', 'SecurityError',
'SecurityErrorEvent', 'SecurityPanel', 'Selection', 'Shape', 'SharedObject',
'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound', 'SoundChannel',
'SoundLoaderContext', 'SoundMixer', 'SoundTransform', 'SpreadMethod', 'Sprite',
'StackFrame', 'StackOverflowError', 'Stage', 'StageAlign', 'StageDisplayState',
'StageQuality', 'StageScaleMode', 'StaticText', 'StatusEvent', 'String', 'StyleSheet',
'SWFVersion', 'SyncEvent', 'SyntaxError', 'System', 'TextColorType', 'TextField',
'TextFieldAutoSize', 'TextFieldType', 'TextFormat', 'TextFormatAlign',
'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer', 'TimerEvent', 'Transform',
'TypeError', 'uint', 'URIError', 'URLLoader', 'URLLoaderDataFormat', 'URLRequest',
'URLRequestHeader', 'URLRequestMethod', 'URLStream', 'URLVariables', 'VerifyError',
'Video', 'XML', 'XMLDocument', 'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket',
'XMLUI'), suffix=r'\b'),
Name.Builtin),
(words((
'decodeURI', 'decodeURIComponent', 'encodeURI', 'escape', 'eval', 'isFinite', 'isNaN',
'isXMLName', 'clearInterval', 'fscommand', 'getTimer', 'getURL', 'getVersion',
'parseFloat', 'parseInt', 'setInterval', 'trace', 'updateAfterEvent',
'unescape'), suffix=r'\b'),
Name.Function),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
]
}
def analyse_text(text):
"""This is only used to disambiguate between ActionScript and
ActionScript3. We return 0 here; the ActionScript3 lexer will match
AS3 variable definitions and that will hopefully suffice."""
return 0
class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
.. versionadded:: 0.11
"""
name = 'ActionScript 3'
url = 'https://help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/index.html'
aliases = ['actionscript3', 'as3']
filenames = ['*.as']
mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
'text/actionscript3']
identifier = r'[$a-zA-Z_]\w*'
typeidentifier = identifier + r'(?:\.<\w+>)?'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'\s+', Whitespace),
(r'(function\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword.Declaration, Name.Function, Text, Operator),
'funcparams'),
(r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r')',
bygroups(Keyword.Declaration, Whitespace, Name, Whitespace, Punctuation, Whitespace,
Keyword.Type)),
(r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
bygroups(Keyword, Whitespace, Name.Namespace, Whitespace)),
(r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
bygroups(Keyword, Whitespace, Keyword.Type, Whitespace, Operator)),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\[^\\]|[^\\\n])*/[gisx]*', String.Regex),
(r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
r'switch|import|include|as|is)\b',
Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
Keyword.Constant),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b', Name.Function),
(identifier, Name),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[~^*!%&<>|+=:;,/?\\{}\[\]().-]+', Operator),
],
'funcparams': [
(r'\s+', Whitespace),
(r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r'|\*)(\s*)',
bygroups(Whitespace, Punctuation, Name, Whitespace, Operator, Whitespace,
Keyword.Type, Whitespace), 'defval'),
(r'\)', Operator, 'type')
],
'type': [
(r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
bygroups(Whitespace, Operator, Whitespace, Keyword.Type), '#pop:2'),
(r'\s+', Text, '#pop:2'),
default('#pop:2')
],
'defval': [
(r'(=)(\s*)([^(),]+)(\s*)(,?)',
bygroups(Operator, Whitespace, using(this), Whitespace, Operator), '#pop'),
(r',', Operator, '#pop'),
default('#pop')
]
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text):
return 0.3
return 0
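    # Illustrative note (not in the original): re.match anchors at the start of
    # the text, so this probe fires for input beginning with an
    # "identifier : identifier" pattern such as "x:int", which is typical of
    # typed ActionScript 3 declarations.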
class MxmlLexer(RegexLexer):
"""
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
.. versionadded:: 1.1
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
aliases = ['mxml']
filenames = ['*.mxml']
mimetypes = ['text/xml', 'application/xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
bygroups(String, using(ActionScript3Lexer), String)),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Whitespace),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
(r'\s+', Whitespace),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
| 11,676 | Python | 46.467479 | 109 | 0.520041 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/pawn.py | """
pygments.lexers.pawn
~~~~~~~~~~~~~~~~~~~~
Lexers for the Pawn languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import get_bool_opt
__all__ = ['SourcePawnLexer', 'PawnLexer']
class SourcePawnLexer(RegexLexer):
"""
For SourcePawn source code with preprocessor directives.
.. versionadded:: 1.6
"""
name = 'SourcePawn'
aliases = ['sp']
filenames = ['*.sp']
mimetypes = ['text/x-sourcepawn']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'root': [
# preprocessor directives: without whitespace
(r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws1 + '#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(r'[{}]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(case|const|continue|native|'
r'default|else|enum|for|if|new|operator|'
r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
SM_TYPES = {'Action', 'bool', 'Float', 'Plugin', 'String', 'any',
'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
'ConVarBounds', 'QueryCookie', 'ReplySource',
'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
'EventHook', 'FileType', 'FileTimeMode', 'PathType',
'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
'TopMenuPosition', 'TopMenuObject', 'UserMsg'}
def __init__(self, **options):
self.smhighlighting = get_bool_opt(options,
'sourcemod', True)
self._functions = set()
if self.smhighlighting:
from pygments.lexers._sourcemod_builtins import FUNCTIONS
self._functions.update(FUNCTIONS)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.smhighlighting:
if value in self.SM_TYPES:
token = Keyword.Type
elif value in self._functions:
token = Name.Builtin
yield index, token, value
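# Illustrative usage sketch (not part of the original module): the 'sourcemod'
# option read by __init__ above can be turned off when SourceMod-specific
# types and functions should stay as plain names.
def _demo_sourcepawn_option():
    # Hypothetical helper, never called by the module itself.
    plain = SourcePawnLexer(sourcemod=False)
    return list(plain.get_tokens('public void OnPluginStart() { }'))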
class PawnLexer(RegexLexer):
"""
For Pawn source code.
.. versionadded:: 2.0
"""
name = 'Pawn'
aliases = ['pawn']
filenames = ['*.p', '*.pwn', '*.inc']
mimetypes = ['text/x-pawn']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*][\w\W]*?[*]/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'root': [
# preprocessor directives: without whitespace
(r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws1 + '#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*[\w\W]*?\*(\\\n)?/', Comment.Multiline),
(r'[{}]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(switch|case|default|const|new|static|char|continue|break|'
r'if|else|for|while|do|operator|enum|'
r'public|return|sizeof|tagof|state|goto)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
def analyse_text(text):
"""This is basically C. There is a keyword which doesn't exist in C
though and is nearly unique to this language."""
if 'tagof' in text:
return 0.01
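        # Illustrative note (not in the original): any input containing the
        # token 'tagof' (e.g. "state = tagof(x);") gives this lexer a small
        # positive score when Pygments guesses the language.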
| 8,146 | Python | 39.133005 | 83 | 0.456911 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/jvm.py | """
pygments.lexers.jvm
~~~~~~~~~~~~~~~~~~~
Pygments lexers for JVM languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, combined, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
from pygments.util import shebang_matches
from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'ClojureScriptLexer',
'KotlinLexer', 'XtendLexer', 'AspectJLexer', 'CeylonLexer',
'PigLexer', 'GoloLexer', 'JasminLexer', 'SarlLexer']
class JavaLexer(RegexLexer):
"""
For Java source code.
"""
name = 'Java'
url = 'https://www.oracle.com/technetwork/java/'
aliases = ['java']
filenames = ['*.java']
mimetypes = ['text/x-java']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(r'(^\s*)((?:(?:public|private|protected|static|strictfp)(?:\s+))*)(record)\b',
bygroups(Whitespace, using(this), Keyword.Declaration), 'class'),
(r'[^\S\n]+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline),
# keywords: go before method names to avoid lexing "throw new XYZ"
# as a method signature
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b',
Keyword),
# method names
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Punctuation)),
(r'@[^\W\d][\w.]*', Name.Decorator),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|sealed|static|strictfp|super|synchronized|throws|'
r'transient|volatile|yield)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)\b', Keyword.Declaration, 'class'),
(r'(var)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'var'),
(r'(import(?:\s+static)?)(\s+)', bygroups(Keyword.Namespace, Whitespace),
'import'),
(r'"""\n', String, 'multiline_string'),
(r'"', String, 'string'),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Punctuation,
Name.Attribute)),
(r'^(\s*)(default)(:)', bygroups(Whitespace, Keyword, Punctuation)),
(r'^(\s*)((?:[^\W\d]|\$)[\w$]*)(:)', bygroups(Whitespace, Name.Label,
Punctuation)),
(r'([^\W\d]|\$)[\w$]*', Name),
(r'([0-9][0-9_]*\.([0-9][0-9_]*)?|'
r'\.[0-9][0-9_]*)'
r'([eE][+\-]?[0-9][0-9_]*)?[fFdD]?|'
r'[0-9][eE][+\-]?[0-9][0-9_]*[fFdD]?|'
r'[0-9]([eE][+\-]?[0-9][0-9_]*)?[fFdD]|'
r'0[xX]([0-9a-fA-F][0-9a-fA-F_]*\.?|'
r'([0-9a-fA-F][0-9a-fA-F_]*)?\.[0-9a-fA-F][0-9a-fA-F_]*)'
r'[pP][+\-]?[0-9][0-9_]*[fFdD]?', Number.Float),
(r'0[xX][0-9a-fA-F][0-9a-fA-F_]*[lL]?', Number.Hex),
(r'0[bB][01][01_]*[lL]?', Number.Bin),
(r'0[0-7_]+[lL]?', Number.Oct),
(r'0|[1-9][0-9_]*[lL]?', Number.Integer),
(r'[~^*!%&\[\]<>|+=/?-]', Operator),
(r'[{}();:.,]', Punctuation),
(r'\n', Whitespace)
],
'class': [
(r'\s+', Text),
(r'([^\W\d]|\$)[\w$]*', Name.Class, '#pop')
],
'var': [
(r'([^\W\d]|\$)[\w$]*', Name, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
'multiline_string': [
(r'"""', String, '#pop'),
(r'"', String),
include('string')
],
'string': [
(r'[^\\"]+', String),
(r'\\\\', String), # Escaped backslash
(r'\\"', String), # Escaped quote
(r'\\', String), # Bare backslash
(r'"', String, '#pop'), # Closing quote
],
}
class AspectJLexer(JavaLexer):
"""
For AspectJ source code.
.. versionadded:: 1.6
"""
name = 'AspectJ'
url = 'http://www.eclipse.org/aspectj/'
aliases = ['aspectj']
filenames = ['*.aj']
mimetypes = ['text/x-aspectj']
aj_keywords = {
'aspect', 'pointcut', 'privileged', 'call', 'execution',
'initialization', 'preinitialization', 'handler', 'get', 'set',
'staticinitialization', 'target', 'args', 'within', 'withincode',
'cflow', 'cflowbelow', 'annotation', 'before', 'after', 'around',
'proceed', 'throwing', 'returning', 'adviceexecution', 'declare',
'parents', 'warning', 'error', 'soft', 'precedence', 'thisJoinPoint',
'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart',
'issingleton', 'perthis', 'pertarget', 'percflow', 'percflowbelow',
'pertypewithin', 'lock', 'unlock', 'thisAspectInstance'
}
aj_inter_type = {'parents:', 'warning:', 'error:', 'soft:', 'precedence:'}
aj_inter_type_annotation = {'@type', '@method', '@constructor', '@field'}
def get_tokens_unprocessed(self, text):
for index, token, value in JavaLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.aj_keywords:
yield index, Keyword, value
elif token is Name.Label and value in self.aj_inter_type:
yield index, Keyword, value[:-1]
yield index, Operator, value[-1]
elif token is Name.Decorator and value in self.aj_inter_type_annotation:
yield index, Keyword, value
else:
yield index, token, value
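# Illustrative usage sketch (not part of the original module): AspectJ reuses
# the Java rules and only re-tags tokens afterwards, so a word such as
# 'pointcut', which JavaLexer emits as a plain name, comes back as Keyword.
def _demo_aspectj_retag():
    # Hypothetical helper, never called by the module itself.
    code = 'pointcut services(): within(Server);'
    return list(AspectJLexer().get_tokens(code))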
class ScalaLexer(RegexLexer):
"""
For Scala source code.
"""
name = 'Scala'
url = 'http://www.scala-lang.org'
aliases = ['scala']
filenames = ['*.scala']
mimetypes = ['text/x-scala']
flags = re.MULTILINE | re.DOTALL
opchar = '[!#%&*\\-\\/:?@^' + uni.combine('Sm', 'So') + ']'
letter = '[_\\$' + uni.combine('Ll', 'Lu', 'Lo', 'Nl', 'Lt') + ']'
upperLetter = '[' + uni.combine('Lu', 'Lt') + ']'
letterOrDigit = '(?:%s|[0-9])' % letter
letterOrDigitNoDollarSign = '(?:%s|[0-9])' % letter.replace('\\$', '')
alphaId = '%s+' % letter
simpleInterpolatedVariable = '%s%s*' % (letter, letterOrDigitNoDollarSign)
idrest = '%s%s*(?:(?<=_)%s+)?' % (letter, letterOrDigit, opchar)
idUpper = '%s%s*(?:(?<=_)%s+)?' % (upperLetter, letterOrDigit, opchar)
plainid = '(?:%s|%s+)' % (idrest, opchar)
backQuotedId = r'`[^`]+`'
anyId = r'(?:%s|%s)' % (plainid, backQuotedId)
notStartOfComment = r'(?!//|/\*)'
endOfLineMaybeWithComment = r'(?=\s*(//|$))'
keywords = (
'new', 'return', 'throw', 'classOf', 'isInstanceOf', 'asInstanceOf',
'else', 'if', 'then', 'do', 'while', 'for', 'yield', 'match', 'case',
'catch', 'finally', 'try'
)
operators = (
'<%', '=:=', '<:<', '<%<', '>:', '<:', '=', '==', '!=', '<=', '>=',
'<>', '<', '>', '<-', '←', '->', '→', '=>', '⇒', '?', '@', '|', '-',
'+', '*', '%', '~', '\\'
)
storage_modifiers = (
'private', 'protected', 'synchronized', '@volatile', 'abstract',
'final', 'lazy', 'sealed', 'implicit', 'override', '@transient',
'@native'
)
tokens = {
'root': [
include('whitespace'),
include('comments'),
include('script-header'),
include('imports'),
include('exports'),
include('storage-modifiers'),
include('annotations'),
include('using'),
include('declarations'),
include('inheritance'),
include('extension'),
include('end'),
include('constants'),
include('strings'),
include('symbols'),
include('singleton-type'),
include('inline'),
include('quoted'),
include('keywords'),
include('operators'),
include('punctuation'),
include('names'),
],
# Includes:
'whitespace': [
(r'\s+', Whitespace),
],
'comments': [
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
],
'script-header': [
(r'^#!([^\n]*)$', Comment.Hashbang),
],
'imports': [
(r'\b(import)(\s+)', bygroups(Keyword, Whitespace), 'import-path'),
],
'exports': [
(r'\b(export)(\s+)(given)(\s+)',
bygroups(Keyword, Whitespace, Keyword, Whitespace), 'export-path'),
(r'\b(export)(\s+)', bygroups(Keyword, Whitespace), 'export-path'),
],
'storage-modifiers': [
(words(storage_modifiers, prefix=r'\b', suffix=r'\b'), Keyword),
# Only highlight soft modifiers if they are eventually followed by
# the correct keyword. Note that soft modifiers can be followed by a
# sequence of regular modifiers; [a-z\s]* skips those, and we just
# check that the soft modifier is applied to a supported statement.
(r'\b(transparent|opaque|infix|open|inline)\b(?=[a-z\s]*\b'
r'(def|val|var|given|type|class|trait|object|enum)\b)', Keyword),
],
'annotations': [
(r'@%s' % idrest, Name.Decorator),
],
'using': [
# using is a soft keyword, can only be used in the first position of
# a parameter or argument list.
(r'(\()(\s*)(using)(\s)', bygroups(Punctuation, Whitespace, Keyword, Whitespace)),
],
'declarations': [
(r'\b(def)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
bygroups(Keyword, Whitespace, Name.Function)),
(r'\b(trait)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
bygroups(Keyword, Whitespace, Name.Class)),
(r'\b(?:(case)(\s+))?(class|object|enum)\b(\s*)%s(%s)?' %
(notStartOfComment, anyId),
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class)),
(r'(?<!\.)\b(type)\b(\s*)%s(%s)?' % (notStartOfComment, anyId),
bygroups(Keyword, Whitespace, Name.Class)),
(r'\b(val|var)\b', Keyword.Declaration),
(r'\b(package)(\s+)(object)\b(\s*)%s(%s)?' %
(notStartOfComment, anyId),
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Namespace)),
(r'\b(package)(\s+)', bygroups(Keyword, Whitespace), 'package'),
(r'\b(given)\b(\s*)(%s)' % idUpper,
bygroups(Keyword, Whitespace, Name.Class)),
(r'\b(given)\b(\s*)(%s)?' % anyId,
bygroups(Keyword, Whitespace, Name)),
],
'inheritance': [
(r'\b(extends|with|derives)\b(\s*)'
r'(%s|%s|(?=\([^\)]+=>)|(?=%s)|(?="))?' %
(idUpper, backQuotedId, plainid),
bygroups(Keyword, Whitespace, Name.Class)),
],
'extension': [
(r'\b(extension)(\s+)(?=[\[\(])', bygroups(Keyword, Whitespace)),
],
'end': [
# end is a soft keyword, should only be highlighted in certain cases
(r'\b(end)(\s+)(if|while|for|match|new|extension|val|var)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'\b(end)(\s+)(%s)%s' % (idUpper, endOfLineMaybeWithComment),
bygroups(Keyword, Whitespace, Name.Class)),
(r'\b(end)(\s+)(%s|%s)?%s' %
(backQuotedId, plainid, endOfLineMaybeWithComment),
bygroups(Keyword, Whitespace, Name.Namespace)),
],
'punctuation': [
(r'[{}()\[\];,.]', Punctuation),
(r'(?<!:):(?!:)', Punctuation),
],
'keywords': [
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
],
'operators': [
(r'(%s{2,})(\s+)' % opchar, bygroups(Operator, Whitespace)),
(r'/(?![/*])', Operator),
(words(operators), Operator),
(r'(?<!%s)(!|&&|\|\|)(?!%s)' % (opchar, opchar), Operator),
],
'constants': [
(r'\b(this|super)\b', Name.Builtin.Pseudo),
(r'(true|false|null)\b', Keyword.Constant),
(r'0[xX][0-9a-fA-F_]*', Number.Hex),
(r'([0-9][0-9_]*\.[0-9][0-9_]*|\.[0-9][0-9_]*)'
r'([eE][+-]?[0-9][0-9_]*)?[fFdD]?', Number.Float),
(r'[0-9]+([eE][+-]?[0-9]+)?[fFdD]', Number.Float),
(r'[0-9]+([eE][+-]?[0-9]+)[fFdD]?', Number.Float),
(r'[0-9]+[lL]', Number.Integer.Long),
(r'[0-9]+', Number.Integer),
(r'""".*?"""(?!")', String),
(r'"(\\\\|\\"|[^"])*"', String),
(r"(')(\\.)(')", bygroups(String.Char, String.Escape, String.Char)),
(r"'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
],
"strings": [
(r'[fs]"""', String, 'interpolated-string-triple'),
(r'[fs]"', String, 'interpolated-string'),
(r'raw"(\\\\|\\"|[^"])*"', String),
],
'symbols': [
(r"('%s)(?!')" % plainid, String.Symbol),
],
'singleton-type': [
(r'(\.)(type)\b', bygroups(Punctuation, Keyword)),
],
'inline': [
# inline is a soft modifier, only highlighted if followed by if,
# match or parameters.
(r'\b(inline)(?=\s+(%s|%s)\s*:)' % (plainid, backQuotedId),
Keyword),
(r'\b(inline)\b(?=(?:.(?!\b(?:val|def|given)\b))*\b(if|match)\b)',
Keyword),
],
'quoted': [
# '{...} or ${...}
(r"['$]\{(?!')", Punctuation),
# '[...]
(r"'\[(?!')", Punctuation),
],
'names': [
(idUpper, Name.Class),
(anyId, Name),
],
# States
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'import-path': [
(r'(?<=[\n;:])', Text, '#pop'),
include('comments'),
(r'\b(given)\b', Keyword),
include('qualified-name'),
(r'\{', Punctuation, 'import-path-curly-brace'),
],
'import-path-curly-brace': [
include('whitespace'),
include('comments'),
(r'\b(given)\b', Keyword),
(r'=>', Operator),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation),
(r'[\[\]]', Punctuation),
include('qualified-name'),
],
'export-path': [
(r'(?<=[\n;:])', Text, '#pop'),
include('comments'),
include('qualified-name'),
(r'\{', Punctuation, 'export-path-curly-brace'),
],
'export-path-curly-brace': [
include('whitespace'),
include('comments'),
(r'=>', Operator),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation),
include('qualified-name'),
],
'package': [
(r'(?<=[\n;])', Text, '#pop'),
(r':', Punctuation, '#pop'),
include('comments'),
include('qualified-name'),
],
'interpolated-string-triple': [
(r'"""(?!")', String, '#pop'),
(r'"', String),
include('interpolated-string-common'),
],
'interpolated-string': [
(r'"', String, '#pop'),
include('interpolated-string-common'),
],
'interpolated-string-brace': [
(r'\}', String.Interpol, '#pop'),
(r'\{', Punctuation, 'interpolated-string-nested-brace'),
include('root'),
],
'interpolated-string-nested-brace': [
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
include('root'),
],
# Helpers
'qualified-name': [
(idUpper, Name.Class),
(r'(%s)(\.)' % anyId, bygroups(Name.Namespace, Punctuation)),
(r'\.', Punctuation),
(anyId, Name),
(r'[^\S\n]+', Whitespace),
],
'interpolated-string-common': [
(r'[^"$\\]+', String),
(r'\$\$', String.Escape),
(r'(\$)(%s)' % simpleInterpolatedVariable,
bygroups(String.Interpol, Name)),
(r'\$\{', String.Interpol, 'interpolated-string-brace'),
(r'\\.', String),
],
}
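# Illustrative usage sketch (not part of the original module): soft keywords
# such as 'inline', 'using' and 'end' are only highlighted as Keyword in the
# positions the lookaheads above allow, e.g. 'end' directly followed by 'if'.
def _demo_scala_soft_keywords():
    # Hypothetical helper, never called by the module itself.
    return list(ScalaLexer().get_tokens('if x > 0 then foo() end if'))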
class GosuLexer(RegexLexer):
"""
For Gosu source code.
.. versionadded:: 1.5
"""
name = 'Gosu'
aliases = ['gosu']
filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark']
mimetypes = ['text/x-gosu']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # modifiers etc.
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
(r'[^\S\n]+', Whitespace),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|'
r'index|while|do|continue|break|return|try|catch|finally|this|'
r'throw|new|switch|case|default|eval|super|outer|classpath|'
r'using)\b', Keyword),
(r'(var|delegate|construct|function|private|internal|protected|'
r'public|abstract|override|final|static|extends|transient|'
r'implements|represents|readonly)\b', Keyword.Declaration),
(r'(property)(\s+)(get|set)?', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration)),
(r'(boolean|byte|char|double|float|int|long|short|void|block)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
(r'(true|false|null|NaN|Infinity)\b', Keyword.Constant),
(r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(uses)(\s+)([\w.]+\*?)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
(r'"', String, 'string'),
(r'(\??[.#])([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'(:)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_$]\w*', Name),
(r'and|or|not|[\\~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\n', Whitespace)
],
'templateText': [
(r'(\\<)|(\\\$)', String),
(r'(<%@\s+)(extends|params)',
bygroups(Operator, Name.Decorator), 'stringTemplate'),
(r'<%!--.*?--%>', Comment.Multiline),
(r'(<%)|(<%=)', Operator, 'stringTemplate'),
(r'\$\{', Operator, 'stringTemplateShorthand'),
(r'.', String)
],
'string': [
(r'"', String, '#pop'),
include('templateText')
],
'stringTemplate': [
(r'"', String, 'string'),
(r'%>', Operator, '#pop'),
include('root')
],
'stringTemplateShorthand': [
(r'"', String, 'string'),
(r'\{', Operator, 'stringTemplateShorthand'),
(r'\}', Operator, '#pop'),
include('root')
],
}
class GosuTemplateLexer(Lexer):
"""
For Gosu templates.
.. versionadded:: 1.5
"""
name = 'Gosu Template'
aliases = ['gst']
filenames = ['*.gst']
mimetypes = ['text/x-gosu-template']
def get_tokens_unprocessed(self, text):
lexer = GosuLexer()
stack = ['templateText']
yield from lexer.get_tokens_unprocessed(text, stack)
class GroovyLexer(RegexLexer):
"""
For Groovy source code.
.. versionadded:: 1.5
"""
name = 'Groovy'
url = 'https://groovy-lang.org/'
aliases = ['groovy']
filenames = ['*.groovy','*.gradle']
mimetypes = ['text/x-groovy']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# Groovy allows a file to start with a shebang
(r'#!(.*?)$', Comment.Preproc, 'base'),
default('base'),
],
'base': [
(r'[^\S\n]+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline),
# keywords: go before method names to avoid lexing "throw new XYZ"
# as a method signature
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|in|as)\b',
Keyword),
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'('
r'[a-zA-Z_]\w*' # method name
r'|"(?:\\\\|\\[^\\]|[^"\\])*"' # or double-quoted method name
r"|'(?:\\\\|\\[^\\]|[^'\\])*'" # or single-quoted method name
r')'
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(def|boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Whitespace),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
(r'""".*?"""', String.Double),
(r"'''.*?'''", String.Single),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'\$/((?!/\$).)*/\$', String),
(r'/(\\\\|\\[^\\]|[^/\\])*/', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Whitespace)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
def analyse_text(text):
return shebang_matches(text, r'groovy')
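# Illustrative note (not in the original): shebang_matches() lets guess_lexer()
# pick this lexer for scripts that start with e.g. "#!/usr/bin/env groovy".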
class IokeLexer(RegexLexer):
"""
For Ioke (a strongly typed, dynamic,
prototype-based programming language) source.
.. versionadded:: 1.4
"""
name = 'Ioke'
url = 'https://ioke.org/'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#\{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'\}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
# Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
# Symbols
(r':[\w!:?]+', String.Symbol),
(r'[\w!:?]+:(?![\w!?])', String.Other),
(r':"(\\\\|\\[^\\]|[^"\\])*"', String.Symbol),
# Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
r'|(?<=dsyntax\())(\s*)"', String.Doc, 'documentation'),
# Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
# Mimic
(r'\w[\w!:?]+(?=\s*=.*mimic\s)', Name.Entity),
# Assignment
(r'[a-zA-Z_][\w!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))',
Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
r'with)(?![\w!:?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![\w!:?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
r'(?![\w!:?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![\w!:?])', Keyword),
# DefaultBehaviour Literals
(r'(dict|list|message|set)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
r'case:otherwise|case:xor)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
r'(?![\w!:?])', Keyword),
# DefaultBehaviour Aspects
(r'(after|around|before)(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
r'(?![\w!:?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
# DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
r'documentation|identity|removeCell!|undefineCell)'
r'(?![\w!:?])', Keyword),
# DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|'
r'internal:createDecimal|internal:createNumber|'
r'internal:createRegexp|internal:createText)'
r'(?![\w!:?])', Keyword.Reserved),
# DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|'
r'invokeRestart|rescue|restart|signal\!|warn\!)'
r'(?![\w!:?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![\w!:?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
r'Conditions|Definitions|FlowControl|Internal|Literals|'
r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
r'System|Text|Tuple)(?![\w!:?])', Name.Builtin),
# functions
('(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
'(?![\\w!:?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
r'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![\w!?])',
Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|\{|\})', Punctuation),
# kinds
(r'[A-Z][\w!:?]*', Name.Class),
# default cellnames
(r'[a-z_][\w!:?]*', Name)
]
}
class ClojureLexer(RegexLexer):
"""
Lexer for Clojure source code.
.. versionadded:: 0.11
"""
name = 'Clojure'
url = 'http://clojure.org/'
aliases = ['clojure', 'clj']
filenames = ['*.clj', '*.cljc']
mimetypes = ['text/x-clojure', 'application/x-clojure']
special_forms = (
'.', 'def', 'do', 'fn', 'if', 'let', 'new', 'quote', 'var', 'loop'
)
# It's safe to consider 'ns' a declaration thing because it defines a new
# namespace.
declarations = (
'def-', 'defn', 'defn-', 'defmacro', 'defmulti', 'defmethod',
'defstruct', 'defonce', 'declare', 'definline', 'definterface',
'defprotocol', 'defrecord', 'deftype', 'defproject', 'ns'
)
builtins = (
'*', '+', '-', '->', '/', '<', '<=', '=', '==', '>', '>=', '..',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly', 'cond', 'if-not',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush', 'for',
'fnseq', 'frest', 'gensym', 'get-proxy-class', 'get',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list*', 'list', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper')
# valid names for identifiers
# well, names simply can't consist entirely of numbers,
# but this should be good enough for now
# TODO / should divide keywords/symbols into namespace/rest
# but that's hard, so just pretend / is part of the name
valid_name = r'(?!#)[\w!$%*+<=>?/.#|-]+'
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r',+', Text),
(r'\s+', Whitespace),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+/\d+', Number),
(r'-?\d+', Number.Integer),
(r'0x-?[abcdef\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\(.|[a-z]+)", String.Char),
# keywords
(r'::?#?' + valid_name, String.Symbol),
# special operators
(r'~@|[`\'#^~&@]', Operator),
# highlight the special forms
(words(special_forms, suffix=' '), Keyword),
# Technically, only the special forms are 'keywords'. The problem
# is that only treating them as keywords means that things like
# 'defn' and 'ns' need to be highlighted as builtins. This is ugly
# and weird for most styles. So, as a compromise we're going to
# highlight them as Keyword.Declarations.
(words(declarations, suffix=' '), Keyword.Declaration),
# highlight the builtins
(words(builtins, suffix=' '), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
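# Illustrative usage sketch (not part of the original module): with the word
# lists above, 'defn' and 'ns' come out as Keyword.Declaration, the special
# forms as Keyword, and names such as 'map' or 'inc' as Name.Builtin.
def _demo_clojure_tokens():
    # Hypothetical helper, never called by the module itself.
    return list(ClojureLexer().get_tokens('(defn inc-all [xs] (map inc xs))'))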
class ClojureScriptLexer(ClojureLexer):
"""
Lexer for ClojureScript source code.
.. versionadded:: 2.0
"""
name = 'ClojureScript'
url = 'http://clojure.org/clojurescript'
aliases = ['clojurescript', 'cljs']
filenames = ['*.cljs']
mimetypes = ['text/x-clojurescript', 'application/x-clojurescript']
class TeaLangLexer(RegexLexer):
"""
For Tea source code. Only used within a
TeaTemplateLexer.
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
(r'[^\S\n]+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w\.]*', Name.Decorator),
(r'(and|break|else|foreach|if|in|not|or|reverse)\b',
Keyword),
(r'(as|call|define)\b', Keyword.Declaration),
(r'(true|false|null)\b', Keyword.Constant),
(r'(template)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'template'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_\$]\w*', Name),
(r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Whitespace)
],
'template': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
class CeylonLexer(RegexLexer):
"""
For Ceylon source code.
.. versionadded:: 1.6
"""
name = 'Ceylon'
url = 'http://ceylon-lang.org/'
aliases = ['ceylon']
filenames = ['*.ceylon']
mimetypes = ['text/x-ceylon']
flags = re.MULTILINE | re.DOTALL
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
(r'[^\S\n]+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*', Comment.Multiline, 'comment'),
(r'(shared|abstract|formal|default|actual|variable|deprecated|small|'
r'late|literal|doc|by|see|throws|optional|license|tagged|final|native|'
r'annotation|sealed)\b', Name.Decorator),
(r'(break|case|catch|continue|else|finally|for|in|'
r'if|return|switch|this|throw|try|while|is|exists|dynamic|'
r'nonempty|then|outer|assert|let)\b', Keyword),
(r'(abstracts|extends|satisfies|'
r'super|given|of|out|assign)\b', Keyword.Declaration),
(r'(function|value|void|new)\b',
Keyword.Type),
(r'(assembly|module|package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface|object|alias)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
(r'(\.)([a-z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_]\w*', Name),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'\d{1,3}(_\d{3})+\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'\d{1,3}(_\d{3})+\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'[0-9][0-9]*\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'[0-9][0-9]*\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'#([0-9a-fA-F]{4})(_[0-9a-fA-F]{4})+', Number.Hex),
(r'#[0-9a-fA-F]+', Number.Hex),
(r'\$([01]{4})(_[01]{4})+', Number.Bin),
(r'\$[01]+', Number.Bin),
(r'\d{1,3}(_\d{3})+[kMGTP]?', Number.Integer),
(r'[0-9]+[kMGTP]?', Number.Integer),
(r'\n', Whitespace)
],
'class': [
(r'[A-Za-z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[a-z][\w.]*',
Name.Namespace, '#pop')
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
class KotlinLexer(RegexLexer):
"""
For Kotlin source code.
.. versionadded:: 1.5
"""
name = 'Kotlin'
url = 'http://kotlinlang.org/'
aliases = ['kotlin']
filenames = ['*.kt', '*.kts']
mimetypes = ['text/x-kotlin']
flags = re.MULTILINE | re.DOTALL
kt_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
'Mn', 'Mc') + ']*')
kt_space_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
'Mn', 'Mc', 'Zs')
+ r'\'~!%^&*()+=|\[\]:;,.<>/\?-]*')
kt_id = '(' + kt_name + '|`' + kt_space_name + '`)'
modifiers = (r'actual|abstract|annotation|companion|const|crossinline|'
r'data|enum|expect|external|final|infix|inline|inner|'
r'internal|lateinit|noinline|open|operator|override|private|'
r'protected|public|sealed|suspend|tailrec|value')
tokens = {
'root': [
# Whitespaces
(r'[^\S\n]+', Whitespace),
(r'\s+', Whitespace),
(r'\\$', String.Escape), # line continuation
(r'\n', Whitespace),
# Comments
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'^(#!/.+?)(\n)', bygroups(Comment.Single, Whitespace)), # shebang for kotlin scripts
(r'/[*].*?[*]/', Comment.Multiline),
# Keywords
(r'as\?', Keyword),
(r'(as|break|by|catch|constructor|continue|do|dynamic|else|finally|'
r'get|for|if|init|[!]*in|[!]*is|out|reified|return|set|super|this|'
r'throw|try|typealias|typeof|vararg|when|where|while)\b', Keyword),
(r'it\b', Name.Builtin),
# Built-in types
(words(('Boolean?', 'Byte?', 'Char?', 'Double?', 'Float?',
'Int?', 'Long?', 'Short?', 'String?', 'Any?', 'Unit?')), Keyword.Type),
(words(('Boolean', 'Byte', 'Char', 'Double', 'Float',
'Int', 'Long', 'Short', 'String', 'Any', 'Unit'), suffix=r'\b'), Keyword.Type),
# Constants
(r'(true|false|null)\b', Keyword.Constant),
# Imports
(r'(package|import)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Namespace)),
# Dot access
(r'(\?\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Operator, Name.Attribute)),
(r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Punctuation, Name.Attribute)),
# Annotations
(r'@[^\W\d][\w.]*', Name.Decorator),
# Labels
(r'[^\W\d][\w.]+@', Name.Decorator),
# Object expression
(r'(object)(\s+)(:)(\s+)', bygroups(Keyword, Whitespace, Punctuation, Whitespace), 'class'),
# Types
(r'((?:(?:' + modifiers + r'|fun)\s+)*)(class|interface|object)(\s+)',
bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'class'),
# Variables
(r'(var|val)(\s+)(\()', bygroups(Keyword.Declaration, Whitespace, Punctuation),
'destructuring_assignment'),
(r'((?:(?:' + modifiers + r')\s+)*)(var|val)(\s+)',
bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'variable'),
# Functions
(r'((?:(?:' + modifiers + r')\s+)*)(fun)(\s+)',
bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'function'),
# Operators
(r'::|!!|\?[:.]', Operator),
(r'[~^*!%&\[\]<>|+=/?-]', Operator),
# Punctuation
(r'[{}();:.,]', Punctuation),
# Strings
(r'"""', String, 'multiline_string'),
(r'"', String, 'string'),
(r"'\\.'|'[^\\]'", String.Char),
# Numbers
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
# Identifiers
(r'' + kt_id + r'((\?[^.])?)', Name) # additionally handle nullable types
],
'class': [
(kt_id, Name.Class, '#pop')
],
'variable': [
(kt_id, Name.Variable, '#pop')
],
'destructuring_assignment': [
(r',', Punctuation),
(r'\s+', Whitespace),
(kt_id, Name.Variable),
(r'(:)(\s+)(' + kt_id + ')', bygroups(Punctuation, Whitespace, Name)),
(r'<', Operator, 'generic'),
(r'\)', Punctuation, '#pop')
],
'function': [
(r'<', Operator, 'generic'),
(r'' + kt_id + r'(\.)' + kt_id, bygroups(Name, Punctuation, Name.Function), '#pop'),
(kt_id, Name.Function, '#pop')
],
'generic': [
(r'(>)(\s*)', bygroups(Operator, Whitespace), '#pop'),
(r':', Punctuation),
(r'(reified|out|in)\b', Keyword),
(r',', Punctuation),
(r'\s+', Whitespace),
(kt_id, Name)
],
'modifiers': [
(r'\w+', Keyword.Declaration),
(r'\s+', Whitespace),
default('#pop')
],
'string': [
(r'"', String, '#pop'),
include('string_common')
],
'multiline_string': [
(r'"""', String, '#pop'),
(r'"', String),
include('string_common')
],
'string_common': [
(r'\\\\', String), # escaped backslash
(r'\\"', String), # escaped quote
(r'\\', String), # bare backslash
(r'\$\{', String.Interpol, 'interpolation'),
(r'(\$)(\w+)', bygroups(String.Interpol, Name)),
(r'[^\\"$]+', String)
],
'interpolation': [
(r'"', String),
(r'\$\{', String.Interpol, 'interpolation'),
(r'\{', Punctuation, 'scope'),
(r'\}', String.Interpol, '#pop'),
include('root')
],
'scope': [
(r'\{', Punctuation, 'scope'),
(r'\}', Punctuation, '#pop'),
include('root')
]
}
class XtendLexer(RegexLexer):
"""
For Xtend source code.
.. versionadded:: 1.6
"""
name = 'Xtend'
url = 'https://www.eclipse.org/xtend/'
aliases = ['xtend']
filenames = ['*.xtend']
mimetypes = ['text/x-xtend']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_$][\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
(r'[^\S\n]+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|IF|'
r'ELSE|ELSEIF|ENDIF|FOR|ENDFOR|SEPARATOR|BEFORE|AFTER)\b',
Keyword),
(r'(def|abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Whitespace),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
(r"(''')", String, 'template'),
(r'(\u00BB)', String, 'template'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Whitespace)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
'template': [
(r"'''", String, '#pop'),
(r'\u00AB', String, '#pop'),
(r'.', String)
],
}
class PigLexer(RegexLexer):
"""
For Pig Latin source code.
.. versionadded:: 2.0
"""
name = 'Pig'
url = 'https://pig.apache.org/'
aliases = ['pig']
filenames = ['*.pig']
mimetypes = ['text/x-pig']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Whitespace),
(r'--.*', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'\\$', String.Escape),
(r'\\', Text),
(r'\'(?:\\[ntbrf\\\']|\\u[0-9a-f]{4}|[^\'\\\n\r])*\'', String),
include('keywords'),
include('types'),
include('builtins'),
include('punct'),
include('operators'),
(r'[0-9]*\.[0-9]+(e[0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Whitespace),
(r'([a-z_]\w*)(\s*)(\()',
bygroups(Name.Function, Whitespace, Punctuation)),
(r'[()#:]', Text),
(r'[^(:#\'")\s]+', Text),
(r'\S+\s+', Text) # TODO: make tests pass without \s+
],
'keywords': [
(r'(assert|and|any|all|arrange|as|asc|bag|by|cache|CASE|cat|cd|cp|'
r'%declare|%default|define|dense|desc|describe|distinct|du|dump|'
             r'eval|exec|explain|filter|flatten|foreach|full|generate|group|'
r'help|if|illustrate|import|inner|input|into|is|join|kill|left|'
r'limit|load|ls|map|matches|mkdir|mv|not|null|onschema|or|order|'
r'outer|output|parallel|pig|pwd|quit|register|returns|right|rm|'
r'rmf|rollup|run|sample|set|ship|split|stderr|stdin|stdout|store|'
r'stream|through|union|using|void)\b', Keyword)
],
'builtins': [
(r'(AVG|BinStorage|cogroup|CONCAT|copyFromLocal|copyToLocal|COUNT|'
r'cross|DIFF|MAX|MIN|PigDump|PigStorage|SIZE|SUM|TextLoader|'
r'TOKENIZE)\b', Name.Builtin)
],
'types': [
(r'(bytearray|BIGINTEGER|BIGDECIMAL|chararray|datetime|double|float|'
r'int|long|tuple)\b', Keyword.Type)
],
'punct': [
(r'[;(){}\[\]]', Punctuation),
],
'operators': [
(r'[#=,./%+\-?]', Operator),
(r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
(r'(==|<=|<|>=|>|!=)', Operator),
],
}
class GoloLexer(RegexLexer):
"""
For Golo source code.
.. versionadded:: 2.0
"""
name = 'Golo'
url = 'http://golo-lang.org/'
filenames = ['*.golo']
aliases = ['golo']
tokens = {
'root': [
(r'[^\S\n]+', Whitespace),
(r'#.*$', Comment),
(r'(\^|\.\.\.|:|\?:|->|==|!=|=|\+|\*|%|/|<=|<|>=|>|=|\.)',
Operator),
(r'(?<=[^-])(-)(?=[^-])', Operator),
(r'(?<=[^`])(is|isnt|and|or|not|oftype|in|orIfNull)\b', Operator.Word),
(r'[]{}|(),[]', Punctuation),
(r'(module|import)(\s+)',
bygroups(Keyword.Namespace, Whitespace),
'modname'),
(r'\b([a-zA-Z_][\w$.]*)(::)', bygroups(Name.Namespace, Punctuation)),
(r'\b([a-zA-Z_][\w$]*(?:\.[a-zA-Z_][\w$]*)+)\b', Name.Namespace),
(r'(let|var)(\s+)',
bygroups(Keyword.Declaration, Whitespace),
'varname'),
(r'(struct)(\s+)',
bygroups(Keyword.Declaration, Whitespace),
'structname'),
(r'(function)(\s+)',
bygroups(Keyword.Declaration, Whitespace),
'funcname'),
(r'(null|true|false)\b', Keyword.Constant),
(r'(augment|pimp'
r'|if|else|case|match|return'
r'|case|when|then|otherwise'
r'|while|for|foreach'
r'|try|catch|finally|throw'
r'|local'
r'|continue|break)\b', Keyword),
(r'(map|array|list|set|vector|tuple)(\[)',
bygroups(Name.Builtin, Punctuation)),
(r'(print|println|readln|raise|fun'
r'|asInterfaceInstance)\b', Name.Builtin),
(r'(`?[a-zA-Z_][\w$]*)(\()',
bygroups(Name.Function, Punctuation)),
(r'-?[\d_]*\.[\d_]*([eE][+-]?\d[\d_]*)?F?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'-?\d[\d_]*L', Number.Integer.Long),
(r'-?\d[\d_]*', Number.Integer),
(r'`?[a-zA-Z_][\w$]*', Name),
(r'@[a-zA-Z_][\w$.]*', Name.Decorator),
(r'"""', String, combined('stringescape', 'triplestring')),
(r'"', String, combined('stringescape', 'doublestring')),
(r"'", String, combined('stringescape', 'singlestring')),
(r'----((.|\n)*?)----', String.Doc)
],
'funcname': [
(r'`?[a-zA-Z_][\w$]*', Name.Function, '#pop'),
],
'modname': [
(r'[a-zA-Z_][\w$.]*\*?', Name.Namespace, '#pop')
],
'structname': [
(r'`?[\w.]+\*?', Name.Class, '#pop')
],
'varname': [
(r'`?[a-zA-Z_][\w$]*', Name.Variable, '#pop'),
],
'string': [
(r'[^\\\'"\n]+', String),
(r'[\'"\\]', String)
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'triplestring': [
(r'"""', String, '#pop'),
include('string'),
(r'\n', String),
],
'doublestring': [
(r'"', String.Double, '#pop'),
include('string'),
],
'singlestring': [
(r"'", String, '#pop'),
include('string'),
],
'operators': [
(r'[#=,./%+\-?]', Operator),
(r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
(r'(==|<=|<|>=|>|!=)', Operator),
],
}
class JasminLexer(RegexLexer):
"""
For Jasmin assembly code.
.. versionadded:: 2.0
"""
name = 'Jasmin'
url = 'http://jasmin.sourceforge.net/'
aliases = ['jasmin', 'jasminxt']
filenames = ['*.j']
_whitespace = r' \n\t\r'
_ws = r'(?:[%s]+)' % _whitespace
_separator = r'%s:=' % _whitespace
_break = r'(?=[%s]|$)' % _separator
_name = r'[^%s]+' % _separator
_unqualified_name = r'(?:[^%s.;\[/]+)' % _separator
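    # A "name" is any run of characters other than whitespace, ':' or '='.
    # _break is a zero-width lookahead asserting that a token ends at such a
    # separator (or end of input), so keywords only match as complete words.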
tokens = {
'default': [
(r'\n', Whitespace, '#pop'),
(r"'", String.Single, ('#pop', 'quote')),
(r'"', String.Double, 'string'),
(r'=', Punctuation),
(r':', Punctuation, 'label'),
(_ws, Whitespace),
(r';.*', Comment.Single),
(r'(\$[-+])?0x-?[\da-fA-F]+%s' % _break, Number.Hex),
(r'(\$[-+]|\+)?-?\d+%s' % _break, Number.Integer),
(r'-?(\d+\.\d*|\.\d+)([eE][-+]?\d+)?[fFdD]?'
r'[\x00-\x08\x0b\x0c\x0e-\x1f]*%s' % _break, Number.Float),
(r'\$%s' % _name, Name.Variable),
# Directives
(r'\.annotation%s' % _break, Keyword.Reserved, 'annotation'),
(r'(\.attribute|\.bytecode|\.debug|\.deprecated|\.enclosing|'
r'\.interface|\.line|\.signature|\.source|\.stack|\.var|abstract|'
r'annotation|bridge|class|default|enum|field|final|fpstrict|'
r'interface|native|private|protected|public|signature|static|'
r'synchronized|synthetic|transient|varargs|volatile)%s' % _break,
Keyword.Reserved),
(r'\.catch%s' % _break, Keyword.Reserved, 'caught-exception'),
(r'(\.class|\.implements|\.inner|\.super|inner|invisible|'
r'invisibleparam|outer|visible|visibleparam)%s' % _break,
Keyword.Reserved, 'class/convert-dots'),
(r'\.field%s' % _break, Keyword.Reserved,
('descriptor/convert-dots', 'field')),
(r'(\.end|\.limit|use)%s' % _break, Keyword.Reserved,
'no-verification'),
(r'\.method%s' % _break, Keyword.Reserved, 'method'),
(r'\.set%s' % _break, Keyword.Reserved, 'var'),
(r'\.throws%s' % _break, Keyword.Reserved, 'exception'),
(r'(from|offset|to|using)%s' % _break, Keyword.Reserved, 'label'),
(r'is%s' % _break, Keyword.Reserved,
('descriptor/convert-dots', 'var')),
(r'(locals|stack)%s' % _break, Keyword.Reserved, 'verification'),
(r'method%s' % _break, Keyword.Reserved, 'enclosing-method'),
# Instructions
(words((
'aaload', 'aastore', 'aconst_null', 'aload', 'aload_0', 'aload_1', 'aload_2',
'aload_3', 'aload_w', 'areturn', 'arraylength', 'astore', 'astore_0', 'astore_1',
'astore_2', 'astore_3', 'astore_w', 'athrow', 'baload', 'bastore', 'bipush',
'breakpoint', 'caload', 'castore', 'd2f', 'd2i', 'd2l', 'dadd', 'daload', 'dastore',
'dcmpg', 'dcmpl', 'dconst_0', 'dconst_1', 'ddiv', 'dload', 'dload_0', 'dload_1',
'dload_2', 'dload_3', 'dload_w', 'dmul', 'dneg', 'drem', 'dreturn', 'dstore', 'dstore_0',
'dstore_1', 'dstore_2', 'dstore_3', 'dstore_w', 'dsub', 'dup', 'dup2', 'dup2_x1',
'dup2_x2', 'dup_x1', 'dup_x2', 'f2d', 'f2i', 'f2l', 'fadd', 'faload', 'fastore', 'fcmpg',
'fcmpl', 'fconst_0', 'fconst_1', 'fconst_2', 'fdiv', 'fload', 'fload_0', 'fload_1',
'fload_2', 'fload_3', 'fload_w', 'fmul', 'fneg', 'frem', 'freturn', 'fstore', 'fstore_0',
'fstore_1', 'fstore_2', 'fstore_3', 'fstore_w', 'fsub', 'i2b', 'i2c', 'i2d', 'i2f', 'i2l',
'i2s', 'iadd', 'iaload', 'iand', 'iastore', 'iconst_0', 'iconst_1', 'iconst_2',
'iconst_3', 'iconst_4', 'iconst_5', 'iconst_m1', 'idiv', 'iinc', 'iinc_w', 'iload',
'iload_0', 'iload_1', 'iload_2', 'iload_3', 'iload_w', 'imul', 'ineg', 'int2byte',
'int2char', 'int2short', 'ior', 'irem', 'ireturn', 'ishl', 'ishr', 'istore', 'istore_0',
'istore_1', 'istore_2', 'istore_3', 'istore_w', 'isub', 'iushr', 'ixor', 'l2d', 'l2f',
'l2i', 'ladd', 'laload', 'land', 'lastore', 'lcmp', 'lconst_0', 'lconst_1', 'ldc2_w',
'ldiv', 'lload', 'lload_0', 'lload_1', 'lload_2', 'lload_3', 'lload_w', 'lmul', 'lneg',
'lookupswitch', 'lor', 'lrem', 'lreturn', 'lshl', 'lshr', 'lstore', 'lstore_0',
'lstore_1', 'lstore_2', 'lstore_3', 'lstore_w', 'lsub', 'lushr', 'lxor',
'monitorenter', 'monitorexit', 'nop', 'pop', 'pop2', 'ret', 'ret_w', 'return', 'saload',
'sastore', 'sipush', 'swap'), suffix=_break), Keyword.Reserved),
(r'(anewarray|checkcast|instanceof|ldc|ldc_w|new)%s' % _break,
Keyword.Reserved, 'class/no-dots'),
(r'invoke(dynamic|interface|nonvirtual|special|'
r'static|virtual)%s' % _break, Keyword.Reserved,
'invocation'),
(r'(getfield|putfield)%s' % _break, Keyword.Reserved,
('descriptor/no-dots', 'field')),
(r'(getstatic|putstatic)%s' % _break, Keyword.Reserved,
('descriptor/no-dots', 'static')),
(words((
'goto', 'goto_w', 'if_acmpeq', 'if_acmpne', 'if_icmpeq',
'if_icmpge', 'if_icmpgt', 'if_icmple', 'if_icmplt', 'if_icmpne',
'ifeq', 'ifge', 'ifgt', 'ifle', 'iflt', 'ifne', 'ifnonnull',
'ifnull', 'jsr', 'jsr_w'), suffix=_break),
Keyword.Reserved, 'label'),
(r'(multianewarray|newarray)%s' % _break, Keyword.Reserved,
'descriptor/convert-dots'),
(r'tableswitch%s' % _break, Keyword.Reserved, 'table')
],
'quote': [
(r"'", String.Single, '#pop'),
(r'\\u[\da-fA-F]{4}', String.Escape),
(r"[^'\\]+", String.Single)
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\([nrtfb"\'\\]|u[\da-fA-F]{4}|[0-3]?[0-7]{1,2})',
String.Escape),
(r'[^"\\]+', String.Double)
],
'root': [
(r'\n+', Whitespace),
(r"'", String.Single, 'quote'),
include('default'),
(r'(%s)([ \t\r]*)(:)' % _name,
bygroups(Name.Label, Whitespace, Punctuation)),
(_name, String.Other)
],
'annotation': [
(r'\n', Whitespace, ('#pop', 'annotation-body')),
(r'default%s' % _break, Keyword.Reserved,
('#pop', 'annotation-default')),
include('default')
],
'annotation-body': [
(r'\n+', Whitespace),
(r'\.end%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
(_name, String.Other, ('annotation-items', 'descriptor/no-dots'))
],
'annotation-default': [
(r'\n+', Whitespace),
(r'\.end%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
default(('annotation-items', 'descriptor/no-dots'))
],
'annotation-items': [
(r"'", String.Single, 'quote'),
include('default'),
(_name, String.Other)
],
'caught-exception': [
(r'all%s' % _break, Keyword, '#pop'),
include('exception')
],
'class/convert-dots': [
include('default'),
(r'(L)((?:%s[/.])*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Class), '#pop')
],
'class/no-dots': [
include('default'),
(r'\[+', Punctuation, ('#pop', 'descriptor/no-dots')),
(r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'((?:%s/)*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Class), '#pop')
],
'descriptor/convert-dots': [
include('default'),
(r'\[+', Punctuation),
(r'(L)((?:%s[/.])*)(%s?)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
default('#pop')
],
'descriptor/no-dots': [
include('default'),
(r'\[+', Punctuation),
(r'(L)((?:%s/)*)(%s)(;)' % (_unqualified_name, _name),
bygroups(Keyword.Type, Name.Namespace, Name.Class, Punctuation),
'#pop'),
(r'[^%s\[)L]+' % _separator, Keyword.Type, '#pop'),
default('#pop')
],
'descriptors/convert-dots': [
(r'\)', Punctuation, '#pop'),
default('descriptor/convert-dots')
],
'enclosing-method': [
(_ws, Whitespace),
(r'(?=[^%s]*\()' % _separator, Text, ('#pop', 'invocation')),
default(('#pop', 'class/convert-dots'))
],
'exception': [
include('default'),
(r'((?:%s[/.])*)(%s)' % (_unqualified_name, _name),
bygroups(Name.Namespace, Name.Exception), '#pop')
],
'field': [
(r'static%s' % _break, Keyword.Reserved, ('#pop', 'static')),
include('default'),
(r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Variable.Instance),
'#pop')
],
'invocation': [
include('default'),
(r'((?:%s[/.](?=[^%s(]*[/.]))*)(%s[/.])?(%s)(\()' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Function, Punctuation),
('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
'descriptor/convert-dots'))
],
'label': [
include('default'),
(_name, Name.Label, '#pop')
],
'method': [
include('default'),
(r'(%s)(\()' % _name, bygroups(Name.Function, Punctuation),
('#pop', 'descriptor/convert-dots', 'descriptors/convert-dots',
'descriptor/convert-dots'))
],
'no-verification': [
(r'(locals|method|stack)%s' % _break, Keyword.Reserved, '#pop'),
include('default')
],
'static': [
include('default'),
(r'((?:%s[/.](?=[^%s]*[/.]))*)(%s[/.])?(%s)' %
(_unqualified_name, _separator, _unqualified_name, _name),
bygroups(Name.Namespace, Name.Class, Name.Variable.Class), '#pop')
],
'table': [
(r'\n+', Whitespace),
(r'default%s' % _break, Keyword.Reserved, '#pop'),
include('default'),
(_name, Name.Label)
],
'var': [
include('default'),
(_name, Name.Variable, '#pop')
],
'verification': [
include('default'),
(r'(Double|Float|Integer|Long|Null|Top|UninitializedThis)%s' %
_break, Keyword, '#pop'),
(r'Object%s' % _break, Keyword, ('#pop', 'class/no-dots')),
(r'Uninitialized%s' % _break, Keyword, ('#pop', 'label'))
]
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'^\s*[a-z]+_[a-z]+\b', text, re.MULTILINE):
score += 0.3
if re.search(r'^\s*\.(attribute|bytecode|debug|deprecated|enclosing|'
r'inner|interface|limit|set|signature|stack)\b', text,
re.MULTILINE):
score += 0.6
return min(score, 1.0)
class SarlLexer(RegexLexer):
"""
For SARL source code.
.. versionadded:: 2.4
"""
name = 'SARL'
url = 'http://www.sarl.io'
aliases = ['sarl']
filenames = ['*.sarl']
mimetypes = ['text/x-sarl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_$][\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
(r'[^\S\n]+', Whitespace),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(as|break|case|catch|default|do|else|extends|extension|finally|'
r'fires|for|if|implements|instanceof|new|on|requires|return|super|'
r'switch|throw|throws|try|typeof|uses|while|with)\b',
Keyword),
(r'(abstract|def|dispatch|final|native|override|private|protected|'
r'public|static|strictfp|synchronized|transient|val|var|volatile)\b',
Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace)),
(r'(false|it|null|occurrence|this|true|void)\b', Keyword.Constant),
(r'(agent|annotation|artifact|behavior|capacity|class|enum|event|'
r'interface|skill|space)(\s+)', bygroups(Keyword.Declaration, Whitespace),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Whitespace)
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
| 72,923 | Python | 39.046128 | 107 | 0.454205 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/d.py | """
pygments.lexers.d
~~~~~~~~~~~~~~~~~
Lexers for D languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Comment, Keyword, Name, String, Number, \
Punctuation, Whitespace
__all__ = ['DLexer', 'CrocLexer', 'MiniDLexer']
class DLexer(RegexLexer):
"""
For D source.
.. versionadded:: 1.2
"""
name = 'D'
url = 'https://dlang.org/'
filenames = ['*.d', '*.di']
aliases = ['d']
mimetypes = ['text/x-dsrc']
tokens = {
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
# (r'\\\n', Text), # line continuations
# Comments
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nested_comment'),
# Keywords
(words((
'abstract', 'alias', 'align', 'asm', 'assert', 'auto', 'body',
'break', 'case', 'cast', 'catch', 'class', 'const', 'continue',
'debug', 'default', 'delegate', 'delete', 'deprecated', 'do', 'else',
'enum', 'export', 'extern', 'finally', 'final', 'foreach_reverse',
'foreach', 'for', 'function', 'goto', 'if', 'immutable', 'import',
'interface', 'invariant', 'inout', 'in', 'is', 'lazy', 'mixin',
'module', 'new', 'nothrow', 'out', 'override', 'package', 'pragma',
'private', 'protected', 'public', 'pure', 'ref', 'return', 'scope',
'shared', 'static', 'struct', 'super', 'switch', 'synchronized',
'template', 'this', 'throw', 'try', 'typeid', 'typeof',
'union', 'unittest', 'version', 'volatile', 'while', 'with',
'__gshared', '__traits', '__vector', '__parameters'),
suffix=r'\b'),
Keyword),
(words((
# Removed in 2.072
'typedef', ),
suffix=r'\b'),
Keyword.Removed),
(words((
'bool', 'byte', 'cdouble', 'cent', 'cfloat', 'char', 'creal',
'dchar', 'double', 'float', 'idouble', 'ifloat', 'int', 'ireal',
'long', 'real', 'short', 'ubyte', 'ucent', 'uint', 'ulong',
'ushort', 'void', 'wchar'), suffix=r'\b'),
Keyword.Type),
(r'(false|true|null)\b', Keyword.Constant),
(words((
'__FILE__', '__FILE_FULL_PATH__', '__MODULE__', '__LINE__', '__FUNCTION__',
'__PRETTY_FUNCTION__', '__DATE__', '__EOF__', '__TIME__', '__TIMESTAMP__',
'__VENDOR__', '__VERSION__'), suffix=r'\b'),
Keyword.Pseudo),
(r'macro\b', Keyword.Reserved),
(r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin),
# FloatLiteral
# -- HexFloat
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+', Number.Bin),
# -- Octal
(r'0[0-7_]+', Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+', Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
# CharacterLiteral
(r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
String.Char),
# StringLiteral
# -- WysiwygString
(r'r"[^"]*"[cwd]?', String),
# -- AlternateWysiwygString
(r'`[^`]*`[cwd]?', String),
# -- DoubleQuotedString
(r'"(\\\\|\\[^\\]|[^"\\])*"[cwd]?', String),
# -- EscapeSequence
(r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
String),
# -- HexString
(r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
# -- DelimitedString
(r'q"\[', String, 'delimited_bracket'),
(r'q"\(', String, 'delimited_parenthesis'),
(r'q"<', String, 'delimited_angle'),
(r'q"\{', String, 'delimited_curly'),
(r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
(r'q"(.).*?\1"', String),
# -- TokenString
(r'q\{', String, 'token_string'),
# Attributes
(r'@([a-zA-Z_]\w*)?', Name.Decorator),
# Tokens
(r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
# Line
(r'(#line)(\s)(.*)(\n)', bygroups(Comment.Special, Whitespace,
Comment.Special, Whitespace)),
],
'nested_comment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
],
'token_string': [
(r'\{', Punctuation, 'token_string_nest'),
(r'\}', String, '#pop'),
include('root'),
],
'token_string_nest': [
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
include('root'),
],
'delimited_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, 'delimited_inside_bracket'),
(r'\]"', String, '#pop'),
],
'delimited_inside_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, '#push'),
(r'\]', String, '#pop'),
],
'delimited_parenthesis': [
(r'[^()]+', String),
(r'\(', String, 'delimited_inside_parenthesis'),
(r'\)"', String, '#pop'),
],
'delimited_inside_parenthesis': [
(r'[^()]+', String),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'delimited_angle': [
(r'[^<>]+', String),
(r'<', String, 'delimited_inside_angle'),
(r'>"', String, '#pop'),
],
'delimited_inside_angle': [
(r'[^<>]+', String),
(r'<', String, '#push'),
(r'>', String, '#pop'),
],
'delimited_curly': [
(r'[^{}]+', String),
(r'\{', String, 'delimited_inside_curly'),
(r'\}"', String, '#pop'),
],
'delimited_inside_curly': [
(r'[^{}]+', String),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
}
class CrocLexer(RegexLexer):
"""
For Croc source.
"""
name = 'Croc'
url = 'http://jfbillingsley.com/croc'
filenames = ['*.croc']
aliases = ['croc']
mimetypes = ['text/x-crocsrc']
tokens = {
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
# Comments
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'/\*', Comment.Multiline, 'nestedcomment'),
# Keywords
(words((
'as', 'assert', 'break', 'case', 'catch', 'class', 'continue',
'default', 'do', 'else', 'finally', 'for', 'foreach', 'function',
'global', 'namespace', 'if', 'import', 'in', 'is', 'local',
'module', 'return', 'scope', 'super', 'switch', 'this', 'throw',
'try', 'vararg', 'while', 'with', 'yield'), suffix=r'\b'),
Keyword),
(r'(false|true|null)\b', Keyword.Constant),
# FloatLiteral
(r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?',
Number.Float),
# IntegerLiteral
# -- Binary
(r'0[bB][01][01_]*', Number.Bin),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex),
# -- Decimal
(r'([0-9][0-9_]*)(?![.eE])', Number.Integer),
# CharacterLiteral
(r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
String.Char),
# StringLiteral
# -- WysiwygString
(r'@"(""|[^"])*"', String),
(r'@`(``|[^`])*`', String),
(r"@'(''|[^'])*'", String),
# -- DoubleQuotedString
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
# Tokens
(r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'nestedcomment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
}
class MiniDLexer(CrocLexer):
"""
For MiniD source. MiniD is now known as Croc.
"""
name = 'MiniD'
filenames = [] # don't lex .md as MiniD, reserve for Markdown
aliases = ['minid']
mimetypes = ['text/x-minidsrc']
| 9,875 | Python | 37.131274 | 91 | 0.388051 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_stan_builtins.py | """
pygments.lexers._stan_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of functions for Stan used by
    ``pygments.lexers.math.StanLexer``. This is for Stan language version 2.29.0.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = (
'break',
'continue',
'else',
'for',
'if',
'in',
'print',
'reject',
'return',
'while',
)
TYPES = (
'cholesky_factor_corr',
'cholesky_factor_cov',
'corr_matrix',
'cov_matrix',
'int',
'matrix',
'ordered',
'positive_ordered',
'real',
'row_vector',
'simplex',
'unit_vector',
'vector',
'void',
'array',
'complex'
)
FUNCTIONS = (
'abs',
'acos',
'acosh',
'add_diag',
'algebra_solver',
'algebra_solver_newton',
'append_array',
'append_col',
'append_row',
'arg',
'asin',
'asinh',
'atan',
'atan2',
'atanh',
'bernoulli_cdf',
'bernoulli_lccdf',
'bernoulli_lcdf',
'bernoulli_logit_glm_lpmf',
'bernoulli_logit_glm_lupmf',
'bernoulli_logit_glm_rng',
'bernoulli_logit_lpmf',
'bernoulli_logit_lupmf',
'bernoulli_logit_rng',
'bernoulli_lpmf',
'bernoulli_lupmf',
'bernoulli_rng',
'bessel_first_kind',
'bessel_second_kind',
'beta',
'beta_binomial_cdf',
'beta_binomial_lccdf',
'beta_binomial_lcdf',
'beta_binomial_lpmf',
'beta_binomial_lupmf',
'beta_binomial_rng',
'beta_cdf',
'beta_lccdf',
'beta_lcdf',
'beta_lpdf',
'beta_lupdf',
'beta_proportion_lccdf',
'beta_proportion_lcdf',
'beta_proportion_rng',
'beta_rng',
'binary_log_loss',
'binomial_cdf',
'binomial_coefficient_log',
'binomial_lccdf',
'binomial_lcdf',
'binomial_logit_lpmf',
'binomial_logit_lupmf',
'binomial_lpmf',
'binomial_lupmf',
'binomial_rng',
'block',
'categorical_logit_glm_lpmf',
'categorical_logit_glm_lupmf',
'categorical_logit_lpmf',
'categorical_logit_lupmf',
'categorical_logit_rng',
'categorical_lpmf',
'categorical_lupmf',
'categorical_rng',
'cauchy_cdf',
'cauchy_lccdf',
'cauchy_lcdf',
'cauchy_lpdf',
'cauchy_lupdf',
'cauchy_rng',
'cbrt',
'ceil',
'chi_square_cdf',
'chi_square_lccdf',
'chi_square_lcdf',
'chi_square_lpdf',
'chi_square_lupdf',
'chi_square_rng',
'chol2inv',
'cholesky_decompose',
'choose',
'col',
'cols',
'columns_dot_product',
'columns_dot_self',
'conj',
'cos',
'cosh',
'cov_exp_quad',
'crossprod',
'csr_extract_u',
'csr_extract_v',
'csr_extract_w',
'csr_matrix_times_vector',
'csr_to_dense_matrix',
'cumulative_sum',
'dae',
'dae_tol',
'determinant',
'diag_matrix',
'diag_post_multiply',
'diag_pre_multiply',
'diagonal',
'digamma',
'dims',
'dirichlet_lpdf',
'dirichlet_lupdf',
'dirichlet_rng',
'discrete_range_cdf',
'discrete_range_lccdf',
'discrete_range_lcdf',
'discrete_range_lpmf',
'discrete_range_lupmf',
'discrete_range_rng',
'distance',
'dot_product',
'dot_self',
'double_exponential_cdf',
'double_exponential_lccdf',
'double_exponential_lcdf',
'double_exponential_lpdf',
'double_exponential_lupdf',
'double_exponential_rng',
'e',
'eigenvalues_sym',
'eigenvectors_sym',
'erf',
'erfc',
'exp',
'exp2',
'exp_mod_normal_cdf',
'exp_mod_normal_lccdf',
'exp_mod_normal_lcdf',
'exp_mod_normal_lpdf',
'exp_mod_normal_lupdf',
'exp_mod_normal_rng',
'expm1',
'exponential_cdf',
'exponential_lccdf',
'exponential_lcdf',
'exponential_lpdf',
'exponential_lupdf',
'exponential_rng',
'fabs',
'falling_factorial',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
'frechet_cdf',
'frechet_lccdf',
'frechet_lcdf',
'frechet_lpdf',
'frechet_lupdf',
'frechet_rng',
'gamma_cdf',
'gamma_lccdf',
'gamma_lcdf',
'gamma_lpdf',
'gamma_lupdf',
'gamma_p',
'gamma_q',
'gamma_rng',
'gaussian_dlm_obs_lpdf',
'gaussian_dlm_obs_lupdf',
'generalized_inverse',
'get_imag',
'get_lp',
'get_real',
'gumbel_cdf',
'gumbel_lccdf',
'gumbel_lcdf',
'gumbel_lpdf',
'gumbel_lupdf',
'gumbel_rng',
'head',
'hmm_hidden_state_prob',
'hmm_latent_rng',
'hmm_marginal',
'hypergeometric_lpmf',
'hypergeometric_lupmf',
'hypergeometric_rng',
'hypot',
'identity_matrix',
'inc_beta',
'int_step',
'integrate_1d',
'integrate_ode',
'integrate_ode_adams',
'integrate_ode_bdf',
'integrate_ode_rk45',
'inv',
'inv_chi_square_cdf',
'inv_chi_square_lccdf',
'inv_chi_square_lcdf',
'inv_chi_square_lpdf',
'inv_chi_square_lupdf',
'inv_chi_square_rng',
'inv_cloglog',
'inv_erfc',
'inv_gamma_cdf',
'inv_gamma_lccdf',
'inv_gamma_lcdf',
'inv_gamma_lpdf',
'inv_gamma_lupdf',
'inv_gamma_rng',
'inv_logit',
'inv_Phi',
'inv_sqrt',
'inv_square',
'inv_wishart_lpdf',
'inv_wishart_lupdf',
'inv_wishart_rng',
'inverse',
'inverse_spd',
'is_inf',
'is_nan',
'lambert_w0',
'lambert_wm1',
'lbeta',
'lchoose',
'ldexp',
'lgamma',
'linspaced_array',
'linspaced_int_array',
'linspaced_row_vector',
'linspaced_vector',
'lkj_corr_cholesky_lpdf',
'lkj_corr_cholesky_lupdf',
'lkj_corr_cholesky_rng',
'lkj_corr_lpdf',
'lkj_corr_lupdf',
'lkj_corr_rng',
'lmgamma',
'lmultiply',
'log',
'log10',
'log1m',
'log1m_exp',
'log1m_inv_logit',
'log1p',
'log1p_exp',
'log2',
'log_determinant',
'log_diff_exp',
'log_falling_factorial',
'log_inv_logit',
'log_inv_logit_diff',
'log_mix',
'log_modified_bessel_first_kind',
'log_rising_factorial',
'log_softmax',
'log_sum_exp',
'logistic_cdf',
'logistic_lccdf',
'logistic_lcdf',
'logistic_lpdf',
'logistic_lupdf',
'logistic_rng',
'logit',
'loglogistic_cdf',
'loglogistic_lpdf',
'loglogistic_rng',
'lognormal_cdf',
'lognormal_lccdf',
'lognormal_lcdf',
'lognormal_lpdf',
'lognormal_lupdf',
'lognormal_rng',
'machine_precision',
'map_rect',
'matrix_exp',
'matrix_exp_multiply',
'matrix_power',
'max',
'mdivide_left_spd',
'mdivide_left_tri_low',
'mdivide_right_spd',
'mdivide_right_tri_low',
'mean',
'min',
'modified_bessel_first_kind',
'modified_bessel_second_kind',
'multi_gp_cholesky_lpdf',
'multi_gp_cholesky_lupdf',
'multi_gp_lpdf',
'multi_gp_lupdf',
'multi_normal_cholesky_lpdf',
'multi_normal_cholesky_lupdf',
'multi_normal_cholesky_rng',
'multi_normal_lpdf',
'multi_normal_lupdf',
'multi_normal_prec_lpdf',
'multi_normal_prec_lupdf',
'multi_normal_rng',
'multi_student_t_lpdf',
'multi_student_t_lupdf',
'multi_student_t_rng',
'multinomial_logit_lpmf',
'multinomial_logit_lupmf',
'multinomial_logit_rng',
'multinomial_lpmf',
'multinomial_lupmf',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
'neg_binomial_2_cdf',
'neg_binomial_2_lccdf',
'neg_binomial_2_lcdf',
'neg_binomial_2_log_glm_lpmf',
'neg_binomial_2_log_glm_lupmf',
'neg_binomial_2_log_lpmf',
'neg_binomial_2_log_lupmf',
'neg_binomial_2_log_rng',
'neg_binomial_2_lpmf',
'neg_binomial_2_lupmf',
'neg_binomial_2_rng',
'neg_binomial_cdf',
'neg_binomial_lccdf',
'neg_binomial_lcdf',
'neg_binomial_lpmf',
'neg_binomial_lupmf',
'neg_binomial_rng',
'negative_infinity',
'norm',
'normal_cdf',
'normal_id_glm_lpdf',
'normal_id_glm_lupdf',
'normal_lccdf',
'normal_lcdf',
'normal_lpdf',
'normal_lupdf',
'normal_rng',
'not_a_number',
'num_elements',
'ode_adams',
'ode_adams_tol',
'ode_adjoint_tol_ctl',
'ode_bdf',
'ode_bdf_tol',
'ode_ckrk',
'ode_ckrk_tol',
'ode_rk45',
'ode_rk45_tol',
'one_hot_array',
'one_hot_int_array',
'one_hot_row_vector',
'one_hot_vector',
'ones_array',
'ones_int_array',
'ones_row_vector',
'ones_vector',
'ordered_logistic_glm_lpmf',
'ordered_logistic_glm_lupmf',
'ordered_logistic_lpmf',
'ordered_logistic_lupmf',
'ordered_logistic_rng',
'ordered_probit_lpmf',
'ordered_probit_lupmf',
'ordered_probit_rng',
'owens_t',
'pareto_cdf',
'pareto_lccdf',
'pareto_lcdf',
'pareto_lpdf',
'pareto_lupdf',
'pareto_rng',
'pareto_type_2_cdf',
'pareto_type_2_lccdf',
'pareto_type_2_lcdf',
'pareto_type_2_lpdf',
'pareto_type_2_lupdf',
'pareto_type_2_rng',
'Phi',
'Phi_approx',
'pi',
'poisson_cdf',
'poisson_lccdf',
'poisson_lcdf',
'poisson_log_glm_lpmf',
'poisson_log_glm_lupmf',
'poisson_log_lpmf',
'poisson_log_lupmf',
'poisson_log_rng',
'poisson_lpmf',
'poisson_lupmf',
'poisson_rng',
'polar',
'positive_infinity',
'pow',
'print',
'prod',
'proj',
'qr_Q',
'qr_R',
'qr_thin_Q',
'qr_thin_R',
'quad_form',
'quad_form_diag',
'quad_form_sym',
'quantile',
'rank',
'rayleigh_cdf',
'rayleigh_lccdf',
'rayleigh_lcdf',
'rayleigh_lpdf',
'rayleigh_lupdf',
'rayleigh_rng',
'reduce_sum',
'reject',
'rep_array',
'rep_matrix',
'rep_row_vector',
'rep_vector',
'reverse',
'rising_factorial',
'round',
'row',
'rows',
'rows_dot_product',
'rows_dot_self',
'scale_matrix_exp_multiply',
'scaled_inv_chi_square_cdf',
'scaled_inv_chi_square_lccdf',
'scaled_inv_chi_square_lcdf',
'scaled_inv_chi_square_lpdf',
'scaled_inv_chi_square_lupdf',
'scaled_inv_chi_square_rng',
'sd',
'segment',
'sin',
'singular_values',
'sinh',
'size',
'skew_double_exponential_cdf',
'skew_double_exponential_lccdf',
'skew_double_exponential_lcdf',
'skew_double_exponential_lpdf',
'skew_double_exponential_lupdf',
'skew_double_exponential_rng',
'skew_normal_cdf',
'skew_normal_lccdf',
'skew_normal_lcdf',
'skew_normal_lpdf',
'skew_normal_lupdf',
'skew_normal_rng',
'softmax',
'sort_asc',
'sort_desc',
'sort_indices_asc',
'sort_indices_desc',
'sqrt',
'sqrt2',
'square',
'squared_distance',
'std_normal_cdf',
'std_normal_lccdf',
'std_normal_lcdf',
'std_normal_lpdf',
'std_normal_lupdf',
'std_normal_rng',
'step',
'student_t_cdf',
'student_t_lccdf',
'student_t_lcdf',
'student_t_lpdf',
'student_t_lupdf',
'student_t_rng',
'sub_col',
'sub_row',
'sum',
'svd_U',
'svd_V',
'symmetrize_from_lower_tri',
'tail',
'tan',
'tanh',
'target',
'tcrossprod',
'tgamma',
'to_array_1d',
'to_array_2d',
'to_complex',
'to_matrix',
'to_row_vector',
'to_vector',
'trace',
'trace_gen_quad_form',
'trace_quad_form',
'trigamma',
'trunc',
'uniform_cdf',
'uniform_lccdf',
'uniform_lcdf',
'uniform_lpdf',
'uniform_lupdf',
'uniform_rng',
'uniform_simplex',
'variance',
'von_mises_cdf',
'von_mises_lccdf',
'von_mises_lcdf',
'von_mises_lpdf',
'von_mises_lupdf',
'von_mises_rng',
'weibull_cdf',
'weibull_lccdf',
'weibull_lcdf',
'weibull_lpdf',
'weibull_lupdf',
'weibull_rng',
'wiener_lpdf',
'wiener_lupdf',
'wishart_lpdf',
'wishart_lupdf',
'wishart_rng',
'zeros_array',
'zeros_int_array',
'zeros_row_vector'
)
DISTRIBUTIONS = (
'bernoulli',
'bernoulli_logit',
'bernoulli_logit_glm',
'beta',
'beta_binomial',
'binomial',
'binomial_logit',
'categorical',
'categorical_logit',
'categorical_logit_glm',
'cauchy',
'chi_square',
'dirichlet',
'discrete_range',
'double_exponential',
'exp_mod_normal',
'exponential',
'frechet',
'gamma',
'gaussian_dlm_obs',
'gumbel',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
'lkj_corr_cholesky',
'logistic',
'loglogistic',
'lognormal',
'multi_gp',
'multi_gp_cholesky',
'multi_normal',
'multi_normal_cholesky',
'multi_normal_prec',
'multi_student_t',
'multinomial',
'multinomial_logit',
'neg_binomial',
'neg_binomial_2',
'neg_binomial_2_log',
'neg_binomial_2_log_glm',
'normal',
'normal_id_glm',
'ordered_logistic',
'ordered_logistic_glm',
'ordered_probit',
'pareto',
'pareto_type_2',
'poisson',
'poisson_log',
'poisson_log_glm',
'rayleigh',
'scaled_inv_chi_square',
'skew_double_exponential',
'skew_normal',
'std_normal',
'student_t',
'uniform',
'von_mises',
'weibull',
'wiener',
'wishart',
)
RESERVED = (
'repeat',
'until',
'then',
'true',
'false',
'var',
'struct',
'typedef',
'export',
'auto',
'extern',
'var',
'static',
)
| 13,445 | Python | 19.718028 | 79 | 0.556043 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/apl.py | """
pygments.lexers.apl
~~~~~~~~~~~~~~~~~~~
Lexers for APL.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['APLLexer']
class APLLexer(RegexLexer):
"""
A simple APL lexer.
.. versionadded:: 2.0
"""
name = 'APL'
url = 'https://en.m.wikipedia.org/wiki/APL_(programming_language)'
aliases = ['apl']
filenames = [
'*.apl', '*.aplf', '*.aplo', '*.apln',
'*.aplc', '*.apli', '*.dyalog',
]
tokens = {
'root': [
# Whitespace
# ==========
(r'\s+', Whitespace),
#
# Comment
# =======
# '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
(r'[⍝#].*$', Comment.Single),
#
# Strings
# =======
(r'\'((\'\')|[^\'])*\'', String.Single),
(r'"(("")|[^"])*"', String.Double), # supported by NGN APL
#
# Punctuation
# ===========
# This token type is used for diamond and parenthesis
# but not for bracket and ; (see below)
(r'[⋄◇()]', Punctuation),
#
# Array indexing
# ==============
# Since this token type is very important in APL, it is not included in
# the punctuation token type but rather in the following one
(r'[\[\];]', String.Regex),
#
# Distinguished names
# ===================
# following IBM APL2 standard
(r'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
#
# Labels
# ======
# following IBM APL2 standard
# (r'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
#
# Variables
# =========
# following IBM APL2 standard (with a leading _ ok for GNU APL and Dyalog)
(r'[A-Za-zΔ∆⍙_][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
#
# Numbers
# =======
(r'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
r'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
Number),
#
# Operators
# ==========
(r'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘⌸&⌶@⌺⍥⍛⍢]', Name.Attribute), # closest token type
(r'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗⊆⊇⍸√⌾…⍮]',
Operator),
#
# Constant
# ========
(r'⍬', Name.Constant),
#
# Quad symbol
# ===========
(r'[⎕⍞]', Name.Variable.Global),
#
# Arrows left/right
# =================
(r'[←→]', Keyword.Declaration),
#
# D-Fn
# ====
(r'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
(r'[{}]', Keyword.Type),
],
}
| 3,201 | Python | 29.495238 | 86 | 0.373008 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/arrow.py | """
pygments.lexers.arrow
~~~~~~~~~~~~~~~~~~~~~
Lexer for Arrow.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, default, include
from pygments.token import Text, Operator, Keyword, Punctuation, Name, \
String, Number, Whitespace
__all__ = ['ArrowLexer']
TYPES = r'\b(int|bool|char)((?:\[\])*)(?=\s+)'
IDENT = r'([a-zA-Z_][a-zA-Z0-9_]*)'
DECL = TYPES + r'(\s+)' + IDENT
class ArrowLexer(RegexLexer):
"""
Lexer for Arrow
.. versionadded:: 2.7
"""
name = 'Arrow'
url = 'https://pypi.org/project/py-arrow-lang/'
aliases = ['arrow']
filenames = ['*.arw']
tokens = {
'root': [
(r'\s+', Whitespace),
(r'^[|\s]+', Punctuation),
include('blocks'),
include('statements'),
include('expressions'),
],
'blocks': [
(r'(function)(\n+)(/-->)(\s*)' +
DECL + # 4 groups
r'(\()', bygroups(
Keyword.Reserved, Whitespace, Punctuation,
Whitespace, Keyword.Type, Punctuation, Whitespace,
Name.Function, Punctuation
), 'fparams'),
(r'/-->$|\\-->$|/--<|\\--<|\^', Punctuation),
],
'statements': [
(DECL, bygroups(Keyword.Type, Punctuation, Text, Name.Variable)),
(r'\[', Punctuation, 'index'),
(r'=', Operator),
(r'require|main', Keyword.Reserved),
(r'print', Keyword.Reserved, 'print'),
],
'expressions': [
(r'\s+', Whitespace),
(r'[0-9]+', Number.Integer),
(r'true|false', Keyword.Constant),
(r"'", String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'\{', Punctuation, 'array'),
(r'==|!=|<|>|\+|-|\*|/|%', Operator),
(r'and|or|not|length', Operator.Word),
(r'(input)(\s+)(int|char\[\])', bygroups(
Keyword.Reserved, Whitespace, Keyword.Type
)),
(IDENT + r'(\()', bygroups(
Name.Function, Punctuation
), 'fargs'),
(IDENT, Name.Variable),
(r'\[', Punctuation, 'index'),
(r'\(', Punctuation, 'expressions'),
(r'\)', Punctuation, '#pop'),
],
'print': [
include('expressions'),
(r',', Punctuation),
default('#pop'),
],
'fparams': [
(DECL, bygroups(Keyword.Type, Punctuation, Whitespace, Name.Variable)),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
],
'escape': [
(r'\\(["\\/abfnrtv]|[0-9]{1,3}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4})',
String.Escape),
],
'char': [
(r"'", String.Char, '#pop'),
include('escape'),
(r"[^'\\]", String.Char),
],
'string': [
(r'"', String.Double, '#pop'),
include('escape'),
(r'[^"\\]+', String.Double),
],
'array': [
include('expressions'),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation),
],
'fargs': [
include('expressions'),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
],
'index': [
include('expressions'),
(r'\]', Punctuation, '#pop'),
],
}
| 3,565 | Python | 29.220339 | 83 | 0.432539 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/__init__.py | """
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import types
from fnmatch import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, guess_decode
COMPAT = {
'Python3Lexer': 'PythonLexer',
'Python3TracebackLexer': 'PythonTracebackLexer',
}
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
_lexer_cache = {}
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers(plugins=True):
"""Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
If *plugins* is true (the default), plugin lexers supplied by entrypoints
are also returned. Otherwise, only builtin ones are considered.
"""
for item in LEXERS.values():
yield item[1:]
if plugins:
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in LEXERS.values():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def find_lexer_class_by_name(_alias):
"""Lookup a lexer class by alias.
Like `get_lexer_by_name`, but does not instantiate the class.
.. versionadded:: 2.2
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls
raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
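# Example (sketch): callers that already know the language typically use
#   get_lexer_by_name('python', stripall=True)
# which returns a configured PythonLexer, or raises ClassNotFound for an
# unknown alias.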
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
"""Load a lexer from a file.
This method expects a file located relative to the current working
directory, which contains a Lexer class. By default, it expects the
    Lexer to be named CustomLexer; you can specify your own class name
as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Lexer.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
with open(filename, 'rb') as f:
exec(f.read(), custom_namespace)
# Retrieve the class `lexername` from that namespace
if lexername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(lexername, filename))
lexer_class = custom_namespace[lexername]
# And finally instantiate it with the options
return lexer_class(**options)
except OSError as err:
raise ClassNotFound('cannot read %s: %s' % (filename, err))
except ClassNotFound:
raise
except Exception as err:
raise ClassNotFound('error when loading custom lexer: %s' % err)
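# Example (sketch, hypothetical file name): a file mylexer.py in the working
# directory defining ``class CustomLexer(RegexLexer)`` can be loaded with
#   load_lexer_from_file('mylexer.py')
# or, for a differently named class,
#   load_lexer_from_file('mylexer.py', 'MyLexer')
# Note the file is exec'd, so it must come from a trusted source.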
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in LEXERS.values():
for filename in filenames:
if fnmatch(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if fnmatch(fn, filename):
matches.append((cls, filename))
if isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus, cls.__name__
return cls.priority + bonus, cls.__name__
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in LEXERS.values():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
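# Example (sketch): get_lexer_for_mimetype('text/x-python') returns the lexer
# registered for that mimetype (a PythonLexer instance), or raises
# ClassNotFound if no lexer advertises it.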
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
yield from find_plugin_lexers()
def guess_lexer_for_filename(_fn, _text, **options):
"""
    Look up all lexers that handle the given filename as a primary (``filenames``)
    or secondary (``alias_filenames``) pattern. Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if fnmatch(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if fnmatch(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
if not isinstance(_text, str):
inencoding = options.get('inencoding', options.get('encoding'))
if inencoding:
_text = _text.decode(inencoding or 'utf8')
else:
_text, _ = guess_decode(_text)
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
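# Example (sketch): guess_lexer('#!/usr/bin/env python\nprint(1)\n') asks every
# lexer's analyse_text() to score the snippet; the shebang gives PythonLexer
# the highest score, so an instance of it is returned.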
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
if name in COMPAT:
return getattr(self, COMPAT[name])
raise AttributeError(name)
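# Replace this module in sys.modules with an _automodule instance so that
# attribute access like ``pygments.lexers.PythonLexer`` goes through
# __getattr__ above and imports the defining submodule lazily on first use.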
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
| 11,116 | Python | 32.185075 | 79 | 0.615149 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/trafficscript.py | """
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
class RtsLexer(RegexLexer):
"""
For Riverbed Stingray Traffic Manager
.. versionadded:: 2.1
"""
name = 'TrafficScript'
aliases = ['trafficscript', 'rts']
filenames = ['*.rts']
tokens = {
'root' : [
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"', String, 'escapable-string'),
(r'(0x[0-9a-fA-F]+|\d+)', Number),
(r'\d+\.\d+', Number.Float),
(r'\$[a-zA-Z](\w|_)*', Name.Variable),
(r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
(r'[a-zA-Z][\w.]*', Name.Function),
(r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
(r'(>=|<=|==|!=|'
r'&&|\|\||'
r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
r'>>|<<|'
r'\+\+|--|=>)', Operator),
(r'[ \t\r]+', Text),
(r'#[^\n]*', Comment),
],
'escapable-string' : [
(r'\\[tsn]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
}
| 1,474 | Python | 27.365384 | 83 | 0.422659 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/algebra.py | """
pygments.lexers.algebra
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer algebra systems.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
__all__ = ['GAPLexer', 'GAPConsoleLexer', 'MathematicaLexer', 'MuPADLexer',
'BCLexer']
class GAPLexer(RegexLexer):
"""
For GAP source code.
.. versionadded:: 2.0
"""
name = 'GAP'
url = 'http://www.gap-system.org'
aliases = ['gap']
filenames = ['*.g', '*.gd', '*.gi', '*.gap']
tokens = {
'root': [
(r'#.*$', Comment.Single),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
if|then|elif|else|fi|
for|while|do|od|
repeat|until|
break|continue|
function|local|return|end|
rec|
quit|QUIT|
IsBound|Unbind|
TryNextMethod|
Info|Assert
)\b''', Keyword),
(r'''(?x)\b(?:
true|false|fail|infinity
)\b''',
Name.Constant),
(r'''(?x)\b(?:
(Declare|Install)([A-Z][A-Za-z]+)|
BindGlobal|BIND_GLOBAL
)\b''',
Name.Builtin),
(r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
(r'''(?x)\b(?:
and|or|not|mod|in
)\b''',
Operator.Word),
(r'''(?x)
(?:\w+|`[^`]*`)
(?:::\w+|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
}
def analyse_text(text):
score = 0.0
# Declaration part
if re.search(
r"(InstallTrueMethod|Declare(Attribute|Category|Filter|Operation" +
r"|GlobalFunction|Synonym|SynonymAttr|Property))", text
):
score += 0.7
# Implementation part
if re.search(
r"(DeclareRepresentation|Install(GlobalFunction|Method|" +
r"ImmediateMethod|OtherMethod)|New(Family|Type)|Objectify)", text
):
score += 0.7
return min(score, 1.0)
class GAPConsoleLexer(Lexer):
"""
For GAP console sessions. Modeled after JuliaConsoleLexer.
.. versionadded:: 2.14
"""
name = 'GAP session'
aliases = ['gap-console', 'gap-repl']
filenames = ['*.tst']
def get_tokens_unprocessed(self, text):
gaplexer = GAPLexer(**self.options)
start = 0
curcode = ''
insertions = []
output = False
error = False
for line in text.splitlines(keepends=True):
if line.startswith('gap> ') or line.startswith('brk> '):
insertions.append((len(curcode), [(0, Generic.Prompt, line[:5])]))
curcode += line[5:]
output = False
error = False
elif not output and line.startswith('> '):
insertions.append((len(curcode), [(0, Generic.Prompt, line[:2])]))
curcode += line[2:]
else:
if curcode:
yield from do_insertions(
insertions, gaplexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
if line.startswith('Error, ') or error:
yield start, Generic.Error, line
error = True
else:
yield start, Generic.Output, line
output = True
start += len(line)
if curcode:
yield from do_insertions(
insertions, gaplexer.get_tokens_unprocessed(curcode))
# the following is needed to distinguish Scilab and GAP .tst files
def analyse_text(text):
        # GAP prompts are a dead giveaway, although hypothetically a
        # file in another language could be trying to compare a variable
        # "gap" as in "gap> 0.1". But it seems unlikely that this would
        # happen at the start of a line...
if re.search(r"^gap> ", text):
return 0.9
else:
return 0.0
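# A minimal sketch of the session format handled above (hedged example):
#
#     gap> 1 + 1;
#     2
#     gap> Length(
#     > [1, 2, 3]);
#     3
#
# 'gap> ' and 'brk> ' prefixes become Generic.Prompt, '> ' continuation lines
# are appended to the same code chunk, and remaining lines are Generic.Output
# (or Generic.Error once a line starts with "Error, ").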
class MathematicaLexer(RegexLexer):
"""
Lexer for Mathematica source code.
.. versionadded:: 2.0
"""
name = 'Mathematica'
url = 'http://www.wolfram.com/mathematica/'
aliases = ['mathematica', 'mma', 'nb']
filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
mimetypes = ['application/mathematica',
'application/vnd.wolfram.mathematica',
'application/vnd.wolfram.mathematica.package',
'application/vnd.wolfram.cdf']
# http://reference.wolfram.com/mathematica/guide/Syntax.html
operators = (
";;", "=", "=.", "!=" "==", ":=", "->", ":>", "/.", "+", "-", "*", "/",
"^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@",
"@@@", "~~", "===", "&", "<", ">", "<=", ">=",
)
punctuation = (",", ";", "(", ")", "[", "]", "{", "}")
def _multi_escape(entries):
return '(%s)' % ('|'.join(re.escape(entry) for entry in entries))
tokens = {
'root': [
(r'(?s)\(\*.*?\*\)', Comment),
(r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
(r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
(r'#\d*', Name.Variable),
(r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
(r'-?\d+\.\d*', Number.Float),
(r'-?\d*\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(words(operators), Operator),
(words(punctuation), Punctuation),
(r'".*?"', String),
(r'\s+', Text.Whitespace),
],
}
class MuPADLexer(RegexLexer):
"""
A MuPAD lexer.
Contributed by Christopher Creutzig <[email protected]>.
.. versionadded:: 0.8
"""
name = 'MuPAD'
url = 'http://www.mupad.com'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
# (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'\s+', Whitespace),
(r'.', Text)
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
class BCLexer(RegexLexer):
"""
A BC lexer.
.. versionadded:: 2.1
"""
name = 'BC'
url = 'https://www.gnu.org/software/bc/'
aliases = ['bc']
filenames = ['*.bc']
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'[{}();,]', Punctuation),
(words(('if', 'else', 'while', 'for', 'break', 'continue',
'halt', 'return', 'define', 'auto', 'print', 'read',
'length', 'scale', 'sqrt', 'limits', 'quit',
'warranty'), suffix=r'\b'), Keyword),
(r'\+\+|--|\|\||&&|'
r'([-<>+*%\^/!=])=?', Operator),
            # bc doesn't support exponential notation
(r'[0-9]+(\.[0-9]*)?', Number),
(r'\.[0-9]+', Number),
(r'.', Text)
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
}
| 9,873 | Python | 31.587459 | 82 | 0.432898 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/slash.py | """
pygments.lexers.slash
~~~~~~~~~~~~~~~~~~~~~
Lexer for the `Slash <https://github.com/arturadib/Slash-A>`_ programming
language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer
from pygments.token import Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator, Whitespace
__all__ = ['SlashLexer']
class SlashLanguageLexer(ExtendedRegexLexer):
_nkw = r'(?=[^a-zA-Z_0-9])'
def move_state(new_state):
return ("#pop", new_state)
def right_angle_bracket(lexer, match, ctx):
if len(ctx.stack) > 1 and ctx.stack[-2] == "string":
ctx.stack.pop()
yield match.start(), String.Interpol, '}'
ctx.pos = match.end()
pass
tokens = {
"root": [
(r"<%=", Comment.Preproc, move_state("slash")),
(r"<%!!", Comment.Preproc, move_state("slash")),
(r"<%#.*?%>", Comment.Multiline),
(r"<%", Comment.Preproc, move_state("slash")),
(r".|\n", Other),
],
"string": [
(r"\\", String.Escape, move_state("string_e")),
(r"\"", String, move_state("slash")),
(r"#\{", String.Interpol, "slash"),
(r'.|\n', String),
],
"string_e": [
(r'n', String.Escape, move_state("string")),
(r't', String.Escape, move_state("string")),
(r'r', String.Escape, move_state("string")),
(r'e', String.Escape, move_state("string")),
(r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")),
(r'.', String.Escape, move_state("string")),
],
"regexp": [
(r'}[a-z]*', String.Regex, move_state("slash")),
(r'\\(.|\n)', String.Regex),
(r'{', String.Regex, "regexp_r"),
(r'.|\n', String.Regex),
],
"regexp_r": [
(r'}[a-z]*', String.Regex, "#pop"),
(r'\\(.|\n)', String.Regex),
(r'{', String.Regex, "regexp_r"),
],
"slash": [
(r"%>", Comment.Preproc, move_state("root")),
(r"\"", String, move_state("string")),
(r"'[a-zA-Z0-9_]+", String),
(r'%r{', String.Regex, move_state("regexp")),
(r'/\*.*?\*/', Comment.Multiline),
(r"(#|//).*?\n", Comment.Single),
(r'-?[0-9]+e[+-]?[0-9]+', Number.Float),
(r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
(r'-?[0-9]+', Number.Integer),
(r'nil'+_nkw, Name.Builtin),
(r'true'+_nkw, Name.Builtin),
(r'false'+_nkw, Name.Builtin),
(r'self'+_nkw, Name.Builtin),
(r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)',
bygroups(Keyword, Whitespace, Name.Class)),
(r'class'+_nkw, Keyword),
(r'extends'+_nkw, Keyword),
(r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)),
(r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
bygroups(Keyword, Whitespace, Name.Function)),
(r'def'+_nkw, Keyword),
(r'if'+_nkw, Keyword),
(r'elsif'+_nkw, Keyword),
(r'else'+_nkw, Keyword),
(r'unless'+_nkw, Keyword),
(r'for'+_nkw, Keyword),
(r'in'+_nkw, Keyword),
(r'while'+_nkw, Keyword),
(r'until'+_nkw, Keyword),
(r'and'+_nkw, Keyword),
(r'or'+_nkw, Keyword),
(r'not'+_nkw, Keyword),
(r'lambda'+_nkw, Keyword),
(r'try'+_nkw, Keyword),
(r'catch'+_nkw, Keyword),
(r'return'+_nkw, Keyword),
(r'next'+_nkw, Keyword),
(r'last'+_nkw, Keyword),
(r'throw'+_nkw, Keyword),
(r'use'+_nkw, Keyword),
(r'switch'+_nkw, Keyword),
(r'\\', Keyword),
(r'λ', Keyword),
(r'__FILE__'+_nkw, Name.Builtin.Pseudo),
(r'__LINE__'+_nkw, Name.Builtin.Pseudo),
(r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant),
(r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name),
(r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance),
(r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class),
(r'\(', Punctuation),
(r'\)', Punctuation),
(r'\[', Punctuation),
(r'\]', Punctuation),
(r'\{', Punctuation),
(r'\}', right_angle_bracket),
(r';', Punctuation),
(r',', Punctuation),
(r'<<=', Operator),
(r'>>=', Operator),
(r'<<', Operator),
(r'>>', Operator),
(r'==', Operator),
(r'!=', Operator),
(r'=>', Operator),
(r'=', Operator),
(r'<=>', Operator),
(r'<=', Operator),
(r'>=', Operator),
(r'<', Operator),
(r'>', Operator),
(r'\+\+', Operator),
(r'\+=', Operator),
(r'-=', Operator),
(r'\*\*=', Operator),
(r'\*=', Operator),
(r'\*\*', Operator),
(r'\*', Operator),
(r'/=', Operator),
(r'\+', Operator),
(r'-', Operator),
(r'/', Operator),
(r'%=', Operator),
(r'%', Operator),
(r'^=', Operator),
(r'&&=', Operator),
(r'&=', Operator),
(r'&&', Operator),
(r'&', Operator),
(r'\|\|=', Operator),
(r'\|=', Operator),
(r'\|\|', Operator),
(r'\|', Operator),
(r'!', Operator),
(r'\.\.\.', Operator),
(r'\.\.', Operator),
(r'\.', Operator),
(r'::', Operator),
(r':', Operator),
(r'(\s|\n)+', Whitespace),
(r'[a-z_][a-zA-Z0-9_\']*', Name.Variable),
],
}
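# A hedged sketch of the templating flow the states above implement:
#
#     <p><%= "hello #{name}" %></p>
#
# '<%=' switches into the "slash" state, '"' enters "string", '#{' pushes
# "slash" again for the interpolated expression, and right_angle_bracket()
# pops back into the string once the matching '}' is reached.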
class SlashLexer(DelegatingLexer):
"""
Lexer for the Slash programming language.
.. versionadded:: 2.4
"""
name = 'Slash'
aliases = ['slash']
filenames = ['*.sla']
def __init__(self, **options):
from pygments.lexers.web import HtmlLexer
super().__init__(HtmlLexer, SlashLanguageLexer, **options)
| 8,481 | Python | 44.848648 | 140 | 0.326495 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/chapel.py | """
pygments.lexers.chapel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Chapel language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ChapelLexer']
class ChapelLexer(RegexLexer):
"""
For Chapel source.
.. versionadded:: 2.0
"""
name = 'Chapel'
url = 'https://chapel-lang.org/'
filenames = ['*.chpl']
aliases = ['chapel', 'chpl']
# mimetypes = ['text/x-chapel']
known_types = ('bool', 'bytes', 'complex', 'imag', 'int', 'locale',
'nothing', 'opaque', 'range', 'real', 'string', 'uint',
'void')
type_modifiers_par = ('atomic', 'single', 'sync')
type_modifiers_mem = ('borrowed', 'owned', 'shared', 'unmanaged')
type_modifiers = (*type_modifiers_par, *type_modifiers_mem)
declarations = ('config', 'const', 'in', 'inout', 'out', 'param', 'ref',
'type', 'var')
constants = ('false', 'nil', 'none', 'true')
other_keywords = ('align', 'as',
'begin', 'break', 'by',
'catch', 'cobegin', 'coforall', 'continue',
'defer', 'delete', 'dmapped', 'do', 'domain',
'else', 'enum', 'except', 'export', 'extern',
'for', 'forall', 'foreach', 'forwarding',
'if', 'implements', 'import', 'index', 'init', 'inline',
'label', 'lambda', 'let', 'lifetime', 'local',
'new', 'noinit',
'on', 'only', 'otherwise', 'override',
'pragma', 'primitive', 'private', 'prototype', 'public',
'reduce', 'require', 'return',
'scan', 'select', 'serial', 'sparse', 'subdomain',
'then', 'this', 'throw', 'throws', 'try',
'use',
'when', 'where', 'while', 'with',
'yield',
'zip')
tokens = {
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'\\\n', Text),
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(words(declarations, suffix=r'\b'), Keyword.Declaration),
(words(constants, suffix=r'\b'), Keyword.Constant),
(words(known_types, suffix=r'\b'), Keyword.Type),
(words((*type_modifiers, *other_keywords), suffix=r'\b'), Keyword),
(r'(iter)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
(r'(proc)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
(r'(operator)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
(r'(class|interface|module|record|union)(\s+)', bygroups(Keyword, Whitespace),
'classname'),
# imaginary integers
(r'\d+i', Number),
(r'\d+\.\d*([Ee][-+]\d+)?i', Number),
(r'\.\d+([Ee][-+]\d+)?i', Number),
(r'\d+[Ee][-+]\d+i', Number),
# reals cannot end with a period due to lexical ambiguity with
# .. operator. See reference for rationale.
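            # For instance, '1..10' must tokenize as Integer, '..', Integer,
            # so '1.' (a digit followed by a bare period) is not lexed as a
            # float literal here.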
(r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
(r'\d+[eE][+-]?[0-9]+i?', Number.Float),
# integer literals
# -- binary
(r'0[bB][01]+', Number.Bin),
# -- hex
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# -- octal
(r'0[oO][0-7]+', Number.Oct),
# -- decimal
(r'[0-9]+', Number.Integer),
# strings
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
# tokens
(r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
r'<=>|<~>|\.\.|by|#|\.\.\.|'
r'&&|\|\||!|&|\||\^|~|<<|>>|'
r'==|!=|<=|>=|<|>|'
r'[+\-*/%]|\*\*)', Operator),
(r'[:;,.?()\[\]{}]', Punctuation),
# identifiers
(r'[a-zA-Z_][\w$]*', Name.Other),
],
'classname': [
(r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
],
'procname': [
(r'([a-zA-Z_][.\w$]*|' # regular function name, including secondary
r'\~[a-zA-Z_][.\w$]*|' # support for legacy destructors
r'[+*/!~%<>=&^|\-:]{1,2})', # operators
Name.Function, '#pop'),
# allow `proc (atomic T).foo`
(r'\(', Punctuation, "receivertype"),
(r'\)+\.', Punctuation),
],
'receivertype': [
(words(type_modifiers, suffix=r'\b'), Keyword),
(words(known_types, suffix=r'\b'), Keyword.Type),
(r'[^()]*', Name.Other, '#pop'),
],
}
| 5,014 | Python | 35.605839 | 90 | 0.422417 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/spice.py | """
pygments.lexers.spice
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Spice programming language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['SpiceLexer']
class SpiceLexer(RegexLexer):
"""
For Spice source.
.. versionadded:: 2.11
"""
name = 'Spice'
url = 'https://www.spicelang.com'
filenames = ['*.spice']
aliases = ['spice', 'spicelang']
mimetypes = ['text/x-spice']
tokens = {
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'\\\n', Text),
# comments
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*]{2}(.|\n)*?[*](\\\n)?/', String.Doc),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
# keywords
(r'(import|as)\b', Keyword.Namespace),
(r'(f|p|type|struct|enum)\b', Keyword.Declaration),
(words(('if', 'else', 'for', 'foreach', 'while', 'break',
'continue', 'return', 'assert', 'thread', 'unsafe', 'ext',
'dll'), suffix=r'\b'), Keyword),
(words(('const', 'signed', 'unsigned', 'inline', 'public'),
suffix=r'\b'), Keyword.Pseudo),
(words(('new', 'switch', 'case', 'yield', 'stash', 'pick', 'sync',
'class'), suffix=r'\b'), Keyword.Reserved),
(r'(true|false|nil)\b', Keyword.Constant),
(words(('double', 'int', 'short', 'long', 'byte', 'char', 'string',
'bool', 'dyn'), suffix=r'\b'), Keyword.Type),
(words(('printf', 'sizeof', 'len', 'tid', 'join'), suffix=r'\b(\()'),
bygroups(Name.Builtin, Punctuation)),
# numeric literals
(r'[0-9]*[.][0-9]+', Number.Double),
(r'0[bB][01]+[sl]?', Number.Bin),
(r'0[oO][0-7]+[sl]?', Number.Oct),
(r'0[xXhH][0-9a-fA-F]+[sl]?', Number.Hex),
(r'(0[dD])?[0-9]+[sl]?', Number.Integer),
# string literal
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
# char literal
(r'\'(\\\\|\\[^\\]|[^\'\\])\'', String.Char),
# tokens
(r'<<=|>>=|<<|>>|<=|>=|\+=|-=|\*=|/=|\%=|\|=|&=|\^=|&&|\|\||&|\||'
r'\+\+|--|\%|\^|\~|==|!=|::|[.]{3}|[+\-*/&]', Operator),
(r'[|<>=!()\[\]{}.,;:\?]', Punctuation),
# identifiers
(r'[^\W\d]\w*', Name.Other),
]
}
| 2,694 | Python | 36.430555 | 81 | 0.429473 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/web.py | """
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Just export previously exported lexers.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.html import HtmlLexer, DtdLexer, XmlLexer, XsltLexer, \
HamlLexer, ScamlLexer, JadeLexer
from pygments.lexers.css import CssLexer, SassLexer, ScssLexer
from pygments.lexers.javascript import JavascriptLexer, LiveScriptLexer, \
DartLexer, TypeScriptLexer, LassoLexer, ObjectiveJLexer, CoffeeScriptLexer
from pygments.lexers.actionscript import ActionScriptLexer, \
ActionScript3Lexer, MxmlLexer
from pygments.lexers.php import PhpLexer
from pygments.lexers.webmisc import DuelLexer, XQueryLexer, SlimLexer, QmlLexer
from pygments.lexers.data import JsonLexer
JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
__all__ = []
| 894 | Python | 36.291665 | 79 | 0.768456 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/diff.py | """
pygments.lexers.diff
~~~~~~~~~~~~~~~~~~~~
Lexers for diff/patch formats.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
Literal, Whitespace
__all__ = ['DiffLexer', 'DarcsPatchLexer', 'WDiffLexer']
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r'( )(.*)(\n)', bygroups(Whitespace, Text, Whitespace)),
(r'(\+.*)(\n)', bygroups(Generic.Inserted, Whitespace)),
(r'(-.*)(\n)', bygroups(Generic.Deleted, Whitespace)),
(r'(!.*)(\n)', bygroups(Generic.Strong, Whitespace)),
(r'(@.*)(\n)', bygroups(Generic.Subheading, Whitespace)),
(r'((?:[Ii]ndex|diff).*)(\n)', bygroups(Generic.Heading, Whitespace)),
(r'(=.*)(\n)', bygroups(Generic.Heading, Whitespace)),
(r'(.*)(\n)', Whitespace),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
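# A hedged sample of the line shapes matched above:
#
#     diff -u a/hello.txt b/hello.txt
#     @@ -1 +1 @@
#     -Hello world
#     +Hello, world!
#
# 'diff ...' and 'Index: ...' lines are Generic.Heading, '@@ ...' hunk markers
# are Generic.Subheading, '-'/'+' lines are Generic.Deleted/Inserted, and
# analyse_text() keys on a leading 'Index: ', 'diff ', or '--- '.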
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
.. versionadded:: 0.10
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace')
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'\{', Operator),
(r'\}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Whitespace, Name, Operator,
Literal.Date, Whitespace, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Whitespace, Name, Operator,
Literal.Date, Whitespace), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*)(\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Whitespace, Keyword, Text, Whitespace)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'(.*)(\n)', bygroups(Text, Whitespace)),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Whitespace, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]+', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]+', Generic.Deleted),
],
}
class WDiffLexer(RegexLexer):
"""
A wdiff lexer.
Note that:
* It only works with normal output (without options like ``-l``).
* If the target files contain "[-", "-]", "{+", or "+}",
      especially if they are unbalanced, the lexer will get confused.
.. versionadded:: 2.2
"""
name = 'WDiff'
url = 'https://www.gnu.org/software/wdiff/'
aliases = ['wdiff']
filenames = ['*.wdiff']
mimetypes = []
flags = re.MULTILINE | re.DOTALL
# We can only assume "[-" after "[-" before "-]" is `nested`,
# for instance wdiff to wdiff outputs. We have no way to
# distinct these marker is of wdiff output from original text.
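    # For instance, in output such as
    #
    #     The quick [-brown-] {+red+} fox
    #
    # the '[-...-]' span is emitted as Generic.Deleted and the '{+...+}' span
    # as Generic.Inserted; nested markers simply push and pop the same state.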
ins_op = r"\{\+"
ins_cl = r"\+\}"
del_op = r"\[\-"
del_cl = r"\-\]"
normal = r'[^{}[\]+-]+' # for performance
tokens = {
'root': [
(ins_op, Generic.Inserted, 'inserted'),
(del_op, Generic.Deleted, 'deleted'),
(normal, Text),
(r'.', Text),
],
'inserted': [
(ins_op, Generic.Inserted, '#push'),
(del_op, Generic.Inserted, '#push'),
(del_cl, Generic.Inserted, '#pop'),
(ins_cl, Generic.Inserted, '#pop'),
(normal, Generic.Inserted),
(r'.', Generic.Inserted),
],
'deleted': [
(del_op, Generic.Deleted, '#push'),
(ins_op, Generic.Deleted, '#push'),
(ins_cl, Generic.Deleted, '#pop'),
(del_cl, Generic.Deleted, '#pop'),
(normal, Generic.Deleted),
(r'.', Generic.Deleted),
],
}
| 5,164 | Python | 30.114458 | 86 | 0.486832 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/dalvik.py | """
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation, Whitespace
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
"""
For Smali (Android/Dalvik) assembly
code.
.. versionadded:: 1.6
"""
name = 'Smali'
url = 'http://code.google.com/p/smali/'
aliases = ['smali']
filenames = ['*.smali']
mimetypes = ['text/smali']
tokens = {
'root': [
include('comment'),
include('label'),
include('field'),
include('method'),
include('class'),
include('directive'),
include('access-modifier'),
include('instruction'),
include('literal'),
include('punctuation'),
include('type'),
include('whitespace')
],
'directive': [
(r'^([ \t]*)(\.(?:class|super|implements|field|subannotation|annotation|'
r'enum|method|registers|locals|array-data|packed-switch|'
r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
r'epilogue|source))', bygroups(Whitespace, Keyword)),
(r'^([ \t]*)(\.end)( )(field|subannotation|annotation|method|array-data|'
'packed-switch|sparse-switch|parameter|local)',
bygroups(Whitespace, Keyword, Whitespace, Keyword)),
(r'^([ \t]*)(\.restart)( )(local)',
bygroups(Whitespace, Keyword, Whitespace, Keyword)),
],
'access-modifier': [
(r'(public|private|protected|static|final|synchronized|bridge|'
r'varargs|native|abstract|strictfp|synthetic|constructor|'
r'declared-synchronized|interface|enum|annotation|volatile|'
r'transient)', Keyword),
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
],
'instruction': [
(r'\b[vp]\d+\b', Name.Builtin), # registers
(r'(\b[a-z][A-Za-z0-9/-]+)(\s+)', bygroups(Text, Whitespace)), # instructions
],
'literal': [
(r'".*"', String),
(r'0x[0-9A-Fa-f]+t?', Number.Hex),
(r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+L?', Number.Integer),
],
'field': [
(r'(\$?\b)([\w$]*)(:)',
bygroups(Punctuation, Name.Variable, Punctuation)),
],
'method': [
(r'<(?:cl)?init>', Name.Function), # constructor
(r'(\$?\b)([\w$]*)(\()',
bygroups(Punctuation, Name.Function, Punctuation)),
],
'label': [
(r':\w+', Name.Label),
],
'class': [
# class names in the form Lcom/namespace/ClassName;
# I only want to color the ClassName part, so the namespace part is
# treated as 'Text'
(r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
bygroups(Keyword.Type, Text, Name.Class, Text)),
],
'punctuation': [
(r'->', Punctuation),
(r'[{},():=.-]', Punctuation),
],
'type': [
(r'[ZBSCIJFDV\[]+', Keyword.Type),
],
'comment': [
(r'#.*?\n', Comment),
],
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'\b((check-cast|instance-of|throw-verification-error'
r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
score += 0.3
if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
r'\b(array-data|class-change-error|declared-synchronized|'
r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
r'illegal-class-access|illegal-field-access|'
r'illegal-method-access|instantiation-error|no-error|'
r'no-such-class|no-such-field|no-such-method|'
r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
score += 0.6
return score
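# A hedged sample of the directive and class-name syntax recognized above:
#
#     .class public Lcom/example/Foo;
#     .method public constructor <init>()V
#
# '.class' and '.method' hit the directive rules, 'Lcom/example/Foo;' is
# split so that only 'Foo' is colored as Name.Class, and '<init>' matches
# the constructor rule in the 'method' state.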
| 4,607 | Python | 35 | 90 | 0.497287 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/nix.py | """
pygments.lexers.nix
~~~~~~~~~~~~~~~~~~~
Lexers for the NixOS Nix language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['NixLexer']
class NixLexer(RegexLexer):
"""
For the Nix language.
.. versionadded:: 2.0
"""
name = 'Nix'
url = 'http://nixos.org/nix/'
aliases = ['nixos', 'nix']
filenames = ['*.nix']
mimetypes = ['text/x-nix']
keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if',
'else', 'then', '...']
builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins',
'map', 'removeAttrs', 'throw', 'toString', 'derivation']
operators = ['++', '+', '?', '.', '!', '//', '==',
'!=', '&&', '||', '->', '=']
punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"]
tokens = {
'root': [
# comments starting with #
(r'#.*$', Comment.Single),
# multiline comments
(r'/\*', Comment.Multiline, 'comment'),
# whitespace
(r'\s+', Text),
# keywords
('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword),
# highlight the builtins
('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
Name.Builtin),
(r'\b(true|false|null)\b', Name.Constant),
# operators
('(%s)' % '|'.join(re.escape(entry) for entry in operators),
Operator),
# word operators
(r'\b(or|and)\b', Operator.Word),
# punctuations
('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation),
# integers
(r'[0-9]+', Number.Integer),
# strings
(r'"', String.Double, 'doublequote'),
(r"''", String.Single, 'singlequote'),
# paths
(r'[\w.+-]*(\/[\w.+-]+)+', Literal),
(r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal),
# urls
(r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal),
# names of variables
(r'[\w-]+\s*=', String.Symbol),
(r'[a-zA-Z_][\w\'-]*', Text),
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'singlequote': [
(r"'''", String.Escape),
(r"''\$\{", String.Escape),
(r"''\n", String.Escape),
(r"''\r", String.Escape),
(r"''\t", String.Escape),
(r"''", String.Single, '#pop'),
(r'\$\{', String.Interpol, 'antiquote'),
(r"['$]", String.Single),
(r"[^'$]+", String.Single),
],
'doublequote': [
(r'\\', String.Escape),
(r'\\"', String.Escape),
(r'\\$\{', String.Escape),
(r'"', String.Double, '#pop'),
(r'\$\{', String.Interpol, 'antiquote'),
(r'[^"]', String.Double),
],
'antiquote': [
(r"\}", String.Interpol, '#pop'),
            # TODO: we should probably also escape ''${ and \${ here
(r"\$\{", String.Interpol, '#push'),
include('root'),
],
}
def analyse_text(text):
rv = 0.0
# TODO: let/in
if re.search(r'import.+?<[^>]+>', text):
rv += 0.4
if re.search(r'mkDerivation\s+(\(|\{|rec)', text):
rv += 0.4
if re.search(r'=\s+mkIf\s+', text):
rv += 0.4
if re.search(r'\{[a-zA-Z,\s]+\}:', text):
rv += 0.1
return rv
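# For instance, analyse_text() above scores a snippet like
#
#     with import <nixpkgs> {};
#     stdenv.mkDerivation rec { name = "hello-2.10"; }
#
# highly, via the 'import ... <...>' and 'mkDerivation rec' patterns.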
| 4,015 | Python | 28.529412 | 89 | 0.417933 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/julia.py | """
pygments.lexers.julia
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Julia language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
words, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
from pygments.util import shebang_matches
from pygments.lexers._julia_builtins import OPERATORS_LIST, DOTTED_OPERATORS_LIST, \
KEYWORD_LIST, BUILTIN_LIST, LITERAL_LIST
__all__ = ['JuliaLexer', 'JuliaConsoleLexer']
# see https://docs.julialang.org/en/v1/manual/variables/#Allowed-Variable-Names
allowed_variable = \
'(?:[a-zA-Z_\u00A1-\U0010ffff][a-zA-Z_0-9!\u00A1-\U0010ffff]*)'
# see https://github.com/JuliaLang/julia/blob/master/src/flisp/julia_opsuffs.h
operator_suffixes = r'[²³¹ʰʲʳʷʸˡˢˣᴬᴮᴰᴱᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᴿᵀᵁᵂᵃᵇᵈᵉᵍᵏᵐᵒᵖᵗᵘᵛᵝᵞᵟᵠᵡᵢᵣᵤᵥᵦᵧᵨᵩᵪᶜᶠᶥᶦᶫᶰᶸᶻᶿ′″‴‵‶‷⁗⁰ⁱ⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾ⁿ₀₁₂₃₄₅₆₇₈₉₊₋₌₍₎ₐₑₒₓₕₖₗₘₙₚₛₜⱼⱽ]*'
class JuliaLexer(RegexLexer):
"""
For Julia source code.
.. versionadded:: 1.6
"""
name = 'Julia'
url = 'https://julialang.org/'
aliases = ['julia', 'jl']
filenames = ['*.jl']
mimetypes = ['text/x-julia', 'application/x-julia']
tokens = {
'root': [
(r'\n', Whitespace),
(r'[^\S\n]+', Whitespace),
(r'#=', Comment.Multiline, "blockcomment"),
(r'#.*$', Comment),
(r'[\[\](),;]', Punctuation),
# symbols
# intercept range expressions first
(r'(' + allowed_variable + r')(\s*)(:)(' + allowed_variable + ')',
bygroups(Name, Whitespace, Operator, Name)),
# then match :name which does not follow closing brackets, digits, or the
# ::, <:, and :> operators
(r'(?<![\]):<>\d.])(:' + allowed_variable + ')', String.Symbol),
# type assertions - excludes expressions like ::typeof(sin) and ::avec[1]
(r'(?<=::)(\s*)(' + allowed_variable + r')\b(?![(\[])',
bygroups(Whitespace, Keyword.Type)),
# type comparisons
# - MyType <: A or MyType >: A
('(' + allowed_variable + r')(\s*)([<>]:)(\s*)(' + allowed_variable + r')\b(?![(\[])',
bygroups(Keyword.Type, Whitespace, Operator, Whitespace, Keyword.Type)),
# - <: B or >: B
(r'([<>]:)(\s*)(' + allowed_variable + r')\b(?![(\[])',
bygroups(Operator, Whitespace, Keyword.Type)),
# - A <: or A >:
(r'\b(' + allowed_variable + r')(\s*)([<>]:)',
bygroups(Keyword.Type, Whitespace, Operator)),
# operators
# Suffixes aren't actually allowed on all operators, but we'll ignore that
# since those cases are invalid Julia code.
(words([*OPERATORS_LIST, *DOTTED_OPERATORS_LIST],
suffix=operator_suffixes), Operator),
(words(['.' + o for o in DOTTED_OPERATORS_LIST],
suffix=operator_suffixes), Operator),
(words(['...', '..']), Operator),
# NOTE
            # Patterns below work only for definition sites and are thus hardly reliable.
#
# functions
# (r'(function)(\s+)(' + allowed_variable + ')',
# bygroups(Keyword, Text, Name.Function)),
# chars
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|"
r"\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char),
# try to match trailing transpose
(r'(?<=[.\w)\]])(\'' + operator_suffixes + ')+', Operator),
# raw strings
(r'(raw)(""")', bygroups(String.Affix, String), 'tqrawstring'),
(r'(raw)(")', bygroups(String.Affix, String), 'rawstring'),
# regular expressions
(r'(r)(""")', bygroups(String.Affix, String.Regex), 'tqregex'),
(r'(r)(")', bygroups(String.Affix, String.Regex), 'regex'),
# other strings
(r'(' + allowed_variable + ')?(""")',
bygroups(String.Affix, String), 'tqstring'),
(r'(' + allowed_variable + ')?(")',
bygroups(String.Affix, String), 'string'),
# backticks
(r'(' + allowed_variable + ')?(```)',
bygroups(String.Affix, String.Backtick), 'tqcommand'),
(r'(' + allowed_variable + ')?(`)',
bygroups(String.Affix, String.Backtick), 'command'),
# type names
# - names that begin a curly expression
('(' + allowed_variable + r')(\{)',
bygroups(Keyword.Type, Punctuation), 'curly'),
# - names as part of bare 'where'
(r'(where)(\s+)(' + allowed_variable + ')',
bygroups(Keyword, Whitespace, Keyword.Type)),
# - curly expressions in general
(r'(\{)', Punctuation, 'curly'),
# - names as part of type declaration
(r'(abstract|primitive)([ \t]+)(type\b)([\s()]+)(' +
allowed_variable + r')',
bygroups(Keyword, Whitespace, Keyword, Text, Keyword.Type)),
(r'(mutable(?=[ \t]))?([ \t]+)?(struct\b)([\s()]+)(' +
allowed_variable + r')',
bygroups(Keyword, Whitespace, Keyword, Text, Keyword.Type)),
# macros
(r'@' + allowed_variable, Name.Decorator),
(words([*OPERATORS_LIST, '..', '.', *DOTTED_OPERATORS_LIST],
prefix='@', suffix=operator_suffixes), Name.Decorator),
# keywords
(words(KEYWORD_LIST, suffix=r'\b'), Keyword),
# builtin types
(words(BUILTIN_LIST, suffix=r'\b'), Keyword.Type),
# builtin literals
(words(LITERAL_LIST, suffix=r'\b'), Name.Builtin),
# names
(allowed_variable, Name),
# numbers
(r'(\d+((_\d+)+)?\.(?!\.)(\d+((_\d+)+)?)?|\.\d+((_\d+)+)?)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+((_\d+)+)?[eEf][+-]?[0-9]+', Number.Float),
(r'0x[a-fA-F0-9]+((_[a-fA-F0-9]+)+)?(\.([a-fA-F0-9]+((_[a-fA-F0-9]+)+)?)?)?p[+-]?\d+', Number.Float),
(r'0b[01]+((_[01]+)+)?', Number.Bin),
(r'0o[0-7]+((_[0-7]+)+)?', Number.Oct),
(r'0x[a-fA-F0-9]+((_[a-fA-F0-9]+)+)?', Number.Hex),
(r'\d+((_\d+)+)?', Number.Integer),
# single dot operator matched last to permit e.g. ".1" as a float
(words(['.']), Operator),
],
"blockcomment": [
(r'[^=#]', Comment.Multiline),
(r'#=', Comment.Multiline, '#push'),
(r'=#', Comment.Multiline, '#pop'),
(r'[=#]', Comment.Multiline),
],
'curly': [
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(allowed_variable, Keyword.Type),
include('root'),
],
'tqrawstring': [
(r'"""', String, '#pop'),
(r'([^"]|"[^"][^"])+', String),
],
'rawstring': [
(r'"', String, '#pop'),
(r'\\"', String.Escape),
(r'([^"\\]|\\[^"])+', String),
],
# Interpolation is defined as "$" followed by the shortest full
# expression, which is something we can't parse. Include the most
# common cases here: $word, and $(paren'd expr).
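        # For instance, in "x is $x and $(f(y))" only '$x' and the
        # parenthesized '$(f(y))' (whose nested parens are balanced by the
        # 'in-intp' state) are highlighted as String.Interpol.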
'interp': [
(r'\$' + allowed_variable, String.Interpol),
(r'(\$)(\()', bygroups(String.Interpol, Punctuation), 'in-intp'),
],
'in-intp': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
include('root'),
],
'string': [
(r'(")(' + allowed_variable + r'|\d+)?',
bygroups(String, String.Affix), '#pop'),
# FIXME: This escape pattern is not perfect.
(r'\\([\\"\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape),
include('interp'),
# @printf and @sprintf formats
(r'%[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?[hlL]?[E-GXc-giorsux%]',
String.Interpol),
(r'[^"$%\\]+', String),
(r'.', String),
],
'tqstring': [
(r'(""")(' + allowed_variable + r'|\d+)?',
bygroups(String, String.Affix), '#pop'),
(r'\\([\\"\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape),
include('interp'),
(r'[^"$%\\]+', String),
(r'.', String),
],
'regex': [
(r'(")([imsxa]*)?', bygroups(String.Regex, String.Affix), '#pop'),
(r'\\"', String.Regex),
(r'[^\\"]+', String.Regex),
],
'tqregex': [
(r'(""")([imsxa]*)?', bygroups(String.Regex, String.Affix), '#pop'),
(r'[^"]+', String.Regex),
],
'command': [
(r'(`)(' + allowed_variable + r'|\d+)?',
bygroups(String.Backtick, String.Affix), '#pop'),
(r'\\[`$]', String.Escape),
include('interp'),
(r'[^\\`$]+', String.Backtick),
(r'.', String.Backtick),
],
'tqcommand': [
(r'(```)(' + allowed_variable + r'|\d+)?',
bygroups(String.Backtick, String.Affix), '#pop'),
(r'\\\$', String.Escape),
include('interp'),
(r'[^\\`$]+', String.Backtick),
(r'.', String.Backtick),
],
}
def analyse_text(text):
return shebang_matches(text, r'julia')
class JuliaConsoleLexer(Lexer):
"""
For Julia console sessions. Modeled after MatlabSessionLexer.
.. versionadded:: 1.6
"""
name = 'Julia console'
aliases = ['jlcon', 'julia-repl']
def get_tokens_unprocessed(self, text):
jllexer = JuliaLexer(**self.options)
start = 0
curcode = ''
insertions = []
output = False
error = False
for line in text.splitlines(keepends=True):
if line.startswith('julia>'):
insertions.append((len(curcode), [(0, Generic.Prompt, line[:6])]))
curcode += line[6:]
output = False
error = False
elif line.startswith('help?>') or line.startswith('shell>'):
yield start, Generic.Prompt, line[:6]
yield start + 6, Text, line[6:]
output = False
error = False
elif line.startswith(' ') and not output:
insertions.append((len(curcode), [(0, Whitespace, line[:6])]))
curcode += line[6:]
else:
if curcode:
yield from do_insertions(
insertions, jllexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
if line.startswith('ERROR: ') or error:
yield start, Generic.Error, line
error = True
else:
yield start, Generic.Output, line
output = True
start += len(line)
if curcode:
yield from do_insertions(
insertions, jllexer.get_tokens_unprocessed(curcode))
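# A minimal sketch of the REPL transcript format handled above:
#
#     julia> 1 + 1
#     2
#
#     shell> ls
#
# 'julia>' prompts start code chunks, indented continuation lines are joined
# into the same chunk, 'help?>'/'shell>' lines keep their prompt, and other
# lines are Generic.Output (or Generic.Error when they start with "ERROR: ").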
| 11,429 | Python | 37.745763 | 140 | 0.470032 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/j.py | """
pygments.lexers.j
~~~~~~~~~~~~~~~~~
Lexer for the J programming language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Whitespace
__all__ = ['JLexer']
class JLexer(RegexLexer):
"""
For J source code.
.. versionadded:: 2.1
"""
name = 'J'
url = 'http://jsoftware.com/'
aliases = ['j']
filenames = ['*.ijs']
mimetypes = ['text/x-j']
validName = r'\b[a-zA-Z]\w*'
tokens = {
'root': [
# Shebang script
(r'#!.*$', Comment.Preproc),
# Comments
(r'NB\..*', Comment.Single),
(r'(\n+\s*)(Note)', bygroups(Whitespace, Comment.Multiline),
'comment'),
(r'(\s*)(Note.*)', bygroups(Whitespace, Comment.Single)),
# Whitespace
(r'\s+', Whitespace),
# Strings
(r"'", String, 'singlequote'),
# Definitions
(r'0\s+:\s*0', Name.Entity, 'nounDefinition'),
(r'(noun)(\s+)(define)(\s*)$', bygroups(Name.Entity, Whitespace,
Name.Entity, Whitespace), 'nounDefinition'),
(r'([1-4]|13)\s+:\s*0\b',
Name.Function, 'explicitDefinition'),
(r'(adverb|conjunction|dyad|monad|verb)(\s+)(define)\b',
bygroups(Name.Function, Whitespace, Name.Function),
'explicitDefinition'),
# Flow Control
(words(('for_', 'goto_', 'label_'), suffix=validName+r'\.'), Name.Label),
(words((
'assert', 'break', 'case', 'catch', 'catchd',
'catcht', 'continue', 'do', 'else', 'elseif',
'end', 'fcase', 'for', 'if', 'return',
'select', 'throw', 'try', 'while', 'whilst',
), suffix=r'\.'), Name.Label),
# Variable Names
(validName, Name.Variable),
# Standard Library
(words((
'ARGV', 'CR', 'CRLF', 'DEL', 'Debug',
'EAV', 'EMPTY', 'FF', 'JVERSION', 'LF',
'LF2', 'Note', 'TAB', 'alpha17', 'alpha27',
'apply', 'bind', 'boxopen', 'boxxopen', 'bx',
'clear', 'cutLF', 'cutopen', 'datatype', 'def',
'dfh', 'drop', 'each', 'echo', 'empty',
'erase', 'every', 'evtloop', 'exit', 'expand',
'fetch', 'file2url', 'fixdotdot', 'fliprgb', 'getargs',
'getenv', 'hfd', 'inv', 'inverse', 'iospath',
'isatty', 'isutf8', 'items', 'leaf', 'list',
'nameclass', 'namelist', 'names', 'nc',
'nl', 'on', 'pick', 'rows',
'script', 'scriptd', 'sign', 'sminfo', 'smoutput',
'sort', 'split', 'stderr', 'stdin', 'stdout',
'table', 'take', 'timespacex', 'timex', 'tmoutput',
'toCRLF', 'toHOST', 'toJ', 'tolower', 'toupper',
'type', 'ucp', 'ucpcount', 'usleep', 'utf8',
'uucp',
)), Name.Function),
# Copula
(r'=[.:]', Operator),
# Builtins
(r'[-=+*#$%@!~`^&";:.,<>{}\[\]\\|/?]', Operator),
# Short Keywords
(r'[abCdDeEfHiIjLMoprtT]\.', Keyword.Reserved),
(r'[aDiLpqsStux]\:', Keyword.Reserved),
(r'(_[0-9])\:', Keyword.Constant),
# Parens
(r'\(', Punctuation, 'parentheses'),
# Numbers
include('numbers'),
],
'comment': [
(r'[^)]', Comment.Multiline),
(r'^\)', Comment.Multiline, '#pop'),
(r'[)]', Comment.Multiline),
],
'explicitDefinition': [
(r'\b[nmuvxy]\b', Name.Decorator),
include('root'),
(r'[^)]', Name),
(r'^\)', Name.Label, '#pop'),
(r'[)]', Name),
],
'numbers': [
(r'\b_{1,2}\b', Number),
(r'_?\d+(\.\d+)?(\s*[ejr]\s*)_?\d+(\.?=\d+)?', Number),
(r'_?\d+\.(?=\d+)', Number.Float),
(r'_?\d+x', Number.Integer.Long),
(r'_?\d+', Number.Integer),
],
'nounDefinition': [
(r'[^)]+', String),
(r'^\)', Name.Label, '#pop'),
(r'[)]', String),
],
'parentheses': [
(r'\)', Punctuation, '#pop'),
# include('nounDefinition'),
include('explicitDefinition'),
include('root'),
],
'singlequote': [
(r"[^']+", String),
(r"''", String),
(r"'", String, '#pop'),
],
}
| 4,854 | Python | 30.732026 | 85 | 0.425422 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/sgf.py | """
pygments.lexers.sgf
~~~~~~~~~~~~~~~~~~~
Lexer for Smart Game Format (sgf) file format.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Name, Literal, String, Punctuation, Whitespace
__all__ = ["SmartGameFormatLexer"]
class SmartGameFormatLexer(RegexLexer):
"""
Lexer for Smart Game Format (sgf) file format.
The format is used to store game records of board games for two players
    (mainly the game of Go).
.. versionadded:: 2.4
"""
name = 'SmartGameFormat'
url = 'https://www.red-bean.com/sgf/'
aliases = ['sgf']
filenames = ['*.sgf']
tokens = {
'root': [
(r'[():;]+', Punctuation),
# tokens:
(r'(A[BW]|AE|AN|AP|AR|AS|[BW]L|BM|[BW]R|[BW]S|[BW]T|CA|CH|CP|CR|'
r'DD|DM|DO|DT|EL|EV|EX|FF|FG|G[BW]|GC|GM|GN|HA|HO|ID|IP|IT|IY|KM|'
r'KO|LB|LN|LT|L|MA|MN|M|N|OB|OM|ON|OP|OT|OV|P[BW]|PC|PL|PM|RE|RG|'
r'RO|RU|SO|SC|SE|SI|SL|SO|SQ|ST|SU|SZ|T[BW]|TC|TE|TM|TR|UC|US|VW|'
r'V|[BW]|C)',
Name.Builtin),
# number:
(r'(\[)([0-9.]+)(\])',
bygroups(Punctuation, Literal.Number, Punctuation)),
# date:
(r'(\[)([0-9]{4}-[0-9]{2}-[0-9]{2})(\])',
bygroups(Punctuation, Literal.Date, Punctuation)),
# point:
(r'(\[)([a-z]{2})(\])',
bygroups(Punctuation, String, Punctuation)),
# double points:
(r'(\[)([a-z]{2})(:)([a-z]{2})(\])',
bygroups(Punctuation, String, Punctuation, String, Punctuation)),
(r'(\[)([\w\s#()+,\-.:?]+)(\])',
bygroups(Punctuation, String, Punctuation)),
(r'(\[)(\s.*)(\])',
bygroups(Punctuation, Whitespace, Punctuation)),
(r'\s+', Whitespace)
],
}
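# A hedged sample of the property syntax matched above:
#
#     (;FF[4]GM[1]SZ[19]DT[2021-01-01];B[pd];W[dp])
#
# Property identifiers such as FF, GM, SZ, DT, B and W hit the Name.Builtin
# rule, '[0-9.]+' values are numbers, 'YYYY-MM-DD' values are dates, and
# two-letter values such as 'pd' are board points.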
| 1,986 | Python | 31.57377 | 79 | 0.505035 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/maxima.py | """
pygments.lexers.maxima
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the computer algebra system Maxima.
Derived from pygments/lexers/algebra.py.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['MaximaLexer']
class MaximaLexer(RegexLexer):
"""
A Maxima lexer.
Derived from pygments.lexers.MuPADLexer.
.. versionadded:: 2.11
"""
name = 'Maxima'
url = 'http://maxima.sourceforge.net'
aliases = ['maxima', 'macsyma']
filenames = ['*.mac', '*.max']
keywords = ('if', 'then', 'else', 'elseif',
'do', 'while', 'repeat', 'until',
'for', 'from', 'to', 'downto', 'step', 'thru')
constants = ('%pi', '%e', '%phi', '%gamma', '%i',
'und', 'ind', 'infinity', 'inf', 'minf',
'true', 'false', 'unknown', 'done')
operators = (r'.', r':', r'=', r'#',
r'+', r'-', r'*', r'/', r'^',
r'@', r'>', r'<', r'|', r'!', r"'")
operator_words = ('and', 'or', 'not')
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'[,;$]', Punctuation),
(words (constants), Name.Constant),
(words (keywords), Keyword),
(words (operators), Operator),
(words (operator_words), Operator.Word),
(r'''(?x)
((?:[a-zA-Z_#][\w#]*|`[^`]*`)
(?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text.Whitespace, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#%][\w#%]*|`[^`]*`)
(?:::[a-zA-Z_#%][\w#%]*|`[^`]*`)*''', Name.Variable),
(r'[-+]?(\d*\.\d+([bdefls][-+]?\d+)?|\d+(\.\d*)?[bdefls][-+]?\d+)', Number.Float),
(r'[-+]?\d+', Number.Integer),
(r'\s+', Text.Whitespace),
(r'.', Text)
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
def analyse_text (text):
strength = 0.0
# Input expression terminator.
if re.search (r'\$\s*$', text, re.MULTILINE):
strength += 0.05
# Function definition operator.
if ':=' in text:
strength += 0.02
return strength
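# For instance, a typical Maxima file contains definitions such as
#
#     f(x) := x^2 + 1$
#
# where the ':=' operator and the trailing '$' terminator each raise the
# score returned by analyse_text() slightly.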
| 2,716 | Python | 30.593023 | 94 | 0.439617 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/sieve.py | """
pygments.lexers.sieve
~~~~~~~~~~~~~~~~~~~~~
Lexer for Sieve file format.
https://tools.ietf.org/html/rfc5228
https://tools.ietf.org/html/rfc5173
https://tools.ietf.org/html/rfc5229
https://tools.ietf.org/html/rfc5230
https://tools.ietf.org/html/rfc5232
https://tools.ietf.org/html/rfc5235
https://tools.ietf.org/html/rfc5429
https://tools.ietf.org/html/rfc8580
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Name, Literal, String, Text, Punctuation, \
Keyword
__all__ = ["SieveLexer"]
class SieveLexer(RegexLexer):
"""
Lexer for sieve format.
.. versionadded:: 2.6
"""
name = 'Sieve'
filenames = ['*.siv', '*.sieve']
aliases = ['sieve']
tokens = {
'root': [
(r'\s+', Text),
(r'[();,{}\[\]]', Punctuation),
# import:
(r'(?i)require',
Keyword.Namespace),
# tags:
(r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|'
r'count|days|detail|domain|fcc|flags|from|handle|importance|is|'
r'localpart|length|lowerfirst|lower|matches|message|mime|options|'
r'over|percent|quotewildcard|raw|regex|specialuse|subject|text|'
r'under|upperfirst|upper|value)',
bygroups(Name.Tag, Name.Tag)),
# tokens:
(r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|'
r'ereject|exists|false|fileinto|if|hasflag|header|keep|'
r'notify_method_capability|notify|not|redirect|reject|removeflag|'
r'setflag|size|spamtest|stop|string|true|vacation|virustest)',
Name.Builtin),
(r'(?i)set',
Keyword.Declaration),
# number:
(r'([0-9.]+)([kmgKMG])?',
bygroups(Literal.Number, Literal.Number)),
# comment:
(r'#.*$',
Comment.Single),
(r'/\*.*\*/',
Comment.Multiline),
# string:
(r'"[^"]*?"',
String),
# text block:
(r'text:',
Name.Tag, 'text'),
],
'text': [
(r'[^.].*?\n', String),
(r'^\.', Punctuation, "#pop"),
]
}
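# A hedged sample of the syntax covered above:
#
#     require ["fileinto"];
#     if header :contains "subject" "lists" {
#         fileinto "Lists";
#     }
#
# 'require' is Keyword.Namespace, ':contains' matches the tag rule, 'header'
# and 'fileinto' are Name.Builtin, and the quoted values are strings.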
| 2,441 | Python | 29.911392 | 82 | 0.528062 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_scilab_builtins.py | """
pygments.lexers._scilab_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the ScilabLexer.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated
commands_kw = (
'abort',
'apropos',
'break',
'case',
'catch',
'continue',
'do',
'else',
'elseif',
'end',
'endfunction',
'for',
'function',
'help',
'if',
'pause',
'quit',
'select',
'then',
'try',
'while',
)
functions_kw = (
'!!_invoke_',
'%H5Object_e',
'%H5Object_fieldnames',
'%H5Object_p',
'%XMLAttr_6',
'%XMLAttr_e',
'%XMLAttr_i_XMLElem',
'%XMLAttr_length',
'%XMLAttr_p',
'%XMLAttr_size',
'%XMLDoc_6',
'%XMLDoc_e',
'%XMLDoc_i_XMLList',
'%XMLDoc_p',
'%XMLElem_6',
'%XMLElem_e',
'%XMLElem_i_XMLDoc',
'%XMLElem_i_XMLElem',
'%XMLElem_i_XMLList',
'%XMLElem_p',
'%XMLList_6',
'%XMLList_e',
'%XMLList_i_XMLElem',
'%XMLList_i_XMLList',
'%XMLList_length',
'%XMLList_p',
'%XMLList_size',
'%XMLNs_6',
'%XMLNs_e',
'%XMLNs_i_XMLElem',
'%XMLNs_p',
'%XMLSet_6',
'%XMLSet_e',
'%XMLSet_length',
'%XMLSet_p',
'%XMLSet_size',
'%XMLValid_p',
'%_EClass_6',
'%_EClass_e',
'%_EClass_p',
'%_EObj_0',
'%_EObj_1__EObj',
'%_EObj_1_b',
'%_EObj_1_c',
'%_EObj_1_i',
'%_EObj_1_s',
'%_EObj_2__EObj',
'%_EObj_2_b',
'%_EObj_2_c',
'%_EObj_2_i',
'%_EObj_2_s',
'%_EObj_3__EObj',
'%_EObj_3_b',
'%_EObj_3_c',
'%_EObj_3_i',
'%_EObj_3_s',
'%_EObj_4__EObj',
'%_EObj_4_b',
'%_EObj_4_c',
'%_EObj_4_i',
'%_EObj_4_s',
'%_EObj_5',
'%_EObj_6',
'%_EObj_a__EObj',
'%_EObj_a_b',
'%_EObj_a_c',
'%_EObj_a_i',
'%_EObj_a_s',
'%_EObj_d__EObj',
'%_EObj_d_b',
'%_EObj_d_c',
'%_EObj_d_i',
'%_EObj_d_s',
'%_EObj_disp',
'%_EObj_e',
'%_EObj_g__EObj',
'%_EObj_g_b',
'%_EObj_g_c',
'%_EObj_g_i',
'%_EObj_g_s',
'%_EObj_h__EObj',
'%_EObj_h_b',
'%_EObj_h_c',
'%_EObj_h_i',
'%_EObj_h_s',
'%_EObj_i__EObj',
'%_EObj_j__EObj',
'%_EObj_j_b',
'%_EObj_j_c',
'%_EObj_j_i',
'%_EObj_j_s',
'%_EObj_k__EObj',
'%_EObj_k_b',
'%_EObj_k_c',
'%_EObj_k_i',
'%_EObj_k_s',
'%_EObj_l__EObj',
'%_EObj_l_b',
'%_EObj_l_c',
'%_EObj_l_i',
'%_EObj_l_s',
'%_EObj_m__EObj',
'%_EObj_m_b',
'%_EObj_m_c',
'%_EObj_m_i',
'%_EObj_m_s',
'%_EObj_n__EObj',
'%_EObj_n_b',
'%_EObj_n_c',
'%_EObj_n_i',
'%_EObj_n_s',
'%_EObj_o__EObj',
'%_EObj_o_b',
'%_EObj_o_c',
'%_EObj_o_i',
'%_EObj_o_s',
'%_EObj_p',
'%_EObj_p__EObj',
'%_EObj_p_b',
'%_EObj_p_c',
'%_EObj_p_i',
'%_EObj_p_s',
'%_EObj_q__EObj',
'%_EObj_q_b',
'%_EObj_q_c',
'%_EObj_q_i',
'%_EObj_q_s',
'%_EObj_r__EObj',
'%_EObj_r_b',
'%_EObj_r_c',
'%_EObj_r_i',
'%_EObj_r_s',
'%_EObj_s__EObj',
'%_EObj_s_b',
'%_EObj_s_c',
'%_EObj_s_i',
'%_EObj_s_s',
'%_EObj_t',
'%_EObj_x__EObj',
'%_EObj_x_b',
'%_EObj_x_c',
'%_EObj_x_i',
'%_EObj_x_s',
'%_EObj_y__EObj',
'%_EObj_y_b',
'%_EObj_y_c',
'%_EObj_y_i',
'%_EObj_y_s',
'%_EObj_z__EObj',
'%_EObj_z_b',
'%_EObj_z_c',
'%_EObj_z_i',
'%_EObj_z_s',
'%_eigs',
'%_load',
'%b_1__EObj',
'%b_2__EObj',
'%b_3__EObj',
'%b_4__EObj',
'%b_a__EObj',
'%b_d__EObj',
'%b_g__EObj',
'%b_h__EObj',
'%b_i_XMLList',
'%b_i__EObj',
'%b_j__EObj',
'%b_k__EObj',
'%b_l__EObj',
'%b_m__EObj',
'%b_n__EObj',
'%b_o__EObj',
'%b_p__EObj',
'%b_q__EObj',
'%b_r__EObj',
'%b_s__EObj',
'%b_x__EObj',
'%b_y__EObj',
'%b_z__EObj',
'%c_1__EObj',
'%c_2__EObj',
'%c_3__EObj',
'%c_4__EObj',
'%c_a__EObj',
'%c_d__EObj',
'%c_g__EObj',
'%c_h__EObj',
'%c_i_XMLAttr',
'%c_i_XMLDoc',
'%c_i_XMLElem',
'%c_i_XMLList',
'%c_i__EObj',
'%c_j__EObj',
'%c_k__EObj',
'%c_l__EObj',
'%c_m__EObj',
'%c_n__EObj',
'%c_o__EObj',
'%c_p__EObj',
'%c_q__EObj',
'%c_r__EObj',
'%c_s__EObj',
'%c_x__EObj',
'%c_y__EObj',
'%c_z__EObj',
'%ce_i_XMLList',
'%fptr_i_XMLList',
'%h_i_XMLList',
'%hm_i_XMLList',
'%i_1__EObj',
'%i_2__EObj',
'%i_3__EObj',
'%i_4__EObj',
'%i_a__EObj',
'%i_abs',
'%i_cumprod',
'%i_cumsum',
'%i_d__EObj',
'%i_diag',
'%i_g__EObj',
'%i_h__EObj',
'%i_i_XMLList',
'%i_i__EObj',
'%i_j__EObj',
'%i_k__EObj',
'%i_l__EObj',
'%i_m__EObj',
'%i_matrix',
'%i_max',
'%i_maxi',
'%i_min',
'%i_mini',
'%i_mput',
'%i_n__EObj',
'%i_o__EObj',
'%i_p',
'%i_p__EObj',
'%i_prod',
'%i_q__EObj',
'%i_r__EObj',
'%i_s__EObj',
'%i_sum',
'%i_tril',
'%i_triu',
'%i_x__EObj',
'%i_y__EObj',
'%i_z__EObj',
'%ip_i_XMLList',
'%l_i_XMLList',
'%l_i__EObj',
'%lss_i_XMLList',
'%mc_i_XMLList',
'%msp_full',
'%msp_i_XMLList',
'%msp_spget',
'%p_i_XMLList',
'%ptr_i_XMLList',
'%r_i_XMLList',
'%s_1__EObj',
'%s_2__EObj',
'%s_3__EObj',
'%s_4__EObj',
'%s_a__EObj',
'%s_d__EObj',
'%s_g__EObj',
'%s_h__EObj',
'%s_i_XMLList',
'%s_i__EObj',
'%s_j__EObj',
'%s_k__EObj',
'%s_l__EObj',
'%s_m__EObj',
'%s_n__EObj',
'%s_o__EObj',
'%s_p__EObj',
'%s_q__EObj',
'%s_r__EObj',
'%s_s__EObj',
'%s_x__EObj',
'%s_y__EObj',
'%s_z__EObj',
'%sp_i_XMLList',
'%spb_i_XMLList',
'%st_i_XMLList',
'Calendar',
'ClipBoard',
'Matplot',
'Matplot1',
'PlaySound',
'TCL_DeleteInterp',
'TCL_DoOneEvent',
'TCL_EvalFile',
'TCL_EvalStr',
'TCL_ExistArray',
'TCL_ExistInterp',
'TCL_ExistVar',
'TCL_GetVar',
'TCL_GetVersion',
'TCL_SetVar',
'TCL_UnsetVar',
'TCL_UpVar',
'_',
'_code2str',
'_d',
'_str2code',
'about',
'abs',
'acos',
'addModulePreferences',
'addcolor',
'addf',
'addhistory',
'addinter',
'addlocalizationdomain',
'amell',
'and',
'argn',
'arl2_ius',
'ascii',
'asin',
'atan',
'backslash',
'balanc',
'banner',
'base2dec',
'basename',
'bdiag',
'beep',
'besselh',
'besseli',
'besselj',
'besselk',
'bessely',
'beta',
'bezout',
'bfinit',
'blkfc1i',
'blkslvi',
'bool2s',
'browsehistory',
'browsevar',
'bsplin3val',
'buildDoc',
'buildouttb',
'bvode',
'c_link',
'call',
'callblk',
'captions',
'cd',
'cdfbet',
'cdfbin',
'cdfchi',
'cdfchn',
'cdff',
'cdffnc',
'cdfgam',
'cdfnbn',
'cdfnor',
'cdfpoi',
'cdft',
'ceil',
'champ',
'champ1',
'chdir',
'chol',
'clc',
'clean',
'clear',
'clearfun',
'clearglobal',
'closeEditor',
'closeEditvar',
'closeXcos',
'code2str',
'coeff',
'color',
'comp',
'completion',
'conj',
'contour2di',
'contr',
'conv2',
'convstr',
'copy',
'copyfile',
'corr',
'cos',
'coserror',
'createdir',
'cshep2d',
'csvDefault',
'csvIsnum',
'csvRead',
'csvStringToDouble',
'csvTextScan',
'csvWrite',
'ctree2',
'ctree3',
'ctree4',
'cumprod',
'cumsum',
'curblock',
'curblockc',
'daskr',
'dasrt',
'dassl',
'data2sig',
'datatipCreate',
'datatipManagerMode',
'datatipMove',
'datatipRemove',
'datatipSetDisplay',
'datatipSetInterp',
'datatipSetOrientation',
'datatipSetStyle',
'datatipToggle',
'dawson',
'dct',
'debug',
'dec2base',
'deff',
'definedfields',
'degree',
'delbpt',
'delete',
'deletefile',
'delip',
'delmenu',
'det',
'dgettext',
'dhinf',
'diag',
'diary',
'diffobjs',
'disp',
'dispbpt',
'displayhistory',
'disposefftwlibrary',
'dlgamma',
'dnaupd',
'dneupd',
'double',
'drawaxis',
'drawlater',
'drawnow',
'driver',
'dsaupd',
'dsearch',
'dseupd',
'dst',
'duplicate',
'editvar',
'emptystr',
'end_scicosim',
'ereduc',
'erf',
'erfc',
'erfcx',
'erfi',
'errcatch',
'errclear',
'error',
'eval_cshep2d',
'exec',
'execstr',
'exists',
'exit',
'exp',
'expm',
'exportUI',
'export_to_hdf5',
'eye',
'fadj2sp',
'fec',
'feval',
'fft',
'fftw',
'fftw_flags',
'fftw_forget_wisdom',
'fftwlibraryisloaded',
'figure',
'file',
'filebrowser',
'fileext',
'fileinfo',
'fileparts',
'filesep',
'find',
'findBD',
'findfiles',
'fire_closing_finished',
'floor',
'format',
'fort',
'fprintfMat',
'freq',
'frexp',
'fromc',
'fromjava',
'fscanfMat',
'fsolve',
'fstair',
'full',
'fullpath',
'funcprot',
'funptr',
'gamma',
'gammaln',
'geom3d',
'get',
'getURL',
'get_absolute_file_path',
'get_fftw_wisdom',
'getblocklabel',
'getcallbackobject',
'getdate',
'getdebuginfo',
'getdefaultlanguage',
'getdrives',
'getdynlibext',
'getenv',
'getfield',
'gethistory',
'gethistoryfile',
'getinstalledlookandfeels',
'getio',
'getlanguage',
'getlongpathname',
'getlookandfeel',
'getmd5',
'getmemory',
'getmodules',
'getos',
'getpid',
'getrelativefilename',
'getscicosvars',
'getscilabmode',
'getshortpathname',
'gettext',
'getvariablesonstack',
'getversion',
'glist',
'global',
'glue',
'grand',
'graphicfunction',
'grayplot',
'grep',
'gsort',
'gstacksize',
'h5attr',
'h5close',
'h5cp',
'h5dataset',
'h5dump',
'h5exists',
'h5flush',
'h5get',
'h5group',
'h5isArray',
'h5isAttr',
'h5isCompound',
'h5isFile',
'h5isGroup',
'h5isList',
'h5isRef',
'h5isSet',
'h5isSpace',
'h5isType',
'h5isVlen',
'h5label',
'h5ln',
'h5ls',
'h5mount',
'h5mv',
'h5open',
'h5read',
'h5readattr',
'h5rm',
'h5umount',
'h5write',
'h5writeattr',
'havewindow',
'helpbrowser',
'hess',
'hinf',
'historymanager',
'historysize',
'host',
'htmlDump',
'htmlRead',
'htmlReadStr',
'htmlWrite',
'iconvert',
'ieee',
'ilib_verbose',
'imag',
'impl',
'import_from_hdf5',
'imult',
'inpnvi',
'int',
'int16',
'int2d',
'int32',
'int3d',
'int8',
'interp',
'interp2d',
'interp3d',
'intg',
'intppty',
'inttype',
'inv',
'invoke_lu',
'is_handle_valid',
'is_hdf5_file',
'isalphanum',
'isascii',
'isdef',
'isdigit',
'isdir',
'isequal',
'isequalbitwise',
'iserror',
'isfile',
'isglobal',
'isletter',
'isnum',
'isreal',
'iswaitingforinput',
'jallowClassReloading',
'jarray',
'jautoTranspose',
'jautoUnwrap',
'javaclasspath',
'javalibrarypath',
'jcast',
'jcompile',
'jconvMatrixMethod',
'jcreatejar',
'jdeff',
'jdisableTrace',
'jenableTrace',
'jexists',
'jgetclassname',
'jgetfield',
'jgetfields',
'jgetinfo',
'jgetmethods',
'jimport',
'jinvoke',
'jinvoke_db',
'jnewInstance',
'jremove',
'jsetfield',
'junwrap',
'junwraprem',
'jwrap',
'jwrapinfloat',
'kron',
'lasterror',
'ldiv',
'ldivf',
'legendre',
'length',
'lib',
'librarieslist',
'libraryinfo',
'light',
'linear_interpn',
'lines',
'link',
'linmeq',
'list',
'listvar_in_hdf5',
'load',
'loadGui',
'loadScicos',
'loadXcos',
'loadfftwlibrary',
'loadhistory',
'log',
'log1p',
'lsq',
'lsq_splin',
'lsqrsolve',
'lsslist',
'lstcat',
'lstsize',
'ltitr',
'lu',
'ludel',
'lufact',
'luget',
'lusolve',
'macr2lst',
'macr2tree',
'matfile_close',
'matfile_listvar',
'matfile_open',
'matfile_varreadnext',
'matfile_varwrite',
'matrix',
'max',
'maxfiles',
'mclearerr',
'mclose',
'meof',
'merror',
'messagebox',
'mfprintf',
'mfscanf',
'mget',
'mgeti',
'mgetl',
'mgetstr',
'min',
'mlist',
'mode',
'model2blk',
'mopen',
'move',
'movefile',
'mprintf',
'mput',
'mputl',
'mputstr',
'mscanf',
'mseek',
'msprintf',
'msscanf',
'mtell',
'mtlb_mode',
'mtlb_sparse',
'mucomp',
'mulf',
'name2rgb',
'nearfloat',
'newaxes',
'newest',
'newfun',
'nnz',
'norm',
'notify',
'number_properties',
'ode',
'odedc',
'ones',
'openged',
'opentk',
'optim',
'or',
'ordmmd',
'parallel_concurrency',
'parallel_run',
'param3d',
'param3d1',
'part',
'pathconvert',
'pathsep',
'phase_simulation',
'plot2d',
'plot2d1',
'plot2d2',
'plot2d3',
'plot2d4',
'plot3d',
'plot3d1',
'plotbrowser',
'pointer_xproperty',
'poly',
'ppol',
'pppdiv',
'predef',
'preferences',
'print',
'printf',
'printfigure',
'printsetupbox',
'prod',
'progressionbar',
'prompt',
'pwd',
'qld',
'qp_solve',
'qr',
'raise_window',
'rand',
'rankqr',
'rat',
'rcond',
'rdivf',
'read',
'read4b',
'read_csv',
'readb',
'readgateway',
'readmps',
'real',
'realtime',
'realtimeinit',
'regexp',
'relocate_handle',
'remez',
'removeModulePreferences',
'removedir',
'removelinehistory',
'res_with_prec',
'resethistory',
'residu',
'resume',
'return',
'ricc',
'rlist',
'roots',
'rotate_axes',
'round',
'rpem',
'rtitr',
'rubberbox',
'save',
'saveGui',
'saveafterncommands',
'saveconsecutivecommands',
'savehistory',
'schur',
'sci_haltscicos',
'sci_tree2',
'sci_tree3',
'sci_tree4',
'sciargs',
'scicos_debug',
'scicos_debug_count',
'scicos_time',
'scicosim',
'scinotes',
'sctree',
'semidef',
'set',
'set_blockerror',
'set_fftw_wisdom',
'set_xproperty',
'setbpt',
'setdefaultlanguage',
'setenv',
'setfield',
'sethistoryfile',
'setlanguage',
'setlookandfeel',
'setmenu',
'sfact',
'sfinit',
'show_window',
'sident',
'sig2data',
'sign',
'simp',
'simp_mode',
'sin',
'size',
'slash',
'sleep',
'sorder',
'sparse',
'spchol',
'spcompack',
'spec',
'spget',
'splin',
'splin2d',
'splin3d',
'splitURL',
'spones',
'sprintf',
'sqrt',
'stacksize',
'str2code',
'strcat',
'strchr',
'strcmp',
'strcspn',
'strindex',
'string',
'stringbox',
'stripblanks',
'strncpy',
'strrchr',
'strrev',
'strsplit',
'strspn',
'strstr',
'strsubst',
'strtod',
'strtok',
'subf',
'sum',
'svd',
'swap_handles',
'symfcti',
'syredi',
'system_getproperty',
'system_setproperty',
'ta2lpd',
'tan',
'taucs_chdel',
'taucs_chfact',
'taucs_chget',
'taucs_chinfo',
'taucs_chsolve',
'tempname',
'testmatrix',
'timer',
'tlist',
'tohome',
'tokens',
'toolbar',
'toprint',
'tr_zer',
'tril',
'triu',
'type',
'typename',
'uiDisplayTree',
'uicontextmenu',
'uicontrol',
'uigetcolor',
'uigetdir',
'uigetfile',
'uigetfont',
'uimenu',
'uint16',
'uint32',
'uint8',
'uipopup',
'uiputfile',
'uiwait',
'ulink',
'umf_ludel',
'umf_lufact',
'umf_luget',
'umf_luinfo',
'umf_lusolve',
'umfpack',
'unglue',
'unix',
'unsetmenu',
'unzoom',
'updatebrowsevar',
'usecanvas',
'useeditor',
'user',
'var2vec',
'varn',
'vec2var',
'waitbar',
'warnBlockByUID',
'warning',
'what',
'where',
'whereis',
'who',
'winsid',
'with_module',
'writb',
'write',
'write4b',
'write_csv',
'x_choose',
'x_choose_modeless',
'x_dialog',
'x_mdialog',
'xarc',
'xarcs',
'xarrows',
'xchange',
'xchoicesi',
'xclick',
'xcos',
'xcosAddToolsMenu',
'xcosConfigureXmlFile',
'xcosDiagramToScilab',
'xcosPalCategoryAdd',
'xcosPalDelete',
'xcosPalDisable',
'xcosPalEnable',
'xcosPalGenerateIcon',
'xcosPalGet',
'xcosPalLoad',
'xcosPalMove',
'xcosSimulationStarted',
'xcosUpdateBlock',
'xdel',
'xend',
'xfarc',
'xfarcs',
'xfpoly',
'xfpolys',
'xfrect',
'xget',
'xgetmouse',
'xgraduate',
'xgrid',
'xinit',
'xlfont',
'xls_open',
'xls_read',
'xmlAddNs',
'xmlAppend',
'xmlAsNumber',
'xmlAsText',
'xmlDTD',
'xmlDelete',
'xmlDocument',
'xmlDump',
'xmlElement',
'xmlFormat',
'xmlGetNsByHref',
'xmlGetNsByPrefix',
'xmlGetOpenDocs',
'xmlIsValidObject',
'xmlName',
'xmlNs',
'xmlRead',
'xmlReadStr',
'xmlRelaxNG',
'xmlRemove',
'xmlSchema',
'xmlSetAttributes',
'xmlValidate',
'xmlWrite',
'xmlXPath',
'xname',
'xpause',
'xpoly',
'xpolys',
'xrect',
'xrects',
'xs2bmp',
'xs2emf',
'xs2eps',
'xs2gif',
'xs2jpg',
'xs2pdf',
'xs2png',
'xs2ppm',
'xs2ps',
'xs2svg',
'xsegs',
'xset',
'xstring',
'xstringb',
'xtitle',
'zeros',
'znaupd',
'zneupd',
'zoom_rect',
)
macros_kw = (
'!_deff_wrapper',
'%0_i_st',
'%3d_i_h',
'%Block_xcosUpdateBlock',
'%TNELDER_p',
'%TNELDER_string',
'%TNMPLOT_p',
'%TNMPLOT_string',
'%TOPTIM_p',
'%TOPTIM_string',
'%TSIMPLEX_p',
'%TSIMPLEX_string',
'%_EVoid_p',
'%_gsort',
'%_listvarinfile',
'%_rlist',
'%_save',
'%_sodload',
'%_strsplit',
'%_unwrap',
'%ar_p',
'%asn',
'%b_a_b',
'%b_a_s',
'%b_c_s',
'%b_c_spb',
'%b_cumprod',
'%b_cumsum',
'%b_d_s',
'%b_diag',
'%b_e',
'%b_f_s',
'%b_f_spb',
'%b_g_s',
'%b_g_spb',
'%b_grand',
'%b_h_s',
'%b_h_spb',
'%b_i_b',
'%b_i_ce',
'%b_i_h',
'%b_i_hm',
'%b_i_s',
'%b_i_sp',
'%b_i_spb',
'%b_i_st',
'%b_iconvert',
'%b_l_b',
'%b_l_s',
'%b_m_b',
'%b_m_s',
'%b_matrix',
'%b_n_hm',
'%b_o_hm',
'%b_p_s',
'%b_prod',
'%b_r_b',
'%b_r_s',
'%b_s_b',
'%b_s_s',
'%b_string',
'%b_sum',
'%b_tril',
'%b_triu',
'%b_x_b',
'%b_x_s',
'%bicg',
'%bicgstab',
'%c_a_c',
'%c_b_c',
'%c_b_s',
'%c_diag',
'%c_dsearch',
'%c_e',
'%c_eye',
'%c_f_s',
'%c_grand',
'%c_i_c',
'%c_i_ce',
'%c_i_h',
'%c_i_hm',
'%c_i_lss',
'%c_i_r',
'%c_i_s',
'%c_i_st',
'%c_matrix',
'%c_n_l',
'%c_n_st',
'%c_o_l',
'%c_o_st',
'%c_ones',
'%c_rand',
'%c_tril',
'%c_triu',
'%cblock_c_cblock',
'%cblock_c_s',
'%cblock_e',
'%cblock_f_cblock',
'%cblock_p',
'%cblock_size',
'%ce_6',
'%ce_c_ce',
'%ce_e',
'%ce_f_ce',
'%ce_i_ce',
'%ce_i_s',
'%ce_i_st',
'%ce_matrix',
'%ce_p',
'%ce_size',
'%ce_string',
'%ce_t',
'%cgs',
'%champdat_i_h',
'%choose',
'%diagram_xcos',
'%dir_p',
'%fptr_i_st',
'%grand_perm',
'%grayplot_i_h',
'%h_i_st',
'%hmS_k_hmS_generic',
'%hm_1_hm',
'%hm_1_s',
'%hm_2_hm',
'%hm_2_s',
'%hm_3_hm',
'%hm_3_s',
'%hm_4_hm',
'%hm_4_s',
'%hm_5',
'%hm_a_hm',
'%hm_a_r',
'%hm_a_s',
'%hm_abs',
'%hm_and',
'%hm_bool2s',
'%hm_c_hm',
'%hm_ceil',
'%hm_conj',
'%hm_cos',
'%hm_cumprod',
'%hm_cumsum',
'%hm_d_hm',
'%hm_d_s',
'%hm_degree',
'%hm_dsearch',
'%hm_e',
'%hm_exp',
'%hm_eye',
'%hm_f_hm',
'%hm_find',
'%hm_floor',
'%hm_g_hm',
'%hm_grand',
'%hm_gsort',
'%hm_h_hm',
'%hm_i_b',
'%hm_i_ce',
'%hm_i_h',
'%hm_i_hm',
'%hm_i_i',
'%hm_i_p',
'%hm_i_r',
'%hm_i_s',
'%hm_i_st',
'%hm_iconvert',
'%hm_imag',
'%hm_int',
'%hm_isnan',
'%hm_isreal',
'%hm_j_hm',
'%hm_j_s',
'%hm_k_hm',
'%hm_k_s',
'%hm_log',
'%hm_m_p',
'%hm_m_r',
'%hm_m_s',
'%hm_matrix',
'%hm_max',
'%hm_mean',
'%hm_median',
'%hm_min',
'%hm_n_b',
'%hm_n_c',
'%hm_n_hm',
'%hm_n_i',
'%hm_n_p',
'%hm_n_s',
'%hm_o_b',
'%hm_o_c',
'%hm_o_hm',
'%hm_o_i',
'%hm_o_p',
'%hm_o_s',
'%hm_ones',
'%hm_or',
'%hm_p',
'%hm_prod',
'%hm_q_hm',
'%hm_r_s',
'%hm_rand',
'%hm_real',
'%hm_round',
'%hm_s',
'%hm_s_hm',
'%hm_s_r',
'%hm_s_s',
'%hm_sign',
'%hm_sin',
'%hm_size',
'%hm_sqrt',
'%hm_stdev',
'%hm_string',
'%hm_sum',
'%hm_x_hm',
'%hm_x_p',
'%hm_x_s',
'%hm_zeros',
'%i_1_s',
'%i_2_s',
'%i_3_s',
'%i_4_s',
'%i_Matplot',
'%i_a_i',
'%i_a_s',
'%i_and',
'%i_ascii',
'%i_b_s',
'%i_bezout',
'%i_champ',
'%i_champ1',
'%i_contour',
'%i_contour2d',
'%i_d_i',
'%i_d_s',
'%i_dsearch',
'%i_e',
'%i_fft',
'%i_g_i',
'%i_gcd',
'%i_grand',
'%i_h_i',
'%i_i_ce',
'%i_i_h',
'%i_i_hm',
'%i_i_i',
'%i_i_s',
'%i_i_st',
'%i_j_i',
'%i_j_s',
'%i_l_s',
'%i_lcm',
'%i_length',
'%i_m_i',
'%i_m_s',
'%i_mfprintf',
'%i_mprintf',
'%i_msprintf',
'%i_n_s',
'%i_o_s',
'%i_or',
'%i_p_i',
'%i_p_s',
'%i_plot2d',
'%i_plot2d1',
'%i_plot2d2',
'%i_q_s',
'%i_r_i',
'%i_r_s',
'%i_round',
'%i_s_i',
'%i_s_s',
'%i_sign',
'%i_string',
'%i_x_i',
'%i_x_s',
'%ip_a_s',
'%ip_i_st',
'%ip_m_s',
'%ip_n_ip',
'%ip_o_ip',
'%ip_p',
'%ip_part',
'%ip_s_s',
'%ip_string',
'%k',
'%l_i_h',
'%l_i_s',
'%l_i_st',
'%l_isequal',
'%l_n_c',
'%l_n_l',
'%l_n_m',
'%l_n_p',
'%l_n_s',
'%l_n_st',
'%l_o_c',
'%l_o_l',
'%l_o_m',
'%l_o_p',
'%l_o_s',
'%l_o_st',
'%lss_a_lss',
'%lss_a_p',
'%lss_a_r',
'%lss_a_s',
'%lss_c_lss',
'%lss_c_p',
'%lss_c_r',
'%lss_c_s',
'%lss_e',
'%lss_eye',
'%lss_f_lss',
'%lss_f_p',
'%lss_f_r',
'%lss_f_s',
'%lss_i_ce',
'%lss_i_lss',
'%lss_i_p',
'%lss_i_r',
'%lss_i_s',
'%lss_i_st',
'%lss_inv',
'%lss_l_lss',
'%lss_l_p',
'%lss_l_r',
'%lss_l_s',
'%lss_m_lss',
'%lss_m_p',
'%lss_m_r',
'%lss_m_s',
'%lss_n_lss',
'%lss_n_p',
'%lss_n_r',
'%lss_n_s',
'%lss_norm',
'%lss_o_lss',
'%lss_o_p',
'%lss_o_r',
'%lss_o_s',
'%lss_ones',
'%lss_r_lss',
'%lss_r_p',
'%lss_r_r',
'%lss_r_s',
'%lss_rand',
'%lss_s',
'%lss_s_lss',
'%lss_s_p',
'%lss_s_r',
'%lss_s_s',
'%lss_size',
'%lss_t',
'%lss_v_lss',
'%lss_v_p',
'%lss_v_r',
'%lss_v_s',
'%lt_i_s',
'%m_n_l',
'%m_o_l',
'%mc_i_h',
'%mc_i_s',
'%mc_i_st',
'%mc_n_st',
'%mc_o_st',
'%mc_string',
'%mps_p',
'%mps_string',
'%msp_a_s',
'%msp_abs',
'%msp_e',
'%msp_find',
'%msp_i_s',
'%msp_i_st',
'%msp_length',
'%msp_m_s',
'%msp_maxi',
'%msp_n_msp',
'%msp_nnz',
'%msp_o_msp',
'%msp_p',
'%msp_sparse',
'%msp_spones',
'%msp_t',
'%p_a_lss',
'%p_a_r',
'%p_c_lss',
'%p_c_r',
'%p_cumprod',
'%p_cumsum',
'%p_d_p',
'%p_d_r',
'%p_d_s',
'%p_det',
'%p_e',
'%p_f_lss',
'%p_f_r',
'%p_grand',
'%p_i_ce',
'%p_i_h',
'%p_i_hm',
'%p_i_lss',
'%p_i_p',
'%p_i_r',
'%p_i_s',
'%p_i_st',
'%p_inv',
'%p_j_s',
'%p_k_p',
'%p_k_r',
'%p_k_s',
'%p_l_lss',
'%p_l_p',
'%p_l_r',
'%p_l_s',
'%p_m_hm',
'%p_m_lss',
'%p_m_r',
'%p_matrix',
'%p_n_l',
'%p_n_lss',
'%p_n_r',
'%p_o_l',
'%p_o_lss',
'%p_o_r',
'%p_o_sp',
'%p_p_s',
'%p_part',
'%p_prod',
'%p_q_p',
'%p_q_r',
'%p_q_s',
'%p_r_lss',
'%p_r_p',
'%p_r_r',
'%p_r_s',
'%p_s_lss',
'%p_s_r',
'%p_simp',
'%p_string',
'%p_sum',
'%p_v_lss',
'%p_v_p',
'%p_v_r',
'%p_v_s',
'%p_x_hm',
'%p_x_r',
'%p_y_p',
'%p_y_r',
'%p_y_s',
'%p_z_p',
'%p_z_r',
'%p_z_s',
'%pcg',
'%plist_p',
'%plist_string',
'%r_0',
'%r_a_hm',
'%r_a_lss',
'%r_a_p',
'%r_a_r',
'%r_a_s',
'%r_c_lss',
'%r_c_p',
'%r_c_r',
'%r_c_s',
'%r_clean',
'%r_cumprod',
'%r_cumsum',
'%r_d_p',
'%r_d_r',
'%r_d_s',
'%r_det',
'%r_diag',
'%r_e',
'%r_eye',
'%r_f_lss',
'%r_f_p',
'%r_f_r',
'%r_f_s',
'%r_i_ce',
'%r_i_hm',
'%r_i_lss',
'%r_i_p',
'%r_i_r',
'%r_i_s',
'%r_i_st',
'%r_inv',
'%r_j_s',
'%r_k_p',
'%r_k_r',
'%r_k_s',
'%r_l_lss',
'%r_l_p',
'%r_l_r',
'%r_l_s',
'%r_m_hm',
'%r_m_lss',
'%r_m_p',
'%r_m_r',
'%r_m_s',
'%r_matrix',
'%r_n_lss',
'%r_n_p',
'%r_n_r',
'%r_n_s',
'%r_norm',
'%r_o_lss',
'%r_o_p',
'%r_o_r',
'%r_o_s',
'%r_ones',
'%r_p',
'%r_p_s',
'%r_prod',
'%r_q_p',
'%r_q_r',
'%r_q_s',
'%r_r_lss',
'%r_r_p',
'%r_r_r',
'%r_r_s',
'%r_rand',
'%r_s',
'%r_s_hm',
'%r_s_lss',
'%r_s_p',
'%r_s_r',
'%r_s_s',
'%r_simp',
'%r_size',
'%r_string',
'%r_sum',
'%r_t',
'%r_tril',
'%r_triu',
'%r_v_lss',
'%r_v_p',
'%r_v_r',
'%r_v_s',
'%r_varn',
'%r_x_p',
'%r_x_r',
'%r_x_s',
'%r_y_p',
'%r_y_r',
'%r_y_s',
'%r_z_p',
'%r_z_r',
'%r_z_s',
'%s_1_hm',
'%s_1_i',
'%s_2_hm',
'%s_2_i',
'%s_3_hm',
'%s_3_i',
'%s_4_hm',
'%s_4_i',
'%s_5',
'%s_a_b',
'%s_a_hm',
'%s_a_i',
'%s_a_ip',
'%s_a_lss',
'%s_a_msp',
'%s_a_r',
'%s_a_sp',
'%s_and',
'%s_b_i',
'%s_b_s',
'%s_bezout',
'%s_c_b',
'%s_c_cblock',
'%s_c_lss',
'%s_c_r',
'%s_c_sp',
'%s_d_b',
'%s_d_i',
'%s_d_p',
'%s_d_r',
'%s_d_sp',
'%s_e',
'%s_f_b',
'%s_f_cblock',
'%s_f_lss',
'%s_f_r',
'%s_f_sp',
'%s_g_b',
'%s_g_s',
'%s_gcd',
'%s_grand',
'%s_h_b',
'%s_h_s',
'%s_i_b',
'%s_i_c',
'%s_i_ce',
'%s_i_h',
'%s_i_hm',
'%s_i_i',
'%s_i_lss',
'%s_i_p',
'%s_i_r',
'%s_i_s',
'%s_i_sp',
'%s_i_spb',
'%s_i_st',
'%s_j_i',
'%s_k_hm',
'%s_k_p',
'%s_k_r',
'%s_k_sp',
'%s_l_b',
'%s_l_hm',
'%s_l_i',
'%s_l_lss',
'%s_l_p',
'%s_l_r',
'%s_l_s',
'%s_l_sp',
'%s_lcm',
'%s_m_b',
'%s_m_hm',
'%s_m_i',
'%s_m_ip',
'%s_m_lss',
'%s_m_msp',
'%s_m_r',
'%s_matrix',
'%s_n_hm',
'%s_n_i',
'%s_n_l',
'%s_n_lss',
'%s_n_r',
'%s_n_st',
'%s_o_hm',
'%s_o_i',
'%s_o_l',
'%s_o_lss',
'%s_o_r',
'%s_o_st',
'%s_or',
'%s_p_b',
'%s_p_i',
'%s_pow',
'%s_q_hm',
'%s_q_i',
'%s_q_p',
'%s_q_r',
'%s_q_sp',
'%s_r_b',
'%s_r_i',
'%s_r_lss',
'%s_r_p',
'%s_r_r',
'%s_r_s',
'%s_r_sp',
'%s_s_b',
'%s_s_hm',
'%s_s_i',
'%s_s_ip',
'%s_s_lss',
'%s_s_r',
'%s_s_sp',
'%s_simp',
'%s_v_lss',
'%s_v_p',
'%s_v_r',
'%s_v_s',
'%s_x_b',
'%s_x_hm',
'%s_x_i',
'%s_x_r',
'%s_y_p',
'%s_y_r',
'%s_y_sp',
'%s_z_p',
'%s_z_r',
'%s_z_sp',
'%sn',
'%sp_a_s',
'%sp_a_sp',
'%sp_and',
'%sp_c_s',
'%sp_ceil',
'%sp_conj',
'%sp_cos',
'%sp_cumprod',
'%sp_cumsum',
'%sp_d_s',
'%sp_d_sp',
'%sp_det',
'%sp_diag',
'%sp_e',
'%sp_exp',
'%sp_f_s',
'%sp_floor',
'%sp_grand',
'%sp_gsort',
'%sp_i_ce',
'%sp_i_h',
'%sp_i_s',
'%sp_i_sp',
'%sp_i_st',
'%sp_int',
'%sp_inv',
'%sp_k_s',
'%sp_k_sp',
'%sp_l_s',
'%sp_l_sp',
'%sp_length',
'%sp_max',
'%sp_min',
'%sp_norm',
'%sp_or',
'%sp_p_s',
'%sp_prod',
'%sp_q_s',
'%sp_q_sp',
'%sp_r_s',
'%sp_r_sp',
'%sp_round',
'%sp_s_s',
'%sp_s_sp',
'%sp_sin',
'%sp_sqrt',
'%sp_string',
'%sp_sum',
'%sp_tril',
'%sp_triu',
'%sp_y_s',
'%sp_y_sp',
'%sp_z_s',
'%sp_z_sp',
'%spb_and',
'%spb_c_b',
'%spb_cumprod',
'%spb_cumsum',
'%spb_diag',
'%spb_e',
'%spb_f_b',
'%spb_g_b',
'%spb_g_spb',
'%spb_h_b',
'%spb_h_spb',
'%spb_i_b',
'%spb_i_ce',
'%spb_i_h',
'%spb_i_st',
'%spb_or',
'%spb_prod',
'%spb_sum',
'%spb_tril',
'%spb_triu',
'%st_6',
'%st_c_st',
'%st_e',
'%st_f_st',
'%st_i_b',
'%st_i_c',
'%st_i_fptr',
'%st_i_h',
'%st_i_i',
'%st_i_ip',
'%st_i_lss',
'%st_i_msp',
'%st_i_p',
'%st_i_r',
'%st_i_s',
'%st_i_sp',
'%st_i_spb',
'%st_i_st',
'%st_matrix',
'%st_n_c',
'%st_n_l',
'%st_n_mc',
'%st_n_p',
'%st_n_s',
'%st_o_c',
'%st_o_l',
'%st_o_mc',
'%st_o_p',
'%st_o_s',
'%st_o_tl',
'%st_p',
'%st_size',
'%st_string',
'%st_t',
'%ticks_i_h',
'%xls_e',
'%xls_p',
'%xlssheet_e',
'%xlssheet_p',
'%xlssheet_size',
'%xlssheet_string',
'DominationRank',
'G_make',
'IsAScalar',
'NDcost',
'OS_Version',
'PlotSparse',
'ReadHBSparse',
'TCL_CreateSlave',
'abcd',
'abinv',
'accept_func_default',
'accept_func_vfsa',
'acf',
'acosd',
'acosh',
'acoshm',
'acosm',
'acot',
'acotd',
'acoth',
'acsc',
'acscd',
'acsch',
'add_demo',
'add_help_chapter',
'add_module_help_chapter',
'add_param',
'add_profiling',
'adj2sp',
'aff2ab',
'ana_style',
'analpf',
'analyze',
'aplat',
'arhnk',
'arl2',
'arma2p',
'arma2ss',
'armac',
'armax',
'armax1',
'arobasestring2strings',
'arsimul',
'ascii2string',
'asciimat',
'asec',
'asecd',
'asech',
'asind',
'asinh',
'asinhm',
'asinm',
'assert_checkalmostequal',
'assert_checkequal',
'assert_checkerror',
'assert_checkfalse',
'assert_checkfilesequal',
'assert_checktrue',
'assert_comparecomplex',
'assert_computedigits',
'assert_cond2reltol',
'assert_cond2reqdigits',
'assert_generror',
'atand',
'atanh',
'atanhm',
'atanm',
'atomsAutoload',
'atomsAutoloadAdd',
'atomsAutoloadDel',
'atomsAutoloadList',
'atomsCategoryList',
'atomsCheckModule',
'atomsDepTreeShow',
'atomsGetConfig',
'atomsGetInstalled',
'atomsGetInstalledPath',
'atomsGetLoaded',
'atomsGetLoadedPath',
'atomsInstall',
'atomsIsInstalled',
'atomsIsLoaded',
'atomsList',
'atomsLoad',
'atomsQuit',
'atomsRemove',
'atomsRepositoryAdd',
'atomsRepositoryDel',
'atomsRepositoryList',
'atomsRestoreConfig',
'atomsSaveConfig',
'atomsSearch',
'atomsSetConfig',
'atomsShow',
'atomsSystemInit',
'atomsSystemUpdate',
'atomsTest',
'atomsUpdate',
'atomsVersion',
'augment',
'auread',
'auwrite',
'balreal',
'bench_run',
'bilin',
'bilt',
'bin2dec',
'binomial',
'bitand',
'bitcmp',
'bitget',
'bitor',
'bitset',
'bitxor',
'black',
'blanks',
'bloc2exp',
'bloc2ss',
'block_parameter_error',
'bode',
'bode_asymp',
'bstap',
'buttmag',
'bvodeS',
'bytecode',
'bytecodewalk',
'cainv',
'calendar',
'calerf',
'calfrq',
'canon',
'casc',
'cat',
'cat_code',
'cb_m2sci_gui',
'ccontrg',
'cell',
'cell2mat',
'cellstr',
'center',
'cepstrum',
'cfspec',
'char',
'chart',
'cheb1mag',
'cheb2mag',
'check_gateways',
'check_modules_xml',
'check_versions',
'chepol',
'chfact',
'chsolve',
'classmarkov',
'clean_help',
'clock',
'cls2dls',
'cmb_lin',
'cmndred',
'cmoment',
'coding_ga_binary',
'coding_ga_identity',
'coff',
'coffg',
'colcomp',
'colcompr',
'colinout',
'colregul',
'companion',
'complex',
'compute_initial_temp',
'cond',
'cond2sp',
'condestsp',
'configure_msifort',
'configure_msvc',
'conjgrad',
'cont_frm',
'cont_mat',
'contrss',
'conv',
'convert_to_float',
'convertindex',
'convol',
'convol2d',
'copfac',
'correl',
'cosd',
'cosh',
'coshm',
'cosm',
'cotd',
'cotg',
'coth',
'cothm',
'cov',
'covar',
'createXConfiguration',
'createfun',
'createstruct',
'cross',
'crossover_ga_binary',
'crossover_ga_default',
'csc',
'cscd',
'csch',
'csgn',
'csim',
'cspect',
'ctr_gram',
'czt',
'dae',
'daeoptions',
'damp',
'datafit',
'date',
'datenum',
'datevec',
'dbphi',
'dcf',
'ddp',
'dec2bin',
'dec2hex',
'dec2oct',
'del_help_chapter',
'del_module_help_chapter',
'demo_begin',
'demo_choose',
'demo_compiler',
'demo_end',
'demo_file_choice',
'demo_folder_choice',
'demo_function_choice',
'demo_gui',
'demo_run',
'demo_viewCode',
'denom',
'derivat',
'derivative',
'des2ss',
'des2tf',
'detectmsifort64tools',
'detectmsvc64tools',
'determ',
'detr',
'detrend',
'devtools_run_builder',
'dhnorm',
'diff',
'diophant',
'dir',
'dirname',
'dispfiles',
'dllinfo',
'dscr',
'dsimul',
'dt_ility',
'dtsi',
'edit',
'edit_error',
'editor',
'eigenmarkov',
'eigs',
'ell1mag',
'enlarge_shape',
'entropy',
'eomday',
'epred',
'eqfir',
'eqiir',
'equil',
'equil1',
'erfinv',
'etime',
'eval',
'evans',
'evstr',
'example_run',
'expression2code',
'extract_help_examples',
'factor',
'factorial',
'factors',
'faurre',
'ffilt',
'fft2',
'fftshift',
'fieldnames',
'filt_sinc',
'filter',
'findABCD',
'findAC',
'findBDK',
'findR',
'find_freq',
'find_links',
'find_scicos_version',
'findm',
'findmsifortcompiler',
'findmsvccompiler',
'findx0BD',
'firstnonsingleton',
'fix',
'fixedpointgcd',
'flipdim',
'flts',
'fminsearch',
'formatBlackTip',
'formatBodeMagTip',
'formatBodePhaseTip',
'formatGainplotTip',
'formatHallModuleTip',
'formatHallPhaseTip',
'formatNicholsGainTip',
'formatNicholsPhaseTip',
'formatNyquistTip',
'formatPhaseplotTip',
'formatSgridDampingTip',
'formatSgridFreqTip',
'formatZgridDampingTip',
'formatZgridFreqTip',
'format_txt',
'fourplan',
'frep2tf',
'freson',
'frfit',
'frmag',
'fseek_origin',
'fsfirlin',
'fspec',
'fspecg',
'fstabst',
'ftest',
'ftuneq',
'fullfile',
'fullrf',
'fullrfk',
'fun2string',
'g_margin',
'gainplot',
'gamitg',
'gcare',
'gcd',
'gencompilationflags_unix',
'generateBlockImage',
'generateBlockImages',
'generic_i_ce',
'generic_i_h',
'generic_i_hm',
'generic_i_s',
'generic_i_st',
'genlib',
'genmarkov',
'geomean',
'getDiagramVersion',
'getModelicaPath',
'getPreferencesValue',
'get_file_path',
'get_function_path',
'get_param',
'get_profile',
'get_scicos_version',
'getd',
'getscilabkeywords',
'getshell',
'gettklib',
'gfare',
'gfrancis',
'givens',
'glever',
'gmres',
'group',
'gschur',
'gspec',
'gtild',
'h2norm',
'h_cl',
'h_inf',
'h_inf_st',
'h_norm',
'hallchart',
'halt',
'hank',
'hankelsv',
'harmean',
'haveacompiler',
'head_comments',
'help_from_sci',
'help_skeleton',
'hermit',
'hex2dec',
'hilb',
'hilbert',
'histc',
'horner',
'householder',
'hrmt',
'htrianr',
'hypermat',
'idct',
'idst',
'ifft',
'ifftshift',
'iir',
'iirgroup',
'iirlp',
'iirmod',
'ilib_build',
'ilib_build_jar',
'ilib_compile',
'ilib_for_link',
'ilib_gen_Make',
'ilib_gen_Make_unix',
'ilib_gen_cleaner',
'ilib_gen_gateway',
'ilib_gen_loader',
'ilib_include_flag',
'ilib_mex_build',
'im_inv',
'importScicosDiagram',
'importScicosPal',
'importXcosDiagram',
'imrep2ss',
'ind2sub',
'inistate',
'init_ga_default',
'init_param',
'initial_scicos_tables',
'input',
'instruction2code',
'intc',
'intdec',
'integrate',
'interp1',
'interpln',
'intersect',
'intl',
'intsplin',
'inttrap',
'inv_coeff',
'invr',
'invrs',
'invsyslin',
'iqr',
'isLeapYear',
'is_absolute_path',
'is_param',
'iscell',
'iscellstr',
'iscolumn',
'isempty',
'isfield',
'isinf',
'ismatrix',
'isnan',
'isrow',
'isscalar',
'issparse',
'issquare',
'isstruct',
'isvector',
'jmat',
'justify',
'kalm',
'karmarkar',
'kernel',
'kpure',
'krac2',
'kroneck',
'lattn',
'lattp',
'launchtest',
'lcf',
'lcm',
'lcmdiag',
'leastsq',
'leqe',
'leqr',
'lev',
'levin',
'lex_sort',
'lft',
'lin',
'lin2mu',
'lincos',
'lindquist',
'linf',
'linfn',
'linsolve',
'linspace',
'list2vec',
'list_param',
'listfiles',
'listfunctions',
'listvarinfile',
'lmisolver',
'lmitool',
'loadXcosLibs',
'loadmatfile',
'loadwave',
'log10',
'log2',
'logm',
'logspace',
'lqe',
'lqg',
'lqg2stan',
'lqg_ltr',
'lqr',
'ls',
'lyap',
'm2sci_gui',
'm_circle',
'macglov',
'macrovar',
'mad',
'makecell',
'manedit',
'mapsound',
'markp2ss',
'matfile2sci',
'mdelete',
'mean',
'meanf',
'median',
'members',
'mese',
'meshgrid',
'mfft',
'mfile2sci',
'minreal',
'minss',
'mkdir',
'modulo',
'moment',
'mrfit',
'msd',
'mstr2sci',
'mtlb',
'mtlb_0',
'mtlb_a',
'mtlb_all',
'mtlb_any',
'mtlb_axes',
'mtlb_axis',
'mtlb_beta',
'mtlb_box',
'mtlb_choices',
'mtlb_close',
'mtlb_colordef',
'mtlb_cond',
'mtlb_cov',
'mtlb_cumprod',
'mtlb_cumsum',
'mtlb_dec2hex',
'mtlb_delete',
'mtlb_diag',
'mtlb_diff',
'mtlb_dir',
'mtlb_double',
'mtlb_e',
'mtlb_echo',
'mtlb_error',
'mtlb_eval',
'mtlb_exist',
'mtlb_eye',
'mtlb_false',
'mtlb_fft',
'mtlb_fftshift',
'mtlb_filter',
'mtlb_find',
'mtlb_findstr',
'mtlb_fliplr',
'mtlb_fopen',
'mtlb_format',
'mtlb_fprintf',
'mtlb_fread',
'mtlb_fscanf',
'mtlb_full',
'mtlb_fwrite',
'mtlb_get',
'mtlb_grid',
'mtlb_hold',
'mtlb_i',
'mtlb_ifft',
'mtlb_image',
'mtlb_imp',
'mtlb_int16',
'mtlb_int32',
'mtlb_int8',
'mtlb_is',
'mtlb_isa',
'mtlb_isfield',
'mtlb_isletter',
'mtlb_isspace',
'mtlb_l',
'mtlb_legendre',
'mtlb_linspace',
'mtlb_logic',
'mtlb_logical',
'mtlb_loglog',
'mtlb_lower',
'mtlb_max',
'mtlb_mean',
'mtlb_median',
'mtlb_mesh',
'mtlb_meshdom',
'mtlb_min',
'mtlb_more',
'mtlb_num2str',
'mtlb_ones',
'mtlb_pcolor',
'mtlb_plot',
'mtlb_prod',
'mtlb_qr',
'mtlb_qz',
'mtlb_rand',
'mtlb_randn',
'mtlb_rcond',
'mtlb_realmax',
'mtlb_realmin',
'mtlb_s',
'mtlb_semilogx',
'mtlb_semilogy',
'mtlb_setstr',
'mtlb_size',
'mtlb_sort',
'mtlb_sortrows',
'mtlb_sprintf',
'mtlb_sscanf',
'mtlb_std',
'mtlb_strcmp',
'mtlb_strcmpi',
'mtlb_strfind',
'mtlb_strrep',
'mtlb_subplot',
'mtlb_sum',
'mtlb_t',
'mtlb_toeplitz',
'mtlb_tril',
'mtlb_triu',
'mtlb_true',
'mtlb_type',
'mtlb_uint16',
'mtlb_uint32',
'mtlb_uint8',
'mtlb_upper',
'mtlb_var',
'mtlb_zeros',
'mu2lin',
'mutation_ga_binary',
'mutation_ga_default',
'mvcorrel',
'mvvacov',
'nancumsum',
'nand2mean',
'nanmax',
'nanmean',
'nanmeanf',
'nanmedian',
'nanmin',
'nanreglin',
'nanstdev',
'nansum',
'narsimul',
'ndgrid',
'ndims',
'nehari',
'neigh_func_csa',
'neigh_func_default',
'neigh_func_fsa',
'neigh_func_vfsa',
'neldermead_cget',
'neldermead_configure',
'neldermead_costf',
'neldermead_defaultoutput',
'neldermead_destroy',
'neldermead_function',
'neldermead_get',
'neldermead_log',
'neldermead_new',
'neldermead_restart',
'neldermead_search',
'neldermead_updatesimp',
'nextpow2',
'nfreq',
'nicholschart',
'nlev',
'nmplot_cget',
'nmplot_configure',
'nmplot_contour',
'nmplot_destroy',
'nmplot_function',
'nmplot_get',
'nmplot_historyplot',
'nmplot_log',
'nmplot_new',
'nmplot_outputcmd',
'nmplot_restart',
'nmplot_search',
'nmplot_simplexhistory',
'noisegen',
'nonreg_test_run',
'now',
'nthroot',
'null',
'num2cell',
'numderivative',
'numdiff',
'numer',
'nyquist',
'nyquistfrequencybounds',
'obs_gram',
'obscont',
'observer',
'obsv_mat',
'obsvss',
'oct2dec',
'odeoptions',
'optim_ga',
'optim_moga',
'optim_nsga',
'optim_nsga2',
'optim_sa',
'optimbase_cget',
'optimbase_checkbounds',
'optimbase_checkcostfun',
'optimbase_checkx0',
'optimbase_configure',
'optimbase_destroy',
'optimbase_function',
'optimbase_get',
'optimbase_hasbounds',
'optimbase_hasconstraints',
'optimbase_hasnlcons',
'optimbase_histget',
'optimbase_histset',
'optimbase_incriter',
'optimbase_isfeasible',
'optimbase_isinbounds',
'optimbase_isinnonlincons',
'optimbase_log',
'optimbase_logshutdown',
'optimbase_logstartup',
'optimbase_new',
'optimbase_outputcmd',
'optimbase_outstruct',
'optimbase_proj2bnds',
'optimbase_set',
'optimbase_stoplog',
'optimbase_terminate',
'optimget',
'optimplotfunccount',
'optimplotfval',
'optimplotx',
'optimset',
'optimsimplex_center',
'optimsimplex_check',
'optimsimplex_compsomefv',
'optimsimplex_computefv',
'optimsimplex_deltafv',
'optimsimplex_deltafvmax',
'optimsimplex_destroy',
'optimsimplex_dirmat',
'optimsimplex_fvmean',
'optimsimplex_fvstdev',
'optimsimplex_fvvariance',
'optimsimplex_getall',
'optimsimplex_getallfv',
'optimsimplex_getallx',
'optimsimplex_getfv',
'optimsimplex_getn',
'optimsimplex_getnbve',
'optimsimplex_getve',
'optimsimplex_getx',
'optimsimplex_gradientfv',
'optimsimplex_log',
'optimsimplex_new',
'optimsimplex_reflect',
'optimsimplex_setall',
'optimsimplex_setallfv',
'optimsimplex_setallx',
'optimsimplex_setfv',
'optimsimplex_setn',
'optimsimplex_setnbve',
'optimsimplex_setve',
'optimsimplex_setx',
'optimsimplex_shrink',
'optimsimplex_size',
'optimsimplex_sort',
'optimsimplex_xbar',
'orth',
'output_ga_default',
'output_moga_default',
'output_nsga2_default',
'output_nsga_default',
'p_margin',
'pack',
'pareto_filter',
'parrot',
'pbig',
'pca',
'pcg',
'pdiv',
'pen2ea',
'pencan',
'pencost',
'penlaur',
'perctl',
'perl',
'perms',
'permute',
'pertrans',
'pfactors',
'pfss',
'phasemag',
'phaseplot',
'phc',
'pinv',
'playsnd',
'plotprofile',
'plzr',
'pmodulo',
'pol2des',
'pol2str',
'polar',
'polfact',
'prbs_a',
'prettyprint',
'primes',
'princomp',
'profile',
'proj',
'projsl',
'projspec',
'psmall',
'pspect',
'qmr',
'qpsolve',
'quart',
'quaskro',
'rafiter',
'randpencil',
'range',
'rank',
'readxls',
'recompilefunction',
'recons',
'reglin',
'regress',
'remezb',
'remove_param',
'remove_profiling',
'repfreq',
'replace_Ix_by_Fx',
'repmat',
'reset_profiling',
'resize_matrix',
'returntoscilab',
'rhs2code',
'ric_desc',
'riccati',
'rmdir',
'routh_t',
'rowcomp',
'rowcompr',
'rowinout',
'rowregul',
'rowshuff',
'rref',
'sample',
'samplef',
'samwr',
'savematfile',
'savewave',
'scanf',
'sci2exp',
'sciGUI_init',
'sci_sparse',
'scicos_getvalue',
'scicos_simulate',
'scicos_workspace_init',
'scisptdemo',
'scitest',
'sdiff',
'sec',
'secd',
'sech',
'selection_ga_elitist',
'selection_ga_random',
'sensi',
'setPreferencesValue',
'set_param',
'setdiff',
'sgrid',
'show_margins',
'show_pca',
'showprofile',
'signm',
'sinc',
'sincd',
'sind',
'sinh',
'sinhm',
'sinm',
'sm2des',
'sm2ss',
'smga',
'smooth',
'solve',
'sound',
'soundsec',
'sp2adj',
'spaninter',
'spanplus',
'spantwo',
'specfact',
'speye',
'sprand',
'spzeros',
'sqroot',
'sqrtm',
'squarewave',
'squeeze',
'srfaur',
'srkf',
'ss2des',
'ss2ss',
'ss2tf',
'sskf',
'ssprint',
'ssrand',
'st_deviation',
'st_i_generic',
'st_ility',
'stabil',
'statgain',
'stdev',
'stdevf',
'steadycos',
'strange',
'strcmpi',
'struct',
'sub2ind',
'sva',
'svplot',
'sylm',
'sylv',
'sysconv',
'sysdiag',
'sysfact',
'syslin',
'syssize',
'system',
'systmat',
'tabul',
'tand',
'tanh',
'tanhm',
'tanm',
'tbx_build_blocks',
'tbx_build_cleaner',
'tbx_build_gateway',
'tbx_build_gateway_clean',
'tbx_build_gateway_loader',
'tbx_build_help',
'tbx_build_help_loader',
'tbx_build_loader',
'tbx_build_localization',
'tbx_build_macros',
'tbx_build_pal_loader',
'tbx_build_src',
'tbx_builder',
'tbx_builder_gateway',
'tbx_builder_gateway_lang',
'tbx_builder_help',
'tbx_builder_help_lang',
'tbx_builder_macros',
'tbx_builder_src',
'tbx_builder_src_lang',
'tbx_generate_pofile',
'temp_law_csa',
'temp_law_default',
'temp_law_fsa',
'temp_law_huang',
'temp_law_vfsa',
'test_clean',
'test_on_columns',
'test_run',
'test_run_level',
'testexamples',
'tf2des',
'tf2ss',
'thrownan',
'tic',
'time_id',
'toc',
'toeplitz',
'tokenpos',
'toolboxes',
'trace',
'trans',
'translatepaths',
'tree2code',
'trfmod',
'trianfml',
'trimmean',
'trisolve',
'trzeros',
'typeof',
'ui_observer',
'union',
'unique',
'unit_test_run',
'unix_g',
'unix_s',
'unix_w',
'unix_x',
'unobs',
'unpack',
'unwrap',
'variance',
'variancef',
'vec2list',
'vectorfind',
'ver',
'warnobsolete',
'wavread',
'wavwrite',
'wcenter',
'weekday',
'wfir',
'wfir_gui',
'whereami',
'who_user',
'whos',
'wiener',
'wigner',
'window',
'winlist',
'with_javasci',
'with_macros_source',
'with_modelica_compiler',
'with_tk',
'xcorr',
'xcosBlockEval',
'xcosBlockInterface',
'xcosCodeGeneration',
'xcosConfigureModelica',
'xcosPal',
'xcosPalAdd',
'xcosPalAddBlock',
'xcosPalExport',
'xcosPalGenerateAllIcons',
'xcosShowBlockWarning',
'xcosValidateBlockSet',
'xcosValidateCompareBlock',
'xcos_compile',
'xcos_debug_gui',
'xcos_run',
'xcos_simulate',
'xcov',
'xmltochm',
'xmltoformat',
'xmltohtml',
'xmltojar',
'xmltopdf',
'xmltops',
'xmltoweb',
'yulewalk',
'zeropen',
'zgrid',
'zpbutt',
'zpch1',
'zpch2',
'zpell',
)
variables_kw = (
'$',
'%F',
'%T',
'%e',
'%eps',
'%f',
'%fftw',
'%gui',
'%i',
'%inf',
'%io',
'%modalWarning',
'%nan',
'%pi',
'%s',
'%t',
'%tk',
'%toolboxes',
'%toolboxes_dir',
'%z',
'PWD',
'SCI',
'SCIHOME',
'TMPDIR',
'arnoldilib',
'assertlib',
'atomslib',
'cacsdlib',
'compatibility_functilib',
'corelib',
'data_structureslib',
'demo_toolslib',
'development_toolslib',
'differential_equationlib',
'dynamic_linklib',
'elementary_functionslib',
'enull',
'evoid',
'external_objectslib',
'fd',
'fileiolib',
'functionslib',
'genetic_algorithmslib',
'helptoolslib',
'home',
'integerlib',
'interpolationlib',
'iolib',
'jnull',
'jvoid',
'linear_algebralib',
'm2scilib',
'matiolib',
'modules_managerlib',
'neldermeadlib',
'optimbaselib',
'optimizationlib',
'optimsimplexlib',
'output_streamlib',
'overloadinglib',
'parameterslib',
'polynomialslib',
'preferenceslib',
'randliblib',
'scicos_autolib',
'scicos_utilslib',
'scinoteslib',
'signal_processinglib',
'simulated_annealinglib',
'soundlib',
'sparselib',
'special_functionslib',
'spreadsheetlib',
'statisticslib',
'stringlib',
'tclscilib',
'timelib',
'umfpacklib',
'xcoslib',
)
if __name__ == '__main__': # pragma: no cover
import subprocess
from pygments.util import format_lines, duplicates_removed
mapping = {'variables': 'builtin'}
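    # extract_completion() below launches a headless Scilab, pipes it a short
    # script that joins completion("", var_type) with '||' and writes the
    # result to stderr, then splits the captured text back into single names.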
def extract_completion(var_type):
        # Use text-mode pipes so communicate() exchanges str rather than
        # bytes (required on Python 3).
        s = subprocess.Popen(['scilab', '-nwni'], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
output = s.communicate('''\
fd = mopen("/dev/stderr", "wt");
mputl(strcat(completion("", "%s"), "||"), fd);
mclose(fd)\n''' % var_type)
if '||' not in output[1]:
raise Exception(output[0])
# Invalid DISPLAY causes this to be output:
text = output[1].strip()
if text.startswith('Error: unable to open display \n'):
text = text[len('Error: unable to open display \n'):]
return text.split('||')
new_data = {}
seen = set() # only keep first type for a given word
for t in ('functions', 'commands', 'macros', 'variables'):
new_data[t] = duplicates_removed(extract_completion(t), seen)
seen.update(set(new_data[t]))
with open(__file__) as f:
content = f.read()
header = content[:content.find('# Autogenerated')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(__file__, 'w') as f:
f.write(header)
f.write('# Autogenerated\n\n')
for k, v in sorted(new_data.items()):
f.write(format_lines(k + '_kw', v) + '\n\n')
f.write(footer)
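# How these tables are consumed (explanatory sketch, not part of the generated
# data): the Scilab lexer typically folds each tuple into a single
# word-boundary regex with pygments.lexer.words(), along the lines of
#
#     from pygments.lexer import words
#     from pygments.token import Name
#     (words(macros_kw + variables_kw, suffix=r'\b'), Name.Builtin)
#
# The exact grouping and token types are chosen by the lexer itself and may
# differ; keeping the lists as plain tuples of strings is what makes that
# one-regex alternation possible.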
| 52,377 | Python | 15.928895 | 76 | 0.450331 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/promql.py | """
pygments.lexers.promql
~~~~~~~~~~~~~~~~~~~~~~
Lexer for Prometheus Query Language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Whitespace
__all__ = ["PromQLLexer"]
class PromQLLexer(RegexLexer):
"""
For PromQL queries.
For details about the grammar see:
https://github.com/prometheus/prometheus/tree/master/promql/parser
    .. versionadded:: 2.7
"""
name = "PromQL"
url = 'https://prometheus.io/docs/prometheus/latest/querying/basics/'
aliases = ["promql"]
filenames = ["*.promql"]
base_keywords = (
words(
(
"bool",
"by",
"group_left",
"group_right",
"ignoring",
"offset",
"on",
"without",
),
suffix=r"\b",
),
Keyword,
)
aggregator_keywords = (
words(
(
"sum",
"min",
"max",
"avg",
"group",
"stddev",
"stdvar",
"count",
"count_values",
"bottomk",
"topk",
"quantile",
),
suffix=r"\b",
),
Keyword,
)
function_keywords = (
words(
(
"abs",
"absent",
"absent_over_time",
"avg_over_time",
"ceil",
"changes",
"clamp_max",
"clamp_min",
"count_over_time",
"day_of_month",
"day_of_week",
"days_in_month",
"delta",
"deriv",
"exp",
"floor",
"histogram_quantile",
"holt_winters",
"hour",
"idelta",
"increase",
"irate",
"label_join",
"label_replace",
"ln",
"log10",
"log2",
"max_over_time",
"min_over_time",
"minute",
"month",
"predict_linear",
"quantile_over_time",
"rate",
"resets",
"round",
"scalar",
"sort",
"sort_desc",
"sqrt",
"stddev_over_time",
"stdvar_over_time",
"sum_over_time",
"time",
"timestamp",
"vector",
"year",
),
suffix=r"\b",
),
Keyword.Reserved,
)
tokens = {
"root": [
(r"\n", Whitespace),
(r"\s+", Whitespace),
(r",", Punctuation),
# Keywords
base_keywords,
aggregator_keywords,
function_keywords,
# Offsets
(r"[1-9][0-9]*[smhdwy]", String),
# Numbers
(r"-?[0-9]+\.[0-9]+", Number.Float),
(r"-?[0-9]+", Number.Integer),
# Comments
(r"#.*?$", Comment.Single),
# Operators
(r"(\+|\-|\*|\/|\%|\^)", Operator),
(r"==|!=|>=|<=|<|>", Operator),
(r"and|or|unless", Operator.Word),
# Metrics
(r"[_a-zA-Z][a-zA-Z0-9_]+", Name.Variable),
# Params
(r'(["\'])(.*?)(["\'])', bygroups(Punctuation, String, Punctuation)),
# Other states
(r"\(", Operator, "function"),
(r"\)", Operator),
(r"\{", Punctuation, "labels"),
(r"\[", Punctuation, "range"),
],
"labels": [
(r"\}", Punctuation, "#pop"),
(r"\n", Whitespace),
(r"\s+", Whitespace),
(r",", Punctuation),
(r'([_a-zA-Z][a-zA-Z0-9_]*?)(\s*?)(=~|!=|=|!~)(\s*?)("|\')(.*?)("|\')',
bygroups(Name.Label, Whitespace, Operator, Whitespace,
Punctuation, String, Punctuation)),
],
"range": [
(r"\]", Punctuation, "#pop"),
(r"[1-9][0-9]*[smhdwy]", String),
],
"function": [
(r"\)", Operator, "#pop"),
(r"\(", Operator, "#push"),
default("#pop"),
],
}
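if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch, not part of the original module: run an invented
    # PromQL query through the lexer and print the resulting token stream.
    _query = 'rate(http_requests_total{job="api-server"}[5m]) > 0.5'
    for _token, _value in PromQLLexer().get_tokens(_query):
        print(_token, repr(_value))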
| 4,715 | Python | 25.795454 | 83 | 0.362884 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/sas.py | """
pygments.lexers.sas
~~~~~~~~~~~~~~~~~~~
Lexer for SAS.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Keyword, Name, Number, String, Text, \
Other, Generic
__all__ = ['SASLexer']
class SASLexer(RegexLexer):
"""
For SAS files.
.. versionadded:: 2.2
"""
# Syntax from syntax/sas.vim by James Kidd <[email protected]>
name = 'SAS'
aliases = ['sas']
filenames = ['*.SAS', '*.sas']
mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
flags = re.IGNORECASE | re.MULTILINE
builtins_macros = (
"bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
"display", "do", "else", "end", "eval", "global", "goto", "if",
"index", "input", "keydef", "label", "left", "length", "let",
"local", "lowcase", "macro", "mend", "nrquote",
"nrstr", "put", "qleft", "qlowcase", "qscan",
"qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
"str", "substr", "superq", "syscall", "sysevalf", "sysexec",
"sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
"then", "to", "trim", "unquote", "until", "upcase", "verify",
"while", "window"
)
builtins_conditionals = (
"do", "if", "then", "else", "end", "until", "while"
)
builtins_statements = (
"abort", "array", "attrib", "by", "call", "cards", "cards4",
"catname", "continue", "datalines", "datalines4", "delete", "delim",
"delimiter", "display", "dm", "drop", "endsas", "error", "file",
"filename", "footnote", "format", "goto", "in", "infile", "informat",
"input", "keep", "label", "leave", "length", "libname", "link",
"list", "lostcard", "merge", "missing", "modify", "options", "output",
"out", "page", "put", "redirect", "remove", "rename", "replace",
"retain", "return", "select", "set", "skip", "startsas", "stop",
"title", "update", "waitsas", "where", "window", "x", "systask"
)
builtins_sql = (
"add", "and", "alter", "as", "cascade", "check", "create",
"delete", "describe", "distinct", "drop", "foreign", "from",
"group", "having", "index", "insert", "into", "in", "key", "like",
"message", "modify", "msgtype", "not", "null", "on", "or",
"order", "primary", "references", "reset", "restrict", "select",
"set", "table", "unique", "update", "validate", "view", "where"
)
builtins_functions = (
"abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
"attrn", "band", "betainv", "blshift", "bnot", "bor",
"brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
"close", "cnonct", "collate", "compbl", "compound",
"compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
"daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
"datejul", "datepart", "datetime", "day", "dclose", "depdb",
"depdbsl", "depsl", "depsyd",
"deptab", "dequote", "dhms", "dif", "digamma",
"dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
"dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
"fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
"fexist", "fget", "fileexist", "filename", "fileref",
"finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
"fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
"fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
"fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
"hbound", "hms", "hosthelp", "hour", "ibessel", "index",
"indexc", "indexw", "input", "inputc", "inputn", "int",
"intck", "intnx", "intrr", "irr", "jbessel", "juldate",
"kurtosis", "lag", "lbound", "left", "length", "lgamma",
"libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
"logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
"mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
"normal", "note", "npv", "open", "ordinal", "pathname",
"pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
"probbeta", "probbnml", "probchi", "probf", "probgam",
"probhypr", "probit", "probnegb", "probnorm", "probt",
"put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
"ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
"rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
"rewind", "right", "round", "saving", "scan", "sdf", "second",
"sign", "sin", "sinh", "skewness", "soundex", "spedis",
"sqrt", "std", "stderr", "stfips", "stname", "stnamel",
"substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
"sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
"tnonct", "today", "translate", "tranwrd", "trigamma",
"trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
"varfmt", "varinfmt", "varlabel", "varlen", "varname",
"varnum", "varray", "varrayx", "vartype", "verify", "vformat",
"vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
"vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
"vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
"vinformatw", "vinformatwx", "vinformatx", "vlabel",
"vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
"vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
"zipnamel", "zipstate"
)
tokens = {
'root': [
include('comments'),
include('proc-data'),
include('cards-datalines'),
include('logs'),
include('general'),
(r'.', Text),
],
        # SAS comments can span multiple lines; a '*'-style comment runs until
        # the next ';'.
'comments': [
(r'^\s*\*.*?;', Comment),
(r'/\*.*?\*/', Comment),
(r'^\s*\*(.|\n)*?;', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
],
# Special highlight for proc, data, quit, run
'proc-data': [
(r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
Keyword.Reserved),
],
# Special highlight cards and datalines
'cards-datalines': [
(r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
],
'data': [
(r'(.|\n)*^\s*;\s*$', Other, '#pop'),
],
# Special highlight for put NOTE|ERROR|WARNING (order matters)
'logs': [
(r'\n?^\s*%?put ', Keyword, 'log-messages'),
],
'log-messages': [
(r'NOTE(:|-).*', Generic, '#pop'),
(r'WARNING(:|-).*', Generic.Emph, '#pop'),
(r'ERROR(:|-).*', Generic.Error, '#pop'),
include('general'),
],
'general': [
include('keywords'),
include('vars-strings'),
include('special'),
include('numbers'),
],
# Keywords, statements, functions, macros
'keywords': [
(words(builtins_statements,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_sql,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_conditionals,
prefix = r'\b',
suffix = r'\b'),
Keyword),
(words(builtins_macros,
prefix = r'%',
suffix = r'\b'),
Name.Builtin),
(words(builtins_functions,
prefix = r'\b',
suffix = r'\('),
Name.Builtin),
],
# Strings and user-defined variables and macros (order matters)
'vars-strings': [
(r'&[a-z_]\w{0,31}\.?', Name.Variable),
(r'%[a-z_]\w{0,31}', Name.Function),
(r'\'', String, 'string_squote'),
(r'"', String, 'string_dquote'),
],
'string_squote': [
('\'', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
# AFAIK, macro variables are not evaluated in single quotes
# (r'&', Name.Variable, 'validvar'),
(r'[^$\'\\]+', String),
(r'[$\'\\]', String),
],
'string_dquote': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
(r'&', Name.Variable, 'validvar'),
(r'[^$&"\\]+', String),
(r'[$"\\]', String),
],
'validvar': [
(r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
],
# SAS numbers and special variables
'numbers': [
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
Number),
],
'special': [
(r'(null|missing|_all_|_automatic_|_character_|_n_|'
r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
Keyword.Constant),
],
# 'operators': [
# (r'(-|=|<=|>=|<|>|<>|&|!=|'
# r'\||\*|\+|\^|/|!|~|~=)', Operator)
# ],
}
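if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch, not part of the original module: render an invented
    # SAS snippet to HTML, exercising the proc-data and logs states above.
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    _sas = "data work.demo;\n  set sashelp.class;\nrun;\n%put NOTE: done;\n"
    print(highlight(_sas, SASLexer(), HtmlFormatter()))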
| 9,400 | Python | 40.232456 | 78 | 0.465957 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/css.py | """
pygments.lexers.css
~~~~~~~~~~~~~~~~~~~
Lexers for CSS and related stylesheet formats.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
default, words, inherit
from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
Punctuation, Whitespace
from pygments.lexers._css_builtins import _css_properties
__all__ = ['CssLexer', 'SassLexer', 'ScssLexer', 'LessCssLexer']
# List of vendor prefixes obtained from:
# https://www.w3.org/TR/CSS21/syndata.html#vendor-keyword-history
_vendor_prefixes = (
'-ms-', 'mso-', '-moz-', '-o-', '-xv-', '-atsc-', '-wap-', '-khtml-',
'-webkit-', 'prince-', '-ah-', '-hp-', '-ro-', '-rim-', '-tc-',
)
# List of extended color keywords obtained from:
# https://drafts.csswg.org/css-color/#named-colors
_color_keywords = (
'aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure', 'beige',
'bisque', 'black', 'blanchedalmond', 'blue', 'blueviolet', 'brown',
'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral',
'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan',
'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki',
'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred',
'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray',
'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue',
'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite',
'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod',
'gray', 'green', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred',
'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen',
'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan',
'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey',
'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue',
'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow',
'lime', 'limegreen', 'linen', 'magenta', 'maroon', 'mediumaquamarine',
'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen',
'mediumslateblue', 'mediumspringgreen', 'mediumturquoise',
'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin',
'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', 'orange',
'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise',
'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum',
'powderblue', 'purple', 'rebeccapurple', 'red', 'rosybrown', 'royalblue',
'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna',
'silver', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow',
'springgreen', 'steelblue', 'tan', 'teal', 'thistle', 'tomato', 'turquoise',
'violet', 'wheat', 'white', 'whitesmoke', 'yellow', 'yellowgreen',
) + ('transparent',)
# List of keyword values obtained from:
# http://cssvalues.com/
_keyword_values = (
'absolute', 'alias', 'all', 'all-petite-caps', 'all-scroll',
'all-small-caps', 'allow-end', 'alpha', 'alternate', 'alternate-reverse',
'always', 'armenian', 'auto', 'avoid', 'avoid-column', 'avoid-page',
'backwards', 'balance', 'baseline', 'below', 'blink', 'block', 'bold',
'bolder', 'border-box', 'both', 'bottom', 'box-decoration', 'break-word',
'capitalize', 'cell', 'center', 'circle', 'clip', 'clone', 'close-quote',
'col-resize', 'collapse', 'color', 'color-burn', 'color-dodge', 'column',
'column-reverse', 'compact', 'condensed', 'contain', 'container',
'content-box', 'context-menu', 'copy', 'cover', 'crisp-edges', 'crosshair',
'currentColor', 'cursive', 'darken', 'dashed', 'decimal',
'decimal-leading-zero', 'default', 'descendants', 'difference', 'digits',
'disc', 'distribute', 'dot', 'dotted', 'double', 'double-circle', 'e-resize',
'each-line', 'ease', 'ease-in', 'ease-in-out', 'ease-out', 'edges',
'ellipsis', 'end', 'ew-resize', 'exclusion', 'expanded', 'extra-condensed',
'extra-expanded', 'fantasy', 'fill', 'fill-box', 'filled', 'first', 'fixed',
'flat', 'flex', 'flex-end', 'flex-start', 'flip', 'force-end', 'forwards',
'from-image', 'full-width', 'geometricPrecision', 'georgian', 'groove',
'hanging', 'hard-light', 'help', 'hidden', 'hide', 'horizontal', 'hue',
'icon', 'infinite', 'inherit', 'initial', 'ink', 'inline', 'inline-block',
'inline-flex', 'inline-table', 'inset', 'inside', 'inter-word', 'invert',
'isolate', 'italic', 'justify', 'large', 'larger', 'last', 'left',
'lighten', 'lighter', 'line-through', 'linear', 'list-item', 'local',
'loose', 'lower-alpha', 'lower-greek', 'lower-latin', 'lower-roman',
'lowercase', 'ltr', 'luminance', 'luminosity', 'mandatory', 'manipulation',
'manual', 'margin-box', 'match-parent', 'medium', 'mixed', 'monospace',
'move', 'multiply', 'n-resize', 'ne-resize', 'nesw-resize',
'no-close-quote', 'no-drop', 'no-open-quote', 'no-repeat', 'none', 'normal',
'not-allowed', 'nowrap', 'ns-resize', 'nw-resize', 'nwse-resize', 'objects',
'oblique', 'off', 'on', 'open', 'open-quote', 'optimizeLegibility',
'optimizeSpeed', 'outset', 'outside', 'over', 'overlay', 'overline',
'padding-box', 'page', 'pan-down', 'pan-left', 'pan-right', 'pan-up',
'pan-x', 'pan-y', 'paused', 'petite-caps', 'pixelated', 'pointer',
'preserve-3d', 'progress', 'proximity', 'relative', 'repeat',
'repeat no-repeat', 'repeat-x', 'repeat-y', 'reverse', 'ridge', 'right',
'round', 'row', 'row-resize', 'row-reverse', 'rtl', 'ruby', 'ruby-base',
'ruby-base-container', 'ruby-text', 'ruby-text-container', 'run-in',
'running', 's-resize', 'sans-serif', 'saturation', 'scale-down', 'screen',
'scroll', 'se-resize', 'semi-condensed', 'semi-expanded', 'separate',
'serif', 'sesame', 'show', 'sideways', 'sideways-left', 'sideways-right',
'slice', 'small', 'small-caps', 'smaller', 'smooth', 'snap', 'soft-light',
'solid', 'space', 'space-around', 'space-between', 'spaces', 'square',
'start', 'static', 'step-end', 'step-start', 'sticky', 'stretch', 'strict',
'stroke-box', 'style', 'sw-resize', 'table', 'table-caption', 'table-cell',
'table-column', 'table-column-group', 'table-footer-group',
'table-header-group', 'table-row', 'table-row-group', 'text', 'thick',
'thin', 'titling-caps', 'to', 'top', 'triangle', 'ultra-condensed',
'ultra-expanded', 'under', 'underline', 'unicase', 'unset', 'upper-alpha',
'upper-latin', 'upper-roman', 'uppercase', 'upright', 'use-glyph-orientation',
'vertical', 'vertical-text', 'view-box', 'visible', 'w-resize', 'wait',
'wavy', 'weight', 'weight style', 'wrap', 'wrap-reverse', 'x-large',
'x-small', 'xx-large', 'xx-small', 'zoom-in', 'zoom-out',
)
# List of other keyword values from other sources:
_other_keyword_values = (
'above', 'aural', 'behind', 'bidi-override', 'center-left', 'center-right',
'cjk-ideographic', 'continuous', 'crop', 'cross', 'embed', 'far-left',
'far-right', 'fast', 'faster', 'hebrew', 'high', 'higher', 'hiragana',
'hiragana-iroha', 'katakana', 'katakana-iroha', 'landscape', 'left-side',
'leftwards', 'level', 'loud', 'low', 'lower', 'message-box', 'middle',
'mix', 'narrower', 'once', 'portrait', 'right-side', 'rightwards', 'silent',
'slow', 'slower', 'small-caption', 'soft', 'spell-out', 'status-bar',
'super', 'text-bottom', 'text-top', 'wider', 'x-fast', 'x-high', 'x-loud',
'x-low', 'x-soft', 'yes', 'pre', 'pre-wrap', 'pre-line',
)
# List of functional notation and function keyword values:
_functional_notation_keyword_values = (
'attr', 'blackness', 'blend', 'blenda', 'blur', 'brightness', 'calc',
'circle', 'color-mod', 'contrast', 'counter', 'cubic-bezier', 'device-cmyk',
'drop-shadow', 'ellipse', 'gray', 'grayscale', 'hsl', 'hsla', 'hue',
'hue-rotate', 'hwb', 'image', 'inset', 'invert', 'lightness',
'linear-gradient', 'matrix', 'matrix3d', 'opacity', 'perspective',
'polygon', 'radial-gradient', 'rect', 'repeating-linear-gradient',
'repeating-radial-gradient', 'rgb', 'rgba', 'rotate', 'rotate3d', 'rotateX',
'rotateY', 'rotateZ', 'saturate', 'saturation', 'scale', 'scale3d',
'scaleX', 'scaleY', 'scaleZ', 'sepia', 'shade', 'skewX', 'skewY', 'steps',
'tint', 'toggle', 'translate', 'translate3d', 'translateX', 'translateY',
'translateZ', 'whiteness',
)
# Note: url(...) is handled separately (see the 'urls' and 'string-url' states below).
# List of units obtained from:
# https://www.w3.org/TR/css3-values/
_angle_units = (
'deg', 'grad', 'rad', 'turn',
)
_frequency_units = (
'Hz', 'kHz',
)
_length_units = (
'em', 'ex', 'ch', 'rem',
'vh', 'vw', 'vmin', 'vmax',
'px', 'mm', 'cm', 'in', 'pt', 'pc', 'q',
)
_resolution_units = (
'dpi', 'dpcm', 'dppx',
)
_time_units = (
's', 'ms',
)
_all_units = _angle_units + _frequency_units + _length_units + \
_resolution_units + _time_units
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
url = 'https://www.w3.org/TR/CSS/#css'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Whitespace),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\{', Punctuation, 'content'),
(r'(\:{1,2})([\w-]+)', bygroups(Punctuation, Name.Decorator)),
(r'(\.)([\w-]+)', bygroups(Punctuation, Name.Class)),
(r'(\#)([\w-]+)', bygroups(Punctuation, Name.Namespace)),
(r'(@)([\w-]+)', bygroups(Punctuation, Keyword), 'atrule'),
(r'[\w-]+', Name.Tag),
(r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
],
'atrule': [
(r'\{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'\}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Whitespace),
(r'\}', Punctuation, '#pop'),
(r';', Punctuation),
(r'^@.*?$', Comment.Preproc),
(words(_vendor_prefixes,), Keyword.Pseudo),
(r'('+r'|'.join(_css_properties)+r')(\s*)(\:)',
bygroups(Keyword, Whitespace, Punctuation), 'value-start'),
(r'([-]+[a-zA-Z_][\w-]*)(\s*)(\:)', bygroups(Name.Variable, Whitespace, Punctuation),
'value-start'),
(r'([a-zA-Z_][\w-]*)(\s*)(\:)', bygroups(Name, Whitespace, Punctuation),
'value-start'),
(r'/\*(?:.|\n)*?\*/', Comment),
],
'value-start': [
(r'\s+', Whitespace),
(words(_vendor_prefixes,), Name.Builtin.Pseudo),
include('urls'),
(r'('+r'|'.join(_functional_notation_keyword_values)+r')(\()',
bygroups(Name.Builtin, Punctuation), 'function-start'),
(r'([a-zA-Z_][\w-]+)(\()',
bygroups(Name.Function, Punctuation), 'function-start'),
(words(_keyword_values, suffix=r'\b'), Keyword.Constant),
(words(_other_keyword_values, suffix=r'\b'), Keyword.Constant),
(words(_color_keywords, suffix=r'\b'), Keyword.Constant),
# for transition-property etc.
(words(_css_properties, suffix=r'\b'), Keyword),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
include('numeric-values'),
(r'[~^*!%&<>|+=@:./?-]+', Operator),
(r'[\[\](),]+', Punctuation),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_][\w-]*', Name),
(r';', Punctuation, '#pop'),
(r'\}', Punctuation, '#pop:2'),
],
'function-start': [
(r'\s+', Whitespace),
(r'[-]+([\w+]+[-]*)+', Name.Variable),
include('urls'),
(words(_vendor_prefixes,), Keyword.Pseudo),
(words(_keyword_values, suffix=r'\b'), Keyword.Constant),
(words(_other_keyword_values, suffix=r'\b'), Keyword.Constant),
(words(_color_keywords, suffix=r'\b'), Keyword.Constant),
# function-start may be entered recursively
(r'(' + r'|'.join(_functional_notation_keyword_values) + r')(\()',
bygroups(Name.Builtin, Punctuation), 'function-start'),
(r'([a-zA-Z_][\w-]+)(\()',
bygroups(Name.Function, Punctuation), 'function-start'),
(r'/\*(?:.|\n)*?\*/', Comment),
include('numeric-values'),
(r'[*+/-]', Operator),
(r',', Punctuation),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_-]\w*', Name),
(r'\)', Punctuation, '#pop'),
],
'urls': [
(r'(url)(\()(".*?")(\))', bygroups(Name.Builtin, Punctuation,
String.Double, Punctuation)),
(r"(url)(\()('.*?')(\))", bygroups(Name.Builtin, Punctuation,
String.Single, Punctuation)),
(r'(url)(\()(.*?)(\))', bygroups(Name.Builtin, Punctuation,
String.Other, Punctuation)),
],
'numeric-values': [
(r'\#[a-zA-Z0-9]{1,6}', Number.Hex),
(r'[+\-]?[0-9]*[.][0-9]+', Number.Float, 'numeric-end'),
(r'[+\-]?[0-9]+', Number.Integer, 'numeric-end'),
],
'numeric-end': [
(words(_all_units, suffix=r'\b'), Keyword.Type),
(r'%', Keyword.Type),
default('#pop'),
],
}
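# Explanatory note (not in the original source): for a declaration such as
# "color: #ff0000 !important;", the 'content' state above emits the property
# name as Keyword and pushes 'value-start'; there '#ff0000' becomes Number.Hex
# and '!important' Comment.Preproc, while plain numbers detour through
# 'numeric-values'/'numeric-end' so a trailing unit such as 'px' or '%' is
# tagged Keyword.Type; the closing ';' pops back to 'content'.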
common_sass_tokens = {
'value': [
(r'[ \t]+', Whitespace),
(r'[!$][\w-]+', Name.Variable),
(r'url\(', String.Other, 'string-url'),
(r'[a-z_-][\w-]*(?=\()', Name.Function),
(words(_css_properties + (
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Name.Constant),
(words(_color_keywords, suffix=r'\b'), Name.Entity),
(words((
'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green',
'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua'), suffix=r'\b'),
Name.Builtin),
(r'\!(important|default)', Name.Exception),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'/\*', Comment.Multiline, 'inline-comment'),
(r'//[^\n]*', Comment.Single),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#\{', String.Interpol, 'interpolation'),
(r'[~^*!&%<>|+=@:,./?-]+', Operator),
(r'[\[\]()]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z_-][\w-]*', Name),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('value'),
],
'selector': [
(r'[ \t]+', Whitespace),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[\w-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Single),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Single, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('value'),
],
}
def _indentation(lexer, match, ctx):
    # Callback for the leading whitespace of a Sass line: remember the current
    # indentation and decide whether the line still belongs to an indented
    # block (e.g. a multi-line comment) opened earlier via _starts_block().
    indentation = match.group(0)
    yield match.start(), Whitespace, indentation
    ctx.last_indentation = indentation
    ctx.pos = match.end()
    if hasattr(ctx, 'block_state') and ctx.block_state and \
            indentation.startswith(ctx.block_indentation) and \
            indentation != ctx.block_indentation:
        # Indented deeper than the block opener: stay in the block's state.
        ctx.stack.append(ctx.block_state)
    else:
        # Same or shallower indentation: the block (if any) has ended.
        ctx.block_state = None
        ctx.block_indentation = None
        ctx.stack.append('content')
def _starts_block(token, state):
    # Return a callback that emits `token` for the matched text and records
    # that subsequent, more deeply indented lines belong to `state`.
    def callback(lexer, match, ctx):
        yield match.start(), token, match.group(0)
        if hasattr(ctx, 'last_indentation'):
            ctx.block_indentation = ctx.last_indentation
        else:
            ctx.block_indentation = ''
        ctx.block_state = state
        ctx.pos = match.end()
    return callback
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
.. versionadded:: 1.3
"""
name = 'Sass'
url = 'https://sass-lang.com/'
aliases = ['sass']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'[ \t]*\n', Whitespace),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( )([\w-]+)', bygroups(Keyword, Whitespace, Name.Function), 'value'),
(r'(@include)( )([\w-]+)', bygroups(Keyword, Whitespace, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[\w-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'value'),
(r'\+[\w-]+', Name.Decorator, 'value'),
(r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
bygroups(Name.Variable, Operator), 'value'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
default('selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Whitespace, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Whitespace, 'root'),
],
'import': [
(r'[ \t]+', Whitespace),
(r'\S+', String),
(r'\n', Whitespace, 'root'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'([ \t]*)(=)', bygroups(Whitespace, Operator), 'value'),
default('value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'([ \t]*)([=:])', bygroups(Whitespace, Operator), 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in common_sass_tokens.items():
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Whitespace, 'root'))
tokens['selector'].append((r'\n', Whitespace, 'root'))
class ScssLexer(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
url = 'https://sass-lang.com/'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Whitespace),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'(@media)(\s+)', bygroups(Keyword, Whitespace), 'value'),
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
# TODO: broken, and prone to infinite loops.
# (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
# (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
default('selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
default('#pop'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in common_sass_tokens.items():
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Whitespace), (r'[;{}]', Punctuation, '#pop')])
tokens['selector'].extend([(r'\n', Whitespace), (r'[;{}]', Punctuation, '#pop')])
class LessCssLexer(CssLexer):
"""
    For LESS stylesheets.
.. versionadded:: 2.1
"""
name = 'LessCss'
url = 'http://lesscss.org/'
aliases = ['less']
filenames = ['*.less']
mimetypes = ['text/x-less-css']
tokens = {
'root': [
(r'@\w+', Name.Variable),
inherit,
],
'content': [
(r'\{', Punctuation, '#push'),
(r'//.*\n', Comment.Single),
inherit,
],
}
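# A minimal usage sketch (the Sass snippet below is invented): the lexers in
# this module plug into the standard Pygments highlight() pipeline like any
# other lexer; each line's leading whitespace goes through _indentation()
# defined above.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = "$accent: #336699\n.button\n  color: $accent\n"
    print(highlight(sample, SassLexer(), TerminalFormatter()))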
| 25,314 | Python | 40.981758 | 97 | 0.510469 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/idl.py | """
pygments.lexers.idl
~~~~~~~~~~~~~~~~~~~
Lexers for IDL.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
String, Whitespace
__all__ = ['IDLLexer']
class IDLLexer(RegexLexer):
"""
Pygments Lexer for IDL (Interactive Data Language).
.. versionadded:: 1.6
"""
name = 'IDL'
url = 'https://www.l3harrisgeospatial.com/Software-Technology/IDL'
aliases = ['idl']
filenames = ['*.pro']
mimetypes = ['text/idl']
flags = re.IGNORECASE | re.MULTILINE
_RESERVED = (
'and', 'begin', 'break', 'case', 'common', 'compile_opt',
'continue', 'do', 'else', 'end', 'endcase', 'endelse',
'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
'endwhile', 'eq', 'for', 'foreach', 'forward_function',
'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro',
'repeat', 'switch', 'then', 'until', 'while', 'xor')
"""Reserved words from: http://www.exelisvis.com/docs/reswords.html"""
_BUILTIN_LIB = (
'abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10',
'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query',
'arg_present', 'array_equal', 'array_indices', 'arrow',
'ascii_template', 'asin', 'assoc', 'atan', 'axis',
'a_correlate', 'bandpass_filter', 'bandreject_filter',
'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk',
'besely', 'beta', 'bilinear', 'binary_template', 'bindgen',
'binomial', 'bin_date', 'bit_ffs', 'bit_population',
'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint',
'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
'bytscl', 'caldat', 'calendar', 'call_external',
'call_function', 'call_method', 'call_procedure', 'canny',
'catch', 'cd', r'cdf_\w*', 'ceil', 'chebyshev',
'check_math',
'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
'cmyk_convert', 'colorbar', 'colorize_sample',
'colormap_applicable', 'colormap_gradient',
'colormap_rotation', 'colortable', 'color_convert',
'color_exchange', 'color_quan', 'color_range_map', 'comfit',
'command_line_args', 'complex', 'complexarr', 'complexround',
'compute_mesh_normals', 'cond', 'congrid', 'conj',
'constrained_min', 'contour', 'convert_coord', 'convol',
'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos',
'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct',
'create_view', 'crossp', 'crvlength', 'cti_test',
'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord',
'cw_animate', 'cw_animate_getp', 'cw_animate_load',
'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index',
'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel',
'cw_form', 'cw_fslider', 'cw_light_editor',
'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient',
'cw_palette_editor', 'cw_palette_editor_get',
'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider',
'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists',
'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key',
'define_msgblk', 'define_msgblk_from_file', 'defroi',
'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv',
'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix',
'dialog_dbconnect', 'dialog_message', 'dialog_pickfile',
'dialog_printersetup', 'dialog_printjob',
'dialog_read_image', 'dialog_write_image', 'digital_filter',
'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure',
'dlm_load', 'dlm_register', 'doc_library', 'double',
'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
'eof', r'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx',
'erode', 'errorplot', 'errplot', 'estimator_filter',
'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
'file_basename', 'file_chmod', 'file_copy', 'file_delete',
'file_dirname', 'file_expand_path', 'file_info',
'file_lines', 'file_link', 'file_mkdir', 'file_move',
'file_poll_input', 'file_readlink', 'file_same',
'file_search', 'file_test', 'file_which', 'findgen',
'finite', 'fix', 'flick', 'float', 'floor', 'flow3',
'fltarr', 'flush', 'format_axis_values', 'free_lun',
'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root',
'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct',
'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint',
'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
'getwindows', 'get_drive_list', 'get_dxf_objects',
'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
'greg2jul', r'grib_\w*', 'grid3', 'griddata',
'grid_input', 'grid_tps', 'gs_iter',
r'h5[adfgirst]_\w*', 'h5_browser', 'h5_close',
'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
'hanning', 'hash', r'hdf_\w*', 'heap_free',
'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
'i18n_multibytetoutf8', 'i18n_multibytetowidechar',
'i18n_utf8tomultibyte', 'i18n_widechartomultibyte',
'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity',
'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64',
'idl_validname', 'iellipse', 'igamma', 'igetcurrent',
'igetdata', 'igetid', 'igetproperty', 'iimage', 'image',
'image_cont', 'image_statistics', 'imaginary', 'imap',
'indgen', 'intarr', 'interpol', 'interpolate',
'interval_volume', 'int_2d', 'int_3d', 'int_tabulated',
'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon',
'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve',
'irotate', 'ir_filter', 'isa', 'isave', 'iscale',
'isetcurrent', 'isetproperty', 'ishft', 'isocontour',
'isosurface', 'isurface', 'itext', 'itranslate', 'ivector',
'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse',
'json_serialize', 'jul2greg', 'julday', 'keyword_set',
'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date',
'label_region', 'ladfit', 'laguerre', 'laplacian',
'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ',
'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes',
'la_gm_linear_model', 'la_hqr', 'la_invert',
'la_least_squares', 'la_least_square_equality',
'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol',
'la_svd', 'la_tridc', 'la_trimprove', 'la_triql',
'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt',
'legend', 'legendre', 'linbcg', 'lindgen', 'linfit',
'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr',
'lngamma', 'lnp_test', 'loadct', 'locale_get',
'logical_and', 'logical_or', 'logical_true', 'lon64arr',
'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove',
'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll',
'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points',
'map_continents', 'map_grid', 'map_image', 'map_patch',
'map_proj_forward', 'map_proj_image', 'map_proj_info',
'map_proj_init', 'map_proj_inverse', 'map_set',
'matrix_multiply', 'matrix_power', 'max', 'md_test',
'mean', 'meanabsdev', 'mean_filter', 'median', 'memory',
'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge',
'mesh_numtriangles', 'mesh_obj', 'mesh_smooth',
'mesh_surfacearea', 'mesh_validate', 'mesh_volume',
'message', 'min', 'min_curve_surf', 'mk_html_help',
'modifyct', 'moment', 'morph_close', 'morph_distance',
'morph_gradient', 'morph_hitormiss', 'morph_open',
'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
r'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick',
'noise_scatter', 'noise_slur', 'norm', 'n_elements',
'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
'online_help', 'on_error', 'open', 'oplot', 'oploterr',
'parse_url', 'particle_trace', 'path_cache', 'path_sep',
'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox',
'plot_field', 'pnt_line', 'point_lun', 'polarplot',
'polar_contour', 'polar_surface', 'poly', 'polyfill',
'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp',
'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell',
'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes',
'print', 'printd', 'product', 'profile', 'profiler',
'profiles', 'project_vol', 'psafm', 'pseudo',
'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new',
'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull',
'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp',
'query_csv', 'query_dicom', 'query_gif', 'query_image',
'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict',
'query_png', 'query_ppm', 'query_srf', 'query_tiff',
'query_wav', 'radon', 'randomn', 'randomu', 'ranks',
'rdpix', 'read', 'reads', 'readu', 'read_ascii',
'read_binary', 'read_bmp', 'read_csv', 'read_dicom',
'read_gif', 'read_image', 'read_interfile', 'read_jpeg',
'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png',
'read_ppm', 'read_spr', 'read_srf', 'read_sylk',
'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap',
'read_xwd', 'real_part', 'rebin', 'recall_commands',
'recon3', 'reduce_colors', 'reform', 'region_grow',
'register_cursor', 'regress', 'replicate',
'replicate_inplace', 'resolve_all', 'resolve_routine',
'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts',
'rot', 'rotate', 'round', 'routine_filepath',
'routine_info', 'rs_test', 'r_correlate', 'r_test',
'save', 'savgol', 'scale3', 'scale3d', 'scope_level',
'scope_traceback', 'scope_varfetch', 'scope_varname',
'search2d', 'search3d', 'sem_create', 'sem_delete',
'sem_lock', 'sem_release', 'setenv', 'set_plot',
'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr',
'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap',
'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin',
'sindgen', 'sinh', 'size', 'skewness', 'skip_lun',
'slicer3', 'slide_image', 'smooth', 'sobel', 'socket',
'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat',
'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab',
'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize',
'stddev', 'stop', 'strarr', 'strcmp', 'strcompress',
'streamline', 'stregex', 'stretch', 'string', 'strjoin',
'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid',
'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign',
'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc',
'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace',
'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan',
'tanh', 'tek_color', 'temporary', 'tetra_clip',
'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed',
'timegen', 'time_test2', 'tm_test', 'total', 'trace',
'transpose', 'triangulate', 'trigrid', 'triql', 'trired',
'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff',
'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd',
'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint',
'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr',
'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym',
'value_locate', 'variance', 'vector', 'vector_field', 'vel',
'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj',
'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw',
'where', 'widget_base', 'widget_button', 'widget_combobox',
'widget_control', 'widget_displaycontextmen', 'widget_draw',
'widget_droplist', 'widget_event', 'widget_info',
'widget_label', 'widget_list', 'widget_propertysheet',
'widget_slider', 'widget_tab', 'widget_table',
'widget_text', 'widget_tree', 'widget_tree_move',
'widget_window', 'wiener_filter', 'window', 'writeu',
'write_bmp', 'write_csv', 'write_gif', 'write_image',
'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict',
'write_png', 'write_ppm', 'write_spr', 'write_srf',
'write_sylk', 'write_tiff', 'write_wav', 'write_wave',
'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt',
'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet',
'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar',
'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet',
'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps',
'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise',
'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont',
'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl',
'xmtool', 'xobjview', 'xobjview_rotate',
'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d',
'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit',
'xvolume', 'xvolume_rotate', 'xvolume_write_image',
'xyouts', 'zoom', 'zoom_24')
"""Functions from: http://www.exelisvis.com/docs/routines-1.html"""
tokens = {
'root': [
(r'(^\s*)(;.*?)(\n)', bygroups(Whitespace, Comment.Single,
Whitespace)),
(words(_RESERVED, prefix=r'\b', suffix=r'\b'), Keyword),
(words(_BUILTIN_LIB, prefix=r'\b', suffix=r'\b'), Name.Builtin),
(r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator),
(r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator),
(r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator),
(r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
(r'"[^\"]*"', String.Double),
(r"'[^\']*'", String.Single),
(r'\b[+\-]?([0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(D|E)?([+\-]?[0-9]+)?\b',
Number.Float),
(r'\b\'[+\-]?[0-9A-F]+\'X(U?(S?|L{1,2})|B)\b', Number.Hex),
(r'\b\'[+\-]?[0-7]+\'O(U?(S?|L{1,2})|B)\b', Number.Oct),
(r'\b[+\-]?[0-9]+U?L{1,2}\b', Number.Integer.Long),
(r'\b[+\-]?[0-9]+U?S?\b', Number.Integer),
(r'\b[+\-]?[0-9]+B\b', Number),
(r'[ \t]+', Whitespace),
(r'\n', Whitespace),
(r'.', Text),
]
}
def analyse_text(text):
"""endelse seems to be unique to IDL, endswitch is rare at least."""
result = 0
if 'endelse' in text:
result += 0.2
if 'endswitch' in text:
result += 0.01
return result
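# A minimal sketch (the IDL fragment is invented) exercising both the lexer
# and the analyse_text() heuristic through the public Pygments API.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = ("pro demo\n"
              "  if a gt 0 then begin\n"
              "    print, a\n"
              "  endif else begin\n"
              "    print, -a\n"
              "  endelse\n"
              "end\n")
    # 'endelse' occurs and 'endswitch' does not, so the score should be 0.2.
    print(IDLLexer.analyse_text(sample))
    print(highlight(sample, IDLLexer(), TerminalFormatter()))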
| 15,450 | Python | 53.024475 | 79 | 0.544595 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/math.py | """
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Just export lexers that were contained in this module.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.python import NumPyLexer
from pygments.lexers.matlab import MatlabLexer, MatlabSessionLexer, \
OctaveLexer, ScilabLexer
from pygments.lexers.julia import JuliaLexer, JuliaConsoleLexer
from pygments.lexers.r import RConsoleLexer, SLexer, RdLexer
from pygments.lexers.modeling import BugsLexer, JagsLexer, StanLexer
from pygments.lexers.idl import IDLLexer
from pygments.lexers.algebra import MuPADLexer
__all__ = []
| 676 | Python | 31.238094 | 70 | 0.757396 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/textfmts.py | """
pygments.lexers.textfmts
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for various text formats.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.lexer import RegexLexer, bygroups, default, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Literal, Punctuation
from pygments.util import ClassNotFound
__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer',
'NotmuchLexer', 'KernelLogLexer']
class IrcLogsLexer(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/])* # Date as - or /-separated groups of digits
(?:\d{1,4})
[T ])? # Date/time separator: T or space
(?: \d?\d [:.])* # Time as :/.-separated groups of 1 or 2 digits
(?: \d?\d)
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
(\S+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
(\S+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"\S+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
.. versionadded:: 0.9
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
filenames = ['*.pot', '*.po']
mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
tokens = {
'root': [
(r'^#,\s.*?$', Keyword.Type),
(r'^#:\s.*?$', Keyword.Declaration),
# (r'^#$', Comment),
(r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
(r'^(")([A-Za-z-]+:)(.*")$',
bygroups(String, Name.Property, String)),
(r'^".*"$', String),
(r'^(msgid|msgid_plural|msgstr|msgctxt)(\s+)(".*")$',
bygroups(Name.Variable, Text, String)),
(r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
]
}
class HttpLexer(RegexLexer):
"""
Lexer for HTTP sessions.
.. versionadded:: 1.5
"""
name = 'HTTP'
aliases = ['http']
flags = re.DOTALL
def get_tokens_unprocessed(self, text, stack=('root',)):
"""Reset the content-type state."""
self.content_type = None
return RegexLexer.get_tokens_unprocessed(self, text, stack)
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
def continuous_header_callback(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Literal, match.group(2)
yield match.start(3), Text, match.group(3)
def content_callback(self, match):
content_type = getattr(self, 'content_type', None)
content = match.group()
offset = match.start()
if content_type:
from pygments.lexers import get_lexer_for_mimetype
possible_lexer_mimetypes = [content_type]
if '+' in content_type:
# application/calendar+xml can be treated as application/xml
# if there's not a better match.
general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2',
content_type)
possible_lexer_mimetypes.append(general_type)
for i in possible_lexer_mimetypes:
try:
lexer = get_lexer_for_mimetype(i)
except ClassNotFound:
pass
else:
for idx, token, value in lexer.get_tokens_unprocessed(content):
yield offset + idx, token, value
return
yield offset, Text, content
tokens = {
'root': [
(r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH|CONNECT)( +)([^ ]+)( +)'
r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)(\r?\n|\Z)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
(r'(HTTP)(/)(1\.[01]|2(?:\.0)?|3)( +)(\d{3})(?:( +)([^\r\n]*))?(\r?\n|\Z)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number, Text,
Name.Exception, Text),
'headers'),
],
'headers': [
(r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)', header_callback),
(r'([\t ]+)([^\r\n]+)(\r?\n|\Z)', continuous_header_callback),
(r'\r?\n', Text, 'content')
],
'content': [
(r'.+', content_callback)
]
}
def analyse_text(text):
return text.startswith(('GET /', 'POST /', 'PUT /', 'DELETE /', 'HEAD /',
'OPTIONS /', 'TRACE /', 'PATCH /', 'CONNECT '))
class TodotxtLexer(RegexLexer):
"""
Lexer for Todo.txt todo list format.
.. versionadded:: 2.0
"""
name = 'Todotxt'
url = 'http://todotxt.com/'
aliases = ['todotxt']
# *.todotxt is not a standard extension for Todo.txt files; including it
# makes testing easier, and also makes autodetecting file type easier.
filenames = ['todo.txt', '*.todotxt']
mimetypes = ['text/x-todo']
    # Aliases mapping Todo.txt format concepts to standard token types
CompleteTaskText = Operator # Chosen to de-emphasize complete tasks
IncompleteTaskText = Text # Incomplete tasks should look like plain text
# Priority should have most emphasis to indicate importance of tasks
Priority = Generic.Heading
# Dates should have next most emphasis because time is important
Date = Generic.Subheading
# Project and context should have equal weight, and be in different colors
Project = Generic.Error
Context = String
# If tag functionality is added, it should have the same weight as Project
# and Context, and a different color. Generic.Traceback would work well.
# Regex patterns for building up rules; dates, priorities, projects, and
# contexts are all atomic
# TODO: Make date regex more ISO 8601 compliant
date_regex = r'\d{4,}-\d{2}-\d{2}'
priority_regex = r'\([A-Z]\)'
project_regex = r'\+\S+'
context_regex = r'@\S+'
# Compound regex expressions
complete_one_date_regex = r'(x )(' + date_regex + r')'
complete_two_date_regex = (complete_one_date_regex + r'( )(' +
date_regex + r')')
priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')'
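    # Worked example, in the style of the todo.txt documentation:
    #   "x 2011-03-02 2011-03-01 Review Tim's pull request +TodoTxtTouch @github"
    # The leading "x 2011-03-02 2011-03-01" matches complete_two_date_regex
    # (completion date, then creation date); "+TodoTxtTouch" and "@github" are
    # then tokenized as Project and Context in the 'complete' state below.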
tokens = {
# Should parse starting at beginning of line; each line is a task
'root': [
# Complete task entry points: two total:
# 1. Complete task with two dates
(complete_two_date_regex, bygroups(CompleteTaskText, Date,
CompleteTaskText, Date),
'complete'),
# 2. Complete task with one date
(complete_one_date_regex, bygroups(CompleteTaskText, Date),
'complete'),
# Incomplete task entry points: six total:
# 1. Priority plus date
(priority_date_regex, bygroups(Priority, IncompleteTaskText, Date),
'incomplete'),
# 2. Priority only
(priority_regex, Priority, 'incomplete'),
# 3. Leading date
(date_regex, Date, 'incomplete'),
# 4. Leading context
(context_regex, Context, 'incomplete'),
# 5. Leading project
(project_regex, Project, 'incomplete'),
# 6. Non-whitespace catch-all
(r'\S+', IncompleteTaskText, 'incomplete'),
],
# Parse a complete task
'complete': [
# Newline indicates end of task, should return to root
(r'\s*\n', CompleteTaskText, '#pop'),
# Tokenize contexts and projects
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
(r'\S+', CompleteTaskText),
# Tokenize whitespace not containing a newline
(r'\s+', CompleteTaskText),
],
# Parse an incomplete task
'incomplete': [
# Newline indicates end of task, should return to root
(r'\s*\n', IncompleteTaskText, '#pop'),
# Tokenize contexts and projects
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
(r'\S+', IncompleteTaskText),
# Tokenize whitespace not containing a newline
(r'\s+', IncompleteTaskText),
],
}
class NotmuchLexer(RegexLexer):
"""
For Notmuch email text format.
.. versionadded:: 2.5
Additional options accepted:
`body_lexer`
If given, highlight the contents of the message body with the specified
lexer, else guess it according to the body content (default: ``None``).
"""
name = 'Notmuch'
url = 'https://notmuchmail.org/'
aliases = ['notmuch']
def _highlight_code(self, match):
code = match.group(1)
try:
if self.body_lexer:
lexer = get_lexer_by_name(self.body_lexer)
else:
lexer = guess_lexer(code.strip())
except ClassNotFound:
lexer = get_lexer_by_name('text')
yield from lexer.get_tokens_unprocessed(code)
tokens = {
'root': [
(r'\fmessage\{\s*', Keyword, ('message', 'message-attr')),
],
'message-attr': [
(r'(\s*id:\s*)(\S+)', bygroups(Name.Attribute, String)),
(r'(\s*(?:depth|match|excluded):\s*)(\d+)',
bygroups(Name.Attribute, Number.Integer)),
(r'(\s*filename:\s*)(.+\n)',
bygroups(Name.Attribute, String)),
default('#pop'),
],
'message': [
(r'\fmessage\}\n', Keyword, '#pop'),
(r'\fheader\{\n', Keyword, 'header'),
(r'\fbody\{\n', Keyword, 'body'),
],
'header': [
(r'\fheader\}\n', Keyword, '#pop'),
(r'((?:Subject|From|To|Cc|Date):\s*)(.*\n)',
bygroups(Name.Attribute, String)),
(r'(.*)(\s*\(.*\))(\s*\(.*\)\n)',
bygroups(Generic.Strong, Literal, Name.Tag)),
],
'body': [
(r'\fpart\{\n', Keyword, 'part'),
(r'\f(part|attachment)\{\s*', Keyword, ('part', 'part-attr')),
(r'\fbody\}\n', Keyword, '#pop'),
],
'part-attr': [
(r'(ID:\s*)(\d+)', bygroups(Name.Attribute, Number.Integer)),
(r'(,\s*)((?:Filename|Content-id):\s*)([^,]+)',
bygroups(Punctuation, Name.Attribute, String)),
(r'(,\s*)(Content-type:\s*)(.+\n)',
bygroups(Punctuation, Name.Attribute, String)),
default('#pop'),
],
'part': [
(r'\f(?:part|attachment)\}\n', Keyword, '#pop'),
(r'\f(?:part|attachment)\{\s*', Keyword, ('#push', 'part-attr')),
(r'^Non-text part: .*\n', Comment),
(r'(?s)(.*?(?=\f(?:part|attachment)\}\n))', _highlight_code),
],
}
def analyse_text(text):
return 1.0 if text.startswith('\fmessage{') else 0.0
def __init__(self, **options):
self.body_lexer = options.get('body_lexer', None)
RegexLexer.__init__(self, **options)
class KernelLogLexer(RegexLexer):
"""
For Linux Kernel log ("dmesg") output.
.. versionadded:: 2.6
"""
name = 'Kernel log'
aliases = ['kmsg', 'dmesg']
filenames = ['*.kmsg', '*.dmesg']
tokens = {
'root': [
(r'^[^:]+:debug : (?=\[)', Text, 'debug'),
(r'^[^:]+:info : (?=\[)', Text, 'info'),
(r'^[^:]+:warn : (?=\[)', Text, 'warn'),
(r'^[^:]+:notice: (?=\[)', Text, 'warn'),
(r'^[^:]+:err : (?=\[)', Text, 'error'),
(r'^[^:]+:crit : (?=\[)', Text, 'error'),
(r'^(?=\[)', Text, 'unknown'),
],
'unknown': [
(r'^(?=.+(warning|notice|audit|deprecated))', Text, 'warn'),
(r'^(?=.+(error|critical|fail|Bug))', Text, 'error'),
default('info'),
],
'base': [
(r'\[[0-9. ]+\] ', Number),
(r'(?<=\] ).+?:', Keyword),
(r'\n', Text, '#pop'),
],
'debug': [
include('base'),
(r'.+\n', Comment, '#pop')
],
'info': [
include('base'),
(r'.+\n', Text, '#pop')
],
'warn': [
include('base'),
(r'.+\n', Generic.Strong, '#pop')
],
'error': [
include('base'),
(r'.+\n', Generic.Error, '#pop')
]
}
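# A minimal sketch (the response text is invented) showing how HttpLexer
# hands the message body to a sub-lexer selected from the Content-Type
# header, as implemented in content_callback() above.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    response = (
        "HTTP/1.1 200 OK\r\n"
        "Content-Type: application/json\r\n"
        "\r\n"
        '{"status": "ok"}\n'
    )
    # The JSON body is highlighted with the lexer registered for its mimetype.
    print(highlight(response, HttpLexer(), TerminalFormatter()))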
| 15,192 | Python | 34.168981 | 89 | 0.483939 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/make.py | """
pygments.lexers.make
~~~~~~~~~~~~~~~~~~~~
Lexers for Makefiles and similar.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, \
do_insertions, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation, Whitespace
from pygments.lexers.shell import BashLexer
__all__ = ['MakefileLexer', 'BaseMakefileLexer', 'CMakeLexer']
class MakefileLexer(Lexer):
"""
    Lexer for BSD and GNU make extensions (lenient enough to handle both,
    even in the same file).
*Rewritten in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
filenames = ['*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(
r'^(?:'
# BSD Make
r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
# GNU Make
r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:|vpath)|'
# GNU Automake
r'\s*(if|else|endif))(?=\s)')
r_comment = re.compile(r'^\s*@?#')
def get_tokens_unprocessed(self, text):
ins = []
lines = text.splitlines(keepends=True)
done = ''
lex = BaseMakefileLexer(**self.options)
backslashflag = False
for line in lines:
if self.r_special.match(line) or backslashflag:
ins.append((len(done), [(0, Comment.Preproc, line)]))
backslashflag = line.strip().endswith('\\')
elif self.r_comment.match(line):
ins.append((len(done), [(0, Comment, line)]))
else:
done += line
yield from do_insertions(ins, lex.get_tokens_unprocessed(done))
def analyse_text(text):
# Many makefiles have $(BIG_CAPS) style variables
if re.search(r'\$\([A-Z_]+\)', text):
return 0.1
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
.. versionadded:: 0.10
"""
name = 'Base Makefile'
aliases = ['basemake']
filenames = []
mimetypes = []
tokens = {
'root': [
# recipes (need to allow spaces because of expandtabs)
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
# special variables
(r'\$[<@$+%?|*]', Keyword),
(r'\s+', Whitespace),
(r'#.*?\n', Comment),
(r'((?:un)?export)(\s+)(?=[\w${}\t -]+\n)',
bygroups(Keyword, Whitespace), 'export'),
(r'(?:un)?export\s+', Keyword),
# assignment
(r'([\w${}().-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)',
bygroups(
Name.Variable, Whitespace, Operator, Whitespace,
using(BashLexer))),
# strings
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(
Name.Function, Operator, Whitespace),
'block-header'),
# expansions
(r'\$\(', Keyword, 'expansion'),
],
'expansion': [
(r'[^\w$().-]+', Text),
(r'[\w.-]+', Name.Variable),
(r'\$', Keyword),
(r'\(', Keyword, '#push'),
(r'\)', Keyword, '#pop'),
],
'export': [
(r'[\w${}-]+', Name.Variable),
(r'\n', Text, '#pop'),
(r'\s+', Whitespace),
],
'block-header': [
(r'[,|]', Punctuation),
(r'#.*?\n', Comment, '#pop'),
(r'\\\n', Text), # line continuation
(r'\$\(', Keyword, 'expansion'),
(r'[a-zA-Z_]+', Name),
(r'\n', Whitespace, '#pop'),
(r'.', Text),
],
}
class CMakeLexer(RegexLexer):
"""
Lexer for CMake files.
.. versionadded:: 1.2
"""
name = 'CMake'
url = 'https://cmake.org/documentation/'
aliases = ['cmake']
filenames = ['*.cmake', 'CMakeLists.txt']
mimetypes = ['text/x-cmake']
tokens = {
'root': [
# (r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
# r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
# r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
# r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
# r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
# r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
# r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
# r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
# r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
# r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
# r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
# r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
# r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
# r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
# r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
# r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
# r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
# r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
# r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
# r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
# r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
# r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
# r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
# r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
# r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
# r'COUNTARGS)\b', Name.Builtin, 'args'),
(r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Whitespace,
Punctuation), 'args'),
include('keywords'),
include('ws')
],
'args': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(\$\{)(.+?)(\})', bygroups(Operator, Name.Variable, Operator)),
(r'(\$ENV\{)(.+?)(\})', bygroups(Operator, Name.Variable, Operator)),
(r'(\$<)(.+?)(>)', bygroups(Operator, Name.Variable, Operator)),
(r'(?s)".*?"', String.Double),
(r'\\\S+', String),
(r'[^)$"# \t\n]+', String),
(r'\n', Whitespace), # explicitly legal
include('keywords'),
include('ws')
],
'string': [
],
'keywords': [
(r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
],
'ws': [
(r'[ \t]+', Whitespace),
(r'#.*\n', Comment),
]
}
def analyse_text(text):
exp = (
r'^[ \t]*CMAKE_MINIMUM_REQUIRED[ \t]*'
r'\([ \t]*VERSION[ \t]*\d+(\.\d+)*[ \t]*'
r'([ \t]FATAL_ERROR)?[ \t]*\)[ \t]*'
r'(#[^\n]*)?$'
)
if re.search(exp, text, flags=re.MULTILINE | re.IGNORECASE):
return 0.8
return 0.0
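# A minimal sketch (both fragments are invented) of the two analyse_text()
# heuristics defined above: $(BIG_CAPS) variables for Makefiles and a
# cmake_minimum_required() line for CMake.
if __name__ == '__main__':
    makefile = "CC = gcc\nall: main.o\n\t$(CC) -o app main.o\n"
    cmakelists = "cmake_minimum_required(VERSION 3.10)\nproject(app)\n"
    print(MakefileLexer.analyse_text(makefile))   # should be 0.1
    print(CMakeLexer.analyse_text(cmakelists))    # should be 0.8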
| 7,550 | Python | 34.957143 | 86 | 0.502517 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/clean.py | """
pygments.lexers.clean
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Clean language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import ExtendedRegexLexer, words, default, include, bygroups
from pygments.token import Comment, Error, Keyword, Literal, Name, Number, \
Operator, Punctuation, String, Whitespace
__all__ = ['CleanLexer']
class CleanLexer(ExtendedRegexLexer):
"""
    Lexer for the general-purpose, state-of-the-art, pure and lazy functional
    programming language Clean.
    .. versionadded:: 2.2
"""
name = 'Clean'
url = 'http://clean.cs.ru.nl/Clean'
aliases = ['clean']
filenames = ['*.icl', '*.dcl']
keywords = (
'case', 'ccall', 'class', 'code', 'code inline', 'derive', 'export',
'foreign', 'generic', 'if', 'in', 'infix', 'infixl', 'infixr',
'instance', 'let', 'of', 'otherwise', 'special', 'stdcall', 'where',
'with')
modulewords = ('implementation', 'definition', 'system')
lowerId = r'[a-z`][\w`]*'
upperId = r'[A-Z`][\w`]*'
funnyId = r'[~@#$%\^?!+\-*<>\\/|&=:]+'
scoreUpperId = r'_' + upperId
scoreLowerId = r'_' + lowerId
moduleId = r'[a-zA-Z_][a-zA-Z0-9_.`]+'
classId = '|'.join([lowerId, upperId, funnyId])
tokens = {
'root': [
include('comments'),
include('keywords'),
include('module'),
include('import'),
include('whitespace'),
include('literals'),
include('operators'),
include('delimiters'),
include('names'),
],
'whitespace': [
(r'\s+', Whitespace),
],
'comments': [
(r'//.*\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comments.in'),
(r'/\*\*', Comment.Special, 'comments.in'),
],
'comments.in': [
(r'\*\/', Comment.Multiline, '#pop'),
(r'/\*', Comment.Multiline, '#push'),
(r'[^*/]+', Comment.Multiline),
(r'\*(?!/)', Comment.Multiline),
(r'/', Comment.Multiline),
],
'keywords': [
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
],
'module': [
(words(modulewords, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(r'\bmodule\b', Keyword.Namespace, 'module.name'),
],
'module.name': [
include('whitespace'),
(moduleId, Name.Class, '#pop'),
],
'import': [
(r'\b(import)\b(\s*)', bygroups(Keyword, Whitespace), 'import.module'),
(r'\b(from)\b(\s*)\b(' + moduleId + r')\b(\s*)\b(import)\b',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword),
'import.what'),
],
'import.module': [
(r'\b(qualified)\b(\s*)', bygroups(Keyword, Whitespace)),
(r'(\s*)\b(as)\b', bygroups(Whitespace, Keyword), ('#pop', 'import.module.as')),
(moduleId, Name.Class),
(r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
(r'\s+', Whitespace),
default('#pop'),
],
'import.module.as': [
include('whitespace'),
(lowerId, Name.Class, '#pop'),
(upperId, Name.Class, '#pop'),
],
'import.what': [
(r'\b(class)\b(\s+)(' + classId + r')',
bygroups(Keyword, Whitespace, Name.Class), 'import.what.class'),
(r'\b(instance)(\s+)(' + classId + r')(\s+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace), 'import.what.instance'),
(r'(::)(\s*)\b(' + upperId + r')\b',
bygroups(Punctuation, Whitespace, Name.Class), 'import.what.type'),
(r'\b(generic)\b(\s+)\b(' + lowerId + '|' + upperId + r')\b',
bygroups(Keyword, Whitespace, Name)),
include('names'),
(r'(,)(\s+)', bygroups(Punctuation, Whitespace)),
(r'$', Whitespace, '#pop'),
include('whitespace'),
],
'import.what.class': [
(r',', Punctuation, '#pop'),
(r'\(', Punctuation, 'import.what.class.members'),
(r'$', Whitespace, '#pop:2'),
include('whitespace'),
],
'import.what.class.members': [
(r',', Punctuation),
(r'\.\.', Punctuation),
(r'\)', Punctuation, '#pop'),
include('names'),
],
'import.what.instance': [
(r'[,)]', Punctuation, '#pop'),
(r'\(', Punctuation, 'import.what.instance'),
(r'$', Whitespace, '#pop:2'),
include('whitespace'),
include('names'),
],
'import.what.type': [
(r',', Punctuation, '#pop'),
(r'[({]', Punctuation, 'import.what.type.consesandfields'),
(r'$', Whitespace, '#pop:2'),
include('whitespace'),
],
'import.what.type.consesandfields': [
(r',', Punctuation),
(r'\.\.', Punctuation),
(r'[)}]', Punctuation, '#pop'),
include('names'),
],
'literals': [
(r'\'([^\'\\]|\\(x[\da-fA-F]+|\d+|.))\'', Literal.Char),
(r'[+~-]?0[0-7]+\b', Number.Oct),
(r'[+~-]?\d+\.\d+(E[+-]?\d+)?', Number.Float),
(r'[+~-]?\d+\b', Number.Integer),
(r'[+~-]?0x[\da-fA-F]+\b', Number.Hex),
(r'True|False', Literal),
(r'"', String.Double, 'literals.stringd'),
],
'literals.stringd': [
(r'[^\\"\n]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\\.', String.Double),
(r'[$\n]', Error, '#pop'),
],
'operators': [
(r'[-~@#$%\^?!+*<>\\/|&=:.]+', Operator),
(r'\b_+\b', Operator),
],
'delimiters': [
(r'[,;(){}\[\]]', Punctuation),
(r'(\')([\w`.]+)(\')',
bygroups(Punctuation, Name.Class, Punctuation)),
],
'names': [
(lowerId, Name),
(scoreLowerId, Name),
(funnyId, Name.Function),
(upperId, Name.Class),
(scoreUpperId, Name.Class),
]
}
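# A minimal sketch (the Clean fragment is invented) of running the lexer
# through the public Pygments API.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'module hello\nStart :: String\nStart = "Hello, world"\n'
    print(highlight(sample, CleanLexer(), TerminalFormatter()))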
| 6,395 | Python | 34.533333 | 95 | 0.451446 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/igor.py | """
pygments.lexers.igor
~~~~~~~~~~~~~~~~~~~~
Lexers for Igor Pro.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Keyword, Name, String, Whitespace
__all__ = ['IgorLexer']
class IgorLexer(RegexLexer):
"""
Pygments Lexer for Igor Pro procedure files (.ipf).
See http://www.wavemetrics.com/ and http://www.igorexchange.com/.
.. versionadded:: 2.0
"""
name = 'Igor'
aliases = ['igor', 'igorpro']
filenames = ['*.ipf']
mimetypes = ['text/ipf']
flags = re.IGNORECASE | re.MULTILINE
flowControl = (
'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch',
'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry',
'break', 'continue', 'return', 'AbortOnRTE', 'AbortOnValue'
)
types = (
'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
'STRUCT', 'dfref', 'funcref', 'char', 'uchar', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'float', 'double'
)
keywords = (
'override', 'ThreadSafe', 'MultiThread', 'static', 'Proc',
'Picture', 'Prompt', 'DoPrompt', 'macro', 'window', 'function', 'end',
'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu'
)
operations = (
'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', 'AddMovieFrame',
'AddWavesToBoxPlot', 'AddWavesToViolinPlot', 'AdoptFiles', 'APMath', 'Append',
'AppendBoxPlot', 'AppendImage', 'AppendLayoutObject', 'AppendMatrixContour',
'AppendText', 'AppendToGizmo', 'AppendToGraph', 'AppendToLayout', 'AppendToTable',
'AppendViolinPlot', 'AppendXYZContour', 'AutoPositionWindow',
'AxonTelegraphFindServers', 'BackgroundInfo', 'Beep', 'BoundingBall', 'BoxSmooth',
'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', 'CheckDisplayed',
'ChooseColor', 'Close', 'CloseHelp', 'CloseMovie', 'CloseProc', 'ColorScale',
'ColorTab2Wave', 'Concatenate', 'ControlBar', 'ControlInfo', 'ControlUpdate',
'ConvertGlobalStringTextEncoding', 'ConvexHull', 'Convolve', 'CopyDimLabels',
'CopyFile', 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut',
'CreateBrowser', 'Cross', 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground',
'Cursor', 'CurveFit', 'CustomControl', 'CWT', 'DAQmx_AI_SetupReader',
'DAQmx_AO_SetOutputs', 'DAQmx_CTR_CountEdges', 'DAQmx_CTR_OutputPulse',
'DAQmx_CTR_Period', 'DAQmx_CTR_PulseWidth', 'DAQmx_DIO_Config',
'DAQmx_DIO_WriteNewData', 'DAQmx_Scan', 'DAQmx_WaveformGen', 'Debugger',
'DebuggerOptions', 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont',
'DefaultTextEncoding', 'DefineGuide', 'DelayUpdate', 'DeleteAnnotations',
'DeleteFile', 'DeleteFolder', 'DeletePoints', 'Differentiate', 'dir', 'Display',
'DisplayHelpTopic', 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate',
'DoWindow', 'DoXOPIdle', 'DPSS', 'DrawAction', 'DrawArc', 'DrawBezier',
'DrawLine', 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect',
'DrawText', 'DrawUserShape', 'DSPDetrend', 'DSPPeriodogram', 'Duplicate',
'DuplicateDataFolder', 'DWT', 'EdgeStats', 'Edit', 'ErrorBars',
'EstimatePeakSizes', 'Execute', 'ExecuteScriptText', 'ExperimentInfo',
'ExperimentModified', 'ExportGizmo', 'Extract', 'FastGaussTransform', 'FastOp',
'FBinRead', 'FBinWrite', 'FFT', 'FGetPos', 'FIFOStatus', 'FIFO2Wave', 'FilterFIR',
'FilterIIR', 'FindAPeak', 'FindContour', 'FindDuplicates', 'FindLevel',
'FindLevels', 'FindPeak', 'FindPointsInPoly', 'FindRoots', 'FindSequence',
'FindValue', 'FMaxFlat', 'FPClustering', 'fprintf', 'FReadLine', 'FSetPos',
'FStatus', 'FTPCreateDirectory', 'FTPDelete', 'FTPDownload', 'FTPUpload',
'FuncFit', 'FuncFitMD', 'GBLoadWave', 'GetAxis', 'GetCamera', 'GetFileFolderInfo',
'GetGizmo', 'GetLastUserMenuInfo', 'GetMarquee', 'GetMouse', 'GetSelection',
'GetWindow', 'GISCreateVectorLayer', 'GISGetRasterInfo',
'GISGetRegisteredFileInfo', 'GISGetVectorLayerInfo', 'GISLoadRasterData',
'GISLoadVectorData', 'GISRasterizeVectorData', 'GISRegisterFile',
'GISTransformCoords', 'GISUnRegisterFile', 'GISWriteFieldData',
'GISWriteGeometryData', 'GISWriteRaster', 'GPIBReadBinaryWave2',
'GPIBReadBinary2', 'GPIBReadWave2', 'GPIBRead2', 'GPIBWriteBinaryWave2',
'GPIBWriteBinary2', 'GPIBWriteWave2', 'GPIBWrite2', 'GPIB2', 'GraphNormal',
'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', 'Hanning', 'HDFInfo',
'HDFReadImage', 'HDFReadSDS', 'HDFReadVset', 'HDF5CloseFile', 'HDF5CloseGroup',
'HDF5ConvertColors', 'HDF5CreateFile', 'HDF5CreateGroup', 'HDF5CreateLink',
'HDF5Dump', 'HDF5DumpErrors', 'HDF5DumpState', 'HDF5FlushFile',
'HDF5ListAttributes', 'HDF5ListGroup', 'HDF5LoadData', 'HDF5LoadGroup',
'HDF5LoadImage', 'HDF5OpenFile', 'HDF5OpenGroup', 'HDF5SaveData', 'HDF5SaveGroup',
'HDF5SaveImage', 'HDF5TestOperation', 'HDF5UnlinkObject', 'HideIgorMenus',
'HideInfo', 'HideProcedures', 'HideTools', 'HilbertTransform', 'Histogram', 'ICA',
'IFFT', 'ImageAnalyzeParticles', 'ImageBlend', 'ImageBoundaryToMask',
'ImageComposite', 'ImageEdgeDetection', 'ImageFileInfo', 'ImageFilter',
'ImageFocus', 'ImageFromXYZ', 'ImageGenerateROIMask', 'ImageGLCM',
'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', 'ImageLineProfile',
'ImageLoad', 'ImageMorphology', 'ImageRegistration', 'ImageRemoveBackground',
'ImageRestore', 'ImageRotate', 'ImageSave', 'ImageSeedFill', 'ImageSkeleton3d',
'ImageSnake', 'ImageStats', 'ImageThreshold', 'ImageTransform',
'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', 'InsertPoints', 'Integrate',
'IntegrateODE', 'Integrate2D', 'Interpolate2', 'Interpolate3D', 'Interp3DPath',
'ITCCloseAll2', 'ITCCloseDevice2', 'ITCConfigAllChannels2',
'ITCConfigChannelReset2', 'ITCConfigChannelUpload2', 'ITCConfigChannel2',
'ITCFIFOAvailableAll2', 'ITCFIFOAvailable2', 'ITCGetAllChannelsConfig2',
'ITCGetChannelConfig2', 'ITCGetCurrentDevice2', 'ITCGetDeviceInfo2',
'ITCGetDevices2', 'ITCGetErrorString2', 'ITCGetSerialNumber2', 'ITCGetState2',
'ITCGetVersions2', 'ITCInitialize2', 'ITCOpenDevice2', 'ITCReadADC2',
'ITCReadDigital2', 'ITCReadTimer2', 'ITCSelectDevice2', 'ITCSetDAC2',
'ITCSetGlobals2', 'ITCSetModes2', 'ITCSetState2', 'ITCStartAcq2', 'ITCStopAcq2',
'ITCUpdateFIFOPositionAll2', 'ITCUpdateFIFOPosition2', 'ITCWriteDigital2',
'JCAMPLoadWave', 'JointHistogram', 'KillBackground', 'KillControl',
'KillDataFolder', 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs',
'KillStrings', 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label',
'Layout', 'LayoutPageAction', 'LayoutSlideShow', 'Legend',
'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', 'LoadPackagePreferences',
'LoadPICT', 'LoadWave', 'Loess', 'LombPeriodogram', 'Make', 'MakeIndex',
'MarkPerfTestTime', 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV',
'MatrixFilter', 'MatrixGaussJ', 'MatrixGLM', 'MatrixInverse', 'MatrixLinearSolve',
'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', 'MatrixLUDTD',
'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', 'MatrixSVBkSub',
'MatrixSVD', 'MatrixTranspose', 'MCC_FindServers', 'MeasureStyledText',
'MFR_CheckForNewBricklets',
'MFR_CloseResultFile', 'MFR_CreateOverviewTable', 'MFR_GetBrickletCount',
'MFR_GetBrickletData', 'MFR_GetBrickletDeployData', 'MFR_GetBrickletMetaData',
'MFR_GetBrickletRawData', 'MFR_GetReportTemplate', 'MFR_GetResultFileMetaData',
'MFR_GetResultFileName', 'MFR_GetVernissageVersion', 'MFR_GetVersion',
'MFR_GetXOPErrorMessage', 'MFR_OpenResultFile',
'MLLoadWave', 'Modify', 'ModifyBoxPlot', 'ModifyBrowser', 'ModifyCamera',
'ModifyContour', 'ModifyControl', 'ModifyControlList', 'ModifyFreeAxis',
'ModifyGizmo', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', 'ModifyPanel',
'ModifyTable', 'ModifyViolinPlot', 'ModifyWaterfall', 'MoveDataFolder',
'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
'MoveWave', 'MoveWindow', 'MultiTaperPSD', 'MultiThreadingControl',
'NC_CloseFile', 'NC_DumpErrors', 'NC_Inquire', 'NC_ListAttributes',
'NC_ListObjects', 'NC_LoadData', 'NC_OpenFile', 'NeuralNetworkRun',
'NeuralNetworkTrain', 'NewCamera', 'NewDataFolder', 'NewFIFO', 'NewFIFOChan',
'NewFreeAxis', 'NewGizmo', 'NewImage', 'NewLayout', 'NewMovie', 'NewNotebook',
'NewPanel', 'NewPath', 'NewWaterfall', 'NILoadWave', 'NI4882', 'Note', 'Notebook',
'NotebookAction', 'Open', 'OpenHelp', 'OpenNotebook', 'Optimize',
'ParseOperationTemplate', 'PathInfo', 'PauseForUser', 'PauseUpdate', 'PCA',
'PlayMovie', 'PlayMovieAction', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', 'PrintLayout',
'PrintNotebook', 'PrintSettings', 'PrintTable', 'Project', 'PulseStats',
'PutScrapText', 'pwd', 'Quit', 'RatioFromNumber', 'Redimension', 'Remez',
'Remove', 'RemoveContour', 'RemoveFromGizmo', 'RemoveFromGraph',
'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', 'RemoveLayoutObjects',
'RemovePath', 'Rename', 'RenameDataFolder', 'RenamePath', 'RenamePICT',
'RenameWindow', 'ReorderImages', 'ReorderTraces', 'ReplaceText', 'ReplaceWave',
'Resample', 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
'SaveExperiment', 'SaveGizmoCopy', 'SaveGraphCopy', 'SaveNotebook',
'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', 'SetActiveSubwindow',
'SetAxis', 'SetBackground', 'SetDashPattern', 'SetDataFolder', 'SetDimLabel',
'SetDrawEnv', 'SetDrawLayer', 'SetFileFolderInfo', 'SetFormula', 'SetIdlePeriod',
'SetIgorHook', 'SetIgorMenuMode', 'SetIgorOption', 'SetMarquee',
'SetProcessSleep', 'SetRandomSeed', 'SetScale', 'SetVariable', 'SetWaveLock',
'SetWaveTextEncoding', 'SetWindow', 'ShowIgorMenus', 'ShowInfo', 'ShowTools',
'Silent', 'Sleep', 'Slider', 'Smooth', 'SmoothCustom', 'Sort', 'SortColumns',
'SoundInRecord', 'SoundInSet', 'SoundInStartChart', 'SoundInStatus',
'SoundInStopChart', 'SoundLoadWave', 'SoundSaveWave', 'SphericalInterpolate',
'SphericalTriangulate', 'SplitString', 'SplitWave', 'sprintf', 'SQLHighLevelOp',
'sscanf', 'Stack', 'StackWindows', 'StatsAngularDistanceTest', 'StatsANOVA1Test',
'StatsANOVA2NRTest', 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
'StatsCircularCorrelationTest', 'StatsCircularMeans', 'StatsCircularMoments',
'StatsCircularTwoSampleTest', 'StatsCochranTest', 'StatsContingencyTable',
'StatsDIPTest', 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKDE', 'StatsKendallTauTest',
'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
'StatsLinearRegression', 'StatsMultiCorrelationTest', 'StatsNPMCTest',
'StatsNPNominalSRTest', 'StatsQuantiles', 'StatsRankCorrelationTest',
'StatsResample', 'StatsSample', 'StatsScheffeTest', 'StatsShapiroWilkTest',
'StatsSignTest', 'StatsSRTest', 'StatsTTest', 'StatsTukeyTest',
'StatsVariancesTest', 'StatsWatsonUSquaredTest', 'StatsWatsonWilliamsTest',
'StatsWheelerWatsonTest', 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest',
'STFT', 'String', 'StructFill', 'StructGet', 'StructPut', 'SumDimension',
'SumSeries', 'TabControl', 'Tag', 'TDMLoadData', 'TDMSaveData', 'TextBox',
'ThreadGroupPutDF', 'ThreadStart', 'TickWavesFromAxis', 'Tile', 'TileWindows',
'TitleBox', 'ToCommandLine', 'ToolsGrid', 'Triangulate3d', 'Unwrap', 'URLRequest',
'ValDisplay', 'Variable', 'VDTClosePort2', 'VDTGetPortList2', 'VDTGetStatus2',
'VDTOpenPort2', 'VDTOperationsPort2', 'VDTReadBinaryWave2', 'VDTReadBinary2',
'VDTReadHexWave2', 'VDTReadHex2', 'VDTReadWave2', 'VDTRead2', 'VDTTerminalPort2',
'VDTWriteBinaryWave2', 'VDTWriteBinary2', 'VDTWriteHexWave2', 'VDTWriteHex2',
'VDTWriteWave2', 'VDTWrite2', 'VDT2', 'VISAControl', 'VISARead', 'VISAReadBinary',
'VISAReadBinaryWave', 'VISAReadWave', 'VISAWrite', 'VISAWriteBinary',
'VISAWriteBinaryWave', 'VISAWriteWave', 'WaveMeanStdv', 'WaveStats',
'WaveTransform', 'wfprintf', 'WignerTransform', 'WindowFunction', 'XLLoadWave'
)
functions = (
'abs', 'acos', 'acosh', 'AddListItem', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD',
'alog', 'AnnotationInfo', 'AnnotationList', 'area', 'areaXY', 'asin', 'asinh',
'atan', 'atanh', 'atan2', 'AxisInfo', 'AxisList', 'AxisValFromPixel',
'AxonTelegraphAGetDataNum', 'AxonTelegraphAGetDataString',
'AxonTelegraphAGetDataStruct', 'AxonTelegraphGetDataNum',
'AxonTelegraphGetDataString', 'AxonTelegraphGetDataStruct',
'AxonTelegraphGetTimeoutMs', 'AxonTelegraphSetTimeoutMs', 'Base64Decode',
'Base64Encode', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'beta', 'betai',
'BinarySearch', 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise',
'cabs', 'CaptureHistory', 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num',
'chebyshev', 'chebyshevU', 'CheckName', 'ChildWindowList', 'CleanupName', 'cmplx',
'cmpstr', 'conj', 'ContourInfo', 'ContourNameList', 'ContourNameToWaveRef',
'ContourZ', 'ControlNameList', 'ConvertTextEncoding', 'cos', 'cosh',
'cosIntegral', 'cot', 'coth', 'CountObjects', 'CountObjectsDFR', 'cpowi',
'CreationDate', 'csc', 'csch', 'CsrInfo', 'CsrWave', 'CsrWaveRef', 'CsrXWave',
'CsrXWaveRef', 'CTabList', 'DataFolderDir', 'DataFolderExists',
'DataFolderRefsEqual', 'DataFolderRefStatus', 'date', 'datetime', 'DateToJulian',
'date2secs', 'Dawson', 'defined', 'deltax', 'digamma', 'dilogarithm', 'DimDelta',
'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', 'erfc', 'erfcw',
'exists', 'exp', 'expInt', 'expIntegralE1', 'expNoise', 'factorial', 'Faddeeva',
'fakedata', 'faverage', 'faverageXY', 'fDAQmx_AI_GetReader',
'fDAQmx_AO_UpdateOutputs', 'fDAQmx_ConnectTerminals', 'fDAQmx_CTR_Finished',
'fDAQmx_CTR_IsFinished', 'fDAQmx_CTR_IsPulseFinished', 'fDAQmx_CTR_ReadCounter',
'fDAQmx_CTR_ReadWithOptions', 'fDAQmx_CTR_SetPulseFrequency', 'fDAQmx_CTR_Start',
'fDAQmx_DeviceNames', 'fDAQmx_DIO_Finished', 'fDAQmx_DIO_PortWidth',
'fDAQmx_DIO_Read', 'fDAQmx_DIO_Write', 'fDAQmx_DisconnectTerminals',
'fDAQmx_ErrorString', 'fDAQmx_ExternalCalDate', 'fDAQmx_NumAnalogInputs',
'fDAQmx_NumAnalogOutputs', 'fDAQmx_NumCounters', 'fDAQmx_NumDIOPorts',
'fDAQmx_ReadChan', 'fDAQmx_ReadNamedChan', 'fDAQmx_ResetDevice',
'fDAQmx_ScanGetAvailable', 'fDAQmx_ScanGetNextIndex', 'fDAQmx_ScanStart',
'fDAQmx_ScanStop', 'fDAQmx_ScanWait', 'fDAQmx_ScanWaitWithTimeout',
'fDAQmx_SelfCalDate', 'fDAQmx_SelfCalibration', 'fDAQmx_WaveformStart',
'fDAQmx_WaveformStop', 'fDAQmx_WF_IsFinished', 'fDAQmx_WF_WaitUntilFinished',
'fDAQmx_WriteChan', 'FetchURL', 'FindDimLabel', 'FindListItem', 'floor',
'FontList', 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
'FuncRefInfo', 'FunctionInfo', 'FunctionList', 'FunctionPath', 'gamma',
'gammaEuler', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
'Gauss1D', 'Gauss2D', 'gcd', 'GetBrowserLine', 'GetBrowserSelection',
'GetDataFolder', 'GetDataFolderDFR', 'GetDefaultFont', 'GetDefaultFontSize',
'GetDefaultFontStyle', 'GetDimLabel', 'GetEnvironmentVariable', 'GetErrMessage',
'GetFormula', 'GetIndependentModuleName', 'GetIndexedObjName',
'GetIndexedObjNameDFR', 'GetKeyState', 'GetRTErrMessage', 'GetRTError',
'GetRTLocation', 'GetRTLocInfo', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
'GetWavesDataFolder', 'GetWavesDataFolderDFR', 'GISGetAllFileFormats',
'GISSRefsAreEqual', 'GizmoInfo', 'GizmoScale', 'gnoise', 'GrepList', 'GrepString',
'GuideInfo', 'GuideNameList', 'Hash', 'hcsr', 'HDF5AttributeInfo',
'HDF5DatasetInfo', 'HDF5LibraryInfo', 'HDF5TypeInfo', 'hermite', 'hermiteGauss',
'HyperGNoise', 'HyperGPFQ', 'HyperG0F1', 'HyperG1F1', 'HyperG2F1', 'IgorInfo',
'IgorVersion', 'imag', 'ImageInfo', 'ImageNameList', 'ImageNameToWaveRef',
'IndependentModuleList', 'IndexedDir', 'IndexedFile', 'IndexToScale', 'Inf',
'Integrate1D', 'interp', 'Interp2D', 'Interp3D', 'inverseERF', 'inverseERFC',
'ItemsInList', 'JacobiCn', 'JacobiSn', 'JulianToDate', 'Laguerre', 'LaguerreA',
'LaguerreGauss', 'LambertW', 'LayoutInfo', 'leftx', 'LegendreA', 'limit',
'ListMatch', 'ListToTextWave', 'ListToWaveRefWave', 'ln', 'log', 'logNormalNoise',
'lorentzianNoise', 'LowerStr', 'MacroList', 'magsqr', 'MandelbrotPoint',
'MarcumQ', 'MatrixCondition', 'MatrixDet', 'MatrixDot', 'MatrixRank',
'MatrixTrace', 'max', 'MCC_AutoBridgeBal', 'MCC_AutoFastComp',
'MCC_AutoPipetteOffset', 'MCC_AutoSlowComp', 'MCC_AutoWholeCellComp',
'MCC_GetBridgeBalEnable', 'MCC_GetBridgeBalResist', 'MCC_GetFastCompCap',
'MCC_GetFastCompTau', 'MCC_GetHolding', 'MCC_GetHoldingEnable', 'MCC_GetMode',
'MCC_GetNeutralizationCap', 'MCC_GetNeutralizationEnable',
'MCC_GetOscKillerEnable', 'MCC_GetPipetteOffset', 'MCC_GetPrimarySignalGain',
'MCC_GetPrimarySignalHPF', 'MCC_GetPrimarySignalLPF', 'MCC_GetRsCompBandwidth',
'MCC_GetRsCompCorrection', 'MCC_GetRsCompEnable', 'MCC_GetRsCompPrediction',
'MCC_GetSecondarySignalGain', 'MCC_GetSecondarySignalLPF', 'MCC_GetSlowCompCap',
'MCC_GetSlowCompTau', 'MCC_GetSlowCompTauX20Enable',
'MCC_GetSlowCurrentInjEnable', 'MCC_GetSlowCurrentInjLevel',
'MCC_GetSlowCurrentInjSetlTime', 'MCC_GetWholeCellCompCap',
'MCC_GetWholeCellCompEnable', 'MCC_GetWholeCellCompResist',
'MCC_SelectMultiClamp700B', 'MCC_SetBridgeBalEnable', 'MCC_SetBridgeBalResist',
'MCC_SetFastCompCap', 'MCC_SetFastCompTau', 'MCC_SetHolding',
'MCC_SetHoldingEnable', 'MCC_SetMode', 'MCC_SetNeutralizationCap',
'MCC_SetNeutralizationEnable', 'MCC_SetOscKillerEnable', 'MCC_SetPipetteOffset',
'MCC_SetPrimarySignalGain', 'MCC_SetPrimarySignalHPF', 'MCC_SetPrimarySignalLPF',
'MCC_SetRsCompBandwidth', 'MCC_SetRsCompCorrection', 'MCC_SetRsCompEnable',
'MCC_SetRsCompPrediction', 'MCC_SetSecondarySignalGain',
'MCC_SetSecondarySignalLPF', 'MCC_SetSlowCompCap', 'MCC_SetSlowCompTau',
'MCC_SetSlowCompTauX20Enable', 'MCC_SetSlowCurrentInjEnable',
'MCC_SetSlowCurrentInjLevel', 'MCC_SetSlowCurrentInjSetlTime', 'MCC_SetTimeoutMs',
'MCC_SetWholeCellCompCap', 'MCC_SetWholeCellCompEnable',
'MCC_SetWholeCellCompResist', 'mean', 'median', 'min', 'mod', 'ModDate',
'MPFXEMGPeak', 'MPFXExpConvExpPeak', 'MPFXGaussPeak', 'MPFXLorenzianPeak',
'MPFXVoigtPeak', 'NameOfWave', 'NaN', 'NewFreeDataFolder', 'NewFreeWave', 'norm',
'NormalizeUnicode', 'note', 'NumberByKey', 'numpnts', 'numtype',
'NumVarOrDefault', 'num2char', 'num2istr', 'num2str', 'NVAR_Exists',
'OperationList', 'PadString', 'PanelResolution', 'ParamIsDefault',
'ParseFilePath', 'PathList', 'pcsr', 'Pi', 'PICTInfo', 'PICTList',
'PixelFromAxisVal', 'pnt2x', 'poissonNoise', 'poly', 'PolygonArea', 'poly2D',
'PossiblyQuoteName', 'ProcedureText', 'p2rect', 'qcsr', 'real', 'RemoveByKey',
'RemoveEnding', 'RemoveFromList', 'RemoveListItem', 'ReplaceNumberByKey',
'ReplaceString', 'ReplaceStringByKey', 'rightx', 'round', 'r2polar', 'sawtooth',
'scaleToIndex', 'ScreenResolution', 'sec', 'sech', 'Secs2Date', 'Secs2Time',
'SelectNumber', 'SelectString', 'SetEnvironmentVariable', 'sign', 'sin', 'sinc',
'sinh', 'sinIntegral', 'SortList', 'SpecialCharacterInfo', 'SpecialCharacterList',
'SpecialDirPath', 'SphericalBessJ', 'SphericalBessJD', 'SphericalBessY',
'SphericalBessYD', 'SphericalHarmonics', 'SQLAllocHandle', 'SQLAllocStmt',
'SQLBinaryWavesToTextWave', 'SQLBindCol', 'SQLBindParameter', 'SQLBrowseConnect',
'SQLBulkOperations', 'SQLCancel', 'SQLCloseCursor', 'SQLColAttributeNum',
'SQLColAttributeStr', 'SQLColumnPrivileges', 'SQLColumns', 'SQLConnect',
'SQLDataSources', 'SQLDescribeCol', 'SQLDescribeParam', 'SQLDisconnect',
'SQLDriverConnect', 'SQLDrivers', 'SQLEndTran', 'SQLError', 'SQLExecDirect',
'SQLExecute', 'SQLFetch', 'SQLFetchScroll', 'SQLForeignKeys', 'SQLFreeConnect',
'SQLFreeEnv', 'SQLFreeHandle', 'SQLFreeStmt', 'SQLGetConnectAttrNum',
'SQLGetConnectAttrStr', 'SQLGetCursorName', 'SQLGetDataNum', 'SQLGetDataStr',
'SQLGetDescFieldNum', 'SQLGetDescFieldStr', 'SQLGetDescRec', 'SQLGetDiagFieldNum',
'SQLGetDiagFieldStr', 'SQLGetDiagRec', 'SQLGetEnvAttrNum', 'SQLGetEnvAttrStr',
'SQLGetFunctions', 'SQLGetInfoNum', 'SQLGetInfoStr', 'SQLGetStmtAttrNum',
'SQLGetStmtAttrStr', 'SQLGetTypeInfo', 'SQLMoreResults', 'SQLNativeSql',
'SQLNumParams', 'SQLNumResultCols', 'SQLNumResultRowsIfKnown',
'SQLNumRowsFetched', 'SQLParamData', 'SQLPrepare', 'SQLPrimaryKeys',
'SQLProcedureColumns', 'SQLProcedures', 'SQLPutData', 'SQLReinitialize',
'SQLRowCount', 'SQLSetConnectAttrNum', 'SQLSetConnectAttrStr', 'SQLSetCursorName',
'SQLSetDescFieldNum', 'SQLSetDescFieldStr', 'SQLSetDescRec', 'SQLSetEnvAttrNum',
'SQLSetEnvAttrStr', 'SQLSetPos', 'SQLSetStmtAttrNum', 'SQLSetStmtAttrStr',
'SQLSpecialColumns', 'SQLStatistics', 'SQLTablePrivileges', 'SQLTables',
'SQLTextWaveToBinaryWaves', 'SQLTextWaveTo2DBinaryWave', 'SQLUpdateBoundValues',
'SQLXOPCheckState', 'SQL2DBinaryWaveToTextWave', 'sqrt', 'StartMSTimer',
'StatsBetaCDF', 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', 'StatsCMSSDCDF',
'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', 'StatsErlangCDF',
'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', 'StatsEValuePDF',
'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', 'StatsFPDF', 'StatsFriedmanCDF',
'StatsGammaCDF', 'StatsGammaPDF', 'StatsGeometricCDF', 'StatsGeometricPDF',
'StatsGEVCDF', 'StatsGEVPDF', 'StatsHyperGCDF', 'StatsHyperGPDF',
'StatsInvBetaCDF', 'StatsInvBinomialCDF', 'StatsInvCauchyCDF', 'StatsInvChiCDF',
'StatsInvCMSSDCDF', 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', 'StatsInvGeometricCDF',
'StatsInvKuiperCDF', 'StatsInvLogisticCDF', 'StatsInvLogNormalCDF',
'StatsInvMaxwellCDF', 'StatsInvMooreCDF', 'StatsInvNBinomialCDF',
'StatsInvNCChiCDF', 'StatsInvNCFCDF', 'StatsInvNormalCDF', 'StatsInvParetoCDF',
'StatsInvPoissonCDF', 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', 'StatsLogNormalCDF',
'StatsLogNormalPDF', 'StatsMaxwellCDF', 'StatsMaxwellPDF', 'StatsMedian',
'StatsMooreCDF', 'StatsNBinomialCDF', 'StatsNBinomialPDF', 'StatsNCChiCDF',
'StatsNCChiPDF', 'StatsNCFCDF', 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF',
'StatsNormalCDF', 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF',
'StatsPermute', 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', 'StatsRayleighCDF',
'StatsRayleighPDF', 'StatsRectangularCDF', 'StatsRectangularPDF', 'StatsRunsCDF',
'StatsSpearmanRhoCDF', 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', 'StatsVonMisesPDF',
'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', 'StatsWeibullPDF',
'StopMSTimer', 'StringByKey', 'stringCRC', 'StringFromList', 'StringList',
'stringmatch', 'strlen', 'strsearch', 'StrVarOrDefault', 'str2num', 'StudentA',
'StudentT', 'sum', 'SVAR_Exists', 'TableInfo', 'TagVal', 'TagWaveRef', 'tan',
'tango_close_device', 'tango_command_inout', 'tango_compute_image_proj',
'tango_get_dev_attr_list', 'tango_get_dev_black_box', 'tango_get_dev_cmd_list',
'tango_get_dev_status', 'tango_get_dev_timeout', 'tango_get_error_stack',
'tango_open_device', 'tango_ping_device', 'tango_read_attribute',
'tango_read_attributes', 'tango_reload_dev_interface',
'tango_resume_attr_monitor', 'tango_set_attr_monitor_period',
'tango_set_dev_timeout', 'tango_start_attr_monitor', 'tango_stop_attr_monitor',
'tango_suspend_attr_monitor', 'tango_write_attribute', 'tango_write_attributes',
'tanh', 'TDMAddChannel', 'TDMAddGroup', 'TDMAppendDataValues',
'TDMAppendDataValuesTime', 'TDMChannelPropertyExists', 'TDMCloseChannel',
'TDMCloseFile', 'TDMCloseGroup', 'TDMCreateChannelProperty', 'TDMCreateFile',
'TDMCreateFileProperty', 'TDMCreateGroupProperty', 'TDMFilePropertyExists',
'TDMGetChannelPropertyNames', 'TDMGetChannelPropertyNum',
'TDMGetChannelPropertyStr', 'TDMGetChannelPropertyTime',
'TDMGetChannelPropertyType', 'TDMGetChannels', 'TDMGetChannelStringPropertyLen',
'TDMGetDataType', 'TDMGetDataValues', 'TDMGetDataValuesTime',
'TDMGetFilePropertyNames', 'TDMGetFilePropertyNum', 'TDMGetFilePropertyStr',
'TDMGetFilePropertyTime', 'TDMGetFilePropertyType', 'TDMGetFileStringPropertyLen',
'TDMGetGroupPropertyNames', 'TDMGetGroupPropertyNum', 'TDMGetGroupPropertyStr',
'TDMGetGroupPropertyTime', 'TDMGetGroupPropertyType', 'TDMGetGroups',
'TDMGetGroupStringPropertyLen', 'TDMGetLibraryErrorDescription',
'TDMGetNumChannelProperties', 'TDMGetNumChannels', 'TDMGetNumDataValues',
'TDMGetNumFileProperties', 'TDMGetNumGroupProperties', 'TDMGetNumGroups',
'TDMGroupPropertyExists', 'TDMOpenFile', 'TDMOpenFileEx', 'TDMRemoveChannel',
'TDMRemoveGroup', 'TDMReplaceDataValues', 'TDMReplaceDataValuesTime',
'TDMSaveFile', 'TDMSetChannelPropertyNum', 'TDMSetChannelPropertyStr',
'TDMSetChannelPropertyTime', 'TDMSetDataValues', 'TDMSetDataValuesTime',
'TDMSetFilePropertyNum', 'TDMSetFilePropertyStr', 'TDMSetFilePropertyTime',
'TDMSetGroupPropertyNum', 'TDMSetGroupPropertyStr', 'TDMSetGroupPropertyTime',
'TextEncodingCode', 'TextEncodingName', 'TextFile', 'ThreadGroupCreate',
'ThreadGroupGetDF', 'ThreadGroupGetDFR', 'ThreadGroupRelease', 'ThreadGroupWait',
'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', 'time', 'TraceFromPixel',
'TraceInfo', 'TraceNameList', 'TraceNameToWaveRef', 'TrimString', 'trunc',
'UniqueName', 'UnPadString', 'UnsetEnvironmentVariable', 'UpperStr', 'URLDecode',
'URLEncode', 'VariableList', 'Variance', 'vcsr', 'viAssertIntrSignal',
'viAssertTrigger', 'viAssertUtilSignal', 'viClear', 'viClose', 'viDisableEvent',
'viDiscardEvents', 'viEnableEvent', 'viFindNext', 'viFindRsrc', 'viGetAttribute',
'viGetAttributeString', 'viGpibCommand', 'viGpibControlATN', 'viGpibControlREN',
'viGpibPassControl', 'viGpibSendIFC', 'viIn8', 'viIn16', 'viIn32', 'viLock',
'viMapAddress', 'viMapTrigger', 'viMemAlloc', 'viMemFree', 'viMoveIn8',
'viMoveIn16', 'viMoveIn32', 'viMoveOut8', 'viMoveOut16', 'viMoveOut32', 'viOpen',
'viOpenDefaultRM', 'viOut8', 'viOut16', 'viOut32', 'viPeek8', 'viPeek16',
'viPeek32', 'viPoke8', 'viPoke16', 'viPoke32', 'viRead', 'viReadSTB',
'viSetAttribute', 'viSetAttributeString', 'viStatusDesc', 'viTerminate',
'viUnlock', 'viUnmapAddress', 'viUnmapTrigger', 'viUsbControlIn',
'viUsbControlOut', 'viVxiCommandQuery', 'viWaitOnEvent', 'viWrite', 'VoigtFunc',
'VoigtPeak', 'WaveCRC', 'WaveDims', 'WaveExists', 'WaveHash', 'WaveInfo',
'WaveList', 'WaveMax', 'WaveMin', 'WaveName', 'WaveRefIndexed',
'WaveRefIndexedDFR', 'WaveRefsEqual', 'WaveRefWaveToList', 'WaveTextEncoding',
'WaveType', 'WaveUnits', 'WhichListItem', 'WinList', 'WinName', 'WinRecreation',
'WinType', 'wnoise', 'xcsr', 'XWaveName', 'XWaveRefFromTrace', 'x2pnt', 'zcsr',
'ZernikeR', 'zeromq_client_connect', 'zeromq_client_recv',
'zeromq_client_send', 'zeromq_handler_start', 'zeromq_handler_stop',
'zeromq_server_bind', 'zeromq_server_recv', 'zeromq_server_send', 'zeromq_set',
'zeromq_stop', 'zeromq_test_callfunction', 'zeromq_test_serializeWave', 'zeta'
)
tokens = {
'root': [
(r'//.*$', Comment.Single),
(r'"([^"\\]|\\.)*"', String),
# Flow Control.
(words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword),
# Types.
(words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
# Keywords.
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
# Built-in operations.
(words(operations, prefix=r'\b', suffix=r'\b'), Name.Class),
# Built-in functions.
(words(functions, prefix=r'\b', suffix=r'\b'), Name.Function),
# Compiler directives.
(r'^#(include|pragma|define|undef|ifdef|ifndef|if|elif|else|endif)',
Name.Decorator),
(r'\s+', Whitespace),
(r'[^a-z"/]+$', Text),
(r'.', Text),
],
}
| 30,631 | Python | 71.760095 | 90 | 0.673207 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/phix.py | """
pygments.lexers.phix
~~~~~~~~~~~~~~~~~~~~
Lexers for Phix.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Whitespace
__all__ = ['PhixLexer']
class PhixLexer(RegexLexer):
"""
Pygments Lexer for Phix files (.exw).
See http://phix.x10.mx
.. versionadded:: 2.14.0
"""
name = 'Phix'
url = 'http://phix.x10.mx'
aliases = ['phix']
filenames = ['*.exw']
mimetypes = ['text/x-phix']
    flags = re.MULTILINE # nb: **NOT** re.DOTALL! (DOTALL would completely break comment handling)
preproc = (
'ifdef', 'elsifdef', 'elsedef'
)
# Note these lists are auto-generated by pwa/p2js.exw, when pwa\src\p2js_keywords.e (etc)
# change, though of course subsequent copy/commit/pull requests are all manual steps.
types = (
'string', 'nullable_string', 'atom_string', 'atom', 'bool', 'boolean',
'cdCanvan', 'cdCanvas', 'complex', 'CURLcode', 'dictionary', 'int',
'integer', 'Ihandle', 'Ihandles', 'Ihandln', 'mpfr', 'mpq', 'mpz',
'mpz_or_string', 'number', 'rid_string', 'seq', 'sequence', 'timedate',
'object'
)
keywords = (
'abstract', 'class', 'continue', 'export', 'extends', 'nullable',
'private', 'public', 'static', 'struct', 'trace',
'and', 'break', 'by', 'case', 'catch', 'const', 'constant', 'debug',
'default', 'do', 'else', 'elsif', 'end', 'enum', 'exit', 'fallthru',
'fallthrough', 'for', 'forward', 'function', 'global', 'if', 'in',
'include', 'js', 'javascript', 'javascript_semantics', 'let', 'not',
'or', 'procedure', 'profile', 'profile_time', 'return', 'safe_mode',
'switch', 'then', 'to', 'try', 'type', 'type_check', 'until', 'warning',
'while', 'with', 'without', 'xor'
)
routines = (
'abort', 'abs', 'adjust_timedate', 'and_bits', 'and_bitsu', 'apply',
'append', 'arccos', 'arcsin', 'arctan', 'assert', 'atan2',
'atom_to_float32', 'atom_to_float64', 'bankers_rounding', 'beep',
'begins', 'binary_search', 'bits_to_int', 'bk_color', 'bytes_to_int',
'call_func', 'call_proc', 'cdCanvasActivate', 'cdCanvasArc',
'cdCanvasBegin', 'cdCanvasBox', 'cdCanvasChord', 'cdCanvasCircle',
'cdCanvasClear', 'cdCanvasEnd', 'cdCanvasFlush', 'cdCanvasFont',
'cdCanvasGetImageRGB', 'cdCanvasGetSize', 'cdCanvasGetTextAlignment',
'cdCanvasGetTextSize', 'cdCanvasLine', 'cdCanvasMark',
'cdCanvasMarkSize', 'cdCanvasMultiLineVectorText', 'cdCanvasPixel',
'cdCanvasRect', 'cdCanvasRoundedBox', 'cdCanvasRoundedRect',
'cdCanvasSector', 'cdCanvasSetAttribute', 'cdCanvasSetBackground',
'cdCanvasSetFillMode', 'cdCanvasSetForeground',
'cdCanvasSetInteriorStyle', 'cdCanvasSetLineStyle',
'cdCanvasSetLineWidth', 'cdCanvasSetTextAlignment', 'cdCanvasText',
'cdCanvasSetTextOrientation', 'cdCanvasGetTextOrientation',
'cdCanvasVectorText', 'cdCanvasVectorTextDirection',
'cdCanvasVectorTextSize', 'cdCanvasVertex', 'cdCreateCanvas',
'cdDecodeAlpha', 'cdDecodeColor', 'cdDecodeColorAlpha', 'cdEncodeAlpha',
'cdEncodeColor', 'cdEncodeColorAlpha', 'cdKillCanvas', 'cdVersion',
'cdVersionDate', 'ceil', 'change_timezone', 'choose', 'clear_screen',
'columnize', 'command_line', 'compare', 'complex_abs', 'complex_add',
'complex_arg', 'complex_conjugate', 'complex_cos', 'complex_cosh',
'complex_div', 'complex_exp', 'complex_imag', 'complex_inv',
'complex_log', 'complex_mul', 'complex_neg', 'complex_new',
'complex_norm', 'complex_power', 'complex_rho', 'complex_real',
'complex_round', 'complex_sin', 'complex_sinh', 'complex_sprint',
'complex_sqrt', 'complex_sub', 'complex_theta', 'concat', 'cos',
'crash', 'custom_sort', 'date', 'day_of_week', 'day_of_year',
'days_in_month', 'decode_base64', 'decode_flags', 'deep_copy', 'deld',
'deserialize', 'destroy_dict', 'destroy_queue', 'destroy_stack',
'dict_name', 'dict_size', 'elapsed', 'elapsed_short', 'encode_base64',
'equal', 'even', 'exp', 'extract', 'factorial', 'factors',
'file_size_k', 'find', 'find_all', 'find_any', 'find_replace', 'filter',
'flatten', 'float32_to_atom', 'float64_to_atom', 'floor',
'format_timedate', 'free_console', 'from_polar', 'gcd', 'get_file_base',
'get_file_extension', 'get_file_name', 'get_file_name_and_path',
'get_file_path', 'get_file_path_and_name', 'get_maxprime', 'get_prime',
'get_primes', 'get_primes_le', 'get_proper_dir', 'get_proper_path',
'get_rand', 'get_routine_info', 'get_test_abort', 'get_test_logfile',
'get_test_pause', 'get_test_verbosity', 'get_tzid', 'getd', 'getdd',
'getd_all_keys', 'getd_by_index', 'getd_index', 'getd_partial_key',
'glAttachShader', 'glBindBuffer', 'glBindTexture', 'glBufferData',
'glCanvasSpecialText', 'glClear', 'glClearColor', 'glColor',
'glCompileShader', 'glCreateBuffer', 'glCreateProgram',
'glCreateShader', 'glCreateTexture', 'glDeleteProgram',
'glDeleteShader', 'glDrawArrays', 'glEnable',
'glEnableVertexAttribArray', 'glFloat32Array', 'glInt32Array',
'glFlush', 'glGetAttribLocation', 'glGetError', 'glGetProgramInfoLog',
'glGetProgramParameter', 'glGetShaderInfoLog', 'glGetShaderParameter',
'glGetUniformLocation', 'glLinkProgram', 'glLoadIdentity',
'glMatrixMode', 'glOrtho', 'glRotatef', 'glShadeModel',
'glShaderSource', 'glSimpleA7texcoords', 'glTexImage2Dc',
'glTexParameteri', 'glTranslate', 'glUniform1f', 'glUniform1i',
'glUniformMatrix4fv', 'glUseProgram', 'glVertex',
'glVertexAttribPointer', 'glViewport', 'head', 'hsv_to_rgb', 'iff',
'iif', 'include_file', 'incl0de_file', 'insert', 'instance',
'int_to_bits', 'int_to_bytes', 'is_dict', 'is_integer', 's_leap_year',
'is_prime', 'is_prime2', 'islower', 'isupper', 'Icallback',
'iup_isdouble', 'iup_isprint', 'iup_XkeyBase', 'IupAppend', 'IupAlarm',
'IupBackgroundBox', 'IupButton', 'IupCalendar', 'IupCanvas',
'IupClipboard', 'IupClose', 'IupCloseOnEscape', 'IupControlsOpen',
'IupDatePick', 'IupDestroy', 'IupDialog', 'IupDrawArc', 'IupDrawBegin',
'IupDrawEnd', 'IupDrawGetSize', 'IupDrawGetTextSize', 'IupDrawLine',
'IupDrawRectangle', 'IupDrawText', 'IupExpander', 'IupFill',
'IupFlatLabel', 'IupFlatList', 'IupFlatTree', 'IupFlush', 'IupFrame',
'IupGetAttribute', 'IupGetAttributeId', 'IupGetAttributePtr',
'IupGetBrother', 'IupGetChild', 'IupGetChildCount', 'IupGetClassName',
'IupGetDialog', 'IupGetDialogChild', 'IupGetDouble', 'IupGetFocus',
'IupGetGlobal', 'IupGetGlobalInt', 'IupGetGlobalIntInt', 'IupGetInt',
'IupGetInt2', 'IupGetIntId', 'IupGetIntInt', 'IupGetParent',
'IupGLCanvas', 'IupGLCanvasOpen', 'IupGLMakeCurrent', 'IupGraph',
'IupHbox', 'IupHide', 'IupImage', 'IupImageRGBA', 'IupItem',
'iupKeyCodeToName', 'IupLabel', 'IupLink', 'IupList', 'IupMap',
'IupMenu', 'IupMenuItem', 'IupMessage', 'IupMessageDlg', 'IupMultiBox',
'IupMultiLine', 'IupNextField', 'IupNormaliser', 'IupOpen',
'IupPlayInput', 'IupPopup', 'IupPreviousField', 'IupProgressBar',
'IupRadio', 'IupRecordInput', 'IupRedraw', 'IupRefresh',
'IupRefreshChildren', 'IupSeparator', 'IupSetAttribute',
'IupSetAttributes', 'IupSetAttributeHandle', 'IupSetAttributeId',
'IupSetAttributePtr', 'IupSetCallback', 'IupSetCallbacks',
'IupSetDouble', 'IupSetFocus', 'IupSetGlobal', 'IupSetGlobalInt',
'IupSetGlobalFunction', 'IupSetHandle', 'IupSetInt',
'IupSetStrAttribute', 'IupSetStrGlobal', 'IupShow', 'IupShowXY',
'IupSplit', 'IupStoreAttribute', 'IupSubmenu', 'IupTable',
'IupTableClearSelected', 'IupTableClick_cb', 'IupTableGetSelected',
'IupTableResize_cb', 'IupTableSetData', 'IupTabs', 'IupText',
'IupTimer', 'IupToggle', 'IupTreeAddNodes', 'IupTreeView', 'IupUpdate',
'IupValuator', 'IupVbox', 'join', 'join_by', 'join_path', 'k_perm',
'largest', 'lcm', 'length', 'log', 'log10', 'log2', 'lower',
'm4_crossProduct', 'm4_inverse', 'm4_lookAt', 'm4_multiply',
'm4_normalize', 'm4_perspective', 'm4_subtractVectors', 'm4_xRotate',
'm4_yRotate', 'machine_bits', 'machine_word', 'match', 'match_all',
'match_replace', 'max', 'maxsq', 'min', 'minsq', 'mod', 'mpfr_add',
'mpfr_ceil', 'mpfr_cmp', 'mpfr_cmp_si', 'mpfr_const_pi', 'mpfr_div',
'mpfr_div_si', 'mpfr_div_z', 'mpfr_floor', 'mpfr_free', 'mpfr_get_d',
'mpfr_get_default_precision', 'mpfr_get_default_rounding_mode',
'mpfr_get_fixed', 'mpfr_get_precision', 'mpfr_get_si', 'mpfr_init',
'mpfr_inits', 'mpfr_init_set', 'mpfr_init_set_q', 'mpfr_init_set_z',
'mpfr_mul', 'mpfr_mul_si', 'mpfr_pow_si', 'mpfr_set', 'mpfr_set_d',
'mpfr_set_default_precision', 'mpfr_set_default_rounding_mode',
'mpfr_set_precision', 'mpfr_set_q', 'mpfr_set_si', 'mpfr_set_str',
'mpfr_set_z', 'mpfr_si_div', 'mpfr_si_sub', 'mpfr_sqrt', 'mpfr_sub',
'mpfr_sub_si', 'mpq_abs', 'mpq_add', 'mpq_add_si', 'mpq_canonicalize',
'mpq_cmp', 'mpq_cmp_si', 'mpq_div', 'mpq_div_2exp', 'mpq_free',
'mpq_get_den', 'mpq_get_num', 'mpq_get_str', 'mpq_init', 'mpq_init_set',
'mpq_init_set_si', 'mpq_init_set_str', 'mpq_init_set_z', 'mpq_inits',
'mpq_inv', 'mpq_mul', 'mpq_neg', 'mpq_set', 'mpq_set_si', 'mpq_set_str',
'mpq_set_z', 'mpq_sub', 'mpz_abs', 'mpz_add', 'mpz_addmul',
'mpz_addmul_ui', 'mpz_addmul_si', 'mpz_add_si', 'mpz_add_ui', 'mpz_and',
'mpz_bin_uiui', 'mpz_cdiv_q', 'mpz_cmp', 'mpz_cmp_si', 'mpz_divexact',
'mpz_divexact_ui', 'mpz_divisible_p', 'mpz_divisible_ui_p', 'mpz_even',
'mpz_fac_ui', 'mpz_factorstring', 'mpz_fdiv_q', 'mpz_fdiv_q_2exp',
'mpz_fdiv_q_ui', 'mpz_fdiv_qr', 'mpz_fdiv_r', 'mpz_fdiv_ui',
'mpz_fib_ui', 'mpz_fib2_ui', 'mpz_fits_atom', 'mpz_fits_integer',
'mpz_free', 'mpz_gcd', 'mpz_gcd_ui', 'mpz_get_atom', 'mpz_get_integer',
'mpz_get_short_str', 'mpz_get_str', 'mpz_init', 'mpz_init_set',
'mpz_inits', 'mpz_invert', 'mpz_lcm', 'mpz_lcm_ui', 'mpz_max',
'mpz_min', 'mpz_mod', 'mpz_mod_ui', 'mpz_mul', 'mpz_mul_2exp',
'mpz_mul_d', 'mpz_mul_si', 'mpz_neg', 'mpz_nthroot', 'mpz_odd',
'mpz_pollard_rho', 'mpz_pow_ui', 'mpz_powm', 'mpz_powm_ui', 'mpz_prime',
'mpz_prime_factors', 'mpz_prime_mr', 'mpz_rand', 'mpz_rand_ui',
'mpz_re_compose', 'mpz_remove', 'mpz_scan0', 'mpz_scan1', 'mpz_set',
'mpz_set_d', 'mpz_set_si', 'mpz_set_str', 'mpz_set_v', 'mpz_sign',
'mpz_sizeinbase', 'mpz_sqrt', 'mpz_sub', 'mpz_sub_si', 'mpz_sub_ui',
'mpz_si_sub', 'mpz_tdiv_q_2exp', 'mpz_tdiv_r_2exp', 'mpz_tstbit',
'mpz_ui_pow_ui', 'mpz_xor', 'named_dict', 'new_dict', 'new_queue',
'new_stack', 'not_bits', 'not_bitsu', 'odd', 'or_all', 'or_allu',
'or_bits', 'or_bitsu', 'ord', 'ordinal', 'ordinant',
'override_timezone', 'pad', 'pad_head', 'pad_tail', 'parse_date_string',
'papply', 'peep', 'peepn', 'peep_dict', 'permute', 'permutes',
'platform', 'pop', 'popn', 'pop_dict', 'power', 'pp', 'ppEx', 'ppExf',
'ppf', 'ppOpt', 'pq_add', 'pq_destroy', 'pq_empty', 'pq_new', 'pq_peek',
'pq_pop', 'pq_pop_data', 'pq_size', 'prepend', 'prime_factors',
'printf', 'product', 'proper', 'push', 'pushn', 'putd', 'puts',
'queue_empty', 'queue_size', 'rand', 'rand_range', 'reinstate',
'remainder', 'remove', 'remove_all', 'repeat', 'repeatch', 'replace',
'requires', 'reverse', 'rfind', 'rgb', 'rmatch', 'rmdr', 'rnd', 'round',
'routine_id', 'scanf', 'serialize', 'series', 'set_rand',
'set_test_abort', 'set_test_logfile', 'set_test_module',
'set_test_pause', 'set_test_verbosity', 'set_timedate_formats',
'set_timezone', 'setd', 'setd_default', 'shorten', 'sha256',
'shift_bits', 'shuffle', 'sign', 'sin', 'smallest', 'sort',
'sort_columns', 'speak', 'splice', 'split', 'split_any', 'split_by',
'sprint', 'sprintf', 'sq_abs', 'sq_add', 'sq_and', 'sq_and_bits',
'sq_arccos', 'sq_arcsin', 'sq_arctan', 'sq_atom', 'sq_ceil', 'sq_cmp',
'sq_cos', 'sq_div', 'sq_even', 'sq_eq', 'sq_floor', 'sq_floor_div',
'sq_ge', 'sq_gt', 'sq_int', 'sq_le', 'sq_log', 'sq_log10', 'sq_log2',
'sq_lt', 'sq_max', 'sq_min', 'sq_mod', 'sq_mul', 'sq_ne', 'sq_not',
'sq_not_bits', 'sq_odd', 'sq_or', 'sq_or_bits', 'sq_power', 'sq_rand',
'sq_remainder', 'sq_rmdr', 'sq_rnd', 'sq_round', 'sq_seq', 'sq_sign',
'sq_sin', 'sq_sqrt', 'sq_str', 'sq_sub', 'sq_tan', 'sq_trunc',
'sq_uminus', 'sq_xor', 'sq_xor_bits', 'sqrt', 'square_free',
'stack_empty', 'stack_size', 'substitute', 'substitute_all', 'sum',
'tail', 'tan', 'test_equal', 'test_fail', 'test_false',
'test_not_equal', 'test_pass', 'test_summary', 'test_true',
'text_color', 'throw', 'time', 'timedate_diff', 'timedelta',
'to_integer', 'to_number', 'to_rgb', 'to_string', 'traverse_dict',
'traverse_dict_partial_key', 'trim', 'trim_head', 'trim_tail', 'trunc',
'tagset', 'tagstart', 'typeof', 'unique', 'unix_dict', 'upper',
'utf8_to_utf32', 'utf32_to_utf8', 'version', 'vlookup', 'vslice',
'wglGetProcAddress', 'wildcard_file', 'wildcard_match', 'with_rho',
'with_theta', 'xml_new_doc', 'xml_new_element', 'xml_set_attribute',
'xml_sprint', 'xor_bits', 'xor_bitsu',
'accept', 'allocate', 'allocate_string', 'allow_break', 'ARM',
'atom_to_float80', 'c_func', 'c_proc', 'call_back', 'chdir',
'check_break', 'clearDib', 'close', 'closesocket', 'console',
'copy_file', 'create', 'create_directory', 'create_thread',
'curl_easy_cleanup', 'curl_easy_get_file', 'curl_easy_init',
'curl_easy_perform', 'curl_easy_perform_ex', 'curl_easy_setopt',
'curl_easy_strerror', 'curl_global_cleanup', 'curl_global_init',
'curl_slist_append', 'curl_slist_free_all', 'current_dir', 'cursor',
'define_c_func', 'define_c_proc', 'delete', 'delete_cs', 'delete_file',
'dir', 'DLL', 'drawDib', 'drawShadedPolygonToDib', 'ELF32', 'ELF64',
'enter_cs', 'eval', 'exit_thread', 'free', 'file_exists', 'final',
'float80_to_atom', 'format', 'get_bytes', 'get_file_date',
'get_file_size', 'get_file_type', 'get_interpreter', 'get_key',
'get_socket_error', 'get_text', 'get_thread_exitcode', 'get_thread_id',
'getc', 'getenv', 'gets', 'getsockaddr', 'glBegin', 'glCallList',
'glFrustum', 'glGenLists', 'glGetString', 'glLight', 'glMaterial',
'glNewList', 'glNormal', 'glPopMatrix', 'glPushMatrix', 'glRotate',
'glEnd', 'glEndList', 'glTexImage2D', 'goto', 'GUI', 'icons', 'ilASM',
'include_files', 'include_paths', 'init_cs', 'ip_to_string',
'IupConfig', 'IupConfigDialogClosed', 'IupConfigDialogShow',
'IupConfigGetVariableInt', 'IupConfigLoad', 'IupConfigSave',
'IupConfigSetVariableInt', 'IupExitLoop', 'IupFileDlg', 'IupFileList',
'IupGLSwapBuffers', 'IupHelp', 'IupLoopStep', 'IupMainLoop',
'IupNormalizer', 'IupPlot', 'IupPlotAdd', 'IupPlotBegin', 'IupPlotEnd',
'IupPlotInsert', 'IupSaveImage', 'IupTreeGetUserId', 'IupUser',
'IupVersion', 'IupVersionDate', 'IupVersionNumber', 'IupVersionShow',
'killDib', 'leave_cs', 'listen', 'manifest', 'mem_copy', 'mem_set',
'mpfr_gamma', 'mpfr_printf', 'mpfr_sprintf', 'mpz_export', 'mpz_import',
'namespace', 'new', 'newDib', 'open', 'open_dll', 'PE32', 'PE64',
'peek', 'peek_string', 'peek1s', 'peek1u', 'peek2s', 'peek2u', 'peek4s',
'peek4u', 'peek8s', 'peek8u', 'peekNS', 'peekns', 'peeknu', 'poke',
'poke2', 'poke4', 'poke8', 'pokeN', 'poke_string', 'poke_wstring',
'position', 'progress', 'prompt_number', 'prompt_string', 'read_file',
'read_lines', 'recv', 'resume_thread', 'seek', 'select', 'send',
'setHandler', 'shutdown', 'sleep', 'SO', 'sockaddr_in', 'socket',
'split_path', 'suspend_thread', 'system', 'system_exec', 'system_open',
'system_wait', 'task_clock_start', 'task_clock_stop', 'task_create',
'task_delay', 'task_list', 'task_schedule', 'task_self', 'task_status',
'task_suspend', 'task_yield', 'thread_safe_string', 'try_cs',
'utf8_to_utf16', 'utf16_to_utf8', 'utf16_to_utf32', 'utf32_to_utf16',
'video_config', 'WSACleanup', 'wait_thread', 'walk_dir', 'where',
'write_lines', 'wait_key'
)
constants = (
'ANY_QUEUE', 'ASCENDING', 'BLACK', 'BLOCK_CURSOR', 'BLUE',
'BRIGHT_CYAN', 'BRIGHT_BLUE', 'BRIGHT_GREEN', 'BRIGHT_MAGENTA',
'BRIGHT_RED', 'BRIGHT_WHITE', 'BROWN', 'C_DWORD', 'C_INT', 'C_POINTER',
'C_USHORT', 'C_WORD', 'CD_AMBER', 'CD_BLACK', 'CD_BLUE', 'CD_BOLD',
'CD_BOLD_ITALIC', 'CD_BOX', 'CD_CENTER', 'CD_CIRCLE', 'CD_CLOSED_LINES',
'CD_CONTINUOUS', 'CD_CUSTOM', 'CD_CYAN', 'CD_DARK_BLUE', 'CD_DARK_CYAN',
'CD_DARK_GRAY', 'CD_DARK_GREY', 'CD_DARK_GREEN', 'CD_DARK_MAGENTA',
'CD_DARK_RED', 'CD_DARK_YELLOW', 'CD_DASH_DOT', 'CD_DASH_DOT_DOT',
'CD_DASHED', 'CD_DBUFFER', 'CD_DEG2RAD', 'CD_DIAMOND', 'CD_DOTTED',
'CD_EAST', 'CD_EVENODD', 'CD_FILL', 'CD_GL', 'CD_GRAY', 'CD_GREY',
'CD_GREEN', 'CD_HATCH', 'CD_HOLLOW', 'CD_HOLLOW_BOX',
'CD_HOLLOW_CIRCLE', 'CD_HOLLOW_DIAMOND', 'CD_INDIGO', 'CD_ITALIC',
'CD_IUP', 'CD_IUPDBUFFER', 'CD_LIGHT_BLUE', 'CD_LIGHT_GRAY',
'CD_LIGHT_GREY', 'CD_LIGHT_GREEN', 'CD_LIGHT_PARCHMENT', 'CD_MAGENTA',
'CD_NAVY', 'CD_NORTH', 'CD_NORTH_EAST', 'CD_NORTH_WEST', 'CD_OLIVE',
'CD_OPEN_LINES', 'CD_ORANGE', 'CD_PARCHMENT', 'CD_PATTERN',
'CD_PRINTER', 'CD_PURPLE', 'CD_PLAIN', 'CD_PLUS', 'CD_QUERY',
'CD_RAD2DEG', 'CD_RED', 'CD_SILVER', 'CD_SOLID', 'CD_SOUTH_EAST',
'CD_SOUTH_WEST', 'CD_STAR', 'CD_STIPPLE', 'CD_STRIKEOUT',
'CD_UNDERLINE', 'CD_WEST', 'CD_WHITE', 'CD_WINDING', 'CD_VIOLET',
'CD_X', 'CD_YELLOW', 'CURLE_OK', 'CURLOPT_MAIL_FROM',
'CURLOPT_MAIL_RCPT', 'CURLOPT_PASSWORD', 'CURLOPT_READDATA',
'CURLOPT_READFUNCTION', 'CURLOPT_SSL_VERIFYPEER',
'CURLOPT_SSL_VERIFYHOST', 'CURLOPT_UPLOAD', 'CURLOPT_URL',
'CURLOPT_USE_SSL', 'CURLOPT_USERNAME', 'CURLOPT_VERBOSE',
'CURLOPT_WRITEFUNCTION', 'CURLUSESSL_ALL', 'CYAN', 'D_NAME',
'D_ATTRIBUTES', 'D_SIZE', 'D_YEAR', 'D_MONTH', 'D_DAY', 'D_HOUR',
'D_MINUTE', 'D_SECOND', 'D_CREATION', 'D_LASTACCESS', 'D_MODIFICATION',
'DT_YEAR', 'DT_MONTH', 'DT_DAY', 'DT_HOUR', 'DT_MINUTE', 'DT_SECOND',
'DT_DOW', 'DT_MSEC', 'DT_DOY', 'DT_GMT', 'EULER', 'E_CODE', 'E_ADDR',
'E_LINE', 'E_RTN', 'E_NAME', 'E_FILE', 'E_PATH', 'E_USER', 'false',
'False', 'FALSE', 'FIFO_QUEUE', 'FILETYPE_DIRECTORY', 'FILETYPE_FILE',
'GET_EOF', 'GET_FAIL', 'GET_IGNORE', 'GET_SUCCESS',
'GL_AMBIENT_AND_DIFFUSE', 'GL_ARRAY_BUFFER', 'GL_CLAMP',
'GL_CLAMP_TO_BORDER', 'GL_CLAMP_TO_EDGE', 'GL_COLOR_BUFFER_BIT',
'GL_COMPILE', 'GL_COMPILE_STATUS', 'GL_CULL_FACE',
'GL_DEPTH_BUFFER_BIT', 'GL_DEPTH_TEST', 'GL_EXTENSIONS', 'GL_FLAT',
'GL_FLOAT', 'GL_FRAGMENT_SHADER', 'GL_FRONT', 'GL_LIGHT0',
'GL_LIGHTING', 'GL_LINEAR', 'GL_LINK_STATUS', 'GL_MODELVIEW',
'GL_NEAREST', 'GL_NO_ERROR', 'GL_NORMALIZE', 'GL_POSITION',
'GL_PROJECTION', 'GL_QUAD_STRIP', 'GL_QUADS', 'GL_RENDERER',
'GL_REPEAT', 'GL_RGB', 'GL_RGBA', 'GL_SMOOTH', 'GL_STATIC_DRAW',
'GL_TEXTURE_2D', 'GL_TEXTURE_MAG_FILTER', 'GL_TEXTURE_MIN_FILTER',
'GL_TEXTURE_WRAP_S', 'GL_TEXTURE_WRAP_T', 'GL_TRIANGLES',
'GL_UNSIGNED_BYTE', 'GL_VENDOR', 'GL_VERSION', 'GL_VERTEX_SHADER',
'GRAY', 'GREEN', 'GT_LF_STRIPPED', 'GT_WHOLE_FILE', 'INVLN10',
'IUP_CLOSE', 'IUP_CONTINUE', 'IUP_DEFAULT', 'IUP_BLACK', 'IUP_BLUE',
'IUP_BUTTON1', 'IUP_BUTTON3', 'IUP_CENTER', 'IUP_CYAN', 'IUP_DARK_BLUE',
'IUP_DARK_CYAN', 'IUP_DARK_GRAY', 'IUP_DARK_GREY', 'IUP_DARK_GREEN',
'IUP_DARK_MAGENTA', 'IUP_DARK_RED', 'IUP_GRAY', 'IUP_GREY', 'IUP_GREEN',
'IUP_IGNORE', 'IUP_INDIGO', 'IUP_MAGENTA', 'IUP_MASK_INT',
'IUP_MASK_UINT', 'IUP_MOUSEPOS', 'IUP_NAVY', 'IUP_OLIVE', 'IUP_RECTEXT',
'IUP_RED', 'IUP_LIGHT_BLUE', 'IUP_LIGHT_GRAY', 'IUP_LIGHT_GREY',
'IUP_LIGHT_GREEN', 'IUP_ORANGE', 'IUP_PARCHMENT', 'IUP_PURPLE',
'IUP_SILVER', 'IUP_TEAL', 'IUP_VIOLET', 'IUP_WHITE', 'IUP_YELLOW',
'K_BS', 'K_cA', 'K_cC', 'K_cD', 'K_cF5', 'K_cK', 'K_cM', 'K_cN', 'K_cO',
'K_cP', 'K_cR', 'K_cS', 'K_cT', 'K_cW', 'K_CR', 'K_DEL', 'K_DOWN',
'K_END', 'K_ESC', 'K_F1', 'K_F2', 'K_F3', 'K_F4', 'K_F5', 'K_F6',
'K_F7', 'K_F8', 'K_F9', 'K_F10', 'K_F11', 'K_F12', 'K_HOME', 'K_INS',
'K_LEFT', 'K_MIDDLE', 'K_PGDN', 'K_PGUP', 'K_RIGHT', 'K_SP', 'K_TAB',
'K_UP', 'K_h', 'K_i', 'K_j', 'K_p', 'K_r', 'K_s', 'JS', 'LIFO_QUEUE',
'LINUX', 'MAX_HEAP', 'MAGENTA', 'MIN_HEAP', 'Nan', 'NO_CURSOR', 'null',
'NULL', 'PI', 'pp_Ascii', 'pp_Brkt', 'pp_Date', 'pp_File', 'pp_FltFmt',
'pp_Indent', 'pp_IntCh', 'pp_IntFmt', 'pp_Maxlen', 'pp_Nest',
'pp_Pause', 'pp_Q22', 'pp_StrFmt', 'RED', 'SEEK_OK', 'SLASH',
'TEST_ABORT', 'TEST_CRASH', 'TEST_PAUSE', 'TEST_PAUSE_FAIL',
'TEST_QUIET', 'TEST_SHOW_ALL', 'TEST_SHOW_FAILED', 'TEST_SUMMARY',
'true', 'True', 'TRUE', 'VC_SCRNLINES', 'WHITE', 'WINDOWS', 'YELLOW'
)
tokens = {
'root': [
(r"\s+", Whitespace),
(r'/\*|--/\*|#\[', Comment.Multiline, 'comment'),
(r'(?://|--|#!).*$', Comment.Single),
#Alt:
# (r'//.*$|--.*$|#!.*$', Comment.Single),
(r'"([^"\\]|\\.)*"', String.Other),
(r'\'[^\']*\'', String.Other),
(r'`[^`]*`', String.Other),
(words(types, prefix=r'\b', suffix=r'\b'), Name.Function),
(words(routines, prefix=r'\b', suffix=r'\b'), Name.Function),
(words(preproc, prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
(words(constants, prefix=r'\b', suffix=r'\b'), Name.Constant),
# Aside: Phix only supports/uses the ascii/non-unicode tilde
(r'!=|==|<<|>>|:=|[-~+/*%=<>&^|\.(){},?:\[\]$\\;#]', Operator),
(r'[\w-]+', Text)
],
'comment': [
(r'[^*/#]+', Comment.Multiline),
(r'/\*|#\[', Comment.Multiline, '#push'),
(r'\*/|#\]', Comment.Multiline, '#pop'),
(r'[*/#]', Comment.Multiline)
]
}
| 23,252 | Python | 62.706849 | 93 | 0.574445 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/floscript.py | """
pygments.lexers.floscript
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for FloScript
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['FloScriptLexer']
class FloScriptLexer(RegexLexer):
"""
For FloScript configuration language source code.
.. versionadded:: 2.4
"""
name = 'FloScript'
url = 'https://github.com/ioflo/ioflo'
aliases = ['floscript', 'flo']
filenames = ['*.flo']
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%', ttype),
# newlines are an error (use "nl" state)
]
tokens = {
'root': [
(r'\s+', Whitespace),
(r'[]{}:(),;[]', Punctuation),
(r'(\\)(\n)', bygroups(Text, Whitespace)),
(r'\\', Text),
(r'(to|by|with|from|per|for|cum|qua|via|as|at|in|of|on|re|is|if|be|into|'
r'and|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
(r'(load|init|server|logger|log|loggee|first|over|under|next|done|timeout|'
r'repeat|native|benter|enter|recur|exit|precur|renter|rexit|print|put|inc|'
r'copy|set|aux|rear|raze|go|let|do|bid|ready|start|stop|run|abort|use|flo|'
r'give|take)\b', Name.Builtin),
(r'(frame|framer|house)\b', Keyword),
('"', String, 'string'),
include('name'),
include('numbers'),
(r'#.+$', Comment.Single),
],
'string': [
('[^"]+', String),
('"', String, '#pop'),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'name': [
(r'@[\w.]+', Name.Decorator),
(r'[a-zA-Z_]\w*', Name),
],
}
| 2,668 | Python | 31.156626 | 88 | 0.46964 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/archetype.py | """
pygments.lexers.archetype
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Archetype-related syntaxes, including:
- ODIN syntax <https://github.com/openEHR/odin>
- ADL syntax <http://www.openehr.org/releases/trunk/architecture/am/adl2.pdf>
- cADL sub-syntax of ADL
For uses of this syntax, see the openEHR archetypes <http://www.openEHR.org/ckm>
Contributed by Thomas Beale <https://github.com/wolandscat>,
<https://bitbucket.org/thomas_beale>.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, using, default
from pygments.token import Text, Comment, Name, Literal, Number, String, \
Punctuation, Keyword, Operator, Generic, Whitespace
__all__ = ['OdinLexer', 'CadlLexer', 'AdlLexer']
class AtomsLexer(RegexLexer):
"""
Lexer for Values used in ADL and ODIN.
.. versionadded:: 2.1
"""
tokens = {
# ----- pseudo-states for inclusion -----
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'([ \t]*)(--.*)$', bygroups(Whitespace, Comment)),
],
'archetype_id': [
(r'([ \t]*)(([a-zA-Z]\w+(\.[a-zA-Z]\w+)*::)?[a-zA-Z]\w+(-[a-zA-Z]\w+){2}'
r'\.\w+[\w-]*\.v\d+(\.\d+){,2}((-[a-z]+)(\.\d+)?)?)',
bygroups(Whitespace, Name.Decorator)),
],
'date_constraints': [
# ISO 8601-based date/time constraints
(r'[Xx?YyMmDdHhSs\d]{2,4}([:-][Xx?YyMmDdHhSs\d]{2}){2}', Literal.Date),
# ISO 8601-based duration constraints + optional trailing slash
(r'(P[YyMmWwDd]+(T[HhMmSs]+)?|PT[HhMmSs]+)/?', Literal.Date),
],
'ordered_values': [
# ISO 8601 date with optional 'T' ligature
(r'\d{4}-\d{2}-\d{2}T?', Literal.Date),
# ISO 8601 time
(r'\d{2}:\d{2}:\d{2}(\.\d+)?([+-]\d{4}|Z)?', Literal.Date),
# ISO 8601 duration
(r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
(r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'[+-]?\d*\.\d+%?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[+-]?\d+%?', Number.Integer),
],
'values': [
include('ordered_values'),
(r'([Tt]rue|[Ff]alse)', Literal),
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'[a-z][a-z0-9+.-]*:', Literal, 'uri'),
# term code
(r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)(\w[\w-]*)(\])',
bygroups(Punctuation, Name.Decorator, Punctuation, Name.Decorator,
Punctuation)),
(r'\|', Punctuation, 'interval'),
# list continuation
(r'\.\.\.', Punctuation),
],
'constraint_values': [
(r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)',
bygroups(Punctuation, Name.Decorator, Punctuation), 'adl14_code_constraint'),
# ADL 1.4 ordinal constraint
(r'(\d*)(\|)(\[\w[\w-]*::\w[\w-]*\])((?:[,;])?)',
bygroups(Number, Punctuation, Name.Decorator, Punctuation)),
include('date_constraints'),
include('values'),
],
# ----- real states -----
'string': [
('"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
# all other characters
(r'[^\\"]+', String),
# stray backslash
(r'\\', String),
],
'uri': [
# effective URI terminators
(r'[,>\s]', Punctuation, '#pop'),
(r'[^>\s,]+', Literal),
],
'interval': [
(r'\|', Punctuation, '#pop'),
include('ordered_values'),
(r'\.\.', Punctuation),
(r'[<>=] *', Punctuation),
# handle +/-
(r'\+/-', Punctuation),
(r'\s+', Whitespace),
],
'any_code': [
include('archetype_id'),
# if it is a code
(r'[a-z_]\w*[0-9.]+(@[^\]]+)?', Name.Decorator),
# if it is tuple with attribute names
(r'[a-z_]\w*', Name.Class),
# if it is an integer, i.e. Xpath child index
(r'[0-9]+', Text),
(r'\|', Punctuation, 'code_rubric'),
(r'\]', Punctuation, '#pop'),
# handle use_archetype statement
(r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
],
'code_rubric': [
(r'\|', Punctuation, '#pop'),
(r'[^|]+', String),
],
'adl14_code_constraint': [
(r'\]', Punctuation, '#pop'),
(r'\|', Punctuation, 'code_rubric'),
(r'(\w[\w-]*)([;,]?)', bygroups(Name.Decorator, Punctuation)),
include('whitespace'),
],
}
class OdinLexer(AtomsLexer):
"""
Lexer for ODIN syntax.
.. versionadded:: 2.1
"""
name = 'ODIN'
aliases = ['odin']
filenames = ['*.odin']
mimetypes = ['text/odin']
tokens = {
'path': [
(r'>', Punctuation, '#pop'),
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'/', Punctuation),
(r'\[', Punctuation, 'key'),
(r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace), '#pop'),
(r'\s+', Whitespace, '#pop'),
],
'key': [
include('values'),
(r'\]', Punctuation, '#pop'),
],
'type_cast': [
(r'\)', Punctuation, '#pop'),
(r'[^)]+', Name.Class),
],
'root': [
include('whitespace'),
(r'([Tt]rue|[Ff]alse)', Literal),
include('values'),
# x-ref path
(r'/', Punctuation, 'path'),
# x-ref path starting with key
(r'\[', Punctuation, 'key'),
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'=', Operator),
(r'\(', Punctuation, 'type_cast'),
(r',', Punctuation),
(r'<', Punctuation),
(r'>', Punctuation),
(r';', Punctuation),
],
}
class CadlLexer(AtomsLexer):
"""
Lexer for cADL syntax.
.. versionadded:: 2.1
"""
name = 'cADL'
aliases = ['cadl']
filenames = ['*.cadl']
tokens = {
'path': [
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'/', Punctuation),
(r'\[', Punctuation, 'any_code'),
(r'\s+', Punctuation, '#pop'),
],
'root': [
include('whitespace'),
(r'(cardinality|existence|occurrences|group|include|exclude|'
r'allow_archetype|use_archetype|use_node)\W', Keyword.Type),
(r'(and|or|not|there_exists|xor|implies|for_all)\W', Keyword.Type),
(r'(after|before|closed)\W', Keyword.Type),
(r'(not)\W', Operator),
(r'(matches|is_in)\W', Operator),
# is_in / not is_in char
('(\u2208|\u2209)', Operator),
# there_exists / not there_exists / for_all / and / or
            ('(\u2203|\u2204|\u2200|\u2227|\u2228|\u22BB|\u223C)',
Operator),
# regex in slot or as string constraint
(r'(\{)(\s*)(/[^}]+/)(\s*)(\})',
bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)),
# regex in slot or as string constraint
(r'(\{)(\s*)(\^[^}]+\^)(\s*)(\})',
bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)),
(r'/', Punctuation, 'path'),
# for cardinality etc
(r'(\{)((?:\d+\.\.)?(?:\d+|\*))'
r'((?:\s*;\s*(?:ordered|unordered|unique)){,2})(\})',
bygroups(Punctuation, Number, Number, Punctuation)),
# [{ is start of a tuple value
(r'\[\{', Punctuation),
(r'\}\]', Punctuation),
(r'\{', Punctuation),
(r'\}', Punctuation),
include('constraint_values'),
# type name
(r'[A-Z]\w+(<[A-Z]\w+([A-Za-z_<>]*)>)?', Name.Class),
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'\[', Punctuation, 'any_code'),
(r'(~|//|\\\\|\+|-|/|\*|\^|!=|=|<=|>=|<|>]?)', Operator),
(r'\(', Punctuation),
(r'\)', Punctuation),
# for lists of values
(r',', Punctuation),
(r'"', String, 'string'),
# for assumed value
(r';', Punctuation),
],
}
class AdlLexer(AtomsLexer):
"""
Lexer for ADL syntax.
.. versionadded:: 2.1
"""
name = 'ADL'
aliases = ['adl']
filenames = ['*.adl', '*.adls', '*.adlf', '*.adlx']
tokens = {
'whitespace': [
# blank line ends
(r'\s*\n', Whitespace),
# comment-only line
(r'^([ \t]*)(--.*)$', bygroups(Whitespace, Comment)),
],
'odin_section': [
            # repeating the following two rules from the root state enables
            # multi-line strings that start in the first column to be handled
(r'^(language|description|ontology|terminology|annotations|'
r'component_terminologies|revision_history)([ \t]*\n)',
bygroups(Generic.Heading, Whitespace)),
(r'^(definition)([ \t]*\n)', bygroups(Generic.Heading, Whitespace), 'cadl_section'),
(r'^([ \t]*|[ \t]+.*)\n', using(OdinLexer)),
(r'^([^"]*")(>[ \t]*\n)', bygroups(String, Punctuation)),
# template overlay delimiter
(r'^----------*\n', Text, '#pop'),
(r'^.*\n', String),
default('#pop'),
],
'cadl_section': [
(r'^([ \t]*|[ \t]+.*)\n', using(CadlLexer)),
default('#pop'),
],
'rules_section': [
(r'^[ \t]+.*\n', using(CadlLexer)),
default('#pop'),
],
'metadata': [
(r'\)', Punctuation, '#pop'),
(r';', Punctuation),
(r'([Tt]rue|[Ff]alse)', Literal),
# numbers and version ids
(r'\d+(\.\d+)*', Literal),
# Guids
(r'(\d|[a-fA-F])+(-(\d|[a-fA-F])+){3,}', Literal),
(r'\w+', Name.Class),
(r'"', String, 'string'),
(r'=', Operator),
(r'[ \t]+', Whitespace),
default('#pop'),
],
'root': [
(r'^(archetype|template_overlay|operational_template|template|'
r'speciali[sz]e)', Generic.Heading),
(r'^(language|description|ontology|terminology|annotations|'
r'component_terminologies|revision_history)[ \t]*\n',
Generic.Heading, 'odin_section'),
(r'^(definition)[ \t]*\n', Generic.Heading, 'cadl_section'),
(r'^(rules)[ \t]*\n', Generic.Heading, 'rules_section'),
include('archetype_id'),
(r'([ \t]*)(\()', bygroups(Whitespace, Punctuation), 'metadata'),
include('whitespace'),
],
}
| 11,469 | Python | 34.84375 | 96 | 0.439969 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/lilypond.py | """
pygments.lexers.lilypond
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for LilyPond.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import bygroups, default, inherit, words
from pygments.lexers.lisp import SchemeLexer
from pygments.lexers._lilypond_builtins import (
keywords, pitch_language_names, clefs, scales, repeat_types, units,
chord_modifiers, pitches, music_functions, dynamics, articulations,
music_commands, markup_commands, grobs, translators, contexts,
context_properties, grob_properties, scheme_functions, paper_variables,
header_variables
)
from pygments.token import Token
__all__ = ["LilyPondLexer"]
# In LilyPond, (unquoted) name tokens only contain letters, hyphens,
# and underscores, where hyphens and underscores must not start or end
# a name token.
#
# Note that many of the entities listed as LilyPond built-in keywords
# (in file `_lilypond_builtins.py`) are only valid if surrounded by
# double quotes, for example, 'hufnagel-fa1'. This means that
# `NAME_END_RE` doesn't apply to such entities in valid LilyPond code.
NAME_END_RE = r"(?=\d|[^\w\-]|[\-_][\W\d])"
def builtin_words(names, backslash, suffix=NAME_END_RE):
prefix = r"[\-_^]?"
if backslash == "mandatory":
prefix += r"\\"
elif backslash == "optional":
prefix += r"\\?"
else:
assert backslash == "disallowed"
return words(names, prefix, suffix)
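# A worked sketch of the helper above, using the real dynamic names "ff" and
# "pp" as sample inputs:
#
#     builtin_words(("ff", "pp"), "mandatory")
#
# expands, roughly, to r"[\-_^]?\\(?:ff|pp)" + NAME_END_RE, so "\ff" and
# "-\pp" match while "\ffx" does not: NAME_END_RE refuses to end a name in
# the middle of a longer word.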
class LilyPondLexer(SchemeLexer):
"""
Lexer for input to LilyPond, a text-based music typesetter.
.. important::
This lexer is meant to be used in conjunction with the ``lilypond`` style.
.. versionadded:: 2.11
"""
name = 'LilyPond'
url = 'https://lilypond.org'
aliases = ['lilypond']
filenames = ['*.ly']
mimetypes = []
flags = re.DOTALL | re.MULTILINE
# Because parsing LilyPond input is very tricky (and in fact
# impossible without executing LilyPond when there is Scheme
# code in the file), this lexer does not try to recognize
# lexical modes. Instead, it catches the most frequent pieces
# of syntax, and, above all, knows about many kinds of builtins.
# In order to parse embedded Scheme, this lexer subclasses the SchemeLexer.
# It redefines the 'root' state entirely, and adds a rule for #{ #}
# to the 'value' state. The latter is used to parse a Scheme expression
# after #.
def get_tokens_unprocessed(self, text):
"""Highlight Scheme variables as LilyPond builtins when applicable."""
for index, token, value in super().get_tokens_unprocessed(text):
if token is Token.Name.Function or token is Token.Name.Variable:
if value in scheme_functions:
token = Token.Name.Builtin.SchemeFunction
elif token is Token.Name.Builtin:
token = Token.Name.Builtin.SchemeBuiltin
yield index, token, value
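    # Usage sketch, assuming the bundled "lilypond" Pygments style mentioned
    # in the docstring above:
    #
    #     from pygments import highlight
    #     from pygments.formatters import HtmlFormatter
    #     html = highlight(r"\relative { c'4 d e f }", LilyPondLexer(),
    #                      HtmlFormatter(style="lilypond"))
    #
    # Scheme names yielded by the parent SchemeLexer are re-tagged above, so a
    # call such as ly:grob-property is highlighted as a LilyPond builtin when
    # it appears in the scheme_functions list.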
tokens = {
"root": [
# Whitespace.
(r"\s+", Token.Text.Whitespace),
# Multi-line comments. These are non-nestable.
(r"%\{.*?%\}", Token.Comment.Multiline),
# Simple comments.
(r"%.*?$", Token.Comment.Single),
# End of embedded LilyPond in Scheme.
(r"#\}", Token.Punctuation, "#pop"),
# Embedded Scheme, starting with # ("delayed"),
            # or $ (immediate). #@ and $@ are the lesser-known
# "list splicing operators".
(r"[#$]@?", Token.Punctuation, "value"),
# Any kind of punctuation:
# - sequential music: { },
# - parallel music: << >>,
# - voice separator: << \\ >>,
# - chord: < >,
# - bar check: |,
# - dot in nested properties: \revert NoteHead.color,
# - equals sign in assignments and lists for various commands:
# \override Stem.color = red,
# - comma as alternative syntax for lists: \time 3,3,2 4/4,
# - colon in tremolos: c:32,
# - double hyphen and underscore in lyrics: li -- ly -- pond __
# (which must be preceded by ASCII whitespace)
(r"""(?x)
\\\\
| (?<= \s ) (?: -- | __ )
| [{}<>=.,:|]
""", Token.Punctuation),
# Pitches, with optional octavation marks, octave check,
# and forced or cautionary accidental.
(words(pitches, suffix=r"=?[',]*!?\??" + NAME_END_RE), Token.Pitch),
# Strings, optionally with direction specifier.
(r'[\-_^]?"', Token.String, "string"),
# Numbers.
(r"-?\d+\.\d+", Token.Number.Float), # 5. and .5 are not allowed
(r"-?\d+/\d+", Token.Number.Fraction),
# Integers, or durations with optional augmentation dots.
# We have no way to distinguish these, so we highlight
# them all as numbers.
#
# Normally, there is a space before the integer (being an
# argument to a music function), which we check here. The
# case without a space is handled below (as a fingering
# number).
(r"""(?x)
(?<= \s ) -\d+
| (?: (?: \d+ | \\breve | \\longa | \\maxima )
\.* )
""", Token.Number),
# Separates duration and duration multiplier highlighted as fraction.
(r"\*", Token.Number),
# Ties, slurs, manual beams.
(r"[~()[\]]", Token.Name.Builtin.Articulation),
# Predefined articulation shortcuts. A direction specifier is
# required here.
(r"[\-_^][>^_!.\-+]", Token.Name.Builtin.Articulation),
# Fingering numbers, string numbers.
(r"[\-_^]?\\?\d+", Token.Name.Builtin.Articulation),
# Builtins.
(builtin_words(keywords, "mandatory"), Token.Keyword),
(builtin_words(pitch_language_names, "disallowed"), Token.Name.PitchLanguage),
(builtin_words(clefs, "disallowed"), Token.Name.Builtin.Clef),
(builtin_words(scales, "mandatory"), Token.Name.Builtin.Scale),
(builtin_words(repeat_types, "disallowed"), Token.Name.Builtin.RepeatType),
(builtin_words(units, "mandatory"), Token.Number),
(builtin_words(chord_modifiers, "disallowed"), Token.ChordModifier),
(builtin_words(music_functions, "mandatory"), Token.Name.Builtin.MusicFunction),
(builtin_words(dynamics, "mandatory"), Token.Name.Builtin.Dynamic),
# Those like slurs that don't take a backslash are covered above.
(builtin_words(articulations, "mandatory"), Token.Name.Builtin.Articulation),
(builtin_words(music_commands, "mandatory"), Token.Name.Builtin.MusicCommand),
(builtin_words(markup_commands, "mandatory"), Token.Name.Builtin.MarkupCommand),
(builtin_words(grobs, "disallowed"), Token.Name.Builtin.Grob),
(builtin_words(translators, "disallowed"), Token.Name.Builtin.Translator),
# Optional backslash because of \layout { \context { \Score ... } }.
(builtin_words(contexts, "optional"), Token.Name.Builtin.Context),
(builtin_words(context_properties, "disallowed"), Token.Name.Builtin.ContextProperty),
(builtin_words(grob_properties, "disallowed"),
Token.Name.Builtin.GrobProperty,
"maybe-subproperties"),
# Optional backslashes here because output definitions are wrappers
# around modules. Concretely, you can do, e.g.,
# \paper { oddHeaderMarkup = \evenHeaderMarkup }
(builtin_words(paper_variables, "optional"), Token.Name.Builtin.PaperVariable),
(builtin_words(header_variables, "optional"), Token.Name.Builtin.HeaderVariable),
# Other backslashed-escaped names (like dereferencing a
# music variable), possibly with a direction specifier.
(r"[\-_^]?\\.+?" + NAME_END_RE, Token.Name.BackslashReference),
# Definition of a variable. Support assignments to alist keys
# (myAlist.my-key.my-nested-key = \markup \spam \eggs).
(r"""(?x)
(?: [^\W\d] | - )+
(?= (?: [^\W\d] | [\-.] )* \s* = )
""", Token.Name.Lvalue),
# Virtually everything can appear in markup mode, so we highlight
# as text. Try to get a complete word, or we might wrongly lex
# a suffix that happens to be a builtin as a builtin (e.g., "myStaff").
(r"([^\W\d]|-)+?" + NAME_END_RE, Token.Text),
(r".", Token.Text),
],
"string": [
(r'"', Token.String, "#pop"),
(r'\\.', Token.String.Escape),
(r'[^\\"]+', Token.String),
],
"value": [
# Scan a LilyPond value, then pop back since we had a
# complete expression.
(r"#\{", Token.Punctuation, ("#pop", "root")),
inherit,
],
# Grob subproperties are undeclared and it would be tedious
# to maintain them by hand. Instead, this state allows recognizing
# everything that looks like a-known-property.foo.bar-baz as
# one single property name.
"maybe-subproperties": [
(r"\s+", Token.Text.Whitespace),
(r"(\.)((?:[^\W\d]|-)+?)" + NAME_END_RE,
bygroups(Token.Punctuation, Token.Name.Builtin.GrobProperty)),
default("#pop"),
]
}
| 9,753 | Python | 41.969163 | 98 | 0.57367 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/agile.py | """
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.lisp import SchemeLexer
from pygments.lexers.jvm import IokeLexer, ClojureLexer
from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \
PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer
from pygments.lexers.perl import PerlLexer, Perl6Lexer
from pygments.lexers.d import CrocLexer, MiniDLexer
from pygments.lexers.iolang import IoLexer
from pygments.lexers.tcl import TclLexer
from pygments.lexers.factor import FactorLexer
from pygments.lexers.scripting import LuaLexer, MoonScriptLexer
__all__ = []
| 876 | Python | 35.541665 | 72 | 0.784247 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/dotnet.py | """
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number, Literal, Other, Whitespace
from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
"""
For C# source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 0.8
"""
name = 'C#'
url = 'https://docs.microsoft.com/en-us/dotnet/csharp/'
aliases = ['csharp', 'c#', 'cs']
filenames = ['*.cs']
mimetypes = ['text/x-csharp'] # inferred
flags = re.MULTILINE | re.DOTALL
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' +
'[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(Whitespace, using(this), Name.Function, Whitespace,
Punctuation)),
(r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
(r'[^\S\n]+', Whitespace),
(r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Whitespace),
(words((
'>>>=', '>>=', '<<=', '<=', '>=', '+=', '-=', '*=', '/=',
'%=', '&=', '|=', '^=', '??=', '=>', '??', '?.', '!=', '==',
'&&', '||', '>>>', '>>', '<<', '++', '--', '+', '-', '*',
'/', '%', '&', '|', '^', '<', '>', '?', '!', '~', '=',
)), Operator),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'[()\[\];:,.]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9]+(\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b(.*?)(\n)',
bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
Comment.Preproc, Whitespace)),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace,
Keyword)),
(r'(abstract|as|async|await|base|break|by|case|catch|'
r'checked|const|continue|default|delegate|'
r'do|else|enum|event|explicit|extern|false|finally|'
r'fixed|for|foreach|goto|if|implicit|in|interface|'
r'internal|is|let|lock|new|null|on|operator|'
r'out|override|params|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|stackalloc|static|'
r'switch|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
r'descending|from|group|into|orderby|select|thenby|where|'
r'join|equals)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Whitespace), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop'),
default('#pop'),
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class NemerleLexer(RegexLexer):
"""
For Nemerle source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 1.5
"""
name = 'Nemerle'
url = 'http://nemerle.org'
aliases = ['nemerle']
filenames = ['*.n']
mimetypes = ['text/x-nemerle'] # inferred
flags = re.MULTILINE | re.DOTALL
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' +
'[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(Whitespace, using(this), Name.Function, Whitespace, \
Punctuation)),
(r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
(r'[^\S\n]+', Whitespace),
(r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Whitespace),
(r'(\$)(\s*)(")', bygroups(String, Whitespace, String),
'splice-string'),
(r'(\$)(\s*)(<#)', bygroups(String, Whitespace, String),
'splice-string2'),
(r'<#', String, 'recursive-string'),
(r'(<\[)(\s*)(' + cs_ident + ':)?',
bygroups(Keyword, Whitespace, Keyword)),
(r'\]\>', Keyword),
# quasiquotation only
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
(r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b',
bygroups(Comment.Preproc, Whitespace, Comment.Preproc), 'preproc'),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace, Keyword)),
(r'(abstract|and|as|base|catch|def|delegate|'
r'enum|event|extern|false|finally|'
r'fun|implements|interface|internal|'
r'is|macro|match|matches|module|mutable|new|'
r'null|out|override|params|partial|private|'
r'protected|public|ref|sealed|static|'
r'syntax|this|throw|true|try|type|typeof|'
r'virtual|volatile|when|where|with|'
r'assert|assert2|async|break|checked|continue|do|else|'
r'ensures|for|foreach|if|late|lock|new|nolate|'
r'otherwise|regexp|repeat|requires|return|surroundwith|'
r'unchecked|unless|using|while|yield)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
r'short|string|uint|ulong|ushort|void|array|list)\b\??',
Keyword.Type),
(r'(:>?)(\s*)(' + cs_ident + r'\??)',
bygroups(Punctuation, Whitespace, Keyword.Type)),
(r'(class|struct|variant|module)(\s+)',
bygroups(Keyword, Whitespace), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace),
'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'preproc': [
(r'\w+', Comment.Preproc),
(r'[ \t]+', Whitespace),
(r'\n', Whitespace, '#pop')
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
],
'splice-string': [
(r'[^"$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'\\"', String),
(r'"', String, '#pop')
],
'splice-string2': [
(r'[^#<>$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'recursive-string': [
(r'[^#<>]', String),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'splice-string-content': [
(r'if|match', Keyword),
(r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
(cs_ident, Name),
(r'\d+', Number),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
def analyse_text(text):
"""Nemerle is quite similar to Python, but @if is relatively uncommon
elsewhere."""
result = 0
if '@if' in text:
result += 0.1
return result
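# Editorial sketch (not part of the upstream module): the ``analyse_text``
# hooks in this file return small scores that ``pygments.lexers.guess_lexer``
# compares across all registered lexers; the highest score wins. A hedged
# usage sketch:
#
#     from pygments.lexers import guess_lexer
#     lexer = guess_lexer('using System;\nclass C { static void Main() {} }')
#     print(lexer.name)  # whichever lexer scored highest for this snippet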
class BooLexer(RegexLexer):
"""
For Boo source code.
"""
name = 'Boo'
url = 'https://github.com/boo-lang/boo'
aliases = ['boo']
filenames = ['*.boo']
mimetypes = ['text/x-boo']
tokens = {
'root': [
(r'\s+', Whitespace),
(r'(#|//).*$', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment'),
(r'[]{}:(),.;[]', Punctuation),
(r'(\\)(\n)', bygroups(Text, Whitespace)),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'/(\\\\|\\[^\\]|[^/\\\s])/', String.Regex),
(r'@/(\\\\|\\[^\\]|[^/\\])*/', String.Regex),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'(as|abstract|callable|constructor|destructor|do|import|'
r'enum|event|final|get|interface|internal|of|override|'
r'partial|private|protected|public|return|set|static|'
r'struct|transient|virtual|yield|super|and|break|cast|'
r'continue|elif|else|ensure|except|for|given|goto|if|in|'
r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
r'while|from|as)\b', Keyword),
(r'def(?=\s+\(.*?\))', Keyword),
(r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
(r'(namespace)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
(r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
r'assert|checked|enumerate|filter|getter|len|lock|map|'
r'matrix|max|min|normalArrayIndexing|print|property|range|'
r'rawArrayIndexing|required|typeof|unchecked|using|'
r'yieldAll|zip)\b', Name.Builtin),
(r'"""(\\\\|\\"|.*?)"""', String.Double),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_]\w*', Name),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'[0-9][0-9.]*(ms?|d|h|s)', Number),
(r'0\d+', Number.Oct),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer),
],
'comment': [
('/[*]', Comment.Multiline, '#push'),
('[*]/', Comment.Multiline, '#pop'),
('[^/*]', Comment.Multiline),
('[*/]', Comment.Multiline)
],
'funcname': [
(r'[a-zA-Z_]\w*', Name.Function, '#pop')
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
]
}
class VbNetLexer(RegexLexer):
"""
For Visual Basic.NET source code.
Also LibreOffice Basic, OpenOffice Basic, and StarOffice Basic.
"""
name = 'VB.net'
url = 'https://docs.microsoft.com/en-us/dotnet/visual-basic/'
aliases = ['vb.net', 'vbnet', 'lobas', 'oobas', 'sobas']
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
'[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^\s*<.*?>', Name.Attribute),
(r'\s+', Whitespace),
(r'\n', Whitespace),
(r'(rem\b.*?)(\n)', bygroups(Comment, Whitespace)),
(r"('.*?)(\n)", bygroups(Comment, Whitespace)),
(r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
r'#ExternalSource.*?\n|#End\s+ExternalSource|'
r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
Comment.Preproc),
(r'[(){}!#,.:]', Punctuation),
(r'(Option)(\s+)(Strict|Explicit|Compare)(\s+)'
r'(On|Off|Binary|Text)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
Whitespace, Keyword.Declaration)),
(words((
'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
'Widening', 'With', 'WithEvents', 'WriteOnly'),
prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
bygroups(Keyword, Whitespace), 'funcname'),
(r'(?<!\.)(Class|Structure|Enum)(\s+)',
bygroups(Keyword, Whitespace), 'classname'),
(r'(?<!\.)(Module|Namespace|Imports)(\s+)',
bygroups(Keyword, Whitespace), 'namespace'),
(r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
r'UShort)\b', Keyword.Type),
(r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
(r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
Operator),
('"', String, 'string'),
(r'(_)(\n)', bygroups(Text, Whitespace)), # Line continuation (must be before Name)
(uni_name + '[%&@!#$]?', Name),
('#.*?#', Literal.Date),
(r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
(r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
],
'string': [
(r'""', String),
(r'"C?', String, '#pop'),
(r'[^"]+', String),
],
'dim': [
(uni_name, Name.Variable, '#pop'),
default('#pop'), # any other syntax
],
'funcname': [
(uni_name, Name.Function, '#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'namespace': [
(uni_name, Name.Namespace),
(r'\.', Name.Namespace),
default('#pop'),
],
'end': [
(r'\s+', Whitespace),
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
Keyword, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
return 0.5
class GenericAspxLexer(RegexLexer):
"""
Lexer for ASP.NET pages.
"""
name = 'aspx-gen'
filenames = []
mimetypes = []
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
Other,
using(XmlLexer))),
(r'(.+?)(?=<)', using(XmlLexer)),
(r'.+', using(XmlLexer)),
],
}
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
"""
Lexer for highlighting C# within ASP.NET pages.
"""
name = 'aspx-cs'
aliases = ['aspx-cs']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super().__init__(CSharpLexer, GenericAspxLexer, **options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
return 0.15
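# Editorial sketch (not part of the upstream module): ``DelegatingLexer`` first
# scans the input with the second lexer (``GenericAspxLexer`` here) and then
# re-lexes every ``Other`` token -- the ``<% ... %>`` payload above -- with the
# first lexer (``CSharpLexer`` here). A hedged usage sketch:
#
#     from pygments.lexers import get_lexer_by_name
#     aspx_cs = get_lexer_by_name('aspx-cs')
#     tokens = list(aspx_cs.get_tokens('<%@ Page Language="C#" %><% int x = 1; %>'))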
class VbNetAspxLexer(DelegatingLexer):
"""
Lexer for highlighting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super().__init__(VbNetLexer, GenericAspxLexer, **options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
For the F# language (version 3.0).
.. versionadded:: 1.5
"""
name = 'F#'
url = 'https://fsharp.org/'
aliases = ['fsharp', 'f#']
filenames = ['*.fs', '*.fsi', '*.fsx']
mimetypes = ['text/x-fsharp']
keywords = [
'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
'while', 'with', 'yield!', 'yield',
]
# Reserved words; cannot hurt to color them as keywords too.
keywords += [
'atomic', 'break', 'checked', 'component', 'const', 'constraint',
'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
'functor', 'include', 'method', 'mixin', 'object', 'parallel',
'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
'virtual', 'volatile',
]
keyopts = [
'!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
'->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
'_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = [
'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
'list', 'exn', 'obj', 'enum',
]
# See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
# http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Whitespace),
(r'\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'(///.*?)(\n)', bygroups(String.Doc, Whitespace)),
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
(r'\(\*(?!\))', Comment, 'comment'),
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'\b(open|module)(\s+)([\w.]+)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'\b(let!?)(\s+)(\w+)',
bygroups(Keyword, Whitespace, Name.Variable)),
(r'\b(type)(\s+)(\w+)',
bygroups(Keyword, Whitespace, Name.Class)),
(r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
bygroups(Keyword, Whitespace, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r'(#)([ \t]*)(if|endif|else|line|nowarn|light|\d+)\b(.*?)(\n)',
bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
Comment.Preproc, Whitespace)),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
(r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
(r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'@?"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'dotted': [
(r'\s+', Whitespace),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
# e.g. dictionary index access
default('#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
# comments cannot be closed within strings in comments
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'[(*)@]', Comment),
],
'string': [
(r'[^\\"]+', String),
include('escape-sequence'),
(r'\\\n', String),
(r'\n', String), # newlines are allowed in any string
(r'"B?', String, '#pop'),
],
'lstring': [
(r'[^"]+', String),
(r'\n', String),
(r'""', String),
(r'"B?', String, '#pop'),
],
'tqs': [
(r'[^"]+', String),
(r'\n', String),
(r'"""B?', String, '#pop'),
(r'"', String),
],
}
def analyse_text(text):
"""F# doesn't have that many unique features -- |> and <| are weak
indicators."""
result = 0
if '|>' in text:
result += 0.05
if '<|' in text:
result += 0.05
return result
| 29,696 | Python | 39.680822 | 97 | 0.44208 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/srcinfo.py | """
    pygments.lexers.srcinfo
    ~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for .SRCINFO files used by Arch Linux Packages.

    The description of the format can be found in the wiki:
    https://wiki.archlinux.org/title/.SRCINFO

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Keyword, Name, Operator, Whitespace
__all__ = ['SrcinfoLexer']
keywords = (
'pkgbase', 'pkgname',
'pkgver', 'pkgrel', 'epoch',
'pkgdesc', 'url', 'install', 'changelog',
'arch', 'groups', 'license', 'noextract', 'options', 'backup',
'validpgpkeys',
)
architecture_dependent_keywords = (
'source', 'depends', 'checkdepends', 'makedepends', 'optdepends',
'provides', 'conflicts', 'replaces',
'md5sums', 'sha1sums', 'sha224sums', 'sha256sums', 'sha384sums',
'sha512sums',
)
class SrcinfoLexer(RegexLexer):
"""Lexer for .SRCINFO files used by Arch Linux Packages.
.. versionadded:: 2.11
"""
name = 'Srcinfo'
aliases = ['srcinfo']
filenames = ['.SRCINFO']
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#.*', Comment.Single),
(words(keywords), Keyword, 'assignment'),
(words(architecture_dependent_keywords, suffix=r'_\w+'),
Keyword, 'assignment'),
(r'\w+', Name.Variable, 'assignment'),
],
'assignment': [
(r' +', Whitespace),
(r'=', Operator, 'value'),
],
'value': [
(r' +', Whitespace),
(r'.*', Text, '#pop:2'),
],
}
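    # Editorial sketch (not part of the upstream module): a hedged example of
    # the key/value format this lexer targets (the package data below is made
    # up) and of driving the lexer directly:
    #
    #     pkgbase = example-pkg
    #     pkgdesc = An example package
    #     arch = x86_64
    #     depends_x86_64 = glibc
    #
    #     from pygments.lexers import SrcinfoLexer
    #     for tok, value in SrcinfoLexer().get_tokens("pkgbase = example-pkg\n"):
    #         print(tok, repr(value))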
| 1,693 | Python | 25.888888 | 77 | 0.559362 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/praat.py | """
    pygments.lexers.praat
    ~~~~~~~~~~~~~~~~~~~~~

    Lexer for Praat

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, bygroups, include
from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, \
Number, Operator, Whitespace
__all__ = ['PraatLexer']
class PraatLexer(RegexLexer):
"""
For Praat scripts.
.. versionadded:: 2.1
"""
name = 'Praat'
url = 'http://www.praat.org'
aliases = ['praat']
filenames = ['*.praat', '*.proc', '*.psc']
keywords = (
'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',
'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',
'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',
'editor', 'endeditor', 'clearinfo',
)
functions_string = (
'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',
'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',
'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',
'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',
)
functions_numeric = (
'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',
'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',
'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',
'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ',
'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile',
'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed',
'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed',
'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput',
'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor',
'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc',
'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ',
'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel',
'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index',
'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ',
'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma',
'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number',
'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical',
'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject',
'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson',
'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex',
'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject',
'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc',
'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',
'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine',
'writeInfo', 'writeInfoLine',
)
functions_array = (
'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',
)
objects = (
'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',
'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',
'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable',
'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion',
'ContingencyTable', 'Corpus', 'Correlation', 'Covariance',
'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler',
'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions',
'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable',
'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights',
'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid',
'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM',
'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence',
'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier',
'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries',
'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline',
'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram',
'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti',
'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo',
'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial',
'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier',
'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct',
'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker',
'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker',
'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',
'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',
'Weight', 'WordList',
)
variables_numeric = (
'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',
)
variables_string = (
'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',
'preferencesDirectory', 'newline', 'temporaryDirectory',
'defaultDirectory',
)
object_attributes = (
'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy',
)
tokens = {
'root': [
(r'(\s+)(#.*?$)', bygroups(Whitespace, Comment.Single)),
(r'^#.*?$', Comment.Single),
(r';[^\n]*', Comment.Single),
(r'\s+', Whitespace),
(r'\bprocedure\b', Keyword, 'procedure_definition'),
(r'\bcall\b', Keyword, 'procedure_call'),
(r'@', Name.Function, 'procedure_call'),
include('function_call'),
(words(keywords, suffix=r'\b'), Keyword),
(r'(\bform\b)(\s+)([^\n]+)',
bygroups(Keyword, Whitespace, String), 'old_form'),
(r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|'
r'include|execute|system(?:_nocheck)?)(\s+)',
bygroups(Keyword, Whitespace), 'string_unquoted'),
(r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Label)),
include('variable_name'),
include('number'),
(r'"', String, 'string'),
(words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'),
(r'\b[A-Z]', Keyword, 'command'),
(r'(\.{3}|[)(,])', Punctuation),
],
'command': [
(r'( ?[\w()-]+ ?)', Keyword),
include('string_interpolated'),
(r'\.{3}', Keyword, ('#pop', 'old_arguments')),
(r':', Keyword, ('#pop', 'comma_list')),
(r'\s', Whitespace, '#pop'),
],
'procedure_call': [
(r'\s+', Whitespace),
(r'([\w.]+)(?:(:)|(?:(\s*)(\()))',
bygroups(Name.Function, Punctuation,
Text.Whitespace, Punctuation), '#pop'),
(r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')),
],
'procedure_definition': [
(r'\s', Whitespace),
(r'([\w.]+)(\s*?[(:])',
bygroups(Name.Function, Whitespace), '#pop'),
(r'([\w.]+)([^\n]*)',
bygroups(Name.Function, Text), '#pop'),
],
'function_call': [
(words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_array, suffix=r'#(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_numeric, suffix=r'(?=\s*[:(])'), Name.Function, 'function'),
],
'function': [
(r'\s+', Whitespace),
(r':', Punctuation, ('#pop', 'comma_list')),
(r'\s*\(', Punctuation, ('#pop', 'comma_list')),
],
'comma_list': [
(r'(\s*\n\s*)(\.{3})', bygroups(Whitespace, Punctuation)),
(r'(\s*)(?:([)\]])|(\n))', bygroups(
Whitespace, Punctuation, Whitespace), '#pop'),
(r'\s+', Whitespace),
(r'"', String, 'string'),
(r'\b(if|then|else|fi|endif)\b', Keyword),
include('function_call'),
include('variable_name'),
include('operator'),
include('number'),
(r'[()]', Text),
(r',', Punctuation),
],
'old_arguments': [
(r'\n', Whitespace, '#pop'),
include('variable_name'),
include('operator'),
include('number'),
(r'"', String, 'string'),
(r'[^\n]', Text),
],
'number': [
(r'\n', Whitespace, '#pop'),
(r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number),
],
'object_reference': [
include('string_interpolated'),
(r'([a-z][a-zA-Z0-9_]*|\d+)', Name.Builtin),
(words(object_attributes, prefix=r'\.'), Name.Builtin, '#pop'),
(r'\$', Name.Builtin),
(r'\[', Text, '#pop'),
],
'variable_name': [
include('operator'),
include('number'),
(words(variables_string, suffix=r'\$'), Name.Variable.Global),
(words(variables_numeric,
suffix=r'(?=[^a-zA-Z0-9_."\'$#\[:(]|\s|^|$)'),
Name.Variable.Global),
(words(objects, prefix=r'\b', suffix=r"(_)"),
bygroups(Name.Builtin, Name.Builtin),
'object_reference'),
(r'\.?_?[a-z][\w.]*(\$|#)?', Text),
(r'[\[\]]', Punctuation, 'comma_list'),
include('string_interpolated'),
],
'operator': [
(r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator),
(r'(?<![\w.])(and|or|not|div|mod)(?![\w.])', Operator.Word),
],
'string_interpolated': [
(r'\'[_a-z][^\[\]\'":]*(\[([\d,]+|"[\w,]+")\])?(:[0-9]+)?\'',
String.Interpol),
],
'string_unquoted': [
(r'(\n\s*)(\.{3})', bygroups(Whitespace, Punctuation)),
(r'\n', Whitespace, '#pop'),
(r'\s', Whitespace),
include('string_interpolated'),
(r"'", String),
(r"[^'\n]+", String),
],
'string': [
(r'(\n\s*)(\.{3})', bygroups(Whitespace, Punctuation)),
(r'"', String, '#pop'),
include('string_interpolated'),
(r"'", String),
(r'[^\'"\n]+', String),
],
'old_form': [
(r'(\s+)(#.*?$)', bygroups(Whitespace, Comment.Single)),
(r'\s+', Whitespace),
(r'(optionmenu|choice)([ \t]+)(\S+)(:)([ \t]+)',
bygroups(Keyword, Whitespace, Text, Punctuation, Whitespace), 'number'),
(r'(option|button)([ \t]+)',
bygroups(Keyword, Whitespace), 'string_unquoted'),
(r'(sentence|text)([ \t]+)(\S+)',
bygroups(Keyword, Whitespace, String), 'string_unquoted'),
(r'(word)([ \t]+)(\S+)([ \t]*)(\S+)?(?:([ \t]+)(.*))?',
bygroups(Keyword, Whitespace, Text, Whitespace, Text, Whitespace, Text)),
(r'(boolean)(\s+\S+\s*)(0|1|"?(?:yes|no)"?)',
bygroups(Keyword, Whitespace, Name.Variable)),
            # Ideally the number would be processed in the 'number' state,
            # but that doesn't seem to work.
(r'(real|natural|positive|integer)([ \t]+\S+[ \t]*)([+-]?)(\d+(?:\.\d*)?'
r'(?:[eE][-+]?\d+)?%?)',
bygroups(Keyword, Whitespace, Operator, Number)),
(r'(comment)(\s+)',
bygroups(Keyword, Whitespace), 'string_unquoted'),
(r'\bendform\b', Keyword, '#pop'),
]
}
| 12,677 | Python | 40.567213 | 98 | 0.510215 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/pascal.py | """
    pygments.lexers.pascal
    ~~~~~~~~~~~~~~~~~~~~~~

    Lexers for Pascal family languages.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace
from pygments.scanner import Scanner
# compatibility import
from pygments.lexers.modula2 import Modula2Lexer
__all__ = ['DelphiLexer', 'PortugolLexer']
class PortugolLexer(Lexer):
"""For Portugol, a Pascal dialect with keywords in Portuguese."""
name = 'Portugol'
aliases = ['portugol']
filenames = ['*.alg', '*.portugol']
mimetypes = []
url = "https://www.apoioinformatica.inf.br/produtos/visualg/linguagem"
def __init__(self, **options):
Lexer.__init__(self, **options)
self.lexer = DelphiLexer(**options, portugol=True)
def get_tokens_unprocessed(self, text):
return self.lexer.get_tokens_unprocessed(text)
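# Editorial sketch (not part of the upstream module): PortugolLexer simply
# forwards to DelphiLexer with ``portugol=True``, so a hedged usage sketch is:
#
#     from pygments.lexers.pascal import PortugolLexer
#     algo = 'algoritmo "ola"\ninicio\n   escreva("ola")\nfimalgoritmo'
#     tokens = list(PortugolLexer().get_tokens(algo))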
class DelphiLexer(Lexer):
"""
For Delphi (Borland Object Pascal),
Turbo Pascal and Free Pascal source code.
Additional options accepted:
`turbopascal`
Highlight Turbo Pascal specific keywords (default: ``True``).
`delphi`
Highlight Borland Delphi specific keywords (default: ``True``).
`freepascal`
Highlight Free Pascal specific keywords (default: ``True``).
`units`
A list of units that should be considered builtin, supported are
``System``, ``SysUtils``, ``Classes`` and ``Math``.
Default is to consider all of them builtin.
"""
name = 'Delphi'
aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
filenames = ['*.pas', '*.dpr']
mimetypes = ['text/x-pascal']
TURBO_PASCAL_KEYWORDS = (
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
)
DELPHI_KEYWORDS = (
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
)
FREE_PASCAL_KEYWORDS = (
'dispose', 'exit', 'false', 'new', 'true'
)
BLOCK_KEYWORDS = {
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
}
FUNCTION_MODIFIERS = {
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
}
    # XXX: those aren't global, but currently we know of no way to define
    # them just for the type context.
DIRECTIVES = {
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
}
BUILTIN_TYPES = {
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
}
BUILTIN_UNITS = {
'System': (
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
),
'SysUtils': (
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
'getenvironmentvariable', 'getfileversion', 'getformatsettings',
'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
'includetrailingbackslash', 'includetrailingpathdelimiter',
'incmonth', 'initializepackage', 'interlockeddecrement',
'interlockedexchange', 'interlockedexchangeadd',
'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
'outofmemoryerror', 'quotedstr', 'raiselastoserror',
'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
'strtotimedef', 'strupper', 'supports', 'syserrormessage',
'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
'wraptext'
),
'Classes': (
'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
'groupdescendantswith', 'hextobin', 'identtoint',
'initinheritedcomponent', 'inttoident', 'invalidpoint',
'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
'pointsequal', 'readcomponentres', 'readcomponentresex',
'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
'registerclasses', 'registercomponents', 'registerintegerconsts',
'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
'teststreamformat', 'unregisterclass', 'unregisterclasses',
'unregisterintegerconsts', 'unregistermoduleclasses',
'writecomponentresfile'
),
'Math': (
'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
'tan', 'tanh', 'totalvariance', 'variance'
)
}
ASM_REGISTERS = {
'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
'xmm6', 'xmm7'
}
ASM_INSTRUCTIONS = {
'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
'xlatb', 'xor'
}
PORTUGOL_KEYWORDS = (
'aleatorio',
'algoritmo',
'arquivo',
'ate',
'caso',
'cronometro',
'debug',
'e',
'eco',
'enquanto',
'entao',
'escolha',
'escreva',
'escreval',
'faca',
'falso',
'fimalgoritmo',
'fimenquanto',
'fimescolha',
'fimfuncao',
'fimpara',
'fimprocedimento',
'fimrepita',
'fimse',
'funcao',
'inicio',
'int',
'interrompa',
'leia',
'limpatela',
'mod',
'nao',
'ou',
'outrocaso',
'para',
'passo',
'pausa',
'procedimento',
'repita',
'retorne',
'se',
'senao',
'timer',
'var',
'vetor',
'verdadeiro',
'xou',
'div',
'mod',
'abs',
'arccos',
'arcsen',
'arctan',
'cos',
'cotan',
'Exp',
'grauprad',
'int',
'log',
'logn',
'pi',
'quad',
'radpgrau',
'raizq',
'rand',
'randi',
'sen',
'Tan',
'asc',
'carac',
'caracpnum',
'compr',
'copia',
'maiusc',
'minusc',
'numpcarac',
'pos',
)
PORTUGOL_BUILTIN_TYPES = {
'inteiro', 'real', 'caractere', 'logico'
}
def __init__(self, **options):
Lexer.__init__(self, **options)
self.keywords = set()
self.builtins = set()
if get_bool_opt(options, 'portugol', False):
self.keywords.update(self.PORTUGOL_KEYWORDS)
self.builtins.update(self.PORTUGOL_BUILTIN_TYPES)
self.is_portugol = True
else:
self.is_portugol = False
if get_bool_opt(options, 'turbopascal', True):
self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
if get_bool_opt(options, 'delphi', True):
self.keywords.update(self.DELPHI_KEYWORDS)
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
self.builtins.update(self.BUILTIN_UNITS[unit])
def get_tokens_unprocessed(self, text):
scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
stack = ['initial']
in_function_block = False
in_property_block = False
was_dot = False
next_token_is_function = False
next_token_is_property = False
collect_labels = False
block_labels = set()
brace_balance = [0, 0]
while not scanner.eos:
token = Error
if stack[-1] == 'initial':
if scanner.scan(r'\s+'):
token = Whitespace
elif not self.is_portugol and scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
if scanner.match.startswith('$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif self.is_portugol and scanner.scan(r'(<\-)|(>=)|(<=)|%|<|>|-|\+|\*|\=|(<>)|\/|\.|:|,'):
token = Operator
elif not self.is_portugol and scanner.scan(r'[-+*\/=<>:;,.@\^]'):
token = Operator
# stop label highlighting on next ";"
if collect_labels and scanner.match == ';':
collect_labels = False
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
# abort function naming ``foo = Function(...)``
next_token_is_function = False
# if we are in a function block we count the open
                    # braces because otherwise it's impossible to
# determine the end of the modifier context
if in_function_block or in_property_block:
if scanner.match == '(':
brace_balance[0] += 1
elif scanner.match == ')':
brace_balance[0] -= 1
elif scanner.match == '[':
brace_balance[1] += 1
elif scanner.match == ']':
brace_balance[1] -= 1
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name == 'result':
token = Name.Builtin.Pseudo
elif lowercase_name in self.keywords:
token = Keyword
                        # if we are in a special block and a
                        # block-ending keyword occurs (and the parentheses
                        # are balanced) we end the current block context
if self.is_portugol:
if lowercase_name in ('funcao', 'procedimento'):
in_function_block = True
next_token_is_function = True
else:
if (in_function_block or in_property_block) and \
lowercase_name in self.BLOCK_KEYWORDS and \
brace_balance[0] <= 0 and \
brace_balance[1] <= 0:
in_function_block = False
in_property_block = False
brace_balance = [0, 0]
block_labels = set()
if lowercase_name in ('label', 'goto'):
collect_labels = True
elif lowercase_name == 'asm':
stack.append('asm')
elif lowercase_name == 'property':
in_property_block = True
next_token_is_property = True
elif lowercase_name in ('procedure', 'operator',
'function', 'constructor',
'destructor'):
in_function_block = True
next_token_is_function = True
# we are in a function block and the current name
# is in the set of registered modifiers. highlight
# it as pseudo keyword
elif not self.is_portugol and in_function_block and \
lowercase_name in self.FUNCTION_MODIFIERS:
token = Keyword.Pseudo
# if we are in a property highlight some more
# modifiers
elif not self.is_portugol and in_property_block and \
lowercase_name in ('read', 'write'):
token = Keyword.Pseudo
next_token_is_function = True
# if the last iteration set next_token_is_function
# to true we now want this name highlighted as
# function. so do that and reset the state
elif next_token_is_function:
                            # Look ahead for a dot. If one follows, this is
                            # not a function but a class name, and the part
                            # after the dot is the function name
if not self.is_portugol and scanner.test(r'\s*\.\s*'):
token = Name.Class
# it's not a dot, our job is done
else:
token = Name.Function
next_token_is_function = False
if self.is_portugol:
block_labels.add(scanner.match.lower())
# same for properties
elif not self.is_portugol and next_token_is_property:
token = Name.Property
next_token_is_property = False
# Highlight this token as label and add it
# to the list of known labels
elif not self.is_portugol and collect_labels:
token = Name.Label
block_labels.add(scanner.match.lower())
# name is in list of known labels
elif lowercase_name in block_labels:
token = Name.Label
elif self.is_portugol and lowercase_name in self.PORTUGOL_BUILTIN_TYPES:
token = Keyword.Type
elif not self.is_portugol and lowercase_name in self.BUILTIN_TYPES:
token = Keyword.Type
elif not self.is_portugol and lowercase_name in self.DIRECTIVES:
token = Keyword.Pseudo
                    # builtins are only treated as builtins if the
                    # preceding token isn't a dot
elif not self.is_portugol and not was_dot and lowercase_name in self.builtins:
token = Name.Builtin
else:
token = Name
elif self.is_portugol and scanner.scan(r"\""):
token = String
stack.append('string')
elif not self.is_portugol and scanner.scan(r"'"):
token = String
stack.append('string')
elif not self.is_portugol and scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
token = String.Char
elif not self.is_portugol and scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
                    # if the stack is more than one level deep, pop
if len(stack) > 1:
stack.pop()
scanner.get_char()
elif stack[-1] == 'string':
if self.is_portugol:
if scanner.scan(r"''"):
token = String.Escape
elif scanner.scan(r"\""):
token = String
stack.pop()
elif scanner.scan(r"[^\"]*"):
token = String
else:
scanner.get_char()
stack.pop()
else:
if scanner.scan(r"''"):
token = String.Escape
elif scanner.scan(r"'"):
token = String
stack.pop()
elif scanner.scan(r"[^']*"):
token = String
else:
scanner.get_char()
stack.pop()
elif not self.is_portugol and stack[-1] == 'asm':
if scanner.scan(r'\s+'):
token = Whitespace
elif scanner.scan(r'end'):
token = Keyword
stack.pop()
elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
if scanner.match.startswith('$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif scanner.scan(r"'"):
token = String
stack.append('string')
elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
token = Name.Label
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name in self.ASM_INSTRUCTIONS:
token = Keyword
elif lowercase_name in self.ASM_REGISTERS:
token = Name.Builtin
else:
token = Name
elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
token = Operator
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
elif scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
scanner.get_char()
stack.pop()
            # remember whether the last significant token was a dot
if not self.is_portugol and scanner.match.strip():
was_dot = scanner.match == '.'
yield scanner.start_pos, token, scanner.match or ''
| 30,880 | Python | 47.101246 | 107 | 0.500615 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_cl_builtins.py | """
    pygments.lexers._cl_builtins
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    ANSI Common Lisp builtins.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
BUILTIN_FUNCTIONS = { # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
'apropos-list', 'aref', 'arithmetic-error-operands',
'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
'characterp', 'char-code', 'char-downcase', 'char-equal',
'char-greaterp', 'char-int', 'char-lessp', 'char-name',
'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
'close', 'clrhash', 'code-char', 'coerce', 'compile',
'compiled-function-p', 'compile-file', 'compile-file-pathname',
'compiler-macro-function', 'complement', 'complex', 'complexp',
'compute-applicable-methods', 'compute-restarts', 'concatenate',
'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
'delete-package', 'denominator', 'deposit-field', 'describe',
'describe-object', 'digit-char', 'digit-char-p', 'directory',
'directory-namestring', 'disassemble', 'documentation', 'dpb',
'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
'enough-namestring', 'ensure-directories-exist',
'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
'file-error-pathname', 'file-length', 'file-namestring',
'file-position', 'file-string-length', 'file-write-date',
'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
'fround', 'ftruncate', 'funcall', 'function-keywords',
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
'gethash', 'get-internal-real-time', 'get-internal-run-time',
'get-macro-character', 'get-output-stream-string', 'get-properties',
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'host-namestring', 'identity', 'imagpart', 'import',
'initialize-instance', 'input-stream-p', 'inspect',
'integer-decode-float', 'integer-length', 'integerp',
'interactive-stream-p', 'intern', 'intersection',
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
'listen', 'list-length', 'listp', 'load',
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
'make-instance', 'make-instances-obsolete', 'make-list',
'make-load-form', 'make-load-form-saving-slots', 'make-package',
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
'merge', 'merge-pathnames', 'method-combination-error',
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
'package-name', 'package-nicknames', 'packagep',
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
'pathname-device', 'pathname-directory', 'pathname-host',
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
'read-from-string', 'read-line', 'read-preserving-whitespace',
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
'search', 'second', 'set', 'set-difference',
'set-dispatch-macro-character', 'set-exclusive-or',
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
'simple-condition-format-arguments', 'simple-condition-format-control',
'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
'slot-unbound', 'slot-value', 'software-type', 'software-version',
'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
'standard-char-p', 'store-value', 'stream-element-type',
'stream-error-stream', 'stream-external-format', 'streamp', 'string',
'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
'string-capitalize', 'string-downcase', 'string-equal',
'string-greaterp', 'string-left-trim', 'string-lessp',
'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
    'substitute', 'substitute-if', 'substitute-if-not', 'subtypep', 'svref',
'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
'translate-logical-pathname', 'translate-pathname', 'tree-equal',
'truename', 'truncate', 'two-way-stream-input-stream',
'two-way-stream-output-stream', 'type-error-datum',
'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
'update-instance-for-different-class',
'update-instance-for-redefined-class', 'upgraded-array-element-type',
'upgraded-complex-part-type', 'upper-case-p', 'use-package',
'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
'y-or-n-p', 'zerop',
}
SPECIAL_FORMS = {
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
'unwind-protect',
}
MACROS = {
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
'define-compiler-macro', 'define-condition', 'define-method-combination',
'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
'multiple-value-setq', 'nth-value', 'or', 'pop',
'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
'with-condition-restarts', 'with-hash-table-iterator',
'with-input-from-string', 'with-open-file', 'with-open-stream',
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
'with-slots', 'with-standard-io-syntax',
}
LAMBDA_LIST_KEYWORDS = {
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
}
DECLARATIONS = {
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
'ignorable', 'notinline', 'type',
}
BUILTIN_TYPES = {
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
'simple-vector', 'standard-char', 'unsigned-byte',
# Condition Types
'arithmetic-error', 'cell-error', 'condition', 'control-error',
'division-by-zero', 'end-of-file', 'error', 'file-error',
'floating-point-inexact', 'floating-point-overflow',
'floating-point-underflow', 'floating-point-invalid-operation',
'parse-error', 'package-error', 'print-not-readable', 'program-error',
'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
'undefined-function', 'warning',
}
BUILTIN_CLASSES = {
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
'integer', 'list', 'logical-pathname', 'method-combination', 'method',
'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable',
'real', 'random-state', 'restart', 'sequence', 'standard-class',
'standard-generic-function', 'standard-method', 'standard-object',
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
}
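
# --- Illustrative helper (assumption; not part of the original module) -----
# Pygments' Common Lisp lexer consults these name sets to re-tag scanned
# symbols.  A minimal sketch of that kind of lookup, using only the sets
# defined above (the helper name is invented for illustration):
def _classify_symbol(name):
    """Return a coarse category string for a lowercased symbol name."""
    if name in BUILTIN_FUNCTIONS:
        return 'builtin function'
    if name in SPECIAL_FORMS:
        return 'special form'
    if name in MACROS:
        return 'macro'
    if name in LAMBDA_LIST_KEYWORDS:
        return 'lambda list keyword'
    if name in DECLARATIONS:
        return 'declaration'
    if name in BUILTIN_TYPES:
        return 'builtin type'
    if name in BUILTIN_CLASSES:
        return 'builtin class'
    return 'other'
# e.g. _classify_symbol('defun') == 'macro', _classify_symbol('car') == 'builtin function'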
| 13,994 | Python | 59.323276 | 80 | 0.630056 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_lasso_builtins.py | """
    pygments.lexers._lasso_builtins
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Built-in Lasso types, traits, methods, and members.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
BUILTINS = {
'Types': (
'array',
'atbegin',
'boolean',
'bson_iter',
'bson',
'bytes_document_body',
'bytes',
'cache_server_element',
'cache_server',
'capture',
'client_address',
'client_ip',
'component_container',
'component_render_state',
'component',
'curl',
'curltoken',
'currency',
'custom',
'data_document',
'database_registry',
'date',
'dateandtime',
'dbgp_packet',
'dbgp_server',
'debugging_stack',
'decimal',
'delve',
'dir',
'dirdesc',
'dns_response',
'document_base',
'document_body',
'document_header',
'dsinfo',
'duration',
'eacher',
'email_compose',
'email_parse',
'email_pop',
'email_queue_impl_base',
'email_queue_impl',
'email_smtp',
'email_stage_impl_base',
'email_stage_impl',
'fastcgi_each_fcgi_param',
'fastcgi_server',
'fcgi_record',
'fcgi_request',
'file',
'filedesc',
'filemaker_datasource',
'generateforeachkeyed',
'generateforeachunkeyed',
'generateseries',
'hash_map',
'html_atomic_element',
'html_attr',
'html_base',
'html_binary',
'html_br',
'html_cdata',
'html_container_element',
'html_div',
'html_document_body',
'html_document_head',
'html_eol',
'html_fieldset',
'html_form',
'html_h1',
'html_h2',
'html_h3',
'html_h4',
'html_h5',
'html_h6',
'html_hr',
'html_img',
'html_input',
'html_json',
'html_label',
'html_legend',
'html_link',
'html_meta',
'html_object',
'html_option',
'html_raw',
'html_script',
'html_select',
'html_span',
'html_style',
'html_table',
'html_td',
'html_text',
'html_th',
'html_tr',
'http_document_header',
'http_document',
'http_error',
'http_header_field',
'http_server_connection_handler_globals',
'http_server_connection_handler',
'http_server_request_logger_thread',
'http_server_web_connection',
'http_server',
'image',
'include_cache',
'inline_type',
'integer',
'java_jnienv',
'jbyte',
'jbytearray',
'jchar',
'jchararray',
'jfieldid',
'jfloat',
'jint',
'jmethodid',
'jobject',
'jshort',
'json_decode',
'json_encode',
'json_literal',
'json_object',
'keyword',
'lassoapp_compiledsrc_appsource',
'lassoapp_compiledsrc_fileresource',
'lassoapp_content_rep_halt',
'lassoapp_dirsrc_appsource',
'lassoapp_dirsrc_fileresource',
'lassoapp_installer',
'lassoapp_livesrc_appsource',
'lassoapp_livesrc_fileresource',
'lassoapp_long_expiring_bytes',
'lassoapp_manualsrc_appsource',
'lassoapp_zip_file_server',
'lassoapp_zipsrc_appsource',
'lassoapp_zipsrc_fileresource',
'ldap',
'library_thread_loader',
'list_node',
'list',
'locale',
'log_impl_base',
'log_impl',
'magick_image',
'map_node',
'map',
'memberstream',
'memory_session_driver_impl_entry',
'memory_session_driver_impl',
'memory_session_driver',
'mime_reader',
'mongo_client',
'mongo_collection',
'mongo_cursor',
'mustache_ctx',
'mysql_session_driver_impl',
'mysql_session_driver',
'net_named_pipe',
'net_tcp_ssl',
'net_tcp',
'net_udp_packet',
'net_udp',
'null',
'odbc_session_driver_impl',
'odbc_session_driver',
'opaque',
'os_process',
'pair_compare',
'pair',
'pairup',
'pdf_barcode',
'pdf_chunk',
'pdf_color',
'pdf_doc',
'pdf_font',
'pdf_hyphenator',
'pdf_image',
'pdf_list',
'pdf_paragraph',
'pdf_phrase',
'pdf_read',
'pdf_table',
'pdf_text',
'pdf_typebase',
'percent',
'portal_impl',
'queriable_groupby',
'queriable_grouping',
'queriable_groupjoin',
'queriable_join',
'queriable_orderby',
'queriable_orderbydescending',
'queriable_select',
'queriable_selectmany',
'queriable_skip',
'queriable_take',
'queriable_thenby',
'queriable_thenbydescending',
'queriable_where',
'queue',
'raw_document_body',
'regexp',
'repeat',
'scientific',
'security_registry',
'serialization_element',
'serialization_object_identity_compare',
'serialization_reader',
'serialization_writer_ref',
'serialization_writer_standin',
'serialization_writer',
'session_delete_expired_thread',
'set',
'signature',
'sourcefile',
'sqlite_column',
'sqlite_currentrow',
'sqlite_db',
'sqlite_results',
'sqlite_session_driver_impl_entry',
'sqlite_session_driver_impl',
'sqlite_session_driver',
'sqlite_table',
'sqlite3_stmt',
'sqlite3',
'staticarray',
'string',
'sys_process',
'tag',
'text_document',
'tie',
'timeonly',
'trait',
'tree_base',
'tree_node',
'tree_nullnode',
'ucal',
'usgcpu',
'usgvm',
'void',
'web_error_atend',
'web_node_base',
'web_node_content_representation_css_specialized',
'web_node_content_representation_html_specialized',
'web_node_content_representation_js_specialized',
'web_node_content_representation_xhr_container',
'web_node_echo',
'web_node_root',
'web_request_impl',
'web_request',
'web_response_impl',
'web_response',
'web_router',
'websocket_handler',
'worker_pool',
'xml_attr',
'xml_cdatasection',
'xml_characterdata',
'xml_comment',
'xml_document',
'xml_documentfragment',
'xml_documenttype',
'xml_domimplementation',
'xml_element',
'xml_entity',
'xml_entityreference',
'xml_namednodemap_attr',
'xml_namednodemap_ht',
'xml_namednodemap',
'xml_node',
'xml_nodelist',
'xml_notation',
'xml_processinginstruction',
'xml_text',
'xmlstream',
'zip_file_impl',
'zip_file',
'zip_impl',
'zip',
),
'Traits': (
'any',
'formattingbase',
'html_attributed',
'html_element_coreattrs',
'html_element_eventsattrs',
'html_element_i18nattrs',
'lassoapp_capabilities',
'lassoapp_resource',
'lassoapp_source',
'queriable_asstring',
'session_driver',
'trait_array',
'trait_asstring',
'trait_backcontractible',
'trait_backended',
'trait_backexpandable',
'trait_close',
'trait_contractible',
'trait_decompose_assignment',
'trait_doubleended',
'trait_each_sub',
'trait_encodeurl',
'trait_endedfullymutable',
'trait_expandable',
'trait_file',
'trait_finite',
'trait_finiteforeach',
'trait_foreach',
'trait_foreachtextelement',
'trait_frontcontractible',
'trait_frontended',
'trait_frontexpandable',
'trait_fullymutable',
'trait_generator',
'trait_generatorcentric',
'trait_hashable',
'trait_json_serialize',
'trait_keyed',
'trait_keyedfinite',
'trait_keyedforeach',
'trait_keyedmutable',
'trait_list',
'trait_map',
'trait_net',
'trait_pathcomponents',
'trait_positionallykeyed',
'trait_positionallysearchable',
'trait_queriable',
'trait_queriablelambda',
'trait_readbytes',
'trait_readstring',
'trait_scalar',
'trait_searchable',
'trait_serializable',
'trait_setencoding',
'trait_setoperations',
'trait_stack',
'trait_treenode',
'trait_writebytes',
'trait_writestring',
'trait_xml_elementcompat',
'trait_xml_nodecompat',
'web_connection',
'web_node_container',
'web_node_content_css_specialized',
'web_node_content_document',
'web_node_content_html_specialized',
'web_node_content_js_specialized',
'web_node_content_json_specialized',
'web_node_content_representation',
'web_node_content',
'web_node_postable',
'web_node',
),
'Unbound Methods': (
'abort_clear',
'abort_now',
'abort',
'action_param',
'action_params',
'action_statement',
'admin_authorization',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'admin_getpref',
'admin_initialize',
'admin_lassoservicepath',
'admin_removepref',
'admin_setpref',
'admin_userexists',
'all',
'auth_admin',
'auth_check',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'bw',
'capture_nearestloopabort',
'capture_nearestloopcontinue',
'capture_nearestloopcount',
'checked',
'cipher_decrypt_private',
'cipher_decrypt_public',
'cipher_decrypt',
'cipher_digest',
'cipher_encrypt_private',
'cipher_encrypt_public',
'cipher_encrypt',
'cipher_generate_key',
'cipher_hmac',
'cipher_keylength',
'cipher_list',
'cipher_open',
'cipher_seal',
'cipher_sign',
'cipher_verify',
'client_addr',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparam',
'client_getparams',
'client_headers',
'client_integertoip',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparam',
'client_postparams',
'client_type',
'client_url',
'client_username',
'cn',
'column_name',
'column_names',
'column_type',
'column',
'compress',
'content_addheader',
'content_body',
'content_encoding',
'content_header',
'content_replaceheader',
'content_type',
'cookie_set',
'cookie',
'curl_easy_cleanup',
'curl_easy_duphandle',
'curl_easy_getinfo',
'curl_easy_init',
'curl_easy_reset',
'curl_easy_setopt',
'curl_easy_strerror',
'curl_getdate',
'curl_http_version_1_0',
'curl_http_version_1_1',
'curl_http_version_none',
'curl_ipresolve_v4',
'curl_ipresolve_v6',
'curl_ipresolve_whatever',
'curl_multi_perform',
'curl_multi_result',
'curl_netrc_ignored',
'curl_netrc_optional',
'curl_netrc_required',
'curl_sslversion_default',
'curl_sslversion_sslv2',
'curl_sslversion_sslv3',
'curl_sslversion_tlsv1',
'curl_version_asynchdns',
'curl_version_debug',
'curl_version_gssnegotiate',
'curl_version_idn',
'curl_version_info',
'curl_version_ipv6',
'curl_version_kerberos4',
'curl_version_largefile',
'curl_version_libz',
'curl_version_ntlm',
'curl_version_spnego',
'curl_version_ssl',
'curl_version',
'curlauth_any',
'curlauth_anysafe',
'curlauth_basic',
'curlauth_digest',
'curlauth_gssnegotiate',
'curlauth_none',
'curlauth_ntlm',
'curle_aborted_by_callback',
'curle_bad_calling_order',
'curle_bad_content_encoding',
'curle_bad_download_resume',
'curle_bad_function_argument',
'curle_bad_password_entered',
'curle_couldnt_connect',
'curle_couldnt_resolve_host',
'curle_couldnt_resolve_proxy',
'curle_failed_init',
'curle_file_couldnt_read_file',
'curle_filesize_exceeded',
'curle_ftp_access_denied',
'curle_ftp_cant_get_host',
'curle_ftp_cant_reconnect',
'curle_ftp_couldnt_get_size',
'curle_ftp_couldnt_retr_file',
'curle_ftp_couldnt_set_ascii',
'curle_ftp_couldnt_set_binary',
'curle_ftp_couldnt_use_rest',
'curle_ftp_port_failed',
'curle_ftp_quote_error',
'curle_ftp_ssl_failed',
'curle_ftp_user_password_incorrect',
'curle_ftp_weird_227_format',
'curle_ftp_weird_pass_reply',
'curle_ftp_weird_pasv_reply',
'curle_ftp_weird_server_reply',
'curle_ftp_weird_user_reply',
'curle_ftp_write_error',
'curle_function_not_found',
'curle_got_nothing',
'curle_http_post_error',
'curle_http_range_error',
'curle_http_returned_error',
'curle_interface_failed',
'curle_ldap_cannot_bind',
'curle_ldap_invalid_url',
'curle_ldap_search_failed',
'curle_library_not_found',
'curle_login_denied',
'curle_malformat_user',
'curle_obsolete',
'curle_ok',
'curle_operation_timeouted',
'curle_out_of_memory',
'curle_partial_file',
'curle_read_error',
'curle_recv_error',
'curle_send_error',
'curle_send_fail_rewind',
'curle_share_in_use',
'curle_ssl_cacert',
'curle_ssl_certproblem',
'curle_ssl_cipher',
'curle_ssl_connect_error',
'curle_ssl_engine_initfailed',
'curle_ssl_engine_notfound',
'curle_ssl_engine_setfailed',
'curle_ssl_peer_certificate',
'curle_telnet_option_syntax',
'curle_too_many_redirects',
'curle_unknown_telnet_option',
'curle_unsupported_protocol',
'curle_url_malformat_user',
'curle_url_malformat',
'curle_write_error',
'curlftpauth_default',
'curlftpauth_ssl',
'curlftpauth_tls',
'curlftpssl_all',
'curlftpssl_control',
'curlftpssl_last',
'curlftpssl_none',
'curlftpssl_try',
'curlinfo_connect_time',
'curlinfo_content_length_download',
'curlinfo_content_length_upload',
'curlinfo_content_type',
'curlinfo_effective_url',
'curlinfo_filetime',
'curlinfo_header_size',
'curlinfo_http_connectcode',
'curlinfo_httpauth_avail',
'curlinfo_namelookup_time',
'curlinfo_num_connects',
'curlinfo_os_errno',
'curlinfo_pretransfer_time',
'curlinfo_proxyauth_avail',
'curlinfo_redirect_count',
'curlinfo_redirect_time',
'curlinfo_request_size',
'curlinfo_response_code',
'curlinfo_size_download',
'curlinfo_size_upload',
'curlinfo_speed_download',
'curlinfo_speed_upload',
'curlinfo_ssl_engines',
'curlinfo_ssl_verifyresult',
'curlinfo_starttransfer_time',
'curlinfo_total_time',
'curlmsg_done',
'curlopt_autoreferer',
'curlopt_buffersize',
'curlopt_cainfo',
'curlopt_capath',
'curlopt_connecttimeout',
'curlopt_cookie',
'curlopt_cookiefile',
'curlopt_cookiejar',
'curlopt_cookiesession',
'curlopt_crlf',
'curlopt_customrequest',
'curlopt_dns_use_global_cache',
'curlopt_egdsocket',
'curlopt_encoding',
'curlopt_failonerror',
'curlopt_filetime',
'curlopt_followlocation',
'curlopt_forbid_reuse',
'curlopt_fresh_connect',
'curlopt_ftp_account',
'curlopt_ftp_create_missing_dirs',
'curlopt_ftp_response_timeout',
'curlopt_ftp_ssl',
'curlopt_ftp_use_eprt',
'curlopt_ftp_use_epsv',
'curlopt_ftpappend',
'curlopt_ftplistonly',
'curlopt_ftpport',
'curlopt_ftpsslauth',
'curlopt_header',
'curlopt_http_version',
'curlopt_http200aliases',
'curlopt_httpauth',
'curlopt_httpget',
'curlopt_httpheader',
'curlopt_httppost',
'curlopt_httpproxytunnel',
'curlopt_infilesize_large',
'curlopt_infilesize',
'curlopt_interface',
'curlopt_ipresolve',
'curlopt_krb4level',
'curlopt_low_speed_limit',
'curlopt_low_speed_time',
'curlopt_mail_from',
'curlopt_mail_rcpt',
'curlopt_maxconnects',
'curlopt_maxfilesize_large',
'curlopt_maxfilesize',
'curlopt_maxredirs',
'curlopt_netrc_file',
'curlopt_netrc',
'curlopt_nobody',
'curlopt_noprogress',
'curlopt_port',
'curlopt_post',
'curlopt_postfields',
'curlopt_postfieldsize_large',
'curlopt_postfieldsize',
'curlopt_postquote',
'curlopt_prequote',
'curlopt_proxy',
'curlopt_proxyauth',
'curlopt_proxyport',
'curlopt_proxytype',
'curlopt_proxyuserpwd',
'curlopt_put',
'curlopt_quote',
'curlopt_random_file',
'curlopt_range',
'curlopt_readdata',
'curlopt_referer',
'curlopt_resume_from_large',
'curlopt_resume_from',
'curlopt_ssl_cipher_list',
'curlopt_ssl_verifyhost',
'curlopt_ssl_verifypeer',
'curlopt_sslcert',
'curlopt_sslcerttype',
'curlopt_sslengine_default',
'curlopt_sslengine',
'curlopt_sslkey',
'curlopt_sslkeypasswd',
'curlopt_sslkeytype',
'curlopt_sslversion',
'curlopt_tcp_nodelay',
'curlopt_timecondition',
'curlopt_timeout',
'curlopt_timevalue',
'curlopt_transfertext',
'curlopt_unrestricted_auth',
'curlopt_upload',
'curlopt_url',
'curlopt_use_ssl',
'curlopt_useragent',
'curlopt_userpwd',
'curlopt_verbose',
'curlopt_writedata',
'curlproxy_http',
'curlproxy_socks4',
'curlproxy_socks5',
'database_adddefaultsqlitehost',
'database_database',
'database_initialize',
'database_name',
'database_qs',
'database_table_database_tables',
'database_table_datasource_databases',
'database_table_datasource_hosts',
'database_table_datasources',
'database_table_table_fields',
'database_util_cleanpath',
'dbgp_stop_stack_name',
'debugging_break',
'debugging_breakpoint_get',
'debugging_breakpoint_list',
'debugging_breakpoint_remove',
'debugging_breakpoint_set',
'debugging_breakpoint_update',
'debugging_context_locals',
'debugging_context_self',
'debugging_context_vars',
'debugging_detach',
'debugging_enabled',
'debugging_get_context',
'debugging_get_stack',
'debugging_run',
'debugging_step_in',
'debugging_step_out',
'debugging_step_over',
'debugging_stop',
'debugging_terminate',
'decimal_random',
'decompress',
'decrypt_blowfish',
'define_atbegin',
'define_atend',
'dns_default',
'dns_lookup',
'document',
'email_attachment_mime_type',
'email_batch',
'email_digestchallenge',
'email_digestresponse',
'email_extract',
'email_findemails',
'email_fix_address_list',
'email_fix_address',
'email_fs_error_clean',
'email_immediate',
'email_initialize',
'email_merge',
'email_mxlookup',
'email_pop_priv_extract',
'email_pop_priv_quote',
'email_pop_priv_substring',
'email_queue',
'email_result',
'email_safeemail',
'email_send',
'email_status',
'email_token',
'email_translatebreakstocrlf',
'encode_qheader',
'encoding_iso88591',
'encoding_utf8',
'encrypt_blowfish',
'encrypt_crammd5',
'encrypt_hmac',
'encrypt_md5',
'eol',
'eq',
'error_code_aborted',
'error_code_dividebyzero',
'error_code_filenotfound',
'error_code_invalidparameter',
'error_code_methodnotfound',
'error_code_networkerror',
'error_code_noerror',
'error_code_resnotfound',
'error_code_runtimeassertion',
'error_code',
'error_msg_aborted',
'error_msg_dividebyzero',
'error_msg_filenotfound',
'error_msg_invalidparameter',
'error_msg_methodnotfound',
'error_msg_networkerror',
'error_msg_noerror',
'error_msg_resnotfound',
'error_msg_runtimeassertion',
'error_msg',
'error_obj',
'error_pop',
'error_push',
'error_reset',
'error_stack',
'escape_tag',
'evdns_resolve_ipv4',
'evdns_resolve_ipv6',
'evdns_resolve_reverse_ipv6',
'evdns_resolve_reverse',
'ew',
'fail_if',
'fail_ifnot',
'fail_now',
'fail',
'failure_clear',
'fastcgi_createfcgirequest',
'fastcgi_handlecon',
'fastcgi_handlereq',
'fastcgi_initialize',
'fastcgi_initiate_request',
'fcgi_abort_request',
'fcgi_authorize',
'fcgi_begin_request',
'fcgi_bodychunksize',
'fcgi_cant_mpx_conn',
'fcgi_data',
'fcgi_end_request',
'fcgi_filter',
'fcgi_get_values_result',
'fcgi_get_values',
'fcgi_keep_conn',
'fcgi_makeendrequestbody',
'fcgi_makestdoutbody',
'fcgi_max_conns',
'fcgi_max_reqs',
'fcgi_mpxs_conns',
'fcgi_null_request_id',
'fcgi_overloaded',
'fcgi_params',
'fcgi_read_timeout_seconds',
'fcgi_readparam',
'fcgi_request_complete',
'fcgi_responder',
'fcgi_stderr',
'fcgi_stdin',
'fcgi_stdout',
'fcgi_unknown_role',
'fcgi_unknown_type',
'fcgi_version_1',
'fcgi_x_stdin',
'field_name',
'field_names',
'field',
'file_copybuffersize',
'file_defaultencoding',
'file_forceroot',
'file_modechar',
'file_modeline',
'file_stderr',
'file_stdin',
'file_stdout',
'file_tempfile',
'filemakerds_initialize',
'filemakerds',
'found_count',
'ft',
'ftp_deletefile',
'ftp_getdata',
'ftp_getfile',
'ftp_getlisting',
'ftp_putdata',
'ftp_putfile',
'full',
'generateforeach',
'gt',
'gte',
'handle_failure',
'handle',
'hash_primes',
'html_comment',
'http_char_colon',
'http_char_cr',
'http_char_htab',
'http_char_lf',
'http_char_question',
'http_char_space',
'http_default_files',
'http_read_headers',
'http_read_timeout_secs',
'http_server_apps_path',
'http_server_request_logger',
'if_empty',
'if_false',
'if_null',
'if_true',
'include_cache_compare',
'include_currentpath',
'include_filepath',
'include_localpath',
'include_once',
'include_path',
'include_raw',
'include_url',
'include',
'includes',
'inline_colinfo_name_pos',
'inline_colinfo_type_pos',
'inline_colinfo_valuelist_pos',
'inline_columninfo_pos',
'inline_foundcount_pos',
'inline_namedget',
'inline_namedput',
'inline_resultrows_pos',
'inline_scopeget',
'inline_scopepop',
'inline_scopepush',
'inline',
'integer_bitor',
'integer_random',
'io_dir_dt_blk',
'io_dir_dt_chr',
'io_dir_dt_dir',
'io_dir_dt_fifo',
'io_dir_dt_lnk',
'io_dir_dt_reg',
'io_dir_dt_sock',
'io_dir_dt_unknown',
'io_dir_dt_wht',
'io_file_access',
'io_file_chdir',
'io_file_chmod',
'io_file_chown',
'io_file_dirname',
'io_file_f_dupfd',
'io_file_f_getfd',
'io_file_f_getfl',
'io_file_f_getlk',
'io_file_f_rdlck',
'io_file_f_setfd',
'io_file_f_setfl',
'io_file_f_setlk',
'io_file_f_setlkw',
'io_file_f_test',
'io_file_f_tlock',
'io_file_f_ulock',
'io_file_f_unlck',
'io_file_f_wrlck',
'io_file_fd_cloexec',
'io_file_fioasync',
'io_file_fioclex',
'io_file_fiodtype',
'io_file_fiogetown',
'io_file_fionbio',
'io_file_fionclex',
'io_file_fionread',
'io_file_fiosetown',
'io_file_getcwd',
'io_file_lchown',
'io_file_link',
'io_file_lockf',
'io_file_lstat_atime',
'io_file_lstat_mode',
'io_file_lstat_mtime',
'io_file_lstat_size',
'io_file_mkdir',
'io_file_mkfifo',
'io_file_mkstemp',
'io_file_o_append',
'io_file_o_async',
'io_file_o_creat',
'io_file_o_excl',
'io_file_o_exlock',
'io_file_o_fsync',
'io_file_o_nofollow',
'io_file_o_nonblock',
'io_file_o_rdonly',
'io_file_o_rdwr',
'io_file_o_shlock',
'io_file_o_sync',
'io_file_o_trunc',
'io_file_o_wronly',
'io_file_pipe',
'io_file_readlink',
'io_file_realpath',
'io_file_remove',
'io_file_rename',
'io_file_rmdir',
'io_file_s_ifblk',
'io_file_s_ifchr',
'io_file_s_ifdir',
'io_file_s_ififo',
'io_file_s_iflnk',
'io_file_s_ifmt',
'io_file_s_ifreg',
'io_file_s_ifsock',
'io_file_s_irgrp',
'io_file_s_iroth',
'io_file_s_irusr',
'io_file_s_irwxg',
'io_file_s_irwxo',
'io_file_s_irwxu',
'io_file_s_isgid',
'io_file_s_isuid',
'io_file_s_isvtx',
'io_file_s_iwgrp',
'io_file_s_iwoth',
'io_file_s_iwusr',
'io_file_s_ixgrp',
'io_file_s_ixoth',
'io_file_s_ixusr',
'io_file_seek_cur',
'io_file_seek_end',
'io_file_seek_set',
'io_file_stat_atime',
'io_file_stat_mode',
'io_file_stat_mtime',
'io_file_stat_size',
'io_file_stderr',
'io_file_stdin',
'io_file_stdout',
'io_file_symlink',
'io_file_tempnam',
'io_file_truncate',
'io_file_umask',
'io_file_unlink',
'io_net_accept',
'io_net_af_inet',
'io_net_af_inet6',
'io_net_af_unix',
'io_net_bind',
'io_net_connect',
'io_net_getpeername',
'io_net_getsockname',
'io_net_ipproto_ip',
'io_net_ipproto_udp',
'io_net_listen',
'io_net_msg_oob',
'io_net_msg_peek',
'io_net_msg_waitall',
'io_net_recv',
'io_net_recvfrom',
'io_net_send',
'io_net_sendto',
'io_net_shut_rd',
'io_net_shut_rdwr',
'io_net_shut_wr',
'io_net_shutdown',
'io_net_so_acceptconn',
'io_net_so_broadcast',
'io_net_so_debug',
'io_net_so_dontroute',
'io_net_so_error',
'io_net_so_keepalive',
'io_net_so_linger',
'io_net_so_oobinline',
'io_net_so_rcvbuf',
'io_net_so_rcvlowat',
'io_net_so_rcvtimeo',
'io_net_so_reuseaddr',
'io_net_so_sndbuf',
'io_net_so_sndlowat',
'io_net_so_sndtimeo',
'io_net_so_timestamp',
'io_net_so_type',
'io_net_so_useloopback',
'io_net_sock_dgram',
'io_net_sock_raw',
'io_net_sock_rdm',
'io_net_sock_seqpacket',
'io_net_sock_stream',
'io_net_socket',
'io_net_sol_socket',
'io_net_ssl_accept',
'io_net_ssl_begin',
'io_net_ssl_connect',
'io_net_ssl_end',
'io_net_ssl_error',
'io_net_ssl_errorstring',
'io_net_ssl_funcerrorstring',
'io_net_ssl_liberrorstring',
'io_net_ssl_read',
'io_net_ssl_reasonerrorstring',
'io_net_ssl_setacceptstate',
'io_net_ssl_setconnectstate',
'io_net_ssl_setverifylocations',
'io_net_ssl_shutdown',
'io_net_ssl_usecertificatechainfile',
'io_net_ssl_useprivatekeyfile',
'io_net_ssl_write',
'java_jvm_create',
'java_jvm_getenv',
'jdbc_initialize',
'json_back_slash',
'json_back_space',
'json_close_array',
'json_close_object',
'json_colon',
'json_comma',
'json_consume_array',
'json_consume_object',
'json_consume_string',
'json_consume_token',
'json_cr',
'json_debug',
'json_deserialize',
'json_e_lower',
'json_e_upper',
'json_f_lower',
'json_form_feed',
'json_forward_slash',
'json_lf',
'json_n_lower',
'json_negative',
'json_open_array',
'json_open_object',
'json_period',
'json_positive',
'json_quote_double',
'json_rpccall',
'json_serialize',
'json_t_lower',
'json_tab',
'json_white_space',
'keycolumn_name',
'keycolumn_value',
'keyfield_name',
'keyfield_value',
'lasso_currentaction',
'lasso_errorreporting',
'lasso_executiontimelimit',
'lasso_methodexists',
'lasso_tagexists',
'lasso_uniqueid',
'lasso_version',
'lassoapp_current_app',
'lassoapp_current_include',
'lassoapp_do_with_include',
'lassoapp_exists',
'lassoapp_find_missing_file',
'lassoapp_format_mod_date',
'lassoapp_get_capabilities_name',
'lassoapp_include_current',
'lassoapp_include',
'lassoapp_initialize_db',
'lassoapp_initialize',
'lassoapp_invoke_resource',
'lassoapp_issourcefileextension',
'lassoapp_link',
'lassoapp_load_module',
'lassoapp_mime_get',
'lassoapp_mime_type_appcache',
'lassoapp_mime_type_css',
'lassoapp_mime_type_csv',
'lassoapp_mime_type_doc',
'lassoapp_mime_type_docx',
'lassoapp_mime_type_eof',
'lassoapp_mime_type_eot',
'lassoapp_mime_type_gif',
'lassoapp_mime_type_html',
'lassoapp_mime_type_ico',
'lassoapp_mime_type_jpg',
'lassoapp_mime_type_js',
'lassoapp_mime_type_lasso',
'lassoapp_mime_type_map',
'lassoapp_mime_type_pdf',
'lassoapp_mime_type_png',
'lassoapp_mime_type_ppt',
'lassoapp_mime_type_rss',
'lassoapp_mime_type_svg',
'lassoapp_mime_type_swf',
'lassoapp_mime_type_tif',
'lassoapp_mime_type_ttf',
'lassoapp_mime_type_txt',
'lassoapp_mime_type_woff',
'lassoapp_mime_type_xaml',
'lassoapp_mime_type_xap',
'lassoapp_mime_type_xbap',
'lassoapp_mime_type_xhr',
'lassoapp_mime_type_xml',
'lassoapp_mime_type_zip',
'lassoapp_path_to_method_name',
'lassoapp_settingsdb',
'layout_name',
'lcapi_datasourceadd',
'lcapi_datasourcecloseconnection',
'lcapi_datasourcedelete',
'lcapi_datasourceduplicate',
'lcapi_datasourceexecsql',
'lcapi_datasourcefindall',
'lcapi_datasourceimage',
'lcapi_datasourceinfo',
'lcapi_datasourceinit',
'lcapi_datasourcematchesname',
'lcapi_datasourcenames',
'lcapi_datasourcenothing',
'lcapi_datasourceopand',
'lcapi_datasourceopany',
'lcapi_datasourceopbw',
'lcapi_datasourceopct',
'lcapi_datasourceopeq',
'lcapi_datasourceopew',
'lcapi_datasourceopft',
'lcapi_datasourceopgt',
'lcapi_datasourceopgteq',
'lcapi_datasourceopin',
'lcapi_datasourceoplt',
'lcapi_datasourceoplteq',
'lcapi_datasourceopnbw',
'lcapi_datasourceopnct',
'lcapi_datasourceopneq',
'lcapi_datasourceopnew',
'lcapi_datasourceopnin',
'lcapi_datasourceopno',
'lcapi_datasourceopnot',
'lcapi_datasourceopnrx',
'lcapi_datasourceopor',
'lcapi_datasourceoprx',
'lcapi_datasourcepreparesql',
'lcapi_datasourceprotectionnone',
'lcapi_datasourceprotectionreadonly',
'lcapi_datasourcerandom',
'lcapi_datasourceschemanames',
'lcapi_datasourcescripts',
'lcapi_datasourcesearch',
'lcapi_datasourcesortascending',
'lcapi_datasourcesortcustom',
'lcapi_datasourcesortdescending',
'lcapi_datasourcetablenames',
'lcapi_datasourceterm',
'lcapi_datasourcetickle',
'lcapi_datasourcetypeblob',
'lcapi_datasourcetypeboolean',
'lcapi_datasourcetypedate',
'lcapi_datasourcetypedecimal',
'lcapi_datasourcetypeinteger',
'lcapi_datasourcetypestring',
'lcapi_datasourceunpreparesql',
'lcapi_datasourceupdate',
'lcapi_fourchartointeger',
'lcapi_listdatasources',
'lcapi_loadmodule',
'lcapi_loadmodules',
'lcapi_updatedatasourceslist',
'ldap_scope_base',
'ldap_scope_children',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'library_once',
'library',
'ljapi_initialize',
'locale_availablelocales',
'locale_canada',
'locale_canadafrench',
'locale_china',
'locale_chinese',
'locale_default',
'locale_english',
'locale_format_style_date_time',
'locale_format_style_default',
'locale_format_style_full',
'locale_format_style_long',
'locale_format_style_medium',
'locale_format_style_none',
'locale_format_style_short',
'locale_format',
'locale_france',
'locale_french',
'locale_german',
'locale_germany',
'locale_isocountries',
'locale_isolanguages',
'locale_italian',
'locale_italy',
'locale_japan',
'locale_japanese',
'locale_korea',
'locale_korean',
'locale_prc',
'locale_setdefault',
'locale_simplifiedchinese',
'locale_taiwan',
'locale_traditionalchinese',
'locale_uk',
'locale_us',
'log_always',
'log_critical',
'log_deprecated',
'log_destination_console',
'log_destination_database',
'log_destination_file',
'log_detail',
'log_initialize',
'log_level_critical',
'log_level_deprecated',
'log_level_detail',
'log_level_sql',
'log_level_warning',
'log_max_file_size',
'log_setdestination',
'log_sql',
'log_trim_file_size',
'log_warning',
'log',
'loop_abort',
'loop_continue',
'loop_count',
'loop_key_pop',
'loop_key_push',
'loop_key',
'loop_pop',
'loop_push',
'loop_value_pop',
'loop_value_push',
'loop_value',
'loop',
'lt',
'lte',
'main_thread_only',
'max',
'maxrecords_value',
'median',
'method_name',
'micros',
'millis',
'min',
'minimal',
'mongo_insert_continue_on_error',
'mongo_insert_no_validate',
'mongo_insert_none',
'mongo_query_await_data',
'mongo_query_exhaust',
'mongo_query_no_cursor_timeout',
'mongo_query_none',
'mongo_query_oplog_replay',
'mongo_query_partial',
'mongo_query_slave_ok',
'mongo_query_tailable_cursor',
'mongo_remove_none',
'mongo_remove_single_remove',
'mongo_update_multi_update',
'mongo_update_no_validate',
'mongo_update_none',
'mongo_update_upsert',
'mustache_compile_file',
'mustache_compile_string',
'mustache_include',
'mysqlds',
'namespace_global',
'namespace_import',
'namespace_using',
'nbw',
'ncn',
'neq',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'new',
'none',
'nrx',
'nslookup',
'odbc_session_driver_mssql',
'odbc',
'output_none',
'output',
'pdf_package',
'pdf_rectangle',
'pdf_serve',
'pi',
'portal',
'postgresql',
'process',
'protect_now',
'protect',
'queriable_average',
'queriable_defaultcompare',
'queriable_do',
'queriable_internal_combinebindings',
'queriable_max',
'queriable_min',
'queriable_qsort',
'queriable_reversecompare',
'queriable_sum',
'random_seed',
'range',
'records_array',
'records_map',
'records',
'redirect_url',
'referer_url',
'referrer_url',
'register_thread',
'register',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'response_root',
'resultset_count',
'resultset',
'resultsets',
'rows_array',
'rows_impl',
'rows',
'rx',
'schema_name',
'security_database',
'security_default_realm',
'security_initialize',
'security_table_groups',
'security_table_ug_map',
'security_table_users',
'selected',
'series',
'server_admin',
'server_ip',
'server_name',
'server_port',
'server_protocol',
'server_push',
'server_signature',
'server_software',
'session_abort',
'session_addvar',
'session_decorate',
'session_deleteexpired',
'session_end',
'session_getdefaultdriver',
'session_id',
'session_initialize',
'session_removevar',
'session_result',
'session_setdefaultdriver',
'session_start',
'shown_count',
'shown_first',
'shown_last',
'site_id',
'site_name',
'skiprecords_value',
'sleep',
'split_thread',
'sqlite_abort',
'sqlite_auth',
'sqlite_blob',
'sqlite_busy',
'sqlite_cantopen',
'sqlite_constraint',
'sqlite_corrupt',
'sqlite_createdb',
'sqlite_done',
'sqlite_empty',
'sqlite_error',
'sqlite_float',
'sqlite_format',
'sqlite_full',
'sqlite_integer',
'sqlite_internal',
'sqlite_interrupt',
'sqlite_ioerr',
'sqlite_locked',
'sqlite_mismatch',
'sqlite_misuse',
'sqlite_nolfs',
'sqlite_nomem',
'sqlite_notadb',
'sqlite_notfound',
'sqlite_null',
'sqlite_ok',
'sqlite_perm',
'sqlite_protocol',
'sqlite_range',
'sqlite_readonly',
'sqlite_row',
'sqlite_schema',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'sqlite_text',
'sqlite_toobig',
'sqliteconnector',
'staticarray_join',
'stdout',
'stdoutnl',
'string_validcharset',
'suspend',
'sys_appspath',
'sys_chroot',
'sys_clock',
'sys_clockspersec',
'sys_credits',
'sys_databasespath',
'sys_detach_exec',
'sys_difftime',
'sys_dll_ext',
'sys_drand48',
'sys_environ',
'sys_eol',
'sys_erand48',
'sys_errno',
'sys_exec_pid_to_os_pid',
'sys_exec',
'sys_exit',
'sys_fork',
'sys_garbagecollect',
'sys_getbytessincegc',
'sys_getchar',
'sys_getegid',
'sys_getenv',
'sys_geteuid',
'sys_getgid',
'sys_getgrnam',
'sys_getheapfreebytes',
'sys_getheapsize',
'sys_getlogin',
'sys_getpid',
'sys_getppid',
'sys_getpwnam',
'sys_getpwuid',
'sys_getstartclock',
'sys_getthreadcount',
'sys_getuid',
'sys_growheapby',
'sys_homepath',
'sys_is_full_path',
'sys_is_windows',
'sys_isfullpath',
'sys_iswindows',
'sys_iterate',
'sys_jrand48',
'sys_kill_exec',
'sys_kill',
'sys_lcong48',
'sys_librariespath',
'sys_listtraits',
'sys_listtypes',
'sys_listunboundmethods',
'sys_loadlibrary',
'sys_lrand48',
'sys_masterhomepath',
'sys_mrand48',
'sys_nrand48',
'sys_pid_exec',
'sys_pointersize',
'sys_rand',
'sys_random',
'sys_seed48',
'sys_setenv',
'sys_setgid',
'sys_setsid',
'sys_setuid',
'sys_sigabrt',
'sys_sigalrm',
'sys_sigbus',
'sys_sigchld',
'sys_sigcont',
'sys_sigfpe',
'sys_sighup',
'sys_sigill',
'sys_sigint',
'sys_sigkill',
'sys_sigpipe',
'sys_sigprof',
'sys_sigquit',
'sys_sigsegv',
'sys_sigstop',
'sys_sigsys',
'sys_sigterm',
'sys_sigtrap',
'sys_sigtstp',
'sys_sigttin',
'sys_sigttou',
'sys_sigurg',
'sys_sigusr1',
'sys_sigusr2',
'sys_sigvtalrm',
'sys_sigxcpu',
'sys_sigxfsz',
'sys_srand',
'sys_srand48',
'sys_srandom',
'sys_strerror',
'sys_supportpath',
'sys_test_exec',
'sys_time',
'sys_uname',
'sys_unsetenv',
'sys_usercapimodulepath',
'sys_userstartuppath',
'sys_version',
'sys_wait_exec',
'sys_waitpid',
'sys_wcontinued',
'sys_while',
'sys_wnohang',
'sys_wuntraced',
'table_name',
'tag_exists',
'tag_name',
'thread_var_get',
'thread_var_pop',
'thread_var_push',
'threadvar_find',
'threadvar_get',
'threadvar_set_asrt',
'threadvar_set',
'timer',
'token_value',
'treemap',
'u_lb_alphabetic',
'u_lb_ambiguous',
'u_lb_break_after',
'u_lb_break_before',
'u_lb_break_both',
'u_lb_break_symbols',
'u_lb_carriage_return',
'u_lb_close_punctuation',
'u_lb_combining_mark',
'u_lb_complex_context',
'u_lb_contingent_break',
'u_lb_exclamation',
'u_lb_glue',
'u_lb_h2',
'u_lb_h3',
'u_lb_hyphen',
'u_lb_ideographic',
'u_lb_infix_numeric',
'u_lb_inseparable',
'u_lb_jl',
'u_lb_jt',
'u_lb_jv',
'u_lb_line_feed',
'u_lb_mandatory_break',
'u_lb_next_line',
'u_lb_nonstarter',
'u_lb_numeric',
'u_lb_open_punctuation',
'u_lb_postfix_numeric',
'u_lb_prefix_numeric',
'u_lb_quotation',
'u_lb_space',
'u_lb_surrogate',
'u_lb_unknown',
'u_lb_word_joiner',
'u_lb_zwspace',
'u_nt_decimal',
'u_nt_digit',
'u_nt_none',
'u_nt_numeric',
'u_sb_aterm',
'u_sb_close',
'u_sb_format',
'u_sb_lower',
'u_sb_numeric',
'u_sb_oletter',
'u_sb_other',
'u_sb_sep',
'u_sb_sp',
'u_sb_sterm',
'u_sb_upper',
'u_wb_aletter',
'u_wb_extendnumlet',
'u_wb_format',
'u_wb_katakana',
'u_wb_midletter',
'u_wb_midnum',
'u_wb_numeric',
'u_wb_other',
'ucal_ampm',
'ucal_dayofmonth',
'ucal_dayofweek',
'ucal_dayofweekinmonth',
'ucal_dayofyear',
'ucal_daysinfirstweek',
'ucal_dowlocal',
'ucal_dstoffset',
'ucal_era',
'ucal_extendedyear',
'ucal_firstdayofweek',
'ucal_hour',
'ucal_hourofday',
'ucal_julianday',
'ucal_lenient',
'ucal_listtimezones',
'ucal_millisecond',
'ucal_millisecondsinday',
'ucal_minute',
'ucal_month',
'ucal_second',
'ucal_weekofmonth',
'ucal_weekofyear',
'ucal_year',
'ucal_yearwoy',
'ucal_zoneoffset',
'uchar_age',
'uchar_alphabetic',
'uchar_ascii_hex_digit',
'uchar_bidi_class',
'uchar_bidi_control',
'uchar_bidi_mirrored',
'uchar_bidi_mirroring_glyph',
'uchar_block',
'uchar_canonical_combining_class',
'uchar_case_folding',
'uchar_case_sensitive',
'uchar_dash',
'uchar_decomposition_type',
'uchar_default_ignorable_code_point',
'uchar_deprecated',
'uchar_diacritic',
'uchar_east_asian_width',
'uchar_extender',
'uchar_full_composition_exclusion',
'uchar_general_category_mask',
'uchar_general_category',
'uchar_grapheme_base',
'uchar_grapheme_cluster_break',
'uchar_grapheme_extend',
'uchar_grapheme_link',
'uchar_hangul_syllable_type',
'uchar_hex_digit',
'uchar_hyphen',
'uchar_id_continue',
'uchar_ideographic',
'uchar_ids_binary_operator',
'uchar_ids_trinary_operator',
'uchar_iso_comment',
'uchar_join_control',
'uchar_joining_group',
'uchar_joining_type',
'uchar_lead_canonical_combining_class',
'uchar_line_break',
'uchar_logical_order_exception',
'uchar_lowercase_mapping',
'uchar_lowercase',
'uchar_math',
'uchar_name',
'uchar_nfc_inert',
'uchar_nfc_quick_check',
'uchar_nfd_inert',
'uchar_nfd_quick_check',
'uchar_nfkc_inert',
'uchar_nfkc_quick_check',
'uchar_nfkd_inert',
'uchar_nfkd_quick_check',
'uchar_noncharacter_code_point',
'uchar_numeric_type',
'uchar_numeric_value',
'uchar_pattern_syntax',
'uchar_pattern_white_space',
'uchar_posix_alnum',
'uchar_posix_blank',
'uchar_posix_graph',
'uchar_posix_print',
'uchar_posix_xdigit',
'uchar_quotation_mark',
'uchar_radical',
'uchar_s_term',
'uchar_script',
'uchar_segment_starter',
'uchar_sentence_break',
'uchar_simple_case_folding',
'uchar_simple_lowercase_mapping',
'uchar_simple_titlecase_mapping',
'uchar_simple_uppercase_mapping',
'uchar_soft_dotted',
'uchar_terminal_punctuation',
'uchar_titlecase_mapping',
'uchar_trail_canonical_combining_class',
'uchar_unicode_1_name',
'uchar_unified_ideograph',
'uchar_uppercase_mapping',
'uchar_uppercase',
'uchar_variation_selector',
'uchar_white_space',
'uchar_word_break',
'uchar_xid_continue',
'uncompress',
'usage',
'uuid_compare',
'uuid_copy',
'uuid_generate_random',
'uuid_generate_time',
'uuid_generate',
'uuid_is_null',
'uuid_parse',
'uuid_unparse_lower',
'uuid_unparse_upper',
'uuid_unparse',
'value_list',
'value_listitem',
'valuelistitem',
'var_keys',
'var_values',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxcolumns',
'wap_maxhorzpixels',
'wap_maxrows',
'wap_maxvertpixels',
'web_handlefcgirequest',
'web_node_content_representation_css',
'web_node_content_representation_html',
'web_node_content_representation_js',
'web_node_content_representation_xhr',
'web_node_forpath',
'web_nodes_initialize',
'web_nodes_normalizeextension',
'web_nodes_processcontentnode',
'web_nodes_requesthandler',
'web_response_nodesentry',
'web_router_database',
'web_router_initialize',
'websocket_handler_timeout',
'wexitstatus',
'wifcontinued',
'wifexited',
'wifsignaled',
'wifstopped',
'wstopsig',
'wtermsig',
'xml_transform',
'xml',
'zip_add_dir',
'zip_add',
'zip_checkcons',
'zip_close',
'zip_cm_bzip2',
'zip_cm_default',
'zip_cm_deflate',
'zip_cm_deflate64',
'zip_cm_implode',
'zip_cm_pkware_implode',
'zip_cm_reduce_1',
'zip_cm_reduce_2',
'zip_cm_reduce_3',
'zip_cm_reduce_4',
'zip_cm_shrink',
'zip_cm_store',
'zip_create',
'zip_delete',
'zip_em_3des_112',
'zip_em_3des_168',
'zip_em_aes_128',
'zip_em_aes_192',
'zip_em_aes_256',
'zip_em_des',
'zip_em_none',
'zip_em_rc2_old',
'zip_em_rc2',
'zip_em_rc4',
'zip_em_trad_pkware',
'zip_em_unknown',
'zip_er_changed',
'zip_er_close',
'zip_er_compnotsupp',
'zip_er_crc',
'zip_er_deleted',
'zip_er_eof',
'zip_er_exists',
'zip_er_incons',
'zip_er_internal',
'zip_er_inval',
'zip_er_memory',
'zip_er_multidisk',
'zip_er_noent',
'zip_er_nozip',
'zip_er_ok',
'zip_er_open',
'zip_er_read',
'zip_er_remove',
'zip_er_rename',
'zip_er_seek',
'zip_er_tmpopen',
'zip_er_write',
'zip_er_zipclosed',
'zip_er_zlib',
'zip_error_get_sys_type',
'zip_error_get',
'zip_error_to_str',
'zip_et_none',
'zip_et_sys',
'zip_et_zlib',
'zip_excl',
'zip_fclose',
'zip_file_error_get',
'zip_file_strerror',
'zip_fl_compressed',
'zip_fl_nocase',
'zip_fl_nodir',
'zip_fl_unchanged',
'zip_fopen_index',
'zip_fopen',
'zip_fread',
'zip_get_archive_comment',
'zip_get_file_comment',
'zip_get_name',
'zip_get_num_files',
'zip_name_locate',
'zip_open',
'zip_rename',
'zip_replace',
'zip_set_archive_comment',
'zip_set_file_comment',
'zip_stat_index',
'zip_stat',
'zip_strerror',
'zip_unchange_all',
'zip_unchange_archive',
'zip_unchange',
'zlib_version',
),
'Lasso 8 Tags': (
'__char',
'__sync_timestamp__',
'_admin_addgroup',
'_admin_adduser',
'_admin_defaultconnector',
'_admin_defaultconnectornames',
'_admin_defaultdatabase',
'_admin_defaultfield',
'_admin_defaultgroup',
'_admin_defaulthost',
'_admin_defaulttable',
'_admin_defaultuser',
'_admin_deleteconnector',
'_admin_deletedatabase',
'_admin_deletefield',
'_admin_deletegroup',
'_admin_deletehost',
'_admin_deletetable',
'_admin_deleteuser',
'_admin_duplicategroup',
'_admin_internaldatabase',
'_admin_listconnectors',
'_admin_listdatabases',
'_admin_listfields',
'_admin_listgroups',
'_admin_listhosts',
'_admin_listtables',
'_admin_listusers',
'_admin_refreshconnector',
'_admin_refreshsecurity',
'_admin_servicepath',
'_admin_updateconnector',
'_admin_updatedatabase',
'_admin_updatefield',
'_admin_updategroup',
'_admin_updatehost',
'_admin_updatetable',
'_admin_updateuser',
'_chartfx_activation_string',
'_chartfx_getchallengestring',
'_chop_args',
'_chop_mimes',
'_client_addr_old',
'_client_address_old',
'_client_ip_old',
'_database_names',
'_datasource_reload',
'_date_current',
'_date_format',
'_date_msec',
'_date_parse',
'_execution_timelimit',
'_file_chmod',
'_initialize',
'_jdbc_acceptsurl',
'_jdbc_debug',
'_jdbc_deletehost',
'_jdbc_driverclasses',
'_jdbc_driverinfo',
'_jdbc_metainfo',
'_jdbc_propertyinfo',
'_jdbc_setdriver',
'_lasso_param',
'_log_helper',
'_proc_noparam',
'_proc_withparam',
'_recursion_limit',
'_request_param',
'_security_binaryexpiration',
'_security_flushcaches',
'_security_isserialized',
'_security_serialexpiration',
'_srand',
'_strict_literals',
'_substring',
'_xmlrpc_exconverter',
'_xmlrpc_inconverter',
'_xmlrpc_xmlinconverter',
'abort',
'action_addinfo',
'action_addrecord',
'action_param',
'action_params',
'action_setfoundcount',
'action_setrecordid',
'action_settotalcount',
'action_statement',
'admin_allowedfileroots',
'admin_changeuser',
'admin_createuser',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'admin_getpref',
'admin_groupassignuser',
'admin_grouplistusers',
'admin_groupremoveuser',
'admin_lassoservicepath',
'admin_listgroups',
'admin_refreshlicensing',
'admin_refreshsecurity',
'admin_reloaddatasource',
'admin_removepref',
'admin_setpref',
'admin_userexists',
'admin_userlistgroups',
'all',
'and',
'array',
'array_iterator',
'auth',
'auth_admin',
'auth_auth',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'base64',
'bean',
'bigint',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'boolean',
'bw',
'bytes',
'cache',
'cache_delete',
'cache_empty',
'cache_exists',
'cache_fetch',
'cache_internal',
'cache_maintenance',
'cache_object',
'cache_preferences',
'cache_store',
'case',
'chartfx',
'chartfx_records',
'chartfx_serve',
'checked',
'choice_list',
'choice_listitem',
'choicelistitem',
'cipher_decrypt',
'cipher_digest',
'cipher_encrypt',
'cipher_hmac',
'cipher_keylength',
'cipher_list',
'click_text',
'client_addr',
'client_address',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_headers',
'client_ip',
'client_ipfrominteger',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_type',
'client_url',
'client_username',
'cn',
'column',
'column_name',
'column_names',
'compare_beginswith',
'compare_contains',
'compare_endswith',
'compare_equalto',
'compare_greaterthan',
'compare_greaterthanorequals',
'compare_greaterthanorequls',
'compare_lessthan',
'compare_lessthanorequals',
'compare_notbeginswith',
'compare_notcontains',
'compare_notendswith',
'compare_notequalto',
'compare_notregexp',
'compare_regexp',
'compare_strictequalto',
'compare_strictnotequalto',
'compiler_removecacheddoc',
'compiler_setdefaultparserflags',
'compress',
'content_body',
'content_encoding',
'content_header',
'content_type',
'cookie',
'cookie_set',
'curl_ftp_getfile',
'curl_ftp_getlisting',
'curl_ftp_putfile',
'curl_include_url',
'currency',
'database_changecolumn',
'database_changefield',
'database_createcolumn',
'database_createfield',
'database_createtable',
'database_fmcontainer',
'database_hostinfo',
'database_inline',
'database_name',
'database_nameitem',
'database_names',
'database_realname',
'database_removecolumn',
'database_removefield',
'database_removetable',
'database_repeating',
'database_repeating_valueitem',
'database_repeatingvalueitem',
'database_schemanameitem',
'database_schemanames',
'database_tablecolumn',
'database_tablenameitem',
'database_tablenames',
'datasource_name',
'datasource_register',
'date',
'date__date_current',
'date__date_format',
'date__date_msec',
'date__date_parse',
'date_add',
'date_date',
'date_difference',
'date_duration',
'date_format',
'date_getcurrentdate',
'date_getday',
'date_getdayofweek',
'date_gethour',
'date_getlocaltimezone',
'date_getminute',
'date_getmonth',
'date_getsecond',
'date_gettime',
'date_getyear',
'date_gmttolocal',
'date_localtogmt',
'date_maximum',
'date_minimum',
'date_msec',
'date_setformat',
'date_subtract',
'db_layoutnameitem',
'db_layoutnames',
'db_nameitem',
'db_names',
'db_tablenameitem',
'db_tablenames',
'dbi_column_names',
'dbi_field_names',
'decimal',
'decimal_setglobaldefaultprecision',
'decode_base64',
'decode_bheader',
'decode_hex',
'decode_html',
'decode_json',
'decode_qheader',
'decode_quotedprintable',
'decode_quotedprintablebytes',
'decode_url',
'decode_xml',
'decompress',
'decrypt_blowfish',
'decrypt_blowfish2',
'default',
'define_atbegin',
'define_atend',
'define_constant',
'define_prototype',
'define_tag',
'define_tagp',
'define_type',
'define_typep',
'deserialize',
'directory_directorynameitem',
'directory_lister',
'directory_nameitem',
'directorynameitem',
'dns_default',
'dns_lookup',
'dns_response',
'duration',
'else',
'email_batch',
'email_compose',
'email_digestchallenge',
'email_digestresponse',
'email_extract',
'email_findemails',
'email_immediate',
'email_merge',
'email_mxerror',
'email_mxlookup',
'email_parse',
'email_pop',
'email_queue',
'email_result',
'email_safeemail',
'email_send',
'email_smtp',
'email_status',
'email_token',
'email_translatebreakstocrlf',
'encode_base64',
'encode_bheader',
'encode_break',
'encode_breaks',
'encode_crc32',
'encode_hex',
'encode_html',
'encode_htmltoxml',
'encode_json',
'encode_qheader',
'encode_quotedprintable',
'encode_quotedprintablebytes',
'encode_set',
'encode_smart',
'encode_sql',
'encode_sql92',
'encode_stricturl',
'encode_url',
'encode_xml',
'encrypt_blowfish',
'encrypt_blowfish2',
'encrypt_crammd5',
'encrypt_hmac',
'encrypt_md5',
'eq',
'error_adderror',
'error_code',
'error_code_aborted',
'error_code_assert',
'error_code_bof',
'error_code_connectioninvalid',
'error_code_couldnotclosefile',
'error_code_couldnotcreateoropenfile',
'error_code_couldnotdeletefile',
'error_code_couldnotdisposememory',
'error_code_couldnotlockmemory',
'error_code_couldnotreadfromfile',
'error_code_couldnotunlockmemory',
'error_code_couldnotwritetofile',
'error_code_criterianotmet',
'error_code_datasourceerror',
'error_code_directoryfull',
'error_code_diskfull',
'error_code_dividebyzero',
'error_code_eof',
'error_code_failure',
'error_code_fieldrestriction',
'error_code_file',
'error_code_filealreadyexists',
'error_code_filecorrupt',
'error_code_fileinvalid',
'error_code_fileinvalidaccessmode',
'error_code_fileisclosed',
'error_code_fileisopen',
'error_code_filelocked',
'error_code_filenotfound',
'error_code_fileunlocked',
'error_code_httpfilenotfound',
'error_code_illegalinstruction',
'error_code_illegaluseoffrozeninstance',
'error_code_invaliddatabase',
'error_code_invalidfilename',
'error_code_invalidmemoryobject',
'error_code_invalidparameter',
'error_code_invalidpassword',
'error_code_invalidpathname',
'error_code_invalidusername',
'error_code_ioerror',
'error_code_loopaborted',
'error_code_memory',
'error_code_network',
'error_code_nilpointer',
'error_code_noerr',
'error_code_nopermission',
'error_code_outofmemory',
'error_code_outofstackspace',
'error_code_overflow',
'error_code_postconditionfailed',
'error_code_preconditionfailed',
'error_code_resnotfound',
'error_code_resource',
'error_code_streamreaderror',
'error_code_streamwriteerror',
'error_code_syntaxerror',
'error_code_tagnotfound',
'error_code_unknownerror',
'error_code_varnotfound',
'error_code_volumedoesnotexist',
'error_code_webactionnotsupported',
'error_code_webadderror',
'error_code_webdeleteerror',
'error_code_webmodulenotfound',
'error_code_webnosuchobject',
'error_code_webrepeatingrelatedfield',
'error_code_webrequiredfieldmissing',
'error_code_webtimeout',
'error_code_webupdateerror',
'error_columnrestriction',
'error_currenterror',
'error_databaseconnectionunavailable',
'error_databasetimeout',
'error_deleteerror',
'error_fieldrestriction',
'error_filenotfound',
'error_invaliddatabase',
'error_invalidpassword',
'error_invalidusername',
'error_modulenotfound',
'error_msg',
'error_msg_aborted',
'error_msg_assert',
'error_msg_bof',
'error_msg_connectioninvalid',
'error_msg_couldnotclosefile',
'error_msg_couldnotcreateoropenfile',
'error_msg_couldnotdeletefile',
'error_msg_couldnotdisposememory',
'error_msg_couldnotlockmemory',
'error_msg_couldnotreadfromfile',
'error_msg_couldnotunlockmemory',
'error_msg_couldnotwritetofile',
'error_msg_criterianotmet',
'error_msg_datasourceerror',
'error_msg_directoryfull',
'error_msg_diskfull',
'error_msg_dividebyzero',
'error_msg_eof',
'error_msg_failure',
'error_msg_fieldrestriction',
'error_msg_file',
'error_msg_filealreadyexists',
'error_msg_filecorrupt',
'error_msg_fileinvalid',
'error_msg_fileinvalidaccessmode',
'error_msg_fileisclosed',
'error_msg_fileisopen',
'error_msg_filelocked',
'error_msg_filenotfound',
'error_msg_fileunlocked',
'error_msg_httpfilenotfound',
'error_msg_illegalinstruction',
'error_msg_illegaluseoffrozeninstance',
'error_msg_invaliddatabase',
'error_msg_invalidfilename',
'error_msg_invalidmemoryobject',
'error_msg_invalidparameter',
'error_msg_invalidpassword',
'error_msg_invalidpathname',
'error_msg_invalidusername',
'error_msg_ioerror',
'error_msg_loopaborted',
'error_msg_memory',
'error_msg_network',
'error_msg_nilpointer',
'error_msg_noerr',
'error_msg_nopermission',
'error_msg_outofmemory',
'error_msg_outofstackspace',
'error_msg_overflow',
'error_msg_postconditionfailed',
'error_msg_preconditionfailed',
'error_msg_resnotfound',
'error_msg_resource',
'error_msg_streamreaderror',
'error_msg_streamwriteerror',
'error_msg_syntaxerror',
'error_msg_tagnotfound',
'error_msg_unknownerror',
'error_msg_varnotfound',
'error_msg_volumedoesnotexist',
'error_msg_webactionnotsupported',
'error_msg_webadderror',
'error_msg_webdeleteerror',
'error_msg_webmodulenotfound',
'error_msg_webnosuchobject',
'error_msg_webrepeatingrelatedfield',
'error_msg_webrequiredfieldmissing',
'error_msg_webtimeout',
'error_msg_webupdateerror',
'error_noerror',
'error_nopermission',
'error_norecordsfound',
'error_outofmemory',
'error_pop',
'error_push',
'error_reqcolumnmissing',
'error_reqfieldmissing',
'error_requiredcolumnmissing',
'error_requiredfieldmissing',
'error_reset',
'error_seterrorcode',
'error_seterrormessage',
'error_updateerror',
'euro',
'event_schedule',
'ew',
'fail',
'fail_if',
'false',
'field',
'field_name',
'field_names',
'file',
'file_autoresolvefullpaths',
'file_chmod',
'file_control',
'file_copy',
'file_create',
'file_creationdate',
'file_currenterror',
'file_delete',
'file_exists',
'file_getlinecount',
'file_getsize',
'file_isdirectory',
'file_listdirectory',
'file_moddate',
'file_modechar',
'file_modeline',
'file_move',
'file_openread',
'file_openreadwrite',
'file_openwrite',
'file_openwriteappend',
'file_openwritetruncate',
'file_probeeol',
'file_processuploads',
'file_read',
'file_readline',
'file_rename',
'file_serve',
'file_setsize',
'file_stream',
'file_streamcopy',
'file_uploads',
'file_waitread',
'file_waittimeout',
'file_waitwrite',
'file_write',
'find_soap_ops',
'form_param',
'found_count',
'ft',
'ftp_getfile',
'ftp_getlisting',
'ftp_putfile',
'full',
'global',
'global_defined',
'global_remove',
'global_reset',
'globals',
'gt',
'gte',
'handle',
'handle_error',
'header',
'html_comment',
'http_getfile',
'ical_alarm',
'ical_attribute',
'ical_calendar',
'ical_daylight',
'ical_event',
'ical_freebusy',
'ical_item',
'ical_journal',
'ical_parse',
'ical_standard',
'ical_timezone',
'ical_todo',
'if',
'if_empty',
'if_false',
'if_null',
'if_true',
'image',
'image_url',
'img',
'include',
'include_cgi',
'include_currentpath',
'include_once',
'include_raw',
'include_url',
'inline',
'integer',
'iterate',
'iterator',
'java',
'java_bean',
'json_records',
'json_rpccall',
'keycolumn_name',
'keycolumn_value',
'keyfield_name',
'keyfield_value',
'lasso_comment',
'lasso_currentaction',
'lasso_datasourceis',
'lasso_datasourceis4d',
'lasso_datasourceisfilemaker',
'lasso_datasourceisfilemaker7',
'lasso_datasourceisfilemaker9',
'lasso_datasourceisfilemakersa',
'lasso_datasourceisjdbc',
'lasso_datasourceislassomysql',
'lasso_datasourceismysql',
'lasso_datasourceisodbc',
'lasso_datasourceisopenbase',
'lasso_datasourceisoracle',
'lasso_datasourceispostgresql',
'lasso_datasourceisspotlight',
'lasso_datasourceissqlite',
'lasso_datasourceissqlserver',
'lasso_datasourcemodulename',
'lasso_datatype',
'lasso_disableondemand',
'lasso_errorreporting',
'lasso_executiontimelimit',
'lasso_parser',
'lasso_process',
'lasso_sessionid',
'lasso_siteid',
'lasso_siteisrunning',
'lasso_sitename',
'lasso_siterestart',
'lasso_sitestart',
'lasso_sitestop',
'lasso_tagexists',
'lasso_tagmodulename',
'lasso_uniqueid',
'lasso_updatecheck',
'lasso_uptime',
'lasso_version',
'lassoapp_create',
'lassoapp_dump',
'lassoapp_flattendir',
'lassoapp_getappdata',
'lassoapp_link',
'lassoapp_list',
'lassoapp_process',
'lassoapp_unitize',
'layout_name',
'ldap',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'ldml',
'ldml_ldml',
'library',
'library_once',
'link',
'link_currentaction',
'link_currentactionparams',
'link_currentactionurl',
'link_currentgroup',
'link_currentgroupparams',
'link_currentgroupurl',
'link_currentrecord',
'link_currentrecordparams',
'link_currentrecordurl',
'link_currentsearch',
'link_currentsearchparams',
'link_currentsearchurl',
'link_detail',
'link_detailparams',
'link_detailurl',
'link_firstgroup',
'link_firstgroupparams',
'link_firstgroupurl',
'link_firstrecord',
'link_firstrecordparams',
'link_firstrecordurl',
'link_lastgroup',
'link_lastgroupparams',
'link_lastgroupurl',
'link_lastrecord',
'link_lastrecordparams',
'link_lastrecordurl',
'link_nextgroup',
'link_nextgroupparams',
'link_nextgroupurl',
'link_nextrecord',
'link_nextrecordparams',
'link_nextrecordurl',
'link_params',
'link_prevgroup',
'link_prevgroupparams',
'link_prevgroupurl',
'link_prevrecord',
'link_prevrecordparams',
'link_prevrecordurl',
'link_setformat',
'link_url',
'list',
'list_additem',
'list_fromlist',
'list_fromstring',
'list_getitem',
'list_itemcount',
'list_iterator',
'list_removeitem',
'list_replaceitem',
'list_reverseiterator',
'list_tostring',
'literal',
'ljax_end',
'ljax_hastarget',
'ljax_include',
'ljax_start',
'ljax_target',
'local',
'local_defined',
'local_remove',
'local_reset',
'locale_format',
'locals',
'log',
'log_always',
'log_critical',
'log_deprecated',
'log_destination_console',
'log_destination_database',
'log_destination_file',
'log_detail',
'log_level_critical',
'log_level_deprecated',
'log_level_detail',
'log_level_sql',
'log_level_warning',
'log_setdestination',
'log_sql',
'log_warning',
'logicalop_value',
'logicaloperator_value',
'loop',
'loop_abort',
'loop_continue',
'loop_count',
'lt',
'lte',
'magick_image',
'map',
'map_iterator',
'match_comparator',
'match_notrange',
'match_notregexp',
'match_range',
'match_regexp',
'math_abs',
'math_acos',
'math_add',
'math_asin',
'math_atan',
'math_atan2',
'math_ceil',
'math_converteuro',
'math_cos',
'math_div',
'math_exp',
'math_floor',
'math_internal_rand',
'math_internal_randmax',
'math_internal_srand',
'math_ln',
'math_log',
'math_log10',
'math_max',
'math_min',
'math_mod',
'math_mult',
'math_pow',
'math_random',
'math_range',
'math_rint',
'math_roman',
'math_round',
'math_sin',
'math_sqrt',
'math_sub',
'math_tan',
'maxrecords_value',
'memory_session_driver',
'mime_type',
'minimal',
'misc__srand',
'misc_randomnumber',
'misc_roman',
'misc_valid_creditcard',
'mysql_session_driver',
'named_param',
'namespace_current',
'namespace_delimiter',
'namespace_exists',
'namespace_file_fullpathexists',
'namespace_global',
'namespace_import',
'namespace_load',
'namespace_page',
'namespace_unload',
'namespace_using',
'neq',
'net',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'no_default_output',
'none',
'noprocess',
'not',
'nrx',
'nslookup',
'null',
'object',
'once',
'oneoff',
'op_logicalvalue',
'operator_logicalvalue',
'option',
'or',
'os_process',
'output',
'output_none',
'pair',
'params_up',
'pdf_barcode',
'pdf_color',
'pdf_doc',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_serve',
'pdf_table',
'pdf_text',
'percent',
'portal',
'postcondition',
'precondition',
'prettyprintingnsmap',
'prettyprintingtypemap',
'priorityqueue',
'private',
'proc_convert',
'proc_convertbody',
'proc_convertone',
'proc_extract',
'proc_extractone',
'proc_find',
'proc_first',
'proc_foreach',
'proc_get',
'proc_join',
'proc_lasso',
'proc_last',
'proc_map_entry',
'proc_null',
'proc_regexp',
'proc_xml',
'proc_xslt',
'process',
'protect',
'queue',
'rand',
'randomnumber',
'raw',
'recid_value',
'record_count',
'recordcount',
'recordid_value',
'records',
'records_array',
'records_map',
'redirect_url',
'reference',
'referer',
'referer_url',
'referrer',
'referrer_url',
'regexp',
'repeating',
'repeating_valueitem',
'repeatingvalueitem',
'repetition',
'req_column',
'req_field',
'required_column',
'required_field',
'response_fileexists',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'resultset',
'resultset_count',
'return',
'return_value',
'reverseiterator',
'roman',
'row_count',
'rows',
'rows_array',
'run_children',
'rx',
'schema_name',
'scientific',
'search_args',
'search_arguments',
'search_columnitem',
'search_fielditem',
'search_operatoritem',
'search_opitem',
'search_valueitem',
'searchfielditem',
'searchoperatoritem',
'searchopitem',
'searchvalueitem',
'select',
'selected',
'self',
'serialize',
'series',
'server_date',
'server_day',
'server_ip',
'server_name',
'server_port',
'server_push',
'server_siteisrunning',
'server_sitestart',
'server_sitestop',
'server_time',
'session_abort',
'session_addoutputfilter',
'session_addvar',
'session_addvariable',
'session_deleteexpired',
'session_driver',
'session_end',
'session_id',
'session_removevar',
'session_removevariable',
'session_result',
'session_setdriver',
'session_start',
'set',
'set_iterator',
'set_reverseiterator',
'shown_count',
'shown_first',
'shown_last',
'site_atbegin',
'site_id',
'site_name',
'site_restart',
'skiprecords_value',
'sleep',
'soap_convertpartstopairs',
'soap_definetag',
'soap_info',
'soap_lastrequest',
'soap_lastresponse',
'soap_stub',
'sort_args',
'sort_arguments',
'sort_columnitem',
'sort_fielditem',
'sort_orderitem',
'sortcolumnitem',
'sortfielditem',
'sortorderitem',
'sqlite_createdb',
'sqlite_session_driver',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'srand',
'stack',
'stock_quote',
'string',
'string_charfromname',
'string_concatenate',
'string_countfields',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_fordigit',
'string_getfield',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_lowercase',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_validcharset',
'table_name',
'table_realname',
'tag',
'tag_name',
'tags',
'tags_find',
'tags_list',
'tcp_close',
'tcp_open',
'tcp_send',
'tcp_tcp_close',
'tcp_tcp_open',
'tcp_tcp_send',
'thread_abort',
'thread_atomic',
'thread_event',
'thread_exists',
'thread_getcurrentid',
'thread_getpriority',
'thread_info',
'thread_list',
'thread_lock',
'thread_pipe',
'thread_priority_default',
'thread_priority_high',
'thread_priority_low',
'thread_rwlock',
'thread_semaphore',
'thread_setpriority',
'token_value',
'total_records',
'treemap',
'treemap_iterator',
'true',
'url_rewrite',
'valid_creditcard',
'valid_date',
'valid_email',
'valid_url',
'value_list',
'value_listitem',
'valuelistitem',
'var',
'var_defined',
'var_remove',
'var_reset',
'var_set',
'variable',
'variable_defined',
'variable_set',
'variables',
'variant_count',
'vars',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxcolumns',
'wap_maxhorzpixels',
'wap_maxrows',
'wap_maxvertpixels',
'while',
'wsdl_extract',
'wsdl_getbinding',
'wsdl_getbindingforoperation',
'wsdl_getbindingoperations',
'wsdl_getmessagenamed',
'wsdl_getmessageparts',
'wsdl_getmessagetriofromporttype',
'wsdl_getopbodystyle',
'wsdl_getopbodyuse',
'wsdl_getoperation',
'wsdl_getoplocation',
'wsdl_getopmessagetypes',
'wsdl_getopsoapaction',
'wsdl_getportaddress',
'wsdl_getportsforservice',
'wsdl_getporttype',
'wsdl_getporttypeoperation',
'wsdl_getservicedocumentation',
'wsdl_getservices',
'wsdl_gettargetnamespace',
'wsdl_issoapoperation',
'wsdl_listoperations',
'wsdl_maketest',
'xml',
'xml_extract',
'xml_rpc',
'xml_rpccall',
'xml_rw',
'xml_serve',
'xml_transform',
'xml_xml',
'xml_xmlstream',
'xmlstream',
'xsd_attribute',
'xsd_blankarraybase',
'xsd_blankbase',
'xsd_buildtype',
'xsd_cache',
'xsd_checkcardinality',
'xsd_continueall',
'xsd_continueannotation',
'xsd_continueany',
'xsd_continueanyattribute',
'xsd_continueattribute',
'xsd_continueattributegroup',
'xsd_continuechoice',
'xsd_continuecomplexcontent',
'xsd_continuecomplextype',
'xsd_continuedocumentation',
'xsd_continueextension',
'xsd_continuegroup',
'xsd_continuekey',
'xsd_continuelist',
'xsd_continuerestriction',
'xsd_continuesequence',
'xsd_continuesimplecontent',
'xsd_continuesimpletype',
'xsd_continueunion',
'xsd_deserialize',
'xsd_fullyqualifyname',
'xsd_generate',
'xsd_generateblankfromtype',
'xsd_generateblanksimpletype',
'xsd_generatetype',
'xsd_getschematype',
'xsd_issimpletype',
'xsd_loadschema',
'xsd_lookupnamespaceuri',
'xsd_lookuptype',
'xsd_processany',
'xsd_processattribute',
'xsd_processattributegroup',
'xsd_processcomplextype',
'xsd_processelement',
'xsd_processgroup',
'xsd_processimport',
'xsd_processinclude',
'xsd_processschema',
'xsd_processsimpletype',
'xsd_ref',
'xsd_type',
)
}
MEMBERS = {
'Member Methods': (
'abort',
'abs',
'accept_charset',
'accept',
'acceptconnections',
'acceptdeserializedelement',
'acceptnossl',
'acceptpost',
'accesskey',
'acos',
'acosh',
'action',
'actionparams',
'active_tick',
'add',
'addatend',
'addattachment',
'addbarcode',
'addchapter',
'addcheckbox',
'addcolumninfo',
'addcombobox',
'addcomment',
'addcomponent',
'addcomponents',
'addcss',
'adddatabasetable',
'adddatasource',
'adddatasourcedatabase',
'adddatasourcehost',
'adddir',
'adddirpath',
'addendjs',
'addendjstext',
'adderror',
'addfavicon',
'addfile',
'addgroup',
'addheader',
'addhiddenfield',
'addhtmlpart',
'addimage',
'addjavascript',
'addjs',
'addjstext',
'addlist',
'addmathfunctions',
'addmember',
'addoneheaderline',
'addpage',
'addparagraph',
'addpart',
'addpasswordfield',
'addphrase',
'addpostdispatch',
'addpredispatch',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addrow',
'addsection',
'addselectlist',
'addset',
'addsubmitbutton',
'addsubnode',
'addtable',
'addtask',
'addtext',
'addtextarea',
'addtextfield',
'addtextpart',
'addtobuffer',
'addtrait',
'adduser',
'addusertogroup',
'addwarning',
'addzip',
'allocobject',
'am',
'ampm',
'annotate',
'answer',
'apop',
'append',
'appendarray',
'appendarraybegin',
'appendarrayend',
'appendbool',
'appendbytes',
'appendchar',
'appendchild',
'appendcolon',
'appendcomma',
'appenddata',
'appenddatetime',
'appenddbpointer',
'appenddecimal',
'appenddocument',
'appendimagetolist',
'appendinteger',
'appendnowutc',
'appendnull',
'appendoid',
'appendregex',
'appendreplacement',
'appendstring',
'appendtail',
'appendtime',
'applyheatcolors',
'appmessage',
'appname',
'appprefix',
'appstatus',
'arc',
'archive',
'arguments',
'argumentvalue',
'asarray',
'asarraystring',
'asasync',
'asbytes',
'ascopy',
'ascopydeep',
'asdecimal',
'asgenerator',
'asin',
'asinh',
'asinteger',
'askeyedgenerator',
'aslazystring',
'aslist',
'asraw',
'asstaticarray',
'asstring',
'asstringhex',
'asstringoct',
'asxml',
'atan',
'atan2',
'atanh',
'atend',
'atends',
'atime',
'attributecount',
'attributes',
'attrs',
'auth',
'authenticate',
'authorize',
'autocollectbuffer',
'average',
'back',
'basename',
'basepaths',
'baseuri',
'bcc',
'beginssl',
'beginswith',
'begintls',
'bestcharset',
'bind_blob',
'bind_double',
'bind_int',
'bind_null',
'bind_parameter_index',
'bind_text',
'bind',
'bindcount',
'bindone',
'bindparam',
'bitand',
'bitclear',
'bitflip',
'bitformat',
'bitnot',
'bitor',
'bitset',
'bitshiftleft',
'bitshiftright',
'bittest',
'bitxor',
'blur',
'body',
'bodybytes',
'boundary',
'bptoxml',
'bptypetostr',
'bucketnumber',
'buff',
'buildquery',
'businessdaysbetween',
'by',
'bytes',
'cachedappprefix',
'cachedroot',
'callboolean',
'callbooleanmethod',
'callbytemethod',
'callcharmethod',
'calldoublemethod',
'calledname',
'callfirst',
'callfloat',
'callfloatmethod',
'callint',
'callintmethod',
'calllongmethod',
'callnonvirtualbooleanmethod',
'callnonvirtualbytemethod',
'callnonvirtualcharmethod',
'callnonvirtualdoublemethod',
'callnonvirtualfloatmethod',
'callnonvirtualintmethod',
'callnonvirtuallongmethod',
'callnonvirtualobjectmethod',
'callnonvirtualshortmethod',
'callnonvirtualvoidmethod',
'callobject',
'callobjectmethod',
'callshortmethod',
'callsite_col',
'callsite_file',
'callsite_line',
'callstack',
'callstaticboolean',
'callstaticbooleanmethod',
'callstaticbytemethod',
'callstaticcharmethod',
'callstaticdoublemethod',
'callstaticfloatmethod',
'callstaticint',
'callstaticintmethod',
'callstaticlongmethod',
'callstaticobject',
'callstaticobjectmethod',
'callstaticshortmethod',
'callstaticstring',
'callstaticvoidmethod',
'callstring',
'callvoid',
'callvoidmethod',
'cancel',
'cap',
'capa',
'capabilities',
'capi',
'cbrt',
'cc',
'ceil',
'chardigitvalue',
'charname',
'charset',
'chartype',
'checkdebugging',
'checked',
'checkuser',
'childnodes',
'chk',
'chmod',
'choosecolumntype',
'chown',
'chunked',
'circle',
'class',
'classid',
'clear',
'clonenode',
'close',
'closepath',
'closeprepared',
'closewrite',
'code',
'codebase',
'codetype',
'colmap',
'colorspace',
'column_blob',
'column_count',
'column_decltype',
'column_double',
'column_int64',
'column_name',
'column_text',
'column_type',
'command',
'comments',
'compare',
'comparecodepointorder',
'componentdelimiter',
'components',
'composite',
'compress',
'concat',
'condtoint',
'configureds',
'configuredskeys',
'connect',
'connection',
'connectionhandler',
'connhandler',
'consume_domain',
'consume_label',
'consume_message',
'consume_rdata',
'consume_string',
'contains',
'content_disposition',
'content_transfer_encoding',
'content_type',
'content',
'contentlength',
'contents',
'contenttype',
'continuation',
'continuationpacket',
'continuationpoint',
'continuationstack',
'continue',
'contrast',
'conventionaltop',
'convert',
'cookie',
'cookies',
'cookiesarray',
'cookiesary',
'copyto',
'cos',
'cosh',
'count',
'countkeys',
'country',
'countusersbygroup',
'crc',
'create',
'createattribute',
'createattributens',
'createcdatasection',
'createcomment',
'createdocument',
'createdocumentfragment',
'createdocumenttype',
'createelement',
'createelementns',
'createentityreference',
'createindex',
'createprocessinginstruction',
'createtable',
'createtextnode',
'criteria',
'crop',
'csscontent',
'curl',
'current',
'currentfile',
'curveto',
'd',
'data',
'databasecolumnnames',
'databasecolumns',
'databasemap',
'databasename',
'datasourcecolumnnames',
'datasourcecolumns',
'datasourcemap',
'date',
'day',
'dayofmonth',
'dayofweek',
'dayofweekinmonth',
'dayofyear',
'days',
'daysbetween',
'db',
'dbtablestable',
'debug',
'declare',
'decodebase64',
'decodehex',
'decodehtml',
'decodeqp',
'decodeurl',
'decodexml',
'decompose',
'decomposeassignment',
'defaultcontentrepresentation',
'defer',
'deg2rad',
'dele',
'delete',
'deletedata',
'deleteglobalref',
'deletelocalref',
'delim',
'depth',
'dereferencepointer',
'describe',
'description',
'deserialize',
'detach',
'detectcharset',
'didinclude',
'difference',
'digit',
'dir',
'displaycountry',
'displaylanguage',
'displayname',
'displayscript',
'displayvariant',
'div',
'dns_response',
'do',
'doatbegins',
'doatends',
'doccomment',
'doclose',
'doctype',
'document',
'documentelement',
'documentroot',
'domainbody',
'done',
'dosessions',
'dowithclose',
'dowlocal',
'download',
'drawtext',
'drop',
'dropindex',
'dsdbtable',
'dshoststable',
'dsinfo',
'dst',
'dstable',
'dstoffset',
'dtdid',
'dup',
'dup2',
'each',
'eachbyte',
'eachcharacter',
'eachchild',
'eachcomponent',
'eachdir',
'eachdirpath',
'eachdirpathrecursive',
'eachentry',
'eachfile',
'eachfilename',
'eachfilepath',
'eachfilepathrecursive',
'eachkey',
'eachline',
'eachlinebreak',
'eachmatch',
'eachnode',
'eachpair',
'eachpath',
'eachpathrecursive',
'eachrow',
'eachsub',
'eachword',
'eachwordbreak',
'element',
'eligiblepath',
'eligiblepaths',
'encodebase64',
'encodehex',
'encodehtml',
'encodehtmltoxml',
'encodemd5',
'encodepassword',
'encodeqp',
'encodesql',
'encodesql92',
'encodeurl',
'encodevalue',
'encodexml',
'encoding',
'enctype',
'end',
'endjs',
'endssl',
'endswith',
'endtls',
'enhance',
'ensurestopped',
'entities',
'entry',
'env',
'equals',
'era',
'erf',
'erfc',
'err',
'errcode',
'errmsg',
'error',
'errors',
'errstack',
'escape_member',
'establisherrorstate',
'exceptioncheck',
'exceptionclear',
'exceptiondescribe',
'exceptionoccurred',
'exchange',
'execinits',
'execinstalls',
'execute',
'executelazy',
'executenow',
'exists',
'exit',
'exitcode',
'exp',
'expire',
'expireminutes',
'expiresminutes',
'expm1',
'export16bits',
'export32bits',
'export64bits',
'export8bits',
'exportas',
'exportbytes',
'exportfdf',
'exportpointerbits',
'exportsigned16bits',
'exportsigned32bits',
'exportsigned64bits',
'exportsigned8bits',
'exportstring',
'expose',
'extendedyear',
'extensiondelimiter',
'extensions',
'extract',
'extractfast',
'extractfastone',
'extractimage',
'extractone',
'f',
'fabs',
'fail',
'failnoconnectionhandler',
'family',
'fatalerror',
'fcgireq',
'fchdir',
'fchmod',
'fchown',
'fd',
'features',
'fetchdata',
'fieldnames',
'fieldposition',
'fieldstable',
'fieldtype',
'fieldvalue',
'file',
'filename',
'filenames',
'filequeue',
'fileuploads',
'fileuploadsary',
'filterinputcolumn',
'finalize',
'find',
'findall',
'findandmodify',
'findbucket',
'findcase',
'findclass',
'findcount',
'finddescendant',
'findfirst',
'findinclude',
'findinctx',
'findindex',
'findlast',
'findpattern',
'findposition',
'findsymbols',
'first',
'firstchild',
'firstcomponent',
'firstdayofweek',
'firstnode',
'fixformat',
'flags',
'fliph',
'flipv',
'floor',
'flush',
'foldcase',
'foo',
'for',
'forcedrowid',
'foreach',
'foreachaccept',
'foreachbyte',
'foreachcharacter',
'foreachchild',
'foreachday',
'foreachentry',
'foreachfile',
'foreachfilename',
'foreachkey',
'foreachline',
'foreachlinebreak',
'foreachmatch',
'foreachnode',
'foreachpair',
'foreachpathcomponent',
'foreachrow',
'foreachspool',
'foreachsub',
'foreachwordbreak',
'form',
'format',
'formatas',
'formatcontextelement',
'formatcontextelements',
'formatnumber',
'free',
'frexp',
'from',
'fromname',
'fromport',
'fromreflectedfield',
'fromreflectedmethod',
'front',
'fsync',
'ftpdeletefile',
'ftpgetlisting',
'ftruncate',
'fullpath',
'fx',
'gamma',
'gatewayinterface',
'gen',
'generatechecksum',
'get',
'getabswidth',
'getalignment',
'getappsource',
'getarraylength',
'getattr',
'getattribute',
'getattributenamespace',
'getattributenode',
'getattributenodens',
'getattributens',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getbold',
'getbooleanarrayelements',
'getbooleanarrayregion',
'getbooleanfield',
'getbordercolor',
'getborderwidth',
'getbytearrayelements',
'getbytearrayregion',
'getbytefield',
'getchararrayelements',
'getchararrayregion',
'getcharfield',
'getclass',
'getcode',
'getcolor',
'getcolumn',
'getcolumncount',
'getcolumns',
'getdatabasebyalias',
'getdatabasebyid',
'getdatabasebyname',
'getdatabasehost',
'getdatabasetable',
'getdatabasetablebyalias',
'getdatabasetablebyid',
'getdatabasetablepart',
'getdatasource',
'getdatasourcedatabase',
'getdatasourcedatabasebyid',
'getdatasourcehost',
'getdatasourceid',
'getdatasourcename',
'getdefaultstorage',
'getdoublearrayelements',
'getdoublearrayregion',
'getdoublefield',
'getelementbyid',
'getelementsbytagname',
'getelementsbytagnamens',
'getencoding',
'getface',
'getfield',
'getfieldid',
'getfile',
'getfloatarrayelements',
'getfloatarrayregion',
'getfloatfield',
'getfont',
'getformat',
'getfullfontname',
'getgroup',
'getgroupid',
'getheader',
'getheaders',
'gethostdatabase',
'gethtmlattr',
'gethtmlattrstring',
'getinclude',
'getintarrayelements',
'getintarrayregion',
'getintfield',
'getisocomment',
'getitalic',
'getlasterror',
'getlcapitype',
'getlibrary',
'getlongarrayelements',
'getlongarrayregion',
'getlongfield',
'getmargins',
'getmethodid',
'getmode',
'getnameditem',
'getnameditemns',
'getnode',
'getnumericvalue',
'getobjectarrayelement',
'getobjectclass',
'getobjectfield',
'getpadding',
'getpagenumber',
'getparts',
'getprefs',
'getpropertyvalue',
'getprowcount',
'getpsfontname',
'getrange',
'getrowcount',
'getset',
'getshortarrayelements',
'getshortarrayregion',
'getshortfield',
'getsize',
'getsortfieldspart',
'getspacing',
'getstaticbooleanfield',
'getstaticbytefield',
'getstaticcharfield',
'getstaticdoublefield',
'getstaticfieldid',
'getstaticfloatfield',
'getstaticintfield',
'getstaticlongfield',
'getstaticmethodid',
'getstaticobjectfield',
'getstaticshortfield',
'getstatus',
'getstringchars',
'getstringlength',
'getstyle',
'getsupportedencodings',
'gettablebyid',
'gettext',
'gettextalignment',
'gettextsize',
'gettrigger',
'gettype',
'getunderline',
'getuniquealiasname',
'getuser',
'getuserbykey',
'getuserid',
'getversion',
'getzipfilebytes',
'givenblock',
'gmt',
'gotconnection',
'gotfileupload',
'groupby',
'groupcolumns',
'groupcount',
'groupjoin',
'handlebreakpointget',
'handlebreakpointlist',
'handlebreakpointremove',
'handlebreakpointset',
'handlebreakpointupdate',
'handlecontextget',
'handlecontextnames',
'handlecontinuation',
'handledefinitionbody',
'handledefinitionhead',
'handledefinitionresource',
'handledevconnection',
'handleevalexpired',
'handlefeatureget',
'handlefeatureset',
'handlelassoappcontent',
'handlelassoappresponse',
'handlenested',
'handlenormalconnection',
'handlepop',
'handleresource',
'handlesource',
'handlestackget',
'handlestderr',
'handlestdin',
'handlestdout',
'handshake',
'hasattribute',
'hasattributens',
'hasattributes',
'hasbinaryproperty',
'haschildnodes',
'hasexpired',
'hasfeature',
'hasfield',
'hash',
'hashtmlattr',
'hasmethod',
'hastable',
'hastrailingcomponent',
'hasvalue',
'head',
'header',
'headerbytes',
'headers',
'headersarray',
'headersmap',
'height',
'histogram',
'home',
'host',
'hostcolumnnames',
'hostcolumnnames2',
'hostcolumns',
'hostcolumns2',
'hostdatasource',
'hostextra',
'hostid',
'hostisdynamic',
'hostmap',
'hostmap2',
'hostname',
'hostpassword',
'hostport',
'hostschema',
'hosttableencoding',
'hosttonet16',
'hosttonet32',
'hosttonet64',
'hostusername',
'hour',
'hourofampm',
'hourofday',
'hoursbetween',
'href',
'hreflang',
'htmlcontent',
'htmlizestacktrace',
'htmlizestacktracelink',
'httpaccept',
'httpacceptencoding',
'httpacceptlanguage',
'httpauthorization',
'httpcachecontrol',
'httpconnection',
'httpcookie',
'httpequiv',
'httphost',
'httpreferer',
'httpreferrer',
'httpuseragent',
'hypot',
'id',
'idealinmemory',
'idle',
'idmap',
'ifempty',
'ifkey',
'ifnotempty',
'ifnotkey',
'ignorecase',
'ilogb',
'imgptr',
'implementation',
'import16bits',
'import32bits',
'import64bits',
'import8bits',
'importas',
'importbytes',
'importfdf',
'importnode',
'importpointer',
'importstring',
'in',
'include',
'includebytes',
'includelibrary',
'includelibraryonce',
'includeonce',
'includes',
'includestack',
'indaylighttime',
'index',
'init',
'initialize',
'initrequest',
'inits',
'inneroncompare',
'input',
'inputcolumns',
'inputtype',
'insert',
'insertback',
'insertbefore',
'insertdata',
'insertfirst',
'insertfrom',
'insertfront',
'insertinternal',
'insertlast',
'insertpage',
'install',
'installs',
'integer',
'internalsubset',
'interrupt',
'intersection',
'inttocond',
'invoke',
'invokeautocollect',
'invokeuntil',
'invokewhile',
'ioctl',
'isa',
'isalive',
'isallof',
'isalnum',
'isalpha',
'isanyof',
'isbase',
'isblank',
'iscntrl',
'isdigit',
'isdir',
'isdirectory',
'isempty',
'isemptyelement',
'isfirststep',
'isfullpath',
'isgraph',
'ishttps',
'isidle',
'isinstanceof',
'islink',
'islower',
'ismultipart',
'isnan',
'isnota',
'isnotempty',
'isnothing',
'iso3country',
'iso3language',
'isopen',
'isprint',
'ispunct',
'issameobject',
'isset',
'issourcefile',
'isspace',
'isssl',
'issupported',
'istitle',
'istruetype',
'istype',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'isvalid',
'iswhitespace',
'isxdigit',
'isxhr',
'item',
'j0',
'j1',
'javascript',
'jbarcode',
'jcolor',
'jfont',
'jimage',
'jlist',
'jn',
'jobjectisa',
'join',
'jread',
'jscontent',
'jsonfornode',
'jsonhtml',
'jsonisleaf',
'jsonlabel',
'jtable',
'jtext',
'julianday',
'kernel',
'key',
'keycolumns',
'keys',
'keywords',
'kill',
'label',
'lang',
'language',
'last_insert_rowid',
'last',
'lastaccessdate',
'lastaccesstime',
'lastchild',
'lastcomponent',
'lasterror',
'lastinsertid',
'lastnode',
'lastpoint',
'lasttouched',
'lazyvalue',
'ldexp',
'leaveopen',
'left',
'length',
'lgamma',
'line',
'linediffers',
'linkto',
'linktype',
'list',
'listactivedatasources',
'listalldatabases',
'listalltables',
'listdatabasetables',
'listdatasourcedatabases',
'listdatasourcehosts',
'listdatasources',
'listen',
'listgroups',
'listgroupsbyuser',
'listhostdatabases',
'listhosts',
'listmethods',
'listnode',
'listusers',
'listusersbygroup',
'loadcerts',
'loaddatasourcehostinfo',
'loaddatasourceinfo',
'loadlibrary',
'localaddress',
'localname',
'locals',
'lock',
'log',
'log10',
'log1p',
'logb',
'lookupnamespace',
'lop',
'lowagiefont',
'lowercase',
'makecolor',
'makecolumnlist',
'makecolumnmap',
'makecookieyumyum',
'makefullpath',
'makeinheritedcopy',
'makenonrelative',
'makeurl',
'map',
'marker',
'matches',
'matchesstart',
'matchposition',
'matchstring',
'matchtriggers',
'max',
'maxinmemory',
'maxlength',
'maxrows',
'maxworkers',
'maybeslash',
'maybevalue',
'md5hex',
'media',
'members',
'merge',
'meta',
'method',
'methodname',
'millisecond',
'millisecondsinday',
'mime_boundary',
'mime_contenttype',
'mime_hdrs',
'mime',
'mimes',
'min',
'minute',
'minutesbetween',
'moddatestr',
'mode',
'modf',
'modificationdate',
'modificationtime',
'modulate',
'monitorenter',
'monitorexit',
'month',
'moveto',
'movetoattribute',
'movetoattributenamespace',
'movetoelement',
'movetofirstattribute',
'movetonextattribute',
'msg',
'mtime',
'multiple',
'n',
'name',
'named',
'namespaceuri',
'needinitialization',
'net',
'nettohost16',
'nettohost32',
'nettohost64',
'new',
'newbooleanarray',
'newbytearray',
'newchararray',
'newdoublearray',
'newfloatarray',
'newglobalref',
'newintarray',
'newlongarray',
'newobject',
'newobjectarray',
'newshortarray',
'newstring',
'next',
'nextafter',
'nextnode',
'nextprime',
'nextprune',
'nextprunedelta',
'nextsibling',
'nodeforpath',
'nodelist',
'nodename',
'nodetype',
'nodevalue',
'noop',
'normalize',
'notationname',
'notations',
'novaluelists',
'numsets',
'object',
'objects',
'objecttype',
'onclick',
'oncompare',
'oncomparestrict',
'onconvert',
'oncreate',
'ondblclick',
'onkeydown',
'onkeypress',
'onkeyup',
'onmousedown',
'onmousemove',
'onmouseout',
'onmouseover',
'onmouseup',
'onreset',
'onsubmit',
'ontop',
'open',
'openappend',
'openread',
'opentruncate',
'openwith',
'openwrite',
'openwriteonly',
'orderby',
'orderbydescending',
'out',
'output',
'outputencoding',
'ownerdocument',
'ownerelement',
'padleading',
'padtrailing',
'padzero',
'pagecount',
'pagerotation',
'pagesize',
'param',
'paramdescs',
'params',
'parent',
'parentdir',
'parentnode',
'parse_body',
'parse_boundary',
'parse_charset',
'parse_content_disposition',
'parse_content_transfer_encoding',
'parse_content_type',
'parse_hdrs',
'parse_mode',
'parse_msg',
'parse_parts',
'parse_rawhdrs',
'parse',
'parseas',
'parsedocument',
'parsenumber',
'parseoneheaderline',
'pass',
'path',
'pathinfo',
'pathtouri',
'pathtranslated',
'pause',
'payload',
'pdifference',
'perform',
'performonce',
'perms',
'pid',
'pixel',
'pm',
'polldbg',
'pollide',
'pop_capa',
'pop_cmd',
'pop_debug',
'pop_err',
'pop_get',
'pop_ids',
'pop_index',
'pop_log',
'pop_mode',
'pop_net',
'pop_res',
'pop_server',
'pop_timeout',
'pop_token',
'pop',
'popctx',
'popinclude',
'populate',
'port',
'position',
'postdispatch',
'postparam',
'postparams',
'postparamsary',
'poststring',
'pow',
'predispatch',
'prefix',
'preflight',
'prepare',
'prepared',
'pretty',
'prev',
'previoussibling',
'printsimplemsg',
'private_compare',
'private_find',
'private_findlast',
'private_merge',
'private_rebalanceforinsert',
'private_rebalanceforremove',
'private_replaceall',
'private_replacefirst',
'private_rotateleft',
'private_rotateright',
'private_setrange',
'private_split',
'probemimetype',
'provides',
'proxying',
'prune',
'publicid',
'pullhttpheader',
'pullmimepost',
'pulloneheaderline',
'pullpost',
'pullrawpost',
'pullrawpostchunks',
'pullrequest',
'pullrequestline',
'push',
'pushctx',
'pushinclude',
'qdarray',
'qdcount',
'queryparam',
'queryparams',
'queryparamsary',
'querystring',
'queue_maintenance',
'queue_messages',
'queue_status',
'queue',
'quit',
'r',
'raw',
'rawcontent',
'rawdiff',
'rawheader',
'rawheaders',
'rawinvokable',
'read',
'readattributevalue',
'readbytes',
'readbytesfully',
'readdestinations',
'readerror',
'readidobjects',
'readline',
'readmessage',
'readnumber',
'readobject',
'readobjecttcp',
'readpacket',
'readsomebytes',
'readstring',
'ready',
'realdoc',
'realpath',
'receivefd',
'recipients',
'recover',
'rect',
'rectype',
'red',
'redirectto',
'referrals',
'refid',
'refobj',
'refresh',
'rel',
'remainder',
'remoteaddr',
'remoteaddress',
'remoteport',
'remove',
'removeall',
'removeattribute',
'removeattributenode',
'removeattributens',
'removeback',
'removechild',
'removedatabasetable',
'removedatasource',
'removedatasourcedatabase',
'removedatasourcehost',
'removefield',
'removefirst',
'removefront',
'removegroup',
'removelast',
'removeleading',
'removenameditem',
'removenameditemns',
'removenode',
'removesubnode',
'removetrailing',
'removeuser',
'removeuserfromallgroups',
'removeuserfromgroup',
'rename',
'renderbytes',
'renderdocumentbytes',
'renderstring',
'replace',
'replaceall',
'replacechild',
'replacedata',
'replacefirst',
'replaceheader',
'replacepattern',
'representnode',
'representnoderesult',
'reqid',
'requestid',
'requestmethod',
'requestparams',
'requesturi',
'requires',
'reserve',
'reset',
'resize',
'resolutionh',
'resolutionv',
'resolvelinks',
'resourcedata',
'resourceinvokable',
'resourcename',
'resources',
'respond',
'restart',
'restname',
'result',
'results',
'resume',
'retr',
'retrieve',
'returncolumns',
'returntype',
'rev',
'reverse',
'rewind',
'right',
'rint',
'roll',
'root',
'rootmap',
'rotate',
'route',
'rowsfound',
'rset',
'rule',
'rules',
'run',
'running',
'runonce',
's',
'sa',
'safeexport8bits',
'sameas',
'save',
'savedata',
'scalb',
'scale',
'scanfordatasource',
'scantasks',
'scanworkers',
'schemaname',
'scheme',
'script',
'scriptextensions',
'scriptfilename',
'scriptname',
'scripttype',
'scripturi',
'scripturl',
'scrubkeywords',
'search',
'searchinbucket',
'searchurl',
'second',
'secondsbetween',
'seek',
'select',
'selected',
'selectmany',
'self',
'send',
'sendchunk',
'sendfd',
'sendfile',
'sendpacket',
'sendresponse',
'separator',
'serializationelements',
'serialize',
'serveraddr',
'serveradmin',
'servername',
'serverport',
'serverprotocol',
'serversignature',
'serversoftware',
'sessionsdump',
'sessionsmap',
'set',
'setalignment',
'setattr',
'setattribute',
'setattributenode',
'setattributenodens',
'setattributens',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setbold',
'setbooleanarrayregion',
'setbooleanfield',
'setbordercolor',
'setborderwidth',
'setbytearrayregion',
'setbytefield',
'setchararrayregion',
'setcharfield',
'setcode',
'setcolor',
'setcolorspace',
'setcookie',
'setcwd',
'setdefaultstorage',
'setdestination',
'setdoublearrayregion',
'setdoublefield',
'setencoding',
'setface',
'setfieldvalue',
'setfindpattern',
'setfloatarrayregion',
'setfloatfield',
'setfont',
'setformat',
'setgeneratechecksum',
'setheaders',
'sethtmlattr',
'setignorecase',
'setinput',
'setintarrayregion',
'setintfield',
'setitalic',
'setlinewidth',
'setlongarrayregion',
'setlongfield',
'setmarker',
'setmaxfilesize',
'setmode',
'setname',
'setnameditem',
'setnameditemns',
'setobjectarrayelement',
'setobjectfield',
'setpadding',
'setpagenumber',
'setpagerange',
'setposition',
'setrange',
'setreplacepattern',
'setshortarrayregion',
'setshortfield',
'setshowchecksum',
'setsize',
'setspacing',
'setstaticbooleanfield',
'setstaticbytefield',
'setstaticcharfield',
'setstaticdoublefield',
'setstaticfloatfield',
'setstaticintfield',
'setstaticlongfield',
'setstaticobjectfield',
'setstaticshortfield',
'setstatus',
'settextalignment',
'settextsize',
'settimezone',
'settrait',
'setunderline',
'sharpen',
'shouldabort',
'shouldclose',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'shutdownrd',
'shutdownrdwr',
'shutdownwr',
'sin',
'sinh',
'size',
'skip',
'skiprows',
'sort',
'sortcolumns',
'source',
'sourcecolumn',
'sourcefile',
'sourceline',
'specified',
'split',
'splitconnection',
'splitdebuggingthread',
'splitextension',
'splittext',
'splitthread',
'splittoprivatedev',
'splituppath',
'sql',
'sqlite3',
'sqrt',
'src',
'srcpath',
'sslerrfail',
'stack',
'standby',
'start',
'startone',
'startup',
'stat',
'statement',
'statementonly',
'stats',
'status',
'statuscode',
'statusmsg',
'stdin',
'step',
'stls',
'stop',
'stoprunning',
'storedata',
'stripfirstcomponent',
'striplastcomponent',
'style',
'styletype',
'sub',
'subject',
'subnode',
'subnodes',
'substringdata',
'subtract',
'subtraits',
'sum',
'supportscontentrepresentation',
'swapbytes',
'systemid',
't',
'tabindex',
'table',
'tablecolumnnames',
'tablecolumns',
'tablehascolumn',
'tableizestacktrace',
'tableizestacktracelink',
'tablemap',
'tablename',
'tables',
'tabs',
'tabstr',
'tag',
'tagname',
'take',
'tan',
'tanh',
'target',
'tasks',
'tb',
'tell',
'testexitcode',
'testlock',
'textwidth',
'thenby',
'thenbydescending',
'threadreaddesc',
'throw',
'thrownew',
'time',
'timezone',
'title',
'titlecase',
'to',
'token',
'tolower',
'top',
'toreflectedfield',
'toreflectedmethod',
'total_changes',
'totitle',
'touch',
'toupper',
'toxmlstring',
'trace',
'trackingid',
'trait',
'transform',
'trigger',
'trim',
'trunk',
'tryfinderrorfile',
'trylock',
'tryreadobject',
'type',
'typename',
'uidl',
'uncompress',
'unescape',
'union',
'uniqueid',
'unlock',
'unspool',
'up',
'update',
'updategroup',
'upload',
'uppercase',
'url',
'used',
'usemap',
'user',
'usercolumns',
'valid',
'validate',
'validatesessionstable',
'value',
'values',
'valuetype',
'variant',
'version',
'wait',
'waitforcompletion',
'warnings',
'week',
'weekofmonth',
'weekofyear',
'where',
'width',
'workers',
'workinginputcolumns',
'workingkeycolumns',
'workingkeyfield_name',
'workingreturncolumns',
'workingsortcolumns',
'write',
'writebodybytes',
'writebytes',
'writeheader',
'writeheaderbytes',
'writeheaderline',
'writeid',
'writemessage',
'writeobject',
'writeobjecttcp',
'writestring',
'wroteheaders',
'xhtml',
'xmllang',
'y0',
'y1',
'year',
'yearwoy',
'yn',
'z',
'zip',
'zipfile',
'zipfilename',
'zipname',
'zips',
'zoneoffset',
),
'Lasso 8 Member Tags': (
'accept',
'add',
'addattachment',
'addattribute',
'addbarcode',
'addchapter',
'addcheckbox',
'addchild',
'addcombobox',
'addcomment',
'addcontent',
'addhiddenfield',
'addhtmlpart',
'addimage',
'addjavascript',
'addlist',
'addnamespace',
'addnextsibling',
'addpage',
'addparagraph',
'addparenttype',
'addpart',
'addpasswordfield',
'addphrase',
'addprevsibling',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsibling',
'addsubmitbutton',
'addtable',
'addtext',
'addtextarea',
'addtextfield',
'addtextpart',
'alarms',
'annotate',
'answer',
'append',
'appendreplacement',
'appendtail',
'arc',
'asasync',
'astype',
'atbegin',
'atbottom',
'atend',
'atfarleft',
'atfarright',
'attop',
'attributecount',
'attributes',
'authenticate',
'authorize',
'backward',
'baseuri',
'bcc',
'beanproperties',
'beginswith',
'bind',
'bitand',
'bitclear',
'bitflip',
'bitformat',
'bitnot',
'bitor',
'bitset',
'bitshiftleft',
'bitshiftright',
'bittest',
'bitxor',
'blur',
'body',
'boundary',
'bytes',
'call',
'cancel',
'capabilities',
'cc',
'chardigitvalue',
'charname',
'charset',
'chartype',
'children',
'circle',
'close',
'closepath',
'closewrite',
'code',
'colorspace',
'command',
'comments',
'compare',
'comparecodepointorder',
'compile',
'composite',
'connect',
'contains',
'content_disposition',
'content_transfer_encoding',
'content_type',
'contents',
'contrast',
'convert',
'crop',
'curveto',
'data',
'date',
'day',
'daylights',
'dayofweek',
'dayofyear',
'decrement',
'delete',
'depth',
'describe',
'description',
'deserialize',
'detach',
'detachreference',
'difference',
'digit',
'document',
'down',
'drawtext',
'dst',
'dump',
'endswith',
'enhance',
'equals',
'errors',
'eval',
'events',
'execute',
'export16bits',
'export32bits',
'export64bits',
'export8bits',
'exportfdf',
'exportstring',
'extract',
'extractone',
'fieldnames',
'fieldtype',
'fieldvalue',
'file',
'find',
'findindex',
'findnamespace',
'findnamespacebyhref',
'findpattern',
'findposition',
'first',
'firstchild',
'fliph',
'flipv',
'flush',
'foldcase',
'foreach',
'format',
'forward',
'freebusies',
'freezetype',
'freezevalue',
'from',
'fulltype',
'generatechecksum',
'get',
'getabswidth',
'getalignment',
'getattribute',
'getattributenamespace',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getbordercolor',
'getborderwidth',
'getcode',
'getcolor',
'getcolumncount',
'getencoding',
'getface',
'getfont',
'getformat',
'getfullfontname',
'getheaders',
'getmargins',
'getmethod',
'getnumericvalue',
'getpadding',
'getpagenumber',
'getparams',
'getproperty',
'getpsfontname',
'getrange',
'getrowcount',
'getsize',
'getspacing',
'getsupportedencodings',
'gettextalignment',
'gettextsize',
'gettype',
'gmt',
'groupcount',
'hasattribute',
'haschildren',
'hasvalue',
'header',
'headers',
'height',
'histogram',
'hosttonet16',
'hosttonet32',
'hour',
'id',
'ignorecase',
'import16bits',
'import32bits',
'import64bits',
'import8bits',
'importfdf',
'importstring',
'increment',
'input',
'insert',
'insertatcurrent',
'insertfirst',
'insertfrom',
'insertlast',
'insertpage',
'integer',
'intersection',
'invoke',
'isa',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isemptyelement',
'islower',
'isopen',
'isprint',
'isspace',
'istitle',
'istruetype',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'iterator',
'javascript',
'join',
'journals',
'key',
'keys',
'last',
'lastchild',
'lasterror',
'left',
'length',
'line',
'listen',
'localaddress',
'localname',
'lock',
'lookupnamespace',
'lowercase',
'marker',
'matches',
'matchesstart',
'matchposition',
'matchstring',
'merge',
'millisecond',
'minute',
'mode',
'modulate',
'month',
'moveto',
'movetoattributenamespace',
'movetoelement',
'movetofirstattribute',
'movetonextattribute',
'name',
'namespaces',
'namespaceuri',
'nettohost16',
'nettohost32',
'newchild',
'next',
'nextsibling',
'nodetype',
'open',
'output',
'padleading',
'padtrailing',
'pagecount',
'pagesize',
'paraminfo',
'params',
'parent',
'path',
'pixel',
'position',
'prefix',
'previoussibling',
'properties',
'rawheaders',
'read',
'readattributevalue',
'readerror',
'readfrom',
'readline',
'readlock',
'readstring',
'readunlock',
'recipients',
'rect',
'refcount',
'referrals',
'remoteaddress',
'remove',
'removeall',
'removeattribute',
'removechild',
'removecurrent',
'removefirst',
'removelast',
'removeleading',
'removenamespace',
'removetrailing',
'render',
'replace',
'replaceall',
'replacefirst',
'replacepattern',
'replacewith',
'reserve',
'reset',
'resolutionh',
'resolutionv',
'response',
'results',
'retrieve',
'returntype',
'reverse',
'reverseiterator',
'right',
'rotate',
'run',
'save',
'scale',
'search',
'second',
'send',
'serialize',
'set',
'setalignment',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setblocking',
'setbordercolor',
'setborderwidth',
'setbytes',
'setcode',
'setcolor',
'setcolorspace',
'setdatatype',
'setencoding',
'setface',
'setfieldvalue',
'setfont',
'setformat',
'setgeneratechecksum',
'setheight',
'setlassodata',
'setlinewidth',
'setmarker',
'setmode',
'setname',
'setpadding',
'setpagenumber',
'setpagerange',
'setposition',
'setproperty',
'setrange',
'setshowchecksum',
'setsize',
'setspacing',
'settemplate',
'settemplatestr',
'settextalignment',
'settextdata',
'settextsize',
'settype',
'setunderline',
'setwidth',
'setxmldata',
'sharpen',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'signal',
'signalall',
'size',
'smooth',
'sort',
'sortwith',
'split',
'standards',
'steal',
'subject',
'substring',
'subtract',
'swapbytes',
'textwidth',
'time',
'timezones',
'titlecase',
'to',
'todos',
'tolower',
'totitle',
'toupper',
'transform',
'trim',
'type',
'unescape',
'union',
'uniqueid',
'unlock',
'unserialize',
'up',
'uppercase',
'value',
'values',
'valuetype',
'wait',
'waskeyword',
'week',
'width',
'write',
'writelock',
'writeto',
'writeunlock',
'xmllang',
'xmlschematype',
'year',
)
}
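# Minimal sketch of how a member-name table like MEMBERS above is typically
# consumed: flatten the per-category tuples into one set, then promote matching
# name tokens while post-processing a lexer's token stream. The helper below
# (and the names _ALL_MEMBERS / _retag_member_names) is an illustrative
# assumption, not the exact code used by the Lasso lexer itself.
_ALL_MEMBERS = {name for names in MEMBERS.values() for name in names}
def _retag_member_names(token_stream):
    """Yield (index, token, value) triples, promoting known member names."""
    from pygments.token import Name
    for index, token, value in token_stream:
        # Lasso setter forms may carry a trailing '=', hence the rstrip().
        if token is Name.Other and value.lower().rstrip('=') in _ALL_MEMBERS:
            yield index, Name.Builtin, value
        else:
            yield index, token, value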
| 134,510 | Python | 24.250798 | 70 | 0.492945 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/html.py | """
pygments.lexers.html
~~~~~~~~~~~~~~~~~~~~
Lexers for HTML, XML and related markup.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation, Whitespace
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'PugLexer']
class HtmlLexer(RegexLexer):
"""
For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
by the appropriate lexer.
"""
name = 'HTML'
url = 'https://html.spec.whatwg.org/'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'(<)(\s*)(script)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('script-content', 'tag')),
(r'(<)(\s*)(style)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('style-content', 'tag')),
# note: this allows tag names not used in HTML like <x:with-dash>,
# this is to support yet-unknown template engines and the like
(r'(<)(\s*)([\w:.-]+)',
bygroups(Punctuation, Text, Name.Tag), 'tag'),
(r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation)),
],
'tag': [
(r'\s+', Text),
(r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
'attr'),
(r'[\w:-]+', Name.Attribute),
(r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
],
'script-content': [
(r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation), '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
# fallback cases for when there is no closing script tag
# first look for newline and then go back into root state
# if that fails just read the rest of the file
# this is similar to the error handling logic in lexer.py
(r'.+?\n', using(JavascriptLexer), '#pop'),
(r'.+', using(JavascriptLexer), '#pop'),
],
'style-content': [
(r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation), '#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
# fallback cases for when there is no closing style tag
# first look for newline and then go back into root state
# if that fails just read the rest of the file
# this is similar to the error handling logic in lexer.py
(r'.+?\n', using(CssLexer), '#pop'),
(r'.+', using(CssLexer), '#pop'),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
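# Minimal usage sketch for the lexer above (illustration only): get_tokens()
# yields (tokentype, value) pairs, and <script>/<style> bodies are handed off
# to JavascriptLexer/CssLexer through the 'script-content'/'style-content'
# states. The sample markup and the helper name are assumptions.
def _demo_html_tokens():
    sample = '<p class="x">hi &amp; bye</p><script>var n = 1;</script>'
    return list(HtmlLexer().get_tokens(sample))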
class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
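# Minimal sketch of how analyse_text() above feeds lexer guessing
# (illustration only): pygments.lexers.guess_lexer() scores every registered
# lexer via analyse_text() and returns the best match, so DTD-only input
# should beat plain XML (0.8 here vs. 0.45 below). The sample string and
# helper name are assumptions.
def _demo_guess_dtd():
    from pygments.lexers import guess_lexer  # deferred to avoid import cycles
    sample = '<!ELEMENT note (to, body)>\n<!ATTLIST note id ID #REQUIRED>'
    return guess_lexer(sample).name  # expected to be 'DTD'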
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
(r'[^<&\s]+', Text),
(r'[^<&\S]+', Whitespace),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'tag': [
(r'\s+', Whitespace),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
(r'\s+', Whitespace),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
"""
A lexer for XSLT.
.. versionadded:: 0.10
"""
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = {
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
}
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
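# Minimal sketch of the override above in action (illustration only): xsl:
# element names listed in EXTRA_KEYWORDS come back tagged as Keyword, while
# every other token passes through from XmlLexer unchanged. The sample input
# and helper name are assumptions.
def _demo_xslt_keywords():
    from pygments.token import Keyword
    src = '<xsl:template match="/"><xsl:value-of select="."/></xsl:template>'
    return [v for _, t, v in XsltLexer().get_tokens_unprocessed(src)
            if t is Keyword]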
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
.. versionadded:: 1.3
"""
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(RubyLexer)),
(r'\[' + _dot + r'*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
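# Minimal sketch exercising the " |" line-wrapping handled by _comma_dot above
# (illustration only): Ruby code following '=' may be continued onto the next
# line when both lines end in " |", and the whole span is delegated to
# RubyLexer. The Haml sample and helper name are assumptions.
def _demo_haml_wrapped_ruby():
    sample = "%p= link_to 'home', |\n  root_path |\n"
    return list(HamlLexer().get_tokens(sample))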
class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
.. versionadded:: 1.4
"""
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class PugLexer(ExtendedRegexLexer):
"""
For Pug markup.
Pug is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
.. versionadded:: 1.4
"""
name = 'Pug'
aliases = ['pug', 'jade']
filenames = ['*.pug', '*.jade']
mimetypes = ['text/x-pug', 'text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
JadeLexer = PugLexer # compat
| 19,879 | Python | 31.80528 | 83 | 0.417677 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ecl.py | """
pygments.lexers.ecl
~~~~~~~~~~~~~~~~~~~
Lexers for the ECL language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ECLLexer']
class ECLLexer(RegexLexer):
"""
Lexer for the declarative big-data ECL language.
.. versionadded:: 1.5
"""
name = 'ECL'
url = 'https://hpccsystems.com/training/documentation/ecl-language-reference/html'
aliases = ['ecl']
filenames = ['*.ecl']
mimetypes = ['application/x-ecl']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
include('statements'),
],
'whitespace': [
(r'\s+', Whitespace),
(r'\/\/.*', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
],
'statements': [
include('types'),
include('keywords'),
include('functions'),
include('hash'),
(r'"', String, 'string'),
(r'\'', String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
(r'0x[0-9a-f]+[lu]*', Number.Hex),
(r'0[0-7]+[lu]*', Number.Oct),
(r'\d+[lu]*', Number.Integer),
(r'[~!%^&*+=|?:<>/-]+', Operator),
(r'[{}()\[\],.;]', Punctuation),
(r'[a-z_]\w*', Name),
],
'hash': [
(r'^#.*$', Comment.Preproc),
],
'types': [
(r'(RECORD|END)\D', Keyword.Declaration),
(r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
bygroups(Keyword.Type, Whitespace)),
],
'keywords': [
(words((
'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
'WAIT', 'WHEN'), suffix=r'\b'),
Keyword.Reserved),
# These are classed differently, check later
(words((
'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST',
'BETWEEN', 'CASE', 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT',
'ENDC++', 'ENDMACRO', 'EXCEPT', 'EXCLUSIVE', 'EXPIRE', 'EXPORT',
'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', 'FUNCTION',
'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN',
'JOINED', 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL',
'LOCALE', 'LOOKUP', 'MACRO', 'MANY', 'MAXCOUNT', 'MAXLENGTH',
'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', 'NOROOT',
'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER',
'OVERWRITE', 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH',
'PIPE', 'QUOTE', 'RELATIONSHIP', 'REPEAT', 'RETURN', 'RIGHT',
'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', 'SKIP',
'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN',
'TRANSFORM', 'TRIM', 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED',
'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', 'WITHIN', 'XML', 'XPATH',
'__COMPRESSED__'), suffix=r'\b'),
Keyword.Reserved),
],
'functions': [
(words((
'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN',
'ATAN2', 'AVE', 'CASE', 'CHOOSE', 'CHOOSEN', 'CHOOSESETS',
'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', 'COSH', 'COUNT',
'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH',
'ERROR', 'EVALUATE', 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS',
'EXP', 'FAILCODE', 'FAILMESSAGE', 'FETCH', 'FROMUNICODE',
'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX',
'INTFORMAT', 'ISVALID', 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH',
'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', 'MAP', 'MATCHED',
'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', 'MAX',
'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE',
'PARSE', 'PIPE', 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL',
'RANDOM', 'RANGE', 'RANK', 'RANKED', 'REALFORMAT', 'RECORDOF',
'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', 'ROLLUP',
'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN',
'SINH', 'SIZEOF', 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED',
'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', 'THISNODE', 'TOPN',
'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE',
'XMLENCODE', 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
Name.Function),
],
'string': [
(r'"', String, '#pop'),
(r'\'', String, '#pop'),
(r'[^"\']+', String),
],
}
    def analyse_text(text):
        """ECL is very difficult to tell apart from other business languages
        by content alone. '->' in conjunction with BEGIN/END seems relatively
        rare elsewhere, though."""
result = 0
if '->' in text:
result += 0.01
if 'BEGIN' in text:
result += 0.01
if 'END' in text:
result += 0.01
return result
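# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: exercising the
# analyse_text() heuristic above.  The probe string is artificial (it is not
# meaningful ECL); guess_lexer() is the standard Pygments helper that ranks
# registered lexers by their analyse_text() scores.
if __name__ == '__main__':
    from pygments.lexers import guess_lexer
    probe = 'BEGIN -> END'  # artificial probe; each hit ('->', BEGIN, END) adds 0.01
    print(ECLLexer.analyse_text(probe))  # 0.03
    print(guess_lexer(probe))            # whichever registered lexer scores best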
| 6,372 | Python | 42.650685 | 86 | 0.477872 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/prolog.py | """
pygments.lexers.prolog
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Prolog and Prolog-like languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['PrologLexer', 'LogtalkLexer']
class PrologLexer(RegexLexer):
"""
Lexer for Prolog files.
"""
name = 'Prolog'
aliases = ['prolog']
filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl']
mimetypes = ['text/x-prolog']
tokens = {
'root': [
(r'/\*', Comment.Multiline, 'nested-comment'),
(r'%.*', Comment.Single),
# character literal
(r'0\'.', String.Char),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
# literal with prepended base
(r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer),
(r'[\[\](){}|.,;!]', Punctuation),
(r':-|-->', Punctuation),
(r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
r'\\[0-7]+\\|\\["\\abcefnrstv]|[^\\"])*"', String.Double),
(r"'(?:''|[^'])*'", String.Atom), # quoted atom
# Needs to not be followed by an atom.
# (r'=(?=\s|[a-zA-Z\[])', Operator),
(r'is\b', Operator),
(r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
Operator),
(r'(mod|div|not)\b', Operator),
(r'_', Keyword), # The don't-care variable
(r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
(r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
r'(\s*)(:-|-->)',
bygroups(Name.Function, Text, Operator)), # function defn
(r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
r'(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
String.Atom), # atom, characters
# This one includes !
(r'[#&*+\-./:<=>?@\\^~\u00a1-\u00bf\u2010-\u303f]+',
String.Atom), # atom, graphics
(r'[A-Z_]\w*', Name.Variable),
(r'\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
],
'nested-comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'/\*', Comment.Multiline, '#push'),
(r'[^*/]+', Comment.Multiline),
(r'[*/]', Comment.Multiline),
],
}
def analyse_text(text):
return ':-' in text
class LogtalkLexer(RegexLexer):
"""
For Logtalk source code.
.. versionadded:: 0.10
"""
name = 'Logtalk'
url = 'http://logtalk.org/'
aliases = ['logtalk']
filenames = ['*.lgt', '*.logtalk']
mimetypes = ['text/x-logtalk']
tokens = {
'root': [
# Directives
(r'^\s*:-\s', Punctuation, 'directive'),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
# Numbers
(r"0'[\\]?.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Event handlers
(r'(after|before)(?=[(])', Keyword),
# Message forwarding handler
(r'forward(?=[(])', Keyword),
# Execution-context methods
(r'(context|parameter|this|se(lf|nder))(?=[(])', Keyword),
# Reflection
(r'(current_predicate|predicate_property)(?=[(])', Keyword),
# DCGs and term expansion
(r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword),
# Entity
(r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword),
(r'(object|protocol|category)_property(?=[(])', Keyword),
# Entity relations
(r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
(r'extends_(object|protocol|category)(?=[(])', Keyword),
(r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
(r'(instantiat|specializ)es_class(?=[(])', Keyword),
# Events
(r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
# Flags
(r'(create|current|set)_logtalk_flag(?=[(])', Keyword),
# Compiling, loading, and library paths
(r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make(_target_action)?)(?=[(])', Keyword),
(r'\blogtalk_make\b', Keyword),
# Database
(r'(clause|retract(all)?)(?=[(])', Keyword),
(r'a(bolish|ssert(a|z))(?=[(])', Keyword),
# Control constructs
(r'(ca(ll|tch)|throw)(?=[(])', Keyword),
(r'(fa(il|lse)|true|(instantiation|system)_error)\b', Keyword),
(r'(type|domain|existence|permission|representation|evaluation|resource|syntax)_error(?=[(])', Keyword),
# All solutions
(r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
# Multi-threading predicates
(r'threaded(_(ca(ll|ncel)|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword),
# Engine predicates
(r'threaded_engine(_(create|destroy|self|next|next_reified|yield|post|fetch))?(?=[(])', Keyword),
# Term unification
(r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword),
# Term creation and decomposition
(r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
# Evaluable functors
(r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
(r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword),
# Other arithmetic functors
(r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword),
# Term testing
(r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|ground|acyclic_term)(?=[(])', Keyword),
# Term comparison
(r'compare(?=[(])', Keyword),
# Stream selection and control
(r'(curren|se)t_(in|out)put(?=[(])', Keyword),
(r'(open|close)(?=[(])', Keyword),
(r'flush_output(?=[(])', Keyword),
(r'(at_end_of_stream|flush_output)\b', Keyword),
(r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword),
# Character and byte input/output
(r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
(r'\bnl\b', Keyword),
# Term input/output
(r'read(_term)?(?=[(])', Keyword),
(r'write(q|_(canonical|term))?(?=[(])', Keyword),
(r'(current_)?op(?=[(])', Keyword),
(r'(current_)?char_conversion(?=[(])', Keyword),
# Atomic term processing
(r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
(r'(char_code|sub_atom)(?=[(])', Keyword),
(r'number_c(har|ode)s(?=[(])', Keyword),
# Implementation defined hooks functions
(r'(se|curren)t_prolog_flag(?=[(])', Keyword),
(r'\bhalt\b', Keyword),
(r'halt(?=[(])', Keyword),
# Message sending operators
(r'(::|:|\^\^)', Operator),
# External call
(r'[{}]', Keyword),
# Logic and control
(r'(ignore|once)(?=[(])', Keyword),
(r'\brepeat\b', Keyword),
# Sorting
(r'(key)?sort(?=[(])', Keyword),
# Bitwise functors
(r'(>>|<<|/\\|\\\\|\\)', Operator),
# Predicate aliases
(r'\bas\b', Operator),
# Arithmetic evaluation
(r'\bis\b', Keyword),
# Arithmetic comparison
(r'(=:=|=\\=|<|=<|>=|>)', Operator),
# Term creation and decomposition
(r'=\.\.', Operator),
# Term unification
(r'(=|\\=)', Operator),
# Term comparison
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
# Evaluable functors
(r'(//|[-+*/])', Operator),
(r'\b(e|pi|div|mod|rem)\b', Operator),
# Other arithmetic functors
(r'\b\*\*\b', Operator),
# DCG rules
(r'-->', Operator),
# Control constructs
(r'([!;]|->)', Operator),
# Logic and control
(r'\\+', Operator),
# Mode operators
(r'[?@]', Operator),
# Existential quantifier
(r'\^', Operator),
# Strings
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
# Punctuation
(r'[()\[\],.|]', Text),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"'", String, 'quoted_atom'),
],
'quoted_atom': [
(r"''", String),
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
(r"[^\\'\n]+", String),
(r'\\', String),
],
'directive': [
# Conditional compilation directives
(r'(el)?if(?=[(])', Keyword, 'root'),
(r'(e(lse|ndif))(?=[.])', Keyword, 'root'),
# Entity directives
(r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
(r'(end_(category|object|protocol))(?=[.])', Keyword, 'root'),
# Predicate scope directives
(r'(public|protected|private)(?=[(])', Keyword, 'root'),
# Other directives
(r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
(r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
(r'(built_in|dynamic|synchronized|threaded)(?=[.])', Keyword, 'root'),
(r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|s(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
(r'op(?=[(])', Keyword, 'root'),
(r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
(r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
(r'[a-z][a-zA-Z0-9_]*(?=[.])', Text, 'root'),
],
'entityrelations': [
(r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
# Numbers
(r"0'[\\]?.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"'", String, 'quoted_atom'),
# Strings
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
# End of entity-opening directive
(r'([)]\.)', Text, 'root'),
# Scope operator
(r'(::)', Operator),
# Punctuation
(r'[()\[\],.|]', Text),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
]
}
def analyse_text(text):
if ':- object(' in text:
return 1.0
elif ':- protocol(' in text:
return 1.0
elif ':- category(' in text:
return 1.0
elif re.search(r'^:-\s[a-z]', text, re.M):
return 0.9
else:
return 0.0
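# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: how the two
# analyse_text() heuristics above behave on a Prolog-ish and a Logtalk-ish
# snippet.  Both snippets are made up for the example.
if __name__ == '__main__':
    prolog_src = 'parent(tom, bob).\nancestor(X, Y) :- parent(X, Y).\n'
    logtalk_src = ':- object(bird).\n:- end_object.\n'
    # PrologLexer scores on any ':-' occurrence (a truthy return is treated
    # as 1.0); LogtalkLexer only reaches 1.0 once it sees an entity-opening
    # directive such as ':- object(...'.
    print(PrologLexer.analyse_text(prolog_src),
          LogtalkLexer.analyse_text(prolog_src))   # high vs. 0.0
    print(PrologLexer.analyse_text(logtalk_src),
          LogtalkLexer.analyse_text(logtalk_src))  # both high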
| 12,351 | Python | 39.498361 | 157 | 0.44466 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/gdscript.py | """
pygments.lexers.gdscript
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for GDScript.
Modified by Daniel J. Ramirez <[email protected]> based on the original
python.py.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, words, \
combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ["GDScriptLexer"]
class GDScriptLexer(RegexLexer):
"""
For GDScript source code.
"""
name = "GDScript"
url = 'https://www.godotengine.org'
aliases = ["gdscript", "gd"]
filenames = ["*.gd"]
mimetypes = ["text/x-gdscript", "application/x-gdscript"]
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting
(r"%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?"
"[hlL]?[E-GXc-giorsux%]",
String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r"%", ttype),
# newlines are an error (use "nl" state)
]
tokens = {
"root": [
(r"\n", Whitespace),
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
bygroups(Whitespace, String.Affix, String.Doc)),
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
bygroups(Whitespace, String.Affix, String.Doc)),
(r"[^\S\n]+", Whitespace),
(r"#.*$", Comment.Single),
(r"[]{}:(),;[]", Punctuation),
(r"(\\)(\n)", bygroups(Text, Whitespace)),
(r"\\", Text),
(r"(in|and|or|not)\b", Operator.Word),
(r"!=|==|<<|>>|&&|\+=|-=|\*=|/=|%=|&=|\|=|\|\||[-~+/*%=<>&^.!|$]",
Operator),
include("keywords"),
(r"(func)(\s+)", bygroups(Keyword, Whitespace), "funcname"),
(r"(class)(\s+)", bygroups(Keyword, Whitespace), "classname"),
include("builtins"),
('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
bygroups(String.Affix, String.Double),
"tdqs"),
("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
bygroups(String.Affix, String.Single),
"tsqs"),
('([rR]|[uUbB][rR]|[rR][uUbB])(")',
bygroups(String.Affix, String.Double),
"dqs"),
("([rR]|[uUbB][rR]|[rR][uUbB])(')",
bygroups(String.Affix, String.Single),
"sqs"),
('([uUbB]?)(""")',
bygroups(String.Affix, String.Double),
combined("stringescape", "tdqs")),
("([uUbB]?)(''')",
bygroups(String.Affix, String.Single),
combined("stringescape", "tsqs")),
('([uUbB]?)(")',
bygroups(String.Affix, String.Double),
combined("stringescape", "dqs")),
("([uUbB]?)(')",
bygroups(String.Affix, String.Single),
combined("stringescape", "sqs")),
include("name"),
include("numbers"),
],
"keywords": [
(words(("and", "in", "not", "or", "as", "breakpoint", "class",
"class_name", "extends", "is", "func", "setget", "signal",
"tool", "const", "enum", "export", "onready", "static",
"var", "break", "continue", "if", "elif", "else", "for",
"pass", "return", "match", "while", "remote", "master",
"puppet", "remotesync", "mastersync", "puppetsync"),
suffix=r"\b"), Keyword),
],
"builtins": [
(words(("Color8", "ColorN", "abs", "acos", "asin", "assert", "atan",
"atan2", "bytes2var", "ceil", "char", "clamp", "convert",
"cos", "cosh", "db2linear", "decimals", "dectime", "deg2rad",
"dict2inst", "ease", "exp", "floor", "fmod", "fposmod",
"funcref", "hash", "inst2dict", "instance_from_id", "is_inf",
"is_nan", "lerp", "linear2db", "load", "log", "max", "min",
"nearest_po2", "pow", "preload", "print", "print_stack",
"printerr", "printraw", "prints", "printt", "rad2deg",
"rand_range", "rand_seed", "randf", "randi", "randomize",
"range", "round", "seed", "sign", "sin", "sinh", "sqrt",
"stepify", "str", "str2var", "tan", "tan", "tanh",
"type_exist", "typeof", "var2bytes", "var2str", "weakref",
"yield"), prefix=r"(?<!\.)", suffix=r"\b"),
Name.Builtin),
(r"((?<!\.)(self|false|true)|(PI|TAU|NAN|INF)" r")\b",
Name.Builtin.Pseudo),
(words(("bool", "int", "float", "String", "NodePath", "Vector2",
"Rect2", "Transform2D", "Vector3", "Rect3", "Plane", "Quat",
"Basis", "Transform", "Color", "RID", "Object", "NodePath",
"Dictionary", "Array", "PackedByteArray", "PackedInt32Array",
"PackedInt64Array", "PackedFloat32Array", "PackedFloat64Array",
"PackedStringArray", "PackedVector2Array", "PackedVector3Array",
"PackedColorArray", "null", "void"),
prefix=r"(?<!\.)", suffix=r"\b"),
Name.Builtin.Type),
],
"numbers": [
(r"(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?", Number.Float),
(r"\d+[eE][+-]?[0-9]+j?", Number.Float),
(r"0[xX][a-fA-F0-9]+", Number.Hex),
(r"\d+j?", Number.Integer),
],
"name": [(r"[a-zA-Z_]\w*", Name)],
"funcname": [(r"[a-zA-Z_]\w*", Name.Function, "#pop"), default("#pop")],
"classname": [(r"[a-zA-Z_]\w*", Name.Class, "#pop")],
"stringescape": [
(
r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r"U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})",
String.Escape,
)
],
"strings-single": innerstring_rules(String.Single),
"strings-double": innerstring_rules(String.Double),
"dqs": [
(r'"', String.Double, "#pop"),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include("strings-double"),
],
"sqs": [
(r"'", String.Single, "#pop"),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include("strings-single"),
],
"tdqs": [
(r'"""', String.Double, "#pop"),
include("strings-double"),
(r"\n", Whitespace),
],
"tsqs": [
(r"'''", String.Single, "#pop"),
include("strings-single"),
(r"\n", Whitespace),
],
}
def analyse_text(text):
score = 0.0
if re.search(
r"func (_ready|_init|_input|_process|_unhandled_input)", text
):
score += 0.8
if re.search(
r"(extends |class_name |onready |preload|load|setget|func [^_])",
text
):
score += 0.4
if re.search(r"(var|const|enum|export|signal|tool)", text):
score += 0.2
return min(score, 1.0)
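# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: running the lexer
# through the standard Pygments API.  The GDScript snippet is made up for
# the example.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = 'extends Node\nfunc _ready():\n    print("hello")\n'
    # 'func _ready' and 'extends ' both hit the analyse_text() patterns above,
    # so the score saturates at 1.0.
    print(GDScriptLexer.analyse_text(code))
    print(highlight(code, GDScriptLexer(), TerminalFormatter()))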
| 7,543 | Python | 38.915344 | 84 | 0.442795 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/smv.py | """
pygments.lexers.smv
~~~~~~~~~~~~~~~~~~~
Lexers for the SMV languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, Text
__all__ = ['NuSMVLexer']
class NuSMVLexer(RegexLexer):
"""
Lexer for the NuSMV language.
.. versionadded:: 2.2
"""
name = 'NuSMV'
aliases = ['nusmv']
filenames = ['*.smv']
mimetypes = []
tokens = {
'root': [
# Comments
(r'(?s)\/\-\-.*?\-\-/', Comment),
(r'--.*\n', Comment),
# Reserved
(words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR',
'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC',
'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC',
'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN',
'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF',
'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED',
'PREDICATES'), suffix=r'(?![\w$#-])'),
Keyword.Declaration),
(r'process(?![\w$#-])', Keyword),
(words(('array', 'of', 'boolean', 'integer', 'real', 'word'),
suffix=r'(?![\w$#-])'), Keyword.Type),
(words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword),
(words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize',
'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count',
'abs', 'max', 'min'), suffix=r'(?![\w$#-])'),
Name.Builtin),
(words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G',
'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF',
'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor',
'xnor'), suffix=r'(?![\w$#-])'),
Operator.Word),
(words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant),
# Names
(r'[a-zA-Z_][\w$#-]*', Name.Variable),
# Operators
(r':=', Operator),
(r'[-&|+*/<>!=]', Operator),
# Literals
(r'\-?\d+\b', Number.Integer),
(r'0[su][bB]\d*_[01_]+', Number.Bin),
(r'0[su][oO]\d*_[0-7_]+', Number.Oct),
(r'0[su][dD]\d*_[\d_]+', Number.Decimal),
(r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex),
# Whitespace, punctuation and the rest
(r'\s+', Text.Whitespace),
(r'[()\[\]{};?:.,]', Punctuation),
],
}
| 2,773 | Python | 34.113924 | 80 | 0.419401 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_vim_builtins.py | """
pygments.lexers._vim_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file is autogenerated by scripts/get_vimkw.py
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Split up into multiple functions so it's importable by Jython, which has a
# per-method size limit.
def _getauto():
var = (
('BufAdd','BufAdd'),
('BufCreate','BufCreate'),
('BufDelete','BufDelete'),
('BufEnter','BufEnter'),
('BufFilePost','BufFilePost'),
('BufFilePre','BufFilePre'),
('BufHidden','BufHidden'),
('BufLeave','BufLeave'),
('BufNew','BufNew'),
('BufNewFile','BufNewFile'),
('BufRead','BufRead'),
('BufReadCmd','BufReadCmd'),
('BufReadPost','BufReadPost'),
('BufReadPre','BufReadPre'),
('BufUnload','BufUnload'),
('BufWinEnter','BufWinEnter'),
('BufWinLeave','BufWinLeave'),
('BufWipeout','BufWipeout'),
('BufWrite','BufWrite'),
('BufWriteCmd','BufWriteCmd'),
('BufWritePost','BufWritePost'),
('BufWritePre','BufWritePre'),
('Cmd','Cmd'),
('CmdwinEnter','CmdwinEnter'),
('CmdwinLeave','CmdwinLeave'),
('ColorScheme','ColorScheme'),
('CompleteDone','CompleteDone'),
('CursorHold','CursorHold'),
('CursorHoldI','CursorHoldI'),
('CursorMoved','CursorMoved'),
('CursorMovedI','CursorMovedI'),
('EncodingChanged','EncodingChanged'),
('FileAppendCmd','FileAppendCmd'),
('FileAppendPost','FileAppendPost'),
('FileAppendPre','FileAppendPre'),
('FileChangedRO','FileChangedRO'),
('FileChangedShell','FileChangedShell'),
('FileChangedShellPost','FileChangedShellPost'),
('FileEncoding','FileEncoding'),
('FileReadCmd','FileReadCmd'),
('FileReadPost','FileReadPost'),
('FileReadPre','FileReadPre'),
('FileType','FileType'),
('FileWriteCmd','FileWriteCmd'),
('FileWritePost','FileWritePost'),
('FileWritePre','FileWritePre'),
('FilterReadPost','FilterReadPost'),
('FilterReadPre','FilterReadPre'),
('FilterWritePost','FilterWritePost'),
('FilterWritePre','FilterWritePre'),
('FocusGained','FocusGained'),
('FocusLost','FocusLost'),
('FuncUndefined','FuncUndefined'),
('GUIEnter','GUIEnter'),
('GUIFailed','GUIFailed'),
('InsertChange','InsertChange'),
('InsertCharPre','InsertCharPre'),
('InsertEnter','InsertEnter'),
('InsertLeave','InsertLeave'),
('MenuPopup','MenuPopup'),
('QuickFixCmdPost','QuickFixCmdPost'),
('QuickFixCmdPre','QuickFixCmdPre'),
('QuitPre','QuitPre'),
('RemoteReply','RemoteReply'),
('SessionLoadPost','SessionLoadPost'),
('ShellCmdPost','ShellCmdPost'),
('ShellFilterPost','ShellFilterPost'),
('SourceCmd','SourceCmd'),
('SourcePre','SourcePre'),
('SpellFileMissing','SpellFileMissing'),
('StdinReadPost','StdinReadPost'),
('StdinReadPre','StdinReadPre'),
('SwapExists','SwapExists'),
('Syntax','Syntax'),
('TabEnter','TabEnter'),
('TabLeave','TabLeave'),
('TermChanged','TermChanged'),
('TermResponse','TermResponse'),
('TextChanged','TextChanged'),
('TextChangedI','TextChangedI'),
('User','User'),
('UserGettingBored','UserGettingBored'),
('VimEnter','VimEnter'),
('VimLeave','VimLeave'),
('VimLeavePre','VimLeavePre'),
('VimResized','VimResized'),
('WinEnter','WinEnter'),
('WinLeave','WinLeave'),
('event','event'),
)
return var
auto = _getauto()
def _getcommand():
var = (
('a','a'),
('ab','ab'),
('abc','abclear'),
('abo','aboveleft'),
('al','all'),
('ar','ar'),
('ar','args'),
('arga','argadd'),
('argd','argdelete'),
('argdo','argdo'),
('arge','argedit'),
('argg','argglobal'),
('argl','arglocal'),
('argu','argument'),
('as','ascii'),
('au','au'),
('b','buffer'),
('bN','bNext'),
('ba','ball'),
('bad','badd'),
('bd','bdelete'),
('bel','belowright'),
('bf','bfirst'),
('bl','blast'),
('bm','bmodified'),
('bn','bnext'),
('bo','botright'),
('bp','bprevious'),
('br','br'),
('br','brewind'),
('brea','break'),
('breaka','breakadd'),
('breakd','breakdel'),
('breakl','breaklist'),
('bro','browse'),
('bu','bu'),
('buf','buf'),
('bufdo','bufdo'),
('buffers','buffers'),
('bun','bunload'),
('bw','bwipeout'),
('c','c'),
('c','change'),
('cN','cN'),
('cN','cNext'),
('cNf','cNf'),
('cNf','cNfile'),
('cabc','cabclear'),
('cad','cad'),
('cad','caddexpr'),
('caddb','caddbuffer'),
('caddf','caddfile'),
('cal','call'),
('cat','catch'),
('cb','cbuffer'),
('cc','cc'),
('ccl','cclose'),
('cd','cd'),
('ce','center'),
('cex','cexpr'),
('cf','cfile'),
('cfir','cfirst'),
('cg','cgetfile'),
('cgetb','cgetbuffer'),
('cgete','cgetexpr'),
('changes','changes'),
('chd','chdir'),
('che','checkpath'),
('checkt','checktime'),
('cl','cl'),
('cl','clist'),
('cla','clast'),
('clo','close'),
('cmapc','cmapclear'),
('cn','cn'),
('cn','cnext'),
('cnew','cnewer'),
('cnf','cnf'),
('cnf','cnfile'),
('co','copy'),
('col','colder'),
('colo','colorscheme'),
('com','com'),
('comc','comclear'),
('comp','compiler'),
('con','con'),
('con','continue'),
('conf','confirm'),
('cope','copen'),
('cp','cprevious'),
('cpf','cpfile'),
('cq','cquit'),
('cr','crewind'),
('cs','cs'),
('cscope','cscope'),
('cstag','cstag'),
('cuna','cunabbrev'),
('cw','cwindow'),
('d','d'),
('d','delete'),
('de','de'),
('debug','debug'),
('debugg','debuggreedy'),
('del','del'),
('delc','delcommand'),
('delel','delel'),
('delep','delep'),
('deletel','deletel'),
('deletep','deletep'),
('deletl','deletl'),
('deletp','deletp'),
('delf','delf'),
('delf','delfunction'),
('dell','dell'),
('delm','delmarks'),
('delp','delp'),
('dep','dep'),
('di','di'),
('di','display'),
('diffg','diffget'),
('diffo','diffoff'),
('diffp','diffpatch'),
('diffpu','diffput'),
('diffs','diffsplit'),
('difft','diffthis'),
('diffu','diffupdate'),
('dig','dig'),
('dig','digraphs'),
('dir','dir'),
('dj','djump'),
('dl','dl'),
('dli','dlist'),
('do','do'),
('doau','doau'),
('dp','dp'),
('dr','drop'),
('ds','dsearch'),
('dsp','dsplit'),
('e','e'),
('e','edit'),
('ea','ea'),
('earlier','earlier'),
('ec','ec'),
('echoe','echoerr'),
('echom','echomsg'),
('echon','echon'),
('el','else'),
('elsei','elseif'),
('em','emenu'),
('en','en'),
('en','endif'),
('endf','endf'),
('endf','endfunction'),
('endfo','endfor'),
('endfun','endfun'),
('endt','endtry'),
('endw','endwhile'),
('ene','enew'),
('ex','ex'),
('exi','exit'),
('exu','exusage'),
('f','f'),
('f','file'),
('files','files'),
('filet','filet'),
('filetype','filetype'),
('fin','fin'),
('fin','find'),
('fina','finally'),
('fini','finish'),
('fir','first'),
('fix','fixdel'),
('fo','fold'),
('foldc','foldclose'),
('foldd','folddoopen'),
('folddoc','folddoclosed'),
('foldo','foldopen'),
('for','for'),
('fu','fu'),
('fu','function'),
('fun','fun'),
('g','g'),
('go','goto'),
('gr','grep'),
('grepa','grepadd'),
('gui','gui'),
('gvim','gvim'),
('h','h'),
('h','help'),
('ha','hardcopy'),
('helpf','helpfind'),
('helpg','helpgrep'),
('helpt','helptags'),
('hi','hi'),
('hid','hide'),
('his','history'),
('i','i'),
('ia','ia'),
('iabc','iabclear'),
('if','if'),
('ij','ijump'),
('il','ilist'),
('imapc','imapclear'),
('in','in'),
('intro','intro'),
('is','isearch'),
('isp','isplit'),
('iuna','iunabbrev'),
('j','join'),
('ju','jumps'),
('k','k'),
('kee','keepmarks'),
('keepa','keepa'),
('keepalt','keepalt'),
('keepj','keepjumps'),
('keepp','keeppatterns'),
('l','l'),
('l','list'),
('lN','lN'),
('lN','lNext'),
('lNf','lNf'),
('lNf','lNfile'),
('la','la'),
('la','last'),
('lad','lad'),
('lad','laddexpr'),
('laddb','laddbuffer'),
('laddf','laddfile'),
('lan','lan'),
('lan','language'),
('lat','lat'),
('later','later'),
('lb','lbuffer'),
('lc','lcd'),
('lch','lchdir'),
('lcl','lclose'),
('lcs','lcs'),
('lcscope','lcscope'),
('le','left'),
('lefta','leftabove'),
('lex','lexpr'),
('lf','lfile'),
('lfir','lfirst'),
('lg','lgetfile'),
('lgetb','lgetbuffer'),
('lgete','lgetexpr'),
('lgr','lgrep'),
('lgrepa','lgrepadd'),
('lh','lhelpgrep'),
('ll','ll'),
('lla','llast'),
('lli','llist'),
('lmak','lmake'),
('lmapc','lmapclear'),
('lne','lne'),
('lne','lnext'),
('lnew','lnewer'),
('lnf','lnf'),
('lnf','lnfile'),
('lo','lo'),
('lo','loadview'),
('loadk','loadk'),
('loadkeymap','loadkeymap'),
('loc','lockmarks'),
('lockv','lockvar'),
('lol','lolder'),
('lop','lopen'),
('lp','lprevious'),
('lpf','lpfile'),
('lr','lrewind'),
('ls','ls'),
('lt','ltag'),
('lua','lua'),
('luado','luado'),
('luafile','luafile'),
('lv','lvimgrep'),
('lvimgrepa','lvimgrepadd'),
('lw','lwindow'),
('m','move'),
('ma','ma'),
('ma','mark'),
('mak','make'),
('marks','marks'),
('mat','match'),
('menut','menut'),
('menut','menutranslate'),
('mes','mes'),
('messages','messages'),
('mk','mk'),
('mk','mkexrc'),
('mks','mksession'),
('mksp','mkspell'),
('mkv','mkv'),
('mkv','mkvimrc'),
('mkvie','mkview'),
('mo','mo'),
('mod','mode'),
('mz','mz'),
('mz','mzscheme'),
('mzf','mzfile'),
('n','n'),
('n','next'),
('nb','nbkey'),
('nbc','nbclose'),
('nbs','nbstart'),
('ne','ne'),
('new','new'),
('nmapc','nmapclear'),
('noa','noa'),
('noautocmd','noautocmd'),
('noh','nohlsearch'),
('nu','number'),
('o','o'),
('o','open'),
('ol','oldfiles'),
('omapc','omapclear'),
('on','only'),
('opt','options'),
('ownsyntax','ownsyntax'),
('p','p'),
('p','print'),
('pc','pclose'),
('pe','pe'),
('pe','perl'),
('ped','pedit'),
('perld','perldo'),
('po','pop'),
('popu','popu'),
('popu','popup'),
('pp','ppop'),
('pr','pr'),
('pre','preserve'),
('prev','previous'),
('pro','pro'),
('prof','profile'),
('profd','profdel'),
('promptf','promptfind'),
('promptr','promptrepl'),
('ps','psearch'),
('ptN','ptN'),
('ptN','ptNext'),
('pta','ptag'),
('ptf','ptfirst'),
('ptj','ptjump'),
('ptl','ptlast'),
('ptn','ptn'),
('ptn','ptnext'),
('ptp','ptprevious'),
('ptr','ptrewind'),
('pts','ptselect'),
('pu','put'),
('pw','pwd'),
('py','py'),
('py','python'),
('py3','py3'),
('py3','py3'),
('py3do','py3do'),
('pydo','pydo'),
('pyf','pyfile'),
('python3','python3'),
('q','q'),
('q','quit'),
('qa','qall'),
('quita','quitall'),
('r','r'),
('r','read'),
('re','re'),
('rec','recover'),
('red','red'),
('red','redo'),
('redi','redir'),
('redr','redraw'),
('redraws','redrawstatus'),
('reg','registers'),
('res','resize'),
('ret','retab'),
('retu','return'),
('rew','rewind'),
('ri','right'),
('rightb','rightbelow'),
('ru','ru'),
('ru','runtime'),
('rub','ruby'),
('rubyd','rubydo'),
('rubyf','rubyfile'),
('rundo','rundo'),
('rv','rviminfo'),
('sN','sNext'),
('sa','sargument'),
('sal','sall'),
('san','sandbox'),
('sav','saveas'),
('sb','sbuffer'),
('sbN','sbNext'),
('sba','sball'),
('sbf','sbfirst'),
('sbl','sblast'),
('sbm','sbmodified'),
('sbn','sbnext'),
('sbp','sbprevious'),
('sbr','sbrewind'),
('scrip','scrip'),
('scrip','scriptnames'),
('scripte','scriptencoding'),
('scs','scs'),
('scscope','scscope'),
('se','set'),
('setf','setfiletype'),
('setg','setglobal'),
('setl','setlocal'),
('sf','sfind'),
('sfir','sfirst'),
('sh','shell'),
('si','si'),
('sig','sig'),
('sign','sign'),
('sil','silent'),
('sim','simalt'),
('sl','sl'),
('sl','sleep'),
('sla','slast'),
('sm','smagic'),
('sm','smap'),
('sme','sme'),
('smenu','smenu'),
('sn','snext'),
('sni','sniff'),
('sno','snomagic'),
('snoreme','snoreme'),
('snoremenu','snoremenu'),
('so','so'),
('so','source'),
('sor','sort'),
('sp','split'),
('spe','spe'),
('spe','spellgood'),
('spelld','spelldump'),
('spelli','spellinfo'),
('spellr','spellrepall'),
('spellu','spellundo'),
('spellw','spellwrong'),
('spr','sprevious'),
('sre','srewind'),
('st','st'),
('st','stop'),
('sta','stag'),
('star','star'),
('star','startinsert'),
('start','start'),
('startg','startgreplace'),
('startr','startreplace'),
('stj','stjump'),
('stopi','stopinsert'),
('sts','stselect'),
('sun','sunhide'),
('sunme','sunme'),
('sunmenu','sunmenu'),
('sus','suspend'),
('sv','sview'),
('sw','swapname'),
('sy','sy'),
('syn','syn'),
('sync','sync'),
('syncbind','syncbind'),
('syntime','syntime'),
('t','t'),
('tN','tN'),
('tN','tNext'),
('ta','ta'),
('ta','tag'),
('tab','tab'),
('tabN','tabN'),
('tabN','tabNext'),
('tabc','tabclose'),
('tabd','tabdo'),
('tabe','tabedit'),
('tabf','tabfind'),
('tabfir','tabfirst'),
('tabl','tablast'),
('tabm','tabmove'),
('tabn','tabnext'),
('tabnew','tabnew'),
('tabo','tabonly'),
('tabp','tabprevious'),
('tabr','tabrewind'),
('tabs','tabs'),
('tags','tags'),
('tc','tcl'),
('tcld','tcldo'),
('tclf','tclfile'),
('te','tearoff'),
('tf','tfirst'),
('th','throw'),
('tj','tjump'),
('tl','tlast'),
('tm','tm'),
('tm','tmenu'),
('tn','tn'),
('tn','tnext'),
('to','topleft'),
('tp','tprevious'),
('tr','tr'),
('tr','trewind'),
('try','try'),
('ts','tselect'),
('tu','tu'),
('tu','tunmenu'),
('u','u'),
('u','undo'),
('un','un'),
('una','unabbreviate'),
('undoj','undojoin'),
('undol','undolist'),
('unh','unhide'),
('unl','unl'),
('unlo','unlockvar'),
('uns','unsilent'),
('up','update'),
('v','v'),
('ve','ve'),
('ve','version'),
('verb','verbose'),
('vert','vertical'),
('vi','vi'),
('vi','visual'),
('vie','view'),
('vim','vimgrep'),
('vimgrepa','vimgrepadd'),
('viu','viusage'),
('vmapc','vmapclear'),
('vne','vnew'),
('vs','vsplit'),
('w','w'),
('w','write'),
('wN','wNext'),
('wa','wall'),
('wh','while'),
('win','win'),
('win','winsize'),
('winc','wincmd'),
('windo','windo'),
('winp','winpos'),
('wn','wnext'),
('wp','wprevious'),
('wq','wq'),
('wqa','wqall'),
('ws','wsverb'),
('wundo','wundo'),
('wv','wviminfo'),
('x','x'),
('x','xit'),
('xa','xall'),
('xmapc','xmapclear'),
('xme','xme'),
('xmenu','xmenu'),
('xnoreme','xnoreme'),
('xnoremenu','xnoremenu'),
('xunme','xunme'),
('xunmenu','xunmenu'),
('xwininfo','xwininfo'),
('y','yank'),
)
return var
command = _getcommand()
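# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: each entry appears to
# pair the shortest accepted abbreviation with the full name.  The helper
# below is a hypothetical consumer of these tables, not the lookup that the
# Vim lexer itself ships with.
def _is_known(word, table):
    """Return True if *word* is an acceptable abbreviation of some entry."""
    for minimum, full in table:
        if full.startswith(word) and word.startswith(minimum):
            return True
    return False
# e.g. _is_known('bn', command) and _is_known('bnext', command) are True,
# while _is_known('bx', command) is False.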
def _getoption():
var = (
('acd','acd'),
('ai','ai'),
('akm','akm'),
('al','al'),
('aleph','aleph'),
('allowrevins','allowrevins'),
('altkeymap','altkeymap'),
('ambiwidth','ambiwidth'),
('ambw','ambw'),
('anti','anti'),
('antialias','antialias'),
('ar','ar'),
('arab','arab'),
('arabic','arabic'),
('arabicshape','arabicshape'),
('ari','ari'),
('arshape','arshape'),
('autochdir','autochdir'),
('autoindent','autoindent'),
('autoread','autoread'),
('autowrite','autowrite'),
('autowriteall','autowriteall'),
('aw','aw'),
('awa','awa'),
('background','background'),
('backspace','backspace'),
('backup','backup'),
('backupcopy','backupcopy'),
('backupdir','backupdir'),
('backupext','backupext'),
('backupskip','backupskip'),
('balloondelay','balloondelay'),
('ballooneval','ballooneval'),
('balloonexpr','balloonexpr'),
('bdir','bdir'),
('bdlay','bdlay'),
('beval','beval'),
('bex','bex'),
('bexpr','bexpr'),
('bg','bg'),
('bh','bh'),
('bin','bin'),
('binary','binary'),
('biosk','biosk'),
('bioskey','bioskey'),
('bk','bk'),
('bkc','bkc'),
('bl','bl'),
('bomb','bomb'),
('breakat','breakat'),
('brk','brk'),
('browsedir','browsedir'),
('bs','bs'),
('bsdir','bsdir'),
('bsk','bsk'),
('bt','bt'),
('bufhidden','bufhidden'),
('buflisted','buflisted'),
('buftype','buftype'),
('casemap','casemap'),
('cb','cb'),
('cc','cc'),
('ccv','ccv'),
('cd','cd'),
('cdpath','cdpath'),
('cedit','cedit'),
('cf','cf'),
('cfu','cfu'),
('ch','ch'),
('charconvert','charconvert'),
('ci','ci'),
('cin','cin'),
('cindent','cindent'),
('cink','cink'),
('cinkeys','cinkeys'),
('cino','cino'),
('cinoptions','cinoptions'),
('cinw','cinw'),
('cinwords','cinwords'),
('clipboard','clipboard'),
('cmdheight','cmdheight'),
('cmdwinheight','cmdwinheight'),
('cmp','cmp'),
('cms','cms'),
('co','co'),
('cocu','cocu'),
('cole','cole'),
('colorcolumn','colorcolumn'),
('columns','columns'),
('com','com'),
('comments','comments'),
('commentstring','commentstring'),
('compatible','compatible'),
('complete','complete'),
('completefunc','completefunc'),
('completeopt','completeopt'),
('concealcursor','concealcursor'),
('conceallevel','conceallevel'),
('confirm','confirm'),
('consk','consk'),
('conskey','conskey'),
('copyindent','copyindent'),
('cot','cot'),
('cp','cp'),
('cpo','cpo'),
('cpoptions','cpoptions'),
('cpt','cpt'),
('crb','crb'),
('cryptmethod','cryptmethod'),
('cscopepathcomp','cscopepathcomp'),
('cscopeprg','cscopeprg'),
('cscopequickfix','cscopequickfix'),
('cscoperelative','cscoperelative'),
('cscopetag','cscopetag'),
('cscopetagorder','cscopetagorder'),
('cscopeverbose','cscopeverbose'),
('cspc','cspc'),
('csprg','csprg'),
('csqf','csqf'),
('csre','csre'),
('cst','cst'),
('csto','csto'),
('csverb','csverb'),
('cuc','cuc'),
('cul','cul'),
('cursorbind','cursorbind'),
('cursorcolumn','cursorcolumn'),
('cursorline','cursorline'),
('cwh','cwh'),
('debug','debug'),
('deco','deco'),
('def','def'),
('define','define'),
('delcombine','delcombine'),
('dex','dex'),
('dg','dg'),
('dict','dict'),
('dictionary','dictionary'),
('diff','diff'),
('diffexpr','diffexpr'),
('diffopt','diffopt'),
('digraph','digraph'),
('dip','dip'),
('dir','dir'),
('directory','directory'),
('display','display'),
('dy','dy'),
('ea','ea'),
('ead','ead'),
('eadirection','eadirection'),
('eb','eb'),
('ed','ed'),
('edcompatible','edcompatible'),
('ef','ef'),
('efm','efm'),
('ei','ei'),
('ek','ek'),
('enc','enc'),
('encoding','encoding'),
('endofline','endofline'),
('eol','eol'),
('ep','ep'),
('equalalways','equalalways'),
('equalprg','equalprg'),
('errorbells','errorbells'),
('errorfile','errorfile'),
('errorformat','errorformat'),
('esckeys','esckeys'),
('et','et'),
('eventignore','eventignore'),
('ex','ex'),
('expandtab','expandtab'),
('exrc','exrc'),
('fcl','fcl'),
('fcs','fcs'),
('fdc','fdc'),
('fde','fde'),
('fdi','fdi'),
('fdl','fdl'),
('fdls','fdls'),
('fdm','fdm'),
('fdn','fdn'),
('fdo','fdo'),
('fdt','fdt'),
('fen','fen'),
('fenc','fenc'),
('fencs','fencs'),
('fex','fex'),
('ff','ff'),
('ffs','ffs'),
('fic','fic'),
('fileencoding','fileencoding'),
('fileencodings','fileencodings'),
('fileformat','fileformat'),
('fileformats','fileformats'),
('fileignorecase','fileignorecase'),
('filetype','filetype'),
('fillchars','fillchars'),
('fk','fk'),
('fkmap','fkmap'),
('flp','flp'),
('fml','fml'),
('fmr','fmr'),
('fo','fo'),
('foldclose','foldclose'),
('foldcolumn','foldcolumn'),
('foldenable','foldenable'),
('foldexpr','foldexpr'),
('foldignore','foldignore'),
('foldlevel','foldlevel'),
('foldlevelstart','foldlevelstart'),
('foldmarker','foldmarker'),
('foldmethod','foldmethod'),
('foldminlines','foldminlines'),
('foldnestmax','foldnestmax'),
('foldopen','foldopen'),
('foldtext','foldtext'),
('formatexpr','formatexpr'),
('formatlistpat','formatlistpat'),
('formatoptions','formatoptions'),
('formatprg','formatprg'),
('fp','fp'),
('fs','fs'),
('fsync','fsync'),
('ft','ft'),
('gcr','gcr'),
('gd','gd'),
('gdefault','gdefault'),
('gfm','gfm'),
('gfn','gfn'),
('gfs','gfs'),
('gfw','gfw'),
('ghr','ghr'),
('go','go'),
('gp','gp'),
('grepformat','grepformat'),
('grepprg','grepprg'),
('gtl','gtl'),
('gtt','gtt'),
('guicursor','guicursor'),
('guifont','guifont'),
('guifontset','guifontset'),
('guifontwide','guifontwide'),
('guiheadroom','guiheadroom'),
('guioptions','guioptions'),
('guipty','guipty'),
('guitablabel','guitablabel'),
('guitabtooltip','guitabtooltip'),
('helpfile','helpfile'),
('helpheight','helpheight'),
('helplang','helplang'),
('hf','hf'),
('hh','hh'),
('hi','hi'),
('hid','hid'),
('hidden','hidden'),
('highlight','highlight'),
('history','history'),
('hk','hk'),
('hkmap','hkmap'),
('hkmapp','hkmapp'),
('hkp','hkp'),
('hl','hl'),
('hlg','hlg'),
('hls','hls'),
('hlsearch','hlsearch'),
('ic','ic'),
('icon','icon'),
('iconstring','iconstring'),
('ignorecase','ignorecase'),
('im','im'),
('imactivatefunc','imactivatefunc'),
('imactivatekey','imactivatekey'),
('imaf','imaf'),
('imak','imak'),
('imc','imc'),
('imcmdline','imcmdline'),
('imd','imd'),
('imdisable','imdisable'),
('imi','imi'),
('iminsert','iminsert'),
('ims','ims'),
('imsearch','imsearch'),
('imsf','imsf'),
('imstatusfunc','imstatusfunc'),
('inc','inc'),
('include','include'),
('includeexpr','includeexpr'),
('incsearch','incsearch'),
('inde','inde'),
('indentexpr','indentexpr'),
('indentkeys','indentkeys'),
('indk','indk'),
('inex','inex'),
('inf','inf'),
('infercase','infercase'),
('inoremap','inoremap'),
('insertmode','insertmode'),
('invacd','invacd'),
('invai','invai'),
('invakm','invakm'),
('invallowrevins','invallowrevins'),
('invaltkeymap','invaltkeymap'),
('invanti','invanti'),
('invantialias','invantialias'),
('invar','invar'),
('invarab','invarab'),
('invarabic','invarabic'),
('invarabicshape','invarabicshape'),
('invari','invari'),
('invarshape','invarshape'),
('invautochdir','invautochdir'),
('invautoindent','invautoindent'),
('invautoread','invautoread'),
('invautowrite','invautowrite'),
('invautowriteall','invautowriteall'),
('invaw','invaw'),
('invawa','invawa'),
('invbackup','invbackup'),
('invballooneval','invballooneval'),
('invbeval','invbeval'),
('invbin','invbin'),
('invbinary','invbinary'),
('invbiosk','invbiosk'),
('invbioskey','invbioskey'),
('invbk','invbk'),
('invbl','invbl'),
('invbomb','invbomb'),
('invbuflisted','invbuflisted'),
('invcf','invcf'),
('invci','invci'),
('invcin','invcin'),
('invcindent','invcindent'),
('invcompatible','invcompatible'),
('invconfirm','invconfirm'),
('invconsk','invconsk'),
('invconskey','invconskey'),
('invcopyindent','invcopyindent'),
('invcp','invcp'),
('invcrb','invcrb'),
('invcscoperelative','invcscoperelative'),
('invcscopetag','invcscopetag'),
('invcscopeverbose','invcscopeverbose'),
('invcsre','invcsre'),
('invcst','invcst'),
('invcsverb','invcsverb'),
('invcuc','invcuc'),
('invcul','invcul'),
('invcursorbind','invcursorbind'),
('invcursorcolumn','invcursorcolumn'),
('invcursorline','invcursorline'),
('invdeco','invdeco'),
('invdelcombine','invdelcombine'),
('invdg','invdg'),
('invdiff','invdiff'),
('invdigraph','invdigraph'),
('invea','invea'),
('inveb','inveb'),
('inved','inved'),
('invedcompatible','invedcompatible'),
('invek','invek'),
('invendofline','invendofline'),
('inveol','inveol'),
('invequalalways','invequalalways'),
('inverrorbells','inverrorbells'),
('invesckeys','invesckeys'),
('invet','invet'),
('invex','invex'),
('invexpandtab','invexpandtab'),
('invexrc','invexrc'),
('invfen','invfen'),
('invfic','invfic'),
('invfileignorecase','invfileignorecase'),
('invfk','invfk'),
('invfkmap','invfkmap'),
('invfoldenable','invfoldenable'),
('invgd','invgd'),
('invgdefault','invgdefault'),
('invguipty','invguipty'),
('invhid','invhid'),
('invhidden','invhidden'),
('invhk','invhk'),
('invhkmap','invhkmap'),
('invhkmapp','invhkmapp'),
('invhkp','invhkp'),
('invhls','invhls'),
('invhlsearch','invhlsearch'),
('invic','invic'),
('invicon','invicon'),
('invignorecase','invignorecase'),
('invim','invim'),
('invimc','invimc'),
('invimcmdline','invimcmdline'),
('invimd','invimd'),
('invimdisable','invimdisable'),
('invincsearch','invincsearch'),
('invinf','invinf'),
('invinfercase','invinfercase'),
('invinsertmode','invinsertmode'),
('invis','invis'),
('invjoinspaces','invjoinspaces'),
('invjs','invjs'),
('invlazyredraw','invlazyredraw'),
('invlbr','invlbr'),
('invlinebreak','invlinebreak'),
('invlisp','invlisp'),
('invlist','invlist'),
('invloadplugins','invloadplugins'),
('invlpl','invlpl'),
('invlz','invlz'),
('invma','invma'),
('invmacatsui','invmacatsui'),
('invmagic','invmagic'),
('invmh','invmh'),
('invml','invml'),
('invmod','invmod'),
('invmodeline','invmodeline'),
('invmodifiable','invmodifiable'),
('invmodified','invmodified'),
('invmore','invmore'),
('invmousef','invmousef'),
('invmousefocus','invmousefocus'),
('invmousehide','invmousehide'),
('invnu','invnu'),
('invnumber','invnumber'),
('invodev','invodev'),
('invopendevice','invopendevice'),
('invpaste','invpaste'),
('invpi','invpi'),
('invpreserveindent','invpreserveindent'),
('invpreviewwindow','invpreviewwindow'),
('invprompt','invprompt'),
('invpvw','invpvw'),
('invreadonly','invreadonly'),
('invrelativenumber','invrelativenumber'),
('invremap','invremap'),
('invrestorescreen','invrestorescreen'),
('invrevins','invrevins'),
('invri','invri'),
('invrightleft','invrightleft'),
('invrl','invrl'),
('invrnu','invrnu'),
('invro','invro'),
('invrs','invrs'),
('invru','invru'),
('invruler','invruler'),
('invsb','invsb'),
('invsc','invsc'),
('invscb','invscb'),
('invscrollbind','invscrollbind'),
('invscs','invscs'),
('invsecure','invsecure'),
('invsft','invsft'),
('invshellslash','invshellslash'),
('invshelltemp','invshelltemp'),
('invshiftround','invshiftround'),
('invshortname','invshortname'),
('invshowcmd','invshowcmd'),
('invshowfulltag','invshowfulltag'),
('invshowmatch','invshowmatch'),
('invshowmode','invshowmode'),
('invsi','invsi'),
('invsm','invsm'),
('invsmartcase','invsmartcase'),
('invsmartindent','invsmartindent'),
('invsmarttab','invsmarttab'),
('invsmd','invsmd'),
('invsn','invsn'),
('invsol','invsol'),
('invspell','invspell'),
('invsplitbelow','invsplitbelow'),
('invsplitright','invsplitright'),
('invspr','invspr'),
('invsr','invsr'),
('invssl','invssl'),
('invsta','invsta'),
('invstartofline','invstartofline'),
('invstmp','invstmp'),
('invswapfile','invswapfile'),
('invswf','invswf'),
('invta','invta'),
('invtagbsearch','invtagbsearch'),
('invtagrelative','invtagrelative'),
('invtagstack','invtagstack'),
('invtbi','invtbi'),
('invtbidi','invtbidi'),
('invtbs','invtbs'),
('invtermbidi','invtermbidi'),
('invterse','invterse'),
('invtextauto','invtextauto'),
('invtextmode','invtextmode'),
('invtf','invtf'),
('invtgst','invtgst'),
('invtildeop','invtildeop'),
('invtimeout','invtimeout'),
('invtitle','invtitle'),
('invto','invto'),
('invtop','invtop'),
('invtr','invtr'),
('invttimeout','invttimeout'),
('invttybuiltin','invttybuiltin'),
('invttyfast','invttyfast'),
('invtx','invtx'),
('invudf','invudf'),
('invundofile','invundofile'),
('invvb','invvb'),
('invvisualbell','invvisualbell'),
('invwa','invwa'),
('invwarn','invwarn'),
('invwb','invwb'),
('invweirdinvert','invweirdinvert'),
('invwfh','invwfh'),
('invwfw','invwfw'),
('invwic','invwic'),
('invwildignorecase','invwildignorecase'),
('invwildmenu','invwildmenu'),
('invwinfixheight','invwinfixheight'),
('invwinfixwidth','invwinfixwidth'),
('invwiv','invwiv'),
('invwmnu','invwmnu'),
('invwrap','invwrap'),
('invwrapscan','invwrapscan'),
('invwrite','invwrite'),
('invwriteany','invwriteany'),
('invwritebackup','invwritebackup'),
('invws','invws'),
('is','is'),
('isf','isf'),
('isfname','isfname'),
('isi','isi'),
('isident','isident'),
('isk','isk'),
('iskeyword','iskeyword'),
('isp','isp'),
('isprint','isprint'),
('joinspaces','joinspaces'),
('js','js'),
('key','key'),
('keymap','keymap'),
('keymodel','keymodel'),
('keywordprg','keywordprg'),
('km','km'),
('kmp','kmp'),
('kp','kp'),
('langmap','langmap'),
('langmenu','langmenu'),
('laststatus','laststatus'),
('lazyredraw','lazyredraw'),
('lbr','lbr'),
('lcs','lcs'),
('linebreak','linebreak'),
('lines','lines'),
('linespace','linespace'),
('lisp','lisp'),
('lispwords','lispwords'),
('list','list'),
('listchars','listchars'),
('lm','lm'),
('lmap','lmap'),
('loadplugins','loadplugins'),
('lpl','lpl'),
('ls','ls'),
('lsp','lsp'),
('lw','lw'),
('lz','lz'),
('ma','ma'),
('macatsui','macatsui'),
('magic','magic'),
('makeef','makeef'),
('makeprg','makeprg'),
('mat','mat'),
('matchpairs','matchpairs'),
('matchtime','matchtime'),
('maxcombine','maxcombine'),
('maxfuncdepth','maxfuncdepth'),
('maxmapdepth','maxmapdepth'),
('maxmem','maxmem'),
('maxmempattern','maxmempattern'),
('maxmemtot','maxmemtot'),
('mco','mco'),
('mef','mef'),
('menuitems','menuitems'),
('mfd','mfd'),
('mh','mh'),
('mis','mis'),
('mkspellmem','mkspellmem'),
('ml','ml'),
('mls','mls'),
('mm','mm'),
('mmd','mmd'),
('mmp','mmp'),
('mmt','mmt'),
('mod','mod'),
('modeline','modeline'),
('modelines','modelines'),
('modifiable','modifiable'),
('modified','modified'),
('more','more'),
('mouse','mouse'),
('mousef','mousef'),
('mousefocus','mousefocus'),
('mousehide','mousehide'),
('mousem','mousem'),
('mousemodel','mousemodel'),
('mouses','mouses'),
('mouseshape','mouseshape'),
('mouset','mouset'),
('mousetime','mousetime'),
('mp','mp'),
('mps','mps'),
('msm','msm'),
('mzq','mzq'),
('mzquantum','mzquantum'),
('nf','nf'),
('nnoremap','nnoremap'),
('noacd','noacd'),
('noai','noai'),
('noakm','noakm'),
('noallowrevins','noallowrevins'),
('noaltkeymap','noaltkeymap'),
('noanti','noanti'),
('noantialias','noantialias'),
('noar','noar'),
('noarab','noarab'),
('noarabic','noarabic'),
('noarabicshape','noarabicshape'),
('noari','noari'),
('noarshape','noarshape'),
('noautochdir','noautochdir'),
('noautoindent','noautoindent'),
('noautoread','noautoread'),
('noautowrite','noautowrite'),
('noautowriteall','noautowriteall'),
('noaw','noaw'),
('noawa','noawa'),
('nobackup','nobackup'),
('noballooneval','noballooneval'),
('nobeval','nobeval'),
('nobin','nobin'),
('nobinary','nobinary'),
('nobiosk','nobiosk'),
('nobioskey','nobioskey'),
('nobk','nobk'),
('nobl','nobl'),
('nobomb','nobomb'),
('nobuflisted','nobuflisted'),
('nocf','nocf'),
('noci','noci'),
('nocin','nocin'),
('nocindent','nocindent'),
('nocompatible','nocompatible'),
('noconfirm','noconfirm'),
('noconsk','noconsk'),
('noconskey','noconskey'),
('nocopyindent','nocopyindent'),
('nocp','nocp'),
('nocrb','nocrb'),
('nocscoperelative','nocscoperelative'),
('nocscopetag','nocscopetag'),
('nocscopeverbose','nocscopeverbose'),
('nocsre','nocsre'),
('nocst','nocst'),
('nocsverb','nocsverb'),
('nocuc','nocuc'),
('nocul','nocul'),
('nocursorbind','nocursorbind'),
('nocursorcolumn','nocursorcolumn'),
('nocursorline','nocursorline'),
('nodeco','nodeco'),
('nodelcombine','nodelcombine'),
('nodg','nodg'),
('nodiff','nodiff'),
('nodigraph','nodigraph'),
('noea','noea'),
('noeb','noeb'),
('noed','noed'),
('noedcompatible','noedcompatible'),
('noek','noek'),
('noendofline','noendofline'),
('noeol','noeol'),
('noequalalways','noequalalways'),
('noerrorbells','noerrorbells'),
('noesckeys','noesckeys'),
('noet','noet'),
('noex','noex'),
('noexpandtab','noexpandtab'),
('noexrc','noexrc'),
('nofen','nofen'),
('nofic','nofic'),
('nofileignorecase','nofileignorecase'),
('nofk','nofk'),
('nofkmap','nofkmap'),
('nofoldenable','nofoldenable'),
('nogd','nogd'),
('nogdefault','nogdefault'),
('noguipty','noguipty'),
('nohid','nohid'),
('nohidden','nohidden'),
('nohk','nohk'),
('nohkmap','nohkmap'),
('nohkmapp','nohkmapp'),
('nohkp','nohkp'),
('nohls','nohls'),
('nohlsearch','nohlsearch'),
('noic','noic'),
('noicon','noicon'),
('noignorecase','noignorecase'),
('noim','noim'),
('noimc','noimc'),
('noimcmdline','noimcmdline'),
('noimd','noimd'),
('noimdisable','noimdisable'),
('noincsearch','noincsearch'),
('noinf','noinf'),
('noinfercase','noinfercase'),
('noinsertmode','noinsertmode'),
('nois','nois'),
('nojoinspaces','nojoinspaces'),
('nojs','nojs'),
('nolazyredraw','nolazyredraw'),
('nolbr','nolbr'),
('nolinebreak','nolinebreak'),
('nolisp','nolisp'),
('nolist','nolist'),
('noloadplugins','noloadplugins'),
('nolpl','nolpl'),
('nolz','nolz'),
('noma','noma'),
('nomacatsui','nomacatsui'),
('nomagic','nomagic'),
('nomh','nomh'),
('noml','noml'),
('nomod','nomod'),
('nomodeline','nomodeline'),
('nomodifiable','nomodifiable'),
('nomodified','nomodified'),
('nomore','nomore'),
('nomousef','nomousef'),
('nomousefocus','nomousefocus'),
('nomousehide','nomousehide'),
('nonu','nonu'),
('nonumber','nonumber'),
('noodev','noodev'),
('noopendevice','noopendevice'),
('nopaste','nopaste'),
('nopi','nopi'),
('nopreserveindent','nopreserveindent'),
('nopreviewwindow','nopreviewwindow'),
('noprompt','noprompt'),
('nopvw','nopvw'),
('noreadonly','noreadonly'),
('norelativenumber','norelativenumber'),
('noremap','noremap'),
('norestorescreen','norestorescreen'),
('norevins','norevins'),
('nori','nori'),
('norightleft','norightleft'),
('norl','norl'),
('nornu','nornu'),
('noro','noro'),
('nors','nors'),
('noru','noru'),
('noruler','noruler'),
('nosb','nosb'),
('nosc','nosc'),
('noscb','noscb'),
('noscrollbind','noscrollbind'),
('noscs','noscs'),
('nosecure','nosecure'),
('nosft','nosft'),
('noshellslash','noshellslash'),
('noshelltemp','noshelltemp'),
('noshiftround','noshiftround'),
('noshortname','noshortname'),
('noshowcmd','noshowcmd'),
('noshowfulltag','noshowfulltag'),
('noshowmatch','noshowmatch'),
('noshowmode','noshowmode'),
('nosi','nosi'),
('nosm','nosm'),
('nosmartcase','nosmartcase'),
('nosmartindent','nosmartindent'),
('nosmarttab','nosmarttab'),
('nosmd','nosmd'),
('nosn','nosn'),
('nosol','nosol'),
('nospell','nospell'),
('nosplitbelow','nosplitbelow'),
('nosplitright','nosplitright'),
('nospr','nospr'),
('nosr','nosr'),
('nossl','nossl'),
('nosta','nosta'),
('nostartofline','nostartofline'),
('nostmp','nostmp'),
('noswapfile','noswapfile'),
('noswf','noswf'),
('nota','nota'),
('notagbsearch','notagbsearch'),
('notagrelative','notagrelative'),
('notagstack','notagstack'),
('notbi','notbi'),
('notbidi','notbidi'),
('notbs','notbs'),
('notermbidi','notermbidi'),
('noterse','noterse'),
('notextauto','notextauto'),
('notextmode','notextmode'),
('notf','notf'),
('notgst','notgst'),
('notildeop','notildeop'),
('notimeout','notimeout'),
('notitle','notitle'),
('noto','noto'),
('notop','notop'),
('notr','notr'),
('nottimeout','nottimeout'),
('nottybuiltin','nottybuiltin'),
('nottyfast','nottyfast'),
('notx','notx'),
('noudf','noudf'),
('noundofile','noundofile'),
('novb','novb'),
('novisualbell','novisualbell'),
('nowa','nowa'),
('nowarn','nowarn'),
('nowb','nowb'),
('noweirdinvert','noweirdinvert'),
('nowfh','nowfh'),
('nowfw','nowfw'),
('nowic','nowic'),
('nowildignorecase','nowildignorecase'),
('nowildmenu','nowildmenu'),
('nowinfixheight','nowinfixheight'),
('nowinfixwidth','nowinfixwidth'),
('nowiv','nowiv'),
('nowmnu','nowmnu'),
('nowrap','nowrap'),
('nowrapscan','nowrapscan'),
('nowrite','nowrite'),
('nowriteany','nowriteany'),
('nowritebackup','nowritebackup'),
('nows','nows'),
('nrformats','nrformats'),
('nu','nu'),
('number','number'),
('numberwidth','numberwidth'),
('nuw','nuw'),
('odev','odev'),
('oft','oft'),
('ofu','ofu'),
('omnifunc','omnifunc'),
('opendevice','opendevice'),
('operatorfunc','operatorfunc'),
('opfunc','opfunc'),
('osfiletype','osfiletype'),
('pa','pa'),
('para','para'),
('paragraphs','paragraphs'),
('paste','paste'),
('pastetoggle','pastetoggle'),
('patchexpr','patchexpr'),
('patchmode','patchmode'),
('path','path'),
('pdev','pdev'),
('penc','penc'),
('pex','pex'),
('pexpr','pexpr'),
('pfn','pfn'),
('ph','ph'),
('pheader','pheader'),
('pi','pi'),
('pm','pm'),
('pmbcs','pmbcs'),
('pmbfn','pmbfn'),
('popt','popt'),
('preserveindent','preserveindent'),
('previewheight','previewheight'),
('previewwindow','previewwindow'),
('printdevice','printdevice'),
('printencoding','printencoding'),
('printexpr','printexpr'),
('printfont','printfont'),
('printheader','printheader'),
('printmbcharset','printmbcharset'),
('printmbfont','printmbfont'),
('printoptions','printoptions'),
('prompt','prompt'),
('pt','pt'),
('pumheight','pumheight'),
('pvh','pvh'),
('pvw','pvw'),
('qe','qe'),
('quoteescape','quoteescape'),
('rdt','rdt'),
('re','re'),
('readonly','readonly'),
('redrawtime','redrawtime'),
('regexpengine','regexpengine'),
('relativenumber','relativenumber'),
('remap','remap'),
('report','report'),
('restorescreen','restorescreen'),
('revins','revins'),
('ri','ri'),
('rightleft','rightleft'),
('rightleftcmd','rightleftcmd'),
('rl','rl'),
('rlc','rlc'),
('rnu','rnu'),
('ro','ro'),
('rs','rs'),
('rtp','rtp'),
('ru','ru'),
('ruf','ruf'),
('ruler','ruler'),
('rulerformat','rulerformat'),
('runtimepath','runtimepath'),
('sb','sb'),
('sbo','sbo'),
('sbr','sbr'),
('sc','sc'),
('scb','scb'),
('scr','scr'),
('scroll','scroll'),
('scrollbind','scrollbind'),
('scrolljump','scrolljump'),
('scrolloff','scrolloff'),
('scrollopt','scrollopt'),
('scs','scs'),
('sect','sect'),
('sections','sections'),
('secure','secure'),
('sel','sel'),
('selection','selection'),
('selectmode','selectmode'),
('sessionoptions','sessionoptions'),
('sft','sft'),
('sh','sh'),
('shcf','shcf'),
('shell','shell'),
('shellcmdflag','shellcmdflag'),
('shellpipe','shellpipe'),
('shellquote','shellquote'),
('shellredir','shellredir'),
('shellslash','shellslash'),
('shelltemp','shelltemp'),
('shelltype','shelltype'),
('shellxescape','shellxescape'),
('shellxquote','shellxquote'),
('shiftround','shiftround'),
('shiftwidth','shiftwidth'),
('shm','shm'),
('shortmess','shortmess'),
('shortname','shortname'),
('showbreak','showbreak'),
('showcmd','showcmd'),
('showfulltag','showfulltag'),
('showmatch','showmatch'),
('showmode','showmode'),
('showtabline','showtabline'),
('shq','shq'),
('si','si'),
('sidescroll','sidescroll'),
('sidescrolloff','sidescrolloff'),
('siso','siso'),
('sj','sj'),
('slm','slm'),
('sm','sm'),
('smartcase','smartcase'),
('smartindent','smartindent'),
('smarttab','smarttab'),
('smc','smc'),
('smd','smd'),
('sn','sn'),
('so','so'),
('softtabstop','softtabstop'),
('sol','sol'),
('sp','sp'),
('spc','spc'),
('spell','spell'),
('spellcapcheck','spellcapcheck'),
('spellfile','spellfile'),
('spelllang','spelllang'),
('spellsuggest','spellsuggest'),
('spf','spf'),
('spl','spl'),
('splitbelow','splitbelow'),
('splitright','splitright'),
('spr','spr'),
('sps','sps'),
('sr','sr'),
('srr','srr'),
('ss','ss'),
('ssl','ssl'),
('ssop','ssop'),
('st','st'),
('sta','sta'),
('stal','stal'),
('startofline','startofline'),
('statusline','statusline'),
('stl','stl'),
('stmp','stmp'),
('sts','sts'),
('su','su'),
('sua','sua'),
('suffixes','suffixes'),
('suffixesadd','suffixesadd'),
('sw','sw'),
('swapfile','swapfile'),
('swapsync','swapsync'),
('swb','swb'),
('swf','swf'),
('switchbuf','switchbuf'),
('sws','sws'),
('sxe','sxe'),
('sxq','sxq'),
('syn','syn'),
('synmaxcol','synmaxcol'),
('syntax','syntax'),
('t_AB','t_AB'),
('t_AF','t_AF'),
('t_AL','t_AL'),
('t_CS','t_CS'),
('t_CV','t_CV'),
('t_Ce','t_Ce'),
('t_Co','t_Co'),
('t_Cs','t_Cs'),
('t_DL','t_DL'),
('t_EI','t_EI'),
('t_F1','t_F1'),
('t_F2','t_F2'),
('t_F3','t_F3'),
('t_F4','t_F4'),
('t_F5','t_F5'),
('t_F6','t_F6'),
('t_F7','t_F7'),
('t_F8','t_F8'),
('t_F9','t_F9'),
('t_IE','t_IE'),
('t_IS','t_IS'),
('t_K1','t_K1'),
('t_K3','t_K3'),
('t_K4','t_K4'),
('t_K5','t_K5'),
('t_K6','t_K6'),
('t_K7','t_K7'),
('t_K8','t_K8'),
('t_K9','t_K9'),
('t_KA','t_KA'),
('t_KB','t_KB'),
('t_KC','t_KC'),
('t_KD','t_KD'),
('t_KE','t_KE'),
('t_KF','t_KF'),
('t_KG','t_KG'),
('t_KH','t_KH'),
('t_KI','t_KI'),
('t_KJ','t_KJ'),
('t_KK','t_KK'),
('t_KL','t_KL'),
('t_RI','t_RI'),
('t_RV','t_RV'),
('t_SI','t_SI'),
('t_Sb','t_Sb'),
('t_Sf','t_Sf'),
('t_WP','t_WP'),
('t_WS','t_WS'),
('t_ZH','t_ZH'),
('t_ZR','t_ZR'),
('t_al','t_al'),
('t_bc','t_bc'),
('t_cd','t_cd'),
('t_ce','t_ce'),
('t_cl','t_cl'),
('t_cm','t_cm'),
('t_cs','t_cs'),
('t_da','t_da'),
('t_db','t_db'),
('t_dl','t_dl'),
('t_fs','t_fs'),
('t_k1','t_k1'),
('t_k2','t_k2'),
('t_k3','t_k3'),
('t_k4','t_k4'),
('t_k5','t_k5'),
('t_k6','t_k6'),
('t_k7','t_k7'),
('t_k8','t_k8'),
('t_k9','t_k9'),
('t_kB','t_kB'),
('t_kD','t_kD'),
('t_kI','t_kI'),
('t_kN','t_kN'),
('t_kP','t_kP'),
('t_kb','t_kb'),
('t_kd','t_kd'),
('t_ke','t_ke'),
('t_kh','t_kh'),
('t_kl','t_kl'),
('t_kr','t_kr'),
('t_ks','t_ks'),
('t_ku','t_ku'),
('t_le','t_le'),
('t_mb','t_mb'),
('t_md','t_md'),
('t_me','t_me'),
('t_mr','t_mr'),
('t_ms','t_ms'),
('t_nd','t_nd'),
('t_op','t_op'),
('t_se','t_se'),
('t_so','t_so'),
('t_sr','t_sr'),
('t_te','t_te'),
('t_ti','t_ti'),
('t_ts','t_ts'),
('t_u7','t_u7'),
('t_ue','t_ue'),
('t_us','t_us'),
('t_ut','t_ut'),
('t_vb','t_vb'),
('t_ve','t_ve'),
('t_vi','t_vi'),
('t_vs','t_vs'),
('t_xs','t_xs'),
('ta','ta'),
('tabline','tabline'),
('tabpagemax','tabpagemax'),
('tabstop','tabstop'),
('tag','tag'),
('tagbsearch','tagbsearch'),
('taglength','taglength'),
('tagrelative','tagrelative'),
('tags','tags'),
('tagstack','tagstack'),
('tal','tal'),
('tb','tb'),
('tbi','tbi'),
('tbidi','tbidi'),
('tbis','tbis'),
('tbs','tbs'),
('tenc','tenc'),
('term','term'),
('termbidi','termbidi'),
('termencoding','termencoding'),
('terse','terse'),
('textauto','textauto'),
('textmode','textmode'),
('textwidth','textwidth'),
('tf','tf'),
('tgst','tgst'),
('thesaurus','thesaurus'),
('tildeop','tildeop'),
('timeout','timeout'),
('timeoutlen','timeoutlen'),
('title','title'),
('titlelen','titlelen'),
('titleold','titleold'),
('titlestring','titlestring'),
('tl','tl'),
('tm','tm'),
('to','to'),
('toolbar','toolbar'),
('toolbariconsize','toolbariconsize'),
('top','top'),
('tpm','tpm'),
('tr','tr'),
('ts','ts'),
('tsl','tsl'),
('tsr','tsr'),
('ttimeout','ttimeout'),
('ttimeoutlen','ttimeoutlen'),
('ttm','ttm'),
('tty','tty'),
('ttybuiltin','ttybuiltin'),
('ttyfast','ttyfast'),
('ttym','ttym'),
('ttymouse','ttymouse'),
('ttyscroll','ttyscroll'),
('ttytype','ttytype'),
('tw','tw'),
('tx','tx'),
('uc','uc'),
('udf','udf'),
('udir','udir'),
('ul','ul'),
('undodir','undodir'),
('undofile','undofile'),
('undolevels','undolevels'),
('undoreload','undoreload'),
('updatecount','updatecount'),
('updatetime','updatetime'),
('ur','ur'),
('ut','ut'),
('vb','vb'),
('vbs','vbs'),
('vdir','vdir'),
('ve','ve'),
('verbose','verbose'),
('verbosefile','verbosefile'),
('vfile','vfile'),
('vi','vi'),
('viewdir','viewdir'),
('viewoptions','viewoptions'),
('viminfo','viminfo'),
('virtualedit','virtualedit'),
('visualbell','visualbell'),
('vnoremap','vnoremap'),
('vop','vop'),
('wa','wa'),
('wak','wak'),
('warn','warn'),
('wb','wb'),
('wc','wc'),
('wcm','wcm'),
('wd','wd'),
('weirdinvert','weirdinvert'),
('wfh','wfh'),
('wfw','wfw'),
('wh','wh'),
('whichwrap','whichwrap'),
('wi','wi'),
('wic','wic'),
('wig','wig'),
('wildchar','wildchar'),
('wildcharm','wildcharm'),
('wildignore','wildignore'),
('wildignorecase','wildignorecase'),
('wildmenu','wildmenu'),
('wildmode','wildmode'),
('wildoptions','wildoptions'),
('wim','wim'),
('winaltkeys','winaltkeys'),
('window','window'),
('winfixheight','winfixheight'),
('winfixwidth','winfixwidth'),
('winheight','winheight'),
('winminheight','winminheight'),
('winminwidth','winminwidth'),
('winwidth','winwidth'),
('wiv','wiv'),
('wiw','wiw'),
('wm','wm'),
('wmh','wmh'),
('wmnu','wmnu'),
('wmw','wmw'),
('wop','wop'),
('wrap','wrap'),
('wrapmargin','wrapmargin'),
('wrapscan','wrapscan'),
('write','write'),
('writeany','writeany'),
('writebackup','writebackup'),
('writedelay','writedelay'),
('ws','ws'),
('ww','ww'),
)
return var
option = _getoption()
| 57,066 | Python | 28.43115 | 74 | 0.424176 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/basic.py | """
pygments.lexers.basic
~~~~~~~~~~~~~~~~~~~~~
Lexers for BASIC like languages (other than VB.net).
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words, include
from pygments.token import Comment, Error, Keyword, Name, Number, \
Punctuation, Operator, String, Text, Whitespace
from pygments.lexers import _vbscript_builtins
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
class BlitzMaxLexer(RegexLexer):
"""
For BlitzMax source code.
.. versionadded:: 1.4
"""
name = 'BlitzMax'
url = 'http://blitzbasic.com'
aliases = ['blitzmax', 'bmax']
filenames = ['*.bmx']
mimetypes = ['text/x-bmx']
bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
bmax_sktypes = r'@{1,2}|[!#$%]'
bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
bmax_name = r'[a-z_]\w*'
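    # bmax_var matches a name plus its type annotation: either a shorthand
    # sigil (@, @@, !, #, $, %) or ":" followed by a primitive or user-defined
    # type, with an optional trailing "Ptr"; bmax_func is the same pattern
    # followed by an opening parenthesis.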
bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
(bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'\s+', Whitespace),
(r'(\.\.)(\n)', bygroups(Text, Whitespace)), # Line continuation
# Comments
(r"'.*?\n", Comment.Single),
(r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]*(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
(bmax_vopwords), Operator),
(r'[(),.:\[\]]', Punctuation),
(r'(?:#[\w \t]*)', Name.Label),
(r'(?:\?[\w \t]*)', Comment.Preproc),
# Identifiers
(r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Whitespace, Punctuation, Name.Class)),
(r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
(bmax_name, bmax_name),
bygroups(Keyword.Reserved, Whitespace, Keyword.Namespace)),
(bmax_func, bygroups(Name.Function, Whitespace, Keyword.Type,
Operator, Whitespace, Punctuation, Whitespace,
Keyword.Type, Name.Class, Whitespace,
Keyword.Type, Whitespace, Punctuation)),
(bmax_var, bygroups(Name.Variable, Whitespace, Keyword.Type, Operator,
Whitespace, Punctuation, Whitespace, Keyword.Type,
Name.Class, Whitespace, Keyword.Type)),
(r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Whitespace, Name.Class)),
# Keywords
(r'\b(Ptr)\b', Keyword.Type),
(r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
(words((
'TNullMethodException', 'TNullFunctionException',
'TNullObjectException', 'TArrayBoundsException',
'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
(words((
'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
'RestoreData'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
(r'(%s)' % (bmax_name), Name.Variable),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
class BlitzBasicLexer(RegexLexer):
"""
For BlitzBasic source code.
.. versionadded:: 2.0
"""
name = 'BlitzBasic'
url = 'http://blitzbasic.com'
aliases = ['blitzbasic', 'b3d', 'bplus']
filenames = ['*.bb', '*.decls']
mimetypes = ['text/x-bb']
bb_sktypes = r'@{1,2}|[#$%]'
bb_name = r'[a-z]\w*'
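    # bb_var matches a name optionally followed by a shorthand type sigil
    # (@, @@, #, $, %) or by "." and a user-defined type name.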
bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
(bb_name, bb_sktypes, bb_name)
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'\s+', Whitespace),
# Comments
(r";.*?\n", Comment.Single),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
'First', 'Last', 'Before', 'After'),
prefix=r'\b', suffix=r'\b'),
Operator),
(r'([+\-*/~=<>^])', Operator),
(r'[(),:\[\]\\]', Punctuation),
(r'\.([ \t]*)(%s)' % bb_name, Name.Label),
# Identifiers
(r'\b(New)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Whitespace, Name.Class)),
(r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Whitespace, Name.Label)),
(r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
bygroups(Operator, Whitespace, Punctuation, Whitespace, Name.Class)),
(r'\b%s\b([ \t]*)(\()' % bb_var,
bygroups(Name.Function, Whitespace, Keyword.Type, Whitespace, Punctuation,
Whitespace, Name.Class, Whitespace, Punctuation)),
(r'\b(Function)\b([ \t]+)%s' % bb_var,
bygroups(Keyword.Reserved, Whitespace, Name.Function, Whitespace, Keyword.Type,
Whitespace, Punctuation, Whitespace, Name.Class)),
(r'\b(Type)([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Whitespace, Name.Class)),
# Keywords
(r'\b(Pi|True|False|Null)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
(words((
'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
# (r'(%s)' % (bb_name), Name.Variable),
(bb_var, bygroups(Name.Variable, Whitespace, Keyword.Type,
Whitespace, Punctuation, Whitespace, Name.Class)),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"\n]+', String.Double),
],
}
class MonkeyLexer(RegexLexer):
"""
For
`Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
source code.
.. versionadded:: 1.6
"""
name = 'Monkey'
aliases = ['monkey']
filenames = ['*.monkey']
mimetypes = ['text/x-monkey']
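    # Monkey distinguishes identifiers by case: variables start lower-case,
    # functions and classes start upper-case, constants are all upper-case.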
name_variable = r'[a-z_]\w*'
name_function = r'[A-Z]\w*'
name_constant = r'[A-Z_][A-Z0-9_]*'
name_class = r'[A-Z]\w*'
name_module = r'[a-z0-9_]*'
keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
# ? == Bool // % == Int // # == Float // $ == String
keyword_type_special = r'[?%#$]'
flags = re.MULTILINE
tokens = {
'root': [
# Text
(r'\s+', Whitespace),
# Comments
(r"'.*", Comment),
(r'(?i)^#rem\b', Comment.Multiline, 'comment'),
# preprocessor directives
(r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
# preprocessor variable (any line starting with '#' that is not a directive)
(r'^#', Comment.Preproc, 'variables'),
# String
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-fA-Z]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Native data types
(r'\b%s\b' % keyword_type, Keyword.Type),
# Exception handling
(r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
(r'Throwable', Name.Exception),
# Builtins
(r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
(r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
(r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
# Keywords
(r'(?i)^(Import)(\s+)(.*)(\n)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace, Whitespace)),
(r'(?i)^Strict\b.*\n', Keyword.Reserved),
(r'(?i)(Const|Local|Global|Field)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'variables'),
(r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
bygroups(Keyword.Reserved, Whitespace), 'classname'),
(r'(?i)(Function|Method)(\s+)',
bygroups(Keyword.Reserved, Whitespace), 'funcname'),
(r'(?i)(?:End|Return|Public|Private|Extern|Property|'
r'Final|Abstract)\b', Keyword.Reserved),
# Flow Control stuff
(r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
r'Select|Case|Default|'
r'While|Wend|'
r'Repeat|Until|Forever|'
r'For|To|Until|Step|EachIn|Next|'
r'Exit|Continue)(?=\s)', Keyword.Reserved),
# not used yet
(r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
# Array
(r'[\[\]]', Punctuation),
# Other
(r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
(r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
(r'[(){}!#,.:]', Punctuation),
# catch the rest
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_function, Name.Function),
(r'%s\b' % name_variable, Name.Variable),
],
'funcname': [
(r'(?i)%s\b' % name_function, Name.Function),
(r':', Punctuation, 'classname'),
(r'\s+', Whitespace),
(r'\(', Punctuation, 'variables'),
(r'\)', Punctuation, '#pop')
],
'classname': [
(r'%s\.' % name_module, Name.Namespace),
(r'%s\b' % keyword_type, Keyword.Type),
(r'%s\b' % name_class, Name.Class),
# array (of given size)
(r'(\[)(\s*)(\d*)(\s*)(\])',
bygroups(Punctuation, Whitespace, Number.Integer, Whitespace, Punctuation)),
# generics
(r'\s+(?!<)', Whitespace, '#pop'),
(r'<', Punctuation, '#push'),
(r'>', Punctuation, '#pop'),
(r'\n', Whitespace, '#pop'),
default('#pop')
],
'variables': [
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_variable, Name.Variable),
(r'%s' % keyword_type_special, Keyword.Type),
(r'\s+', Whitespace),
(r':', Punctuation, 'classname'),
(r',', Punctuation, '#push'),
default('#pop')
],
'string': [
(r'[^"~]+', String.Double),
(r'~q|~n|~r|~t|~z|~~', String.Escape),
(r'"', String.Double, '#pop'),
],
'comment': [
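            # #Rem blocks nest: push on every #rem and pop on the matching #end.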
(r'(?i)^#rem.*?', Comment.Multiline, "#push"),
(r'(?i)^#end.*?', Comment.Multiline, "#pop"),
(r'\n', Comment.Multiline),
(r'.+', Comment.Multiline),
],
}
class CbmBasicV2Lexer(RegexLexer):
"""
For CBM BASIC V2 sources.
.. versionadded:: 1.6
"""
name = 'CBM BASIC V2'
aliases = ['cbmbas']
filenames = ['*.bas']
flags = re.IGNORECASE
tokens = {
'root': [
(r'rem.*\n', Comment.Single),
(r'\s+', Whitespace),
(r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
(r'data|restore|dim|let|def|fn', Keyword.Declaration),
(r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
(r'[-+*/^<>=]', Operator),
(r'not|and|or', Operator.Word),
(r'"[^"\n]*.', String),
(r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
(r'[(),:;]', Punctuation),
(r'\w+[$%]?', Name),
]
}
def analyse_text(text):
# if it starts with a line number, it shouldn't be a "modern" Basic
# like VB.net
if re.match(r'^\d+', text):
return 0.2
class QBasicLexer(RegexLexer):
"""
For
`QBasic <http://en.wikipedia.org/wiki/QBasic>`_
source code.
.. versionadded:: 2.0
"""
name = 'QBasic'
aliases = ['qbasic', 'basic']
filenames = ['*.BAS', '*.bas']
mimetypes = ['text/basic']
declarations = ('DATA', 'LET')
functions = (
'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
'VARPTR$', 'VARSEG'
)
metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')
operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')
statements = (
'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY',
'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
)
keywords = (
'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
)
tokens = {
'root': [
(r'\n+', Text),
(r'\s+', Text.Whitespace),
(r'^(\s*)(\d*)(\s*)(REM .*)$',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
Comment.Single)),
(r'^(\s*)(\d+)(\s*)',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
(r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
(r'(?=[^"]*)\'.*$', Comment.Single),
(r'"[^\n"]*"', String.Double),
(r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
(r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name)),
(r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name.Variable.Global)),
(r'(DIM)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
(r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
Operator)),
(r'(GOTO|GOSUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
(r'(SUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
include('declarations'),
include('functions'),
include('metacommands'),
include('operators'),
include('statements'),
include('keywords'),
(r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
(r'[a-zA-Z_]\w*\:', Name.Label),
(r'\-?\d*\.\d+[@|#]?', Number.Float),
(r'\-?\d+[@|#]', Number.Float),
(r'\-?\d+#?', Number.Integer.Long),
(r'\-?\d+#?', Number.Integer),
(r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
(r'[\[\]{}(),;]', Punctuation),
(r'[\w]+', Name.Variable.Global),
],
# can't use regular \b because of X$()
# XXX: use words() here
'declarations': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
Keyword.Declaration),
],
'functions': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
Keyword.Reserved),
],
'metacommands': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
Keyword.Constant),
],
'operators': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
],
'statements': [
(r'\b(%s)\b' % '|'.join(map(re.escape, statements)),
Keyword.Reserved),
],
'keywords': [
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
],
}
def analyse_text(text):
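        # the $DYNAMIC/$STATIC metacommands are essentially QBasic-specific,
        # so treat them as a strong signal.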
if '$DYNAMIC' in text or '$STATIC' in text:
return 0.9
class VBScriptLexer(RegexLexer):
"""
    VBScript is a scripting language modeled on Visual Basic.
.. versionadded:: 2.4
"""
name = 'VBScript'
aliases = ['vbscript']
filenames = ['*.vbs', '*.VBS']
flags = re.IGNORECASE
tokens = {
'root': [
(r"'[^\n]*", Comment.Single),
(r'\s+', Whitespace),
('"', String.Double, 'string'),
('&h[0-9a-f]+', Number.Hex),
# Float variant 1, for example: 1., 1.e2, 1.2e3
(r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
(r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), # Float variant 2, for example: .1, .1e2
(r'[0-9]+e[+-]?[0-9]+', Number.Float), # Float variant 3, for example: 123e45
(r'[0-9]+', Number.Integer),
('#.+#', String), # date or time value
(r'(dim)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Variable), 'dim_more'),
(r'(function|sub)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Function)),
(r'(class)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(const)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Constant)),
(r'(end)(\s+)(class|function|if|property|sub|with)',
bygroups(Keyword, Whitespace, Keyword)),
(r'(on)(\s+)(error)(\s+)(goto)(\s+)(0)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Number.Integer)),
(r'(on)(\s+)(error)(\s+)(resume)(\s+)(next)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(r'(option)(\s+)(explicit)', bygroups(Keyword, Whitespace, Keyword)),
(r'(property)(\s+)(get|let|set)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Property)),
(r'rem\s.*[^\n]*', Comment.Single),
(words(_vbscript_builtins.KEYWORDS, suffix=r'\b'), Keyword),
(words(_vbscript_builtins.OPERATORS), Operator),
(words(_vbscript_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
(words(_vbscript_builtins.BUILTIN_CONSTANTS, suffix=r'\b'), Name.Constant),
(words(_vbscript_builtins.BUILTIN_FUNCTIONS, suffix=r'\b'), Name.Builtin),
(words(_vbscript_builtins.BUILTIN_VARIABLES, suffix=r'\b'), Name.Builtin),
(r'[a-z_][a-z0-9_]*', Name),
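            # a lone "_" at the end of a line is VBScript's line-continuation
            # marker.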
(r'\b_\n', Operator),
(words(r'(),.:'), Punctuation),
(r'.+(\n)?', Error)
],
'dim_more': [
(r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)',
bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
default('#pop'),
],
'string': [
(r'[^"\n]+', String.Double),
(r'\"\"', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, '#pop'), # Unterminated string
],
}
class BBCBasicLexer(RegexLexer):
"""
BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
It is also used by BBC Basic For Windows.
.. versionadded:: 2.4
"""
base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']
basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']
name = 'BBC Basic'
aliases = ['bbcbasic']
filenames = ['*.bbc']
tokens = {
'root': [
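            # a line is either a numeric line-number label, a "*" operating
            # system (star) command, or ordinary code.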
(r"[0-9]+", Name.Label),
(r"(\*)([^\n]*)",
bygroups(Keyword.Pseudo, Comment.Special)),
default('code'),
],
'code': [
(r"(REM)([^\n]*)",
bygroups(Keyword.Declaration, Comment.Single)),
(r'\n', Whitespace, 'root'),
(r'\s+', Whitespace),
(r':', Comment.Preproc),
# Some special cases to make functions come out nicer
(r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword.Declaration, Whitespace,
Keyword.Declaration, Name.Function)),
(r'(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword, Name.Function)),
(r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(TRUE|FALSE)', Keyword.Constant),
(r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)',
Keyword.Pseudo),
(words(base_keywords), Keyword),
(words(basic5_keywords), Keyword),
('"', String.Double, 'string'),
('%[01]{1,32}', Number.Bin),
('&[0-9a-f]{1,8}', Number.Hex),
(r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
(r'[+-]?\d+', Number.Integer),
(r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
(r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
],
'string': [
(r'[^"\n]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, 'root'), # Unterminated string
],
}
def analyse_text(text):
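        # BBC BASIC listings conventionally open with a "REM >filename" header
        # (optionally preceded by a line number), so use that as a strong hint.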
if text.startswith('10REM >') or text.startswith('REM >'):
return 0.9
| 27,923 | Python | 40.927928 | 104 | 0.457329 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/teal.py | """
pygments.lexers.teal
~~~~~~~~~~~~~~~~~~~~
Lexer for TEAL.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, words
from pygments.token import Comment, Name, Number, String, Text, Keyword, \
Whitespace
__all__ = ['TealLexer']
class TealLexer(RegexLexer):
"""
For the Transaction Execution Approval Language (TEAL)
For more information about the grammar, see:
https://github.com/algorand/go-algorand/blob/master/data/transactions/logic/assembler.go
.. versionadded:: 2.9
"""
name = 'teal'
url = 'https://developer.algorand.org/docs/reference/teal/specification/'
aliases = ['teal']
filenames = ['*.teal']
keywords = words({
'Sender', 'Fee', 'FirstValid', 'FirstValidTime', 'LastValid', 'Note',
'Lease', 'Receiver', 'Amount', 'CloseRemainderTo', 'VotePK',
'SelectionPK', 'VoteFirst', 'VoteLast', 'VoteKeyDilution', 'Type',
'TypeEnum', 'XferAsset', 'AssetAmount', 'AssetSender', 'AssetReceiver',
'AssetCloseTo', 'GroupIndex', 'TxID', 'ApplicationID', 'OnCompletion',
'ApplicationArgs', 'NumAppArgs', 'Accounts', 'NumAccounts',
'ApprovalProgram', 'ClearStateProgram', 'RekeyTo', 'ConfigAsset',
'ConfigAssetTotal', 'ConfigAssetDecimals', 'ConfigAssetDefaultFrozen',
'ConfigAssetUnitName', 'ConfigAssetName', 'ConfigAssetURL',
'ConfigAssetMetadataHash', 'ConfigAssetManager', 'ConfigAssetReserve',
'ConfigAssetFreeze', 'ConfigAssetClawback', 'FreezeAsset',
'FreezeAssetAccount', 'FreezeAssetFrozen',
'NoOp', 'OptIn', 'CloseOut', 'ClearState', 'UpdateApplication',
'DeleteApplication',
'MinTxnFee', 'MinBalance', 'MaxTxnLife', 'ZeroAddress', 'GroupSize',
'LogicSigVersion', 'Round', 'LatestTimestamp', 'CurrentApplicationID',
'AssetBalance', 'AssetFrozen',
'AssetTotal', 'AssetDecimals', 'AssetDefaultFrozen', 'AssetUnitName',
'AssetName', 'AssetURL', 'AssetMetadataHash', 'AssetManager',
'AssetReserve', 'AssetFreeze', 'AssetClawback',
}, suffix=r'\b')
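    # an identifier is any run of non-whitespace; the first alternative stops
    # just before an inline "//" comment so the comment is not swallowed.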
identifier = r'[^ \t\n]+(?=\/\/)|[^ \t\n]+'
newline = r'\r?\n'
tokens = {
'root': [
include('whitespace'),
# pragmas match specifically on the space character
(r'^#pragma .*' + newline, Comment.Directive),
# labels must be followed by a space,
# but anything after that is ignored
('(' + identifier + ':' + ')' + '([ \t].*)',
bygroups(Name.Label, Comment.Single)),
(identifier, Name.Function, 'function-args'),
],
'function-args': [
include('whitespace'),
(r'"', String, 'string'),
(r'(b(?:ase)?(?:32|64) ?)(\(?[a-zA-Z0-9+/=]+\)?)',
bygroups(String.Affix, String.Other)),
(r'[A-Z2-7]{58}', Number), # address
(r'0x[\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
(keywords, Keyword),
(identifier, Name.Attributes), # branch targets
(newline, Text, '#pop'),
],
'string': [
(r'\\(?:["nrt\\]|x\d\d)', String.Escape),
(r'[^\\\"\n]+', String),
(r'"', String, '#pop'),
],
'whitespace': [
(r'[ \t]+', Whitespace),
(r'//[^\n]+', Comment.Single),
],
}
| 3,523 | Python | 38.155555 | 92 | 0.566279 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/stata.py | """
pygments.lexers.stata
~~~~~~~~~~~~~~~~~~~~~
Lexer for Stata
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, include, words
from pygments.token import Comment, Keyword, Name, Number, \
String, Text, Operator
from pygments.lexers._stata_builtins import builtins_base, builtins_functions
__all__ = ['StataLexer']
class StataLexer(RegexLexer):
"""
For Stata do files.
.. versionadded:: 2.2
"""
# Syntax based on
# - http://fmwww.bc.edu/RePEc/bocode/s/synlightlist.ado
# - https://github.com/isagalaev/highlight.js/blob/master/src/languages/stata.js
# - https://github.com/jpitblado/vim-stata/blob/master/syntax/stata.vim
name = 'Stata'
url = 'http://www.stata.com/'
aliases = ['stata', 'do']
filenames = ['*.do', '*.ado']
mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('comments'),
include('strings'),
include('macros'),
include('numbers'),
include('keywords'),
include('operators'),
include('format'),
(r'.', Text),
],
# Comments are a complicated beast in Stata because they can be
# nested and there are a few corner cases with that. See:
# - github.com/kylebarron/language-stata/issues/90
# - statalist.org/forums/forum/general-stata-discussion/general/1448244
'comments': [
(r'(^//|(?<=\s)//)(?!/)', Comment.Single, 'comments-double-slash'),
(r'^\s*\*', Comment.Single, 'comments-star'),
(r'/\*', Comment.Multiline, 'comments-block'),
(r'(^///|(?<=\s)///)', Comment.Special, 'comments-triple-slash')
],
'comments-block': [
(r'/\*', Comment.Multiline, '#push'),
# this ends and restarts a comment block. but need to catch this so
            # that it doesn't start _another_ level of comment blocks
(r'\*/\*', Comment.Multiline),
(r'(\*/\s+\*(?!/)[^\n]*)|(\*/)', Comment.Multiline, '#pop'),
# Match anything else as a character inside the comment
(r'.', Comment.Multiline),
],
'comments-star': [
(r'///.*?\n', Comment.Single,
('#pop', 'comments-triple-slash')),
(r'(^//|(?<=\s)//)(?!/)', Comment.Single,
('#pop', 'comments-double-slash')),
(r'/\*', Comment.Multiline, 'comments-block'),
(r'.(?=\n)', Comment.Single, '#pop'),
(r'.', Comment.Single),
],
'comments-triple-slash': [
(r'\n', Comment.Special, '#pop'),
# A // breaks out of a comment for the rest of the line
(r'//.*?(?=\n)', Comment.Single, '#pop'),
(r'.', Comment.Special),
],
'comments-double-slash': [
(r'\n', Text, '#pop'),
(r'.', Comment.Single),
],
# `"compound string"' and regular "string"; note the former are
# nested.
'strings': [
(r'`"', String, 'string-compound'),
(r'(?<!`)"', String, 'string-regular'),
],
'string-compound': [
(r'`"', String, '#push'),
(r'"\'', String, '#pop'),
(r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
include('macros'),
(r'.', String)
],
'string-regular': [
(r'(")(?!\')|(?=\n)', String, '#pop'),
(r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
include('macros'),
(r'.', String)
],
# A local is usually
# `\w{0,31}'
# `:extended macro'
# `=expression'
# `[rsen](results)'
# `(++--)scalar(++--)'
#
# However, there are all sorts of weird rules wrt edge
# cases. Instead of writing 27 exceptions, anything inside
# `' is a local.
#
# A global is more restricted, so we do follow rules. Note only
# locals explicitly enclosed ${} can be nested.
'macros': [
(r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
(r'\$', Name.Variable.Global, 'macro-global-name'),
(r'`', Name.Variable, 'macro-local'),
],
'macro-local': [
(r'`', Name.Variable, '#push'),
(r"'", Name.Variable, '#pop'),
(r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
(r'\$', Name.Variable.Global, 'macro-global-name'),
(r'.', Name.Variable), # fallback
],
'macro-global-nested': [
(r'\$(\{|(?=[$`]))', Name.Variable.Global, '#push'),
(r'\}', Name.Variable.Global, '#pop'),
(r'\$', Name.Variable.Global, 'macro-global-name'),
(r'`', Name.Variable, 'macro-local'),
(r'\w', Name.Variable.Global), # fallback
default('#pop'),
],
'macro-global-name': [
(r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested', '#pop'),
(r'\$', Name.Variable.Global, 'macro-global-name', '#pop'),
(r'`', Name.Variable, 'macro-local', '#pop'),
(r'\w{1,32}', Name.Variable.Global, '#pop'),
],
# Built in functions and statements
'keywords': [
(words(builtins_functions, prefix = r'\b', suffix = r'(?=\()'),
Name.Function),
(words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'),
Keyword),
],
# http://www.stata.com/help.cgi?operators
'operators': [
(r'-|==|<=|>=|<|>|&|!=', Operator),
(r'\*|\+|\^|/|!|~|==|~=', Operator)
],
# Stata numbers
'numbers': [
# decimal number
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b',
Number),
],
# Stata formats
'format': [
(r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Other),
(r'%(21x|16H|16L|8H|8L)', Name.Other),
(r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg)\S{0,32}', Name.Other),
(r'%[-~]?\d{1,4}s', Name.Other),
]
}
| 6,416 | Python | 36.308139 | 86 | 0.465243 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/teraterm.py | """
pygments.lexers.teraterm
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Tera Term macro files.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Name, String, \
Number, Keyword, Error
__all__ = ['TeraTermLexer']
class TeraTermLexer(RegexLexer):
"""
For Tera Term macro source code.
.. versionadded:: 2.4
"""
name = 'Tera Term macro'
url = 'https://ttssh2.osdn.jp/'
aliases = ['teratermmacro', 'teraterm', 'ttl']
filenames = ['*.ttl']
mimetypes = ['text/x-teratermmacro']
tokens = {
'root': [
include('comments'),
include('labels'),
include('commands'),
include('builtin-variables'),
include('user-variables'),
include('operators'),
include('numeric-literals'),
include('string-literals'),
include('all-whitespace'),
(r'\S', Text),
],
'comments': [
(r';[^\r\n]*', Comment.Single),
(r'/\*', Comment.Multiline, 'in-comment'),
],
'in-comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'[^*/]+', Comment.Multiline),
(r'[*/]', Comment.Multiline)
],
'labels': [
(r'(?i)^(\s*)(:[a-z0-9_]+)', bygroups(Text.Whitespace, Name.Label)),
],
'commands': [
(
r'(?i)\b('
r'basename|'
r'beep|'
r'bplusrecv|'
r'bplussend|'
r'break|'
r'bringupbox|'
# 'call' is handled separately.
r'callmenu|'
r'changedir|'
r'checksum16|'
r'checksum16file|'
r'checksum32|'
r'checksum32file|'
r'checksum8|'
r'checksum8file|'
r'clearscreen|'
r'clipb2var|'
r'closesbox|'
r'closett|'
r'code2str|'
r'connect|'
r'continue|'
r'crc16|'
r'crc16file|'
r'crc32|'
r'crc32file|'
r'cygconnect|'
r'delpassword|'
r'dirname|'
r'dirnamebox|'
r'disconnect|'
r'dispstr|'
r'do|'
r'else|'
r'elseif|'
r'enablekeyb|'
r'end|'
r'endif|'
r'enduntil|'
r'endwhile|'
r'exec|'
r'execcmnd|'
r'exit|'
r'expandenv|'
r'fileclose|'
r'fileconcat|'
r'filecopy|'
r'filecreate|'
r'filedelete|'
r'filelock|'
r'filemarkptr|'
r'filenamebox|'
r'fileopen|'
r'fileread|'
r'filereadln|'
r'filerename|'
r'filesearch|'
r'fileseek|'
r'fileseekback|'
r'filestat|'
r'filestrseek|'
r'filestrseek2|'
r'filetruncate|'
r'fileunlock|'
r'filewrite|'
r'filewriteln|'
r'findclose|'
r'findfirst|'
r'findnext|'
r'flushrecv|'
r'foldercreate|'
r'folderdelete|'
r'foldersearch|'
r'for|'
r'getdate|'
r'getdir|'
r'getenv|'
r'getfileattr|'
r'gethostname|'
r'getipv4addr|'
r'getipv6addr|'
r'getmodemstatus|'
r'getpassword|'
r'getspecialfolder|'
r'gettime|'
r'gettitle|'
r'getttdir|'
r'getver|'
# 'goto' is handled separately.
r'if|'
r'ifdefined|'
r'include|'
r'inputbox|'
r'int2str|'
r'intdim|'
r'ispassword|'
r'kmtfinish|'
r'kmtget|'
r'kmtrecv|'
r'kmtsend|'
r'listbox|'
r'loadkeymap|'
r'logautoclosemode|'
r'logclose|'
r'loginfo|'
r'logopen|'
r'logpause|'
r'logrotate|'
r'logstart|'
r'logwrite|'
r'loop|'
r'makepath|'
r'messagebox|'
r'mpause|'
r'next|'
r'passwordbox|'
r'pause|'
r'quickvanrecv|'
r'quickvansend|'
r'random|'
r'recvln|'
r'regexoption|'
r'restoresetup|'
r'return|'
r'rotateleft|'
r'rotateright|'
r'scprecv|'
r'scpsend|'
r'send|'
r'sendbreak|'
r'sendbroadcast|'
r'sendfile|'
r'sendkcode|'
r'sendln|'
r'sendlnbroadcast|'
r'sendlnmulticast|'
r'sendmulticast|'
r'setbaud|'
r'setdate|'
r'setdebug|'
r'setdir|'
r'setdlgpos|'
r'setdtr|'
r'setecho|'
r'setenv|'
r'setexitcode|'
r'setfileattr|'
r'setflowctrl|'
r'setmulticastname|'
r'setpassword|'
r'setrts|'
r'setspeed|'
r'setsync|'
r'settime|'
r'settitle|'
r'show|'
r'showtt|'
r'sprintf|'
r'sprintf2|'
r'statusbox|'
r'str2code|'
r'str2int|'
r'strcompare|'
r'strconcat|'
r'strcopy|'
r'strdim|'
r'strinsert|'
r'strjoin|'
r'strlen|'
r'strmatch|'
r'strremove|'
r'strreplace|'
r'strscan|'
r'strspecial|'
r'strsplit|'
r'strtrim|'
r'testlink|'
r'then|'
r'tolower|'
r'toupper|'
r'unlink|'
r'until|'
r'uptime|'
r'var2clipb|'
r'wait|'
r'wait4all|'
r'waitevent|'
r'waitln|'
r'waitn|'
r'waitrecv|'
r'waitregex|'
r'while|'
r'xmodemrecv|'
r'xmodemsend|'
r'yesnobox|'
r'ymodemrecv|'
r'ymodemsend|'
r'zmodemrecv|'
r'zmodemsend'
r')\b',
Keyword,
),
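            # 'call' and 'goto' take a label operand, so match them separately
            # to highlight the target label.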
(r'(?i)(call|goto)([ \t]+)([a-z0-9_]+)',
bygroups(Keyword, Text.Whitespace, Name.Label)),
],
'builtin-variables': [
(
r'(?i)('
r'groupmatchstr1|'
r'groupmatchstr2|'
r'groupmatchstr3|'
r'groupmatchstr4|'
r'groupmatchstr5|'
r'groupmatchstr6|'
r'groupmatchstr7|'
r'groupmatchstr8|'
r'groupmatchstr9|'
r'inputstr|'
r'matchstr|'
r'mtimeout|'
r'param1|'
r'param2|'
r'param3|'
r'param4|'
r'param5|'
r'param6|'
r'param7|'
r'param8|'
r'param9|'
r'paramcnt|'
r'params|'
r'result|'
r'timeout'
r')\b',
Name.Builtin
),
],
'user-variables': [
(r'(?i)[a-z_][a-z0-9_]*', Name.Variable),
],
'numeric-literals': [
(r'(-?)([0-9]+)', bygroups(Operator, Number.Integer)),
(r'(?i)\$[0-9a-f]+', Number.Hex),
],
'string-literals': [
(r'(?i)#(?:[0-9]+|\$[0-9a-f]+)', String.Char),
(r"'[^'\n]*'", String.Single),
(r'"[^"\n]*"', String.Double),
# Opening quotes without a closing quote on the same line are errors.
(r"('[^']*)(\n)", bygroups(Error, Text.Whitespace)),
(r'("[^"]*)(\n)', bygroups(Error, Text.Whitespace)),
],
'operators': [
(r'and|not|or|xor', Operator.Word),
(r'[!%&*+<=>^~\|\/-]+', Operator),
(r'[()]', String.Symbol),
],
'all-whitespace': [
(r'\s+', Text.Whitespace),
],
}
# Turtle and Tera Term macro files share the same file extension
# but each has a recognizable and distinct syntax.
def analyse_text(text):
if re.search(TeraTermLexer.tokens['commands'][0][0], text):
return 0.01
| 9,719 | Python | 28.724771 | 81 | 0.366807 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_stata_builtins.py | """
pygments.lexers._stata_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtins for Stata
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
builtins_special = (
"if", "in", "using", "replace", "by", "gen", "generate"
)
builtins_base = (
"if", "else", "in", "foreach", "for", "forv", "forva",
"forval", "forvalu", "forvalue", "forvalues", "by", "bys",
"bysort", "quietly", "qui", "about", "ac",
"ac_7", "acprplot", "acprplot_7", "adjust", "ado", "adopath",
"adoupdate", "alpha", "ameans", "an", "ano", "anov", "anova",
"anova_estat", "anova_terms", "anovadef", "aorder", "ap", "app",
"appe", "appen", "append", "arch", "arch_dr", "arch_estat",
"arch_p", "archlm", "areg", "areg_p", "args", "arima",
"arima_dr", "arima_estat", "arima_p", "as", "asmprobit",
"asmprobit_estat", "asmprobit_lf", "asmprobit_mfx__dlg",
"asmprobit_p", "ass", "asse", "asser", "assert", "avplot",
"avplot_7", "avplots", "avplots_7", "bcskew0", "bgodfrey",
"binreg", "bip0_lf", "biplot", "bipp_lf", "bipr_lf",
"bipr_p", "biprobit", "bitest", "bitesti", "bitowt", "blogit",
"bmemsize", "boot", "bootsamp", "bootstrap", "bootstrap_8",
"boxco_l", "boxco_p", "boxcox", "boxcox_6", "boxcox_p",
"bprobit", "br", "break", "brier", "bro", "brow", "brows",
"browse", "brr", "brrstat", "bs", "bs_7", "bsampl_w",
"bsample", "bsample_7", "bsqreg", "bstat", "bstat_7", "bstat_8",
"bstrap", "bstrap_7", "ca", "ca_estat", "ca_p", "cabiplot",
"camat", "canon", "canon_8", "canon_8_p", "canon_estat",
"canon_p", "cap", "caprojection", "capt", "captu", "captur",
"capture", "cat", "cc", "cchart", "cchart_7", "cci",
"cd", "censobs_table", "centile", "cf", "char", "chdir",
"checkdlgfiles", "checkestimationsample", "checkhlpfiles",
"checksum", "chelp", "ci", "cii", "cl", "class", "classutil",
"clear", "cli", "clis", "clist", "clo", "clog", "clog_lf",
"clog_p", "clogi", "clogi_sw", "clogit", "clogit_lf",
"clogit_p", "clogitp", "clogl_sw", "cloglog", "clonevar",
"clslistarray", "cluster", "cluster_measures", "cluster_stop",
"cluster_tree", "cluster_tree_8", "clustermat", "cmdlog",
"cnr", "cnre", "cnreg", "cnreg_p", "cnreg_sw", "cnsreg",
"codebook", "collaps4", "collapse", "colormult_nb",
"colormult_nw", "compare", "compress", "conf", "confi",
"confir", "confirm", "conren", "cons", "const", "constr",
"constra", "constrai", "constrain", "constraint", "continue",
"contract", "copy", "copyright", "copysource", "cor", "corc",
"corr", "corr2data", "corr_anti", "corr_kmo", "corr_smc",
"corre", "correl", "correla", "correlat", "correlate",
"corrgram", "cou", "coun", "count", "cox", "cox_p", "cox_sw",
"coxbase", "coxhaz", "coxvar", "cprplot", "cprplot_7",
"crc", "cret", "cretu", "cretur", "creturn", "cross", "cs",
"cscript", "cscript_log", "csi", "ct", "ct_is", "ctset",
"ctst_5", "ctst_st", "cttost", "cumsp", "cumsp_7", "cumul",
"cusum", "cusum_7", "cutil", "d", "datasig", "datasign",
"datasigna", "datasignat", "datasignatu", "datasignatur",
"datasignature", "datetof", "db", "dbeta", "de", "dec",
"deco", "decod", "decode", "deff", "des", "desc", "descr",
"descri", "describ", "describe", "destring", "dfbeta",
"dfgls", "dfuller", "di", "di_g", "dir", "dirstats", "dis",
"discard", "disp", "disp_res", "disp_s", "displ", "displa",
"display", "distinct", "do", "doe", "doed", "doedi",
"doedit", "dotplot", "dotplot_7", "dprobit", "drawnorm",
"drop", "ds", "ds_util", "dstdize", "duplicates", "durbina",
"dwstat", "dydx", "e", "ed", "edi", "edit", "egen",
"eivreg", "emdef", "end", "en", "enc", "enco", "encod", "encode",
"eq", "erase", "ereg", "ereg_lf", "ereg_p", "ereg_sw",
"ereghet", "ereghet_glf", "ereghet_glf_sh", "ereghet_gp",
"ereghet_ilf", "ereghet_ilf_sh", "ereghet_ip", "eret",
"eretu", "eretur", "ereturn", "err", "erro", "error", "est",
"est_cfexist", "est_cfname", "est_clickable", "est_expand",
"est_hold", "est_table", "est_unhold", "est_unholdok",
"estat", "estat_default", "estat_summ", "estat_vce_only",
"esti", "estimates", "etodow", "etof", "etomdy", "ex",
"exi", "exit", "expand", "expandcl", "fac", "fact", "facto",
"factor", "factor_estat", "factor_p", "factor_pca_rotated",
"factor_rotate", "factormat", "fcast", "fcast_compute",
"fcast_graph", "fdades", "fdadesc", "fdadescr", "fdadescri",
"fdadescrib", "fdadescribe", "fdasav", "fdasave", "fdause",
"fh_st", "open", "read", "close",
"file", "filefilter", "fillin", "find_hlp_file", "findfile",
"findit", "findit_7", "fit", "fl", "fli", "flis", "flist",
"for5_0", "form", "forma", "format", "fpredict", "frac_154",
"frac_adj", "frac_chk", "frac_cox", "frac_ddp", "frac_dis",
"frac_dv", "frac_in", "frac_mun", "frac_pp", "frac_pq",
"frac_pv", "frac_wgt", "frac_xo", "fracgen", "fracplot",
"fracplot_7", "fracpoly", "fracpred", "fron_ex", "fron_hn",
"fron_p", "fron_tn", "fron_tn2", "frontier", "ftodate", "ftoe",
"ftomdy", "ftowdate", "g", "gamhet_glf", "gamhet_gp",
"gamhet_ilf", "gamhet_ip", "gamma", "gamma_d2", "gamma_p",
"gamma_sw", "gammahet", "gdi_hexagon", "gdi_spokes", "ge",
"gen", "gene", "gener", "genera", "generat", "generate",
"genrank", "genstd", "genvmean", "gettoken", "gl", "gladder",
"gladder_7", "glim_l01", "glim_l02", "glim_l03", "glim_l04",
"glim_l05", "glim_l06", "glim_l07", "glim_l08", "glim_l09",
"glim_l10", "glim_l11", "glim_l12", "glim_lf", "glim_mu",
"glim_nw1", "glim_nw2", "glim_nw3", "glim_p", "glim_v1",
"glim_v2", "glim_v3", "glim_v4", "glim_v5", "glim_v6",
"glim_v7", "glm", "glm_6", "glm_p", "glm_sw", "glmpred", "glo",
"glob", "globa", "global", "glogit", "glogit_8", "glogit_p",
"gmeans", "gnbre_lf", "gnbreg", "gnbreg_5", "gnbreg_p",
"gomp_lf", "gompe_sw", "gomper_p", "gompertz", "gompertzhet",
"gomphet_glf", "gomphet_glf_sh", "gomphet_gp", "gomphet_ilf",
"gomphet_ilf_sh", "gomphet_ip", "gphdot", "gphpen",
"gphprint", "gprefs", "gprobi_p", "gprobit", "gprobit_8", "gr",
"gr7", "gr_copy", "gr_current", "gr_db", "gr_describe",
"gr_dir", "gr_draw", "gr_draw_replay", "gr_drop", "gr_edit",
"gr_editviewopts", "gr_example", "gr_example2", "gr_export",
"gr_print", "gr_qscheme", "gr_query", "gr_read", "gr_rename",
"gr_replay", "gr_save", "gr_set", "gr_setscheme", "gr_table",
"gr_undo", "gr_use", "graph", "graph7", "grebar", "greigen",
"greigen_7", "greigen_8", "grmeanby", "grmeanby_7",
"gs_fileinfo", "gs_filetype", "gs_graphinfo", "gs_stat",
"gsort", "gwood", "h", "hadimvo", "hareg", "hausman",
"haver", "he", "heck_d2", "heckma_p", "heckman", "heckp_lf",
"heckpr_p", "heckprob", "hel", "help", "hereg", "hetpr_lf",
"hetpr_p", "hetprob", "hettest", "hexdump", "hilite",
"hist", "hist_7", "histogram", "hlogit", "hlu", "hmeans",
"hotel", "hotelling", "hprobit", "hreg", "hsearch", "icd9",
"icd9_ff", "icd9p", "iis", "impute", "imtest", "inbase",
"include", "inf", "infi", "infil", "infile", "infix", "inp",
"inpu", "input", "ins", "insheet", "insp", "inspe",
"inspec", "inspect", "integ", "inten", "intreg", "intreg_7",
"intreg_p", "intrg2_ll", "intrg_ll", "intrg_ll2", "ipolate",
"iqreg", "ir", "irf", "irf_create", "irfm", "iri", "is_svy",
"is_svysum", "isid", "istdize", "ivprob_1_lf", "ivprob_lf",
"ivprobit", "ivprobit_p", "ivreg", "ivreg_footnote",
"ivtob_1_lf", "ivtob_lf", "ivtobit", "ivtobit_p", "jackknife",
"jacknife", "jknife", "jknife_6", "jknife_8", "jkstat",
"joinby", "kalarma1", "kap", "kap_3", "kapmeier", "kappa",
"kapwgt", "kdensity", "kdensity_7", "keep", "ksm", "ksmirnov",
"ktau", "kwallis", "l", "la", "lab", "labe", "label",
"labelbook", "ladder", "levels", "levelsof", "leverage",
"lfit", "lfit_p", "li", "lincom", "line", "linktest",
"lis", "list", "lloghet_glf", "lloghet_glf_sh", "lloghet_gp",
"lloghet_ilf", "lloghet_ilf_sh", "lloghet_ip", "llogi_sw",
"llogis_p", "llogist", "llogistic", "llogistichet",
"lnorm_lf", "lnorm_sw", "lnorma_p", "lnormal", "lnormalhet",
"lnormhet_glf", "lnormhet_glf_sh", "lnormhet_gp",
"lnormhet_ilf", "lnormhet_ilf_sh", "lnormhet_ip", "lnskew0",
"loadingplot", "loc", "loca", "local", "log", "logi",
"logis_lf", "logistic", "logistic_p", "logit", "logit_estat",
"logit_p", "loglogs", "logrank", "loneway", "lookfor",
"lookup", "lowess", "lowess_7", "lpredict", "lrecomp", "lroc",
"lroc_7", "lrtest", "ls", "lsens", "lsens_7", "lsens_x",
"lstat", "ltable", "ltable_7", "ltriang", "lv", "lvr2plot",
"lvr2plot_7", "m", "ma", "mac", "macr", "macro", "makecns",
"man", "manova", "manova_estat", "manova_p", "manovatest",
"mantel", "mark", "markin", "markout", "marksample", "mat",
"mat_capp", "mat_order", "mat_put_rr", "mat_rapp", "mata",
"mata_clear", "mata_describe", "mata_drop", "mata_matdescribe",
"mata_matsave", "mata_matuse", "mata_memory", "mata_mlib",
"mata_mosave", "mata_rename", "mata_which", "matalabel",
"matcproc", "matlist", "matname", "matr", "matri",
"matrix", "matrix_input__dlg", "matstrik", "mcc", "mcci",
"md0_", "md1_", "md1debug_", "md2_", "md2debug_", "mds",
"mds_estat", "mds_p", "mdsconfig", "mdslong", "mdsmat",
"mdsshepard", "mdytoe", "mdytof", "me_derd", "mean",
"means", "median", "memory", "memsize", "meqparse", "mer",
"merg", "merge", "mfp", "mfx", "mhelp", "mhodds", "minbound",
"mixed_ll", "mixed_ll_reparm", "mkassert", "mkdir",
"mkmat", "mkspline", "ml", "ml_5", "ml_adjs", "ml_bhhhs",
"ml_c_d", "ml_check", "ml_clear", "ml_cnt", "ml_debug",
"ml_defd", "ml_e0", "ml_e0_bfgs", "ml_e0_cycle", "ml_e0_dfp",
"ml_e0i", "ml_e1", "ml_e1_bfgs", "ml_e1_bhhh", "ml_e1_cycle",
"ml_e1_dfp", "ml_e2", "ml_e2_cycle", "ml_ebfg0", "ml_ebfr0",
"ml_ebfr1", "ml_ebh0q", "ml_ebhh0", "ml_ebhr0", "ml_ebr0i",
"ml_ecr0i", "ml_edfp0", "ml_edfr0", "ml_edfr1", "ml_edr0i",
"ml_eds", "ml_eer0i", "ml_egr0i", "ml_elf", "ml_elf_bfgs",
"ml_elf_bhhh", "ml_elf_cycle", "ml_elf_dfp", "ml_elfi",
"ml_elfs", "ml_enr0i", "ml_enrr0", "ml_erdu0", "ml_erdu0_bfgs",
"ml_erdu0_bhhh", "ml_erdu0_bhhhq", "ml_erdu0_cycle",
"ml_erdu0_dfp", "ml_erdu0_nrbfgs", "ml_exde", "ml_footnote",
"ml_geqnr", "ml_grad0", "ml_graph", "ml_hbhhh", "ml_hd0",
"ml_hold", "ml_init", "ml_inv", "ml_log", "ml_max",
"ml_mlout", "ml_mlout_8", "ml_model", "ml_nb0", "ml_opt",
"ml_p", "ml_plot", "ml_query", "ml_rdgrd", "ml_repor",
"ml_s_e", "ml_score", "ml_searc", "ml_technique", "ml_unhold",
"mleval", "mlf_", "mlmatbysum", "mlmatsum", "mlog", "mlogi",
"mlogit", "mlogit_footnote", "mlogit_p", "mlopts", "mlsum",
"mlvecsum", "mnl0_", "mor", "more", "mov", "move", "mprobit",
"mprobit_lf", "mprobit_p", "mrdu0_", "mrdu1_", "mvdecode",
"mvencode", "mvreg", "mvreg_estat", "n", "nbreg",
"nbreg_al", "nbreg_lf", "nbreg_p", "nbreg_sw", "nestreg", "net",
"newey", "newey_7", "newey_p", "news", "nl", "nl_7", "nl_9",
"nl_9_p", "nl_p", "nl_p_7", "nlcom", "nlcom_p", "nlexp2",
"nlexp2_7", "nlexp2a", "nlexp2a_7", "nlexp3", "nlexp3_7",
"nlgom3", "nlgom3_7", "nlgom4", "nlgom4_7", "nlinit", "nllog3",
"nllog3_7", "nllog4", "nllog4_7", "nlog_rd", "nlogit",
"nlogit_p", "nlogitgen", "nlogittree", "nlpred", "no",
"nobreak", "noi", "nois", "noisi", "noisil", "noisily", "note",
"notes", "notes_dlg", "nptrend", "numlabel", "numlist", "odbc",
"old_ver", "olo", "olog", "ologi", "ologi_sw", "ologit",
"ologit_p", "ologitp", "on", "one", "onew", "onewa", "oneway",
"op_colnm", "op_comp", "op_diff", "op_inv", "op_str", "opr",
"opro", "oprob", "oprob_sw", "oprobi", "oprobi_p", "oprobit",
"oprobitp", "opts_exclusive", "order", "orthog", "orthpoly",
"ou", "out", "outf", "outfi", "outfil", "outfile", "outs",
"outsh", "outshe", "outshee", "outsheet", "ovtest", "pac",
"pac_7", "palette", "parse", "parse_dissim", "pause", "pca",
"pca_8", "pca_display", "pca_estat", "pca_p", "pca_rotate",
"pcamat", "pchart", "pchart_7", "pchi", "pchi_7", "pcorr",
"pctile", "pentium", "pergram", "pergram_7", "permute",
"permute_8", "personal", "peto_st", "pkcollapse", "pkcross",
"pkequiv", "pkexamine", "pkexamine_7", "pkshape", "pksumm",
"pksumm_7", "pl", "plo", "plot", "plugin", "pnorm",
"pnorm_7", "poisgof", "poiss_lf", "poiss_sw", "poisso_p",
"poisson", "poisson_estat", "post", "postclose", "postfile",
"postutil", "pperron", "pr", "prais", "prais_e", "prais_e2",
"prais_p", "predict", "predictnl", "preserve", "print",
"pro", "prob", "probi", "probit", "probit_estat", "probit_p",
"proc_time", "procoverlay", "procrustes", "procrustes_estat",
"procrustes_p", "profiler", "prog", "progr", "progra",
"program", "prop", "proportion", "prtest", "prtesti", "pwcorr",
"pwd", "q", "s", "qby", "qbys", "qchi", "qchi_7", "qladder",
"qladder_7", "qnorm", "qnorm_7", "qqplot", "qqplot_7", "qreg",
"qreg_c", "qreg_p", "qreg_sw", "qu", "quadchk", "quantile",
"quantile_7", "que", "quer", "query", "range", "ranksum",
"ratio", "rchart", "rchart_7", "rcof", "recast", "reclink",
"recode", "reg", "reg3", "reg3_p", "regdw", "regr", "regre",
"regre_p2", "regres", "regres_p", "regress", "regress_estat",
"regriv_p", "remap", "ren", "rena", "renam", "rename",
"renpfix", "repeat", "replace", "report", "reshape",
"restore", "ret", "retu", "retur", "return", "rm", "rmdir",
"robvar", "roccomp", "roccomp_7", "roccomp_8", "rocf_lf",
"rocfit", "rocfit_8", "rocgold", "rocplot", "rocplot_7",
"roctab", "roctab_7", "rolling", "rologit", "rologit_p",
"rot", "rota", "rotat", "rotate", "rotatemat", "rreg",
"rreg_p", "ru", "run", "runtest", "rvfplot", "rvfplot_7",
"rvpplot", "rvpplot_7", "sa", "safesum", "sample",
"sampsi", "sav", "save", "savedresults", "saveold", "sc",
"sca", "scal", "scala", "scalar", "scatter", "scm_mine",
"sco", "scob_lf", "scob_p", "scobi_sw", "scobit", "scor",
"score", "scoreplot", "scoreplot_help", "scree", "screeplot",
"screeplot_help", "sdtest", "sdtesti", "se", "search",
"separate", "seperate", "serrbar", "serrbar_7", "serset", "set",
"set_defaults", "sfrancia", "sh", "she", "shel", "shell",
"shewhart", "shewhart_7", "signestimationsample", "signrank",
"signtest", "simul", "simul_7", "simulate", "simulate_8",
"sktest", "sleep", "slogit", "slogit_d2", "slogit_p", "smooth",
"snapspan", "so", "sor", "sort", "spearman", "spikeplot",
"spikeplot_7", "spikeplt", "spline_x", "split", "sqreg",
"sqreg_p", "sret", "sretu", "sretur", "sreturn", "ssc", "st",
"st_ct", "st_hc", "st_hcd", "st_hcd_sh", "st_is", "st_issys",
"st_note", "st_promo", "st_set", "st_show", "st_smpl",
"st_subid", "stack", "statsby", "statsby_8", "stbase", "stci",
"stci_7", "stcox", "stcox_estat", "stcox_fr", "stcox_fr_ll",
"stcox_p", "stcox_sw", "stcoxkm", "stcoxkm_7", "stcstat",
"stcurv", "stcurve", "stcurve_7", "stdes", "stem", "stepwise",
"stereg", "stfill", "stgen", "stir", "stjoin", "stmc", "stmh",
"stphplot", "stphplot_7", "stphtest", "stphtest_7",
"stptime", "strate", "strate_7", "streg", "streg_sw", "streset",
"sts", "sts_7", "stset", "stsplit", "stsum", "sttocc",
"sttoct", "stvary", "stweib", "su", "suest", "suest_8",
"sum", "summ", "summa", "summar", "summari", "summariz",
"summarize", "sunflower", "sureg", "survcurv", "survsum",
"svar", "svar_p", "svmat", "svy", "svy_disp", "svy_dreg",
"svy_est", "svy_est_7", "svy_estat", "svy_get", "svy_gnbreg_p",
"svy_head", "svy_header", "svy_heckman_p", "svy_heckprob_p",
"svy_intreg_p", "svy_ivreg_p", "svy_logistic_p", "svy_logit_p",
"svy_mlogit_p", "svy_nbreg_p", "svy_ologit_p", "svy_oprobit_p",
"svy_poisson_p", "svy_probit_p", "svy_regress_p", "svy_sub",
"svy_sub_7", "svy_x", "svy_x_7", "svy_x_p", "svydes",
"svydes_8", "svygen", "svygnbreg", "svyheckman", "svyheckprob",
"svyintreg", "svyintreg_7", "svyintrg", "svyivreg", "svylc",
"svylog_p", "svylogit", "svymarkout", "svymarkout_8",
"svymean", "svymlog", "svymlogit", "svynbreg", "svyolog",
"svyologit", "svyoprob", "svyoprobit", "svyopts",
"svypois", "svypois_7", "svypoisson", "svyprobit", "svyprobt",
"svyprop", "svyprop_7", "svyratio", "svyreg", "svyreg_p",
"svyregress", "svyset", "svyset_7", "svyset_8", "svytab",
"svytab_7", "svytest", "svytotal", "sw", "sw_8", "swcnreg",
"swcox", "swereg", "swilk", "swlogis", "swlogit",
"swologit", "swoprbt", "swpois", "swprobit", "swqreg",
"swtobit", "swweib", "symmetry", "symmi", "symplot",
"symplot_7", "syntax", "sysdescribe", "sysdir", "sysuse",
"szroeter", "ta", "tab", "tab1", "tab2", "tab_or", "tabd",
"tabdi", "tabdis", "tabdisp", "tabi", "table", "tabodds",
"tabodds_7", "tabstat", "tabu", "tabul", "tabula", "tabulat",
"tabulate", "te", "tempfile", "tempname", "tempvar", "tes",
"test", "testnl", "testparm", "teststd", "tetrachoric",
"time_it", "timer", "tis", "tob", "tobi", "tobit", "tobit_p",
"tobit_sw", "token", "tokeni", "tokeniz", "tokenize",
"tostring", "total", "translate", "translator", "transmap",
"treat_ll", "treatr_p", "treatreg", "trim", "trnb_cons",
"trnb_mean", "trpoiss_d2", "trunc_ll", "truncr_p", "truncreg",
"tsappend", "tset", "tsfill", "tsline", "tsline_ex",
"tsreport", "tsrevar", "tsrline", "tsset", "tssmooth",
"tsunab", "ttest", "ttesti", "tut_chk", "tut_wait", "tutorial",
"tw", "tware_st", "two", "twoway", "twoway__fpfit_serset",
"twoway__function_gen", "twoway__histogram_gen",
"twoway__ipoint_serset", "twoway__ipoints_serset",
"twoway__kdensity_gen", "twoway__lfit_serset",
"twoway__normgen_gen", "twoway__pci_serset",
"twoway__qfit_serset", "twoway__scatteri_serset",
"twoway__sunflower_gen", "twoway_ksm_serset", "ty", "typ",
"type", "typeof", "u", "unab", "unabbrev", "unabcmd",
"update", "us", "use", "uselabel", "var", "var_mkcompanion",
"var_p", "varbasic", "varfcast", "vargranger", "varirf",
"varirf_add", "varirf_cgraph", "varirf_create", "varirf_ctable",
"varirf_describe", "varirf_dir", "varirf_drop", "varirf_erase",
"varirf_graph", "varirf_ograph", "varirf_rename", "varirf_set",
"varirf_table", "varlist", "varlmar", "varnorm", "varsoc",
"varstable", "varstable_w", "varstable_w2", "varwle",
"vce", "vec", "vec_fevd", "vec_mkphi", "vec_p", "vec_p_w",
"vecirf_create", "veclmar", "veclmar_w", "vecnorm",
"vecnorm_w", "vecrank", "vecstable", "verinst", "vers",
"versi", "versio", "version", "view", "viewsource", "vif",
"vwls", "wdatetof", "webdescribe", "webseek", "webuse",
"weib1_lf", "weib2_lf", "weib_lf", "weib_lf0", "weibhet_glf",
"weibhet_glf_sh", "weibhet_glfa", "weibhet_glfa_sh",
"weibhet_gp", "weibhet_ilf", "weibhet_ilf_sh", "weibhet_ilfa",
"weibhet_ilfa_sh", "weibhet_ip", "weibu_sw", "weibul_p",
"weibull", "weibull_c", "weibull_s", "weibullhet",
"wh", "whelp", "whi", "which", "whil", "while", "wilc_st",
"wilcoxon", "win", "wind", "windo", "window", "winexec",
"wntestb", "wntestb_7", "wntestq", "xchart", "xchart_7",
"xcorr", "xcorr_7", "xi", "xi_6", "xmlsav", "xmlsave",
"xmluse", "xpose", "xsh", "xshe", "xshel", "xshell",
"xt_iis", "xt_tis", "xtab_p", "xtabond", "xtbin_p",
"xtclog", "xtcloglog", "xtcloglog_8", "xtcloglog_d2",
"xtcloglog_pa_p", "xtcloglog_re_p", "xtcnt_p", "xtcorr",
"xtdata", "xtdes", "xtfront_p", "xtfrontier", "xtgee",
"xtgee_elink", "xtgee_estat", "xtgee_makeivar", "xtgee_p",
"xtgee_plink", "xtgls", "xtgls_p", "xthaus", "xthausman",
"xtht_p", "xthtaylor", "xtile", "xtint_p", "xtintreg",
"xtintreg_8", "xtintreg_d2", "xtintreg_p", "xtivp_1",
"xtivp_2", "xtivreg", "xtline", "xtline_ex", "xtlogit",
"xtlogit_8", "xtlogit_d2", "xtlogit_fe_p", "xtlogit_pa_p",
"xtlogit_re_p", "xtmixed", "xtmixed_estat", "xtmixed_p",
"xtnb_fe", "xtnb_lf", "xtnbreg", "xtnbreg_pa_p",
"xtnbreg_refe_p", "xtpcse", "xtpcse_p", "xtpois", "xtpoisson",
"xtpoisson_d2", "xtpoisson_pa_p", "xtpoisson_refe_p", "xtpred",
"xtprobit", "xtprobit_8", "xtprobit_d2", "xtprobit_re_p",
"xtps_fe", "xtps_lf", "xtps_ren", "xtps_ren_8", "xtrar_p",
"xtrc", "xtrc_p", "xtrchh", "xtrefe_p", "xtreg", "xtreg_be",
"xtreg_fe", "xtreg_ml", "xtreg_pa_p", "xtreg_re",
"xtregar", "xtrere_p", "xtset", "xtsf_ll", "xtsf_llti",
"xtsum", "xttab", "xttest0", "xttobit", "xttobit_8",
"xttobit_p", "xttrans", "yx", "yxview__barlike_draw",
"yxview_area_draw", "yxview_bar_draw", "yxview_dot_draw",
"yxview_dropline_draw", "yxview_function_draw",
"yxview_iarrow_draw", "yxview_ilabels_draw",
"yxview_normal_draw", "yxview_pcarrow_draw",
"yxview_pcbarrow_draw", "yxview_pccapsym_draw",
"yxview_pcscatter_draw", "yxview_pcspike_draw",
"yxview_rarea_draw", "yxview_rbar_draw", "yxview_rbarm_draw",
"yxview_rcap_draw", "yxview_rcapsym_draw",
"yxview_rconnected_draw", "yxview_rline_draw",
"yxview_rscatter_draw", "yxview_rspike_draw",
"yxview_spike_draw", "yxview_sunflower_draw", "zap_s", "zinb",
"zinb_llf", "zinb_plf", "zip", "zip_llf", "zip_p", "zip_plf",
"zt_ct_5", "zt_hc_5", "zt_hcd_5", "zt_is_5", "zt_iss_5",
"zt_sho_5", "zt_smp_5", "ztbase_5", "ztcox_5", "ztdes_5",
"ztereg_5", "ztfill_5", "ztgen_5", "ztir_5", "ztjoin_5", "ztnb",
"ztnb_p", "ztp", "ztp_p", "zts_5", "ztset_5", "ztspli_5",
"ztsum_5", "zttoct_5", "ztvary_5", "ztweib_5"
)
builtins_functions = (
"abbrev", "abs", "acos", "acosh", "asin", "asinh", "atan",
"atan2", "atanh", "autocode", "betaden", "binomial",
"binomialp", "binomialtail", "binormal", "bofd",
"byteorder", "c", "_caller", "cauchy", "cauchyden",
"cauchytail", "Cdhms", "ceil", "char", "chi2", "chi2den",
"chi2tail", "Chms", "chop", "cholesky", "clip", "Clock",
"clock", "cloglog", "Cmdyhms", "Cofc", "cofC", "Cofd", "cofd",
"coleqnumb", "collatorlocale", "collatorversion",
"colnfreeparms", "colnumb", "colsof", "comb", "cond", "corr",
"cos", "cosh", "daily", "date", "day", "det", "dgammapda",
"dgammapdada", "dgammapdadx", "dgammapdx", "dgammapdxdx",
"dhms", "diag", "diag0cnt", "digamma", "dofb", "dofC", "dofc",
"dofh", "dofm", "dofq", "dofw", "dofy", "dow", "doy",
"dunnettprob", "e", "el", "esample", "epsdouble", "epsfloat",
"exp", "expm1", "exponential", "exponentialden",
"exponentialtail", "F", "Fden", "fileexists", "fileread",
"filereaderror", "filewrite", "float", "floor", "fmtwidth",
"frval", "_frval", "Ftail", "gammaden", "gammap", "gammaptail",
"get", "hadamard", "halfyear", "halfyearly", "has_eprop", "hh",
"hhC", "hms", "hofd", "hours", "hypergeometric",
"hypergeometricp", "I", "ibeta", "ibetatail", "igaussian",
"igaussianden", "igaussiantail", "indexnot", "inlist",
"inrange", "int", "inv", "invbinomial", "invbinomialtail",
"invcauchy", "invcauchytail", "invchi2", "invchi2tail",
"invcloglog", "invdunnettprob", "invexponential",
"invexponentialtail", "invF", "invFtail", "invgammap",
"invgammaptail", "invibeta", "invibetatail", "invigaussian",
"invigaussiantail", "invlaplace", "invlaplacetail",
"invlogisticp", "invlogisticsp", "invlogisticmsp",
"invlogistictailp", "invlogistictailsp", "invlogistictailmsp",
"invlogit", "invnbinomial", "invnbinomialtail", "invnchi2",
"invnchi2tail", "invnF", "invnFtail", "invnibeta",
"invnormal", "invnt", "invnttail", "invpoisson",
"invpoissontail", "invsym", "invt", "invttail", "invtukeyprob",
"invweibullabp", "invweibullabgp", "invweibullphabp",
"invweibullphabgp", "invweibullphtailabp",
"invweibullphtailabgp", "invweibulltailabp",
"invweibulltailabgp", "irecode", "issymmetric", "J", "laplace",
"laplaceden", "laplacetail", "ln", "ln1m", "ln1p", "lncauchyden",
"lnfactorial", "lngamma", "lnigammaden", "lnigaussianden",
"lniwishartden", "lnlaplaceden", "lnmvnormalden", "lnnormal",
"lnnormalden", "lnnormaldenxs", "lnnormaldenxms", "lnwishartden",
"log", "log10", "log1m", "log1p", "logisticx", "logisticsx",
"logisticmsx", "logisticdenx", "logisticdensx", "logisticdenmsx",
"logistictailx", "logistictailsx", "logistictailmsx", "logit",
"matmissing", "matrix", "matuniform", "max", "maxbyte",
"maxdouble", "maxfloat", "maxint", "maxlong", "mdy", "mdyhms",
"mi", "min", "minbyte", "mindouble", "minfloat", "minint",
"minlong", "minutes", "missing", "mm", "mmC", "mod", "mofd",
"month", "monthly", "mreldif", "msofhours", "msofminutes",
"msofseconds", "nbetaden", "nbinomial", "nbinomialp",
"nbinomialtail", "nchi2", "nchi2den", "nchi2tail", "nF",
"nFden", "nFtail", "nibeta", "normal", "normalden",
"normaldenxs", "normaldenxms", "npnchi2", "npnF", "npnt",
"nt", "ntden", "nttail", "nullmat", "plural", "plurals1",
"poisson", "poissonp", "poissontail", "qofd", "quarter",
"quarterly", "r", "rbeta", "rbinomial", "rcauchy", "rchi2",
"recode", "real", "regexm", "regexr", "regexs", "reldif",
"replay", "return", "rexponential", "rgamma", "rhypergeometric",
"rigaussian", "rlaplace", "rlogistic", "rlogistics",
"rlogisticms", "rnbinomial", "rnormal", "rnormalm", "rnormalms",
"round", "roweqnumb", "rownfreeparms", "rownumb", "rowsof",
"rpoisson", "rt", "runiform", "runiformab", "runiformint",
"rweibullab", "rweibullabg", "rweibullphab", "rweibullphabg",
"s", "scalar", "seconds", "sign", "sin", "sinh",
"smallestdouble", "soundex", "soundex_nara", "sqrt", "ss",
"ssC", "strcat", "strdup", "string", "stringns", "stritrim",
"strlen", "strlower", "strltrim", "strmatch", "strofreal",
"strofrealns", "strpos", "strproper", "strreverse", "strrpos",
"strrtrim", "strtoname", "strtrim", "strupper", "subinstr",
"subinword", "substr", "sum", "sweep", "t", "tan", "tanh",
"tC", "tc", "td", "tden", "th", "tin", "tm", "tobytes", "tq",
"trace", "trigamma", "trunc", "ttail", "tukeyprob", "tw",
"twithin", "uchar", "udstrlen", "udsubstr", "uisdigit",
"uisletter", "ustrcompare", "ustrfix", "ustrfrom",
"ustrinvalidcnt", "ustrleft", "ustrlen", "ustrlower",
"ustrltrim", "ustrnormalize", "ustrpos", "ustrregexm",
"ustrregexra", "ustrregexrf", "ustrregexs", "ustrreverse",
"ustrright", "ustrrpos", "ustrrtrim", "ustrsortkey",
"ustrtitle", "ustrto", "ustrtohex", "ustrtoname",
"ustrtrim", "ustrunescape", "ustrupper", "ustrword",
"ustrwordcount", "usubinstr", "usubstr", "vec", "vecdiag",
"week", "weekly", "weibullabx", "weibullabgx", "weibulldenabx",
"weibulldenabgx", "weibullphabx", "weibullphabgx",
"weibullphdenabx", "weibullphdenabgx", "weibullphtailabx",
"weibullphtailabgx", "weibulltailabx", "weibulltailabgx",
"wofd", "word", "wordbreaklocale", "wordcount",
"year", "yearly", "yh", "ym", "yofd", "yq", "yw"
)
| 27,227 | Python | 58.449782 | 70 | 0.571712 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/oberon.py | """
pygments.lexers.oberon
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Oberon family languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ComponentPascalLexer']
class ComponentPascalLexer(RegexLexer):
"""
For Component Pascal source code.
.. versionadded:: 2.1
"""
name = 'Component Pascal'
aliases = ['componentpascal', 'cp']
filenames = ['*.cp', '*.cps']
mimetypes = ['text/x-component-pascal']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('whitespace'),
include('comments'),
include('punctuation'),
include('numliterals'),
include('strings'),
include('operators'),
include('builtins'),
include('identifiers'),
],
'whitespace': [
(r'\n+', Text), # blank lines
(r'\s+', Text), # whitespace
],
'comments': [
(r'\(\*([^$].*?)\*\)', Comment.Multiline),
# TODO: nested comments (* (* ... *) ... (* ... *) *) not supported!
],
'punctuation': [
(r'[()\[\]{},.:;|]', Punctuation),
],
'numliterals': [
(r'[0-9A-F]+X\b', Number.Hex), # char code
(r'[0-9A-F]+[HL]\b', Number.Hex), # hexadecimal number
(r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number
(r'[0-9]+\.[0-9]+', Number.Float), # real number
(r'[0-9]+', Number.Integer), # decimal whole number
],
'strings': [
(r"'[^\n']*'", String), # single quoted string
(r'"[^\n"]*"', String), # double quoted string
],
'operators': [
# Arithmetic Operators
(r'[+-]', Operator),
(r'[*/]', Operator),
# Relational Operators
(r'[=#<>]', Operator),
# Dereferencing Operator
(r'\^', Operator),
# Logical AND Operator
(r'&', Operator),
# Logical NOT Operator
(r'~', Operator),
# Assignment Symbol
(r':=', Operator),
# Range Constructor
(r'\.\.', Operator),
(r'\$', Operator),
],
'identifiers': [
(r'([a-zA-Z_$][\w$]*)', Name),
],
'builtins': [
(words((
'ANYPTR', 'ANYREC', 'BOOLEAN', 'BYTE', 'CHAR', 'INTEGER', 'LONGINT',
'REAL', 'SET', 'SHORTCHAR', 'SHORTINT', 'SHORTREAL'
), suffix=r'\b'), Keyword.Type),
(words((
'ABS', 'ABSTRACT', 'ARRAY', 'ASH', 'ASSERT', 'BEGIN', 'BITS', 'BY',
'CAP', 'CASE', 'CHR', 'CLOSE', 'CONST', 'DEC', 'DIV', 'DO', 'ELSE',
'ELSIF', 'EMPTY', 'END', 'ENTIER', 'EXCL', 'EXIT', 'EXTENSIBLE', 'FOR',
'HALT', 'IF', 'IMPORT', 'IN', 'INC', 'INCL', 'IS', 'LEN', 'LIMITED',
'LONG', 'LOOP', 'MAX', 'MIN', 'MOD', 'MODULE', 'NEW', 'ODD', 'OF',
'OR', 'ORD', 'OUT', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
'SHORT', 'SHORTCHAR', 'SHORTINT', 'SIZE', 'THEN', 'TYPE', 'TO', 'UNTIL',
'VAR', 'WHILE', 'WITH'
), suffix=r'\b'), Keyword.Reserved),
(r'(TRUE|FALSE|NIL|INF)\b', Keyword.Constant),
]
}
def analyse_text(text):
"""The only other lexer using .cp is the C++ one, so we check if for
a few common Pascal keywords here. Those are unfortunately quite
common across various business languages as well."""
result = 0
if 'BEGIN' in text:
result += 0.01
if 'END' in text:
result += 0.01
if 'PROCEDURE' in text:
result += 0.01
if 'END' in text:
result += 0.01
return result
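# --- Illustrative usage sketch (editorial addition, not part of Pygments) ---
# The guard below only runs when this module is executed directly. It shows
# how the heuristic score from analyse_text() feeds lexer guessing (a higher
# score makes guess_lexer prefer this lexer) and how the lexer is used for
# ordinary highlighting. The sample program is made up for illustration.
if __name__ == '__main__':
from pygments import highlight
from pygments.formatters import NullFormatter
sample = 'MODULE Hello; BEGIN END Hello.'
# 'BEGIN' scores once and 'END' scores twice in the checks above,
# so the result is a small non-zero confidence value.
print(ComponentPascalLexer.analyse_text(sample))
# Tokenize the sample and emit it unchanged through the NullFormatter.
print(highlight(sample, ComponentPascalLexer(), NullFormatter()))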
| 4,169 | Python | 33.46281 | 89 | 0.453106 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/console.py | """
pygments.lexers.console
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for misc console output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Generic, Comment, String, Text, Keyword, Name, \
Punctuation, Number, Whitespace
__all__ = ['VCTreeStatusLexer', 'PyPyLogLexer']
class VCTreeStatusLexer(RegexLexer):
"""
For colorizing output of version control status commands, like "hg
status" or "svn status".
.. versionadded:: 2.0
"""
name = 'VCTreeStatus'
aliases = ['vctreestatus']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^A \+ C\s+', Generic.Error),
(r'^A\s+\+?\s+', String),
(r'^M\s+', Generic.Inserted),
(r'^C\s+', Generic.Error),
(r'^D\s+', Generic.Deleted),
(r'^[?!]\s+', Comment.Preproc),
(r' >\s+.*\n', Comment.Preproc),
(r'\S+', Text),
(r'\s+', Whitespace),
]
}
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
.. versionadded:: 1.5
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] \{jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] \{jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"^\+\d+: ", Comment),
(r"--end of the loop--", Comment),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()(\w+(?:\.\w+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
(r"<.*?>+", Name.Builtin),
(r"(label|debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|float_neg|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
r"cast_int_to_float|cast_float_to_int|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
r"virtual_ref|mark_opaque_ptr|"
r"call_may_force|call_assembler|call_loopinvariant|"
r"call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r":", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"\s+", Whitespace),
(r"#.*?$", Comment),
],
}
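# --- Illustrative usage sketch (editorial addition, not part of Pygments) ---
# Runs only when this module is executed directly. It pushes a couple of
# made-up "svn status"-style lines through VCTreeStatusLexer and prints the
# raw token stream, which makes it easy to see which rule matched each line.
if __name__ == '__main__':
sample = 'M lexers/console.py\n? notes.txt\n'
for token_type, value in VCTreeStatusLexer().get_tokens(sample):
if value.strip():
print(token_type, repr(value))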
| 4,148 | Python | 35.078261 | 80 | 0.52001 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/freefem.py | """
pygments.lexers.freefem
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for FreeFem++ language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Comment, Operator, Keyword, Name
from pygments.lexers.c_cpp import CppLexer
__all__ = ['FreeFemLexer']
class FreeFemLexer(CppLexer):
"""
For FreeFem++ source.
This is an extension of the CppLexer, as the FreeFem Language is a superset
of C++.
.. versionadded:: 2.4
"""
name = 'Freefem'
url = 'https://freefem.org/'
aliases = ['freefem']
filenames = ['*.edp']
mimetypes = ['text/x-freefem']
# Language operators
operators = {'+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\''}
# types
types = {'bool', 'border', 'complex', 'dmatrix', 'fespace', 'func', 'gslspline',
'ifstream', 'int', 'macro', 'matrix', 'mesh', 'mesh3', 'mpiComm',
'mpiGroup', 'mpiRequest', 'NewMacro', 'EndMacro', 'ofstream', 'Pmmap',
'problem', 'Psemaphore', 'real', 'solve', 'string', 'varf'}
# finite element spaces
fespaces = {'BDM1', 'BDM1Ortho', 'Edge03d', 'Edge13d', 'Edge23d', 'FEQF', 'HCT',
'P0', 'P03d', 'P0Edge', 'P1', 'P13d', 'P1b', 'P1b3d', 'P1bl', 'P1bl3d',
'P1dc', 'P1Edge', 'P1nc', 'P2', 'P23d', 'P2b', 'P2BR', 'P2dc', 'P2Edge',
'P2h', 'P2Morley', 'P2pnc', 'P3', 'P3dc', 'P3Edge', 'P4', 'P4dc',
'P4Edge', 'P5Edge', 'RT0', 'RT03d', 'RT0Ortho', 'RT1', 'RT1Ortho',
'RT2', 'RT2Ortho'}
# preprocessor
preprocessor = {'ENDIFMACRO', 'include', 'IFMACRO', 'load'}
# Language keywords
keywords = {
'adj',
'append',
'area',
'ARGV',
'be',
'binary',
'BoundaryEdge',
'bordermeasure',
'CG',
'Cholesky',
'cin',
'cout',
'Crout',
'default',
'diag',
'edgeOrientation',
'endl',
'false',
'ffind',
'FILE',
'find',
'fixed',
'flush',
'GMRES',
'good',
'hTriangle',
'im',
'imax',
'imin',
'InternalEdge',
'l1',
'l2',
'label',
'lenEdge',
'length',
'LINE',
'linfty',
'LU',
'm',
'max',
'measure',
'min',
'mpiAnySource',
'mpiBAND',
'mpiBXOR',
'mpiCommWorld',
'mpiLAND',
'mpiLOR',
'mpiLXOR',
'mpiMAX',
'mpiMIN',
'mpiPROD',
'mpirank',
'mpisize',
'mpiSUM',
'mpiUndefined',
'n',
'N',
'nbe',
'ndof',
'ndofK',
'noshowbase',
'noshowpos',
'notaregion',
'nt',
'nTonEdge',
'nuEdge',
'nuTriangle',
'nv',
'P',
'pi',
'precision',
'qf1pE',
'qf1pElump',
'qf1pT',
'qf1pTlump',
'qfV1',
'qfV1lump',
'qf2pE',
'qf2pT',
'qf2pT4P1',
'qfV2',
'qf3pE',
'qf4pE',
'qf5pE',
'qf5pT',
'qfV5',
'qf7pT',
'qf9pT',
'qfnbpE',
'quantile',
're',
'region',
'rfind',
'scientific',
'searchMethod',
'setw',
'showbase',
'showpos',
'sparsesolver',
'sum',
'tellp',
'true',
'UMFPACK',
'unused',
'whoinElement',
'verbosity',
'version',
'volume',
'x',
'y',
'z'
}
    # Functions and classes shipped with the language
functions = {
'abs',
'acos',
'acosh',
'adaptmesh',
'adj',
'AffineCG',
'AffineGMRES',
'arg',
'asin',
'asinh',
'assert',
'atan',
'atan2',
'atanh',
'atof',
'atoi',
'BFGS',
'broadcast',
'buildlayers',
'buildmesh',
'ceil',
'chi',
'complexEigenValue',
'copysign',
'change',
'checkmovemesh',
'clock',
'cmaes',
'conj',
'convect',
'cos',
'cosh',
'cube',
'd',
'dd',
'dfft',
'diffnp',
'diffpos',
'dimKrylov',
'dist',
'dumptable',
'dx',
'dxx',
'dxy',
'dxz',
'dy',
'dyx',
'dyy',
'dyz',
'dz',
'dzx',
'dzy',
'dzz',
'EigenValue',
'emptymesh',
'erf',
'erfc',
'exec',
'exit',
'exp',
'fdim',
'floor',
'fmax',
'fmin',
'fmod',
'freeyams',
'getARGV',
'getline',
'gmshload',
'gmshload3',
'gslcdfugaussianP',
'gslcdfugaussianQ',
'gslcdfugaussianPinv',
'gslcdfugaussianQinv',
'gslcdfgaussianP',
'gslcdfgaussianQ',
'gslcdfgaussianPinv',
'gslcdfgaussianQinv',
'gslcdfgammaP',
'gslcdfgammaQ',
'gslcdfgammaPinv',
'gslcdfgammaQinv',
'gslcdfcauchyP',
'gslcdfcauchyQ',
'gslcdfcauchyPinv',
'gslcdfcauchyQinv',
'gslcdflaplaceP',
'gslcdflaplaceQ',
'gslcdflaplacePinv',
'gslcdflaplaceQinv',
'gslcdfrayleighP',
'gslcdfrayleighQ',
'gslcdfrayleighPinv',
'gslcdfrayleighQinv',
'gslcdfchisqP',
'gslcdfchisqQ',
'gslcdfchisqPinv',
'gslcdfchisqQinv',
'gslcdfexponentialP',
'gslcdfexponentialQ',
'gslcdfexponentialPinv',
'gslcdfexponentialQinv',
'gslcdfexppowP',
'gslcdfexppowQ',
'gslcdftdistP',
'gslcdftdistQ',
'gslcdftdistPinv',
'gslcdftdistQinv',
'gslcdffdistP',
'gslcdffdistQ',
'gslcdffdistPinv',
'gslcdffdistQinv',
'gslcdfbetaP',
'gslcdfbetaQ',
'gslcdfbetaPinv',
'gslcdfbetaQinv',
'gslcdfflatP',
'gslcdfflatQ',
'gslcdfflatPinv',
'gslcdfflatQinv',
'gslcdflognormalP',
'gslcdflognormalQ',
'gslcdflognormalPinv',
'gslcdflognormalQinv',
'gslcdfgumbel1P',
'gslcdfgumbel1Q',
'gslcdfgumbel1Pinv',
'gslcdfgumbel1Qinv',
'gslcdfgumbel2P',
'gslcdfgumbel2Q',
'gslcdfgumbel2Pinv',
'gslcdfgumbel2Qinv',
'gslcdfweibullP',
'gslcdfweibullQ',
'gslcdfweibullPinv',
'gslcdfweibullQinv',
'gslcdfparetoP',
'gslcdfparetoQ',
'gslcdfparetoPinv',
'gslcdfparetoQinv',
'gslcdflogisticP',
'gslcdflogisticQ',
'gslcdflogisticPinv',
'gslcdflogisticQinv',
'gslcdfbinomialP',
'gslcdfbinomialQ',
'gslcdfpoissonP',
'gslcdfpoissonQ',
'gslcdfgeometricP',
'gslcdfgeometricQ',
'gslcdfnegativebinomialP',
'gslcdfnegativebinomialQ',
'gslcdfpascalP',
'gslcdfpascalQ',
'gslinterpakima',
'gslinterpakimaperiodic',
'gslinterpcsplineperiodic',
'gslinterpcspline',
'gslinterpsteffen',
'gslinterplinear',
'gslinterppolynomial',
'gslranbernoullipdf',
'gslranbeta',
'gslranbetapdf',
'gslranbinomialpdf',
'gslranexponential',
'gslranexponentialpdf',
'gslranexppow',
'gslranexppowpdf',
'gslrancauchy',
'gslrancauchypdf',
'gslranchisq',
'gslranchisqpdf',
'gslranerlang',
'gslranerlangpdf',
'gslranfdist',
'gslranfdistpdf',
'gslranflat',
'gslranflatpdf',
'gslrangamma',
'gslrangammaint',
'gslrangammapdf',
'gslrangammamt',
'gslrangammaknuth',
'gslrangaussian',
'gslrangaussianratiomethod',
'gslrangaussianziggurat',
'gslrangaussianpdf',
'gslranugaussian',
'gslranugaussianratiomethod',
'gslranugaussianpdf',
'gslrangaussiantail',
'gslrangaussiantailpdf',
'gslranugaussiantail',
'gslranugaussiantailpdf',
'gslranlandau',
'gslranlandaupdf',
'gslrangeometricpdf',
'gslrangumbel1',
'gslrangumbel1pdf',
'gslrangumbel2',
'gslrangumbel2pdf',
'gslranlogistic',
'gslranlogisticpdf',
'gslranlognormal',
'gslranlognormalpdf',
'gslranlogarithmicpdf',
'gslrannegativebinomialpdf',
'gslranpascalpdf',
'gslranpareto',
'gslranparetopdf',
'gslranpoissonpdf',
'gslranrayleigh',
'gslranrayleighpdf',
'gslranrayleightail',
'gslranrayleightailpdf',
'gslrantdist',
'gslrantdistpdf',
'gslranlaplace',
'gslranlaplacepdf',
'gslranlevy',
'gslranweibull',
'gslranweibullpdf',
'gslsfairyAi',
'gslsfairyBi',
'gslsfairyAiscaled',
'gslsfairyBiscaled',
'gslsfairyAideriv',
'gslsfairyBideriv',
'gslsfairyAiderivscaled',
'gslsfairyBiderivscaled',
'gslsfairyzeroAi',
'gslsfairyzeroBi',
'gslsfairyzeroAideriv',
'gslsfairyzeroBideriv',
'gslsfbesselJ0',
'gslsfbesselJ1',
'gslsfbesselJn',
'gslsfbesselY0',
'gslsfbesselY1',
'gslsfbesselYn',
'gslsfbesselI0',
'gslsfbesselI1',
'gslsfbesselIn',
'gslsfbesselI0scaled',
'gslsfbesselI1scaled',
'gslsfbesselInscaled',
'gslsfbesselK0',
'gslsfbesselK1',
'gslsfbesselKn',
'gslsfbesselK0scaled',
'gslsfbesselK1scaled',
'gslsfbesselKnscaled',
'gslsfbesselj0',
'gslsfbesselj1',
'gslsfbesselj2',
'gslsfbesseljl',
'gslsfbessely0',
'gslsfbessely1',
'gslsfbessely2',
'gslsfbesselyl',
'gslsfbesseli0scaled',
'gslsfbesseli1scaled',
'gslsfbesseli2scaled',
'gslsfbesselilscaled',
'gslsfbesselk0scaled',
'gslsfbesselk1scaled',
'gslsfbesselk2scaled',
'gslsfbesselklscaled',
'gslsfbesselJnu',
'gslsfbesselYnu',
'gslsfbesselInuscaled',
'gslsfbesselInu',
'gslsfbesselKnuscaled',
'gslsfbesselKnu',
'gslsfbessellnKnu',
'gslsfbesselzeroJ0',
'gslsfbesselzeroJ1',
'gslsfbesselzeroJnu',
'gslsfclausen',
'gslsfhydrogenicR1',
'gslsfdawson',
'gslsfdebye1',
'gslsfdebye2',
'gslsfdebye3',
'gslsfdebye4',
'gslsfdebye5',
'gslsfdebye6',
'gslsfdilog',
'gslsfmultiply',
'gslsfellintKcomp',
'gslsfellintEcomp',
'gslsfellintPcomp',
'gslsfellintDcomp',
'gslsfellintF',
'gslsfellintE',
'gslsfellintRC',
'gslsferfc',
'gslsflogerfc',
'gslsferf',
'gslsferfZ',
'gslsferfQ',
'gslsfhazard',
'gslsfexp',
'gslsfexpmult',
'gslsfexpm1',
'gslsfexprel',
'gslsfexprel2',
'gslsfexpreln',
'gslsfexpintE1',
'gslsfexpintE2',
'gslsfexpintEn',
'gslsfexpintE1scaled',
'gslsfexpintE2scaled',
'gslsfexpintEnscaled',
'gslsfexpintEi',
'gslsfexpintEiscaled',
'gslsfShi',
'gslsfChi',
'gslsfexpint3',
'gslsfSi',
'gslsfCi',
'gslsfatanint',
'gslsffermidiracm1',
'gslsffermidirac0',
'gslsffermidirac1',
'gslsffermidirac2',
'gslsffermidiracint',
'gslsffermidiracmhalf',
'gslsffermidirachalf',
'gslsffermidirac3half',
'gslsffermidiracinc0',
'gslsflngamma',
'gslsfgamma',
'gslsfgammastar',
'gslsfgammainv',
'gslsftaylorcoeff',
'gslsffact',
'gslsfdoublefact',
'gslsflnfact',
'gslsflndoublefact',
'gslsflnchoose',
'gslsfchoose',
'gslsflnpoch',
'gslsfpoch',
'gslsfpochrel',
'gslsfgammaincQ',
'gslsfgammaincP',
'gslsfgammainc',
'gslsflnbeta',
'gslsfbeta',
'gslsfbetainc',
'gslsfgegenpoly1',
'gslsfgegenpoly2',
'gslsfgegenpoly3',
'gslsfgegenpolyn',
'gslsfhyperg0F1',
'gslsfhyperg1F1int',
'gslsfhyperg1F1',
'gslsfhypergUint',
'gslsfhypergU',
'gslsfhyperg2F0',
'gslsflaguerre1',
'gslsflaguerre2',
'gslsflaguerre3',
'gslsflaguerren',
'gslsflambertW0',
'gslsflambertWm1',
'gslsflegendrePl',
'gslsflegendreP1',
'gslsflegendreP2',
'gslsflegendreP3',
'gslsflegendreQ0',
'gslsflegendreQ1',
'gslsflegendreQl',
'gslsflegendrePlm',
'gslsflegendresphPlm',
'gslsflegendrearraysize',
'gslsfconicalPhalf',
'gslsfconicalPmhalf',
'gslsfconicalP0',
'gslsfconicalP1',
'gslsfconicalPsphreg',
'gslsfconicalPcylreg',
'gslsflegendreH3d0',
'gslsflegendreH3d1',
'gslsflegendreH3d',
'gslsflog',
'gslsflogabs',
'gslsflog1plusx',
'gslsflog1plusxmx',
'gslsfpowint',
'gslsfpsiint',
'gslsfpsi',
'gslsfpsi1piy',
'gslsfpsi1int',
'gslsfpsi1',
'gslsfpsin',
'gslsfsynchrotron1',
'gslsfsynchrotron2',
'gslsftransport2',
'gslsftransport3',
'gslsftransport4',
'gslsftransport5',
'gslsfsin',
'gslsfcos',
'gslsfhypot',
'gslsfsinc',
'gslsflnsinh',
'gslsflncosh',
'gslsfanglerestrictsymm',
'gslsfanglerestrictpos',
'gslsfzetaint',
'gslsfzeta',
'gslsfzetam1',
'gslsfzetam1int',
'gslsfhzeta',
'gslsfetaint',
'gslsfeta',
'imag',
'int1d',
'int2d',
'int3d',
'intalledges',
'intallfaces',
'interpolate',
'invdiff',
'invdiffnp',
'invdiffpos',
'Isend',
'isInf',
'isNaN',
'isoline',
'Irecv',
'j0',
'j1',
'jn',
'jump',
'lgamma',
'LinearCG',
'LinearGMRES',
'log',
'log10',
'lrint',
'lround',
'max',
'mean',
'medit',
'min',
'mmg3d',
'movemesh',
'movemesh23',
'mpiAlltoall',
'mpiAlltoallv',
'mpiAllgather',
'mpiAllgatherv',
'mpiAllReduce',
'mpiBarrier',
'mpiGather',
'mpiGatherv',
'mpiRank',
'mpiReduce',
'mpiScatter',
'mpiScatterv',
'mpiSize',
'mpiWait',
'mpiWaitAny',
'mpiWtick',
'mpiWtime',
'mshmet',
'NaN',
'NLCG',
'on',
'plot',
'polar',
'Post',
'pow',
'processor',
'processorblock',
'projection',
'randinit',
'randint31',
'randint32',
'random',
'randreal1',
'randreal2',
'randreal3',
'randres53',
'Read',
'readmesh',
'readmesh3',
'Recv',
'rint',
'round',
'savemesh',
'savesol',
'savevtk',
'seekg',
'Sent',
'set',
'sign',
'signbit',
'sin',
'sinh',
'sort',
'splitComm',
'splitmesh',
'sqrt',
'square',
'srandom',
'srandomdev',
'Stringification',
'swap',
'system',
'tan',
'tanh',
'tellg',
'tetg',
'tetgconvexhull',
'tetgreconstruction',
'tetgtransfo',
'tgamma',
'triangulate',
'trunc',
'Wait',
'Write',
'y0',
'y1',
'yn'
}
# function parameters
parameters = {
'A',
'A1',
'abserror',
'absolute',
'aniso',
'aspectratio',
'B',
'B1',
'bb',
'beginend',
'bin',
'boundary',
'bw',
'close',
'cmm',
'coef',
'composante',
'cutoff',
'datafilename',
'dataname',
'dim',
'distmax',
'displacement',
'doptions',
'dparams',
'eps',
'err',
'errg',
'facemerge',
'facetcl',
'factorize',
'file',
'fill',
'fixedborder',
'flabel',
'flags',
'floatmesh',
'floatsol',
'fregion',
'gradation',
'grey',
'hmax',
'hmin',
'holelist',
'hsv',
'init',
'inquire',
'inside',
'IsMetric',
'iso',
'ivalue',
'keepbackvertices',
'label',
'labeldown',
'labelmid',
'labelup',
'levelset',
'loptions',
'lparams',
'maxit',
'maxsubdiv',
'meditff',
'mem',
'memory',
'metric',
'mode',
'nbarrow',
'nbiso',
'nbiter',
'nbjacoby',
'nboffacetcl',
'nbofholes',
'nbofregions',
'nbregul',
'nbsmooth',
'nbvx',
'ncv',
'nev',
'nomeshgeneration',
'normalization',
'omega',
'op',
'optimize',
'option',
'options',
'order',
'orientation',
'periodic',
'power',
'precon',
'prev',
'ps',
'ptmerge',
'qfe',
'qforder',
'qft',
'qfV',
'ratio',
'rawvector',
'reffacelow',
'reffacemid',
'reffaceup',
'refnum',
'reftet',
'reftri',
'region',
'regionlist',
'renumv',
'rescaling',
'ridgeangle',
'save',
'sigma',
'sizeofvolume',
'smoothing',
'solver',
'sparams',
'split',
'splitin2',
'splitpbedge',
'stop',
'strategy',
'swap',
'switch',
'sym',
't',
'tgv',
'thetamax',
'tol',
'tolpivot',
'tolpivotsym',
'transfo',
'U2Vc',
'value',
'varrow',
'vector',
'veps',
'viso',
'wait',
'width',
'withsurfacemesh',
'WindowIndex',
'which',
'zbound'
}
# deprecated
deprecated = {'fixeborder'}
# do not highlight
suppress_highlight = {
'alignof',
'asm',
'constexpr',
'decltype',
'div',
'double',
'grad',
'mutable',
'namespace',
'noexcept',
'restrict',
'static_assert',
'template',
'this',
'thread_local',
'typeid',
'typename',
'using'
}
def get_tokens_unprocessed(self, text, stack=('root',)):
for index, token, value in CppLexer.get_tokens_unprocessed(self, text, stack):
if value in self.operators:
yield index, Operator, value
elif value in self.types:
yield index, Keyword.Type, value
elif value in self.fespaces:
yield index, Name.Class, value
elif value in self.preprocessor:
yield index, Comment.Preproc, value
elif value in self.keywords:
yield index, Keyword.Reserved, value
elif value in self.functions:
yield index, Name.Function, value
elif value in self.parameters:
yield index, Keyword.Pseudo, value
elif value in self.suppress_highlight:
yield index, Name, value
else:
yield index, token, value
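# --- Illustrative usage sketch (editorial addition, not part of Pygments) ---
# Runs only when this module is executed directly. It shows the override
# above in action: identifiers that the plain CppLexer would emit as generic
# names are remapped to FreeFem-specific token types, e.g. 'mesh' becomes
# Keyword.Type and 'buildmesh' becomes Name.Function. The snippet of FreeFem
# code is made up for illustration.
if __name__ == '__main__':
code = 'mesh Th = buildmesh(C(10));\n'
for _, token_type, value in FreeFemLexer().get_tokens_unprocessed(code):
if value.strip():
print(token_type, repr(value))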
| 26,914 | Python | 29.072626 | 88 | 0.354648 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_usd_builtins.py | """
pygments.lexers._usd_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A collection of known USD-related keywords, attributes, and types.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
COMMON_ATTRIBUTES = [
"extent",
"xformOpOrder",
]
KEYWORDS = [
"class",
"clips",
"custom",
"customData",
"def",
"dictionary",
"inherits",
"over",
"payload",
"references",
"rel",
"subLayers",
"timeSamples",
"uniform",
"variantSet",
"variantSets",
"variants",
]
OPERATORS = [
"add",
"append",
"delete",
"prepend",
"reorder",
]
SPECIAL_NAMES = [
"active",
"apiSchemas",
"defaultPrim",
"elementSize",
"endTimeCode",
"hidden",
"instanceable",
"interpolation",
"kind",
"startTimeCode",
"upAxis",
]
TYPES = [
"asset",
"bool",
"color3d",
"color3f",
"color3h",
"color4d",
"color4f",
"color4h",
"double",
"double2",
"double3",
"double4",
"float",
"float2",
"float3",
"float4",
"frame4d",
"half",
"half2",
"half3",
"half4",
"int",
"int2",
"int3",
"int4",
"keyword",
"matrix2d",
"matrix3d",
"matrix4d",
"normal3d",
"normal3f",
"normal3h",
"point3d",
"point3f",
"point3h",
"quatd",
"quatf",
"quath",
"string",
"syn",
"token",
"uchar",
"uchar2",
"uchar3",
"uchar4",
"uint",
"uint2",
"uint3",
"uint4",
"usdaType",
"vector3d",
"vector3f",
"vector3h",
]
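# --- Illustrative usage sketch (editorial addition, not part of Pygments) ---
# Runs only when this module is executed directly. It shows one plausible way
# keyword tables like the ones above are consumed by a lexer: a list is folded
# into a single optimized alternation with pygments.lexer.words() and paired
# with a token type. This is a sketch of the general pattern, not the actual
# rule set of the USD lexer.
if __name__ == '__main__':
from pygments.lexer import words
from pygments.token import Keyword
keyword_rule = (words(KEYWORDS, prefix=r'\b', suffix=r'\b'), Keyword)
# words() defers regex construction; get() returns the generated pattern.
print(keyword_rule[0].get())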
| 1,658 | Python | 13.681416 | 70 | 0.48854 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_lilypond_builtins.py | """
pygments.lexers._lilypond_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LilyPond builtins.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Contents generated by the script lilypond-builtins-generator.ly
# found in the external/ directory of the source tree.
keywords = [
"accepts",
"addlyrics",
"alias",
"book",
"bookpart",
"chordmode",
"chords",
"consists",
"context",
"defaultchild",
"denies",
"description",
"drummode",
"drums",
"etc",
"figuremode",
"figures",
"header",
"include",
"inherit-acceptability",
"language",
"layout",
"lyricmode",
"lyricsto",
"midi",
"name",
"new",
"notemode",
"paper",
"remove",
"score",
"type",
"version",
"with",
]
clefs = [
"C",
"F",
"G",
"G2",
"GG",
"alto",
"altovarC",
"baritone",
"baritonevarC",
"baritonevarF",
"bass",
"blackmensural-c1",
"blackmensural-c2",
"blackmensural-c3",
"blackmensural-c4",
"blackmensural-c5",
"french",
"hufnagel-do-fa",
"hufnagel-do1",
"hufnagel-do2",
"hufnagel-do3",
"hufnagel-fa1",
"hufnagel-fa2",
"kievan-do",
"medicaea-do1",
"medicaea-do2",
"medicaea-do3",
"medicaea-fa1",
"medicaea-fa2",
"mensural-c1",
"mensural-c2",
"mensural-c3",
"mensural-c4",
"mensural-c5",
"mensural-f",
"mensural-g",
"mezzosoprano",
"moderntab",
"neomensural-c1",
"neomensural-c2",
"neomensural-c3",
"neomensural-c4",
"neomensural-c5",
"percussion",
"petrucci-c1",
"petrucci-c2",
"petrucci-c3",
"petrucci-c4",
"petrucci-c5",
"petrucci-f",
"petrucci-f2",
"petrucci-f3",
"petrucci-f4",
"petrucci-f5",
"petrucci-g",
"petrucci-g1",
"petrucci-g2",
"soprano",
"subbass",
"tab",
"tenor",
"tenorG",
"tenorvarC",
"treble",
"varC",
"varbaritone",
"varpercussion",
"vaticana-do1",
"vaticana-do2",
"vaticana-do3",
"vaticana-fa1",
"vaticana-fa2",
"violin",
]
scales = [
"aeolian",
"dorian",
"ionian",
"locrian",
"lydian",
"major",
"minor",
"mixolydian",
"phrygian",
]
repeat_types = [
"percent",
"segno",
"unfold",
"volta",
]
units = [
"cm",
"in",
"mm",
"pt",
"staff-space",
]
chord_modifiers = [
"aug",
"dim",
"m",
"maj",
]
pitch_language_names = [
"arabic",
"catalan",
"català",
"deutsch",
"english",
"espanol",
"español",
"français",
"italiano",
"nederlands",
"norsk",
"portugues",
"português",
"suomi",
"svenska",
"vlaams",
]
pitches = [
"R",
"a",
"a-flat",
"a-flatflat",
"a-natural",
"a-sharp",
"a-sharpsharp",
"ab",
"acousticbassdrum",
"acousticsnare",
"ad",
"adb",
"add",
"aeh",
"aes",
"aeseh",
"aeses",
"aess",
"aesseh",
"aessess",
"af",
"aff",
"afhb",
"afhd",
"agh",
"agl",
"ah",
"aih",
"ais",
"aisih",
"aisis",
"aiss",
"aissih",
"aississ",
"aqf",
"aqs",
"as",
"asah",
"asas",
"aseh",
"ases",
"ashb",
"ashd",
"ass",
"asseh",
"assess",
"atqb",
"atqd",
"atqf",
"atqs",
"ax",
"b",
"b-flat",
"b-flatflat",
"b-natural",
"b-sharp",
"b-sharpsharp",
"bassdrum",
"bb",
"bd",
"bda",
"bdb",
"bdd",
"beh",
"bes",
"beseh",
"beses",
"bess",
"bf",
"bff",
"bfhb",
"bfhd",
"bih",
"bis",
"bisih",
"bisis",
"boh",
"bohm",
"boho",
"bol",
"bolm",
"bolo",
"bqf",
"bqs",
"bs",
"bshb",
"bshd",
"bss",
"btqb",
"btqd",
"btqf",
"btqs",
"bx",
"c",
"c-flat",
"c-flatflat",
"c-natural",
"c-sharp",
"c-sharpsharp",
"cab",
"cabasa",
"cb",
"cd",
"cdb",
"cdd",
"ceh",
"ces",
"ceseh",
"ceses",
"cess",
"cesseh",
"cessess",
"cf",
"cff",
"cfhb",
"cfhd",
"cgh",
"cghm",
"cgho",
"cgl",
"cglm",
"cglo",
"chinesecymbal",
"cih",
"cis",
"cisih",
"cisis",
"ciss",
"cissih",
"cississ",
"cl",
"claves",
"closedhihat",
"cowbell",
"cqf",
"cqs",
"crashcymbal",
"crashcymbala",
"crashcymbalb",
"cs",
"cshb",
"cshd",
"css",
"ctqb",
"ctqd",
"ctqf",
"ctqs",
"cuim",
"cuio",
"cx",
"cymc",
"cymca",
"cymcb",
"cymch",
"cymr",
"cymra",
"cymrb",
"cyms",
"d",
"d-flat",
"d-flatflat",
"d-natural",
"d-sharp",
"d-sharpsharp",
"db",
"dd",
"ddb",
"ddd",
"deh",
"des",
"deseh",
"deses",
"dess",
"desseh",
"dessess",
"df",
"dff",
"dfhb",
"dfhd",
"dih",
"dis",
"disih",
"disis",
"diss",
"dissih",
"dississ",
"do",
"dob",
"dobb",
"dobhb",
"dobqt",
"dobsb",
"dobtqt",
"docb",
"docs",
"dod",
"dodd",
"dodsd",
"dohb",
"dohk",
"dok",
"dokhk",
"dokk",
"doqb",
"doqd",
"doqs",
"dos",
"dosb",
"dosd",
"dosqt",
"doss",
"dostqt",
"dotcb",
"dotcs",
"dotqb",
"dotqd",
"dotqs",
"dox",
"dqf",
"dqs",
"ds",
"dshb",
"dshd",
"dss",
"dtqb",
"dtqd",
"dtqf",
"dtqs",
"dx",
"e",
"e-flat",
"e-flatflat",
"e-natural",
"e-sharp",
"e-sharpsharp",
"eb",
"ed",
"edb",
"edd",
"eeh",
"ees",
"eeseh",
"eeses",
"eess",
"eesseh",
"eessess",
"ef",
"eff",
"efhb",
"efhd",
"eh",
"eih",
"eis",
"eisih",
"eisis",
"eiss",
"eissih",
"eississ",
"electricsnare",
"eqf",
"eqs",
"es",
"eseh",
"eses",
"eshb",
"eshd",
"ess",
"esseh",
"essess",
"etqb",
"etqd",
"etqf",
"etqs",
"ex",
"f",
"f-flat",
"f-flatflat",
"f-natural",
"f-sharp",
"f-sharpsharp",
"fa",
"fab",
"fabb",
"fabhb",
"fabqt",
"fabsb",
"fabtqt",
"facb",
"facs",
"fad",
"fadd",
"fadsd",
"fahb",
"fahk",
"fak",
"fakhk",
"fakk",
"faqb",
"faqd",
"faqs",
"fas",
"fasb",
"fasd",
"fasqt",
"fass",
"fastqt",
"fatcb",
"fatcs",
"fatqb",
"fatqd",
"fatqs",
"fax",
"fb",
"fd",
"fdb",
"fdd",
"feh",
"fes",
"feseh",
"feses",
"fess",
"fesseh",
"fessess",
"ff",
"fff",
"ffhb",
"ffhd",
"fih",
"fis",
"fisih",
"fisis",
"fiss",
"fissih",
"fississ",
"fqf",
"fqs",
"fs",
"fshb",
"fshd",
"fss",
"ftqb",
"ftqd",
"ftqf",
"ftqs",
"fx",
"g",
"g-flat",
"g-flatflat",
"g-natural",
"g-sharp",
"g-sharpsharp",
"gb",
"gd",
"gdb",
"gdd",
"geh",
"ges",
"geseh",
"geses",
"gess",
"gesseh",
"gessess",
"gf",
"gff",
"gfhb",
"gfhd",
"gih",
"gis",
"gisih",
"gisis",
"giss",
"gissih",
"gississ",
"gqf",
"gqs",
"gs",
"gshb",
"gshd",
"gss",
"gtqb",
"gtqd",
"gtqf",
"gtqs",
"gui",
"guil",
"guiro",
"guis",
"gx",
"h",
"halfopenhihat",
"handclap",
"hc",
"heh",
"heseh",
"heses",
"hesseh",
"hessess",
"hh",
"hhc",
"hhho",
"hho",
"hhp",
"hiagogo",
"hibongo",
"hiconga",
"highfloortom",
"hightom",
"hih",
"hihat",
"himidtom",
"his",
"hisidestick",
"hisih",
"hisis",
"hiss",
"hissih",
"hississ",
"hitimbale",
"hiwoodblock",
"la",
"lab",
"labb",
"labhb",
"labqt",
"labsb",
"labtqt",
"lacb",
"lacs",
"lad",
"ladd",
"ladsd",
"lahb",
"lahk",
"lak",
"lakhk",
"lakk",
"laqb",
"laqd",
"laqs",
"las",
"lasb",
"lasd",
"lasqt",
"lass",
"lastqt",
"latcb",
"latcs",
"latqb",
"latqd",
"latqs",
"lax",
"loagogo",
"lobongo",
"loconga",
"longguiro",
"longwhistle",
"losidestick",
"lotimbale",
"lowfloortom",
"lowmidtom",
"lowoodblock",
"lowtom",
"mar",
"maracas",
"mi",
"mib",
"mibb",
"mibhb",
"mibqt",
"mibsb",
"mibtqt",
"micb",
"mics",
"mid",
"midd",
"midsd",
"mihb",
"mihk",
"mik",
"mikhk",
"mikk",
"miqb",
"miqd",
"miqs",
"mis",
"misb",
"misd",
"misqt",
"miss",
"mistqt",
"mitcb",
"mitcs",
"mitqb",
"mitqd",
"mitqs",
"mix",
"mutecuica",
"mutehibongo",
"mutehiconga",
"mutelobongo",
"muteloconga",
"mutetriangle",
"opencuica",
"openhibongo",
"openhiconga",
"openhihat",
"openlobongo",
"openloconga",
"opentriangle",
"pedalhihat",
"r",
"rb",
"re",
"reb",
"rebb",
"rebhb",
"rebqt",
"rebsb",
"rebtqt",
"recb",
"recs",
"red",
"redd",
"redsd",
"rehb",
"rehk",
"rek",
"rekhk",
"rekk",
"reqb",
"reqd",
"reqs",
"res",
"resb",
"resd",
"resqt",
"ress",
"restqt",
"retcb",
"retcs",
"retqb",
"retqd",
"retqs",
"rex",
"ridebell",
"ridecymbal",
"ridecymbala",
"ridecymbalb",
"ré",
"réb",
"rébb",
"rébsb",
"réd",
"rédd",
"rédsd",
"résb",
"résd",
"réx",
"shortguiro",
"shortwhistle",
"si",
"sib",
"sibb",
"sibhb",
"sibqt",
"sibsb",
"sibtqt",
"sicb",
"sics",
"sid",
"sidd",
"sidestick",
"sidsd",
"sihb",
"sihk",
"sik",
"sikhk",
"sikk",
"siqb",
"siqd",
"siqs",
"sis",
"sisb",
"sisd",
"sisqt",
"siss",
"sistqt",
"sitcb",
"sitcs",
"sitqb",
"sitqd",
"sitqs",
"six",
"sn",
"sna",
"snare",
"sne",
"sol",
"solb",
"solbb",
"solbhb",
"solbqt",
"solbsb",
"solbtqt",
"solcb",
"solcs",
"sold",
"soldd",
"soldsd",
"solhb",
"solhk",
"solk",
"solkhk",
"solkk",
"solqb",
"solqd",
"solqs",
"sols",
"solsb",
"solsd",
"solsqt",
"solss",
"solstqt",
"soltcb",
"soltcs",
"soltqb",
"soltqd",
"soltqs",
"solx",
"splashcymbal",
"ss",
"ssh",
"ssl",
"tamb",
"tambourine",
"timh",
"timl",
"tomfh",
"tomfl",
"tomh",
"toml",
"tommh",
"tomml",
"tri",
"triangle",
"trim",
"trio",
"tt",
"vibraslap",
"vibs",
"wbh",
"wbl",
"whl",
"whs",
]
music_functions = [
"=",
"absolute",
"acciaccatura",
"accidentalStyle",
"addChordShape",
"addInstrumentDefinition",
"addQuote",
"after",
"afterGrace",
"allowPageTurn",
"allowVoltaHook",
"alterBroken",
"alternative",
"ambitusAfter",
"appendToTag",
"applyContext",
"applyMusic",
"applyOutput",
"appoggiatura",
"assertBeamQuant",
"assertBeamSlope",
"autoChange",
"balloonGrobText",
"balloonText",
"bar",
"barNumberCheck",
"beamExceptions",
"bendAfter",
"bendHold",
"bendStartLevel",
"bookOutputName",
"bookOutputSuffix",
"breathe",
"caesura",
"change",
"chordRepeats",
"clef",
"codaMark",
"compoundMeter",
"compressMMRests",
"crossStaff",
"cueClef",
"cueClefUnset",
"cueDuring",
"cueDuringWithClef",
"deadNote",
"defineBarLine",
"displayLilyMusic",
"displayMusic",
"displayScheme",
"dropNote",
"enablePolymeter",
"endSpanners",
"eventChords",
"featherDurations",
"finger",
"fixed",
"footnote",
"grace",
"grobdescriptions",
"harmonicByFret",
"harmonicByRatio",
"harmonicNote",
"harmonicsOn",
"hide",
"inStaffSegno",
"incipit",
"inherit-acceptability",
"instrumentSwitch",
"inversion",
"invertChords",
"jump",
"keepWithTag",
"key",
"killCues",
"label",
"language",
"languageRestore",
"languageSaveAndChange",
"magnifyMusic",
"magnifyStaff",
"makeClusters",
"makeDefaultStringTuning",
"mark",
"markupMap",
"modalInversion",
"modalTranspose",
"musicMap",
"noPageBreak",
"noPageTurn",
"octaveCheck",
"offset",
"omit",
"once",
"ottava",
"override",
"overrideProperty",
"overrideTimeSignatureSettings",
"pageBreak",
"pageTurn",
"palmMute",
"palmMuteOn",
"parallelMusic",
"parenthesize",
"partCombine",
"partCombineDown",
"partCombineForce",
"partCombineUp",
"partial",
"phrasingSlurDashPattern",
"pitchedTrill",
"pointAndClickOff",
"pointAndClickOn",
"pointAndClickTypes",
"preBend",
"preBendHold",
"propertyOverride",
"propertyRevert",
"propertySet",
"propertyTweak",
"propertyUnset",
"pushToTag",
"quoteDuring",
"raiseNote",
"reduceChords",
"relative",
"removeWithTag",
"repeat",
"resetRelativeOctave",
"retrograde",
"revert",
"revertTimeSignatureSettings",
"rightHandFinger",
"scaleDurations",
"sectionLabel",
"segnoMark",
"set",
"settingsFrom",
"shape",
"shiftDurations",
"single",
"skip",
"slashedGrace",
"slurDashPattern",
"storePredefinedDiagram",
"stringTuning",
"styledNoteHeads",
"tabChordRepeats",
"tabChordRepetition",
"tag",
"tagGroup",
"tempo",
"temporary",
"tieDashPattern",
"time",
"times",
"tocItem",
"transpose",
"transposedCueDuring",
"transposition",
"tuplet",
"tupletSpan",
"tweak",
"undo",
"unfoldRepeats",
"unfolded",
"unset",
"voices",
"void",
"volta",
"vshape",
"withMusicProperty",
"xNote",
]
dynamics = [
"!",
"<",
">",
"cr",
"cresc",
"decr",
"decresc",
"dim",
"endcr",
"endcresc",
"enddecr",
"enddecresc",
"enddim",
"f",
"ff",
"fff",
"ffff",
"fffff",
"fp",
"fz",
"mf",
"mp",
"n",
"p",
"pp",
"ppp",
"pppp",
"ppppp",
"rfz",
"sf",
"sff",
"sfp",
"sfz",
"sp",
"spp",
]
articulations = [
"(",
")",
"-",
"[",
"]",
"^",
"accent",
"arpeggio",
"breakDynamicSpan",
"coda",
"dashBang",
"dashDash",
"dashDot",
"dashHat",
"dashLarger",
"dashPlus",
"dashUnderscore",
"downbow",
"downmordent",
"downprall",
"episemFinis",
"episemInitium",
"espressivo",
"fermata",
"flageolet",
"glide",
"glissando",
"halfopen",
"harmonic",
"haydnturn",
"henzelongfermata",
"henzeshortfermata",
"laissezVibrer",
"lheel",
"lineprall",
"longfermata",
"ltoe",
"marcato",
"mordent",
"noBeam",
"open",
"portato",
"prall",
"pralldown",
"prallmordent",
"prallprall",
"prallup",
"repeatTie",
"reverseturn",
"rheel",
"rtoe",
"segno",
"shortfermata",
"signumcongruentiae",
"slashturn",
"snappizzicato",
"sostenutoOff",
"sostenutoOn",
"staccatissimo",
"staccato",
"startGraceSlur",
"startGroup",
"startTextSpan",
"startTrillSpan",
"stopGraceSlur",
"stopGroup",
"stopTextSpan",
"stopTrillSpan",
"stopped",
"sustainOff",
"sustainOn",
"tenuto",
"thumb",
"treCorde",
"trill",
"turn",
"unaCorda",
"upbow",
"upmordent",
"upprall",
"varcoda",
"verylongfermata",
"veryshortfermata",
"vowelTransition",
"~",
]
music_commands = [
"[",
"]",
"aikenHeads",
"aikenHeadsMinor",
"aikenThinHeads",
"aikenThinHeadsMinor",
"allowBreak",
"arabicStringNumbers",
"arpeggioArrowDown",
"arpeggioArrowUp",
"arpeggioBracket",
"arpeggioNormal",
"arpeggioParenthesis",
"arpeggioParenthesisDashed",
"autoBeamOff",
"autoBeamOn",
"autoBreaksOff",
"autoBreaksOn",
"autoLineBreaksOff",
"autoLineBreaksOn",
"autoPageBreaksOff",
"autoPageBreaksOn",
"balloonLengthOff",
"balloonLengthOn",
"bassFigureExtendersOff",
"bassFigureExtendersOn",
"bassFigureStaffAlignmentDown",
"bassFigureStaffAlignmentNeutral",
"bassFigureStaffAlignmentUp",
"break",
"cadenzaOff",
"cadenzaOn",
"compressEmptyMeasures",
"crescHairpin",
"crescTextCresc",
"deadNotesOff",
"deadNotesOn",
"defaultNoteHeads",
"defaultTimeSignature",
"deprecatedcresc",
"deprecateddim",
"deprecatedendcresc",
"deprecatedenddim",
"dimHairpin",
"dimTextDecr",
"dimTextDecresc",
"dimTextDim",
"dotsDown",
"dotsNeutral",
"dotsUp",
"dynamicDown",
"dynamicNeutral",
"dynamicUp",
"easyHeadsOff",
"easyHeadsOn",
"endSkipNCs",
"expandEmptyMeasures",
"fine",
"frenchChords",
"funkHeads",
"funkHeadsMinor",
"germanChords",
"harmonicsOff",
"hideNotes",
"hideSplitTiedTabNotes",
"hideStaffSwitch",
"huge",
"ignatzekExceptionMusic",
"improvisationOff",
"improvisationOn",
"italianChords",
"kievanOff",
"kievanOn",
"large",
"markLengthOff",
"markLengthOn",
"medianChordGridStyle",
"melisma",
"melismaEnd",
"mergeDifferentlyDottedOff",
"mergeDifferentlyDottedOn",
"mergeDifferentlyHeadedOff",
"mergeDifferentlyHeadedOn",
"newSpacingSection",
"noBreak",
"normalsize",
"numericTimeSignature",
"oneVoice",
"palmMuteOff",
"partCombineApart",
"partCombineAutomatic",
"partCombineChords",
"partCombineSoloI",
"partCombineSoloII",
"partCombineUnisono",
"phrasingSlurDashed",
"phrasingSlurDotted",
"phrasingSlurDown",
"phrasingSlurHalfDashed",
"phrasingSlurHalfSolid",
"phrasingSlurNeutral",
"phrasingSlurSolid",
"phrasingSlurUp",
"predefinedFretboardsOff",
"predefinedFretboardsOn",
"romanStringNumbers",
"sacredHarpHeads",
"sacredHarpHeadsMinor",
"section",
"semiGermanChords",
"setDefaultDurationToQuarter",
"shiftOff",
"shiftOn",
"shiftOnn",
"shiftOnnn",
"showSplitTiedTabNotes",
"showStaffSwitch",
"skipNC",
"skipNCs",
"slurDashed",
"slurDotted",
"slurDown",
"slurHalfDashed",
"slurHalfSolid",
"slurNeutral",
"slurSolid",
"slurUp",
"small",
"southernHarmonyHeads",
"southernHarmonyHeadsMinor",
"startAcciaccaturaMusic",
"startAppoggiaturaMusic",
"startGraceMusic",
"startMeasureCount",
"startMeasureSpanner",
"startSlashedGraceMusic",
"startStaff",
"stemDown",
"stemNeutral",
"stemUp",
"stopAcciaccaturaMusic",
"stopAppoggiaturaMusic",
"stopGraceMusic",
"stopMeasureCount",
"stopMeasureSpanner",
"stopSlashedGraceMusic",
"stopStaff",
"tabFullNotation",
"teeny",
"textLengthOff",
"textLengthOn",
"textSpannerDown",
"textSpannerNeutral",
"textSpannerUp",
"tieDashed",
"tieDotted",
"tieDown",
"tieHalfDashed",
"tieHalfSolid",
"tieNeutral",
"tieSolid",
"tieUp",
"tiny",
"tupletDown",
"tupletNeutral",
"tupletUp",
"unHideNotes",
"voiceFour",
"voiceFourStyle",
"voiceNeutralStyle",
"voiceOne",
"voiceOneStyle",
"voiceThree",
"voiceThreeStyle",
"voiceTwo",
"voiceTwoStyle",
"walkerHeads",
"walkerHeadsMinor",
"xNotesOff",
"xNotesOn",
"|",
"~",
]
markup_commands = [
"abs-fontsize",
"accidental",
"align-on-other",
"arrow-head",
"auto-footnote",
"backslashed-digit",
"beam",
"bold",
"box",
"bracket",
"caps",
"center-align",
"center-column",
"char",
"circle",
"coda",
"column",
"column-lines",
"combine",
"compound-meter",
"concat",
"conditional-circle-markup",
"customTabClef",
"dir-column",
"discant",
"doubleflat",
"doublesharp",
"draw-circle",
"draw-dashed-line",
"draw-dotted-line",
"draw-hline",
"draw-line",
"draw-squiggle-line",
"dynamic",
"ellipse",
"epsfile",
"eyeglasses",
"fermata",
"fill-line",
"fill-with-pattern",
"filled-box",
"finger",
"first-visible",
"flat",
"fontCaps",
"fontsize",
"footnote",
"fraction",
"freeBass",
"fret-diagram",
"fret-diagram-terse",
"fret-diagram-verbose",
"fromproperty",
"general-align",
"halign",
"harp-pedal",
"hbracket",
"hcenter-in",
"hspace",
"huge",
"if",
"italic",
"justified-lines",
"justify",
"justify-field",
"justify-line",
"justify-string",
"large",
"larger",
"left-align",
"left-brace",
"left-column",
"line",
"lookup",
"lower",
"magnify",
"map-markup-commands",
"markalphabet",
"markletter",
"markup",
"markuplist",
"medium",
"multi-measure-rest-by-number",
"musicglyph",
"natural",
"normal-size-sub",
"normal-size-super",
"normal-text",
"normalsize",
"note",
"note-by-number",
"null",
"number",
"on-the-fly",
"oval",
"overlay",
"override",
"override-lines",
"overtie",
"pad-around",
"pad-markup",
"pad-to-box",
"pad-x",
"page-link",
"page-ref",
"parenthesize",
"path",
"pattern",
"polygon",
"postscript",
"property-recursive",
"put-adjacent",
"raise",
"replace",
"rest",
"rest-by-number",
"rhythm",
"right-align",
"right-brace",
"right-column",
"roman",
"rotate",
"rounded-box",
"sans",
"scale",
"score",
"score-lines",
"segno",
"semiflat",
"semisharp",
"sesquiflat",
"sesquisharp",
"sharp",
"simple",
"slashed-digit",
"small",
"smallCaps",
"smaller",
"stdBass",
"stdBassIV",
"stdBassV",
"stdBassVI",
"stencil",
"string-lines",
"strut",
"sub",
"super",
"table",
"table-of-contents",
"teeny",
"text",
"tie",
"tied-lyric",
"tiny",
"translate",
"translate-scaled",
"transparent",
"triangle",
"typewriter",
"underline",
"undertie",
"unless",
"upright",
"varcoda",
"vcenter",
"verbatim-file",
"vspace",
"whiteout",
"with-color",
"with-dimension",
"with-dimension-from",
"with-dimensions",
"with-dimensions-from",
"with-link",
"with-outline",
"with-string-transformer",
"with-true-dimension",
"with-true-dimensions",
"with-url",
"woodwind-diagram",
"wordwrap",
"wordwrap-field",
"wordwrap-internal",
"wordwrap-lines",
"wordwrap-string",
"wordwrap-string-internal",
]
grobs = [
"Accidental",
"AccidentalCautionary",
"AccidentalPlacement",
"AccidentalSuggestion",
"Ambitus",
"AmbitusAccidental",
"AmbitusLine",
"AmbitusNoteHead",
"Arpeggio",
"BalloonText",
"BarLine",
"BarNumber",
"BassFigure",
"BassFigureAlignment",
"BassFigureAlignmentPositioning",
"BassFigureBracket",
"BassFigureContinuation",
"BassFigureLine",
"Beam",
"BendAfter",
"BendSpanner",
"BreakAlignGroup",
"BreakAlignment",
"BreathingSign",
"CenteredBarNumber",
"CenteredBarNumberLineSpanner",
"ChordName",
"ChordSquare",
"Clef",
"ClefModifier",
"ClusterSpanner",
"ClusterSpannerBeacon",
"CodaMark",
"CombineTextScript",
"ControlPoint",
"ControlPolygon",
"CueClef",
"CueEndClef",
"Custos",
"DotColumn",
"Dots",
"DoublePercentRepeat",
"DoublePercentRepeatCounter",
"DoubleRepeatSlash",
"DurationLine",
"DynamicLineSpanner",
"DynamicText",
"DynamicTextSpanner",
"Episema",
"FingerGlideSpanner",
"Fingering",
"FingeringColumn",
"Flag",
"Footnote",
"FretBoard",
"Glissando",
"GraceSpacing",
"GridChordName",
"GridLine",
"GridPoint",
"Hairpin",
"HorizontalBracket",
"HorizontalBracketText",
"InstrumentName",
"InstrumentSwitch",
"JumpScript",
"KeyCancellation",
"KeySignature",
"KievanLigature",
"LaissezVibrerTie",
"LaissezVibrerTieColumn",
"LedgerLineSpanner",
"LeftEdge",
"LigatureBracket",
"LyricExtender",
"LyricHyphen",
"LyricRepeatCount",
"LyricSpace",
"LyricText",
"MeasureCounter",
"MeasureGrouping",
"MeasureSpanner",
"MelodyItem",
"MensuralLigature",
"MetronomeMark",
"MultiMeasureRest",
"MultiMeasureRestNumber",
"MultiMeasureRestScript",
"MultiMeasureRestText",
"NonMusicalPaperColumn",
"NoteCollision",
"NoteColumn",
"NoteHead",
"NoteName",
"NoteSpacing",
"OttavaBracket",
"PaperColumn",
"Parentheses",
"PercentRepeat",
"PercentRepeatCounter",
"PhrasingSlur",
"PianoPedalBracket",
"RehearsalMark",
"RepeatSlash",
"RepeatTie",
"RepeatTieColumn",
"Rest",
"RestCollision",
"Script",
"ScriptColumn",
"ScriptRow",
"SectionLabel",
"SegnoMark",
"SignumRepetitionis",
"Slur",
"SostenutoPedal",
"SostenutoPedalLineSpanner",
"SpacingSpanner",
"SpanBar",
"SpanBarStub",
"StaffEllipsis",
"StaffGrouper",
"StaffSpacing",
"StaffSymbol",
"StanzaNumber",
"Stem",
"StemStub",
"StemTremolo",
"StringNumber",
"StrokeFinger",
"SustainPedal",
"SustainPedalLineSpanner",
"System",
"SystemStartBar",
"SystemStartBrace",
"SystemStartBracket",
"SystemStartSquare",
"TabNoteHead",
"TextScript",
"TextSpanner",
"Tie",
"TieColumn",
"TimeSignature",
"TrillPitchAccidental",
"TrillPitchGroup",
"TrillPitchHead",
"TrillPitchParentheses",
"TrillSpanner",
"TupletBracket",
"TupletNumber",
"UnaCordaPedal",
"UnaCordaPedalLineSpanner",
"VaticanaLigature",
"VerticalAlignment",
"VerticalAxisGroup",
"VoiceFollower",
"VoltaBracket",
"VoltaBracketSpanner",
"VowelTransition",
]
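# LilyPond context names, e.g. Staff, Voice, Score.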
contexts = [
"ChoirStaff",
"ChordGrid",
"ChordGridScore",
"ChordNames",
"CueVoice",
"Devnull",
"DrumStaff",
"DrumVoice",
"Dynamics",
"FiguredBass",
"FretBoards",
"Global",
"GrandStaff",
"GregorianTranscriptionLyrics",
"GregorianTranscriptionStaff",
"GregorianTranscriptionVoice",
"InternalGregorianStaff",
"KievanStaff",
"KievanVoice",
"Lyrics",
"MensuralStaff",
"MensuralVoice",
"NoteNames",
"NullVoice",
"OneStaff",
"PetrucciStaff",
"PetrucciVoice",
"PianoStaff",
"RhythmicStaff",
"Score",
"Staff",
"StaffGroup",
"StandaloneRhythmScore",
"StandaloneRhythmStaff",
"StandaloneRhythmVoice",
"TabStaff",
"TabVoice",
"VaticanaLyrics",
"VaticanaStaff",
"VaticanaVoice",
"Voice",
]
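# LilyPond translator names: the engravers and performers plugged into contexts.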
translators = [
"Accidental_engraver",
"Alteration_glyph_engraver",
"Ambitus_engraver",
"Arpeggio_engraver",
"Auto_beam_engraver",
"Axis_group_engraver",
"Balloon_engraver",
"Bar_engraver",
"Bar_number_engraver",
"Beam_collision_engraver",
"Beam_engraver",
"Beam_performer",
"Beat_engraver",
"Beat_performer",
"Bend_engraver",
"Bend_spanner_engraver",
"Break_align_engraver",
"Breathing_sign_engraver",
"Centered_bar_number_align_engraver",
"Chord_name_engraver",
"Chord_square_engraver",
"Chord_tremolo_engraver",
"Clef_engraver",
"Cluster_spanner_engraver",
"Collision_engraver",
"Completion_heads_engraver",
"Completion_rest_engraver",
"Concurrent_hairpin_engraver",
"Control_track_performer",
"Cue_clef_engraver",
"Current_chord_text_engraver",
"Custos_engraver",
"Dot_column_engraver",
"Dots_engraver",
"Double_percent_repeat_engraver",
"Drum_note_performer",
"Drum_notes_engraver",
"Duration_line_engraver",
"Dynamic_align_engraver",
"Dynamic_engraver",
"Dynamic_performer",
"Episema_engraver",
"Extender_engraver",
"Figured_bass_engraver",
"Figured_bass_position_engraver",
"Finger_glide_engraver",
"Fingering_column_engraver",
"Fingering_engraver",
"Font_size_engraver",
"Footnote_engraver",
"Forbid_line_break_engraver",
"Fretboard_engraver",
"Glissando_engraver",
"Grace_auto_beam_engraver",
"Grace_beam_engraver",
"Grace_engraver",
"Grace_spacing_engraver",
"Grid_chord_name_engraver",
"Grid_line_span_engraver",
"Grid_point_engraver",
"Grob_pq_engraver",
"Horizontal_bracket_engraver",
"Hyphen_engraver",
"Instrument_name_engraver",
"Instrument_switch_engraver",
"Jump_engraver",
"Keep_alive_together_engraver",
"Key_engraver",
"Key_performer",
"Kievan_ligature_engraver",
"Laissez_vibrer_engraver",
"Ledger_line_engraver",
"Ligature_bracket_engraver",
"Lyric_engraver",
"Lyric_performer",
"Lyric_repeat_count_engraver",
"Mark_engraver",
"Mark_performer",
"Mark_tracking_translator",
"Measure_counter_engraver",
"Measure_grouping_engraver",
"Measure_spanner_engraver",
"Melody_engraver",
"Mensural_ligature_engraver",
"Merge_mmrest_numbers_engraver",
"Merge_rests_engraver",
"Metronome_mark_engraver",
"Midi_control_change_performer",
"Multi_measure_rest_engraver",
"New_fingering_engraver",
"Note_head_line_engraver",
"Note_heads_engraver",
"Note_name_engraver",
"Note_performer",
"Note_spacing_engraver",
"Ottava_spanner_engraver",
"Output_property_engraver",
"Page_turn_engraver",
"Paper_column_engraver",
"Parenthesis_engraver",
"Part_combine_engraver",
"Percent_repeat_engraver",
"Phrasing_slur_engraver",
"Piano_pedal_align_engraver",
"Piano_pedal_engraver",
"Piano_pedal_performer",
"Pitch_squash_engraver",
"Pitched_trill_engraver",
"Pure_from_neighbor_engraver",
"Repeat_acknowledge_engraver",
"Repeat_tie_engraver",
"Rest_collision_engraver",
"Rest_engraver",
"Rhythmic_column_engraver",
"Script_column_engraver",
"Script_engraver",
"Script_row_engraver",
"Separating_line_group_engraver",
"Show_control_points_engraver",
"Signum_repetitionis_engraver",
"Skip_typesetting_engraver",
"Slash_repeat_engraver",
"Slur_engraver",
"Slur_performer",
"Spacing_engraver",
"Span_arpeggio_engraver",
"Span_bar_engraver",
"Span_bar_stub_engraver",
"Span_stem_engraver",
"Spanner_break_forbid_engraver",
"Spanner_tracking_engraver",
"Staff_collecting_engraver",
"Staff_performer",
"Staff_symbol_engraver",
"Stanza_number_align_engraver",
"Stanza_number_engraver",
"Stem_engraver",
"System_start_delimiter_engraver",
"Tab_note_heads_engraver",
"Tab_staff_symbol_engraver",
"Tab_tie_follow_engraver",
"Tempo_performer",
"Text_engraver",
"Text_spanner_engraver",
"Tie_engraver",
"Tie_performer",
"Time_signature_engraver",
"Time_signature_performer",
"Timing_translator",
"Trill_spanner_engraver",
"Tuplet_engraver",
"Tweak_engraver",
"Vaticana_ligature_engraver",
"Vertical_align_engraver",
"Volta_engraver",
]
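# Scheme-level bindings exposed by LilyPond, including the ly: built-in
# functions and the markup command functions.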
scheme_functions = [
"!=",
"*location*",
"*parser*",
"Alteration_glyph_engraver",
"Beat_performer",
"Bend_spanner_engraver",
"Breathing_sign_engraver",
"Centered_bar_number_align_engraver",
"Chord_name_engraver",
"Chord_square_engraver",
"Current_chord_text_engraver",
"Duration_line_engraver",
"Finger_glide_engraver",
"G_",
"Grid_chord_name_engraver",
"Lyric_repeat_count_engraver",
"Measure_counter_engraver",
"Measure_spanner_engraver",
"Merge_mmrest_numbers_engraver",
"Merge_rests_engraver",
"Show_control_points_engraver",
"Signum_repetitionis_engraver",
"Skip_typesetting_engraver",
"Span_stem_engraver",
"Spanner_tracking_engraver",
"_i",
"abs-fontsize-markup",
"accidental->markup",
"accidental->markup-italian",
"accidental-interface::calc-alteration",
"accidental-invalid?",
"accidental-markup",
"add-bar-glyph-print-procedure",
"add-font",
"add-grace-property",
"add-music",
"add-music-fonts",
"add-new-clef",
"add-pango-fonts",
"add-point",
"add-quotable",
"add-score",
"add-simple-time-signature-style",
"add-stroke-glyph",
"add-stroke-straight",
"add-text",
"adjust-slash-stencil",
"align-on-other-markup",
"aligned-text-stencil-function",
"alist->hash-table",
"alist<?",
"alist?",
"all-bar-numbers-visible",
"all-equal?",
"all-repeat-counts-visible",
"allow-volta-hook",
"alteration->text-accidental-markup",
"alterations-in-key",
"ambitus-line::calc-gap",
"ambitus::print",
"analyse-spanner-states",
"ancestor-lookup-initialize",
"angle-0-2pi",
"angle-0-360",
"annotate-spacing-spec",
"annotate-y-interval",
"any-mmrest-events",
"apply-durations",
"apply-group-draw-rule-series",
"arrow-head-markup",
"arrow-stencil",
"arrow-stencil-maker",
"assemble-stencils",
"assoc-get",
"assoc-keys",
"assoc-values",
"aug-modifier",
"auto-footnote-markup",
"average",
"b",
"backslashed-digit-markup",
"bar-line::bar-y-extent",
"bar-line::calc-blot",
"bar-line::calc-break-visibility",
"bar-line::calc-glyph-name",
"bar-line::calc-glyph-name-for-direction",
"bar-line::compound-bar-line",
"bar-line::draw-filled-box",
"bar-line::widen-bar-extent-on-span",
"base-length",
"bass-clarinet-rh-ees-key-stencil",
"bassoon-bend-info-maker",
"bassoon-cc-one-key-stencil",
"bassoon-lh-a-flick-key-stencil",
"bassoon-lh-c-flick-key-stencil",
"bassoon-lh-cis-key-stencil",
"bassoon-lh-d-flick-key-stencil",
"bassoon-lh-ees-key-stencil",
"bassoon-lh-he-key-stencil",
"bassoon-lh-hees-key-stencil",
"bassoon-lh-lb-key-stencil",
"bassoon-lh-lbes-key-stencil",
"bassoon-lh-lc-key-stencil",
"bassoon-lh-ld-key-stencil",
"bassoon-lh-thumb-cis-key-stencil",
"bassoon-lh-whisper-key-stencil",
"bassoon-midline-rule",
"bassoon-rh-bes-key-stencil",
"bassoon-rh-cis-key-stencil",
"bassoon-rh-f-key-stencil",
"bassoon-rh-fis-key-stencil",
"bassoon-rh-gis-key-stencil",
"bassoon-rh-thumb-bes-key-stencil",
"bassoon-rh-thumb-e-key-stencil",
"bassoon-rh-thumb-fis-key-stencil",
"bassoon-rh-thumb-gis-key-stencil",
"bassoon-uber-key-stencil",
"beam-exceptions",
"beam-markup",
"beam::align-with-broken-parts",
"beam::get-kievan-positions",
"beam::get-kievan-quantized-positions",
"beam::place-broken-parts-individually",
"beam::slope-like-broken-parts",
"beat-grouping-internal",
"beat-structure",
"bend-spanner::print",
"bend::arrow-head-stencil",
"bend::calc-bend-x-begin",
"bend::calc-bend-x-end",
"bend::calc-y-coordinates",
"bend::draw-curves",
"bend::make-line-curve-stencil",
"bend::print",
"bend::remove-certain-tab-note-heads",
"bend::target-cautionary",
"bend::text-stencil",
"bend::text-string",
"bezier-head-for-stencil",
"binary-search",
"bold-markup",
"book-first-page",
"boolean-or-number?",
"boolean-or-symbol?",
"bounding-note-heads-pitches",
"box-grob-stencil",
"box-markup",
"box-stencil",
"bracket-markup",
"bracketify-stencil",
"break-alignable-interface::self-alignment-of-anchor",
"break-alignable-interface::self-alignment-opposite-of-anchor",
"break-alignment-list",
"breathe::midi-length",
"buildflag",
"cached-file-contents",
"calc-harmonic-pitch",
"calc-line-thickness",
"calc-repeat-slash-count",
"calculate-complex-compound-time",
"calculate-compound-base-beat",
"calculate-compound-base-beat-full",
"calculate-compound-beat-grouping",
"calculate-compound-measure-length",
"calculate-time-fraction",
"call-after-session",
"caps-markup",
"car-or-identity",
"car<",
"car<=",
"cdr-or-identity",
"center-align-markup",
"center-column-markup",
"centered-spanner-interface::calc-x-offset",
"centered-stencil",
"chain-assoc-get",
"change-pitches",
"char-markup",
"cheap-list?",
"cheap-markup?",
"check-beam-quant",
"check-beam-slope-sign",
"check-broken-spanner",
"check-context-path",
"check-division-alist",
"check-for-annotation",
"check-for-replacement",
"check-grob-path",
"check-music-path",
"check-pitch-against-signature",
"check-quant-callbacks",
"check-slope-callbacks",
"chord-name->german-markup",
"chord-name->italian-markup",
"chord-square::height",
"chord-square::print",
"chord-square::width",
"circle-markup",
"circle-stencil",
"clarinet-lh-R-key-stencil",
"clarinet-lh-a-key-stencil",
"clarinet-lh-cis-key-stencil",
"clarinet-lh-d-key-stencil",
"clarinet-lh-e-key-stencil",
"clarinet-lh-ees-key-stencil",
"clarinet-lh-f-key-stencil",
"clarinet-lh-fis-key-stencil",
"clarinet-lh-gis-key-stencil",
"clarinet-lh-thumb-key-stencil",
"clarinet-rh-b-key-stencil",
"clarinet-rh-d-key-stencil",
"clarinet-rh-e-key-stencil",
"clarinet-rh-f-key-stencil",
"clarinet-rh-fis-key-stencil",
"clarinet-rh-four-key-stencil",
"clarinet-rh-gis-key-stencil",
"clarinet-rh-low-c-key-stencil",
"clarinet-rh-low-cis-key-stencil",
"clarinet-rh-low-d-key-stencil",
"clarinet-rh-one-key-stencil",
"clarinet-rh-three-key-stencil",
"clarinet-rh-two-key-stencil",
"clef-transposition-markup",
"clef::print-modern-tab-if-set",
"clip-systems-to-region-stencils",
"clipped-systems-stencils",
"close-enough?",
"close-port-rename",
"coda-markup",
"collect-book-music-for-book",
"collect-bookpart-for-book",
"collect-music-aux",
"collect-music-for-book",
"collect-scores-for-book",
"color?",
"column-circle-stencil",
"column-lines-markup-list",
"column-markup",
"combine-markup",
"comparable-note-events",
"comparator-from-key",
"compile-all-markup-args",
"compile-all-markup-expressions",
"compile-markup-arg",
"compile-markup-expression",
"completize-formats",
"completize-grob-entry",
"compound-meter-markup",
"concat-markup",
"conditional-circle-markup-markup",
"conditional-kern-before",
"conditional-string-capitalize",
"configuration",
"cons-fret",
"constante-hairpin",
"construct-chord-elements",
"context-defs-from-music",
"context-mod-from-music",
"context-spec-music",
"control-point::calc-offset",
"control-polygon::calc-text",
"coord-axis",
"coord-rotate",
"coord-rotated",
"coord-scale",
"coord-translate",
"coord-x",
"coord-y",
"copy-binary-file",
"copy-repeat-chord",
"count-list",
"create-file-exclusive",
"create-fretboard",
"create-glyph-flag",
"cross-staff-connect",
"css-color",
"cue-substitute",
"current-or-previous-voice-states",
"customTabClef-markup",
"cyclic-base-value",
"debugf",
"def-grace-function",
"default-auto-beam-check",
"default-flag",
"default-paren-color",
"define-bar-line",
"define-event-class",
"define-event-function",
"define-fonts",
"define-grob-property",
"define-internal-grob-property",
"define-markup-command",
"define-markup-command-internal",
"define-markup-list-command",
"define-music-function",
"define-scheme-function",
"define-session",
"define-session-public",
"define-syntax-function",
"define-syntax-public",
"define-syntax-rule-public",
"define-tag-group",
"define-void-function",
"degree-first-true",
"degrees->radians",
"descend-to-context",
"determine-frets",
"determine-split-list",
"determine-string-fret-finger",
"dim-modifier",
"dimension-arrows",
"dir-basename",
"dir-column-markup",
"display-lily-music",
"display-music",
"display-scheme-music",
"dodecaphonic-no-repeat-rule",
"done?",
"dot-has-color",
"dot-is-inverted",
"dot-is-parenthesized",
"dots::calc-dot-count",
"dots::calc-staff-position",
"doubleflat-markup",
"doublesharp-markup",
"draw-circle-markup",
"draw-dashed-line-markup",
"draw-dotted-line-markup",
"draw-hline-markup",
"draw-line-markup",
"draw-squiggle-line-markup",
"dump-zombies",
"duration",
"duration-dot-factor",
"duration-length",
"duration-line::calc",
"duration-line::calc-thickness",
"duration-line::print",
"duration-log-factor",
"duration-of-note",
"duration-or-music?",
"duration-visual",
"duration-visual-length",
"dynamic-markup",
"dynamic-text-spanner::before-line-breaking",
"elbowed-hairpin",
"ellipse-markup",
"ellipse-radius",
"ellipse-stencil",
"empty-music",
"end-broken-spanner?",
"entry-greater-than-x?",
"eps-file->stencil",
"epsfile-markup",
"eval-carefully",
"event-cause",
"event-chord-notes",
"event-chord-pitches",
"event-chord-reduce",
"event-chord-wrap!",
"event-class-cons",
"event-has-articulation?",
"events",
"every-nth-bar-number-visible",
"every-nth-repeat-count-visible",
"exact-rational?",
"expand-repeat-chords!",
"expand-repeat-notes!",
"extent-combine",
"extract-alteration",
"extract-beam-exceptions",
"extract-music",
"extract-named-music",
"extract-typed-music",
"eyeglasses-markup",
"fermata-markup",
"fill-line-markup",
"fill-with-pattern-markup",
"filled-box-markup",
"filtered-map",
"find-named-props",
"find-pitch-entry",
"find-value-to-offset",
"finger-glide::print",
"finger-markup",
"fingering::calc-text",
"first-assoc",
"first-bar-number-invisible",
"first-bar-number-invisible-and-no-parenthesized-bar-numbers",
"first-bar-number-invisible-save-broken-bars",
"first-broken-spanner?",
"first-member",
"first-visible-markup",
"flared-hairpin",
"flat-flag",
"flat-markup",
"flatten-alist",
"flatten-list",
"flip-stencil",
"flute-lh-b-key-stencil",
"flute-lh-bes-key-stencil",
"flute-lh-gis-key-stencil",
"flute-lh-gis-rh-bes-key-stencil",
"flute-rh-b-key-stencil",
"flute-rh-bes-key-stencil",
"flute-rh-c-key-stencil",
"flute-rh-cis-key-stencil",
"flute-rh-d-key-stencil",
"flute-rh-dis-key-stencil",
"flute-rh-ees-key-stencil",
"flute-rh-gz-key-stencil",
"fold-some-music",
"font-children",
"font-default",
"font-name-split",
"font-name-style",
"font-qualifier",
"fontCaps-markup",
"fontsize-markup",
"footnote-markup",
"for-some-music",
"forced-configuration",
"format",
"format-bass-figure",
"format-coda-mark",
"format-compound-time",
"format-dal-segno-text",
"format-dal-segno-text-brief",
"format-mark-alphabet",
"format-mark-barnumbers",
"format-mark-box-alphabet",
"format-mark-box-barnumbers",
"format-mark-box-letters",
"format-mark-box-numbers",
"format-mark-circle-alphabet",
"format-mark-circle-barnumbers",
"format-mark-circle-letters",
"format-mark-circle-numbers",
"format-mark-generic",
"format-mark-letters",
"format-mark-numbers",
"format-metronome-markup",
"format-segno-mark",
"format-segno-mark-considering-bar-lines",
"format-sign-with-number",
"format-time-element",
"format-time-fraction",
"format-time-list",
"format-time-numerator",
"format-varcoda-mark",
"fraction->moment",
"fraction-markup",
"fraction?",
"fret->pitch",
"fret-board::calc-stencil",
"fret-count",
"fret-diagram-markup",
"fret-diagram-terse-markup",
"fret-diagram-verbose-markup",
"fret-letter-tablature-format",
"fret-number-tablature-format",
"fret-number-tablature-format-banjo",
"fret-parse-definition-string",
"fret-parse-marking-list",
"fret-parse-terse-definition-string",
"fromproperty-markup",
"function-chain",
"g",
"g-lookup-font",
"general-align-markup",
"general-column",
"generate-bassoon-family-entry",
"generate-clarinet-family-entry",
"generate-crop-stencil",
"generate-flute-family-entry",
"generate-oboe-family-entry",
"generate-preview-stencil",
"generate-saxophone-family-entry",
"generate-system-stencils",
"generate-tin-whistle-family-entry",
"get-bound-note-heads",
"get-chord-shape",
"get-current-filename",
"get-current-suffix",
"get-fill-space",
"get-key",
"get-named-spreadsheet-column",
"get-next-unique-voice-name",
"get-numeric-from-key",
"get-outfile-name",
"get-postscript-bbox",
"get-quarter-diffs",
"get-setting",
"get-slope-offset",
"get-span-glyph",
"get-spreadsheet-column",
"get-step",
"get-sub-list",
"get-top-most-tab-head",
"get-tweakable-music",
"get-woodwind-key-list",
"glissando::calc-tab-extra-dy",
"glissando::draw-tab-glissando",
"glyph->stencil",
"glyph-flag",
"grace-spacing::calc-shortest-duration",
"gray-colorize",
"grid-chord-name::calc-X-offset",
"grid-chord-name::calc-Y-offset",
"grid-chord-name::calc-offset-on-axis",
"grob-interpret-markup",
"grob-list?",
"grob-transformer",
"grob::all-objects",
"grob::calc-property-by-copy",
"grob::compose-function",
"grob::display-objects",
"grob::has-interface",
"grob::inherit-parent-property",
"grob::is-live?",
"grob::name",
"grob::objects-from-interface",
"grob::offset-function",
"grob::relay-other-property",
"grob::rhythmic-location",
"grob::show-skylines-if-debug-skylines-set",
"grob::unpure-Y-extent-from-stencil",
"grob::when",
"group-automate-rule",
"group-draw-rule",
"group-extra-offset-rule",
"gs-cmd-args",
"gs-safe-run",
"hairpin::calc-grow-direction",
"halign-markup",
"harp-pedal-check",
"harp-pedal-info",
"harp-pedal-markup",
"harp-pedals-parse-string",
"has-at-least-two?",
"has-one-or-less?",
"hash-table->alist",
"hbracket-markup",
"hcenter-in-markup",
"header-to-file",
"headers-property-alist-chain",
"hook-stencil",
"horizontal-slash-interval",
"hspace-markup",
"huge-markup",
"if-markup",
"ignatzek-chord-names",
"index-cell",
"index-or-markup?",
"index?",
"insert-markups",
"internal-set-paper-size",
"interpret-markup",
"interpret-markup-list",
"interval-bound",
"interval-center",
"interval-contains?",
"interval-empty?",
"interval-end",
"interval-index",
"interval-intersection",
"interval-length",
"interval-sane?",
"interval-scale",
"interval-start",
"interval-union",
"interval-widen",
"invalidate-alterations",
"inverter-factory",
"is-absolute?",
"is-square?",
"italic-markup",
"item::extra-spacing-height-including-staff",
"justified-lines-markup-list",
"justify-field-markup",
"justify-line-helper",
"justify-line-markup",
"justify-markup",
"justify-string-markup",
"key-crawler",
"key-entry-alteration",
"key-entry-bar-number",
"key-entry-end-mom",
"key-entry-notename",
"key-entry-octave",
"key-fill-translate",
"key-list-or-music?",
"key-list-or-symbol?",
"key-list?",
"key-signature-interface::alteration-position",
"key-signature-interface::alteration-positions",
"key?",
"keyword->make-markup",
"large-markup",
"larger-markup",
"layout-blot-diameter",
"layout-extract-page-properties",
"layout-line-thickness",
"layout-set-absolute-staff-size",
"layout-set-absolute-staff-size-in-module",
"layout-set-staff-size",
"left-align-markup",
"left-brace-markup",
"left-column-markup",
"lexicographic-list-compare?",
"lh-woodwind-text-stencil",
"lilypond-all",
"lilypond-file",
"lilypond-main",
"lilypond-version",
"lilypond-version-outdated?",
"line-markup",
"list-all-possible-keys",
"list-all-possible-keys-verbose",
"list-element-index",
"list-insert-separator",
"list-join",
"listener->once-listener",
"little-elliptical-key-stencil",
"long-midline-stencil",
"lookup-font",
"lookup-markup",
"lookup-markup-command",
"lookup-markup-command-aux",
"lookup-markup-list-command",
"lookup-paper-name",
"low-bass-clarinet-rh-ees-key-stencil",
"lower-markup",
"ly-getcwd",
"ly-type?",
"ly:accidental-interface::height",
"ly:accidental-interface::horizontal-skylines",
"ly:accidental-interface::print",
"ly:accidental-interface::remove-tied",
"ly:accidental-placement::calc-positioning-done",
"ly:add-context-mod",
"ly:add-interface",
"ly:add-listener",
"ly:add-option",
"ly:align-interface::align-to-ideal-distances",
"ly:align-interface::align-to-minimum-distances",
"ly:all-grob-interfaces",
"ly:all-options",
"ly:all-output-backend-commands",
"ly:all-stencil-commands",
"ly:all-stencil-expressions",
"ly:alternative-sequence-iterator::constructor",
"ly:angle",
"ly:apply-context-iterator::constructor",
"ly:arpeggio::brew-chord-bracket",
"ly:arpeggio::brew-chord-slur",
"ly:arpeggio::calc-cross-staff",
"ly:arpeggio::calc-positions",
"ly:arpeggio::print",
"ly:arpeggio::pure-height",
"ly:arpeggio::width",
"ly:assoc-get",
"ly:axis-group-interface::add-element",
"ly:axis-group-interface::adjacent-pure-heights",
"ly:axis-group-interface::calc-pure-relevant-grobs",
"ly:axis-group-interface::calc-pure-staff-staff-spacing",
"ly:axis-group-interface::calc-pure-y-common",
"ly:axis-group-interface::calc-skylines",
"ly:axis-group-interface::calc-staff-staff-spacing",
"ly:axis-group-interface::calc-x-common",
"ly:axis-group-interface::calc-y-common",
"ly:axis-group-interface::combine-skylines",
"ly:axis-group-interface::height",
"ly:axis-group-interface::pure-height",
"ly:axis-group-interface::width",
"ly:balloon-interface::print",
"ly:balloon-interface::pure-height",
"ly:balloon-interface::remove-irrelevant-spanner",
"ly:balloon-interface::width",
"ly:bar-check-iterator::constructor",
"ly:bar-line::calc-anchor",
"ly:bar-line::calc-bar-extent",
"ly:bar-line::print",
"ly:basic-progress",
"ly:beam::calc-beam-segments",
"ly:beam::calc-beaming",
"ly:beam::calc-cross-staff",
"ly:beam::calc-direction",
"ly:beam::calc-normal-stems",
"ly:beam::calc-stem-shorten",
"ly:beam::calc-x-positions",
"ly:beam::print",
"ly:beam::pure-rest-collision-callback",
"ly:beam::quanting",
"ly:beam::rest-collision-callback",
"ly:beam::set-stem-lengths",
"ly:bezier-extent",
"ly:bezier-extract",
"ly:book-add-bookpart!",
"ly:book-add-score!",
"ly:book-book-parts",
"ly:book-header",
"ly:book-paper",
"ly:book-process",
"ly:book-process-to-systems",
"ly:book-scores",
"ly:book-set-header!",
"ly:book?",
"ly:bp",
"ly:bracket",
"ly:break-alignable-interface::find-parent",
"ly:break-alignable-interface::self-align-callback",
"ly:break-aligned-interface::calc-average-anchor",
"ly:break-aligned-interface::calc-break-visibility",
"ly:break-aligned-interface::calc-extent-aligned-anchor",
"ly:break-aligned-interface::calc-joint-anchor-alignment",
"ly:break-alignment-interface::calc-positioning-done",
"ly:breathing-sign::divisio-maior",
"ly:breathing-sign::divisio-maxima",
"ly:breathing-sign::divisio-minima",
"ly:breathing-sign::finalis",
"ly:breathing-sign::offset-callback",
"ly:breathing-sign::set-breath-properties",
"ly:broadcast",
"ly:cairo-output-stencil",
"ly:cairo-output-stencils",
"ly:calculated-sequential-music::length",
"ly:calculated-sequential-music::start",
"ly:camel-case->lisp-identifier",
"ly:chain-assoc-get",
"ly:change-iterator::constructor",
"ly:check-expected-warnings",
"ly:chord-name::after-line-breaking",
"ly:clef-modifier::calc-parent-alignment",
"ly:clef::calc-glyph-name",
"ly:clef::print",
"ly:cluster-beacon::height",
"ly:cluster::calc-cross-staff",
"ly:cluster::print",
"ly:cm",
"ly:command-line-code",
"ly:command-line-options",
"ly:connect-dispatchers",
"ly:context-current-moment",
"ly:context-def-lookup",
"ly:context-def-modify",
"ly:context-def?",
"ly:context-event-source",
"ly:context-events-below",
"ly:context-find",
"ly:context-grob-definition",
"ly:context-id",
"ly:context-matched-pop-property",
"ly:context-mod-apply!",
"ly:context-mod?",
"ly:context-name",
"ly:context-output-def",
"ly:context-parent",
"ly:context-property",
"ly:context-property-where-defined",
"ly:context-pushpop-property",
"ly:context-set-property!",
"ly:context-specced-music-iterator::constructor",
"ly:context-unset-property",
"ly:context?",
"ly:custos::print",
"ly:debug",
"ly:default-scale",
"ly:dimension?",
"ly:dir?",
"ly:directed",
"ly:disconnect-dispatchers",
"ly:dispatcher?",
"ly:dot-column::calc-positioning-done",
"ly:dots::print",
"ly:duration->string",
"ly:duration-compress",
"ly:duration-dot-count",
"ly:duration-factor",
"ly:duration-length",
"ly:duration-log",
"ly:duration-scale",
"ly:duration::less?",
"ly:duration<?",
"ly:duration?",
"ly:effective-prefix",
"ly:enclosing-bracket::print",
"ly:enclosing-bracket::width",
"ly:engraver-announce-end-grob",
"ly:engraver-make-grob",
"ly:engraver-make-item",
"ly:engraver-make-spanner",
"ly:engraver-make-sticky",
"ly:error",
"ly:event-chord-iterator::constructor",
"ly:event-deep-copy",
"ly:event-iterator::constructor",
"ly:event-property",
"ly:event-set-property!",
"ly:event-warning",
"ly:event?",
"ly:exit",
"ly:expect-warning",
"ly:extract-subfont-from-collection",
"ly:figured-bass-continuation::center-on-figures",
"ly:figured-bass-continuation::print",
"ly:find-file",
"ly:fine-iterator::constructor",
"ly:fingering-column::calc-positioning-done",
"ly:flag::calc-x-offset",
"ly:flag::calc-y-offset",
"ly:flag::glyph-name",
"ly:flag::print",
"ly:flag::pure-calc-y-offset",
"ly:flag::width",
"ly:font-config-add-directory",
"ly:font-config-add-font",
"ly:font-config-display-fonts",
"ly:font-config-get-font-file",
"ly:font-design-size",
"ly:font-file-name",
"ly:font-get-glyph",
"ly:font-glyph-name-to-charcode",
"ly:font-glyph-name-to-index",
"ly:font-index-to-charcode",
"ly:font-magnification",
"ly:font-metric?",
"ly:font-name",
"ly:font-sub-fonts",
"ly:format",
"ly:format-output",
"ly:generic-bound-extent",
"ly:get-all-function-documentation",
"ly:get-all-translators",
"ly:get-cff-offset",
"ly:get-context-mods",
"ly:get-font-format",
"ly:get-option",
"ly:get-spacing-spec",
"ly:grace-iterator::constructor",
"ly:grace-music::start-callback",
"ly:grid-line-interface::print",
"ly:grid-line-interface::width",
"ly:grob-alist-chain",
"ly:grob-array->list",
"ly:grob-array-length",
"ly:grob-array-ref",
"ly:grob-array?",
"ly:grob-basic-properties",
"ly:grob-chain-callback",
"ly:grob-common-refpoint",
"ly:grob-common-refpoint-of-array",
"ly:grob-default-font",
"ly:grob-extent",
"ly:grob-get-vertical-axis-group-index",
"ly:grob-interfaces",
"ly:grob-layout",
"ly:grob-list->grob-array",
"ly:grob-object",
"ly:grob-original",
"ly:grob-parent",
"ly:grob-pq<?",
"ly:grob-properties?",
"ly:grob-property",
"ly:grob-property-data",
"ly:grob-pure-height",
"ly:grob-pure-property",
"ly:grob-relative-coordinate",
"ly:grob-robust-relative-extent",
"ly:grob-script-priority-less",
"ly:grob-set-nested-property!",
"ly:grob-set-object!",
"ly:grob-set-parent!",
"ly:grob-set-property!",
"ly:grob-spanned-column-rank-interval",
"ly:grob-staff-position",
"ly:grob-suicide!",
"ly:grob-system",
"ly:grob-translate-axis!",
"ly:grob-vertical<?",
"ly:grob-warning",
"ly:grob::horizontal-skylines-from-element-stencils",
"ly:grob::horizontal-skylines-from-stencil",
"ly:grob::pure-horizontal-skylines-from-element-stencils",
"ly:grob::pure-simple-horizontal-skylines-from-extents",
"ly:grob::pure-simple-vertical-skylines-from-extents",
"ly:grob::pure-stencil-height",
"ly:grob::pure-vertical-skylines-from-element-stencils",
"ly:grob::simple-horizontal-skylines-from-extents",
"ly:grob::simple-vertical-skylines-from-extents",
"ly:grob::stencil-height",
"ly:grob::stencil-width",
"ly:grob::vertical-skylines-from-element-stencils",
"ly:grob::vertical-skylines-from-stencil",
"ly:grob::x-parent-positioning",
"ly:grob::y-parent-positioning",
"ly:grob?",
"ly:gs-cli",
"ly:gulp-file",
"ly:gulp-file-utf8",
"ly:hairpin::broken-bound-padding",
"ly:hairpin::print",
"ly:hairpin::pure-height",
"ly:hara-kiri-group-spanner::calc-skylines",
"ly:hara-kiri-group-spanner::force-hara-kiri-callback",
"ly:hara-kiri-group-spanner::force-hara-kiri-in-y-parent-callback",
"ly:hara-kiri-group-spanner::pure-height",
"ly:hara-kiri-group-spanner::y-extent",
"ly:has-glyph-names?",
"ly:hash-table-keys",
"ly:horizontal-bracket-text::calc-direction",
"ly:horizontal-bracket-text::print",
"ly:horizontal-bracket::print",
"ly:horizontal-line-spanner::calc-left-bound-info",
"ly:horizontal-line-spanner::calc-left-bound-info-and-text",
"ly:horizontal-line-spanner::calc-right-bound-info",
"ly:in-event-class?",
"ly:inch",
"ly:input-both-locations",
"ly:input-file-line-char-column",
"ly:input-location?",
"ly:input-message",
"ly:input-warning",
"ly:interpret-music-expression",
"ly:intlog2",
"ly:item-break-dir",
"ly:item-get-column",
"ly:item?",
"ly:iterator?",
"ly:key-signature-interface::print",
"ly:kievan-ligature::print",
"ly:ledger-line-spanner::print",
"ly:ledger-line-spanner::set-spacing-rods",
"ly:length",
"ly:lily-lexer?",
"ly:lily-parser?",
"ly:line-interface::line",
"ly:line-spanner::calc-cross-staff",
"ly:line-spanner::calc-left-bound-info",
"ly:line-spanner::calc-left-bound-info-and-text",
"ly:line-spanner::calc-right-bound-info",
"ly:line-spanner::print",
"ly:list->offsets",
"ly:listened-event-class?",
"ly:listened-event-types",
"ly:listener?",
"ly:load",
"ly:lyric-combine-music-iterator::constructor",
"ly:lyric-combine-music::length-callback",
"ly:lyric-extender::print",
"ly:lyric-hyphen::print",
"ly:lyric-hyphen::set-spacing-rods",
"ly:make-book",
"ly:make-book-part",
"ly:make-context-mod",
"ly:make-dispatcher",
"ly:make-duration",
"ly:make-event-class",
"ly:make-global-context",
"ly:make-global-translator",
"ly:make-grob-properties",
"ly:make-listener",
"ly:make-moment",
"ly:make-music",
"ly:make-music-function",
"ly:make-music-relative!",
"ly:make-output-def",
"ly:make-page-label-marker",
"ly:make-page-permission-marker",
"ly:make-pango-description-string",
"ly:make-paper-outputter",
"ly:make-pitch",
"ly:make-prob",
"ly:make-rotation",
"ly:make-scale",
"ly:make-scaling",
"ly:make-score",
"ly:make-spring",
"ly:make-stencil",
"ly:make-stream-event",
"ly:make-transform",
"ly:make-translation",
"ly:make-unpure-pure-container",
"ly:measure-grouping::print",
"ly:measure-spanner::calc-connect-to-neighbors",
"ly:measure-spanner::print",
"ly:melody-spanner::calc-neutral-stem-direction",
"ly:mensural-ligature::brew-ligature-primitive",
"ly:mensural-ligature::print",
"ly:message",
"ly:minimal-breaking",
"ly:mm",
"ly:module->alist",
"ly:module-copy",
"ly:modules-lookup",
"ly:moment-add",
"ly:moment-div",
"ly:moment-grace",
"ly:moment-grace-denominator",
"ly:moment-grace-numerator",
"ly:moment-main",
"ly:moment-main-denominator",
"ly:moment-main-numerator",
"ly:moment-mod",
"ly:moment-mul",
"ly:moment-sub",
"ly:moment<?",
"ly:moment?",
"ly:multi-measure-rest::height",
"ly:multi-measure-rest::print",
"ly:multi-measure-rest::set-spacing-rods",
"ly:multi-measure-rest::set-text-rods",
"ly:music-compress",
"ly:music-deep-copy",
"ly:music-duration-compress",
"ly:music-duration-length",
"ly:music-error",
"ly:music-function-extract",
"ly:music-function-signature",
"ly:music-function?",
"ly:music-iterator::constructor",
"ly:music-length",
"ly:music-list?",
"ly:music-message",
"ly:music-mutable-properties",
"ly:music-output?",
"ly:music-property",
"ly:music-sequence::cumulative-length-callback",
"ly:music-sequence::event-chord-length-callback",
"ly:music-sequence::event-chord-relative-callback",
"ly:music-sequence::first-start-callback",
"ly:music-sequence::maximum-length-callback",
"ly:music-sequence::minimum-start-callback",
"ly:music-sequence::simultaneous-relative-callback",
"ly:music-set-property!",
"ly:music-start",
"ly:music-transpose",
"ly:music-warning",
"ly:music-wrapper-iterator::constructor",
"ly:music-wrapper::length-callback",
"ly:music-wrapper::start-callback",
"ly:music::duration-length-callback",
"ly:music?",
"ly:non-fatal-error",
"ly:note-collision-interface::calc-positioning-done",
"ly:note-column-accidentals",
"ly:note-column-dot-column",
"ly:note-column::calc-main-extent",
"ly:note-extra-source-file",
"ly:note-head::calc-stem-attachment",
"ly:note-head::calc-tab-stem-attachment",
"ly:note-head::include-ledger-line-height",
"ly:note-head::print",
"ly:note-head::stem-attachment",
"ly:note-head::stem-x-shift",
"ly:number->string",
"ly:number-pair->string",
"ly:one-line-auto-height-breaking",
"ly:one-line-breaking",
"ly:one-page-breaking",
"ly:optimal-breaking",
"ly:option-usage",
"ly:otf->cff",
"ly:otf-font-glyph-info",
"ly:otf-font-table-data",
"ly:otf-font?",
"ly:otf-glyph-count",
"ly:otf-glyph-list",
"ly:ottava-bracket::print",
"ly:output-def-clone",
"ly:output-def-lookup",
"ly:output-def-parent",
"ly:output-def-scope",
"ly:output-def-set-variable!",
"ly:output-def?",
"ly:output-description",
"ly:output-find-context-def",
"ly:outputter-close",
"ly:outputter-dump-stencil",
"ly:outputter-dump-string",
"ly:outputter-output-scheme",
"ly:outputter-port",
"ly:page-marker?",
"ly:page-turn-breaking",
"ly:pango-font-physical-fonts",
"ly:pango-font?",
"ly:paper-book-header",
"ly:paper-book-pages",
"ly:paper-book-paper",
"ly:paper-book-performances",
"ly:paper-book-scopes",
"ly:paper-book-systems",
"ly:paper-book?",
"ly:paper-column::break-align-width",
"ly:paper-column::print",
"ly:paper-fonts",
"ly:paper-get-font",
"ly:paper-get-number",
"ly:paper-outputscale",
"ly:paper-score-paper-systems",
"ly:paper-system-minimum-distance",
"ly:paper-system?",
"ly:parse-file",
"ly:parse-init",
"ly:parse-string-expression",
"ly:parsed-undead-list!",
"ly:parser-clear-error",
"ly:parser-clone",
"ly:parser-define!",
"ly:parser-error",
"ly:parser-has-error?",
"ly:parser-include-string",
"ly:parser-lookup",
"ly:parser-output-name",
"ly:parser-parse-string",
"ly:parser-set-note-names",
"ly:part-combine-iterator::constructor",
"ly:partial-iterator::constructor",
"ly:partial-iterator::finalization",
"ly:percent-repeat-interface::beat-slash",
"ly:percent-repeat-interface::double-percent",
"ly:percent-repeat-interface::percent",
"ly:percent-repeat-iterator::constructor",
"ly:performance-headers",
"ly:performance-write",
"ly:piano-pedal-bracket::print",
"ly:pitch-alteration",
"ly:pitch-diff",
"ly:pitch-negate",
"ly:pitch-notename",
"ly:pitch-octave",
"ly:pitch-quartertones",
"ly:pitch-semitones",
"ly:pitch-steps",
"ly:pitch-tones",
"ly:pitch-transpose",
"ly:pitch::less?",
"ly:pitch<?",
"ly:pitch?",
"ly:pointer-group-interface::add-grob",
"ly:pop-property-iterator::constructor",
"ly:position-on-line?",
"ly:prob-immutable-properties",
"ly:prob-mutable-properties",
"ly:prob-property",
"ly:prob-property?",
"ly:prob-set-property!",
"ly:prob-type?",
"ly:prob?",
"ly:programming-error",
"ly:progress",
"ly:property-iterator::constructor",
"ly:property-lookup-stats",
"ly:property-unset-iterator::constructor",
"ly:pt",
"ly:pure-call",
"ly:pure-from-neighbor-interface::calc-pure-relevant-grobs",
"ly:push-property-iterator::constructor",
"ly:quote-iterator::constructor",
"ly:randomize-rand-seed",
"ly:register-stencil-expression",
"ly:register-translator",
"ly:relative-group-extent",
"ly:relative-octave-check::relative-callback",
"ly:relative-octave-music::no-relative-callback",
"ly:relative-octave-music::relative-callback",
"ly:rename-file",
"ly:reset-all-fonts",
"ly:rest-collision::calc-positioning-done",
"ly:rest-collision::force-shift-callback-rest",
"ly:rest::calc-cross-staff",
"ly:rest::height",
"ly:rest::print",
"ly:rest::pure-height",
"ly:rest::width",
"ly:rest::y-offset-callback",
"ly:rhythmic-music-iterator::constructor",
"ly:round-filled-box",
"ly:round-polygon",
"ly:run-translator",
"ly:score-add-output-def!",
"ly:score-embedded-format",
"ly:score-error?",
"ly:score-header",
"ly:score-music",
"ly:score-output-defs",
"ly:score-set-header!",
"ly:score?",
"ly:script-column::before-line-breaking",
"ly:script-column::row-before-line-breaking",
"ly:script-interface::calc-cross-staff",
"ly:script-interface::calc-direction",
"ly:script-interface::calc-positioning-done",
"ly:script-interface::print",
"ly:self-alignment-interface::aligned-on-x-parent",
"ly:self-alignment-interface::aligned-on-y-parent",
"ly:self-alignment-interface::centered-on-x-parent",
"ly:self-alignment-interface::centered-on-y-parent",
"ly:self-alignment-interface::pure-y-aligned-on-self",
"ly:self-alignment-interface::x-aligned-on-self",
"ly:self-alignment-interface::y-aligned-on-self",
"ly:semi-tie-column::calc-head-direction",
"ly:semi-tie-column::calc-positioning-done",
"ly:semi-tie::calc-control-points",
"ly:separation-item::calc-skylines",
"ly:sequential-iterator::constructor",
"ly:set-color-names",
"ly:set-default-scale",
"ly:set-grob-creation-callback",
"ly:set-grob-modification-callback",
"ly:set-middle-C!",
"ly:set-option",
"ly:set-origin!",
"ly:set-property-cache-callback",
"ly:side-position-interface::calc-cross-staff",
"ly:side-position-interface::move-to-extremal-staff",
"ly:side-position-interface::pure-y-aligned-side",
"ly:side-position-interface::x-aligned-side",
"ly:side-position-interface::y-aligned-side",
"ly:simple-music-iterator::constructor",
"ly:simultaneous-music-iterator::constructor",
"ly:skyline-distance",
"ly:skyline-empty?",
"ly:skyline-height",
"ly:skyline-max-height",
"ly:skyline-max-height-position",
"ly:skyline-pad",
"ly:skyline-pair?",
"ly:skyline-touching-point",
"ly:skyline?",
"ly:skylines-for-stencil",
"ly:slur::calc-control-points",
"ly:slur::calc-cross-staff",
"ly:slur::calc-direction",
"ly:slur::height",
"ly:slur::outside-slur-callback",
"ly:slur::outside-slur-cross-staff",
"ly:slur::print",
"ly:slur::pure-height",
"ly:slur::pure-outside-slur-callback",
"ly:smob-protects",
"ly:solve-spring-rod-problem",
"ly:source-file?",
"ly:source-files",
"ly:spacing-spanner::calc-common-shortest-duration",
"ly:spacing-spanner::set-springs",
"ly:span-bar::before-line-breaking",
"ly:span-bar::calc-anchor",
"ly:span-bar::calc-glyph-name",
"ly:span-bar::choose-model-bar-line",
"ly:span-bar::print",
"ly:span-bar::width",
"ly:spanner-bound",
"ly:spanner-broken-into",
"ly:spanner-set-bound!",
"ly:spanner::bounds-width",
"ly:spanner::calc-normalized-endpoints",
"ly:spanner::kill-zero-spanned-time",
"ly:spanner::set-spacing-rods",
"ly:spanner?",
"ly:spawn",
"ly:spring-set-inverse-compress-strength!",
"ly:spring-set-inverse-stretch-strength!",
"ly:spring?",
"ly:staff-symbol-line-thickness",
"ly:staff-symbol-referencer::callback",
"ly:staff-symbol-staff-radius",
"ly:staff-symbol-staff-space",
"ly:staff-symbol::height",
"ly:staff-symbol::print",
"ly:stderr-redirect",
"ly:stem-tremolo::calc-cross-staff",
"ly:stem-tremolo::calc-direction",
"ly:stem-tremolo::calc-shape",
"ly:stem-tremolo::calc-slope",
"ly:stem-tremolo::calc-width",
"ly:stem-tremolo::calc-y-offset",
"ly:stem-tremolo::print",
"ly:stem-tremolo::pure-calc-y-offset",
"ly:stem-tremolo::pure-height",
"ly:stem-tremolo::width",
"ly:stem::calc-cross-staff",
"ly:stem::calc-default-direction",
"ly:stem::calc-direction",
"ly:stem::calc-length",
"ly:stem::calc-positioning-done",
"ly:stem::calc-stem-begin-position",
"ly:stem::calc-stem-end-position",
"ly:stem::calc-stem-info",
"ly:stem::height",
"ly:stem::offset-callback",
"ly:stem::print",
"ly:stem::pure-calc-length",
"ly:stem::pure-calc-stem-begin-position",
"ly:stem::pure-calc-stem-end-position",
"ly:stem::pure-height",
"ly:stem::width",
"ly:stencil-add",
"ly:stencil-aligned-to",
"ly:stencil-combine-at-edge",
"ly:stencil-empty?",
"ly:stencil-expr",
"ly:stencil-extent",
"ly:stencil-in-color",
"ly:stencil-outline",
"ly:stencil-rotate",
"ly:stencil-rotate-absolute",
"ly:stencil-scale",
"ly:stencil-stack",
"ly:stencil-translate",
"ly:stencil-translate-axis",
"ly:stencil?",
"ly:stream-event::dump",
"ly:stream-event::undump",
"ly:stream-event?",
"ly:string-percent-encode",
"ly:string-substitute",
"ly:sustain-pedal::print",
"ly:system",
"ly:system-font-load",
"ly:system-start-delimiter::print",
"ly:system::calc-pure-height",
"ly:system::calc-pure-relevant-grobs",
"ly:system::footnotes-after-line-breaking",
"ly:system::footnotes-before-line-breaking",
"ly:system::get-nonspaceable-staves",
"ly:system::get-spaceable-staves",
"ly:system::get-staves",
"ly:system::get-vertical-alignment",
"ly:system::height",
"ly:system::vertical-skyline-elements",
"ly:text-interface::interpret-markup",
"ly:text-interface::interpret-string",
"ly:text-interface::print",
"ly:tie-column::before-line-breaking",
"ly:tie-column::calc-positioning-done",
"ly:tie::calc-control-points",
"ly:tie::calc-direction",
"ly:tie::print",
"ly:time-signature::print",
"ly:transform->list",
"ly:transform?",
"ly:translate-cpp-warning-scheme",
"ly:translator-context",
"ly:translator-description",
"ly:translator-group?",
"ly:translator-name",
"ly:translator?",
"ly:transpose-key-alist",
"ly:ttf->pfa",
"ly:ttf-ps-name",
"ly:tuplet-bracket::calc-connect-to-neighbors",
"ly:tuplet-bracket::calc-cross-staff",
"ly:tuplet-bracket::calc-direction",
"ly:tuplet-bracket::calc-positions",
"ly:tuplet-bracket::calc-x-positions",
"ly:tuplet-bracket::print",
"ly:tuplet-iterator::constructor",
"ly:tuplet-number::calc-cross-staff",
"ly:tuplet-number::calc-x-offset",
"ly:tuplet-number::calc-y-offset",
"ly:tuplet-number::print",
"ly:type1->pfa",
"ly:unit",
"ly:unpure-call",
"ly:unpure-pure-container-pure-part",
"ly:unpure-pure-container-unpure-part",
"ly:unpure-pure-container?",
"ly:usage",
"ly:vaticana-ligature::brew-ligature-primitive",
"ly:vaticana-ligature::print",
"ly:verbose-output?",
"ly:version",
"ly:version?",
"ly:volta-bracket-interface::print",
"ly:volta-bracket::calc-shorten-pair",
"ly:volta-repeat-iterator::constructor",
"ly:volta-specced-music-iterator::constructor",
"ly:vowel-transition::set-spacing-rods",
"ly:warning",
"ly:warning-located",
"ly:wide-char->utf-8",
"lyric-hyphen::vaticana-style",
"lyric-text::print",
"magnification->font-size",
"magnify-markup",
"magnifyStaff-is-set?",
"magstep",
"maj7-modifier",
"make-abs-fontsize-markup",
"make-accidental-dodecaphonic-rule",
"make-accidental-markup",
"make-accidental-rule",
"make-align-on-other-markup",
"make-apply-context",
"make-arrow-head-markup",
"make-articulation",
"make-auto-footnote-markup",
"make-autochange-music",
"make-backslashed-digit-markup",
"make-beam-markup",
"make-bezier-sandwich-stencil",
"make-bold-markup",
"make-bow-stencil",
"make-box-markup",
"make-bracket-bar-line",
"make-bracket-markup",
"make-c-time-signature-markup",
"make-caps-markup",
"make-center-align-markup",
"make-center-column-markup",
"make-central-column-hole-addresses",
"make-char-markup",
"make-chord-elements",
"make-circle-markup",
"make-circle-stencil",
"make-clef-set",
"make-coda-markup",
"make-colon-bar-line",
"make-color-handler",
"make-column-lines-markup-list",
"make-column-markup",
"make-combine-markup",
"make-compound-meter-markup",
"make-concat-markup",
"make-conditional-circle-markup-markup",
"make-connected-line",
"make-connected-path-stencil",
"make-cue-clef-set",
"make-cue-clef-unset",
"make-customTabClef-markup",
"make-dashed-bar-line",
"make-default-fonts-tree",
"make-dir-column-markup",
"make-dotted-bar-line",
"make-doubleflat-markup",
"make-doublesharp-markup",
"make-draw-circle-markup",
"make-draw-dashed-line-markup",
"make-draw-dotted-line-markup",
"make-draw-hline-markup",
"make-draw-line-markup",
"make-draw-squiggle-line-markup",
"make-duration-of-length",
"make-dynamic-markup",
"make-ellipse-markup",
"make-ellipse-stencil",
"make-empty-bar-line",
"make-engraver",
"make-epsfile-markup",
"make-event-chord",
"make-extended-scale",
"make-eyeglasses-markup",
"make-fermata-markup",
"make-fill-line-markup",
"make-fill-with-pattern-markup",
"make-filled-box-markup",
"make-filled-box-stencil",
"make-finger-markup",
"make-first-visible-markup",
"make-flat-markup",
"make-font-tree-leaf",
"make-font-tree-node",
"make-fontCaps-markup",
"make-fontsize-markup",
"make-footnote-markup",
"make-fraction-markup",
"make-fret-diagram",
"make-fret-diagram-markup",
"make-fret-diagram-terse-markup",
"make-fret-diagram-verbose-markup",
"make-fromproperty-markup",
"make-general-align-markup",
"make-glyph-time-signature-markup",
"make-grace-music",
"make-graceless-rhythmic-location",
"make-grob-property-override",
"make-grob-property-revert",
"make-grob-property-set",
"make-halign-markup",
"make-harmonic",
"make-harp-pedal-markup",
"make-hashq-cached-function",
"make-hbracket-markup",
"make-hcenter-in-markup",
"make-hspace-markup",
"make-huge-markup",
"make-if-markup",
"make-italic-markup",
"make-justified-lines-markup-list",
"make-justify-field-markup",
"make-justify-line-markup",
"make-justify-markup",
"make-justify-string-markup",
"make-key-alist",
"make-key-symbols",
"make-kievan-bar-line",
"make-large-markup",
"make-larger-markup",
"make-left-align-markup",
"make-left-brace-markup",
"make-left-column-markup",
"make-left-hand-key-addresses",
"make-line-markup",
"make-line-stencil",
"make-lookup-markup",
"make-lower-markup",
"make-lyric-event",
"make-lyric-repeat-count-formatter",
"make-magnify-markup",
"make-map-markup-commands-markup-list",
"make-markalphabet-markup",
"make-markletter-markup",
"make-markup",
"make-medium-markup",
"make-modal-inverter",
"make-modal-transposer",
"make-multi-measure-rest",
"make-multi-measure-rest-by-number-markup",
"make-music",
"make-musicglyph-markup",
"make-name-keylist",
"make-named-spreadsheet",
"make-natural-markup",
"make-non-relative-music",
"make-normal-size-sub-markup",
"make-normal-size-super-markup",
"make-normal-text-markup",
"make-normalsize-markup",
"make-note-by-number-markup",
"make-note-markup",
"make-null-markup",
"make-number-keylist",
"make-number-markup",
"make-on-the-fly-markup",
"make-oval-markup",
"make-oval-stencil",
"make-overlay-markup",
"make-override-lines-markup-list",
"make-override-markup",
"make-overtie-markup",
"make-pad-around-markup",
"make-pad-markup-markup",
"make-pad-to-box-markup",
"make-pad-x-markup",
"make-page-link-markup",
"make-page-ref-markup",
"make-pango-font-tree",
"make-parenthesis-stencil",
"make-parenthesize-markup",
"make-part-combine-context-changes",
"make-part-combine-marks",
"make-partial-ellipse-stencil",
"make-path-markup",
"make-path-stencil",
"make-pattern-markup",
"make-percent-set",
"make-performer",
"make-polygon-markup",
"make-postscript-markup",
"make-property-recursive-markup",
"make-property-set",
"make-property-unset",
"make-put-adjacent-markup",
"make-raise-markup",
"make-relative",
"make-relative::to-relative-callback",
"make-repeat",
"make-replace-markup",
"make-rest-by-number-markup",
"make-rest-markup",
"make-rhythm-markup",
"make-rhythmic-location",
"make-right-align-markup",
"make-right-brace-markup",
"make-right-column-markup",
"make-right-hand-key-addresses",
"make-roman-markup",
"make-rotate-markup",
"make-rounded-box-markup",
"make-sans-markup",
"make-scale",
"make-scale-markup",
"make-score-lines-markup-list",
"make-score-markup",
"make-segno-bar-line",
"make-segno-markup",
"make-semiflat-markup",
"make-semisharp-markup",
"make-semitone->pitch",
"make-sequential-music",
"make-sesquiflat-markup",
"make-sesquisharp-markup",
"make-session-variable",
"make-setting",
"make-sharp-markup",
"make-short-bar-line",
"make-simple-bar-line",
"make-simple-markup",
"make-simultaneous-music",
"make-skip-music",
"make-skipped",
"make-slashed-digit-markup",
"make-small-markup",
"make-smallCaps-markup",
"make-smaller-markup",
"make-spacer-bar-line",
"make-span-event",
"make-split-state",
"make-spreadsheet",
"make-stem-span!",
"make-stem-spans!",
"make-stencil-boxer",
"make-stencil-circler",
"make-stencil-markup",
"make-string-lines-markup-list",
"make-strut-markup",
"make-sub-markup",
"make-super-markup",
"make-symbol-alist",
"make-tab-heads-transparent",
"make-table-markup-list",
"make-teeny-markup",
"make-text-markup",
"make-thick-bar-line",
"make-tick-bar-line",
"make-tie-markup",
"make-tie-stencil",
"make-tied-lyric-markup",
"make-tilted-portion",
"make-time-signature-set",
"make-tiny-markup",
"make-tmpfile",
"make-translate-markup",
"make-translate-scaled-markup",
"make-translator",
"make-translator-component",
"make-translator-internal",
"make-transparent-box-stencil",
"make-transparent-markup",
"make-tremolo-set",
"make-triangle-markup",
"make-type-checker",
"make-typewriter-markup",
"make-underline-markup",
"make-undertie-markup",
"make-unfolded-set",
"make-unless-markup",
"make-upright-markup",
"make-varcoda-markup",
"make-vcenter-markup",
"make-verbatim-file-markup",
"make-voice-props-override",
"make-voice-props-revert",
"make-voice-props-set",
"make-voice-states",
"make-volta-set",
"make-vspace-markup",
"make-whiteout-markup",
"make-with-color-markup",
"make-with-dimension-from-markup",
"make-with-dimension-markup",
"make-with-dimensions-from-markup",
"make-with-dimensions-markup",
"make-with-link-markup",
"make-with-outline-markup",
"make-with-string-transformer-markup",
"make-with-true-dimension-markup",
"make-with-true-dimensions-markup",
"make-with-url-markup",
"make-woodwind-diagram-markup",
"make-wordwrap-field-markup",
"make-wordwrap-internal-markup-list",
"make-wordwrap-lines-markup-list",
"make-wordwrap-markup",
"make-wordwrap-string-internal-markup-list",
"make-wordwrap-string-markup",
"map-alist-keys",
"map-alist-vals",
"map-markup-commands-markup-list",
"map-selected-alist-keys",
"map-some-music",
"markalphabet-markup",
"marked-up-headfoot",
"marked-up-title",
"markgeneric-string",
"markletter-markup",
"markup",
"markup->string",
"markup-argument-list-error",
"markup-argument-list?",
"markup-command-list?",
"markup-command-signature",
"markup-default-to-string-method",
"markup-expression->make-markup",
"markup-function-as-string-method",
"markup-function-category",
"markup-function-properties",
"markup-function?",
"markup-join",
"markup-lambda",
"markup-lambda-listify",
"markup-lambda-worker",
"markup-list-function?",
"markup-list-lambda",
"markup-list?",
"markup-thrower-typecheck",
"markup-typecheck?",
"markup?",
"match-predicate",
"measure-counter::text",
"medium-markup",
"mensural-flag",
"merge-details",
"metronome-markup",
"middle-broken-spanner?",
"midi-program",
"midline-stencil",
"minor-modifier",
"mkdir-if-not-exist",
"mm-rest-child-list",
"mmrest-of-length",
"modern-straight-flag",
"modified-font-metric-font-scaling",
"modulo-bar-number-visible",
"moment",
"moment->fraction",
"moment-min",
"moment-pair?",
"moment<=?",
"move-chord-note",
"multi-fork",
"multi-measure-rest-by-number-markup",
"music->make-music",
"music-check-error",
"music-clone",
"music-filter",
"music-invert",
"music-is-of-type?",
"music-map",
"music-pitches",
"music-property-description",
"music-selective-filter",
"music-selective-map",
"music-separator?",
"music-type-predicate",
"musicglyph-markup",
"n-true-entries",
"narrow-glyph?",
"natural-chord-alteration",
"natural-markup",
"negate-extent",
"neo-modern-accidental-rule",
"no-flag",
"normal-flag",
"normal-size-sub-markup",
"normal-size-super-markup",
"normal-text-markup",
"normalize-fraction",
"normalsize-markup",
"not-first-broken-spanner?",
"not-last-broken-spanner?",
"note-by-number-markup",
"note-events",
"note-head::brew-ez-stencil",
"note-head::calc-duration-log",
"note-head::calc-glyph-name",
"note-head::calc-kievan-duration-log",
"note-markup",
"note-name->german-markup",
"note-name->markup",
"note-name->string",
"note-name-markup",
"note-names-language",
"note-to-cluster",
"notes-to-clusters",
"null-markup",
"number->octal-string",
"number-column-stencil",
"number-format",
"number-list?",
"number-markup",
"number-or-grob?",
"number-or-pair?",
"number-or-string?",
"number-pair-list?",
"number-pair?",
"numbered-footnotes",
"numerify",
"object-type",
"object-type-name",
"oboe-lh-I-key-stencil",
"oboe-lh-II-key-stencil",
"oboe-lh-III-key-stencil",
"oboe-lh-b-key-stencil",
"oboe-lh-bes-key-stencil",
"oboe-lh-cis-key-stencil",
"oboe-lh-d-key-stencil",
"oboe-lh-ees-key-stencil",
"oboe-lh-ees-lh-bes-key-stencil",
"oboe-lh-f-key-stencil",
"oboe-lh-gis-key-stencil",
"oboe-lh-gis-lh-low-b-key-stencil",
"oboe-lh-low-b-key-stencil",
"oboe-lh-octave-key-stencil",
"oboe-rh-a-key-stencil",
"oboe-rh-banana-key-stencil",
"oboe-rh-c-key-stencil",
"oboe-rh-c-rh-ees-key-stencil",
"oboe-rh-cis-key-stencil",
"oboe-rh-d-key-stencil",
"oboe-rh-ees-key-stencil",
"oboe-rh-f-key-stencil",
"oboe-rh-gis-key-stencil",
"octave-woodwind-text-stencil",
"offset-add",
"offset-flip-y",
"offset-fret",
"offset-multiple-types",
"offset-scale",
"offsetter",
"old-straight-flag",
"on-the-fly-markup",
"only-if-beamed",
"ordered-cons",
"other-axis",
"output-module?",
"output-scopes",
"outputproperty-compatibility",
"oval-markup",
"oval-stencil",
"overlay-markup",
"override-head-style",
"override-lines-markup-list",
"override-markup",
"override-property-setting",
"override-time-signature-setting",
"overtie-markup",
"pad-around-markup",
"pad-markup-markup",
"pad-to-box-markup",
"pad-x-markup",
"page-link-markup",
"page-ref-markup",
"pair-map",
"pango-font-name",
"pango-pf-file-name",
"pango-pf-font-name",
"pango-pf-fontindex",
"paper-variable",
"parentheses-interface::calc-angled-bracket-stencils",
"parentheses-interface::calc-parenthesis-stencils",
"parentheses-interface::print",
"parentheses-interface::y-extent",
"parenthesize-elements",
"parenthesize-markup",
"parenthesize-stencil",
"parse-and-check-version",
"parse-lily-version",
"parse-terse-string",
"path-markup",
"pattern-markup",
"percussion?",
"perform-text-replacements",
"performance-name-from-headers",
"piccolo-rh-x-key-stencil",
"pitch-alteration-semitones",
"pitch-invert",
"pitch-of-note",
"pitch-step",
"polar->rectangular",
"polygon-markup",
"position-true-endpoint",
"postprocess-output",
"postscript->pdf",
"postscript->png",
"postscript->ps",
"postscript-markup",
"precompute-music-length",
"prepend-alist-chain",
"prepend-props",
"pretty-printable?",
"previous-span-state",
"previous-voice-state",
"print-book-with",
"print-book-with-defaults",
"print-book-with-defaults-as-systems",
"print-circled-text-callback",
"print-keys",
"print-keys-verbose",
"process-fill-value",
"property-recursive-markup",
"pure-chain-offset-callback",
"pure-from-neighbor-interface::account-for-span-bar",
"pure-from-neighbor-interface::extra-spacing-height",
"pure-from-neighbor-interface::extra-spacing-height-at-beginning-of-line",
"pure-from-neighbor-interface::extra-spacing-height-including-staff",
"pure-from-neighbor-interface::pure-height",
"put-adjacent-markup",
"quarterdiff->string",
"quote-substitute",
"raise-markup",
"randomize-rand-seed",
"ratio->fret",
"ratio->pitch",
"rational-or-procedure?",
"read-lily-expression",
"read-lily-expression-internal",
"recent-enough?",
"recompute-music-length",
"recording-group-emulate",
"regexp-split",
"relevant-book-systems",
"relevant-dump-systems",
"remove-grace-property",
"remove-step",
"remove-whitespace",
"repeat-tie::handle-tab-note-head",
"replace-markup",
"replace-step",
"replacement-hashtab",
"replacement-regexp",
"replicate-modify",
"reset-stencil-colors",
"rest-by-number-markup",
"rest-markup",
"retrieve-glyph-flag",
"retrograde-music",
"return-1",
"reverse-interval",
"revert-fontSize",
"revert-head-style",
"revert-property-setting",
"revert-props",
"revert-time-signature-setting",
"rgb-color",
"rh-woodwind-text-stencil",
"rhythm-markup",
"rhythmic-location->file-string",
"rhythmic-location->string",
"rhythmic-location-bar-number",
"rhythmic-location-measure-position",
"rhythmic-location<=?",
"rhythmic-location<?",
"rhythmic-location=?",
"rhythmic-location>=?",
"rhythmic-location>?",
"rhythmic-location?",
"rich-bassoon-uber-key-stencil",
"rich-e-stencil",
"rich-group-draw-rule",
"rich-group-extra-offset-rule",
"rich-path-stencil",
"rich-pe-stencil",
"right-align-markup",
"right-brace-markup",
"right-column-markup",
"ring-column-circle-stencil",
"robust-bar-number-function",
"roman-markup",
"rotate-markup",
"rounded-box-markup",
"rounded-box-stencil",
"sans-markup",
"sans-serif-stencil",
"saxophone-lh-T-key-stencil",
"saxophone-lh-b-cis-key-stencil",
"saxophone-lh-b-key-stencil",
"saxophone-lh-bes-key-stencil",
"saxophone-lh-cis-key-stencil",
"saxophone-lh-d-key-stencil",
"saxophone-lh-ees-key-stencil",
"saxophone-lh-f-key-stencil",
"saxophone-lh-front-f-key-stencil",
"saxophone-lh-gis-key-stencil",
"saxophone-lh-low-a-key-stencil",
"saxophone-lh-low-bes-key-stencil",
"saxophone-name-passerelle",
"saxophone-rh-bes-key-stencil",
"saxophone-rh-c-key-stencil",
"saxophone-rh-e-key-stencil",
"saxophone-rh-ees-key-stencil",
"saxophone-rh-fis-key-stencil",
"saxophone-rh-high-fis-key-stencil",
"saxophone-rh-low-c-key-stencil",
"saxophone-rh-side-key-stencil",
"scale->factor",
"scale-beam-thickness",
"scale-by-font-size",
"scale-fontSize",
"scale-layout",
"scale-markup",
"scale-props",
"scale?",
"scheme?",
"scm->string",
"score-lines-markup-list",
"score-markup",
"scorify-music",
"script-interface::calc-x-offset",
"script-or-side-position-cross-staff",
"search-executable",
"seconds->moment",
"segno-markup",
"select-head-glyph",
"select-option",
"self-alignment-interface::self-aligned-on-breakable",
"self-evaluating?",
"semi-tie::calc-cross-staff",
"semiflat-markup",
"semisharp-markup",
"sequential-music-to-chord-exceptions",
"sesquiflat-markup",
"sesquisharp-markup",
"session-replay",
"session-save",
"session-start-record",
"session-terminate",
"set-accidental-style",
"set-bar-number-visibility",
"set-counter-text!",
"set-default-paper-size",
"set-global-fonts",
"set-global-staff-size",
"set-mus-properties!",
"set-output-property",
"set-paper-dimension-variables",
"set-paper-dimensions",
"set-paper-size",
"sharp-markup",
"shift-duration-log",
"shift-octave",
"shift-one-duration-log",
"shift-right-at-line-begin",
"shift-semitone->pitch",
"short-glyph?",
"sign",
"silence-events",
"simple-markup",
"simple-stencil-alist",
"skip->rest",
"skip-as-needed",
"skip-of-length",
"skip-of-moment-span",
"skyline-pair-and-non-empty?",
"skyline-pair::empty?",
"slashed-digit-internal",
"slashed-digit-markup",
"slashify",
"small-markup",
"smallCaps-markup",
"smaller-markup",
"space-lines",
"span-bar::compound-bar-line",
"span-state",
"split-at-predicate",
"split-index",
"split-list",
"split-list-by-separator",
"stack-lines",
"stack-stencil-line",
"stack-stencils",
"stack-stencils-padding-list",
"stack-thirds",
"staff-ellipsis::calc-y-extent",
"staff-ellipsis::print",
"staff-magnification-is-changing?",
"staff-symbol-line-count",
"staff-symbol-line-positions",
"staff-symbol-line-span",
"staff-symbol-y-extent-from-line-positions",
"standard-e-stencil",
"standard-path-stencil",
"stderr",
"stem-connectable?",
"stem-is-root?",
"stem-span-stencil",
"stem-stub::do-calculations",
"stem-stub::extra-spacing-height",
"stem-stub::pure-height",
"stem-stub::width",
"stem-tremolo::calc-tab-width",
"stem::calc-duration-log",
"stem::kievan-offset-callback",
"stencil-fretboard-extent",
"stencil-fretboard-offset",
"stencil-markup",
"stencil-true-extent",
"stencil-whiteout",
"stencil-whiteout-box",
"stencil-whiteout-outline",
"stencil-with-color",
"sticky-grob-interface::inherit-property",
"straight-flag",
"string->string-list",
"string-encode-integer",
"string-endswith",
"string-lines-markup-list",
"string-number::calc-text",
"string-or-music?",
"string-or-pair?",
"string-or-symbol?",
"string-regexp-substitute",
"string-startswith",
"string-thickness",
"strip-string-annotation",
"stroke-finger::calc-text",
"strut-markup",
"style-note-heads",
"sub-markup",
"subtract-base-fret",
"suggest-convert-ly-message",
"super-markup",
"sus-modifier",
"symbol-concatenate",
"symbol-footnotes",
"symbol-key-alist?",
"symbol-key<?",
"symbol-list-or-music?",
"symbol-list-or-symbol?",
"symbol-list?",
"symbol<?",
"symlink-if-not-exist",
"symlink-or-copy-if-not-exist",
"symmetric-interval",
"synced?",
"system-start-text::calc-x-offset",
"system-start-text::calc-y-offset",
"system-start-text::print",
"tab-note-head::calc-glyph-name",
"tab-note-head::print",
"tab-note-head::print-custom-fret-label",
"tab-note-head::whiteout-if-style-set",
"tablature-position-on-lines",
"table-markup-list",
"tabvoice::draw-double-stem-for-half-notes",
"tabvoice::make-double-stem-width-for-half-notes",
"tag-group-get",
"tags-keep-predicate",
"tags-remove-predicate",
"teaching-accidental-rule",
"teeny-markup",
"text-fill-translate",
"text-markup",
"tie-markup",
"tie::handle-tab-note-head",
"tied-lyric-markup",
"tiny-markup",
"translate-draw-instructions",
"translate-key-instruction",
"translate-markup",
"translate-scaled-markup",
"translator-property-description",
"transparent-markup",
"transposer-factory",
"triangle-markup",
"trill-pitch-group::pure-height",
"true-entry?",
"tuning",
"tuplet-number::append-note-wrapper",
"tuplet-number::calc-denominator-text",
"tuplet-number::calc-direction",
"tuplet-number::calc-fraction-text",
"tuplet-number::fraction-with-notes",
"tuplet-number::non-default-fraction-with-notes",
"tuplet-number::non-default-tuplet-denominator-text",
"tuplet-number::non-default-tuplet-fraction-text",
"type-name",
"typewriter-markup",
"unbroken-or-first-broken-spanner?",
"unbroken-or-last-broken-spanner?",
"unbroken-spanner?",
"underline-markup",
"undertie-markup",
"unfold-repeats",
"unfold-repeats-fully",
"uniform-draw-instructions",
"uniform-extra-offset-rule",
"uniq-list",
"uniqued-alist",
"unity-if-multimeasure",
"universal-color",
"unless-markup",
"update-possb-list",
"upper-key-stencil",
"upright-markup",
"value-for-spanner-piece",
"varcoda-markup",
"variable-column-circle-stencil",
"vcenter-markup",
"vector-for-each",
"verbatim-file-markup",
"version-not-seen-message",
"voice-states",
"voicify-chord",
"voicify-list",
"voicify-music",
"void?",
"volta-bracket-interface::pure-height",
"volta-bracket::calc-hook-visibility",
"volta-spec-music",
"vspace-markup",
"whiteout-markup",
"with-color-markup",
"with-dimension-from-markup",
"with-dimension-markup",
"with-dimensions-from-markup",
"with-dimensions-markup",
"with-link-markup",
"with-outline-markup",
"with-string-transformer-markup",
"with-true-dimension-markup",
"with-true-dimensions-markup",
"with-url-markup",
"woodwind-diagram-markup",
"wordwrap-field-markup",
"wordwrap-internal-markup-list",
"wordwrap-lines-markup-list",
"wordwrap-markup",
"wordwrap-stencils",
"wordwrap-string-internal-markup-list",
"wordwrap-string-markup",
"write-lilypond-book-aux-files",
"write-me",
"write-performances-midis",
"x11-color",
]
context_properties = [
"aDueText",
"accidentalGrouping",
"additionalBassStrings",
"additionalPitchPrefix",
"alignAboveContext",
"alignBelowContext",
"alterationGlyphs",
"alternativeNumber",
"alternativeNumberingStyle",
"alternativeRestores",
"associatedVoice",
"associatedVoiceContext",
"associatedVoiceType",
"autoAccidentals",
"autoBeamCheck",
"autoBeaming",
"autoCautionaries",
"barAlways",
"barCheckLastFail",
"barCheckSynchronize",
"barExtraVelocity",
"barNumberFormatter",
"barNumberVisibility",
"baseMoment",
"beamExceptions",
"beamHalfMeasure",
"beamMelismaBusy",
"beatExtraVelocity",
"beatStructure",
"breathMarkDefinitions",
"breathMarkType",
"busyGrobs",
"centerBarNumbers",
"chordChanges",
"chordNameExceptions",
"chordNameFunction",
"chordNameLowercaseMinor",
"chordNameSeparator",
"chordNoteNamer",
"chordPrefixSpacer",
"chordRootNamer",
"clefGlyph",
"clefPosition",
"clefTransposition",
"clefTranspositionFormatter",
"clefTranspositionStyle",
"codaMarkCount",
"codaMarkFormatter",
"completionBusy",
"completionFactor",
"completionUnit",
"connectArpeggios",
"countPercentRepeats",
"createKeyOnClefChange",
"createSpacing",
"crescendoSpanner",
"crescendoText",
"cueClefGlyph",
"cueClefPosition",
"cueClefTransposition",
"cueClefTranspositionFormatter",
"cueClefTranspositionStyle",
"currentBarLine",
"currentBarNumber",
"currentChordCause",
"currentChordText",
"currentCommandColumn",
"currentMarkEvent",
"currentMusicalColumn",
"dalSegnoTextFormatter",
"decrescendoSpanner",
"decrescendoText",
"defaultStrings",
"doubleRepeatBarType",
"doubleRepeatSegnoBarType",
"doubleSlurs",
"drumPitchTable",
"drumStyleTable",
"dynamicAbsoluteVolumeFunction",
"endAtSkip",
"endRepeatBarType",
"endRepeatSegnoBarType",
"explicitClefVisibility",
"explicitCueClefVisibility",
"explicitKeySignatureVisibility",
"extendersOverRests",
"extraNatural",
"figuredBassAlterationDirection",
"figuredBassCenterContinuations",
"figuredBassFormatter",
"figuredBassLargeNumberAlignment",
"figuredBassPlusDirection",
"figuredBassPlusStrokedAlist",
"finalFineTextVisibility",
"finalizations",
"fineBarType",
"fineSegnoBarType",
"fineStartRepeatSegnoBarType",
"fineText",
"fingeringOrientations",
"firstClef",
"followVoice",
"fontSize",
"forbidBreak",
"forbidBreakBetweenBarLines",
"forceBreak",
"forceClef",
"fretLabels",
"glissandoMap",
"graceSettings",
"gridInterval",
"handleNegativeFrets",
"harmonicAccidentals",
"harmonicDots",
"hasAxisGroup",
"hasStaffSpacing",
"highStringOne",
"ignoreBarChecks",
"ignoreBarNumberChecks",
"ignoreFiguredBassRest",
"ignoreMelismata",
"implicitBassFigures",
"includeGraceNotes",
"initialTimeSignatureVisibility",
"instrumentCueName",
"instrumentEqualizer",
"instrumentName",
"instrumentTransposition",
"internalBarNumber",
"keepAliveInterfaces",
"keyAlterationOrder",
"keyAlterations",
"lastChord",
"lastKeyAlterations",
"localAlterations",
"lyricMelismaAlignment",
"lyricRepeatCountFormatter",
"magnifyStaffValue",
"majorSevenSymbol",
"maximumFretStretch",
"measureBarType",
"measureLength",
"measurePosition",
"measureStartNow",
"melismaBusy",
"melismaBusyProperties",
"metronomeMarkFormatter",
"middleCClefPosition",
"middleCCuePosition",
"middleCOffset",
"middleCPosition",
"midiBalance",
"midiChannelMapping",
"midiChorusLevel",
"midiExpression",
"midiInstrument",
"midiMaximumVolume",
"midiMergeUnisons",
"midiMinimumVolume",
"midiPanPosition",
"midiReverbLevel",
"midiSkipOffset",
"minimumFret",
"minimumPageTurnLength",
"minimumRepeatLengthForPageTurn",
"minorChordModifier",
"noChordSymbol",
"noteNameFunction",
"noteNameSeparator",
"noteToFretFunction",
"nullAccidentals",
"ottavaStartNow",
"ottavation",
"ottavationMarkups",
"output",
"partCombineForced",
"partCombineTextsOnNote",
"partialBusy",
"pedalSostenutoStrings",
"pedalSostenutoStyle",
"pedalSustainStrings",
"pedalSustainStyle",
"pedalUnaCordaStrings",
"pedalUnaCordaStyle",
"predefinedDiagramTable",
"printAccidentalNames",
"printKeyCancellation",
"printNotesLanguage",
"printOctaveNames",
"printPartCombineTexts",
"proportionalNotationDuration",
"quotedCueEventTypes",
"quotedEventTypes",
"rehearsalMark",
"rehearsalMarkFormatter",
"repeatCommands",
"repeatCountVisibility",
"restCompletionBusy",
"restNumberThreshold",
"restrainOpenStrings",
"rootSystem",
"scriptDefinitions",
"searchForVoice",
"sectionBarType",
"segnoBarType",
"segnoMarkCount",
"segnoMarkFormatter",
"segnoStyle",
"shapeNoteStyles",
"shortInstrumentName",
"shortVocalName",
"skipBars",
"skipTypesetting",
"slashChordSeparator",
"slurMelismaBusy",
"soloIIText",
"soloText",
"squashedPosition",
"staffLineLayoutFunction",
"stanza",
"startAtNoteColumn",
"startAtSkip",
"startRepeatBarType",
"startRepeatSegnoBarType",
"stavesFound",
"stemLeftBeamCount",
"stemRightBeamCount",
"strictBeatBeaming",
"stringFretFingerList",
"stringNumberOrientations",
"stringOneTopmost",
"stringTunings",
"strokeFingerOrientations",
"subdivideBeams",
"suggestAccidentals",
"supportNonIntegerFret",
"suspendMelodyDecisions",
"suspendRestMerging",
"systemStartDelimiter",
"systemStartDelimiterHierarchy",
"tabStaffLineLayoutFunction",
"tablatureFormat",
"tempoHideNote",
"tempoWholesPerMinute",
"tieMelismaBusy",
"tieWaitForNote",
"timeSignatureFraction",
"timeSignatureSettings",
"timing",
"tonic",
"topLevelAlignment",
"tupletFullLength",
"tupletFullLengthNote",
"tupletSpannerDuration",
"underlyingRepeatBarType",
"useBassFigureExtenders",
"vocalName",
"voltaSpannerDuration",
"whichBar",
]
grob_properties = [
"X-align-on-main-noteheads",
"X-attachment",
"X-common",
"X-extent",
"X-offset",
"X-positions",
"Y-attachment",
"Y-common",
"Y-extent",
"Y-offset",
"accidental-grob",
"accidental-grobs",
"add-cauda",
"add-join",
"add-stem",
"add-stem-support",
"adjacent-pure-heights",
"adjacent-spanners",
"after-line-breaking",
"align-dir",
"all-elements",
"allow-loose-spacing",
"allow-span-bar",
"alteration",
"alteration-alist",
"alteration-glyph-name-alist",
"annotation",
"annotation-balloon",
"annotation-line",
"arpeggio-direction",
"arrow-length",
"arrow-width",
"ascendens",
"auctum",
"auto-knee-gap",
"automatically-numbered",
"average-spacing-wishes",
"avoid-note-head",
"avoid-scripts",
"avoid-slur",
"axes",
"axis-group-parent-X",
"axis-group-parent-Y",
"bar-extent",
"bars",
"base-shortest-duration",
"baseline-skip",
"beam",
"beam-segments",
"beam-thickness",
"beam-width",
"beamed-stem-shorten",
"beaming",
"beamlet-default-length",
"beamlet-max-length-proportion",
"before-line-breaking",
"begin-of-line-visible",
"bend-me",
"between-cols",
"bezier",
"bound-alignment-interfaces",
"bound-details",
"bound-padding",
"bounded-by-me",
"bracket",
"bracket-flare",
"bracket-text",
"bracket-visibility",
"break-align-anchor",
"break-align-anchor-alignment",
"break-align-orders",
"break-align-symbol",
"break-align-symbols",
"break-alignment",
"break-overshoot",
"break-visibility",
"breakable",
"broken-bound-padding",
"c0-position",
"cause",
"cavum",
"chord-dots-limit",
"chord-names",
"circled-tip",
"clef-alignments",
"clip-edges",
"collapse-height",
"collision-interfaces",
"collision-voice-only",
"color",
"columns",
"common-shortest-duration",
"concaveness",
"concurrent-hairpins",
"conditional-elements",
"connect-to-neighbor",
"context-info",
"control-points",
"count-from",
"covered-grobs",
"cross-staff",
"damping",
"dash-definition",
"dash-fraction",
"dash-period",
"dashed-edge",
"default-direction",
"default-staff-staff-spacing",
"delta-position",
"deminutum",
"descendens",
"details",
"digit-names",
"direction",
"direction-source",
"display-cautionary",
"dot",
"dot-count",
"dot-negative-kern",
"dot-placement-list",
"dots",
"double-stem-separation",
"duration-log",
"eccentricity",
"edge-height",
"edge-text",
"elements",
"encompass-objects",
"endpoint-alignments",
"expand-limit",
"extra-dy",
"extra-offset",
"extra-spacing-height",
"extra-spacing-width",
"extroversion",
"figures",
"filled",
"flag",
"flag-count",
"flag-style",
"flat-positions",
"flexa-height",
"flexa-interval",
"flexa-width",
"font",
"font-encoding",
"font-family",
"font-features",
"font-name",
"font-series",
"font-shape",
"font-size",
"footnote",
"footnote-music",
"footnote-stencil",
"footnote-text",
"footnotes-after-line-breaking",
"footnotes-before-line-breaking",
"force-hshift",
"forced",
"forced-spacing",
"fraction",
"french-beaming",
"french-beaming-stem-adjustment",
"fret-diagram-details",
"full-length-padding",
"full-length-to-extent",
"full-measure-extra-space",
"full-size-change",
"gap",
"gap-count",
"glissando-index",
"glissando-skip",
"glyph",
"glyph-left",
"glyph-name",
"glyph-right",
"grace-spacing",
"graphical",
"grow-direction",
"hair-thickness",
"harp-pedal-details",
"has-span-bar",
"head-direction",
"head-width",
"heads",
"height",
"height-limit",
"hide-tied-accidental-after-break",
"horizon-padding",
"horizontal-shift",
"horizontal-skylines",
"id",
"ideal-distances",
"ignore-ambitus",
"ignore-collision",
"implicit",
"important-column-ranks",
"in-note-direction",
"in-note-padding",
"in-note-stencil",
"inclinatum",
"index",
"inspect-quants",
"interfaces",
"items-worth-living",
"keep-alive-with",
"keep-inside-line",
"kern",
"knee",
"knee-spacing-correction",
"knee-to-beam",
"labels",
"layer",
"least-squares-dy",
"ledger-extra",
"ledger-line-thickness",
"ledger-positions",
"ledger-positions-function",
"left-bound-info",
"left-items",
"left-neighbor",
"left-number-text",
"left-padding",
"length",
"length-fraction",
"ligature-flexa",
"line-break-penalty",
"line-break-permission",
"line-break-system-details",
"line-count",
"line-positions",
"line-thickness",
"linea",
"long-text",
"main-extent",
"make-dead-when",
"max-beam-connect",
"max-symbol-separation",
"maximum-gap",
"maybe-loose",
"measure-count",
"measure-division",
"measure-division-chord-placement-alist",
"measure-division-lines-alist",
"measure-length",
"melody-spanner",
"merge-differently-dotted",
"merge-differently-headed",
"meta",
"minimum-X-extent",
"minimum-Y-extent",
"minimum-distance",
"minimum-distances",
"minimum-length",
"minimum-length-after-break",
"minimum-length-fraction",
"minimum-space",
"minimum-translations-alist",
"neighbors",
"neutral-direction",
"neutral-position",
"next",
"no-ledgers",
"no-stem-extend",
"non-break-align-symbols",
"non-default",
"non-musical",
"nonstaff-nonstaff-spacing",
"nonstaff-relatedstaff-spacing",
"nonstaff-unrelatedstaff-spacing",
"normal-stems",
"normalized-endpoints",
"note-collision",
"note-collision-threshold",
"note-columns",
"note-head",
"note-heads",
"note-names",
"number-range-separator",
"number-type",
"numbering-assertion-function",
"oriscus",
"output-attributes",
"outside-staff-horizontal-padding",
"outside-staff-padding",
"outside-staff-placement-directive",
"outside-staff-priority",
"packed-spacing",
"padding",
"padding-pairs",
"page-break-penalty",
"page-break-permission",
"page-number",
"page-turn-penalty",
"page-turn-permission",
"parent-alignment-X",
"parent-alignment-Y",
"parenthesis-friends",
"parenthesis-id",
"parenthesized",
"pedal-text",
"pes-or-flexa",
"positioning-done",
"positions",
"prefer-dotted-right",
"prefix-set",
"primitive",
"protrusion",
"pure-Y-common",
"pure-Y-extent",
"pure-Y-offset-in-progress",
"pure-relevant-grobs",
"pure-relevant-items",
"pure-relevant-spanners",
"quantize-position",
"quantized-positions",
"quilisma",
"rank-on-page",
"ratio",
"remove-empty",
"remove-first",
"remove-layer",
"replacement-alist",
"rest",
"rest-collision",
"restore-first",
"rests",
"rhythmic-location",
"right-bound-info",
"right-items",
"right-neighbor",
"right-number-text",
"right-padding",
"rotation",
"round-up-exceptions",
"round-up-to-longer-rest",
"rounded",
"same-direction-correction",
"script-column",
"script-priority",
"script-stencil",
"scripts",
"segno-kern",
"self-alignment-X",
"self-alignment-Y",
"shape",
"sharp-positions",
"shorten",
"shorten-pair",
"shortest-duration-space",
"shortest-playing-duration",
"shortest-starter-duration",
"show-control-points",
"show-horizontal-skylines",
"show-vertical-skylines",
"side-axis",
"side-relative-direction",
"side-support-elements",
"size",
"skip-quanting",
"skyline-horizontal-padding",
"skyline-vertical-padding",
"slash-negative-kern",
"slope",
"slur",
"slur-padding",
"snap-radius",
"space-alist",
"space-increment",
"space-to-barline",
"spacing",
"spacing-increment",
"spacing-pair",
"spacing-wishes",
"span-start",
"spanner-broken",
"spanner-id",
"spanner-placement",
"springs-and-rods",
"stacking-dir",
"staff-affinity",
"staff-grouper",
"staff-padding",
"staff-position",
"staff-space",
"staff-staff-spacing",
"staff-symbol",
"staffgroup-staff-spacing",
"stem",
"stem-attachment",
"stem-begin-position",
"stem-info",
"stem-spacing-correction",
"stemlet-length",
"stems",
"stencil",
"stencils",
"sticky-host",
"strict-grace-spacing",
"strict-note-spacing",
"stroke-style",
"stropha",
"style",
"system-Y-offset",
"text",
"text-alignment-X",
"text-alignment-Y",
"text-direction",
"thick-thickness",
"thickness",
"tie",
"tie-configuration",
"ties",
"to-barline",
"toward-stem-shift",
"toward-stem-shift-in-column",
"transparent",
"tremolo-flag",
"tuplet-number",
"tuplet-slur",
"tuplet-start",
"tuplets",
"uniform-stretching",
"usable-duration-logs",
"use-skylines",
"used",
"vertical-alignment",
"vertical-skyline-elements",
"vertical-skylines",
"virga",
"voiced-position",
"when",
"whiteout",
"whiteout-style",
"width",
"word-space",
"x-offset",
"zigzag-length",
"zigzag-width",
]
paper_variables = [
"auto-first-page-number",
"basic-distance",
"binding-offset",
"blank-last-page-penalty",
"blank-page-penalty",
"bookTitleMarkup",
"bottom-margin",
"check-consistency",
"evenFooterMarkup",
"evenHeaderMarkup",
"first-page-number",
"footnote-separator-markup",
"horizontal-shift",
"indent",
"inner-margin",
"last-bottom-spacing",
"left-margin",
"line-width",
"markup-markup-spacing",
"markup-system-spacing",
"max-systems-per-page",
"min-systems-per-page",
"minimum-distance",
"oddFooterMarkup",
"oddHeaderMarkup",
"outer-margin",
"padding",
"page-breaking",
"page-breaking-system-system-spacing",
"page-count",
"page-number-type",
"page-spacing-weight",
"paper-height",
"paper-width",
"print-all-headers",
"print-first-page-number",
"ragged-bottom",
"ragged-last",
"ragged-last-bottom",
"ragged-right",
"right-margin",
"score-markup-spacing",
"score-system-spacing",
"scoreTitleMarkup",
"short-indent",
"stretchability",
"system-count",
"system-separator-markup",
"system-system-spacing",
"systems-per-page",
"top-margin",
"top-markup-spacing",
"top-system-spacing",
"two-sided",
]
header_variables = [
"arranger",
"composer",
"copyright",
"dedication",
"doctitle",
"instrument",
"lsrtags",
"meter",
"opus",
"piece",
"poet",
"subsubtitle",
"subtitle",
"tagline",
"texidoc",
"title",
]
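# Note: the name lists above are plain data; a Pygments lexer typically folds
# such lists into token rules with pygments.lexer.words(). A minimal sketch of
# that pattern (a hypothetical rule, not taken from the actual LilyPond lexer):
#
#     from pygments.lexer import words
#     from pygments.token import Name
#
#     (words(header_variables, suffix=r'\b'), Name.Builtin),
#
# The real lexer may combine or tokenize these lists differently.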
| 106,767 | Python | 20.84735 | 76 | 0.655081 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/cplint.py | """
pygments.lexers.cplint
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the cplint language
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import bygroups, inherit, words
from pygments.lexers import PrologLexer
from pygments.token import Operator, Keyword, Name, String, Punctuation
__all__ = ['CplintLexer']
class CplintLexer(PrologLexer):
"""
Lexer for cplint files, including CP-logic, Logic Programs with Annotated
Disjunctions, Distributional Clauses syntax, ProbLog, DTProbLog.
.. versionadded:: 2.12
"""
name = 'cplint'
url = 'https://cplint.eu'
aliases = ['cplint']
filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl']
mimetypes = ['text/x-cplint']
tokens = {
'root': [
(r'map_query', Keyword),
(words(('gaussian', 'uniform_dens', 'dirichlet', 'gamma', 'beta',
'poisson', 'binomial', 'geometric', 'exponential', 'pascal',
'multinomial', 'user', 'val', 'uniform', 'discrete',
'finite')), Name.Builtin),
# annotations of atoms
(r'([a-z]+)(:)', bygroups(String.Atom, Punctuation)),
(r':(-|=)|::?|~=?|=>', Operator),
(r'\?', Name.Builtin),
inherit,
],
}
| 1,390 | Python | 29.91111 | 80 | 0.54964 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/tcl.py | """
pygments.lexers.tcl
~~~~~~~~~~~~~~~~~~~
Lexers for Tcl and related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Whitespace
from pygments.util import shebang_matches
__all__ = ['TclLexer']
class TclLexer(RegexLexer):
"""
For Tcl source code.
.. versionadded:: 0.10
"""
keyword_cmds_re = words((
'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif',
'else', 'error', 'eval', 'expr', 'for', 'foreach', 'global', 'if',
'namespace', 'proc', 'rename', 'return', 'set', 'switch', 'then',
'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable', 'vwait',
'while'), prefix=r'\b', suffix=r'\b')
builtin_cmds_re = words((
'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close',
'concat', 'dde', 'dict', 'encoding', 'eof', 'exec', 'exit', 'fblocked',
'fconfigure', 'fcopy', 'file', 'fileevent', 'flush', 'format', 'gets',
'glob', 'history', 'http', 'incr', 'info', 'interp', 'join', 'lappend',
'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort',
'mathfunc', 'mathop', 'memory', 'msgcat', 'open', 'package', 'pid',
'pkg::create', 'pkg_mkIndex', 'platform', 'platform::shell', 'puts',
'pwd', 're_syntax', 'read', 'refchan', 'regexp', 'registry', 'regsub',
'scan', 'seek', 'socket', 'source', 'split', 'string', 'subst', 'tell',
'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')
name = 'Tcl'
url = 'https://www.tcl.tk/about/language.html'
aliases = ['tcl']
filenames = ['*.tcl', '*.rvt']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w.-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
(r'\}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Whitespace),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$[\w.:-]+', Name.Variable),
(r'\$\{[\w.:-]+\}', Name.Variable),
(r'[\w.,@:-]+', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)\b', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'\}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'\}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
| 5,513 | Python | 35.76 | 80 | 0.435879 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/shell.py | """
pygments.lexers.shell
~~~~~~~~~~~~~~~~~~~~~
Lexers for various shells.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, \
include, default, this, using, words, line_re
from pygments.token import Punctuation, Whitespace, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
'SlurmBashLexer', 'MSDOSSessionLexer', 'PowerShellLexer',
'PowerShellSessionLexer', 'TcshSessionLexer', 'FishShellLexer',
'ExeclineLexer']
class BashLexer(RegexLexer):
"""
Lexer for (ba|k|z|)sh shell scripts.
.. versionadded:: 0.6
"""
name = 'Bash'
aliases = ['bash', 'sh', 'ksh', 'zsh', 'shell']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
'*.exheres-0', '*.exlib', '*.zsh',
'.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc',
'.kshrc', 'kshrc',
'PKGBUILD']
mimetypes = ['application/x-sh', 'application/x-shellscript', 'text/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'`', String.Backtick, 'backticks'),
include('data'),
include('interp'),
],
'interp': [
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\$\{#?', String.Interpol, 'curly'),
(r'\$[a-zA-Z_]\w*', Name.Variable), # user variable
(r'\$(?:\d+|[#$?!_*@-])', Name.Variable), # builtin
(r'\$', Text),
],
'basic': [
(r'\b(if|fi|else|while|in|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)(\s*)\b',
bygroups(Keyword, Whitespace)),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)(?=[\s)`])',
Name.Builtin),
(r'\A#!.+\n', Comment.Hashbang),
(r'#.*\n', Comment.Single),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(\+?=)', bygroups(Name.Variable, Whitespace, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<<', Operator), # here-string
(r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\.|[^"\\$])*"', String.Double),
(r'"', String.Double, 'string'),
(r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r"(?s)'.*?'", String.Single),
(r';', Punctuation),
(r'&', Punctuation),
(r'\|', Punctuation),
(r'\s+', Whitespace),
(r'\d+\b', Number),
(r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
(r'<', Text),
],
'string': [
(r'"', String.Double, '#pop'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
include('interp'),
],
'curly': [
(r'\}', String.Interpol, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$\\]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+#\d+', Number),
(r'\d+#(?! )', Number),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
if shebang_matches(text, r'(ba|z|)sh'):
return 1
if text.startswith('$ '):
return 0.2
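# A minimal usage sketch (not part of the original module) showing how a lexer
# defined in this file can be exercised through the standard Pygments API; the
# shell snippet is made up purely for illustration:
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#
#     code = 'if [ -f "$HOME/.bashrc" ]; then . "$HOME/.bashrc"; fi'
#     print(highlight(code, BashLexer(), TerminalFormatter()))
#
# For inspecting individual rules, BashLexer().get_tokens(code) yields
# (token_type, value) pairs directly.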
class SlurmBashLexer(BashLexer):
"""
Lexer for (ba|k|z|)sh Slurm scripts.
.. versionadded:: 2.4
"""
name = 'Slurm'
aliases = ['slurm', 'sbatch']
filenames = ['*.sl']
mimetypes = []
EXTRA_KEYWORDS = {'srun'}
def get_tokens_unprocessed(self, text):
for index, token, value in BashLexer.get_tokens_unprocessed(self, text):
if token is Text and value in self.EXTRA_KEYWORDS:
yield index, Name.Builtin, value
elif token is Comment.Single and 'SBATCH' in value:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
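# To see the SlurmBashLexer overrides in action, a made-up script such as
#
#     #!/bin/bash
#     #SBATCH --ntasks=1
#     srun hostname
#
# would have the '#SBATCH' line reclassified from Comment.Single to
# Keyword.Pseudo and 'srun' promoted from Text to Name.Builtin, while all
# other tokens pass through unchanged from BashLexer.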
class ShellSessionBaseLexer(Lexer):
"""
Base lexer for shell sessions.
.. versionadded:: 2.1
"""
_bare_continuation = False
_venv = re.compile(r'^(\([^)]*\))(\s*)')
def get_tokens_unprocessed(self, text):
innerlexer = self._innerLexerCls(**self.options)
pos = 0
curcode = ''
insertions = []
backslash_continuation = False
for match in line_re.finditer(text):
line = match.group()
venv_match = self._venv.match(line)
if venv_match:
venv = venv_match.group(1)
venv_whitespace = venv_match.group(2)
insertions.append((len(curcode),
[(0, Generic.Prompt.VirtualEnv, venv)]))
if venv_whitespace:
insertions.append((len(curcode),
[(0, Text, venv_whitespace)]))
line = line[venv_match.end():]
m = self._ps1rgx.match(line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
backslash_continuation = curcode.endswith('\\\n')
elif backslash_continuation:
if line.startswith(self._ps2):
insertions.append((len(curcode),
[(0, Generic.Prompt,
line[:len(self._ps2)])]))
curcode += line[len(self._ps2):]
else:
curcode += line
backslash_continuation = curcode.endswith('\\\n')
elif self._bare_continuation and line.startswith(self._ps2):
insertions.append((len(curcode),
[(0, Generic.Prompt,
line[:len(self._ps2)])]))
curcode += line[len(self._ps2):]
else:
if insertions:
toks = innerlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
innerlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class BashSessionLexer(ShellSessionBaseLexer):
"""
Lexer for Bash shell sessions, i.e. command lines, including a
prompt, interspersed with output.
.. versionadded:: 1.1
"""
name = 'Bash Session'
aliases = ['console', 'shell-session']
filenames = ['*.sh-session', '*.shell-session']
mimetypes = ['application/x-shell-session', 'application/x-sh-session']
_innerLexerCls = BashLexer
_ps1rgx = re.compile(
r'^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)' \
r'?|\[\S+[@:][^\n]+\].+))\s*[$#%]\s*)(.*\n?)')
_ps2 = '> '
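# For reference, the kind of input this session lexer targets looks like the
# following made-up transcript (prompt, command, continuation, then output):
#
#     user@host:~$ echo hello \
#     > world
#     hello world
#
# Lines matching _ps1rgx contribute a Generic.Prompt token plus command tokens
# from BashLexer; unmatched lines are emitted as Generic.Output.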
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
.. versionadded:: 0.7
"""
name = 'Batchfile'
aliases = ['batch', 'bat', 'dosbatch', 'winbatch']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
_nl = r'\n\x1a'
_punct = r'&<>|'
_ws = r'\t\v\f\r ,;=\xa0'
_nlws = r'\s\x1a\xa0,;='
_space = r'(?:(?:(?:\^[%s])?[%s])+)' % (_nl, _ws)
_keyword_terminator = (r'(?=(?:\^[%s]?)?[%s+./:[\\\]]|[%s%s(])' %
(_nl, _ws, _nl, _punct))
_token_terminator = r'(?=\^?[%s]|[%s%s])' % (_ws, _punct, _nl)
_start_label = r'((?:(?<=^[^:])|^[^:]?)[%s]*)(:)' % _ws
_label = r'(?:(?:[^%s%s+:^]|\^[%s]?[\w\W])*)' % (_nlws, _punct, _nl)
_label_compound = r'(?:(?:[^%s%s+:^)]|\^[%s]?[^)])*)' % (_nlws, _punct, _nl)
_number = r'(?:-?(?:0[0-7]+|0x[\da-f]+|\d+)%s)' % _token_terminator
_opword = r'(?:equ|geq|gtr|leq|lss|neq)'
_string = r'(?:"[^%s"]*(?:"|(?=[%s])))' % (_nl, _nl)
_variable = (r'(?:(?:%%(?:\*|(?:~[a-z]*(?:\$[^:]+:)?)?\d|'
r'[^%%:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:[^%%%s^]|'
r'\^[^%%%s])[^=%s]*=(?:[^%%%s^]|\^[^%%%s])*)?)?%%))|'
r'(?:\^?![^!:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:'
r'[^!%s^]|\^[^!%s])[^=%s]*=(?:[^!%s^]|\^[^!%s])*)?)?\^?!))' %
(_nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl))
_core_token = r'(?:(?:(?:\^[%s]?)?[^"%s%s])+)' % (_nl, _nlws, _punct)
_core_token_compound = r'(?:(?:(?:\^[%s]?)?[^"%s%s)])+)' % (_nl, _nlws, _punct)
_token = r'(?:[%s]+|%s)' % (_punct, _core_token)
_token_compound = r'(?:[%s]+|%s)' % (_punct, _core_token_compound)
_stoken = (r'(?:[%s]+|(?:%s|%s|%s)+)' %
(_punct, _string, _variable, _core_token))
def _make_begin_state(compound, _core_token=_core_token,
_core_token_compound=_core_token_compound,
_keyword_terminator=_keyword_terminator,
_nl=_nl, _punct=_punct, _string=_string,
_space=_space, _start_label=_start_label,
_stoken=_stoken, _token_terminator=_token_terminator,
_variable=_variable, _ws=_ws):
rest = '(?:%s|%s|[^"%%%s%s%s])*' % (_string, _variable, _nl, _punct,
')' if compound else '')
rest_of_line = r'(?:(?:[^%s^]|\^[%s]?[\w\W])*)' % (_nl, _nl)
rest_of_line_compound = r'(?:(?:[^%s^)]|\^[%s]?[^)])*)' % (_nl, _nl)
set_space = r'((?:(?:\^[%s]?)?[^\S\n])*)' % _nl
suffix = ''
if compound:
_keyword_terminator = r'(?:(?=\))|%s)' % _keyword_terminator
_token_terminator = r'(?:(?=\))|%s)' % _token_terminator
suffix = '/compound'
return [
((r'\)', Punctuation, '#pop') if compound else
(r'\)((?=\()|%s)%s' % (_token_terminator, rest_of_line),
Comment.Single)),
(r'(?=%s)' % _start_label, Text, 'follow%s' % suffix),
(_space, using(this, state='text')),
include('redirect%s' % suffix),
(r'[%s]+' % _nl, Text),
(r'\(', Punctuation, 'root/compound'),
(r'@+', Punctuation),
(r'((?:for|if|rem)(?:(?=(?:\^[%s]?)?/)|(?:(?!\^)|'
r'(?<=m))(?:(?=\()|%s)))(%s?%s?(?:\^[%s]?)?/(?:\^[%s]?)?\?)' %
(_nl, _token_terminator, _space,
_core_token_compound if compound else _core_token, _nl, _nl),
bygroups(Keyword, using(this, state='text')),
'follow%s' % suffix),
(r'(goto%s)(%s(?:\^[%s]?)?/(?:\^[%s]?)?\?%s)' %
(_keyword_terminator, rest, _nl, _nl, rest),
bygroups(Keyword, using(this, state='text')),
'follow%s' % suffix),
(words(('assoc', 'break', 'cd', 'chdir', 'cls', 'color', 'copy',
'date', 'del', 'dir', 'dpath', 'echo', 'endlocal', 'erase',
'exit', 'ftype', 'keys', 'md', 'mkdir', 'mklink', 'move',
'path', 'pause', 'popd', 'prompt', 'pushd', 'rd', 'ren',
'rename', 'rmdir', 'setlocal', 'shift', 'start', 'time',
'title', 'type', 'ver', 'verify', 'vol'),
suffix=_keyword_terminator), Keyword, 'follow%s' % suffix),
(r'(call)(%s?)(:)' % _space,
bygroups(Keyword, using(this, state='text'), Punctuation),
'call%s' % suffix),
(r'call%s' % _keyword_terminator, Keyword),
(r'(for%s(?!\^))(%s)(/f%s)' %
(_token_terminator, _space, _token_terminator),
bygroups(Keyword, using(this, state='text'), Keyword),
('for/f', 'for')),
(r'(for%s(?!\^))(%s)(/l%s)' %
(_token_terminator, _space, _token_terminator),
bygroups(Keyword, using(this, state='text'), Keyword),
('for/l', 'for')),
(r'for%s(?!\^)' % _token_terminator, Keyword, ('for2', 'for')),
(r'(goto%s)(%s?)(:?)' % (_keyword_terminator, _space),
bygroups(Keyword, using(this, state='text'), Punctuation),
'label%s' % suffix),
(r'(if(?:(?=\()|%s)(?!\^))(%s?)((?:/i%s)?)(%s?)((?:not%s)?)(%s?)' %
(_token_terminator, _space, _token_terminator, _space,
_token_terminator, _space),
bygroups(Keyword, using(this, state='text'), Keyword,
using(this, state='text'), Keyword,
using(this, state='text')), ('(?', 'if')),
(r'rem(((?=\()|%s)%s?%s?.*|%s%s)' %
(_token_terminator, _space, _stoken, _keyword_terminator,
rest_of_line_compound if compound else rest_of_line),
Comment.Single, 'follow%s' % suffix),
(r'(set%s)%s(/a)' % (_keyword_terminator, set_space),
bygroups(Keyword, using(this, state='text'), Keyword),
'arithmetic%s' % suffix),
(r'(set%s)%s((?:/p)?)%s((?:(?:(?:\^[%s]?)?[^"%s%s^=%s]|'
r'\^[%s]?[^"=])+)?)((?:(?:\^[%s]?)?=)?)' %
(_keyword_terminator, set_space, set_space, _nl, _nl, _punct,
')' if compound else '', _nl, _nl),
bygroups(Keyword, using(this, state='text'), Keyword,
using(this, state='text'), using(this, state='variable'),
Punctuation),
'follow%s' % suffix),
default('follow%s' % suffix)
]
def _make_follow_state(compound, _label=_label,
_label_compound=_label_compound, _nl=_nl,
_space=_space, _start_label=_start_label,
_token=_token, _token_compound=_token_compound,
_ws=_ws):
suffix = '/compound' if compound else ''
state = []
if compound:
state.append((r'(?=\))', Text, '#pop'))
state += [
(r'%s([%s]*)(%s)(.*)' %
(_start_label, _ws, _label_compound if compound else _label),
bygroups(Text, Punctuation, Text, Name.Label, Comment.Single)),
include('redirect%s' % suffix),
(r'(?=[%s])' % _nl, Text, '#pop'),
(r'\|\|?|&&?', Punctuation, '#pop'),
include('text')
]
return state
def _make_arithmetic_state(compound, _nl=_nl, _punct=_punct,
_string=_string, _variable=_variable,
_ws=_ws, _nlws=_nlws):
op = r'=+\-*/!~'
state = []
if compound:
state.append((r'(?=\))', Text, '#pop'))
state += [
(r'0[0-7]+', Number.Oct),
(r'0x[\da-f]+', Number.Hex),
(r'\d+', Number.Integer),
(r'[(),]+', Punctuation),
(r'([%s]|%%|\^\^)+' % op, Operator),
(r'(%s|%s|(\^[%s]?)?[^()%s%%\^"%s%s]|\^[%s]?%s)+' %
(_string, _variable, _nl, op, _nlws, _punct, _nlws,
r'[^)]' if compound else r'[\w\W]'),
using(this, state='variable')),
(r'(?=[\x00|&])', Text, '#pop'),
include('follow')
]
return state
def _make_call_state(compound, _label=_label,
_label_compound=_label_compound):
state = []
if compound:
state.append((r'(?=\))', Text, '#pop'))
state.append((r'(:?)(%s)' % (_label_compound if compound else _label),
bygroups(Punctuation, Name.Label), '#pop'))
return state
def _make_label_state(compound, _label=_label,
_label_compound=_label_compound, _nl=_nl,
_punct=_punct, _string=_string, _variable=_variable):
state = []
if compound:
state.append((r'(?=\))', Text, '#pop'))
state.append((r'(%s?)((?:%s|%s|\^[%s]?%s|[^"%%^%s%s%s])*)' %
(_label_compound if compound else _label, _string,
_variable, _nl, r'[^)]' if compound else r'[\w\W]', _nl,
_punct, r')' if compound else ''),
bygroups(Name.Label, Comment.Single), '#pop'))
return state
def _make_redirect_state(compound,
_core_token_compound=_core_token_compound,
_nl=_nl, _punct=_punct, _stoken=_stoken,
_string=_string, _space=_space,
_variable=_variable, _nlws=_nlws):
stoken_compound = (r'(?:[%s]+|(?:%s|%s|%s)+)' %
(_punct, _string, _variable, _core_token_compound))
return [
(r'((?:(?<=[%s])\d)?)(>>?&|<&)([%s]*)(\d)' %
(_nlws, _nlws),
bygroups(Number.Integer, Punctuation, Text, Number.Integer)),
(r'((?:(?<=[%s])(?<!\^[%s])\d)?)(>>?|<)(%s?%s)' %
(_nlws, _nl, _space, stoken_compound if compound else _stoken),
bygroups(Number.Integer, Punctuation, using(this, state='text')))
]
tokens = {
'root': _make_begin_state(False),
'follow': _make_follow_state(False),
'arithmetic': _make_arithmetic_state(False),
'call': _make_call_state(False),
'label': _make_label_state(False),
'redirect': _make_redirect_state(False),
'root/compound': _make_begin_state(True),
'follow/compound': _make_follow_state(True),
'arithmetic/compound': _make_arithmetic_state(True),
'call/compound': _make_call_state(True),
'label/compound': _make_label_state(True),
'redirect/compound': _make_redirect_state(True),
'variable-or-escape': [
(_variable, Name.Variable),
(r'%%%%|\^[%s]?(\^!|[\w\W])' % _nl, String.Escape)
],
'string': [
(r'"', String.Double, '#pop'),
(_variable, Name.Variable),
(r'\^!|%%', String.Escape),
(r'[^"%%^%s]+|[%%^]' % _nl, String.Double),
default('#pop')
],
'sqstring': [
include('variable-or-escape'),
(r'[^%]+|%', String.Single)
],
'bqstring': [
include('variable-or-escape'),
(r'[^%]+|%', String.Backtick)
],
'text': [
(r'"', String.Double, 'string'),
include('variable-or-escape'),
(r'[^"%%^%s%s\d)]+|.' % (_nlws, _punct), Text)
],
'variable': [
(r'"', String.Double, 'string'),
include('variable-or-escape'),
(r'[^"%%^%s]+|.' % _nl, Name.Variable)
],
'for': [
(r'(%s)(in)(%s)(\()' % (_space, _space),
bygroups(using(this, state='text'), Keyword,
using(this, state='text'), Punctuation), '#pop'),
include('follow')
],
'for2': [
(r'\)', Punctuation),
(r'(%s)(do%s)' % (_space, _token_terminator),
bygroups(using(this, state='text'), Keyword), '#pop'),
(r'[%s]+' % _nl, Text),
include('follow')
],
'for/f': [
(r'(")((?:%s|[^"])*?")([%s]*)(\))' % (_variable, _nlws),
bygroups(String.Double, using(this, state='string'), Text,
Punctuation)),
(r'"', String.Double, ('#pop', 'for2', 'string')),
(r"('(?:%%%%|%s|[\w\W])*?')([%s]*)(\))" % (_variable, _nlws),
bygroups(using(this, state='sqstring'), Text, Punctuation)),
(r'(`(?:%%%%|%s|[\w\W])*?`)([%s]*)(\))' % (_variable, _nlws),
bygroups(using(this, state='bqstring'), Text, Punctuation)),
include('for2')
],
'for/l': [
(r'-?\d+', Number.Integer),
include('for2')
],
'if': [
(r'((?:cmdextversion|errorlevel)%s)(%s)(\d+)' %
(_token_terminator, _space),
bygroups(Keyword, using(this, state='text'),
Number.Integer), '#pop'),
(r'(defined%s)(%s)(%s)' % (_token_terminator, _space, _stoken),
bygroups(Keyword, using(this, state='text'),
using(this, state='variable')), '#pop'),
(r'(exist%s)(%s%s)' % (_token_terminator, _space, _stoken),
bygroups(Keyword, using(this, state='text')), '#pop'),
(r'(%s%s)(%s)(%s%s)' % (_number, _space, _opword, _space, _number),
bygroups(using(this, state='arithmetic'), Operator.Word,
using(this, state='arithmetic')), '#pop'),
(_stoken, using(this, state='text'), ('#pop', 'if2')),
],
'if2': [
(r'(%s?)(==)(%s?%s)' % (_space, _space, _stoken),
bygroups(using(this, state='text'), Operator,
using(this, state='text')), '#pop'),
(r'(%s)(%s)(%s%s)' % (_space, _opword, _space, _stoken),
bygroups(using(this, state='text'), Operator.Word,
using(this, state='text')), '#pop')
],
'(?': [
(_space, using(this, state='text')),
(r'\(', Punctuation, ('#pop', 'else?', 'root/compound')),
default('#pop')
],
'else?': [
(_space, using(this, state='text')),
(r'else%s' % _token_terminator, Keyword, '#pop'),
default('#pop')
]
}
class MSDOSSessionLexer(ShellSessionBaseLexer):
"""
Lexer for MS DOS shell sessions, i.e. command lines, including a
prompt, interspersed with output.
.. versionadded:: 2.1
"""
name = 'MSDOS Session'
aliases = ['doscon']
filenames = []
mimetypes = []
_innerLexerCls = BatchLexer
_ps1rgx = re.compile(r'^([^>]*>)(.*\n?)')
_ps2 = 'More? '
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
.. versionadded:: 0.10
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\$\{#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
r'source|stop|suspend|source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class TcshSessionLexer(ShellSessionBaseLexer):
"""
Lexer for Tcsh sessions, i.e. command lines, including a
prompt, interspersed with output.
.. versionadded:: 2.1
"""
name = 'Tcsh Session'
aliases = ['tcshcon']
filenames = []
mimetypes = []
_innerLexerCls = TcshLexer
_ps1rgx = re.compile(r'^([^>]+>)(.*\n?)')
_ps2 = '? '
class PowerShellLexer(RegexLexer):
"""
For Windows PowerShell code.
.. versionadded:: 1.5
"""
name = 'PowerShell'
aliases = ['powershell', 'pwsh', 'posh', 'ps1', 'psm1']
filenames = ['*.ps1', '*.psm1']
mimetypes = ['text/x-powershell']
flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
keywords = (
'while validateset validaterange validatepattern validatelength '
'validatecount until trap switch return ref process param parameter in '
'if global: local: function foreach for finally filter end elseif else '
'dynamicparam do default continue cmdletbinding break begin alias \\? '
'% #script #private #local #global mandatory parametersetname position '
'valuefrompipeline valuefrompipelinebypropertyname '
'valuefromremainingarguments helpmessage try catch throw').split()
operators = (
'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
'lt match ne not notcontains notlike notmatch or regex replace '
'wildcard').split()
verbs = (
'write where watch wait use update unregister unpublish unprotect '
'unlock uninstall undo unblock trace test tee take sync switch '
'suspend submit stop step start split sort skip show set send select '
'search scroll save revoke resume restore restart resolve resize '
'reset request repair rename remove register redo receive read push '
'publish protect pop ping out optimize open new move mount merge '
'measure lock limit join invoke install initialize import hide group '
'grant get format foreach find export expand exit enter enable edit '
'dismount disconnect disable deny debug cxnew copy convertto '
'convertfrom convert connect confirm compress complete compare close '
'clear checkpoint block backup assert approve aggregate add').split()
aliases_ = (
'ac asnp cat cd cfs chdir clc clear clhy cli clp cls clv cnsn '
'compare copy cp cpi cpp curl cvpa dbp del diff dir dnsn ebp echo epal '
'epcsv epsn erase etsn exsn fc fhx fl foreach ft fw gal gbp gc gci gcm '
'gcs gdr ghy gi gjb gl gm gmo gp gps gpv group gsn gsnp gsv gu gv gwmi '
'h history icm iex ihy ii ipal ipcsv ipmo ipsn irm ise iwmi iwr kill lp '
'ls man md measure mi mount move mp mv nal ndr ni nmo npssc nsn nv ogv '
'oh popd ps pushd pwd r rbp rcjb rcsn rd rdr ren ri rjb rm rmdir rmo '
'rni rnp rp rsn rsnp rujb rv rvpa rwmi sajb sal saps sasv sbp sc select '
'set shcm si sl sleep sls sort sp spjb spps spsv start sujb sv swmi tee '
'trcm type wget where wjb write').split()
commenthelp = (
'component description example externalhelp forwardhelpcategory '
'forwardhelptargetname functionality inputs link '
'notes outputs parameter remotehelprunspace role synopsis').split()
tokens = {
'root': [
# we need to count pairs of parentheses for correct highlight
# of '$(...)' blocks in strings
(r'\(', Punctuation, 'child'),
(r'\s+', Text),
(r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
bygroups(Comment, String.Doc, Comment)),
(r'#[^\n]*?$', Comment),
            (r'<#', Comment.Multiline, 'multline'),
(r'@"\n', String.Heredoc, 'heredoc-double'),
(r"@'\n.*?\n'@", String.Heredoc),
# escaped syntax
(r'`[\'"$@-]', Punctuation),
(r'"', String.Double, 'string'),
(r"'([^']|'')*'", String.Single),
(r'(\$|@@|@)((global|script|private|env):)?\w+',
Name.Variable),
(r'(%s)\b' % '|'.join(keywords), Keyword),
(r'-(%s)\b' % '|'.join(operators), Operator),
(r'(%s)-[a-z_]\w*\b' % '|'.join(verbs), Name.Builtin),
(r'(%s)\s' % '|'.join(aliases_), Name.Builtin),
(r'\[[a-z_\[][\w. `,\[\]]*\]', Name.Constant), # .net [type]s
(r'-[a-z_]\w*', Name),
(r'\w+', Name),
(r'[.,;:@{}\[\]$()=+*/\\&%!~?^`|<>-]', Punctuation),
],
'child': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'multline': [
(r'[^#&.]+', Comment.Multiline),
            (r'#>', Comment.Multiline, '#pop'),
(r'\.(%s)' % '|'.join(commenthelp), String.Doc),
(r'[#&.]', Comment.Multiline),
],
'string': [
(r"`[0abfnrtv'\"$`]", String.Escape),
(r'[^$`"]+', String.Double),
(r'\$\(', Punctuation, 'child'),
(r'""', String.Double),
(r'[`$]', String.Double),
(r'"', String.Double, '#pop'),
],
'heredoc-double': [
(r'\n"@', String.Heredoc, '#pop'),
(r'\$\(', Punctuation, 'child'),
(r'[^@\n]+"]', String.Heredoc),
(r".", String.Heredoc),
]
}
class PowerShellSessionLexer(ShellSessionBaseLexer):
"""
Lexer for PowerShell sessions, i.e. command lines, including a
prompt, interspersed with output.
.. versionadded:: 2.1
"""
name = 'PowerShell Session'
aliases = ['pwsh-session', 'ps1con']
filenames = []
mimetypes = []
_innerLexerCls = PowerShellLexer
_bare_continuation = True
_ps1rgx = re.compile(r'^((?:\[[^]]+\]: )?PS[^>]*> ?)(.*\n?)')
_ps2 = '> '
class FishShellLexer(RegexLexer):
"""
Lexer for Fish shell scripts.
.. versionadded:: 2.1
"""
name = 'Fish'
aliases = ['fish', 'fishshell']
filenames = ['*.fish', '*.load']
mimetypes = ['application/x-fish']
tokens = {
'root': [
include('basic'),
include('data'),
include('interp'),
],
'interp': [
(r'\$\(\(', Keyword, 'math'),
(r'\(', Keyword, 'paren'),
(r'\$#?(\w+|.)', Name.Variable),
],
'basic': [
(r'\b(begin|end|if|else|while|break|for|in|return|function|block|'
r'case|continue|switch|not|and|or|set|echo|exit|pwd|true|false|'
r'cd|count|test)(\s*)\b',
bygroups(Keyword, Text)),
(r'\b(alias|bg|bind|breakpoint|builtin|command|commandline|'
r'complete|contains|dirh|dirs|emit|eval|exec|fg|fish|fish_config|'
r'fish_indent|fish_pager|fish_prompt|fish_right_prompt|'
r'fish_update_completions|fishd|funced|funcsave|functions|help|'
r'history|isatty|jobs|math|mimedb|nextd|open|popd|prevd|psub|'
r'pushd|random|read|set_color|source|status|trap|type|ulimit|'
r'umask|vared|fc|getopts|hash|kill|printf|time|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
(r'[\[\]()=]', Operator),
(r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double),
(r'"', String.Double, 'string'),
(r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r"(?s)'.*?'", String.Single),
(r';', Punctuation),
(r'&|\||\^|<|>', Operator),
(r'\s+', Text),
(r'\d+(?= |\Z)', Number),
(r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
],
'string': [
(r'"', String.Double, '#pop'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
include('interp'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+#\d+', Number),
(r'\d+#(?! )', Number),
(r'\d+', Number),
include('root'),
],
}
class ExeclineLexer(RegexLexer):
"""
Lexer for Laurent Bercot's execline language
(https://skarnet.org/software/execline).
.. versionadded:: 2.7
"""
name = 'execline'
aliases = ['execline']
filenames = ['*.exec']
tokens = {
'root': [
include('basic'),
include('data'),
include('interp')
],
'interp': [
(r'\$\{', String.Interpol, 'curly'),
(r'\$[\w@#]+', Name.Variable), # user variable
(r'\$', Text),
],
'basic': [
(r'\b(background|backtick|cd|define|dollarat|elgetopt|'
r'elgetpositionals|elglob|emptyenv|envfile|exec|execlineb|'
r'exit|export|fdblock|fdclose|fdmove|fdreserve|fdswap|'
r'forbacktickx|foreground|forstdin|forx|getcwd|getpid|heredoc|'
r'homeof|if|ifelse|ifte|ifthenelse|importas|loopwhilex|'
r'multidefine|multisubstitute|pipeline|piperw|posix-cd|'
r'redirfd|runblock|shift|trap|tryexec|umask|unexport|wait|'
r'withstdinas)\b', Name.Builtin),
(r'\A#!.+\n', Comment.Hashbang),
(r'#.*\n', Comment.Single),
(r'[{}]', Operator)
],
'data': [
(r'(?s)"(\\.|[^"\\$])*"', String.Double),
(r'"', String.Double, 'string'),
(r'\s+', Text),
(r'[^\s{}$"\\]+', Text)
],
'string': [
(r'"', String.Double, '#pop'),
(r'(?s)(\\\\|\\.|[^"\\$])+', String.Double),
include('interp'),
],
'curly': [
(r'\}', String.Interpol, '#pop'),
(r'[\w#@]+', Name.Variable),
include('root')
]
}
def analyse_text(text):
if shebang_matches(text, r'execlineb'):
return 1
| 36,344 | Python | 38.548422 | 87 | 0.455894 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/wowtoc.py | """
pygments.lexers.wowtoc
~~~~~~~~~~~~~~~~~~~~~~
Lexer for World of Warcraft TOC files
TOC files describe game addons.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Name, Text, Punctuation, String, Keyword
__all__ = ["WoWTocLexer"]
def _create_tag_line_pattern(inner_pattern, ignore_case=False):
return ((r"(?i)" if ignore_case else r"")
+ r"^(##)( *)" # groups 1, 2
+ inner_pattern # group 3
+ r"( *)(:)( *)(.*?)( *)$") # groups 4, 5, 6, 7, 8
def _create_tag_line_token(inner_pattern, inner_token, ignore_case=False):
    # This function template-izes the tag line for a specific type of tag, which will
    # have a different pattern and a different token. Otherwise, everything about a
    # tag line is the same.
return (
_create_tag_line_pattern(inner_pattern, ignore_case=ignore_case),
bygroups(
Keyword.Declaration,
Text.Whitespace,
inner_token,
Text.Whitespace,
Punctuation,
Text.Whitespace,
String,
Text.Whitespace,
),
)
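# As an illustration (hypothetical input, not taken from any real addon), a tag
# line such as
#
#     ## Interface: 90207
#
# is matched by the pattern built above: '##' becomes Keyword.Declaration, the
# tag name is highlighted with the inner_token passed in (Name.Builtin for the
# official tags below), ':' becomes Punctuation and the value '90207' becomes
# String, with the surrounding runs of spaces emitted as Text.Whitespace.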
class WoWTocLexer(RegexLexer):
"""
Lexer for World of Warcraft TOC files.
.. versionadded:: 2.14
"""
name = "World of Warcraft TOC"
aliases = ["wowtoc"]
filenames = ["*.toc"]
tokens = {
"root": [
# official localized tags, Notes and Title
# (normal part is insensitive, locale part is sensitive)
_create_tag_line_token(
r"((?:[nN][oO][tT][eE][sS]|[tT][iI][tT][lL][eE])-(?:ptBR|zhCN|"
r"enCN|frFR|deDE|itIT|esMX|ptPT|koKR|ruRU|esES|zhTW|enTW|enGB|enUS))",
Name.Builtin,
),
# other official tags
_create_tag_line_token(
r"(Interface|Title|Notes|RequiredDeps|Dep[^: ]*|OptionalDeps|"
r"LoadOnDemand|LoadWith|LoadManagers|SavedVariablesPerCharacter|"
r"SavedVariables|DefaultState|Secure|Author|Version)",
Name.Builtin,
ignore_case=True,
),
# user-defined tags
_create_tag_line_token(
r"(X-[^: ]*)",
Name.Variable,
ignore_case=True,
),
# non-conforming tags, but still valid
_create_tag_line_token(
r"([^: ]*)",
Name.Other,
),
# Comments
(r"^#.*$", Comment),
# Addon Files
(r"^.+$", Name),
]
}
def analyse_text(text):
        # At time of writing, this file suffix conflicts with one of Tex's in
        # markup.py. Tex's analyse_text() appears to be definitive (binary) and does not
        # share any likeness to WoW TOCs, which means we won't have to compete with it by
        # arbitrary increments in score.
result = 0
        # While not required, an almost certain marker of a WoW TOC is the Interface tag.
        # If this tag is omitted, players will need to opt in to loading the addon with
        # an options change ("Load out of date addons"). The value is also standardized:
        # `<major><minor><patch>`, with minor and patch being two-digit zero-padded.
interface_pattern = _create_tag_line_pattern(r"(Interface)", ignore_case=True)
match = re.search(interface_pattern, text)
if match and re.match(r"(\d+)(\d{2})(\d{2})", match.group(7)):
result += 0.8
casefolded = text.casefold()
        # A Lua file listing is a good marker too, but it probably conflicts with many
        # other lexers.
if ".lua" in casefolded:
result += 0.1
# ditto for XML files, but they're less used in WoW TOCs
if ".xml" in casefolded:
result += 0.05
return result
| 4,021 | Python | 32.239669 | 88 | 0.557324 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/apdlexer.py | """
pygments.lexers.apdlexer
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ANSYS Parametric Design Language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
String, Generic, Punctuation, Whitespace
__all__ = ['apdlexer']
class apdlexer(RegexLexer):
"""
For APDL source code.
.. versionadded:: 2.9
"""
name = 'ANSYS parametric design language'
aliases = ['ansys', 'apdl']
filenames = ['*.ans']
flags = re.IGNORECASE
# list of elements
elafunb = ("SURF152", "SURF153", "SURF154", "SURF156", "SHELL157",
"SURF159", "LINK160", "BEAM161", "PLANE162",
"SHELL163", "SOLID164", "COMBI165", "MASS166",
"LINK167", "SOLID168", "TARGE169", "TARGE170",
"CONTA171", "CONTA172", "CONTA173", "CONTA174",
"CONTA175", "CONTA176", "CONTA177", "CONTA178",
"PRETS179", "LINK180", "SHELL181", "PLANE182",
"PLANE183", "MPC184", "SOLID185", "SOLID186",
"SOLID187", "BEAM188", "BEAM189", "SOLSH190",
"INTER192", "INTER193", "INTER194", "INTER195",
"MESH200", "FOLLW201", "INTER202", "INTER203",
"INTER204", "INTER205", "SHELL208", "SHELL209",
"CPT212", "CPT213", "COMBI214", "CPT215", "CPT216",
"CPT217", "FLUID220", "FLUID221", "PLANE223",
"SOLID226", "SOLID227", "PLANE230", "SOLID231",
"SOLID232", "PLANE233", "SOLID236", "SOLID237",
"PLANE238", "SOLID239", "SOLID240", "HSFLD241",
"HSFLD242", "SURF251", "SURF252", "REINF263",
"REINF264", "REINF265", "SOLID272", "SOLID273",
"SOLID278", "SOLID279", "SHELL281", "SOLID285",
"PIPE288", "PIPE289", "ELBOW290", "USER300", "BEAM3",
"BEAM4", "BEAM23", "BEAM24", "BEAM44", "BEAM54",
"COMBIN7", "FLUID79", "FLUID80", "FLUID81", "FLUID141",
"FLUID142", "INFIN9", "INFIN47", "PLANE13", "PLANE25",
"PLANE42", "PLANE53", "PLANE67", "PLANE82", "PLANE83",
"PLANE145", "PLANE146", "CONTAC12", "CONTAC52",
"LINK1", "LINK8", "LINK10", "LINK32", "PIPE16",
"PIPE17", "PIPE18", "PIPE20", "PIPE59", "PIPE60",
"SHELL41", "SHELL43", "SHELL57", "SHELL63", "SHELL91",
"SHELL93", "SHELL99", "SHELL150", "SOLID5", "SOLID45",
"SOLID46", "SOLID65", "SOLID69", "SOLID92", "SOLID95",
"SOLID117", "SOLID127", "SOLID128", "SOLID147",
"SOLID148", "SOLID191", "VISCO88", "VISCO89",
"VISCO106", "VISCO107", "VISCO108", "TRANS109")
elafunc = ("PGRAPH", "/VT", "VTIN", "VTRFIL", "VTTEMP", "PGRSET",
"VTCLR", "VTMETH", "VTRSLT", "VTVMOD", "PGSELE",
"VTDISC", "VTMP", "VTSEC", "PGWRITE", "VTEVAL", "VTOP",
"VTSFE", "POUTRES", "VTFREQ", "VTPOST", "VTSL",
"FLDATA1-40", "HFPCSWP", "MSDATA", "MSVARY", "QFACT",
"FLOCHECK", "HFPOWER", "MSMASS", "PERI", "SPADP",
"FLREAD", "HFPORT", "MSMETH", "PLFSS", "SPARM",
"FLOTRAN", "HFSCAT", "MSMIR", "PLSCH", "SPFSS",
"HFADP", "ICE", "MSNOMF", "PLSYZ", "SPICE", "HFARRAY",
"ICEDELE", "MSPROP", "PLTD", "SPSCAN", "HFDEEM",
"ICELIST", "MSQUAD", "PLTLINE", "SPSWP", "HFEIGOPT",
"ICVFRC", "MSRELAX", "PLVFRC", "HFEREFINE", "LPRT",
"MSSOLU", "/PICE", "HFMODPRT", "MSADV", "MSSPEC",
"PLWAVE", "HFPA", "MSCAP", "MSTERM", "PRSYZ")
elafund = ("*VOPER", "VOVLAP", "*VPLOT", "VPLOT", "VPTN", "*VPUT",
"VPUT", "*VREAD", "VROTAT", "VSBA", "VSBV", "VSBW",
"/VSCALE", "*VSCFUN", "VSEL", "VSLA", "*VSTAT", "VSUM",
"VSWEEP", "VSYMM", "VTRAN", "VTYPE", "/VUP", "*VWRITE",
"/WAIT", "WAVES", "WERASE", "WFRONT", "/WINDOW",
"WMID", "WMORE", "WPAVE", "WPCSYS", "WPLANE", "WPOFFS",
"WPROTA", "WPSTYL", "WRFULL", "WRITE", "WRITEMAP",
"*WRK", "WSORT", "WSPRINGS", "WSTART", "WTBCREATE",
"XFDATA", "XFENRICH", "XFLIST", "/XFRM", "/XRANGE",
"XVAR", "/YRANGE", "/ZOOM", "/WB", "XMLO", "/XML",
"CNTR", "EBLOCK", "CMBLOCK", "NBLOCK", "/TRACK",
"CWZPLOT", "~EUI", "NELE", "EALL", "NALL", "FLITEM",
"LSLN", "PSOLVE", "ASLN", "/VERIFY", "/SSS", "~CFIN",
"*EVAL", "*MOONEY", "/RUNSTAT", "ALPFILL",
"ARCOLLAPSE", "ARDETACH", "ARFILL", "ARMERGE",
"ARSPLIT", "FIPLOT", "GAPFINISH", "GAPLIST",
"GAPMERGE", "GAPOPT", "GAPPLOT", "LNCOLLAPSE",
"LNDETACH", "LNFILL", "LNMERGE", "LNSPLIT", "PCONV",
"PLCONV", "PEMOPTS", "PEXCLUDE", "PINCLUDE", "PMETH",
"/PMETH", "PMOPTS", "PPLOT", "PPRANGE", "PRCONV",
"PRECISION", "RALL", "RFILSZ", "RITER", "RMEMRY",
"RSPEED", "RSTAT", "RTIMST", "/RUNST", "RWFRNT",
"SARPLOT", "SHSD", "SLPPLOT", "SLSPLOT", "VCVFILL",
"/OPT", "OPEQN", "OPFACT", "OPFRST", "OPGRAD",
"OPKEEP", "OPLOOP", "OPPRNT", "OPRAND", "OPSUBP",
"OPSWEEP", "OPTYPE", "OPUSER", "OPVAR", "OPADD",
"OPCLR", "OPDEL", "OPMAKE", "OPSEL", "OPANL", "OPDATA",
"OPRESU", "OPSAVE", "OPEXE", "OPLFA", "OPLGR",
"OPLIST", "OPLSW", "OPRFA", "OPRGR", "OPRSW",
"PILECALC", "PILEDISPSET", "PILEGEN", "PILELOAD",
"PILEMASS", "PILERUN", "PILESEL", "PILESTIF",
"PLVAROPT", "PRVAROPT", "TOCOMP", "TODEF", "TOFREQ",
"TOTYPE", "TOVAR", "TOEXE", "TOLOOP", "TOGRAPH",
"TOLIST", "TOPLOT", "TOPRINT", "TOSTAT", "TZAMESH",
"TZDELE", "TZEGEN", "XVAROPT", "PGSAVE", "SOLCONTROL",
"TOTAL", "VTGEOM", "VTREAL", "VTSTAT")
elafune = ("/ANUM", "AOFFST", "AOVLAP", "APLOT", "APPEND", "APTN",
"ARCLEN", "ARCTRM", "AREAS", "AREFINE", "AREMESH",
"AREVERSE", "AROTAT", "ARSCALE", "ARSYM", "ASBA",
"ASBL", "ASBV", "ASBW", "ASCRES", "ASEL", "ASIFILE",
"*ASK", "ASKIN", "ASLL", "ASLV", "ASOL", "/ASSIGN",
"ASUB", "ASUM", "ATAN", "ATRAN", "ATYPE", "/AUTO",
"AUTOTS", "/AUX2", "/AUX3", "/AUX12", "/AUX15",
"AVPRIN", "AVRES", "AWAVE", "/AXLAB", "*AXPY",
"/BATCH", "BCSOPTION", "BETAD", "BF", "BFA", "BFADELE",
"BFALIST", "BFCUM", "BFDELE", "BFE", "BFECUM",
"BFEDELE", "BFELIST", "BFESCAL", "BFINT", "BFK",
"BFKDELE", "BFKLIST", "BFL", "BFLDELE", "BFLIST",
"BFLLIST", "BFSCALE", "BFTRAN", "BFUNIF", "BFV",
"BFVDELE", "BFVLIST", "BIOOPT", "BIOT", "BLC4", "BLC5",
"BLOCK", "BOOL", "BOPTN", "BSAX", "BSMD", "BSM1",
"BSM2", "BSPLIN", "BSS1", "BSS2", "BSTE", "BSTQ",
"BTOL", "BUCOPT", "C", "CALC", "CAMPBELL", "CBDOF",
"CBMD", "CBMX", "CBTE", "CBTMP", "CDOPT", "CDREAD",
"CDWRITE", "CE", "CECHECK", "CECMOD", "CECYC",
"CEDELE", "CEINTF", "CELIST", "CENTER", "CEQN",
"CERIG", "CESGEN", "CFACT", "*CFCLOS", "*CFOPEN",
"*CFWRITE", "/CFORMAT", "CGLOC", "CGOMGA", "CGROW",
"CHECK", "CHKMSH", "CINT", "CIRCLE", "CISOL",
"/CLABEL", "/CLEAR", "CLOCAL", "CLOG", "/CLOG",
"CLRMSHLN", "CM", "CMACEL", "/CMAP", "CMATRIX",
"CMDELE", "CMDOMEGA", "CMEDIT", "CMGRP", "CMLIST",
"CMMOD", "CMOMEGA", "CMPLOT", "CMROTATE", "CMSEL",
"CMSFILE", "CMSOPT", "CMWRITE", "CNCHECK", "CNKMOD",
"CNTR", "CNVTOL", "/COLOR", "/COM", "*COMP", "COMBINE",
"COMPRESS", "CON4", "CONE", "/CONFIG", "CONJUG",
"/CONTOUR", "/COPY", "CORIOLIS", "COUPLE", "COVAL",
"CP", "CPCYC", "CPDELE", "CPINTF", "/CPLANE", "CPLGEN",
"CPLIST", "CPMERGE", "CPNGEN", "CPSGEN", "CQC",
"*CREATE", "CRPLIM", "CS", "CSCIR", "CSDELE", "CSKP",
"CSLIST", "CSWPLA", "CSYS", "/CTYPE", "CURR2D",
"CUTCONTROL", "/CVAL", "CVAR", "/CWD", "CYCCALC",
"/CYCEXPAND", "CYCFILES", "CYCFREQ", "*CYCLE",
"CYCLIC", "CYCOPT", "CYCPHASE", "CYCSPEC", "CYL4",
"CYL5", "CYLIND", "CZDEL", "CZMESH", "D", "DA",
"DADELE", "DALIST", "DAMORPH", "DATA", "DATADEF",
"DCGOMG", "DCUM", "DCVSWP", "DDASPEC", "DDELE",
"DDOPTION", "DEACT", "DEFINE", "*DEL", "DELETE",
"/DELETE", "DELTIM", "DEMORPH", "DERIV", "DESIZE",
"DESOL", "DETAB", "/DEVDISP", "/DEVICE", "/DFLAB",
"DFLX", "DFSWAVE", "DIG", "DIGIT", "*DIM",
"/DIRECTORY", "DISPLAY", "/DIST", "DJ", "DJDELE",
"DJLIST", "DK", "DKDELE", "DKLIST", "DL", "DLDELE",
"DLIST", "DLLIST", "*DMAT", "DMOVE", "DMPEXT",
"DMPOPTION", "DMPRAT", "DMPSTR", "DNSOL", "*DO", "DOF",
"DOFSEL", "DOMEGA", "*DOT", "*DOWHILE", "DSCALE",
"/DSCALE", "DSET", "DSPOPTION", "DSUM", "DSURF",
"DSYM", "DSYS", "DTRAN", "DUMP", "/DV3D", "DVAL",
"DVMORPH", "DYNOPT", "E", "EALIVE", "EDADAPT", "EDALE",
"EDASMP", "EDBOUND", "EDBX", "EDBVIS", "EDCADAPT",
"EDCGEN", "EDCLIST", "EDCMORE", "EDCNSTR", "EDCONTACT",
"EDCPU", "EDCRB", "EDCSC", "EDCTS", "EDCURVE",
"EDDAMP", "EDDBL", "EDDC", "EDDRELAX", "EDDUMP",
"EDELE", "EDENERGY", "EDFPLOT", "EDGCALE", "/EDGE",
"EDHGLS", "EDHIST", "EDHTIME", "EDINT", "EDIPART",
"EDIS", "EDLCS", "EDLOAD", "EDMP", "EDNB", "EDNDTSD",
"EDNROT", "EDOPT", "EDOUT", "EDPART", "EDPC", "EDPL",
"EDPVEL", "EDRC", "EDRD", "EDREAD", "EDRI", "EDRST",
"EDRUN", "EDSHELL", "EDSOLV", "EDSP", "EDSTART",
"EDTERM", "EDTP", "EDVEL", "EDWELD", "EDWRITE",
"EEXTRUDE", "/EFACET", "EGEN", "*EIGEN", "EINFIN",
"EINTF", "EKILL", "ELBOW", "ELEM", "ELIST", "*ELSE",
"*ELSEIF", "EMAGERR", "EMATWRITE", "EMF", "EMFT",
"EMID", "EMIS", "EMODIF", "EMORE", "EMSYM", "EMTGEN",
"EMUNIT", "EN", "*END", "*ENDDO", "*ENDIF",
"ENDRELEASE", "ENERSOL", "ENGEN", "ENORM", "ENSYM",
"EORIENT", "EPLOT", "EQSLV", "ERASE", "/ERASE",
"EREAD", "EREFINE", "EREINF", "ERESX", "ERNORM",
"ERRANG", "ESCHECK", "ESEL", "/ESHAPE", "ESIZE",
"ESLA", "ESLL", "ESLN", "ESLV", "ESOL", "ESORT",
"ESSOLV", "ESTIF", "ESURF", "ESYM", "ESYS", "ET",
"ETABLE", "ETCHG", "ETCONTROL", "ETDELE", "ETLIST",
"ETYPE", "EUSORT", "EWRITE", "*EXIT", "/EXIT", "EXP",
"EXPAND", "/EXPAND", "EXPASS", "*EXPORT", "EXPROFILE",
"EXPSOL", "EXTOPT", "EXTREM", "EXUNIT", "F", "/FACET",
"FATIGUE", "FC", "FCCHECK", "FCDELE", "FCLIST", "FCUM",
"FCTYP", "FDELE", "/FDELE", "FE", "FEBODY", "FECONS",
"FEFOR", "FELIST", "FESURF", "*FFT", "FILE",
"FILEAUX2", "FILEAUX3", "FILEDISP", "FILL", "FILLDATA",
"/FILNAME", "FINISH", "FITEM", "FJ", "FJDELE",
"FJLIST", "FK", "FKDELE", "FKLIST", "FL", "FLIST",
"FLLIST", "FLST", "FLUXV", "FLUREAD", "FMAGBC",
"FMAGSUM", "/FOCUS", "FOR2D", "FORCE", "FORM",
"/FORMAT", "FP", "FPLIST", "*FREE", "FREQ", "FRQSCL",
"FS", "FSCALE", "FSDELE", "FSLIST", "FSNODE", "FSPLOT",
"FSSECT", "FSSPARM", "FSUM", "FTCALC", "FTRAN",
"FTSIZE", "FTWRITE", "FTYPE", "FVMESH", "GAP", "GAPF",
"GAUGE", "GCDEF", "GCGEN", "/GCMD", "/GCOLUMN",
"GENOPT", "GEOM", "GEOMETRY", "*GET", "/GFILE",
"/GFORMAT", "/GLINE", "/GMARKER", "GMATRIX", "GMFACE",
"*GO", "/GO", "/GOLIST", "/GOPR", "GP", "GPDELE",
"GPLIST", "GPLOT", "/GRAPHICS", "/GRESUME", "/GRID",
"/GROPT", "GRP", "/GRTYP", "/GSAVE", "GSBDATA",
"GSGDATA", "GSLIST", "GSSOL", "/GST", "GSUM", "/GTHK",
"/GTYPE", "HARFRQ", "/HBC", "HBMAT", "/HEADER", "HELP",
"HELPDISP", "HEMIOPT", "HFANG", "HFSYM", "HMAGSOLV",
"HPGL", "HPTCREATE", "HPTDELETE", "HRCPLX", "HREXP",
"HROPT", "HROCEAN", "HROUT", "IC", "ICDELE", "ICLIST",
"/ICLWID", "/ICSCALE", "*IF", "IGESIN", "IGESOUT",
"/IMAGE", "IMAGIN", "IMESH", "IMMED", "IMPD",
"INISTATE", "*INIT", "/INPUT", "/INQUIRE", "INRES",
"INRTIA", "INT1", "INTSRF", "IOPTN", "IRLF", "IRLIST",
"*ITENGINE", "JPEG", "JSOL", "K", "KATT", "KBC",
"KBETW", "KCALC", "KCENTER", "KCLEAR", "KDELE",
"KDIST", "KEEP", "KESIZE", "KEYOPT", "KEYPTS", "KEYW",
"KFILL", "KGEN", "KL", "KLIST", "KMESH", "KMODIF",
"KMOVE", "KNODE", "KPLOT", "KPSCALE", "KREFINE",
"KSCALE", "KSCON", "KSEL", "KSLL", "KSLN", "KSUM",
"KSYMM", "KTRAN", "KUSE", "KWPAVE", "KWPLAN", "L",
"L2ANG", "L2TAN", "LANG", "LARC", "/LARC", "LAREA",
"LARGE", "LATT", "LAYER", "LAYERP26", "LAYLIST",
"LAYPLOT", "LCABS", "LCASE", "LCCALC", "LCCAT",
"LCDEF", "LCFACT", "LCFILE", "LCLEAR", "LCOMB",
"LCOPER", "LCSEL", "LCSL", "LCSUM", "LCWRITE",
"LCZERO", "LDELE", "LDIV", "LDRAG", "LDREAD", "LESIZE",
"LEXTND", "LFILLT", "LFSURF", "LGEN", "LGLUE",
"LGWRITE", "/LIGHT", "LINA", "LINE", "/LINE", "LINES",
"LINL", "LINP", "LINV", "LIST", "*LIST", "LLIST",
"LMATRIX", "LMESH", "LNSRCH", "LOCAL", "LOVLAP",
"LPLOT", "LPTN", "LREFINE", "LREVERSE", "LROTAT",
"LSBA", "*LSBAC", "LSBL", "LSBV", "LSBW", "LSCLEAR",
"LSDELE", "*LSDUMP", "LSEL", "*LSENGINE", "*LSFACTOR",
"LSLA", "LSLK", "LSOPER", "/LSPEC", "LSREAD",
"*LSRESTORE", "LSSCALE", "LSSOLVE", "LSTR", "LSUM",
"LSWRITE", "/LSYMBOL", "LSYMM", "LTAN", "LTRAN",
"LUMPM", "LVSCALE", "LWPLAN", "M", "MADAPT", "MAGOPT",
"MAGSOLV", "/MAIL", "MAP", "/MAP", "MAP2DTO3D",
"MAPSOLVE", "MAPVAR", "MASTER", "MAT", "MATER",
"MCHECK", "MDAMP", "MDELE", "MDPLOT", "MEMM", "/MENU",
"MESHING", "MFANALYSIS", "MFBUCKET", "MFCALC", "MFCI",
"MFCLEAR", "MFCMMAND", "MFCONV", "MFDTIME", "MFELEM",
"MFEM", "MFEXTER", "MFFNAME", "MFFR", "MFIMPORT",
"MFINTER", "MFITER", "MFLCOMM", "MFLIST", "MFMAP",
"MFORDER", "MFOUTPUT", "*MFOURI", "MFPSIMUL", "MFRC",
"MFRELAX", "MFRSTART", "MFSORDER", "MFSURFACE",
"MFTIME", "MFTOL", "*MFUN", "MFVOLUME", "MFWRITE",
"MGEN", "MIDTOL", "/MKDIR", "MLIST", "MMASS", "MMF",
"MODCONT", "MODE", "MODIFY", "MODMSH", "MODSELOPTION",
"MODOPT", "MONITOR", "*MOPER", "MOPT", "MORPH", "MOVE",
"MP", "MPAMOD", "MPCHG", "MPCOPY", "MPDATA", "MPDELE",
"MPDRES", "/MPLIB", "MPLIST", "MPPLOT", "MPREAD",
"MPRINT", "MPTEMP", "MPTGEN", "MPTRES", "MPWRITE",
"/MREP", "MSAVE", "*MSG", "MSHAPE", "MSHCOPY",
"MSHKEY", "MSHMID", "MSHPATTERN", "MSOLVE", "/MSTART",
"MSTOLE", "*MULT", "*MWRITE", "MXPAND", "N", "NANG",
"NAXIS", "NCNV", "NDELE", "NDIST", "NDSURF", "NEQIT",
"/NERR", "NFORCE", "NGEN", "NKPT", "NLADAPTIVE",
"NLDIAG", "NLDPOST", "NLGEOM", "NLHIST", "NLIST",
"NLMESH", "NLOG", "NLOPT", "NMODIF", "NOCOLOR",
"NODES", "/NOERASE", "/NOLIST", "NOOFFSET", "NOORDER",
"/NOPR", "NORA", "NORL", "/NORMAL", "NPLOT", "NPRINT",
"NREAD", "NREFINE", "NRLSUM", "*NRM", "NROPT",
"NROTAT", "NRRANG", "NSCALE", "NSEL", "NSLA", "NSLE",
"NSLK", "NSLL", "NSLV", "NSMOOTH", "NSOL", "NSORT",
"NSTORE", "NSUBST", "NSVR", "NSYM", "/NUMBER",
"NUMCMP", "NUMEXP", "NUMMRG", "NUMOFF", "NUMSTR",
"NUMVAR", "NUSORT", "NWPAVE", "NWPLAN", "NWRITE",
"OCDATA", "OCDELETE", "OCLIST", "OCREAD", "OCTABLE",
"OCTYPE", "OCZONE", "OMEGA", "OPERATE", "OPNCONTROL",
"OUTAERO", "OUTOPT", "OUTPR", "/OUTPUT", "OUTRES",
"OVCHECK", "PADELE", "/PAGE", "PAGET", "PAPUT",
"PARESU", "PARTSEL", "PARRES", "PARSAV", "PASAVE",
"PATH", "PAUSE", "/PBC", "/PBF", "PCALC", "PCGOPT",
"PCIRC", "/PCIRCLE", "/PCOPY", "PCROSS", "PDANL",
"PDCDF", "PDCFLD", "PDCLR", "PDCMAT", "PDCORR",
"PDDMCS", "PDDOEL", "PDEF", "PDEXE", "PDHIST",
"PDINQR", "PDLHS", "PDMETH", "PDOT", "PDPINV",
"PDPLOT", "PDPROB", "PDRESU", "PDROPT", "/PDS",
"PDSAVE", "PDSCAT", "PDSENS", "PDSHIS", "PDUSER",
"PDVAR", "PDWRITE", "PERBC2D", "PERTURB", "PFACT",
"PHYSICS", "PIVCHECK", "PLCAMP", "PLCFREQ", "PLCHIST",
"PLCINT", "PLCPLX", "PLCRACK", "PLDISP", "PLESOL",
"PLETAB", "PLFAR", "PLF2D", "PLGEOM", "PLLS", "PLMAP",
"PLMC", "PLNEAR", "PLNSOL", "/PLOPTS", "PLORB", "PLOT",
"PLOTTING", "PLPAGM", "PLPATH", "PLSECT", "PLST",
"PLTIME", "PLTRAC", "PLVAR", "PLVECT", "PLZZ",
"/PMACRO", "PMAP", "PMGTRAN", "PMLOPT", "PMLSIZE",
"/PMORE", "PNGR", "/PNUM", "POINT", "POLY", "/POLYGON",
"/POST1", "/POST26", "POWERH", "PPATH", "PRANGE",
"PRAS", "PRCAMP", "PRCINT", "PRCPLX", "PRED",
"PRENERGY", "/PREP7", "PRERR", "PRESOL", "PRETAB",
"PRFAR", "PRI2", "PRIM", "PRINT", "*PRINT", "PRISM",
"PRITER", "PRJSOL", "PRNEAR", "PRNLD", "PRNSOL",
"PROD", "PRORB", "PRPATH", "PRRFOR", "PRRSOL",
"PRSCONTROL", "PRSECT", "PRTIME", "PRVAR", "PRVECT",
"PSCONTROL", "PSCR", "PSDCOM", "PSDFRQ", "PSDGRAPH",
"PSDRES", "PSDSPL", "PSDUNIT", "PSDVAL", "PSDWAV",
"/PSEARCH", "PSEL", "/PSF", "PSMAT", "PSMESH",
"/PSPEC", "/PSTATUS", "PSTRES", "/PSYMB", "PTR",
"PTXY", "PVECT", "/PWEDGE", "QDVAL", "QRDOPT", "QSOPT",
"QUAD", "/QUIT", "QUOT", "R", "RACE", "RADOPT",
"RAPPND", "RATE", "/RATIO", "RBE3", "RCON", "RCYC",
"RDEC", "RDELE", "READ", "REAL", "REALVAR", "RECTNG",
"REMESH", "/RENAME", "REORDER", "*REPEAT", "/REPLOT",
"RESCOMBINE", "RESCONTROL", "RESET", "/RESET", "RESP",
"RESUME", "RESVEC", "RESWRITE", "*RETURN", "REXPORT",
"REZONE", "RFORCE", "/RGB", "RIGID", "RIGRESP",
"RIMPORT", "RLIST", "RMALIST", "RMANL", "RMASTER",
"RMCAP", "RMCLIST", "/RMDIR", "RMFLVEC", "RMLVSCALE",
"RMMLIST", "RMMRANGE", "RMMSELECT", "RMNDISP",
"RMNEVEC", "RMODIF", "RMORE", "RMPORDER", "RMRESUME",
"RMRGENERATE", "RMROPTIONS", "RMRPLOT", "RMRSTATUS",
"RMSAVE", "RMSMPLE", "RMUSE", "RMXPORT", "ROCK",
"ROSE", "RPOLY", "RPR4", "RPRISM", "RPSD", "RSFIT",
"RSOPT", "RSPLIT", "RSPLOT", "RSPRNT", "RSSIMS",
"RSTMAC", "RSTOFF", "RSURF", "RSYMM", "RSYS", "RTHICK",
"SABS", "SADD", "SALLOW", "SAVE", "SBCLIST", "SBCTRAN",
"SDELETE", "SE", "SECCONTROL", "SECDATA",
"SECFUNCTION", "SECJOINT", "/SECLIB", "SECLOCK",
"SECMODIF", "SECNUM", "SECOFFSET", "SECPLOT",
"SECREAD", "SECSTOP", "SECTYPE", "SECWRITE", "SED",
"SEDLIST", "SEEXP", "/SEG", "SEGEN", "SELIST", "SELM",
"SELTOL", "SENERGY", "SEOPT", "SESYMM", "*SET", "SET",
"SETFGAP", "SETRAN", "SEXP", "SF", "SFA", "SFACT",
"SFADELE", "SFALIST", "SFBEAM", "SFCALC", "SFCUM",
"SFDELE", "SFE", "SFEDELE", "SFELIST", "SFFUN",
"SFGRAD", "SFL", "SFLDELE", "SFLEX", "SFLIST",
"SFLLIST", "SFSCALE", "SFTRAN", "/SHADE", "SHELL",
"/SHOW", "/SHOWDISP", "SHPP", "/SHRINK", "SLIST",
"SLOAD", "SMALL", "*SMAT", "SMAX", "/SMBC", "SMBODY",
"SMCONS", "SMFOR", "SMIN", "SMOOTH", "SMRTSIZE",
"SMSURF", "SMULT", "SNOPTION", "SOLU", "/SOLU",
"SOLUOPT", "SOLVE", "SORT", "SOURCE", "SPACE",
"SPCNOD", "SPCTEMP", "SPDAMP", "SPEC", "SPFREQ",
"SPGRAPH", "SPH4", "SPH5", "SPHERE", "SPLINE", "SPLOT",
"SPMWRITE", "SPOINT", "SPOPT", "SPREAD", "SPTOPT",
"SPOWER", "SPUNIT", "SPVAL", "SQRT", "*SREAD", "SRSS",
"SSBT", "/SSCALE", "SSLN", "SSMT", "SSPA", "SSPB",
"SSPD", "SSPE", "SSPM", "SSUM", "SSTATE", "STABILIZE",
"STAOPT", "STAT", "*STATUS", "/STATUS", "STEF",
"/STITLE", "STORE", "SUBOPT", "SUBSET", "SUCALC",
"SUCR", "SUDEL", "SUEVAL", "SUGET", "SUMAP", "SUMTYPE",
"SUPL", "SUPR", "SURESU", "SUSAVE", "SUSEL", "SUVECT",
"SV", "SVPLOT", "SVTYP", "SWADD", "SWDEL", "SWGEN",
"SWLIST", "SYNCHRO", "/SYP", "/SYS", "TALLOW",
"TARGET", "*TAXIS", "TB", "TBCOPY", "TBDATA", "TBDELE",
"TBEO", "TBIN", "TBFIELD", "TBFT", "TBLE", "TBLIST",
"TBMODIF", "TBPLOT", "TBPT", "TBTEMP", "TCHG", "/TEE",
"TERM", "THEXPAND", "THOPT", "TIFF", "TIME",
"TIMERANGE", "TIMINT", "TIMP", "TINTP", "/TITLE",
"/TLABEL", "TOFFST", "*TOPER", "TORQ2D", "TORQC2D",
"TORQSUM", "TORUS", "TRANS", "TRANSFER", "*TREAD",
"TREF", "/TRIAD", "/TRLCY", "TRNOPT", "TRPDEL",
"TRPLIS", "TRPOIN", "TRTIME", "TSHAP", "/TSPEC",
"TSRES", "TUNIF", "TVAR", "/TXTRE", "/TYPE", "TYPE",
"/UCMD", "/UDOC", "/UI", "UIMP", "/UIS", "*ULIB",
"UNDELETE", "UNDO", "/UNITS", "UNPAUSE", "UPCOORD",
"UPGEOM", "*USE", "/USER", "USRCAL", "USRDOF",
"USRELEM", "V", "V2DOPT", "VA", "*VABS", "VADD",
"VARDEL", "VARNAM", "VATT", "VCLEAR", "*VCOL",
"/VCONE", "VCROSS", "*VCUM", "VDDAM", "VDELE", "VDGL",
"VDOT", "VDRAG", "*VEC", "*VEDIT", "VEORIENT", "VEXT",
"*VFACT", "*VFILL", "VFOPT", "VFQUERY", "VFSM",
"*VFUN", "VGEN", "*VGET", "VGET", "VGLUE", "/VIEW",
"VIMP", "VINP", "VINV", "*VITRP", "*VLEN", "VLIST",
"VLSCALE", "*VMASK", "VMESH", "VOFFST", "VOLUMES")
# list of in-built () functions
elafunf = ("NX()", "NY()", "NZ()", "KX()", "KY()", "KZ()", "LX()",
"LY()", "LZ()", "LSX()", "LSY()", "LSZ()", "NODE()",
"KP()", "DISTND()", "DISTKP()", "DISTEN()", "ANGLEN()",
"ANGLEK()", "NNEAR()", "KNEAR()", "ENEARN()",
"AREAND()", "AREAKP()", "ARNODE()", "NORMNX()",
"NORMNY()", "NORMNZ()", "NORMKX()", "NORMKY()",
"NORMKZ()", "ENEXTN()", "NELEM()", "NODEDOF()",
"ELADJ()", "NDFACE()", "NMFACE()", "ARFACE()", "UX()",
"UY()", "UZ()", "ROTX()", "ROTY()", "ROTZ()", "TEMP()",
"PRES()", "VX()", "VY()", "VZ()", "ENKE()", "ENDS()",
"VOLT()", "MAG()", "AX()", "AY()", "AZ()",
"VIRTINQR()", "KWGET()", "VALCHR()", "VALHEX()",
"CHRHEX()", "STRFILL()", "STRCOMP()", "STRPOS()",
"STRLENG()", "UPCASE()", "LWCASE()", "JOIN()",
"SPLIT()", "ABS()", "SIGN()", "CXABS()", "EXP()",
"LOG()", "LOG10()", "SQRT()", "NINT()", "MOD()",
"RAND()", "GDIS()", "SIN()", "COS()", "TAN()",
"SINH()", "COSH()", "TANH()", "ASIN()", "ACOS()",
"ATAN()", "ATAN2()")
elafung = ("NSEL()", "ESEL()", "KSEL()", "LSEL()", "ASEL()",
"VSEL()", "NDNEXT()", "ELNEXT()", "KPNEXT()",
"LSNEXT()", "ARNEXT()", "VLNEXT()", "CENTRX()",
"CENTRY()", "CENTRZ()")
elafunh = ("~CAT5IN", "~CATIAIN", "~PARAIN", "~PROEIN", "~SATIN",
"~UGIN", "A", "AADD", "AATT", "ABEXTRACT", "*ABBR",
"ABBRES", "ABBSAV", "ABS", "ACCAT", "ACCOPTION",
"ACEL", "ACLEAR", "ADAMS", "ADAPT", "ADD", "ADDAM",
"ADELE", "ADGL", "ADRAG", "AESIZE", "AFILLT", "AFLIST",
"AFSURF", "*AFUN", "AGEN", "AGLUE", "AINA", "AINP",
"AINV", "AL", "ALIST", "ALLSEL", "ALPHAD", "AMAP",
"AMESH", "/AN3D", "ANCNTR", "ANCUT", "ANCYC", "ANDATA",
"ANDSCL", "ANDYNA", "/ANFILE", "ANFLOW", "/ANGLE",
"ANHARM", "ANIM", "ANISOS", "ANMODE", "ANMRES",
"/ANNOT", "ANORM", "ANPRES", "ANSOL", "ANSTOAQWA",
"ANSTOASAS", "ANTIME", "ANTYPE")
tokens = {
'root': [
(r'!.*\n', Comment),
include('strings'),
include('core'),
include('nums'),
(words((elafunb+elafunc+elafund+elafune+elafunh), suffix=r'\b'), Keyword),
(words((elafunf+elafung), suffix=r'\b'), Name.Builtin),
(r'AR[0-9]+', Name.Variable.Instance),
(r'[a-z][a-z0-9_]*', Name.Variable),
(r'[\s]+', Whitespace),
],
'core': [
# Operators
(r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
(r'/EOF', Generic.Emph),
(r'[(),:&;]', Punctuation),
],
'strings': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'[$%]', String.Symbol),
],
'nums': [
(r'\d+(?![.ef])', Number.Integer),
(r'[+-]?\d*\.?\d+([ef][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.?\d*([ef][-+]?\d+)?', Number.Float),
]
}
| 26,654 | Python | 58.497768 | 86 | 0.441172 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/factor.py | """
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Whitespace, Punctuation
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
"""
Lexer for the Factor language.
.. versionadded:: 1.4
"""
name = 'Factor'
url = 'http://factorcode.org'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
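    # Builtin word lists, grouped by Factor vocabulary; each entry is matched with
    # a trailing-whitespace suffix so that only complete words are highlighted.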
builtin_kernel = words((
'-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
'2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
'3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
'?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
'wrapper', 'wrapper?', 'xor'), suffix=r'(\s+)')
builtin_assocs = words((
'2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'(\s+)')
builtin_combinators = words((
'2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
'4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'(\s+)')
builtin_math = words((
'-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
'>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
'(each-integer)', '(find-integer)', '*', '+', '?1+',
'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
'zero?'), suffix=r'(\s+)')
builtin_sequences = words((
'1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
'2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
'3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
'?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
'assert-sequence', 'assert-sequence=', 'assert-sequence?',
'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
'non-negative-integer-expected', 'non-negative-integer-expected?',
'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
'when-empty'), suffix=r'(\s+)')
builtin_namespaces = words((
'+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
suffix=r'(\s+)')
builtin_arrays = words((
'1array', '2array', '3array', '4array', '<array>', '>array', 'array',
'array?', 'pair', 'pair?', 'resize-array'), suffix=r'(\s+)')
builtin_io = words((
'(each-stream-block-slice)', '(each-stream-block)',
'(stream-contents-by-block)', '(stream-contents-by-element)',
'(stream-contents-by-length-or-block)',
'(stream-contents-by-length)', '+byte+', '+character+',
'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
'error-stream', 'flush', 'input-stream', 'input-stream?',
'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
'output-stream', 'output-stream?', 'print', 'read', 'read-into',
'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
'stream-copy*', 'stream-element-type', 'stream-flush',
'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
'stream-read', 'stream-read-into', 'stream-read-partial',
'stream-read-partial-into', 'stream-read-partial-unsafe',
'stream-read-unsafe', 'stream-read-until', 'stream-read1',
'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
'stream-write', 'stream-write1', 'tell-input', 'tell-output',
'with-error-stream', 'with-error-stream*', 'with-error>output',
'with-input-output+error-streams',
'with-input-output+error-streams*', 'with-input-stream',
'with-input-stream*', 'with-output-stream', 'with-output-stream*',
'with-output>error', 'with-output+error-stream',
'with-output+error-stream*', 'with-streams', 'with-streams*',
'write', 'write1'), suffix=r'(\s+)')
builtin_strings = words((
'1string', '<string>', '>string', 'resize-string', 'string',
'string?'), suffix=r'(\s+)')
builtin_vectors = words((
'1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
suffix=r'(\s+)')
builtin_continuations = words((
'<condition>', '<continuation>', '<restart>', 'attempt-all',
'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
'condition?', 'continuation', 'continuation?', 'continue',
'continue-restart', 'continue-with', 'current-continuation',
'error', 'error-continuation', 'error-in-thread', 'error-thread',
'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
'throw-restarts', 'with-datastack', 'with-return'), suffix=r'(\s+)')
tokens = {
'root': [
# factor allows a file to start with a shebang
(r'#!.*$', Comment.Preproc),
default('base'),
],
'base': [
(r'\s+', Whitespace),
# defining words
(r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function)),
(r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace,
Name.Function)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function, Whitespace,
Name.Class)),
(r'(GENERIC:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function)),
(r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function, Whitespace,
Name.Function)),
(r'(\()(\s)', bygroups(Name.Function, Whitespace), 'stackeffect'),
(r'(;)(\s)', bygroups(Keyword, Whitespace)),
# imports and namespaces
(r'(USING:)(\s+)',
bygroups(Keyword.Namespace, Whitespace), 'vocabs'),
(r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
Whitespace, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
Whitespace), 'words'),
(r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+)(=>)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
Name.Namespace, Whitespace, Punctuation, Whitespace,
Name.Function)),
(r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
Name.Function)),
(r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Function)),
# tuples and classes
(r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Punctuation,
Whitespace, Name.Class), 'slots'),
(r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class), 'slots'),
(r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace,
Punctuation, Whitespace, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function, Whitespace, Name.Class)),
(r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Name.Class)),
(r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Function)),
(r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
(r'SINGLETONS:', Keyword, 'classes'),
# other syntax
(r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function)),
(r'(SYMBOLS:)(\s+)', bygroups(Keyword, Whitespace), 'words'),
(r'(SYNTAX:)(\s+)', bygroups(Keyword, Whitespace)),
(r'(ALIEN:)(\s+)', bygroups(Keyword, Whitespace)),
(r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
(r'(FUNCTION:)(\s+)'
r'(\S+)(\s+)(\S+)(\s+)'
r'(\()(\s+)([^)]+)(\))(\s)',
bygroups(Keyword.Namespace, Whitespace,
Text, Whitespace, Name.Function, Whitespace,
Punctuation, Whitespace, Text, Punctuation, Whitespace)),
(r'(FUNCTION-ALIAS:)(\s+)'
r'(\S+)(\s+)(\S+)(\s+)'
r'(\S+)(\s+)'
r'(\()(\s+)([^)]+)(\))(\s)',
bygroups(Keyword.Namespace, Whitespace,
Text, Whitespace, Name.Function, Whitespace,
Name.Function, Whitespace,
Punctuation, Whitespace, Text, Punctuation, Whitespace)),
# vocab.private
(r'(<PRIVATE|PRIVATE>)(\s)', bygroups(Keyword.Namespace, Whitespace)),
# strings
(r'"""\s(?:.|\n)*?\s"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'(\S+")(\s+)((?:\\\\|\\"|[^"])*")',
bygroups(String, Whitespace, String)),
(r'(CHAR:)(\s+)(\\[\\abfnrstv]|[^\\]\S*)(\s)',
bygroups(String.Char, Whitespace, String.Char, Whitespace)),
# comments
(r'!\s+.*$', Comment),
(r'#!\s+.*$', Comment),
(r'/\*\s+(?:.|\n)*?\s\*/', Comment),
# boolean constants
(r'[tf]\b', Name.Constant),
# symbols and literals
(r'[\\$]\s+\S+', Name.Constant),
(r'M\\\s+\S+\s+\S+', Name.Constant),
# numbers
(r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
(r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
(r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'0b[01]+\s', Number.Bin),
(r'0o[0-7]+\s', Number.Oct),
(r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
(r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
Keyword),
# builtins
(builtin_kernel, bygroups(Name.Builtin, Whitespace)),
(builtin_assocs, bygroups(Name.Builtin, Whitespace)),
(builtin_combinators, bygroups(Name.Builtin, Whitespace)),
(builtin_math, bygroups(Name.Builtin, Whitespace)),
(builtin_sequences, bygroups(Name.Builtin, Whitespace)),
(builtin_namespaces, bygroups(Name.Builtin, Whitespace)),
(builtin_arrays, bygroups(Name.Builtin, Whitespace)),
(builtin_io, bygroups(Name.Builtin, Whitespace)),
(builtin_strings, bygroups(Name.Builtin, Whitespace)),
(builtin_vectors, bygroups(Name.Builtin, Whitespace)),
(builtin_continuations, bygroups(Name.Builtin, Whitespace)),
# everything else is text
(r'\S+', Text),
],
'stackeffect': [
(r'\s+', Whitespace),
(r'(\()(\s+)', bygroups(Name.Function, Whitespace), 'stackeffect'),
(r'(\))(\s+)', bygroups(Name.Function, Whitespace), '#pop'),
(r'(--)(\s+)', bygroups(Name.Function, Whitespace)),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'(\{)(\s+)(\S+)(\s+)([^}]+)(\s+)(\})(\s+)',
bygroups(Text, Whitespace, Name.Variable, Whitespace,
Text, Whitespace, Text, Whitespace)),
(r'\S+', Name.Variable),
],
'vocabs': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'\S+', Name.Namespace),
],
'classes': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'\S+', Name.Class),
],
'words': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'\S+', Name.Function),
],
}
| 19,531 | Python | 52.512329 | 93 | 0.515027 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/scdoc.py | """
pygments.lexers.scdoc
~~~~~~~~~~~~~~~~~~~~~
Lexer for scdoc, a simple man page generator.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this
from pygments.token import Text, Comment, Keyword, String, Generic
__all__ = ['ScdocLexer']
class ScdocLexer(RegexLexer):
"""
`scdoc` is a simple man page generator for POSIX systems written in C99.
.. versionadded:: 2.5
"""
name = 'scdoc'
url = 'https://git.sr.ht/~sircmpwn/scdoc'
aliases = ['scdoc', 'scd']
filenames = ['*.scd', '*.scdoc']
flags = re.MULTILINE
tokens = {
'root': [
# comment
(r'^(;.+\n)', bygroups(Comment)),
# heading with pound prefix
(r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)),
(r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)),
# bulleted lists
(r'^(\s*)([*-])(\s)(.+\n)',
bygroups(Text, Keyword, Text, using(this, state='inline'))),
# numbered lists
(r'^(\s*)(\.+\.)( .+\n)',
bygroups(Text, Keyword, using(this, state='inline'))),
# quote
(r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
# text block
(r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
include('inline'),
],
'inline': [
# escape
(r'\\.', Text),
# underlines
(r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)),
# bold
(r'(\s)(\*[^*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)),
# inline code
(r'`[^`]+`', String.Backtick),
# general text, must come last!
(r'[^\\\s]+', Text),
(r'.', Text),
],
}
def analyse_text(text):
"""This is very similar to markdown, save for the escape characters
needed for * and _."""
result = 0
if '\\*' in text:
result += 0.01
if '\\_' in text:
result += 0.01
return result
| 2,239 | Python | 27 | 78 | 0.472086 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/varnish.py | """
pygments.lexers.varnish
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Varnish configuration
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, using, this, \
inherit, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Whitespace
__all__ = ['VCLLexer', 'VCLSnippetLexer']
class VCLLexer(RegexLexer):
"""
For Varnish Configuration Language (VCL).
.. versionadded:: 2.2
"""
name = 'VCL'
aliases = ['vcl']
filenames = ['*.vcl']
mimetypes = ['text/x-vclsrc']
def analyse_text(text):
# If the very first line is 'vcl 4.0;' it's pretty much guaranteed
# that this is VCL
if text.startswith('vcl 4.0;'):
return 1.0
# Skip over comments and blank lines
# This is accurate enough that returning 0.9 is reasonable.
# Almost no VCL files start without some comments.
elif '\nvcl 4.0;' in text[:1000]:
return 0.9
tokens = {
'probe': [
include('whitespace'),
include('comments'),
(r'(\.\w+)(\s*=\s*)([^;]*)(;)',
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
(r'\}', Punctuation, '#pop'),
],
'acl': [
include('whitespace'),
include('comments'),
(r'[!/]+', Operator),
(r';', Punctuation),
(r'\d+', Number),
(r'\}', Punctuation, '#pop'),
],
'backend': [
include('whitespace'),
(r'(\.probe)(\s*=\s*)(\w+)(;)',
bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
(r'(\.probe)(\s*=\s*)(\{)',
bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
(r'(\.\w+\b)(\s*=\s*)([^;\s]*)(\s*;)',
bygroups(Name.Attribute, Operator, using(this), Punctuation)),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'statements': [
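            # duration literals (e.g. 5s, 2h, 0.5ms)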
(r'(\d\.)?\d+[sdwhmy]', Literal.Date),
(r'(\d\.)?\d+ms', Literal.Date),
(r'(vcl_pass|vcl_hash|vcl_hit|vcl_init|vcl_backend_fetch|vcl_pipe|'
r'vcl_backend_response|vcl_synth|vcl_deliver|vcl_backend_error|'
r'vcl_fini|vcl_recv|vcl_purge|vcl_miss)\b', Name.Function),
(r'(pipe|retry|hash|synth|deliver|purge|abandon|lookup|pass|fail|ok|'
r'miss|fetch|restart)\b', Name.Constant),
(r'(beresp|obj|resp|req|req_top|bereq)\.http\.[a-zA-Z_-]+\b', Name.Variable),
(words((
'obj.status', 'req.hash_always_miss', 'beresp.backend', 'req.esi_level',
'req.can_gzip', 'beresp.ttl', 'obj.uncacheable', 'req.ttl', 'obj.hits',
'client.identity', 'req.hash_ignore_busy', 'obj.reason', 'req.xid',
'req_top.proto', 'beresp.age', 'obj.proto', 'obj.age', 'local.ip',
'beresp.uncacheable', 'req.method', 'beresp.backend.ip', 'now',
'obj.grace', 'req.restarts', 'beresp.keep', 'req.proto', 'resp.proto',
'bereq.xid', 'bereq.between_bytes_timeout', 'req.esi',
'bereq.first_byte_timeout', 'bereq.method', 'bereq.connect_timeout',
'beresp.do_gzip', 'resp.status', 'beresp.do_gunzip',
'beresp.storage_hint', 'resp.is_streaming', 'beresp.do_stream',
'req_top.method', 'bereq.backend', 'beresp.backend.name', 'beresp.status',
'req.url', 'obj.keep', 'obj.ttl', 'beresp.reason', 'bereq.retries',
'resp.reason', 'bereq.url', 'beresp.do_esi', 'beresp.proto', 'client.ip',
'bereq.proto', 'server.hostname', 'remote.ip', 'req.backend_hint',
'server.identity', 'req_top.url', 'beresp.grace', 'beresp.was_304',
'server.ip', 'bereq.uncacheable'), suffix=r'\b'),
Name.Variable),
(r'[!%&+*\-,/<.}{>=|~]+', Operator),
(r'[();]', Punctuation),
(r'[,]+', Punctuation),
(words(('hash_data', 'regsub', 'regsuball', 'if', 'else',
'elsif', 'elif', 'synth', 'synthetic', 'ban',
'return', 'set', 'unset', 'import', 'include', 'new',
'rollback', 'call'), suffix=r'\b'),
Keyword),
(r'storage\.\w+\.\w+\b', Name.Variable),
(words(('true', 'false')), Name.Builtin),
(r'\d+\b', Number),
(r'(backend)(\s+\w+)(\s*\{)',
bygroups(Keyword, Name.Variable.Global, Punctuation), 'backend'),
(r'(probe\s)(\s*\w+\s)(\{)',
bygroups(Keyword, Name.Variable.Global, Punctuation), 'probe'),
(r'(acl\s)(\s*\w+\s)(\{)',
bygroups(Keyword, Name.Variable.Global, Punctuation), 'acl'),
(r'(vcl )(4.0)(;)$',
bygroups(Keyword.Reserved, Name.Constant, Punctuation)),
(r'(sub\s+)([a-zA-Z]\w*)(\s*\{)',
bygroups(Keyword, Name.Function, Punctuation)),
(r'([a-zA-Z_]\w*)'
r'(\.)'
r'([a-zA-Z_]\w*)'
r'(\s*\(.*\))',
bygroups(Name.Function, Punctuation, Name.Function, using(this))),
(r'[a-zA-Z_]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'comments': [
(r'#.*$', Comment),
(r'/\*', Comment.Multiline, 'comment'),
(r'//.*$', Comment),
],
'string': [
(r'"', String, '#pop'),
(r'[^"\n]+', String), # all other characters
],
'multistring': [
(r'[^"}]', String),
(r'"\}', String, '#pop'),
(r'["}]', String),
],
'whitespace': [
(r'L?"', String, 'string'),
(r'\{"', String, 'multistring'),
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'\\\n', Text), # line continuation
],
'root': [
include('whitespace'),
include('comments'),
include('statements'),
(r'\s+', Whitespace),
],
}
class VCLSnippetLexer(VCLLexer):
"""
For Varnish Configuration Language snippets.
.. versionadded:: 2.2
"""
name = 'VCLSnippets'
aliases = ['vclsnippets', 'vclsnippet']
mimetypes = ['text/x-vclsnippet']
filenames = []
def analyse_text(text):
# override method inherited from VCLLexer
return 0
tokens = {
'snippetspre': [
(r'\.\.\.+', Comment),
(r'(bereq|req|req_top|resp|beresp|obj|client|server|local|remote|'
r'storage)($|\.\*)', Name.Variable),
],
'snippetspost': [
(r'(backend)\b', Keyword.Reserved),
],
'root': [
include('snippetspre'),
inherit,
include('snippetspost'),
],
}
| 7,273 | Python | 37.28421 | 90 | 0.47917 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/elpi.py | """
pygments.lexers.elpi
~~~~~~~~~~~~~~~~~~~~
Lexer for the `Elpi <http://github.com/LPCIC/elpi>`_ programming language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
__all__ = ['ElpiLexer']
class ElpiLexer(RegexLexer):
"""
Lexer for the Elpi programming language.
.. versionadded:: 2.11
"""
name = 'Elpi'
url = 'http://github.com/LPCIC/elpi'
aliases = ['elpi']
filenames = ['*.elpi']
mimetypes = ['text/x-elpi']
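    # Building-block character classes used to assemble the identifier, constant
    # and symbol regexes below.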
lcase_re = r"[a-z]"
ucase_re = r"[A-Z]"
digit_re = r"[0-9]"
schar2_re = r"([+*^?/<>`'@#~=&!])"
schar_re = r"({}|-|\$|_)".format(schar2_re)
idchar_re = r"({}|{}|{}|{})".format(lcase_re,ucase_re,digit_re,schar_re)
idcharstarns_re = r"({}*(\.({}|{}){}*)*)".format(idchar_re, lcase_re, ucase_re, idchar_re)
symbchar_re = r"({}|{}|{}|{}|:)".format(lcase_re, ucase_re, digit_re, schar_re)
constant_re = r"({}{}*|{}{}|{}{}*|_{}+)".format(ucase_re, idchar_re, lcase_re, idcharstarns_re, schar2_re, symbchar_re, idchar_re)
symbol_re = r"(,|<=>|->|:-|;|\?-|->|&|=>|\bas\b|\buvar\b|<|=<|=|==|>=|>|\bi<|\bi=<|\bi>=|\bi>|\bis\b|\br<|\br=<|\br>=|\br>|\bs<|\bs=<|\bs>=|\bs>|@|::|\[\]|`->|`:|`:=|\^|-|\+|\bi-|\bi\+|r-|r\+|/|\*|\bdiv\b|\bi\*|\bmod\b|\br\*|~|\bi~|\br~)"
escape_re = r"\(({}|{})\)".format(constant_re,symbol_re)
const_sym_re = r"({}|{}|{})".format(constant_re,symbol_re,escape_re)
tokens = {
'root': [
include('elpi')
],
'elpi': [
include('_elpi-comment'),
(r"(:before|:after|:if|:name)(\s*)(\")",
bygroups(Keyword.Mode, Text.Whitespace, String.Double),
'elpi-string'),
(r"(:index)(\s*\()", bygroups(Keyword.Mode, Text.Whitespace),
'elpi-indexing-expr'),
(r"\b(external pred|pred)(\s+)({})".format(const_sym_re),
bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
'elpi-pred-item'),
(r"\b(external type|type)(\s+)(({}(,\s*)?)+)".format(const_sym_re),
bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
'elpi-type'),
(r"\b(kind)(\s+)(({}|,)+)".format(const_sym_re),
bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
'elpi-type'),
(r"\b(typeabbrev)(\s+)({})".format(const_sym_re),
bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
'elpi-type'),
(r"\b(accumulate)(\s+)(\")",
bygroups(Keyword.Declaration, Text.Whitespace, String.Double),
'elpi-string'),
(r"\b(accumulate|namespace|local)(\s+)({})".format(constant_re),
bygroups(Keyword.Declaration, Text.Whitespace, Text)),
(r"\b(shorten)(\s+)({}\.)".format(constant_re),
bygroups(Keyword.Declaration, Text.Whitespace, Text)),
(r"\b(pi|sigma)(\s+)([a-zA-Z][A-Za-z0-9_ ]*)(\\)",
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, Text)),
(r"\b(constraint)(\s+)(({}(\s+)?)+)".format(const_sym_re),
bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
'elpi-chr-rule-start'),
(r"(?=[A-Z_]){}".format(constant_re), Name.Variable),
(r"(?=[a-z_]){}\\".format(constant_re), Name.Variable),
(r"_", Name.Variable),
(r"({}|!|=>|;)".format(symbol_re), Keyword.Declaration),
(constant_re, Text),
(r"\[|\]|\||=>", Keyword.Declaration),
(r'"', String.Double, 'elpi-string'),
(r'`', String.Double, 'elpi-btick'),
(r'\'', String.Double, 'elpi-tick'),
(r'\{[^\{]', Text, 'elpi-spill'),
(r"\(", Text, 'elpi-in-parens'),
(r'\d[\d_]*', Number.Integer),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r"[\+\*\-/\^\.]", Operator),
],
'_elpi-comment': [
(r'%[^\n]*\n', Comment),
(r'/\*', Comment, 'elpi-multiline-comment'),
(r"\s+", Text.Whitespace),
],
'elpi-multiline-comment': [
(r'\*/', Comment, '#pop'),
(r'.', Comment)
],
'elpi-indexing-expr':[
(r'[0-9 _]+', Number.Integer),
(r'\)', Text, '#pop'),
],
'elpi-type': [
(r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'),
(r'->', Keyword.Type),
(constant_re, Keyword.Type),
(r"\(|\)", Keyword.Type),
(r"\.", Text, '#pop'),
include('_elpi-comment'),
],
'elpi-chr-rule-start': [
(r"\{", Text, 'elpi-chr-rule'),
include('_elpi-comment'),
],
'elpi-chr-rule': [
(r"\brule\b", Keyword.Declaration),
(r"\\", Keyword.Declaration),
(r"\}", Text, '#pop:2'),
include('elpi'),
],
'elpi-pred-item': [
(r"[io]:", Keyword.Mode, 'elpi-ctype'),
(r"\.", Text, '#pop'),
include('_elpi-comment'),
],
'elpi-ctype': [
(r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'),
(r'->', Keyword.Type),
(constant_re, Keyword.Type),
(r"\(|\)", Keyword.Type),
(r",", Text, '#pop'),
(r"\.", Text, '#pop:2'),
include('_elpi-comment'),
],
'elpi-btick': [
(r'[^` ]+', String.Double),
(r'`', String.Double, '#pop'),
],
'elpi-tick': [
(r'[^\' ]+', String.Double),
(r'\'', String.Double, '#pop'),
],
'elpi-string': [
(r'[^\"]+', String.Double),
(r'"', String.Double, '#pop'),
],
'elpi-spill': [
(r'\{[^\{]', Text, '#push'),
(r'\}[^\}]', Text, '#pop'),
include('elpi'),
],
'elpi-in-parens': [
(r"\(", Operator, '#push'),
(r"\)", Operator, '#pop'),
include('elpi'),
],
}
| 6,370 | Python | 37.379518 | 242 | 0.451648 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/snobol.py | """
pygments.lexers.snobol
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the SNOBOL language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SnobolLexer']
class SnobolLexer(RegexLexer):
"""
Lexer for the SNOBOL4 programming language.
Recognizes the common ASCII equivalents of the original SNOBOL4 operators.
Does not require spaces around binary operators.
.. versionadded:: 1.5
"""
name = "Snobol"
aliases = ["snobol"]
filenames = ['*.snobol']
mimetypes = ['text/x-snobol']
tokens = {
# root state, start of line
# comments, continuation lines, and directives start in column 1
# as do labels
'root': [
(r'\*.*\n', Comment),
(r'[+.] ', Punctuation, 'statement'),
(r'-.*\n', Comment),
(r'END\s*\n', Name.Label, 'heredoc'),
(r'[A-Za-z$][\w$]*', Name.Label, 'statement'),
(r'\s+', Text, 'statement'),
],
# statement state, line after continuation or label
'statement': [
(r'\s*\n', Text, '#pop'),
(r'\s+', Text),
(r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|'
r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|'
r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|'
r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])',
Name.Builtin),
(r'[A-Za-z][\w.]*', Name),
# ASCII equivalents of original operators
# | for the EBCDIC equivalent, ! likewise
# \ for EBCDIC negation
(r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator),
(r'"[^"]*"', String),
(r"'[^']*'", String),
# Accept SPITBOL syntax for real numbers
# as well as Macro SNOBOL4
(r'[0-9]+(?=[^.EeDd])', Number.Integer),
(r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float),
# Goto
(r':', Punctuation, 'goto'),
(r'[()<>,;]', Punctuation),
],
# Goto block
'goto': [
(r'\s*\n', Text, "#pop:2"),
(r'\s+', Text),
(r'F|S', Keyword),
(r'(\()([A-Za-z][\w.]*)(\))',
bygroups(Punctuation, Name.Label, Punctuation))
],
# everything after the END statement is basically one
# big heredoc.
'heredoc': [
(r'.*\n', String.Heredoc)
]
}
| 2,732 | Python | 31.92771 | 79 | 0.496706 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ezhil.py | """
pygments.lexers.ezhil
~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Ezhil language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Keyword, Comment, Name, String, Number, \
Punctuation, Operator, Whitespace
__all__ = ['EzhilLexer']
class EzhilLexer(RegexLexer):
"""
Lexer for Ezhil, a Tamil script-based programming language.
.. versionadded:: 2.1
"""
name = 'Ezhil'
url = 'http://ezhillang.org'
aliases = ['ezhil']
filenames = ['*.n']
mimetypes = ['text/x-ezhil']
# Refer to tamil.utf8.tamil_letters from open-tamil for a stricter version of this.
# This much simpler version is close enough, and includes combining marks.
_TALETTERS = '[a-zA-Z_]|[\u0b80-\u0bff]'
tokens = {
'root': [
include('keywords'),
(r'#.*$', Comment.Single),
(r'[@+/*,^\-%]|[!<>=]=?|&&?|\|\|?', Operator),
('இல்', Operator.Word),
(words(('assert', 'max', 'min',
'நீளம்', 'சரம்_இடமாற்று', 'சரம்_கண்டுபிடி',
'பட்டியல்', 'பின்இணை', 'வரிசைப்படுத்து',
'எடு', 'தலைகீழ்', 'நீட்டிக்க', 'நுழைக்க', 'வை',
'கோப்பை_திற', 'கோப்பை_எழுது', 'கோப்பை_மூடு',
'pi', 'sin', 'cos', 'tan', 'sqrt', 'hypot', 'pow',
'exp', 'log', 'log10', 'exit',
), suffix=r'\b'), Name.Builtin),
(r'(True|False)\b', Keyword.Constant),
(r'[^\S\n]+', Whitespace),
include('identifier'),
include('literal'),
(r'[(){}\[\]:;.]', Punctuation),
],
'keywords': [
('பதிப்பி|தேர்ந்தெடு|தேர்வு|ஏதேனில்|ஆனால்|இல்லைஆனால்|இல்லை|ஆக|ஒவ்வொன்றாக|இல்|வரை|செய்|முடியேனில்|பின்கொடு|முடி|நிரல்பாகம்|தொடர்|நிறுத்து|நிரல்பாகம்', Keyword),
],
'identifier': [
('(?:'+_TALETTERS+')(?:[0-9]|'+_TALETTERS+')*', Name),
],
'literal': [
(r'".*?"', String),
(r'\d+((\.\d*)?[eE][+-]?\d+|\.\d*)', Number.Float),
(r'\d+', Number.Integer),
]
}
def analyse_text(text):
"""This language uses Tamil-script. We'll assume that if there's a
decent amount of Tamil-characters, it's this language. This assumption
is obviously horribly off if someone uses string literals in tamil
in another language."""
if len(re.findall(r'[\u0b80-\u0bff]', text)) > 10:
return 0.25
def __init__(self, **options):
super().__init__(**options)
self.encoding = options.get('encoding', 'utf-8')
| 2,773 | Python | 34.564102 | 171 | 0.485755 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/q.py | """
pygments.lexers.q
~~~~~~~~~~~~~~~~~
Lexer for the Q programming language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups, inherit
from pygments.token import Comment, Name, Number, Operator, Punctuation, \
String, Whitespace, Literal, Generic
__all__ = ["KLexer", "QLexer"]
class KLexer(RegexLexer):
"""
For `K <https://code.kx.com/>`_ source code.
.. versionadded:: 2.12
"""
name = "K"
aliases = ["k"]
filenames = ["*.k"]
tokens = {
"whitespace": [
# hashbang script
(r"^#!.*", Comment.Hashbang),
# Comments
(r"^/\s*\n", Comment.Multiline, "comments"),
(r"(?<!\S)/.*", Comment.Single),
# Whitespace
(r"\s+", Whitespace),
# Strings
(r"\"", String.Double, "strings"),
],
"root": [
include("whitespace"),
include("keywords"),
include("declarations"),
],
"keywords": [
(words(("abs", "acos", "asin", "atan", "avg", "bin",
"binr", "by", "cor", "cos", "cov", "dev",
"delete", "div", "do", "enlist", "exec", "exit",
"exp", "from", "getenv", "hopen", "if", "in",
"insert", "last", "like", "log", "max", "min",
"prd", "select", "setenv", "sin", "sqrt", "ss",
"sum", "tan", "update", "var", "wavg", "while",
"within", "wsum", "xexp"),
suffix=r"\b"), Operator.Word),
],
"declarations": [
# Timing
(r"^\\ts?", Comment.Preproc),
(r"^(\\\w\s+[^/\n]*?)(/.*)",
bygroups(Comment.Preproc, Comment.Single)),
# Generic System Commands
(r"^\\\w.*", Comment.Preproc),
# Prompt
(r"^[a-zA-Z]\)", Generic.Prompt),
# Function Names
(r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)(\s*)(\{)",
bygroups(Name.Function, Whitespace, Operator, Whitespace, Punctuation),
"functions"),
# Variable Names
(r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)",
bygroups(Name.Variable, Whitespace, Operator)),
# Functions
(r"\{", Punctuation, "functions"),
# Parentheses
(r"\(", Punctuation, "parentheses"),
# Brackets
(r"\[", Punctuation, "brackets"),
# Errors
(r"'`([a-zA-Z][\w.]*)?", Name.Exception),
# File Symbols
(r"`:([a-zA-Z/][\w./]*)?", String.Symbol),
# Symbols
(r"`([a-zA-Z][\w.]*)?", String.Symbol),
# Numbers
include("numbers"),
# Variable Names
(r"[a-zA-Z][\w.]*", Name),
# Operators
(r"[-=+*#$%@!~^&:.,<>'\\|/?_]", Operator),
# Punctuation
(r";", Punctuation),
],
"functions": [
include("root"),
(r"\}", Punctuation, "#pop"),
],
"parentheses": [
include("root"),
(r"\)", Punctuation, "#pop"),
],
"brackets": [
include("root"),
(r"\]", Punctuation, "#pop"),
],
"numbers": [
# Binary Values
(r"[01]+b", Number.Bin),
# Nulls/Infinities
(r"0[nNwW][cefghijmndzuvtp]?", Number),
# Timestamps
((r"(?:[0-9]{4}[.][0-9]{2}[.][0-9]{2}|[0-9]+)"
"D(?:[0-9](?:[0-9](?::[0-9]{2}"
"(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)?"), Literal.Date),
# Datetimes
((r"[0-9]{4}[.][0-9]{2}"
"(?:m|[.][0-9]{2}(?:T(?:[0-9]{2}:[0-9]{2}"
"(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)"), Literal.Date),
# Times
(r"[0-9]{2}:[0-9]{2}(?::[0-9]{2}(?:[.][0-9]{1,3})?)?",
Literal.Date),
# GUIDs
(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
Number.Hex),
# Byte Vectors
(r"0x[0-9a-fA-F]+", Number.Hex),
# Floats
(r"([0-9]*[.]?[0-9]+|[0-9]+[.]?[0-9]*)[eE][+-]?[0-9]+[ef]?",
Number.Float),
(r"([0-9]*[.][0-9]+|[0-9]+[.][0-9]*)[ef]?", Number.Float),
(r"[0-9]+[ef]", Number.Float),
# Characters
(r"[0-9]+c", Number),
# Integers
(r"[0-9]+[ihtuv]", Number.Integer),
# Long Integers
(r"[0-9]+[jnp]?", Number.Integer.Long),
],
"comments": [
(r"[^\\]+", Comment.Multiline),
(r"^\\", Comment.Multiline, "#pop"),
(r"\\", Comment.Multiline),
],
"strings": [
(r'[^"\\]+', String.Double),
(r"\\.", String.Escape),
(r'"', String.Double, "#pop"),
],
}
class QLexer(KLexer):
"""
For `Q <https://code.kx.com/>`_ source code.
.. versionadded:: 2.12
"""
name = "Q"
aliases = ["q"]
filenames = ["*.q"]
tokens = {
"root": [
(words(("aj", "aj0", "ajf", "ajf0", "all", "and", "any", "asc",
"asof", "attr", "avgs", "ceiling", "cols", "count", "cross",
"csv", "cut", "deltas", "desc", "differ", "distinct", "dsave",
"each", "ej", "ema", "eval", "except", "fby", "fills", "first",
"fkeys", "flip", "floor", "get", "group", "gtime", "hclose",
"hcount", "hdel", "hsym", "iasc", "idesc", "ij", "ijf",
"inter", "inv", "key", "keys", "lj", "ljf", "load", "lower",
"lsq", "ltime", "ltrim", "mavg", "maxs", "mcount", "md5",
"mdev", "med", "meta", "mins", "mmax", "mmin", "mmu", "mod",
"msum", "neg", "next", "not", "null", "or", "over", "parse",
"peach", "pj", "prds", "prior", "prev", "rand", "rank", "ratios",
"raze", "read0", "read1", "reciprocal", "reval", "reverse",
"rload", "rotate", "rsave", "rtrim", "save", "scan", "scov",
"sdev", "set", "show", "signum", "ssr", "string", "sublist",
"sums", "sv", "svar", "system", "tables", "til", "trim", "txf",
"type", "uj", "ujf", "ungroup", "union", "upper", "upsert",
"value", "view", "views", "vs", "where", "wj", "wj1", "ww",
"xasc", "xbar", "xcol", "xcols", "xdesc", "xgroup", "xkey",
"xlog", "xprev", "xrank"),
suffix=r"\b"), Name.Builtin,
),
inherit,
],
}
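# Usage sketch (not part of the upstream module): tokenise a small, invented
# q snippet with QLexer so the builtin-name and number rules above can be
# seen in action.
if __name__ == "__main__":  # pragma: no cover
    sample = 't:([] a:1 2 3; b:`x`y`z)\nselect sum a by b from t\n'
    for token_type, value in QLexer().get_tokens(sample):
        print(token_type, repr(value))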
| 6,932 | Python | 35.682539 | 85 | 0.383151 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/email.py | """
pygments.lexers.email
~~~~~~~~~~~~~~~~~~~~~
Lexer for the raw E-mail.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups
from pygments.lexers.mime import MIMELexer
from pygments.token import Text, Keyword, Name, String, Number, Comment
from pygments.util import get_bool_opt
__all__ = ["EmailLexer"]
class EmailHeaderLexer(RegexLexer):
"""
    Sub-lexer for raw E-mail. This lexer only processes the header part of an e-mail.
.. versionadded:: 2.5
"""
def __init__(self, **options):
super().__init__(**options)
self.highlight_x = get_bool_opt(options, "highlight-X-header", False)
def get_x_header_tokens(self, match):
if self.highlight_x:
# field
yield match.start(1), Name.Tag, match.group(1)
# content
default_actions = self.get_tokens_unprocessed(
match.group(2), stack=("root", "header"))
yield from default_actions
else:
# lowlight
yield match.start(1), Comment.Special, match.group(1)
yield match.start(2), Comment.Multiline, match.group(2)
tokens = {
"root": [
(r"^(?:[A-WYZ]|X400)[\w\-]*:", Name.Tag, "header"),
(r"^(X-(?:\w[\w\-]*:))([\s\S]*?\n)(?![ \t])", get_x_header_tokens),
],
"header": [
# folding
(r"\n[ \t]", Text.Whitespace),
(r"\n(?![ \t])", Text.Whitespace, "#pop"),
# keywords
(r"\bE?SMTPS?\b", Keyword),
(r"\b(?:HE|EH)LO\b", Keyword),
# mailbox
(r"[\w\.\-\+=]+@[\w\.\-]+", Name.Label),
(r"<[\w\.\-\+=]+@[\w\.\-]+>", Name.Label),
# domain
(r"\b(\w[\w\.-]*\.[\w\.-]*\w[a-zA-Z]+)\b", Name.Function),
# IPv4
(r"(?<=\b)(?:(?:25[0-5]|2[0-4][0-9]|1?[0-9][0-9]?)\.){3}(?:25[0"
r"-5]|2[0-4][0-9]|1?[0-9][0-9]?)(?=\b)",
Number.Integer),
# IPv6
(r"(?<=\b)([0-9a-fA-F]{1,4}:){1,7}:(?!\b)", Number.Hex),
(r"(?<=\b):((:[0-9a-fA-F]{1,4}){1,7}|:)(?=\b)", Number.Hex),
(r"(?<=\b)([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}(?=\b)", Number.Hex),
(r"(?<=\b)([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}(?=\b)", Number.Hex),
(r"(?<=\b)[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})(?=\b)", Number.Hex),
(r"(?<=\b)fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}(?=\b)", Number.Hex),
(r"(?<=\b)([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}(?=\b)", Number.Hex),
(r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}(?=\b)",
Number.Hex),
(r"(?<=\b)([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}(?=\b)",
Number.Hex),
(r"(?<=\b)([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}(?=\b)",
Number.Hex),
(r"(?<=\b)::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}"
r"[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
r"[0-9])(?=\b)",
Number.Hex),
(r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9])"
r"{0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])(?=\b)",
Number.Hex),
# Date time
(r"(?:(Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s+)?(0[1-9]|[1-2]?[0-9]|3["
r"01])\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+("
r"19[0-9]{2}|[2-9][0-9]{3})\s+(2[0-3]|[0-1][0-9]):([0-5][0-9])"
r"(?::(60|[0-5][0-9]))?(?:\.\d{1,5})?\s+([-\+][0-9]{2}[0-5][0-"
r"9]|\(?(?:UTC?|GMT|(?:E|C|M|P)(?:ST|ET|DT)|[A-IK-Z])\)?)",
Name.Decorator),
# RFC-2047 encoded string
(r"(=\?)([\w-]+)(\?)([BbQq])(\?)([\[\w!\"#$%&\'()*+,-./:;<=>@[\\"
r"\]^_`{|}~]+)(\?=)",
bygroups(String.Affix, Name.Constant, String.Affix, Keyword.Constant,
String.Affix, Number.Hex, String.Affix)),
# others
(r'[\s]+', Text.Whitespace),
(r'[\S]', Text),
],
}
class EmailLexer(DelegatingLexer):
"""
Lexer for raw E-mail.
Additional options accepted:
`highlight-X-header`
        Highlight the fields of user-defined ``X-`` email headers (default:
        ``False``).
.. versionadded:: 2.5
"""
name = "E-mail"
aliases = ["email", "eml"]
filenames = ["*.eml"]
mimetypes = ["message/rfc822"]
def __init__(self, **options):
super().__init__(EmailHeaderLexer, MIMELexer, Comment, **options)
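# Usage sketch (not part of the upstream module): because the
# ``highlight-X-header`` option name contains a hyphen, it has to be passed
# via dict unpacking; the message below is invented sample data.
if __name__ == "__main__":  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    message = (
        "From: alice@example.com\n"
        "X-Mailer: ExampleMailer 1.0\n"
        "Subject: Hello\n"
        "\n"
        "Hi Bob,\n"
    )
    lexer = EmailLexer(**{"highlight-X-header": True})
    print(highlight(message, lexer, TerminalFormatter()))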
| 4,742 | Python | 34.661654 | 91 | 0.427457 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/arturo.py | """
pygments.lexers.arturo
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Arturo language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, do_insertions, include, \
this, using, words
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
Punctuation, String, Text
from pygments.util import ClassNotFound, get_bool_opt
__all__ = ['ArturoLexer']
class ArturoLexer(RegexLexer):
"""
For Arturo source code.
See `Arturo's Github <https://github.com/arturo-lang/arturo>`_
and `Arturo's Website <https://arturo-lang.io/>`_.
.. versionadded:: 2.14.0
"""
name = 'Arturo'
aliases = ['arturo', 'art']
filenames = ['*.art']
url = 'https://arturo-lang.io/'
def __init__(self, **options):
self.handle_annotateds = get_bool_opt(options, 'handle_annotateds',
True)
RegexLexer.__init__(self, **options)
def handle_annotated_strings(self, match):
"""Adds syntax from another languages inside annotated strings
match args:
1:open_string,
2:exclamation_mark,
3:lang_name,
4:space_or_newline,
5:code,
6:close_string
"""
from pygments.lexers import get_lexer_by_name
# Header's section
yield match.start(1), String.Double, match.group(1)
yield match.start(2), String.Interpol, match.group(2)
yield match.start(3), String.Interpol, match.group(3)
yield match.start(4), Text.Whitespace, match.group(4)
lexer = None
if self.handle_annotateds:
try:
lexer = get_lexer_by_name(match.group(3).strip())
except ClassNotFound:
pass
code = match.group(5)
if lexer is None:
yield match.group(5), String, code
else:
yield from do_insertions([], lexer.get_tokens_unprocessed(code))
yield match.start(6), String.Double, match.group(6)
tokens = {
'root': [
(r';.*?$', Comment.Single),
(r'^((\s#!)|(#!)).*?$', Comment.Hashbang),
# Constants
(words(('false', 'true', 'maybe'), # boolean
suffix=r'\b'), Name.Constant),
(words(('this', 'init'), # class related keywords
prefix=r'\b', suffix=r'\b\??:?'), Name.Builtin.Pseudo),
(r'`.`', String.Char), # character
(r'\\\w+\b\??:?', Name.Property), # array index
(r'#\w+', Name.Constant), # color
(r'\b[0-9]+\.[0-9]+', Number.Float), # float
(r'\b[0-9]+', Number.Integer), # integer
(r'\w+\b\??:', Name.Label), # label
# Note: Literals can be labeled too
(r'\'(?:\w+\b\??:?)', Keyword.Declaration), # literal
(r'\:\w+', Keyword.Type), # type
# Note: Attributes can be labeled too
(r'\.\w+\??:?', Name.Attribute), # attributes
# Switch structure
(r'(\()(.*?)(\)\?)',
bygroups(Punctuation, using(this), Punctuation)),
# Single Line Strings
(r'"', String.Double, 'inside-simple-string'),
(r'»', String.Single, 'inside-smart-string'),
(r'«««', String.Double, 'inside-safe-string'),
(r'\{\/', String.Single, 'inside-regex-string'),
# Multi Line Strings
(r'\{\:', String.Double, 'inside-curly-verb-string'),
(r'(\{)(\!)(\w+)(\s|\n)([\w\W]*?)(^\})', handle_annotated_strings),
(r'\{', String.Single, 'inside-curly-string'),
(r'\-{3,}', String.Single, 'inside-eof-string'),
include('builtin-functions'),
# Operators
(r'[()[\],]', Punctuation),
(words(('->', '==>', '|', '::', '@', '#', # sugar syntax
'$', '&', '!', '!!', './')), Name.Decorator),
(words(('<:', ':>', ':<', '>:', '<\\', '<>', '<', '>',
'ø', '∞',
'+', '-', '*', '~', '=', '^', '%', '/', '//',
'==>', '<=>', '<==>',
'=>>', '<<=>>', '<<==>>',
'-->', '<->', '<-->',
'=|', '|=', '-:', ':-',
'_', '.', '..', '\\')), Operator),
(r'\b\w+', Name),
(r'\s+', Text.Whitespace),
(r'.+$', Error),
],
'inside-interpol': [
(r'\|', String.Interpol, '#pop'),
(r'[^|]+', using(this)),
],
'inside-template': [
(r'\|\|\>', String.Interpol, '#pop'),
(r'[^|]+', using(this)),
],
'string-escape': [
(words(('\\\\', '\\n', '\\t', '\\"')), String.Escape),
],
'inside-simple-string': [
include('string-escape'),
(r'\|', String.Interpol, 'inside-interpol'), # Interpolation
(r'\<\|\|', String.Interpol, 'inside-template'), # Templates
(r'"', String.Double, '#pop'), # Closing Quote
(r'[^|"]+', String) # String Content
],
'inside-smart-string': [
include('string-escape'),
(r'\|', String.Interpol, 'inside-interpol'), # Interpolation
(r'\<\|\|', String.Interpol, 'inside-template'), # Templates
(r'\n', String.Single, '#pop'), # Closing Quote
(r'[^|\n]+', String) # String Content
],
'inside-safe-string': [
include('string-escape'),
(r'\|', String.Interpol, 'inside-interpol'), # Interpolation
(r'\<\|\|', String.Interpol, 'inside-template'), # Templates
(r'»»»', String.Double, '#pop'), # Closing Quote
(r'[^|»]+', String) # String Content
],
'inside-regex-string': [
(r'\\[sSwWdDbBZApPxucItnvfr0]+', String.Escape),
(r'\|', String.Interpol, 'inside-interpol'), # Interpolation
(r'\<\|\|', String.Interpol, 'inside-template'), # Templates
(r'\/\}', String.Single, '#pop'), # Closing Quote
(r'[^|\/]+', String.Regex), # String Content
],
'inside-curly-verb-string': [
include('string-escape'),
(r'\|', String.Interpol, 'inside-interpol'), # Interpolation
(r'\<\|\|', String.Interpol, 'inside-template'), # Templates
(r'\:\}', String.Double, '#pop'), # Closing Quote
(r'[^|<:]+', String), # String Content
],
'inside-curly-string': [
include('string-escape'),
(r'\|', String.Interpol, 'inside-interpol'), # Interpolation
(r'\<\|\|', String.Interpol, 'inside-template'), # Templates
(r'\}', String.Single, '#pop'), # Closing Quote
(r'[^|<}]+', String), # String Content
],
'inside-eof-string': [
include('string-escape'),
(r'\|', String.Interpol, 'inside-interpol'), # Interpolation
(r'\<\|\|', String.Interpol, 'inside-template'), # Templates
(r'\Z', String.Single, '#pop'), # Closing Quote
(r'[^|<]+', String), # String Content
],
'builtin-functions': [
(words((
'all', 'and', 'any', 'ascii', 'attr', 'attribute',
                'attributeLabel', 'binary', 'block', 'char', 'contains',
'database', 'date', 'dictionary', 'empty', 'equal', 'even',
                'every', 'exists', 'false', 'floating', 'function', 'greater',
'greaterOrEqual', 'if', 'in', 'inline', 'integer', 'is',
'key', 'label', 'leap', 'less', 'lessOrEqual', 'literal',
'logical', 'lower', 'nand', 'negative', 'nor', 'not',
'notEqual', 'null', 'numeric', 'odd', 'or', 'path',
'pathLabel', 'positive', 'prefix', 'prime', 'set', 'some',
'sorted', 'standalone', 'string', 'subset', 'suffix',
                'superset', 'symbol', 'true', 'try', 'type', 'unless', 'upper',
'when', 'whitespace', 'word', 'xnor', 'xor', 'zero',
), prefix=r'\b', suffix=r'\b\?'), Name.Builtin),
(words((
'abs', 'acos', 'acosh', 'acsec', 'acsech', 'actan', 'actanh',
'add', 'after', 'alphabet', 'and', 'angle', 'append', 'arg',
'args', 'arity', 'array', 'as', 'asec', 'asech', 'asin',
'asinh', 'atan', 'atan2', 'atanh', 'attr', 'attrs', 'average',
'before', 'benchmark', 'blend', 'break', 'builtins1',
'builtins2', 'call', 'capitalize', 'case', 'ceil', 'chop',
'chunk', 'clear', 'close', 'cluster', 'color', 'combine',
'conj', 'continue', 'copy', 'cos', 'cosh', 'couple', 'csec',
'csech', 'ctan', 'ctanh', 'cursor', 'darken', 'dec', 'decode',
'decouple', 'define', 'delete', 'desaturate', 'deviation',
'dictionary', 'difference', 'digest', 'digits', 'div', 'do',
'download', 'drop', 'dup', 'e', 'else', 'empty', 'encode',
'ensure', 'env', 'epsilon', 'escape', 'execute', 'exit', 'exp',
'extend', 'extract', 'factors', 'false', 'fdiv', 'filter',
'first', 'flatten', 'floor', 'fold', 'from', 'function',
'gamma', 'gcd', 'get', 'goto', 'hash', 'help', 'hypot', 'if',
'in', 'inc', 'indent', 'index', 'infinity', 'info', 'input',
'insert', 'inspect', 'intersection', 'invert', 'join', 'keys',
'kurtosis', 'last', 'let', 'levenshtein', 'lighten', 'list',
'ln', 'log', 'loop', 'lower', 'mail', 'map', 'match', 'max',
'maybe', 'median', 'min', 'mod', 'module', 'mul', 'nand',
'neg', 'new', 'nor', 'normalize', 'not', 'now', 'null', 'open',
'or', 'outdent', 'pad', 'panic', 'path', 'pause',
'permissions', 'permutate', 'pi', 'pop', 'pow', 'powerset',
'powmod', 'prefix', 'print', 'prints', 'process', 'product',
'query', 'random', 'range', 'read', 'relative', 'remove',
'rename', 'render', 'repeat', 'replace', 'request', 'return',
'reverse', 'round', 'sample', 'saturate', 'script', 'sec',
'sech', 'select', 'serve', 'set', 'shl', 'shr', 'shuffle',
'sin', 'sinh', 'size', 'skewness', 'slice', 'sort', 'split',
'sqrt', 'squeeze', 'stack', 'strip', 'sub', 'suffix', 'sum',
'switch', 'symbols', 'symlink', 'sys', 'take', 'tan', 'tanh',
'terminal', 'to', 'true', 'truncate', 'try', 'type', 'union',
'unique', 'unless', 'until', 'unzip', 'upper', 'values', 'var',
'variance', 'volume', 'webview', 'while', 'with', 'wordwrap',
'write', 'xnor', 'xor', 'zip'
), prefix=r'\b', suffix=r'\b'), Name.Builtin)
],
}
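# Usage sketch (not part of the upstream module): ``handle_annotateds``
# controls whether annotated strings such as ``{!python ...}`` are re-lexed
# with the named sub-lexer; the snippet below is invented for illustration.
if __name__ == "__main__":  # pragma: no cover
    sample = 'greet: function [name][\n    print ~"Hello |name|!"\n]\ngreet "world"\n'
    for token_type, value in ArturoLexer(handle_annotateds=False).get_tokens(sample):
        print(token_type, repr(value))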
| 11,406 | Python | 44.446215 | 79 | 0.445818 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/c_cpp.py | """
pygments.lexers.c_cpp
~~~~~~~~~~~~~~~~~~~~~
Lexers for C/C++ languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, inherit, default, words
from pygments.util import get_bool_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['CLexer', 'CppLexer']
class CFamilyLexer(RegexLexer):
"""
For C family source code. This is used as a base class to avoid repetitious
definitions.
"""
# The trailing ?, rather than *, avoids a geometric performance drop here.
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)?'
    # Hexadecimal part in a hexadecimal integer/floating-point literal.
# This includes decimal separators matching.
_hexpart = r'[0-9a-fA-F](\'?[0-9a-fA-F])*'
    # Decimal part in a decimal integer/floating-point literal.
# This includes decimal separators matching.
_decpart = r'\d(\'?\d)*'
# Integer literal suffix (e.g. 'ull' or 'll').
_intsuffix = r'(([uU][lL]{0,2})|[lL]{1,2}[uU]?)?'
# Identifier regex with C and C++ Universal Character Name (UCN) support.
_ident = r'(?!\d)(?:[\w$]|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8})+'
_namespaced_ident = r'(?!\d)(?:[\w$]|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|::)+'
# Single and multiline comment regexes
# Beware not to use *? for the inner content! When these regexes
# are embedded in larger regexes, that can cause the stuff*? to
# match more than it would have if the regex had been used in
# a standalone way ...
_comment_single = r'//(?:.|(?<=\\)\n)*\n'
_comment_multiline = r'/(?:\\\n)?[*](?:[^*]|[*](?!(?:\\\n)?/))*[*](?:\\\n)?/'
# Regex to match optional comments
_possible_comments = rf'\s*(?:(?:(?:{_comment_single})|(?:{_comment_multiline}))\s*)*'
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
(r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^(' + _ws1 + r')(#if\s+0)',
bygroups(using(this), Comment.Preproc), 'if0'),
('^(' + _ws1 + ')(#)',
bygroups(using(this), Comment.Preproc), 'macro'),
# Labels:
# Line start and possible indentation.
(r'(^[ \t]*)'
# Not followed by keywords which can be mistaken as labels.
r'(?!(?:public|private|protected|default)\b)'
# Actual label, followed by a single colon.
r'(' + _ident + r')(\s*)(:)(?!:)',
bygroups(Whitespace, Name.Label, Whitespace, Punctuation)),
(r'\n', Whitespace),
(r'[^\S\n]+', Whitespace),
(r'\\\n', Text), # line continuation
(_comment_single, Comment.Single),
(_comment_multiline, Comment.Multiline),
# Open until EOF, so no ending delimiter
(r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
],
'statements': [
include('keywords'),
include('types'),
(r'([LuU]|u8)?(")', bygroups(String.Affix, String), 'string'),
(r"([LuU]|u8)?(')(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])(')",
bygroups(String.Affix, String.Char, String.Char, String.Char)),
# Hexadecimal floating-point literals (C11, C++17)
(r'0[xX](' + _hexpart + r'\.' + _hexpart + r'|\.' + _hexpart +
r'|' + _hexpart + r')[pP][+-]?' + _hexpart + r'[lL]?', Number.Float),
(r'(-)?(' + _decpart + r'\.' + _decpart + r'|\.' + _decpart + r'|' +
_decpart + r')[eE][+-]?' + _decpart + r'[fFlL]?', Number.Float),
(r'(-)?((' + _decpart + r'\.(' + _decpart + r')?|\.' +
_decpart + r')[fFlL]?)|(' + _decpart + r'[fFlL])', Number.Float),
(r'(-)?0[xX]' + _hexpart + _intsuffix, Number.Hex),
(r'(-)?0[bB][01](\'?[01])*' + _intsuffix, Number.Bin),
(r'(-)?0(\'?[0-7])+' + _intsuffix, Number.Oct),
(r'(-)?' + _decpart + _intsuffix, Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'(true|false|NULL)\b', Name.Builtin),
(_ident, Name)
],
'types': [
(words(('int8', 'int16', 'int32', 'int64', 'wchar_t'), prefix=r'__',
suffix=r'\b'), Keyword.Reserved),
(words(('bool', 'int', 'long', 'float', 'short', 'double', 'char',
'unsigned', 'signed', 'void'), suffix=r'\b'), Keyword.Type)
],
'keywords': [
(r'(struct|union)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
(r'case\b', Keyword, 'case-value'),
(words(('asm', 'auto', 'break', 'const', 'continue', 'default',
'do', 'else', 'enum', 'extern', 'for', 'goto', 'if',
'register', 'restricted', 'return', 'sizeof', 'struct',
'static', 'switch', 'typedef', 'volatile', 'while', 'union',
'thread_local', 'alignas', 'alignof', 'static_assert', '_Pragma'),
suffix=r'\b'), Keyword),
(words(('inline', '_inline', '__inline', 'naked', 'restrict',
'thread'), suffix=r'\b'), Keyword.Reserved),
# Vector intrinsics
(r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
# Microsoft-isms
(words((
'asm', 'based', 'except', 'stdcall', 'cdecl',
'fastcall', 'declspec', 'finally', 'try',
'leave', 'w64', 'unaligned', 'raise', 'noop',
'identifier', 'forceinline', 'assume'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved)
],
'root': [
include('whitespace'),
include('keywords'),
# functions
(r'(' + _namespaced_ident + r'(?:[&*\s])+)' # return arguments
r'(' + _possible_comments + r')'
r'(' + _namespaced_ident + r')' # method name
r'(' + _possible_comments + r')'
r'(\([^;"\')]*?\))' # signature
r'(' + _possible_comments + r')'
r'([^;{/"\']*)(\{)',
bygroups(using(this), using(this, state='whitespace'),
Name.Function, using(this, state='whitespace'),
using(this), using(this, state='whitespace'),
using(this), Punctuation),
'function'),
# function declarations
(r'(' + _namespaced_ident + r'(?:[&*\s])+)' # return arguments
r'(' + _possible_comments + r')'
r'(' + _namespaced_ident + r')' # method name
r'(' + _possible_comments + r')'
r'(\([^;"\')]*?\))' # signature
r'(' + _possible_comments + r')'
r'([^;/"\']*)(;)',
bygroups(using(this), using(this, state='whitespace'),
Name.Function, using(this, state='whitespace'),
using(this), using(this, state='whitespace'),
using(this), Punctuation)),
include('types'),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
(r'\}', Punctuation),
(r'[{;]', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'('+_ws1+r')(include)('+_ws1+r')("[^"]+")([^\n]*)',
bygroups(using(this), Comment.Preproc, using(this),
Comment.PreprocFile, Comment.Single)),
(r'('+_ws1+r')(include)('+_ws1+r')(<[^>]+>)([^\n]*)',
bygroups(using(this), Comment.Preproc, using(this),
Comment.PreprocFile, Comment.Single)),
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
],
'classname': [
(_ident, Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
default('#pop')
],
# Mark identifiers preceded by `case` keyword as constants.
'case-value': [
(r'(?<!:)(:)(?!:)', Punctuation, '#pop'),
(_ident, Name.Constant),
include('whitespace'),
include('statements'),
]
}
stdlib_types = {
'size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t', 'sig_atomic_t', 'fpos_t',
'clock_t', 'time_t', 'va_list', 'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t',
'mbstate_t', 'wctrans_t', 'wint_t', 'wctype_t'}
c99_types = {
'int8_t', 'int16_t', 'int32_t', 'int64_t', 'uint8_t',
'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t', 'int_least16_t',
'int_least32_t', 'int_least64_t', 'uint_least8_t', 'uint_least16_t',
'uint_least32_t', 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t', 'uint_fast64_t',
'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t'}
linux_types = {
'clockid_t', 'cpu_set_t', 'cpumask_t', 'dev_t', 'gid_t', 'id_t', 'ino_t', 'key_t',
'mode_t', 'nfds_t', 'pid_t', 'rlim_t', 'sig_t', 'sighandler_t', 'siginfo_t',
'sigset_t', 'sigval_t', 'socklen_t', 'timer_t', 'uid_t'}
c11_atomic_types = {
'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
'atomic_llong', 'atomic_ullong', 'atomic_char16_t', 'atomic_char32_t', 'atomic_wchar_t',
'atomic_int_least8_t', 'atomic_uint_least8_t', 'atomic_int_least16_t',
'atomic_uint_least16_t', 'atomic_int_least32_t', 'atomic_uint_least32_t',
'atomic_int_least64_t', 'atomic_uint_least64_t', 'atomic_int_fast8_t',
'atomic_uint_fast8_t', 'atomic_int_fast16_t', 'atomic_uint_fast16_t',
'atomic_int_fast32_t', 'atomic_uint_fast32_t', 'atomic_int_fast64_t',
'atomic_uint_fast64_t', 'atomic_intptr_t', 'atomic_uintptr_t', 'atomic_size_t',
'atomic_ptrdiff_t', 'atomic_intmax_t', 'atomic_uintmax_t'}
def __init__(self, **options):
self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
self.c11highlighting = get_bool_opt(options, 'c11highlighting', True)
self.platformhighlighting = get_bool_opt(options, 'platformhighlighting', True)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text, stack=('root',)):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name:
if self.stdlibhighlighting and value in self.stdlib_types:
token = Keyword.Type
elif self.c99highlighting and value in self.c99_types:
token = Keyword.Type
elif self.c11highlighting and value in self.c11_atomic_types:
token = Keyword.Type
elif self.platformhighlighting and value in self.linux_types:
token = Keyword.Type
yield index, token, value
class CLexer(CFamilyLexer):
"""
For C source code with preprocessor directives.
Additional options accepted:
`stdlibhighlighting`
Highlight common types found in the C/C++ standard library (e.g. `size_t`).
(default: ``True``).
`c99highlighting`
Highlight common types found in the C99 standard library (e.g. `int8_t`).
Actually, this includes all fixed-width integer types.
(default: ``True``).
`c11highlighting`
Highlight atomic types found in the C11 standard library (e.g. `atomic_bool`).
(default: ``True``).
`platformhighlighting`
Highlight common types found in the platform SDK headers (e.g. `clockid_t` on Linux).
(default: ``True``).
"""
name = 'C'
aliases = ['c']
filenames = ['*.c', '*.h', '*.idc', '*.x[bp]m']
mimetypes = ['text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap']
priority = 0.1
tokens = {
'keywords': [
(words((
'_Alignas', '_Alignof', '_Noreturn', '_Generic', '_Thread_local',
'_Static_assert', '_Imaginary', 'noreturn', 'imaginary', 'complex'),
suffix=r'\b'), Keyword),
inherit
],
'types': [
(words(('_Bool', '_Complex', '_Atomic'), suffix=r'\b'), Keyword.Type),
inherit
]
}
def analyse_text(text):
if re.search(r'^\s*#include [<"]', text, re.MULTILINE):
return 0.1
if re.search(r'^\s*#ifn?def ', text, re.MULTILINE):
return 0.1
class CppLexer(CFamilyLexer):
"""
For C++ source code with preprocessor directives.
Additional options accepted:
`stdlibhighlighting`
Highlight common types found in the C/C++ standard library (e.g. `size_t`).
(default: ``True``).
`c99highlighting`
Highlight common types found in the C99 standard library (e.g. `int8_t`).
Actually, this includes all fixed-width integer types.
(default: ``True``).
`c11highlighting`
Highlight atomic types found in the C11 standard library (e.g. `atomic_bool`).
(default: ``True``).
`platformhighlighting`
Highlight common types found in the platform SDK headers (e.g. `clockid_t` on Linux).
(default: ``True``).
"""
name = 'C++'
url = 'https://isocpp.org/'
aliases = ['cpp', 'c++']
filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
'*.cc', '*.hh', '*.cxx', '*.hxx',
'*.C', '*.H', '*.cp', '*.CPP', '*.tpp']
mimetypes = ['text/x-c++hdr', 'text/x-c++src']
priority = 0.1
tokens = {
'statements': [
# C++11 raw strings
(r'((?:[LuU]|u8)?R)(")([^\\()\s]{,16})(\()((?:.|\n)*?)(\)\3)(")',
bygroups(String.Affix, String, String.Delimiter, String.Delimiter,
String, String.Delimiter, String)),
inherit,
],
'root': [
inherit,
# C++ Microsoft-isms
(words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
'multiple_inheritance', 'interface', 'event'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
# Offload C++ extensions, http://offload.codeplay.com/
(r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
],
'enumname': [
include('whitespace'),
# 'enum class' and 'enum struct' C++11 support
(words(('class', 'struct'), suffix=r'\b'), Keyword),
(CFamilyLexer._ident, Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
default('#pop')
],
'keywords': [
(r'(class|concept|typename)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
(words((
'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
'export', 'friend', 'mutable', 'new', 'operator',
'private', 'protected', 'public', 'reinterpret_cast', 'class',
'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
'try', 'typeid', 'using', 'virtual', 'constexpr', 'nullptr', 'concept',
'decltype', 'noexcept', 'override', 'final', 'constinit', 'consteval',
'co_await', 'co_return', 'co_yield', 'requires', 'import', 'module',
'typename'),
suffix=r'\b'), Keyword),
(r'namespace\b', Keyword, 'namespace'),
(r'(enum)(\s+)', bygroups(Keyword, Whitespace), 'enumname'),
inherit
],
'types': [
(r'char(16_t|32_t|8_t)\b', Keyword.Type),
inherit
],
'namespace': [
(r'[;{]', Punctuation, ('#pop', 'root')),
(r'inline\b', Keyword.Reserved),
(CFamilyLexer._ident, Name.Namespace),
include('statement')
]
}
def analyse_text(text):
if re.search('#include <[a-z_]+>', text):
return 0.2
if re.search('using namespace ', text):
return 0.4
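# Usage sketch (not part of the upstream module): the ``stdlibhighlighting``
# and ``c99highlighting`` options decide whether names such as ``size_t`` or
# ``uint8_t`` get promoted from Name to Keyword.Type in
# CFamilyLexer.get_tokens_unprocessed above; the C snippet is invented.
if __name__ == "__main__":  # pragma: no cover
    sample = "#include <stdint.h>\nsize_t n = 0;\nuint8_t b = 0xFF;\n"
    with_types = [v for _, t, v in CLexer().get_tokens_unprocessed(sample)
                  if t is Keyword.Type]
    without_types = [v for _, t, v in
                     CLexer(stdlibhighlighting=False,
                            c99highlighting=False).get_tokens_unprocessed(sample)
                     if t is Keyword.Type]
    print(with_types)     # expected to include 'size_t' and 'uint8_t'
    print(without_types)  # expected to be empty for this snippet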
| 17,791 | Python | 42.395122 | 96 | 0.490417 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/gsql.py | """
pygments.lexers.gsql
~~~~~~~~~~~~~~~~~~~~
Lexers for TigerGraph GSQL graph query language
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this, words
from pygments.token import Keyword, Punctuation, Comment, Operator, Name, \
String, Number, Whitespace
__all__ = ["GSQLLexer"]
class GSQLLexer(RegexLexer):
"""
For GSQL queries (version 3.x).
.. versionadded:: 2.10
"""
name = 'GSQL'
url = 'https://docs.tigergraph.com/dev/gsql-ref'
aliases = ['gsql']
filenames = ['*.gsql']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
include('comment'),
include('keywords'),
include('clauses'),
include('accums'),
include('relations'),
include('strings'),
include('whitespace'),
include('barewords'),
include('operators'),
],
'comment': [
(r'\#.*', Comment.Single),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
],
'keywords': [
(words((
'ACCUM', 'AND', 'ANY', 'API', 'AS', 'ASC', 'AVG', 'BAG', 'BATCH',
'BETWEEN', 'BOOL', 'BOTH', 'BREAK', 'BY', 'CASE', 'CATCH', 'COALESCE',
'COMPRESS', 'CONTINUE', 'COUNT', 'CREATE', 'DATETIME', 'DATETIME_ADD',
'DATETIME_SUB', 'DELETE', 'DESC', 'DISTRIBUTED', 'DO', 'DOUBLE',
'EDGE', 'ELSE', 'END', 'ESCAPE', 'EXCEPTION', 'FALSE', 'FILE',
'FILTER', 'FLOAT', 'FOREACH', 'FOR', 'FROM', 'GRAPH', 'GROUP',
'GSQL_INT_MAX', 'GSQL_INT_MIN', 'GSQL_UINT_MAX', 'HAVING', 'IF',
'IN', 'INSERT', 'INT', 'INTERPRET', 'INTERSECT', 'INTERVAL', 'INTO',
'IS', 'ISEMPTY', 'JSONARRAY', 'JSONOBJECT', 'LASTHOP', 'LEADING',
'LIKE', 'LIMIT', 'LIST', 'LOAD_ACCUM', 'LOG', 'MAP', 'MATCH', 'MAX',
'MIN', 'MINUS', 'NOT', 'NOW', 'NULL', 'OFFSET', 'OR', 'ORDER', 'PATH',
'PER', 'PINNED', 'POST_ACCUM', 'POST-ACCUM', 'PRIMARY_ID', 'PRINT',
'QUERY', 'RAISE', 'RANGE', 'REPLACE', 'RESET_COLLECTION_ACCUM',
'RETURN', 'RETURNS', 'RUN', 'SAMPLE', 'SELECT', 'SELECT_VERTEX',
'SET', 'SRC', 'STATIC', 'STRING', 'SUM', 'SYNTAX', 'TARGET',
'TAGSTGT', 'THEN', 'TO', 'TO_CSV', 'TO_DATETIME', 'TRAILING',
'TRIM', 'TRUE', 'TRY', 'TUPLE', 'TYPEDEF', 'UINT', 'UNION', 'UPDATE',
'VALUES', 'VERTEX', 'WHEN', 'WHERE', 'WHILE', 'WITH'),
prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
],
'clauses': [
(words(('accum', 'having', 'limit', 'order', 'postAccum', 'sample', 'where')),
Name.Builtin),
],
'accums': [
(words(('andaccum', 'arrayaccum', 'avgaccum', 'bagaccum', 'bitwiseandaccum',
'bitwiseoraccum', 'groupbyaccum', 'heapaccum', 'listaccum',
'MapAccum', 'maxaccum', 'minaccum', 'oraccum', 'setaccum',
'sumaccum')), Name.Builtin),
],
'relations': [
(r'(-\s?)(\(.*\:\w?\))(\s?-)', bygroups(Operator, using(this), Operator)),
(r'->|<-', Operator),
(r'[.*{}\[\]\<\>\_]', Punctuation),
],
'strings': [
(r'"([^"\\]|\\.)*"', String),
(r'@{1,2}\w+', Name.Variable),
],
'whitespace': [
(r'\s+', Whitespace),
],
'barewords': [
(r'[a-z]\w*', Name),
(r'(\d+\.\d+|\d+)', Number),
],
'operators': [
(r'\$|[^0-9|\/|\-](\-\=|\+\=|\*\=|\\\=|\=|\=\=|\=\=\=|'
r'\+|\-|\*|\\|\+\=|\>|\<)[^\>|\/]', Operator),
(r'(\||\(|\)|\,|\;|\=|\-|\+|\*|\/|\>|\<|\:)', Operator),
],
}
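# Usage sketch (not part of the upstream module): a small, invented GSQL
# query run through GSQLLexer; keyword matching is case-insensitive because
# of the re.IGNORECASE flag set above.
if __name__ == "__main__":  # pragma: no cover
    sample = 'CREATE QUERY hello(VERTEX<person> p) FOR GRAPH social {\n  PRINT p;\n}\n'
    for token_type, value in GSQLLexer().get_tokens(sample):
        print(token_type, repr(value))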
| 3,991 | Python | 37.019047 | 90 | 0.441493 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/bdd.py | """
pygments.lexers.bdd
~~~~~~~~~~~~~~~~~~~
    Lexer for BDD (Behavior-driven development).
More information: https://en.wikipedia.org/wiki/Behavior-driven_development
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include
from pygments.token import Comment, Keyword, Name, String, Number, Text, \
Punctuation, Whitespace
__all__ = ['BddLexer']
class BddLexer(RegexLexer):
"""
    Lexer for BDD (Behavior-driven development), which highlights not only
    keywords but also comments, punctuation, strings, numbers, and variables.
.. versionadded:: 2.11
"""
name = 'Bdd'
aliases = ['bdd']
filenames = ['*.feature']
mimetypes = ['text/x-bdd']
step_keywords = (r'Given|When|Then|Add|And|Feature|Scenario Outline|'
r'Scenario|Background|Examples|But')
tokens = {
'comments': [
(r'^\s*#.*$', Comment),
],
'miscellaneous': [
(r'(<|>|\[|\]|=|\||:|\(|\)|\{|\}|,|\.|;|-|_|\$)', Punctuation),
(r'((?<=\<)[^\\>]+(?=\>))', Name.Variable),
(r'"([^\"]*)"', String),
(r'^@\S+', Name.Label),
],
'numbers': [
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number),
],
'root': [
(r'\n|\s+', Whitespace),
(step_keywords, Keyword),
include('comments'),
include('miscellaneous'),
include('numbers'),
(r'\S+', Text),
]
}
def analyse_text(self, text):
return
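# Usage sketch (not part of the upstream module): a minimal, made-up
# Gherkin-style scenario run through BddLexer.
if __name__ == "__main__":  # pragma: no cover
    sample = (
        "Feature: login\n"
        "  Scenario: valid user\n"
        '    Given a user "alice" with password "secret"\n'
        "    When she logs in\n"
        "    Then she sees the dashboard\n"
    )
    for token_type, value in BddLexer().get_tokens(sample):
        print(token_type, repr(value))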
| 1,652 | Python | 27.016949 | 79 | 0.507869 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/supercollider.py | """
pygments.lexers.supercollider
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for SuperCollider
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SuperColliderLexer']
class SuperColliderLexer(RegexLexer):
"""
For SuperCollider source code.
.. versionadded:: 2.1
"""
name = 'SuperCollider'
url = 'http://supercollider.github.io/'
aliases = ['supercollider', 'sc']
filenames = ['*.sc', '*.scd']
mimetypes = ['application/supercollider', 'text/supercollider']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop'),
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(words((
'for', 'in', 'while', 'do', 'break', 'return', 'continue',
'switch', 'case', 'default', 'if', 'else', 'throw', 'try',
'catch', 'finally', 'new', 'delete', 'typeof', 'instanceof',
'void'), suffix=r'\b'),
Keyword, 'slashstartsregex'),
(words(('var', 'let', 'with', 'function', 'arg'), suffix=r'\b'),
Keyword.Declaration, 'slashstartsregex'),
(words((
                'abstract', 'boolean', 'byte', 'char', 'class', 'const',
'debugger', 'double', 'enum', 'export', 'extends', 'final',
'float', 'goto', 'implements', 'import', 'int', 'interface',
'long', 'native', 'package', 'private', 'protected', 'public',
'short', 'static', 'super', 'synchronized', 'throws',
'transient', 'volatile'), suffix=r'\b'),
Keyword.Reserved),
(words(('true', 'false', 'nil', 'inf'), suffix=r'\b'), Keyword.Constant),
(words((
'Array', 'Boolean', 'Date', 'Error', 'Function', 'Number',
'Object', 'Packages', 'RegExp', 'String',
'isFinite', 'isNaN', 'parseFloat', 'parseInt', 'super',
'thisFunctionDef', 'thisFunction', 'thisMethod', 'thisProcess',
'thisThread', 'this'), suffix=r'\b'),
Name.Builtin),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'\\?[$a-zA-Z_]\w*', String.Symbol),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
]
}
def analyse_text(text):
"""We're searching for a common function and a unique keyword here."""
if 'SinOsc' in text or 'thisFunctionDef' in text:
return 0.1
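# Usage sketch (not part of the upstream module): the heuristic above means a
# snippet containing ``SinOsc`` scores 0.1; the one-liner below is an
# invented example.
if __name__ == "__main__":  # pragma: no cover
    sample = "{ SinOsc.ar(440, 0, 0.2) }.play;\n"
    print(SuperColliderLexer.analyse_text(sample))  # expected: 0.1
    for token_type, value in SuperColliderLexer().get_tokens(sample):
        print(token_type, repr(value))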
| 3,698 | Python | 37.53125 | 85 | 0.462142 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/nit.py | """
pygments.lexers.nit
~~~~~~~~~~~~~~~~~~~
Lexer for the Nit language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['NitLexer']
class NitLexer(RegexLexer):
"""
    For Nit source.
.. versionadded:: 2.0
"""
name = 'Nit'
url = 'http://nitlanguage.org'
aliases = ['nit']
filenames = ['*.nit']
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(words((
'package', 'module', 'import', 'class', 'abstract', 'interface',
'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef',
'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern',
'public', 'protected', 'private', 'intrude', 'if', 'then',
'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not',
'implies', 'return', 'continue', 'break', 'abort', 'assert',
'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable',
'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'),
Keyword),
(r'[A-Z]\w*', Name.Class),
(r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String), # Simple long string
(r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|'
r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String), # Simple long string alt
(r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String), # Start long string
(r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String), # Mid long string
(r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String), # End long string
(r'"(\\.|([^"}{\\]))*"', String), # Simple String
(r'"(\\.|([^"}{\\]))*\{', String), # Start string
(r'\}(\\.|([^"}{\\]))*\{', String), # Mid String
(r'\}(\\.|([^"}{\\]))*"', String), # End String
(r'(\'[^\'\\]\')|(\'\\.\')', String.Char),
(r'[0-9]+', Number.Integer),
            (r'[0-9]*\.[0-9]+', Number.Float),
(r'0(x|X)[0-9A-Fa-f]+', Number.Hex),
(r'[a-z]\w*', Name),
(r'_\w+', Name.Variable.Instance),
(r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator),
(r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation),
(r'`\{[^`]*`\}', Text), # Extern blocks won't be Lexed by Nit
(r'[\r\n\t ]+', Text),
],
}
| 2,726 | Python | 40.953846 | 103 | 0.373074 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/hexdump.py | """
pygments.lexers.hexdump
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for hexadecimal dumps.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Name, Number, String, Punctuation, Whitespace
__all__ = ['HexdumpLexer']
class HexdumpLexer(RegexLexer):
"""
For typical hex dump output formats by the UNIX and GNU/Linux tools ``hexdump``,
``hd``, ``hexcat``, ``od`` and ``xxd``, and the DOS tool ``DEBUG``. For example:
.. sourcecode:: hexdump
00000000 7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00 |.ELF............|
00000010 02 00 3e 00 01 00 00 00 c5 48 40 00 00 00 00 00 |..>......H@.....|
The specific supported formats are the outputs of:
* ``hexdump FILE``
* ``hexdump -C FILE`` -- the `canonical` format used in the example.
* ``hd FILE`` -- same as ``hexdump -C FILE``.
* ``hexcat FILE``
* ``od -t x1z FILE``
* ``xxd FILE``
* ``DEBUG.EXE FILE.COM`` and entering ``d`` to the prompt.
.. versionadded:: 2.1
"""
name = 'Hexdump'
aliases = ['hexdump']
hd = r'[0-9A-Ha-h]'
tokens = {
'root': [
(r'\n', Whitespace),
include('offset'),
(r'('+hd+r'{2})(\-)('+hd+r'{2})',
bygroups(Number.Hex, Punctuation, Number.Hex)),
(hd+r'{2}', Number.Hex),
(r'(\s{2,3})(\>)(.{16})(\<)$',
bygroups(Whitespace, Punctuation, String, Punctuation), 'bracket-strings'),
(r'(\s{2,3})(\|)(.{16})(\|)$',
bygroups(Whitespace, Punctuation, String, Punctuation), 'piped-strings'),
(r'(\s{2,3})(\>)(.{1,15})(\<)$',
bygroups(Whitespace, Punctuation, String, Punctuation)),
(r'(\s{2,3})(\|)(.{1,15})(\|)$',
bygroups(Whitespace, Punctuation, String, Punctuation)),
(r'(\s{2,3})(.{1,15})$', bygroups(Whitespace, String)),
(r'(\s{2,3})(.{16}|.{20})$', bygroups(Whitespace, String), 'nonpiped-strings'),
(r'\s', Whitespace),
(r'^\*', Punctuation),
],
'offset': [
(r'^('+hd+'+)(:)', bygroups(Name.Label, Punctuation), 'offset-mode'),
(r'^'+hd+'+', Name.Label),
],
'offset-mode': [
(r'\s', Whitespace, '#pop'),
(hd+'+', Name.Label),
(r':', Punctuation)
],
'piped-strings': [
(r'\n', Whitespace),
include('offset'),
(hd+r'{2}', Number.Hex),
(r'(\s{2,3})(\|)(.{1,16})(\|)$',
bygroups(Whitespace, Punctuation, String, Punctuation)),
(r'\s', Whitespace),
(r'^\*', Punctuation),
],
'bracket-strings': [
(r'\n', Whitespace),
include('offset'),
(hd+r'{2}', Number.Hex),
(r'(\s{2,3})(\>)(.{1,16})(\<)$',
bygroups(Whitespace, Punctuation, String, Punctuation)),
(r'\s', Whitespace),
(r'^\*', Punctuation),
],
'nonpiped-strings': [
(r'\n', Whitespace),
include('offset'),
(r'('+hd+r'{2})(\-)('+hd+r'{2})',
bygroups(Number.Hex, Punctuation, Number.Hex)),
(hd+r'{2}', Number.Hex),
(r'(\s{19,})(.{1,20}?)$', bygroups(Whitespace, String)),
(r'(\s{2,3})(.{1,20})$', bygroups(Whitespace, String)),
(r'\s', Whitespace),
(r'^\*', Punctuation),
],
}
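# Usage sketch (not part of the upstream module): two lines in the canonical
# ``hexdump -C`` style from the docstring above, re-used as sample input.
if __name__ == "__main__":  # pragma: no cover
    sample = (
        "00000000  7f 45 4c 46 02 01 01 00  00 00 00 00 00 00 00 00  |.ELF............|\n"
        "00000010  02 00 3e 00 01 00 00 00  c5 48 40 00 00 00 00 00  |..>......H@.....|\n"
    )
    for token_type, value in HexdumpLexer().get_tokens(sample):
        print(token_type, repr(value))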
| 3,603 | Python | 33.990291 | 91 | 0.472384 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_css_builtins.py | """
pygments.lexers._css_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file is autogenerated by scripts/get_css_properties.py
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
_css_properties = (
'-webkit-line-clamp',
'accent-color',
'align-content',
'align-items',
'align-self',
'alignment-baseline',
'all',
'animation',
'animation-delay',
'animation-direction',
'animation-duration',
'animation-fill-mode',
'animation-iteration-count',
'animation-name',
'animation-play-state',
'animation-timing-function',
'appearance',
'aspect-ratio',
'azimuth',
'backface-visibility',
'background',
'background-attachment',
'background-blend-mode',
'background-clip',
'background-color',
'background-image',
'background-origin',
'background-position',
'background-repeat',
'background-size',
'baseline-shift',
'baseline-source',
'block-ellipsis',
'block-size',
'block-step',
'block-step-align',
'block-step-insert',
'block-step-round',
'block-step-size',
'bookmark-label',
'bookmark-level',
'bookmark-state',
'border',
'border-block',
'border-block-color',
'border-block-end',
'border-block-end-color',
'border-block-end-style',
'border-block-end-width',
'border-block-start',
'border-block-start-color',
'border-block-start-style',
'border-block-start-width',
'border-block-style',
'border-block-width',
'border-bottom',
'border-bottom-color',
'border-bottom-left-radius',
'border-bottom-right-radius',
'border-bottom-style',
'border-bottom-width',
'border-boundary',
'border-collapse',
'border-color',
'border-end-end-radius',
'border-end-start-radius',
'border-image',
'border-image-outset',
'border-image-repeat',
'border-image-slice',
'border-image-source',
'border-image-width',
'border-inline',
'border-inline-color',
'border-inline-end',
'border-inline-end-color',
'border-inline-end-style',
'border-inline-end-width',
'border-inline-start',
'border-inline-start-color',
'border-inline-start-style',
'border-inline-start-width',
'border-inline-style',
'border-inline-width',
'border-left',
'border-left-color',
'border-left-style',
'border-left-width',
'border-radius',
'border-right',
'border-right-color',
'border-right-style',
'border-right-width',
'border-spacing',
'border-start-end-radius',
'border-start-start-radius',
'border-style',
'border-top',
'border-top-color',
'border-top-left-radius',
'border-top-right-radius',
'border-top-style',
'border-top-width',
'border-width',
'bottom',
'box-decoration-break',
'box-shadow',
'box-sizing',
'box-snap',
'break-after',
'break-before',
'break-inside',
'caption-side',
'caret',
'caret-color',
'caret-shape',
'chains',
'clear',
'clip',
'clip-path',
'clip-rule',
'color',
'color-adjust',
'color-interpolation-filters',
'color-scheme',
'column-count',
'column-fill',
'column-gap',
'column-rule',
'column-rule-color',
'column-rule-style',
'column-rule-width',
'column-span',
'column-width',
'columns',
'contain',
'contain-intrinsic-block-size',
'contain-intrinsic-height',
'contain-intrinsic-inline-size',
'contain-intrinsic-size',
'contain-intrinsic-width',
'container',
'container-name',
'container-type',
'content',
'content-visibility',
'continue',
'counter-increment',
'counter-reset',
'counter-set',
'cue',
'cue-after',
'cue-before',
'cursor',
'direction',
'display',
'dominant-baseline',
'elevation',
'empty-cells',
'fill',
'fill-break',
'fill-color',
'fill-image',
'fill-opacity',
'fill-origin',
'fill-position',
'fill-repeat',
'fill-rule',
'fill-size',
'filter',
'flex',
'flex-basis',
'flex-direction',
'flex-flow',
'flex-grow',
'flex-shrink',
'flex-wrap',
'float',
'float-defer',
'float-offset',
'float-reference',
'flood-color',
'flood-opacity',
'flow',
'flow-from',
'flow-into',
'font',
'font-family',
'font-feature-settings',
'font-kerning',
'font-language-override',
'font-optical-sizing',
'font-palette',
'font-size',
'font-size-adjust',
'font-stretch',
'font-style',
'font-synthesis',
'font-synthesis-small-caps',
'font-synthesis-style',
'font-synthesis-weight',
'font-variant',
'font-variant-alternates',
'font-variant-caps',
'font-variant-east-asian',
'font-variant-emoji',
'font-variant-ligatures',
'font-variant-numeric',
'font-variant-position',
'font-variation-settings',
'font-weight',
'footnote-display',
'footnote-policy',
'forced-color-adjust',
'gap',
'glyph-orientation-vertical',
'grid',
'grid-area',
'grid-auto-columns',
'grid-auto-flow',
'grid-auto-rows',
'grid-column',
'grid-column-end',
'grid-column-start',
'grid-row',
'grid-row-end',
'grid-row-start',
'grid-template',
'grid-template-areas',
'grid-template-columns',
'grid-template-rows',
'hanging-punctuation',
'height',
'hyphenate-character',
'hyphenate-limit-chars',
'hyphenate-limit-last',
'hyphenate-limit-lines',
'hyphenate-limit-zone',
'hyphens',
'image-orientation',
'image-rendering',
'image-resolution',
'initial-letter',
'initial-letter-align',
'initial-letter-wrap',
'inline-size',
'inline-sizing',
'input-security',
'inset',
'inset-block',
'inset-block-end',
'inset-block-start',
'inset-inline',
'inset-inline-end',
'inset-inline-start',
'isolation',
'justify-content',
'justify-items',
'justify-self',
'leading-trim',
'left',
'letter-spacing',
'lighting-color',
'line-break',
'line-clamp',
'line-grid',
'line-height',
'line-height-step',
'line-padding',
'line-snap',
'list-style',
'list-style-image',
'list-style-position',
'list-style-type',
'margin',
'margin-block',
'margin-block-end',
'margin-block-start',
'margin-bottom',
'margin-break',
'margin-inline',
'margin-inline-end',
'margin-inline-start',
'margin-left',
'margin-right',
'margin-top',
'margin-trim',
'marker',
'marker-end',
'marker-knockout-left',
'marker-knockout-right',
'marker-mid',
'marker-pattern',
'marker-segment',
'marker-side',
'marker-start',
'mask',
'mask-border',
'mask-border-mode',
'mask-border-outset',
'mask-border-repeat',
'mask-border-slice',
'mask-border-source',
'mask-border-width',
'mask-clip',
'mask-composite',
'mask-image',
'mask-mode',
'mask-origin',
'mask-position',
'mask-repeat',
'mask-size',
'mask-type',
'max-block-size',
'max-height',
'max-inline-size',
'max-lines',
'max-width',
'min-block-size',
'min-height',
'min-inline-size',
'min-intrinsic-sizing',
'min-width',
'mix-blend-mode',
'nav-down',
'nav-left',
'nav-right',
'nav-up',
'object-fit',
'object-overflow',
'object-position',
'object-view-box',
'offset',
'offset-anchor',
'offset-distance',
'offset-path',
'offset-position',
'offset-rotate',
'opacity',
'order',
'orphans',
'outline',
'outline-color',
'outline-offset',
'outline-style',
'outline-width',
'overflow',
'overflow-anchor',
'overflow-block',
'overflow-clip-margin',
'overflow-inline',
'overflow-wrap',
'overflow-x',
'overflow-y',
'overscroll-behavior',
'overscroll-behavior-block',
'overscroll-behavior-inline',
'overscroll-behavior-x',
'overscroll-behavior-y',
'padding',
'padding-block',
'padding-block-end',
'padding-block-start',
'padding-bottom',
'padding-inline',
'padding-inline-end',
'padding-inline-start',
'padding-left',
'padding-right',
'padding-top',
'page',
'page-break-after',
'page-break-before',
'page-break-inside',
'pause',
'pause-after',
'pause-before',
'perspective',
'perspective-origin',
'pitch',
'pitch-range',
'place-content',
'place-items',
'place-self',
'play-during',
'pointer-events',
'position',
'print-color-adjust',
'property-name',
'quotes',
'region-fragment',
'resize',
'rest',
'rest-after',
'rest-before',
'richness',
'right',
'rotate',
'row-gap',
'ruby-align',
'ruby-merge',
'ruby-overhang',
'ruby-position',
'running',
'scale',
'scroll-behavior',
'scroll-margin',
'scroll-margin-block',
'scroll-margin-block-end',
'scroll-margin-block-start',
'scroll-margin-bottom',
'scroll-margin-inline',
'scroll-margin-inline-end',
'scroll-margin-inline-start',
'scroll-margin-left',
'scroll-margin-right',
'scroll-margin-top',
'scroll-padding',
'scroll-padding-block',
'scroll-padding-block-end',
'scroll-padding-block-start',
'scroll-padding-bottom',
'scroll-padding-inline',
'scroll-padding-inline-end',
'scroll-padding-inline-start',
'scroll-padding-left',
'scroll-padding-right',
'scroll-padding-top',
'scroll-snap-align',
'scroll-snap-stop',
'scroll-snap-type',
'scrollbar-color',
'scrollbar-gutter',
'scrollbar-width',
'shape-image-threshold',
'shape-inside',
'shape-margin',
'shape-outside',
'spatial-navigation-action',
'spatial-navigation-contain',
'spatial-navigation-function',
'speak',
'speak-as',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speech-rate',
'stress',
'string-set',
'stroke',
'stroke-align',
'stroke-alignment',
'stroke-break',
'stroke-color',
'stroke-dash-corner',
'stroke-dash-justify',
'stroke-dashadjust',
'stroke-dasharray',
'stroke-dashcorner',
'stroke-dashoffset',
'stroke-image',
'stroke-linecap',
'stroke-linejoin',
'stroke-miterlimit',
'stroke-opacity',
'stroke-origin',
'stroke-position',
'stroke-repeat',
'stroke-size',
'stroke-width',
'tab-size',
'table-layout',
'text-align',
'text-align-all',
'text-align-last',
'text-combine-upright',
'text-decoration',
'text-decoration-color',
'text-decoration-line',
'text-decoration-skip',
'text-decoration-skip-box',
'text-decoration-skip-ink',
'text-decoration-skip-inset',
'text-decoration-skip-self',
'text-decoration-skip-spaces',
'text-decoration-style',
'text-decoration-thickness',
'text-edge',
'text-emphasis',
'text-emphasis-color',
'text-emphasis-position',
'text-emphasis-skip',
'text-emphasis-style',
'text-group-align',
'text-indent',
'text-justify',
'text-orientation',
'text-overflow',
'text-shadow',
'text-space-collapse',
'text-space-trim',
'text-spacing',
'text-transform',
'text-underline-offset',
'text-underline-position',
'text-wrap',
'top',
'transform',
'transform-box',
'transform-origin',
'transform-style',
'transition',
'transition-delay',
'transition-duration',
'transition-property',
'transition-timing-function',
'translate',
'unicode-bidi',
'user-select',
'vertical-align',
'visibility',
'voice-balance',
'voice-duration',
'voice-family',
'voice-pitch',
'voice-range',
'voice-rate',
'voice-stress',
'voice-volume',
'volume',
'white-space',
'widows',
'width',
'will-change',
'word-boundary-detection',
'word-boundary-expansion',
'word-break',
'word-spacing',
'word-wrap',
'wrap-after',
'wrap-before',
'wrap-flow',
'wrap-inside',
'wrap-through',
'writing-mode',
'z-index',
) | 12,446 | Python | 21.306452 | 70 | 0.5834 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/savi.py | """
pygments.lexers.savi
~~~~~~~~~~~~~~~~~~~~
Lexer for Savi.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Whitespace, Keyword, Name, String, Number, \
Operator, Punctuation, Comment, Generic, Error
__all__ = ['SaviLexer']
# The canonical version of this file can be found in the following repository,
# where it is kept in sync with any language changes, as well as the other
# pygments-like lexers that are maintained for use with other tools:
# - https://github.com/savi-lang/savi/blob/main/tooling/pygments/lexers/savi.py
#
# If you're changing this file in the pygments repository, please ensure that
# any changes you make are also propagated to the official Savi repository,
# in order to avoid accidental clobbering of your changes later when an update
# from the Savi repository flows forward into the pygments repository.
#
# If you're changing this file in the Savi repository, please ensure that
# any changes you make are also reflected in the other pygments-like lexers
# (rouge, vscode, etc) so that all of the lexers can be kept cleanly in sync.
class SaviLexer(RegexLexer):
"""
For Savi source code.
  .. versionadded:: 2.10
"""
name = 'Savi'
url = 'https://github.com/savi-lang/savi'
aliases = ['savi']
filenames = ['*.savi']
tokens = {
"root": [
# Line Comment
(r'//.*?$', Comment.Single),
# Doc Comment
(r'::.*?$', Comment.Single),
# Capability Operator
(r'(\')(\w+)(?=[^\'])', bygroups(Operator, Name)),
# Double-Quote String
(r'\w?"', String.Double, "string.double"),
# Single-Char String
(r"'", String.Char, "string.char"),
# Type Name
(r'(_?[A-Z]\w*)', Name.Class),
# Nested Type Name
(r'(\.)(\s*)(_?[A-Z]\w*)', bygroups(Punctuation, Whitespace, Name.Class)),
# Declare
(r'^([ \t]*)(:\w+)',
bygroups(Whitespace, Name.Tag),
"decl"),
# Error-Raising Calls/Names
(r'((\w+|\+|\-|\*)\!)', Generic.Deleted),
# Numeric Values
(r'\b\d([\d_]*(\.[\d_]+)?)\b', Number),
# Hex Numeric Values
(r'\b0x([0-9a-fA-F_]+)\b', Number.Hex),
# Binary Numeric Values
(r'\b0b([01_]+)\b', Number.Bin),
# Function Call (with braces)
(r'\w+(?=\()', Name.Function),
# Function Call (with receiver)
(r'(\.)(\s*)(\w+)', bygroups(Punctuation, Whitespace, Name.Function)),
# Function Call (with self receiver)
(r'(@)(\w+)', bygroups(Punctuation, Name.Function)),
# Parenthesis
(r'\(', Punctuation, "root"),
(r'\)', Punctuation, "#pop"),
# Brace
(r'\{', Punctuation, "root"),
(r'\}', Punctuation, "#pop"),
# Bracket
(r'\[', Punctuation, "root"),
(r'(\])(\!)', bygroups(Punctuation, Generic.Deleted), "#pop"),
(r'\]', Punctuation, "#pop"),
# Punctuation
(r'[,;:\.@]', Punctuation),
# Piping Operators
(r'(\|\>)', Operator),
# Branching Operators
(r'(\&\&|\|\||\?\?|\&\?|\|\?|\.\?)', Operator),
# Comparison Operators
(r'(\<\=\>|\=\~|\=\=|\<\=|\>\=|\<|\>)', Operator),
# Arithmetic Operators
(r'(\+|\-|\/|\*|\%)', Operator),
# Assignment Operators
(r'(\=)', Operator),
# Other Operators
(r'(\!|\<\<|\<|\&|\|)', Operator),
# Identifiers
(r'\b\w+\b', Name),
# Whitespace
(r'[ \t\r]+\n*|\n+', Whitespace),
],
# Declare (nested rules)
"decl": [
(r'\b[a-z_]\w*\b(?!\!)', Keyword.Declaration),
(r':', Punctuation, "#pop"),
(r'\n', Whitespace, "#pop"),
include("root"),
],
# Double-Quote String (nested rules)
"string.double": [
(r'\\\(', String.Interpol, "string.interpolation"),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
(r'\\[bfnrt\\\']', String.Escape),
(r'\\"', String.Escape),
(r'"', String.Double, "#pop"),
(r'[^\\"]+', String.Double),
(r'.', Error),
],
# Single-Char String (nested rules)
"string.char": [
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
(r'\\[bfnrt\\\']', String.Escape),
(r"\\'", String.Escape),
(r"'", String.Char, "#pop"),
(r"[^\\']+", String.Char),
(r'.', Error),
],
# Interpolation inside String (nested rules)
"string.interpolation": [
(r"\)", String.Interpol, "#pop"),
include("root"),
]
}
| 4,645 | Python | 26.16959 | 80 | 0.53972 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/dsls.py | """
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \
include, default, this, using, combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ProtoBufLexer', 'ZeekLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for Protocol Buffer definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
url = 'https://developers.google.com/protocol-buffers/'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Whitespace),
(r'[,;{}\[\]()<>]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated',
'reserved', 'default', 'packed', 'ctype', 'extensions', 'to',
'max', 'rpc', 'returns', 'oneof', 'syntax'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Whitespace, Operator)),
(r'[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
class ThriftLexer(RegexLexer):
"""
For Thrift interface definitions.
.. versionadded:: 2.1
"""
name = 'Thrift'
url = 'https://thrift.apache.org/'
aliases = ['thrift']
filenames = ['*.thrift']
mimetypes = ['application/x-thrift']
tokens = {
'root': [
include('whitespace'),
include('comments'),
(r'"', String.Double, combined('stringescape', 'dqs')),
(r'\'', String.Single, combined('stringescape', 'sqs')),
(r'(namespace)(\s+)',
bygroups(Keyword.Namespace, Whitespace), 'namespace'),
(r'(enum|union|struct|service|exception)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'class'),
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
include('keywords'),
include('numbers'),
(r'[&=]', Operator),
(r'[:;,{}()<>\[\]]', Punctuation),
(r'[a-zA-Z_](\.\w|\w)*', Name),
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
],
'comments': [
(r'#.*$', Comment),
(r'//.*?\n', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
],
'stringescape': [
(r'\\([\\nrt"\'])', String.Escape),
],
'dqs': [
(r'"', String.Double, '#pop'),
(r'[^\\"\n]+', String.Double),
],
'sqs': [
(r"'", String.Single, '#pop'),
(r'[^\\\'\n]+', String.Single),
],
'namespace': [
(r'[a-z*](\.\w|\w)*', Name.Namespace, '#pop'),
default('#pop'),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'keywords': [
(r'(async|oneway|extends|throws|required|optional)\b', Keyword),
(r'(true|false)\b', Keyword.Constant),
(r'(const|typedef)\b', Keyword.Declaration),
(words((
'cpp_namespace', 'cpp_include', 'cpp_type', 'java_package',
'cocoa_prefix', 'csharp_namespace', 'delphi_namespace',
'php_namespace', 'py_module', 'perl_package',
'ruby_namespace', 'smalltalk_category', 'smalltalk_prefix',
'xsd_all', 'xsd_optional', 'xsd_nillable', 'xsd_namespace',
'xsd_attrs', 'include'), suffix=r'\b'),
Keyword.Namespace),
(words((
'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double',
'string', 'binary', 'map', 'list', 'set', 'slist',
'senum'), suffix=r'\b'),
Keyword.Type),
(words((
'BEGIN', 'END', '__CLASS__', '__DIR__', '__FILE__',
'__FUNCTION__', '__LINE__', '__METHOD__', '__NAMESPACE__',
'abstract', 'alias', 'and', 'args', 'as', 'assert', 'begin',
'break', 'case', 'catch', 'class', 'clone', 'continue',
'declare', 'def', 'default', 'del', 'delete', 'do', 'dynamic',
'elif', 'else', 'elseif', 'elsif', 'end', 'enddeclare',
'endfor', 'endforeach', 'endif', 'endswitch', 'endwhile',
'ensure', 'except', 'exec', 'finally', 'float', 'for',
'foreach', 'function', 'global', 'goto', 'if', 'implements',
'import', 'in', 'inline', 'instanceof', 'interface', 'is',
'lambda', 'module', 'native', 'new', 'next', 'nil', 'not',
'or', 'pass', 'public', 'print', 'private', 'protected',
'raise', 'redo', 'rescue', 'retry', 'register', 'return',
'self', 'sizeof', 'static', 'super', 'switch', 'synchronized',
'then', 'this', 'throw', 'transient', 'try', 'undef',
'unless', 'unsigned', 'until', 'use', 'var', 'virtual',
'volatile', 'when', 'while', 'with', 'xor', 'yield'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
],
'numbers': [
(r'[+-]?(\d+\.\d+([eE][+-]?\d+)?|\.?\d+[eE][+-]?\d+)', Number.Float),
(r'[+-]?0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
}
class ZeekLexer(RegexLexer):
"""
For Zeek scripts.
.. versionadded:: 2.5
"""
name = 'Zeek'
url = 'https://www.zeek.org/'
aliases = ['zeek', 'bro']
filenames = ['*.zeek', '*.bro']
_hex = r'[0-9a-fA-F]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
include('whitespace'),
include('comments'),
include('directives'),
include('attributes'),
include('types'),
include('keywords'),
include('literals'),
include('operators'),
include('punctuation'),
(r'((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(?=\s*\()',
Name.Function),
include('identifiers'),
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(Text, Whitespace)),
],
'comments': [
(r'#.*$', Comment),
],
'directives': [
(r'@(load-plugin|load-sigs|load|unload)\b.*$', Comment.Preproc),
(r'@(DEBUG|DIR|FILENAME|deprecated|if|ifdef|ifndef|else|endif)\b', Comment.Preproc),
(r'(@prefixes)(\s*)((\+?=).*)$', bygroups(Comment.Preproc,
Whitespace, Comment.Preproc)),
],
'attributes': [
(words(('redef', 'priority', 'log', 'optional', 'default', 'add_func',
'delete_func', 'expire_func', 'read_expire', 'write_expire',
'create_expire', 'synchronized', 'persistent', 'rotate_interval',
'rotate_size', 'encrypt', 'raw_output', 'mergeable', 'error_handler',
'type_column', 'deprecated'),
prefix=r'&', suffix=r'\b'),
Keyword.Pseudo),
],
'types': [
(words(('any',
'enum', 'record', 'set', 'table', 'vector',
'function', 'hook', 'event',
'addr', 'bool', 'count', 'double', 'file', 'int', 'interval',
'pattern', 'port', 'string', 'subnet', 'time'),
suffix=r'\b'),
Keyword.Type),
(r'(opaque)(\s+)(of)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
bygroups(Keyword.Type, Whitespace, Operator.Word, Whitespace, Keyword.Type)),
(r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)(\s*)\b(record|enum)\b',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Operator, Whitespace, Keyword.Type)),
(r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)',
bygroups(Keyword, Whitespace, Name, Whitespace, Operator)),
(r'(redef)(\s+)(record|enum)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
bygroups(Keyword, Whitespace, Keyword.Type, Whitespace, Name.Class)),
],
'keywords': [
(words(('redef', 'export', 'if', 'else', 'for', 'while',
'return', 'break', 'next', 'continue', 'fallthrough',
'switch', 'default', 'case',
'add', 'delete',
'when', 'timeout', 'schedule'),
suffix=r'\b'),
Keyword),
(r'(print)\b', Keyword),
(r'(global|local|const|option)\b', Keyword.Declaration),
(r'(module)(\s+)(([A-Za-z_]\w*)(?:::([A-Za-z_]\w*))*)\b',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
],
'literals': [
(r'"', String, 'string'),
# Not the greatest match for patterns, but generally helps
# disambiguate between start of a pattern and just a division
# operator.
(r'/(?=.*/)', String.Regex, 'regex'),
(r'(T|F)\b', Keyword.Constant),
# Port
(r'\d{1,5}/(udp|tcp|icmp|unknown)\b', Number),
# IPv4 Address
(r'(\d{1,3}.){3}(\d{1,3})\b', Number),
# IPv6 Address
(r'\[([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?((\d{1,3}.){3}(\d{1,3}))?\]', Number),
# Numeric
(r'0[xX]' + _hex + r'+\b', Number.Hex),
(_float + r'\s*(day|hr|min|sec|msec|usec)s?\b', Number.Float),
(_float + r'\b', Number.Float),
(r'(\d+)\b', Number.Integer),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
],
'operators': [
(r'[!%*/+<=>~|&^-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|as|is|of)\b', Operator.Word),
(r'\??\$', Operator),
],
'punctuation': [
(r'[{}()\[\],;.]', Punctuation),
# The "ternary if", which uses '?' and ':', could instead be
# treated as an Operator, but colons are more frequently used to
# separate field/identifier names from their types, so the (often)
# less-prominent Punctuation is used even with '?' for consistency.
(r'[?:]', Punctuation),
],
'identifiers': [
(r'([a-zA-Z_]\w*)(::)', bygroups(Name, Punctuation)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'\\.', String.Escape),
(r'%-?[0-9]*(\.[0-9]+)?[DTd-gsx]', String.Escape),
(r'"', String, '#pop'),
(r'.', String),
],
'regex': [
(r'\\.', String.Escape),
(r'/', String.Regex, '#pop'),
(r'.', String.Regex),
],
}
BroLexer = ZeekLexer
class PuppetLexer(RegexLexer):
"""
For Puppet configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
url = 'https://puppet.com/'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'\s+', Whitespace),
],
'comments': [
(r'(\s*)(#.*)$', bygroups(Whitespace, Comment)),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
(r'[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
# Left out 'group' and 'require'
# Since they're often used as attributes
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
    RSL is the formal specification
    language used in the RAISE (Rigorous Approach to Industrial Software
    Engineering) method.
.. versionadded:: 2.0
"""
name = 'RSL'
url = 'http://en.wikipedia.org/wiki/RAISE'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^([ \t]*)([\w]+)([ \t]*)(:[^:])', bygroups(Whitespace,
Name.Function, Whitespace, Name.Function)),
(r'(^[ \t]*)([\w]+)([ \t]*)(\([\w\s,]*\))([ \t]*)(is|as)',
bygroups(Whitespace, Name.Function, Whitespace, Text,
Whitespace, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'\s+', Whitespace),
(r'.', Text),
],
}
def analyse_text(text):
"""
        Check for the most common text at the beginning of an RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
class MscgenLexer(RegexLexer):
"""
For Mscgen files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
url = 'http://www.mcternan.me.uk/mscgen/'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Whitespace, Operator, Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For SampleManager VGL source code.
.. versionadded:: 1.6
"""
name = 'VGL'
url = 'http://www.thermoscientific.com/samplemanager'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Whitespace),
(r'\s+', Whitespace)
]
}
class AlloyLexer(RegexLexer):
"""
For Alloy source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
url = 'http://alloy.mit.edu'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w]*"*'
string_rex = r'"\b(\\\\|\\[^\\]|[^"\\])*"'
text_tuple = (r'[^\S\n]+', Whitespace)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'fact': [
include('fun'),
(string_rex, String, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Whitespace),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(var|this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as|steps)\b', Keyword),
(r'(always|after|eventually|until|release)\b', Keyword), # future time operators
(r'(historically|before|once|since|triggered)\b', Keyword), # past time operators
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|assert)(\s+)', bygroups(Keyword, Whitespace), 'fun'),
(r'(fact)(\s+)', bygroups(Keyword, Whitespace), 'fact'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.\.|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().\';]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(string_rex, String),
(r'\n', Whitespace),
]
}
class PanLexer(RegexLexer):
"""
Lexer for pan source files.
Based on tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
url = 'https://github.com/quattor/pan/'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final',
'prefix', 'unique', 'object', 'foreach', 'include', 'template',
'function', 'variable', 'structure', 'extensible', 'declaration'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches',
'replace', 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase',
'debug', 'error', 'traceback', 'deprecated', 'base64_decode',
'base64_encode', 'digest', 'escape', 'unescape', 'append', 'create',
'first', 'nlist', 'key', 'list', 'merge', 'next', 'prepend', 'is_boolean',
'is_defined', 'is_double', 'is_list', 'is_long', 'is_nlist', 'is_null',
'is_number', 'is_property', 'is_resource', 'is_string', 'to_boolean',
'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'),
prefix=r'\b', suffix=r'\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Whitespace),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}
class CrmshLexer(RegexLexer):
"""
Lexer for crmsh configuration files for Pacemaker clusters.
.. versionadded:: 2.1
"""
name = 'Crmsh'
url = 'http://crmsh.github.io/'
aliases = ['crmsh', 'pcmk']
filenames = ['*.crmsh', '*.pcmk']
mimetypes = []
elem = words((
'node', 'primitive', 'group', 'clone', 'ms', 'location',
'colocation', 'order', 'fencing_topology', 'rsc_ticket',
'rsc_template', 'property', 'rsc_defaults',
'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
'tag'), suffix=r'(?![\w#$-])')
sub = words((
'params', 'meta', 'operations', 'op', 'rule',
'attributes', 'utilization'), suffix=r'(?![\w#$-])')
acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
val_qual = (r'(?:string|version|number)')
rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
r'start|promote|demote|stop)')
tokens = {
'root': [
(r'^(#.*)(\n)?', bygroups(Comment, Whitespace)),
# attr=value (nvpair)
(r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
bygroups(Name.Attribute, Punctuation, String)),
# need this construct, otherwise numeric node ids
# are matched as scores
# elem id:
(r'(node)(\s+)([\w#$-]+)(:)',
bygroups(Keyword, Whitespace, Name, Punctuation)),
# scores
(r'([+-]?([0-9]+|inf)):', Number),
# keywords (elements and other)
(elem, Keyword),
(sub, Keyword),
(acl, Keyword),
# binary operators
(r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops), Operator.Word),
# other operators
(bin_rel, Operator.Word),
(un_ops, Operator.Word),
(date_exp, Operator.Word),
# builtin attributes (e.g. #uname)
(r'#[a-z]+(?![\w#$-])', Name.Builtin),
# acl_mod:blah
(r'(%s)(:)("(?:""|[^"])*"|\S+)' % acl_mod,
bygroups(Keyword, Punctuation, Name)),
# rsc_id[:(role|action)]
# NB: this matches all other identifiers
(r'([\w#$-]+)(?:(:)(%s))?(?![\w#$-])' % rsc_role_action,
bygroups(Name, Punctuation, Operator.Word)),
# punctuation
(r'(\\(?=\n)|[\[\](){}/:@])', Punctuation),
(r'\s+|\n', Whitespace),
],
}
class FlatlineLexer(RegexLexer):
"""
Lexer for Flatline expressions.
.. versionadded:: 2.2
"""
name = 'Flatline'
url = 'https://github.com/bigmlcom/flatline'
aliases = ['flatline']
filenames = []
mimetypes = ['text/x-flatline']
special_forms = ('let',)
builtins = (
"!=", "*", "+", "-", "<", "<=", "=", ">", ">=", "abs", "acos", "all",
"all-but", "all-with-defaults", "all-with-numeric-default", "and",
"asin", "atan", "avg", "avg-window", "bin-center", "bin-count", "call",
"category-count", "ceil", "cond", "cond-window", "cons", "cos", "cosh",
"count", "diff-window", "div", "ensure-value", "ensure-weighted-value",
"epoch", "epoch-day", "epoch-fields", "epoch-hour", "epoch-millisecond",
"epoch-minute", "epoch-month", "epoch-second", "epoch-weekday",
"epoch-year", "exp", "f", "field", "field-prop", "fields", "filter",
"first", "floor", "head", "if", "in", "integer", "language", "length",
"levenshtein", "linear-regression", "list", "ln", "log", "log10", "map",
"matches", "matches?", "max", "maximum", "md5", "mean", "median", "min",
"minimum", "missing", "missing-count", "missing?", "missing_count",
"mod", "mode", "normalize", "not", "nth", "occurrences", "or",
"percentile", "percentile-label", "population", "population-fraction",
"pow", "preferred", "preferred?", "quantile-label", "rand", "rand-int",
"random-value", "re-quote", "real", "replace", "replace-first", "rest",
"round", "row-number", "segment-label", "sha1", "sha256", "sin", "sinh",
"sqrt", "square", "standard-deviation", "standard_deviation", "str",
"subs", "sum", "sum-squares", "sum-window", "sum_squares", "summary",
"summary-no", "summary-str", "tail", "tan", "tanh", "to-degrees",
"to-radians", "variance", "vectorize", "weighted-random-value", "window",
"winnow", "within-percentiles?", "z-score",
)
valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
tokens = {
'root': [
# whitespaces - usually not relevant
(r'[,]+', Text),
(r'\s+', Whitespace),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0x-?[a-f\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"\\(.|[a-z]+)", String.Char),
# expression template placeholder
(r'_', String.Symbol),
# highlight the special forms
(words(special_forms, suffix=' '), Keyword),
# highlight the builtins
(words(builtins, suffix=' '), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# parentheses
(r'(\(|\))', Punctuation),
],
}
class SnowballLexer(ExtendedRegexLexer):
"""
Lexer for Snowball source code.
.. versionadded:: 2.2
"""
name = 'Snowball'
url = 'http://snowballstem.org/'
aliases = ['snowball']
filenames = ['*.sbl']
_ws = r'\n\r\t '
def __init__(self, **options):
self._reset_stringescapes()
ExtendedRegexLexer.__init__(self, **options)
def _reset_stringescapes(self):
self._start = "'"
self._end = "'"
def _string(do_string_first):
def callback(lexer, match, ctx):
s = match.start()
text = match.group()
string = re.compile(r'([^%s]*)(.)' % re.escape(lexer._start)).match
escape = re.compile(r'([^%s]*)(.)' % re.escape(lexer._end)).match
pos = 0
do_string = do_string_first
while pos < len(text):
if do_string:
match = string(text, pos)
yield s + match.start(1), String.Single, match.group(1)
if match.group(2) == "'":
yield s + match.start(2), String.Single, match.group(2)
ctx.stack.pop()
break
yield s + match.start(2), String.Escape, match.group(2)
pos = match.end()
match = escape(text, pos)
yield s + match.start(), String.Escape, match.group()
if match.group(2) != lexer._end:
ctx.stack[-1] = 'escape'
break
pos = match.end()
do_string = True
ctx.pos = s + match.end()
return callback
def _stringescapes(lexer, match, ctx):
lexer._start = match.group(3)
lexer._end = match.group(5)
return bygroups(Keyword.Reserved, Whitespace, String.Escape, Whitespace,
String.Escape)(lexer, match, ctx)
tokens = {
'root': [
(words(('len', 'lenof'), suffix=r'\b'), Operator.Word),
include('root1'),
],
'root1': [
(r'[%s]+' % _ws, Whitespace),
(r'\d+', Number.Integer),
(r"'", String.Single, 'string'),
(r'[()]', Punctuation),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*', Comment.Single),
(r'[!*+\-/<=>]=|[-=]>|<[+-]|[$*+\-/<=>?\[\]]', Operator),
(words(('as', 'get', 'hex', 'among', 'define', 'decimal',
'backwardmode'), suffix=r'\b'),
Keyword.Reserved),
(words(('strings', 'booleans', 'integers', 'routines', 'externals',
'groupings'), suffix=r'\b'),
Keyword.Reserved, 'declaration'),
(words(('do', 'or', 'and', 'for', 'hop', 'non', 'not', 'set', 'try',
'fail', 'goto', 'loop', 'next', 'test', 'true',
'false', 'unset', 'atmark', 'attach', 'delete', 'gopast',
'insert', 'repeat', 'sizeof', 'tomark', 'atleast',
'atlimit', 'reverse', 'setmark', 'tolimit', 'setlimit',
'backwards', 'substring'), suffix=r'\b'),
Operator.Word),
(words(('size', 'limit', 'cursor', 'maxint', 'minint'),
suffix=r'\b'),
Name.Builtin),
(r'(stringdef\b)([%s]*)([^%s]+)' % (_ws, _ws),
bygroups(Keyword.Reserved, Whitespace, String.Escape)),
(r'(stringescapes\b)([%s]*)(.)([%s]*)(.)' % (_ws, _ws),
_stringescapes),
(r'[A-Za-z]\w*', Name),
],
'declaration': [
(r'\)', Punctuation, '#pop'),
(words(('len', 'lenof'), suffix=r'\b'), Name,
('root1', 'declaration')),
include('root1'),
],
'string': [
(r"[^']*'", _string(True)),
],
'escape': [
(r"[^']*'", _string(False)),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
self._reset_stringescapes()
return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context)
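# A minimal usage sketch for two of the lexers above, using hypothetical
# snippets. The RSL sample is shaped so that RslLexer.analyse_text() (which
# looks for "scheme ... = class type") reports full confidence, and the
# Protocol Buffers sample walks through the 'message' state.
if __name__ == '__main__':
    _rsl_sample = 'scheme COUNTER = class type Nat end'
    print('RSL confidence:', RslLexer.analyse_text(_rsl_sample))
    _proto_sample = (
        'syntax = "proto3";\n'
        'message Person {\n'
        '  string name = 1;\n'
        '  int32 id = 2;\n'
        '}\n'
    )
    for _token, _value in ProtoBufLexer().get_tokens(_proto_sample):
        print(_token, repr(_value))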
| 36,774 | Python | 36.449083 | 107 | 0.437891 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/elm.py | """
pygments.lexers.elm
~~~~~~~~~~~~~~~~~~~
Lexer for the Elm programming language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups
from pygments.token import Comment, Keyword, Name, Number, Punctuation, \
String, Whitespace
__all__ = ['ElmLexer']
class ElmLexer(RegexLexer):
"""
For Elm source code.
.. versionadded:: 2.1
"""
name = 'Elm'
url = 'http://elm-lang.org/'
aliases = ['elm']
filenames = ['*.elm']
mimetypes = ['text/x-elm']
validName = r'[a-z_][a-zA-Z0-9_\']*'
specialName = r'^main '
builtinOps = (
'~', '||', '|>', '|', '`', '^', '\\', '\'', '>>', '>=', '>', '==',
'=', '<~', '<|', '<=', '<<', '<-', '<', '::', ':', '/=', '//', '/',
'..', '.', '->', '-', '++', '+', '*', '&&', '%',
)
reservedWords = words((
'alias', 'as', 'case', 'else', 'if', 'import', 'in',
'let', 'module', 'of', 'port', 'then', 'type', 'where',
), suffix=r'\b')
tokens = {
'root': [
# Comments
(r'\{-', Comment.Multiline, 'comment'),
(r'--.*', Comment.Single),
# Whitespace
(r'\s+', Whitespace),
# Strings
(r'"', String, 'doublequote'),
# Modules
(r'^(\s*)(module)(\s*)', bygroups(Whitespace, Keyword.Namespace,
Whitespace), 'imports'),
# Imports
(r'^(\s*)(import)(\s*)', bygroups(Whitespace, Keyword.Namespace,
Whitespace), 'imports'),
# Shaders
(r'\[glsl\|.*', Name.Entity, 'shader'),
# Keywords
(reservedWords, Keyword.Reserved),
# Types
(r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
# Main
(specialName, Keyword.Reserved),
# Prefix Operators
(words((builtinOps), prefix=r'\(', suffix=r'\)'), Name.Function),
# Infix Operators
(words(builtinOps), Name.Function),
# Numbers
include('numbers'),
# Variable Names
(validName, Name.Variable),
# Parens
(r'[,()\[\]{}]', Punctuation),
],
'comment': [
(r'-(?!\})', Comment.Multiline),
(r'\{-', Comment.Multiline, 'comment'),
(r'[^-}]', Comment.Multiline),
(r'-\}', Comment.Multiline, '#pop'),
],
'doublequote': [
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\[nrfvb\\"]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
'imports': [
(r'\w+(\.\w+)*', Name.Class, '#pop'),
],
'numbers': [
(r'_?\d+\.(?=\d+)', Number.Float),
(r'_?\d+', Number.Integer),
],
'shader': [
(r'\|(?!\])', Name.Entity),
(r'\|\]', Name.Entity, '#pop'),
(r'(.*)(\n)', bygroups(Name.Entity, Whitespace)),
],
}
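# A minimal usage sketch: tokenize a small, hypothetical Elm module and print
# the token stream, mainly exercising the module, type, and operator rules
# defined above.
if __name__ == '__main__':
    _sample = (
        'module Main exposing (main)\n'
        '\n'
        'add : Int -> Int -> Int\n'
        'add x y =\n'
        '    x + y\n'
    )
    for _token, _value in ElmLexer().get_tokens(_sample):
        print(_token, repr(_value))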
| 3,152 | Python | 24.224 | 77 | 0.414657 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/jmespath.py | """
pygments.lexers.jmespath
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the JMESPath language
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import String, Punctuation, Whitespace, Name, Operator, \
Number, Literal, Keyword
__all__ = ['JMESPathLexer']
class JMESPathLexer(RegexLexer):
"""
For JMESPath queries.
"""
name = 'JMESPath'
url = 'https://jmespath.org'
filenames = ['*.jp']
aliases = ['jmespath', 'jp']
tokens = {
'string': [
(r"'(\\(.|\n)|[^'\\])*'", String),
],
'punctuation': [
(r'(\[\?|[\.\*\[\],:\(\)\{\}\|])', Punctuation),
],
'ws': [
(r" |\t|\n|\r", Whitespace)
],
"dq-identifier": [
(r'[^\\"]+', Name.Variable),
(r'\\"', Name.Variable),
(r'.', Punctuation, '#pop'),
],
'identifier': [
(r'(&)?(")', bygroups(Name.Variable, Punctuation), 'dq-identifier'),
(r'(")?(&?[A-Za-z][A-Za-z0-9_-]*)(")?', bygroups(Punctuation, Name.Variable, Punctuation)),
],
'root': [
include('ws'),
include('string'),
(r'(==|!=|<=|>=|<|>|&&|\|\||!)', Operator),
include('punctuation'),
(r'@', Name.Variable.Global),
(r'(&?[A-Za-z][A-Za-z0-9_]*)(\()', bygroups(Name.Function, Punctuation)),
(r'(&)(\()', bygroups(Name.Variable, Punctuation)),
include('identifier'),
(r'-?\d+', Number),
(r'`', Literal, 'literal'),
],
'literal': [
include('ws'),
include('string'),
include('punctuation'),
(r'(false|true|null)\b', Keyword.Constant),
include('identifier'),
(r'-?\d+\.?\d*([eE][-+]\d+)?', Number),
(r'\\`', Literal),
(r'`', Literal, '#pop'),
]
}
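# A minimal usage sketch: tokenize a small, hypothetical JMESPath query that
# includes a backtick literal, showing how the 'literal' state is entered and
# left by the rules above.
if __name__ == '__main__':
    _query = "people[?age > `20`].name | sort(@)"
    for _token, _value in JMESPathLexer().get_tokens(_query):
        print(_token, repr(_value))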
| 2,059 | Python | 28.855072 | 103 | 0.443905 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ampl.py | """
pygments.lexers.ampl
~~~~~~~~~~~~~~~~~~~~
Lexers for the AMPL language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['AmplLexer']
class AmplLexer(RegexLexer):
"""
For AMPL source code.
.. versionadded:: 2.2
"""
name = 'Ampl'
url = 'http://ampl.com/'
aliases = ['ampl']
filenames = ['*.run']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Whitespace),
(r'#.*?\n', Comment.Single),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(words((
'call', 'cd', 'close', 'commands', 'data', 'delete', 'display',
'drop', 'end', 'environ', 'exit', 'expand', 'include', 'load',
'model', 'objective', 'option', 'problem', 'purge', 'quit',
'redeclare', 'reload', 'remove', 'reset', 'restore', 'shell',
'show', 'solexpand', 'solution', 'solve', 'update', 'unload',
'xref', 'coeff', 'coef', 'cover', 'obj', 'interval', 'default',
'from', 'to', 'to_come', 'net_in', 'net_out', 'dimen',
'dimension', 'check', 'complements', 'write', 'function',
'pipe', 'format', 'if', 'then', 'else', 'in', 'while', 'repeat',
'for'), suffix=r'\b'), Keyword.Reserved),
(r'(integer|binary|symbolic|ordered|circular|reversed|INOUT|IN|OUT|LOCAL)',
Keyword.Type),
(r'\".*?\"', String.Double),
(r'\'.*?\'', String.Single),
(r'[()\[\]{},;:]+', Punctuation),
(r'\b(\w+)(\.)(astatus|init0|init|lb0|lb1|lb2|lb|lrc|'
r'lslack|rc|relax|slack|sstatus|status|ub0|ub1|ub2|'
r'ub|urc|uslack|val)',
bygroups(Name.Variable, Punctuation, Keyword.Reserved)),
(r'(set|param|var|arc|minimize|maximize|subject to|s\.t\.|subj to|'
r'node|table|suffix|read table|write table)(\s+)(\w+)',
bygroups(Keyword.Declaration, Whitespace, Name.Variable)),
(r'(param)(\s*)(:)(\s*)(\w+)(\s*)(:)(\s*)((\w|\s)+)',
bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace,
Name.Variable, Whitespace, Punctuation, Whitespace, Name.Variable)),
(r'(let|fix|unfix)(\s*)((?:\{.*\})?)(\s*)(\w+)',
bygroups(Keyword.Declaration, Whitespace, using(this), Whitespace,
Name.Variable)),
(words((
'abs', 'acos', 'acosh', 'alias', 'asin', 'asinh', 'atan', 'atan2',
'atanh', 'ceil', 'ctime', 'cos', 'exp', 'floor', 'log', 'log10',
'max', 'min', 'precision', 'round', 'sin', 'sinh', 'sqrt', 'tan',
'tanh', 'time', 'trunc', 'Beta', 'Cauchy', 'Exponential', 'Gamma',
'Irand224', 'Normal', 'Normal01', 'Poisson', 'Uniform', 'Uniform01',
'num', 'num0', 'ichar', 'char', 'length', 'substr', 'sprintf',
'match', 'sub', 'gsub', 'print', 'printf', 'next', 'nextw', 'prev',
'prevw', 'first', 'last', 'ord', 'ord0', 'card', 'arity',
'indexarity'), prefix=r'\b', suffix=r'\b'), Name.Builtin),
(r'(\+|\-|\*|/|\*\*|=|<=|>=|==|\||\^|<|>|\!|\.\.|:=|\&|\!=|<<|>>)',
Operator),
(words((
'or', 'exists', 'forall', 'and', 'in', 'not', 'within', 'union',
'diff', 'difference', 'symdiff', 'inter', 'intersect',
'intersection', 'cross', 'setof', 'by', 'less', 'sum', 'prod',
'product', 'div', 'mod'), suffix=r'\b'),
Keyword.Reserved), # Operator.Name but not enough emphasized with that
(r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
(r'\d+([eE][+-]?\d+)?', Number.Integer),
(r'[+-]?Infinity', Number.Integer),
(r'(\w+|(\.(?!\.)))', Text)
]
}
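# A minimal usage sketch: tokenize a few hypothetical lines of an AMPL model,
# exercising the declaration, builtin, and operator rules defined above.
if __name__ == '__main__':
    _model = (
        'set PROD;\n'
        'param rate {PROD} > 0;\n'
        'var Make {PROD} >= 0;\n'
        'maximize Total_Profit: sum {p in PROD} rate[p] * Make[p];\n'
    )
    for _token, _value in AmplLexer().get_tokens(_model):
        print(_token, repr(_value))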
| 4,177 | Python | 45.94382 | 90 | 0.469236 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/crystal.py | """
pygments.lexers.crystal
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Crystal.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, include, bygroups, default, \
words, line_re
from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
Punctuation, Error, Whitespace
__all__ = ['CrystalLexer']
CRYSTAL_OPERATORS = [
'!=', '!~', '!', '%', '&&', '&', '**', '*', '+', '-', '/', '<=>', '<<', '<=', '<',
'===', '==', '=~', '=', '>=', '>>', '>', '[]=', '[]?', '[]', '^', '||', '|', '~'
]
class CrystalLexer(ExtendedRegexLexer):
"""
For Crystal source code.
.. versionadded:: 2.2
"""
name = 'Crystal'
url = 'http://crystal-lang.org'
aliases = ['cr', 'crystal']
filenames = ['*.cr']
mimetypes = ['text/x-crystal']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Crystal...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), String.Delimiter, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs, so limit the recursion depth
if len(heredocstack) < 100:
yield from self.get_tokens_unprocessed(context=ctx)
else:
yield ctx.pos, String.Heredoc, match.group(5)
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), String.Delimiter, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_crystalstrings_rules():
states = {}
states['strings'] = [
(r'\:\w+[!?]?', String.Symbol),
(words(CRYSTAL_OPERATORS, prefix=r'\:'), String.Symbol),
(r":'(\\\\|\\[^\\]|[^'\\])*'", String.Symbol),
# This allows arbitrary text after '\ for simplicity
(r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
(r':"', String.Symbol, 'simple-sym'),
# Crystal doesn't have "symbol:"s but this simplifies function args
(r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# https://crystal-lang.org/docs/syntax_and_semantics/literals/string.html#percent-string-literals
for lbrace, rbrace, bracecc, name in \
('\\{', '\\}', '{}', 'cb'), \
('\\[', '\\]', '\\[\\]', 'sb'), \
('\\(', '\\)', '()', 'pa'), \
('<', '>', '<>', 'ab'), \
('\\|', '\\|', '\\|', 'pi'):
states[name+'-intp-string'] = [
(r'\\' + lbrace, String.Other),
] + (lbrace != rbrace) * [
(lbrace, String.Other, '#push'),
] + [
(rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%Q?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
] + (lbrace != rbrace) * [
(lbrace, String.Other, '#push'),
] + [
(rbrace, String.Other, '#pop'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
# https://crystal-lang.org/docs/syntax_and_semantics/literals/array.html#percent-array-literals
states['strings'].append((r'%[qwi]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + bracecc + ']', String.Regex),
] + (lbrace != rbrace) * [
(lbrace, String.Regex, '#push'),
] + [
(rbrace + '[imsx]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + bracecc + ']', String.Regex),
(r'[^\\#' + bracecc + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
return states
tokens = {
'root': [
(r'#.*?$', Comment.Single),
# keywords
(words('''
abstract asm begin break case do else elsif end ensure extend if in
include next of private protected require rescue return select self super
then unless until when while with yield
'''.split(), suffix=r'\b'), Keyword),
(words('''
previous_def forall out uninitialized __DIR__ __FILE__ __LINE__
__END_LINE__
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
# https://crystal-lang.org/docs/syntax_and_semantics/is_a.html
(r'\.(is_a\?|nil\?|responds_to\?|as\?|as\b)', Keyword.Pseudo),
(words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant),
# start of function, class and module names
(r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Whitespace, Name.Namespace), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(annotation|class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Whitespace, Name.Namespace), 'classname'),
# https://crystal-lang.org/api/toplevel.html
(words('''
instance_sizeof offsetof pointerof sizeof typeof
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
# macros
(r'(?<!\.)(debugger\b|p!|pp!|record\b|spawn\b)', Name.Builtin.Pseudo),
# builtins
(words('''
abort at_exit caller exit gets loop main p pp print printf puts
raise rand read_line sleep spawn sprintf system
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin),
# https://crystal-lang.org/api/Object.html#macro-summary
(r'(?<!\.)(((class_)?((getter|property)\b[!?]?|setter\b))|'
r'(def_(clone|equals|equals_and_hash|hash)|delegate|forward_missing_to)\b)',
Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!:])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=\.index\s)|'
r'(?<=\.scan\s)|'
r'(?<=\.sub\s)|'
r'(?<=\.sub!\s)|'
r'(?<=\.gsub\s)|'
r'(?<=\.gsub!\s)|'
r'(?<=\.match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Whitespace, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Whitespace, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Oct, Whitespace, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Hex, Whitespace, Operator)),
(r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Bin, Whitespace, Operator)),
# 3 separate expressions for floats because any of the 3 optional
# parts makes it a float
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Whitespace, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Whitespace, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+))(\s*)([/?])?',
bygroups(Number.Float, Whitespace, Operator)),
(r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Integer, Whitespace, Operator)),
# Names
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'\$\w+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# https://crystal-lang.org/reference/syntax_and_semantics/literals/char.html
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][A-Z_]+\b(?!::|\.)', Name.Constant),
# macro expansion
(r'\{%', String.Interpol, 'in-macro-control'),
(r'\{\{', String.Interpol, 'in-macro-expr'),
# annotations
(r'(@\[)(\s*)([A-Z]\w*(::[A-Z]\w*)*)',
bygroups(Operator, Whitespace, Name.Decorator), 'in-annot'),
# this is needed because Crystal attributes can look
# like keywords (class) or like this: ` ?!?
(words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'),
bygroups(Operator, Name.Operator)),
(r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
bygroups(Operator, Name)),
# Names can end with [!?] unless it's "!="
(r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name),
(r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Whitespace)
],
'funcname': [
(r'(?:([a-zA-Z_]\w*)(\.))?'
r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
default('#pop')
],
'classname': [
(r'[A-Z_]\w*', Name.Class),
(r'(\()(\s*)([A-Z_]\w*)(\s*)(\))',
bygroups(Punctuation, Whitespace, Name.Class, Whitespace, Punctuation)),
default('#pop')
],
'in-intp': [
(r'\{', String.Interpol, '#push'),
(r'\}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#\{', String.Interpol, 'in-intp'),
],
'string-escaped': [
# https://crystal-lang.org/reference/syntax_and_semantics/literals/string.html
(r'\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})',
String.Escape)
],
'string-intp-escaped': [
include('string-intp'),
include('string-escaped'),
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[imsx]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
],
'in-macro-control': [
(r'\{%', String.Interpol, '#push'),
(r'%\}', String.Interpol, '#pop'),
(r'(for|verbatim)\b', Keyword),
include('root'),
],
'in-macro-expr': [
(r'\{\{', String.Interpol, '#push'),
(r'\}\}', String.Interpol, '#pop'),
include('root'),
],
'in-annot': [
(r'\[', Operator, '#push'),
(r'\]', Operator, '#pop'),
include('root'),
],
}
tokens.update(gen_crystalstrings_rules())
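# A minimal usage sketch: tokenize a small, hypothetical Crystal snippet that
# uses a heredoc, since heredoc_callback above is the trickiest part of this
# lexer. The heredoc name should appear as String.Delimiter and its body as
# String.Heredoc in the printed stream.
if __name__ == '__main__':
    _sample = (
        'text = <<-GREETING\n'
        '  hello from Crystal\n'
        '  GREETING\n'
        'puts text\n'
    )
    for _token, _value in CrystalLexer().get_tokens(_sample):
        print(_token, repr(_value))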
| 15,756 | Python | 42.051912 | 107 | 0.424092 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/jsonnet.py | """
pygments.lexers.jsonnet
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Jsonnet data templating language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import include, RegexLexer, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text, Whitespace
__all__ = ['JsonnetLexer']
jsonnet_token = r'[^\W\d]\w*'
jsonnet_function_token = jsonnet_token + r'(?=\()'
def string_rules(quote_mark):
return [
(r"[^{}\\]".format(quote_mark), String),
(r"\\.", String.Escape),
(quote_mark, String, '#pop'),
]
def quoted_field_name(quote_mark):
return [
(r'([^{quote}\\]|\\.)*{quote}'.format(quote=quote_mark),
Name.Variable, 'field_separator')
]
class JsonnetLexer(RegexLexer):
"""Lexer for Jsonnet source code."""
name = 'Jsonnet'
aliases = ['jsonnet']
filenames = ['*.jsonnet', '*.libsonnet']
url = "https://jsonnet.org"
tokens = {
# Not used by itself
'_comments': [
(r'(//|#).*\n', Comment.Single),
(r'/\*\*([^/]|/(?!\*))*\*/', String.Doc),
(r'/\*([^/]|/(?!\*))*\*/', Comment),
],
'root': [
include('_comments'),
(r"@'.*'", String),
(r'@".*"', String),
(r"'", String, 'singlestring'),
(r'"', String, 'doublestring'),
(r'\|\|\|(.|\n)*\|\|\|', String),
# Jsonnet has no integers, only an IEEE754 64-bit float
(r'[+-]?[0-9]+(.[0-9])?', Number.Float),
# Omit : despite spec because it appears to be used as a field
# separator
(r'[!$~+\-&|^=<>*/%]', Operator),
(r'\{', Punctuation, 'object'),
(r'\[', Punctuation, 'array'),
(r'local\b', Keyword, ('local_name')),
(r'assert\b', Keyword, 'assert'),
(words([
'assert', 'else', 'error', 'false', 'for', 'if', 'import',
'importstr', 'in', 'null', 'tailstrict', 'then', 'self',
'super', 'true',
], suffix=r'\b'), Keyword),
(r'\s+', Whitespace),
(r'function(?=\()', Keyword, 'function_params'),
(r'std\.' + jsonnet_function_token, Name.Builtin, 'function_args'),
(jsonnet_function_token, Name.Function, 'function_args'),
(jsonnet_token, Name.Variable),
(r'[\.()]', Punctuation),
],
'singlestring': string_rules("'"),
'doublestring': string_rules('"'),
'array': [
(r',', Punctuation),
(r'\]', Punctuation, '#pop'),
include('root'),
],
'local_name': [
(jsonnet_function_token, Name.Function, 'function_params'),
(jsonnet_token, Name.Variable),
(r'\s+', Whitespace),
('(?==)', Whitespace, ('#pop', 'local_value')),
],
'local_value': [
(r'=', Operator),
(r';', Punctuation, '#pop'),
include('root'),
],
'assert': [
(r':', Punctuation),
(r';', Punctuation, '#pop'),
include('root'),
],
'function_params': [
(jsonnet_token, Name.Variable),
(r'\(', Punctuation),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\s+', Whitespace),
(r'=', Operator, 'function_param_default'),
],
'function_args': [
(r'\(', Punctuation),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\s+', Whitespace),
include('root'),
],
'object': [
(r'\s+', Whitespace),
(r'local\b', Keyword, 'object_local_name'),
(r'assert\b', Keyword, 'object_assert'),
(r'\[', Operator, 'field_name_expr'),
(fr'(?={jsonnet_token})', Text, 'field_name'),
(r'\}', Punctuation, '#pop'),
(r'"', Name.Variable, 'double_field_name'),
(r"'", Name.Variable, 'single_field_name'),
include('_comments'),
],
'field_name': [
(jsonnet_function_token, Name.Function,
('field_separator', 'function_params')
),
(jsonnet_token, Name.Variable, 'field_separator'),
],
'double_field_name': quoted_field_name('"'),
'single_field_name': quoted_field_name("'"),
'field_name_expr': [
(r'\]', Operator, 'field_separator'),
include('root'),
],
'function_param_default': [
(r'(?=[,\)])', Whitespace, '#pop'),
include('root'),
],
'field_separator': [
(r'\s+', Whitespace),
(r'\+?::?:?', Punctuation, ('#pop', '#pop', 'field_value')),
include('_comments'),
],
'field_value': [
(r',', Punctuation, '#pop'),
(r'\}', Punctuation, '#pop:2'),
include('root'),
],
'object_assert': [
(r':', Punctuation),
(r',', Punctuation, '#pop'),
include('root'),
],
'object_local_name': [
(jsonnet_token, Name.Variable, ('#pop', 'object_local_value')),
(r'\s+', Whitespace),
],
'object_local_value': [
(r'=', Operator),
(r',', Punctuation, '#pop'),
(r'\}', Punctuation, '#pop:2'),
include('root'),
],
}
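# A minimal usage sketch: tokenize a small, hypothetical Jsonnet object with a
# local binding, exercising the object, field_name, and field_value states
# defined above.
if __name__ == '__main__':
    _sample = (
        '{\n'
        '  local greeting = "hello",\n'
        '  message: greeting + " world",\n'
        '}\n'
    )
    for _token, _value in JsonnetLexer().get_tokens(_sample):
        print(_token, repr(_value))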
| 5,635 | Python | 32.349112 | 79 | 0.442591 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/rnc.py | """
pygments.lexers.rnc
~~~~~~~~~~~~~~~~~~~
Lexer for Relax-NG Compact syntax
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
__all__ = ['RNCCompactLexer']
class RNCCompactLexer(RegexLexer):
"""
For RelaxNG-compact syntax.
.. versionadded:: 2.2
"""
name = 'Relax-NG Compact'
url = 'http://relaxng.org'
aliases = ['rng-compact', 'rnc']
filenames = ['*.rnc']
tokens = {
'root': [
(r'namespace\b', Keyword.Namespace),
(r'(?:default|datatypes)\b', Keyword.Declaration),
(r'##.*$', Comment.Preproc),
(r'#.*$', Comment.Single),
(r'"[^"]*"', String.Double),
# TODO single quoted strings and escape sequences outside of
# double-quoted strings
(r'(?:element|attribute|mixed)\b', Keyword.Declaration, 'variable'),
(r'(text\b|xsd:[^ ]+)', Keyword.Type, 'maybe_xsdattributes'),
(r'[,?&*=|~]|>>', Operator),
(r'[(){}]', Punctuation),
(r'.', Text),
],
# a variable has been declared using `element` or `attribute`
'variable': [
(r'[^{]+', Name.Variable),
(r'\{', Punctuation, '#pop'),
],
# after an xsd:<datatype> declaration there may be attributes
'maybe_xsdattributes': [
(r'\{', Punctuation, 'xsdattributes'),
(r'\}', Punctuation, '#pop'),
(r'.', Text),
],
# attributes take the form { key1 = value1 key2 = value2 ... }
'xsdattributes': [
(r'[^ =}]', Name.Attribute),
(r'=', Operator),
(r'"[^"]*"', String.Double),
(r'\}', Punctuation, '#pop'),
(r'.', Text),
],
}
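# A minimal usage sketch: tokenize a small, hypothetical RELAX NG compact
# schema, exercising the 'variable', datatype, and xsdattributes states
# defined above.
if __name__ == '__main__':
    _schema = (
        'element addressBook {\n'
        '  element card {\n'
        '    attribute name { text },\n'
        '    attribute email { xsd:string { maxLength = "127" } }\n'
        '  }*\n'
        '}\n'
    )
    for _token, _value in RNCCompactLexer().get_tokens(_schema):
        print(_token, repr(_value))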
| 1,973 | Python | 28.029411 | 80 | 0.49924 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ruby.py | """
pygments.lexers.ruby
~~~~~~~~~~~~~~~~~~~~
Lexers for Ruby and related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \
bygroups, default, LexerContext, do_insertions, words, line_re
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Generic, Whitespace
from pygments.util import shebang_matches
__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer']
RUBY_OPERATORS = (
'*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~',
'[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '==='
)
class RubyLexer(ExtendedRegexLexer):
"""
For Ruby source code.
"""
name = 'Ruby'
url = 'http://www.ruby-lang.org'
aliases = ['ruby', 'rb', 'duby']
filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec',
'*.rbx', '*.duby', 'Gemfile', 'Vagrantfile']
mimetypes = ['text/x-ruby', 'application/x-ruby']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Ruby...
# match: 1 = <<[-~]?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<[-~]?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), String.Delimiter, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) in ('<<-', '<<~'), match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs, so limit the recursion depth
if len(heredocstack) < 100:
yield from self.get_tokens_unprocessed(context=ctx)
else:
yield ctx.pos, String.Heredoc, match.group(5)
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), String.Delimiter, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_rubystrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[mixounse]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
# easy ones
(r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
(words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
(r":'(\\\\|\\[^\\]|[^'\\])*'", String.Symbol),
(r':"', String.Symbol, 'simple-sym'),
(r'([a-zA-Z_]\w*)(:)(?!:)',
bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9
(r'"', String.Double, 'simple-string-double'),
(r"'", String.Single, 'simple-string-single'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# quoted string and symbol
for name, ttype, end in ('string-double', String.Double, '"'), \
('string-single', String.Single, "'"),\
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, bracecc, name in \
('\\{', '\\}', '{}', 'cb'), \
('\\[', '\\]', '\\[\\]', 'sb'), \
('\\(', '\\)', '()', 'pa'), \
('<', '>', '<>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%[qsw]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + bracecc + ']', String.Regex),
(lbrace, String.Regex, '#push'),
(rbrace + '[mixounse]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + bracecc + ']', String.Regex),
(r'[^\\#' + bracecc + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other),
(r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Whitespace, String.Other, None)),
            # and because of fixed-width lookbehinds, the whole thing is
            # repeated a second time for line starts...
(r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Whitespace, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'\A#!.+?$', Comment.Hashbang),
(r'#.*?$', Comment.Single),
(r'=begin\s.*?\n=end.*?$', Comment.Multiline),
# keywords
(words((
'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?',
'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo',
'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef',
'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'),
Keyword),
# start of function, class and module names
(r'(module)(\s+)([a-zA-Z_]\w*'
r'(?:::[a-zA-Z_]\w*)*)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
# special methods
(words((
'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader',
'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private',
'module_function', 'public', 'protected', 'true', 'false', 'nil'),
suffix=r'\b'),
Keyword.Pseudo),
(r'(not|and|or)\b', Operator.Word),
(words((
'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include',
'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil',
'private_method_defined', 'protected_method_defined',
'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'),
Name.Builtin),
(r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
(words((
'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort',
'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller',
'catch', 'chomp', 'chop', 'class_eval', 'class_variables',
'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set',
'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 'fork',
'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub',
'hash', 'id', 'included_modules', 'inspect', 'instance_eval',
'instance_method', 'instance_methods',
'instance_variable_get', 'instance_variable_set', 'instance_variables',
'lambda', 'load', 'local_variables', 'loop',
'method', 'method_missing', 'methods', 'module_eval', 'name',
'object_id', 'open', 'p', 'print', 'printf', 'private_class_method',
'private_instance_methods',
'private_methods', 'proc', 'protected_instance_methods',
'protected_methods', 'public_class_method',
'public_instance_methods', 'public_methods',
'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require',
'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep',
'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint',
'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint',
'untrace_var', 'warn'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<[-~]?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<[-~]?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!:])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=\.index\s)|'
r'(?<=\.scan\s)|'
r'(?<=\.sub\s)|'
r'(?<=\.sub!\s)|'
r'(?<=\.gsub\s)|'
r'(?<=\.gsub!\s)|'
r'(?<=\.match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Whitespace, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Whitespace, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Whitespace, Operator)),
(r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Whitespace, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Whitespace, Operator)),
# Names
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'\$\w+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z]\w+', Name.Constant),
# this is needed because ruby attributes can look
# like keywords (class) or like this: ` ?!?
(words(RUBY_OPERATORS, prefix=r'(\.|::)'),
bygroups(Operator, Name.Operator)),
(r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
bygroups(Operator, Name)),
(r'[a-zA-Z_]\w*[!?]?', Name),
(r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Whitespace)
],
'funcname': [
(r'\(', Punctuation, 'defexpr'),
(r'(?:([a-zA-Z_]\w*)(\.))?' # optional scope name, like "self."
r'('
r'[a-zA-Z\u0080-\uffff][a-zA-Z0-9_\u0080-\uffff]*[!?=]?' # method name
r'|!=|!~|=~|\*\*?|[-+!~]@?|[/%&|^]|<=>|<[<=]?|>[>=]?|===?' # or operator override
r'|\[\]=?' # or element reference/assignment override
r'|`' # or the undocumented backtick override
r')',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
default('#pop')
],
'classname': [
(r'\(', Punctuation, 'defexpr'),
(r'<<', Operator, '#pop'),
(r'[A-Z_]\w*', Name.Class, '#pop'),
default('#pop')
],
'defexpr': [
(r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
(r'\(', Operator, '#push'),
include('root')
],
'in-intp': [
(r'\{', String.Interpol, '#push'),
(r'\}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#\{', String.Interpol, 'in-intp'),
(r'#@@?[a-zA-Z_]\w*', String.Interpol),
(r'#\$[a-zA-Z_]\w*', String.Interpol)
],
'string-intp-escaped': [
include('string-intp'),
(r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})',
String.Escape)
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[mixounse]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
tokens.update(gen_rubystrings_rules())
def analyse_text(text):
return shebang_matches(text, r'ruby(1\.\d)?')
class RubyConsoleLexer(Lexer):
"""
For Ruby interactive console (**irb**) output like:
.. sourcecode:: rbcon
irb(main):001:0> a = 1
=> 1
irb(main):002:0> puts a
1
=> nil
"""
name = 'Ruby irb session'
aliases = ['rbcon', 'irb']
mimetypes = ['text/x-ruby-shellsession']
_prompt_re = re.compile(r'irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] '
r'|>> |\?> ')
def get_tokens_unprocessed(self, text):
rblexer = RubyLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
yield from do_insertions(
insertions, rblexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
yield from do_insertions(
insertions, rblexer.get_tokens_unprocessed(curcode))
class FancyLexer(RegexLexer):
"""
Pygments Lexer For Fancy.
Fancy is a self-hosted, pure object-oriented, dynamic,
class-based, concurrent general-purpose programming language
running on Rubinius, the Ruby VM.
.. versionadded:: 1.5
"""
name = 'Fancy'
url = 'https://github.com/bakkdoor/fancy'
filenames = ['*.fy', '*.fancypack']
aliases = ['fancy', 'fy']
mimetypes = ['text/x-fancysrc']
tokens = {
# copied from PerlLexer:
'balanced-regex': [
(r'/(\\\\|\\[^\\]|[^/\\])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\[^\\]|[^!\\])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'\{(\\\\|\\[^\\]|[^}\\])*\}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\[^\\]|[^>\\])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\[^\\]|[^\]\\])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\[^\\]|[^)\\])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\[^\\]|[^@\\])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\[^\\]|[^%\\])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\[^\\]|[^$\\])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\s+', Whitespace),
# balanced delimiters (copied from PerlLexer):
(r's\{(\\\\|\\[^\\]|[^}\\])*\}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\[^\\]|[^>\\])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\[^\\]|[^\]\\])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\[^\\]|[^)\\])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\[^\\]|[^///\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
# Comments
(r'#(.*?)\n', Comment.Single),
# Symbols
(r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol),
# Multi-line DoubleQuotedString
(r'"""(\\\\|\\[^\\]|[^\\])*?"""', String),
# DoubleQuotedString
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
# keywords
(r'(def|class|try|catch|finally|retry|return|return_local|match|'
r'case|->|=>)\b', Keyword),
# constants
(r'(self|super|nil|false|true)\b', Name.Constant),
(r'[(){};,/?|:\\]', Punctuation),
# names
(words((
'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String',
'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass',
'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set',
'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'),
Name.Builtin),
# functions
(r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function),
# operators, must be below functions
(r'[-+*/~,<>=&!?%^\[\].$]+', Operator),
(r'[A-Z]\w*', Name.Constant),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
('@@?', Operator),
(r'[a-zA-Z_]\w*', Name),
# numbers - / checks are necessary to avoid mismarking regexes,
# see comment in RubyLexer
(r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Whitespace, Operator)),
(r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Whitespace, Operator)),
(r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Whitespace, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Whitespace, Operator)),
(r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
]
}
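# Illustrative usage sketch (an editorial addition, not part of the upstream
# module): feed a tiny Ruby snippet containing a squiggly heredoc through
# RubyLexer and print only the heredoc-related tokens, to show what
# heredoc_callback emits. The sample source below is invented for demonstration.
if __name__ == '__main__':
    from pygments.token import String

    sample = 'greeting = <<~TEXT\n  hello from a heredoc\nTEXT\nputs greeting\n'
    for _, token, value in RubyLexer().get_tokens_unprocessed(sample):
        if token in (String.Heredoc, String.Delimiter):
            print(token, repr(value))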
| 22,775 | Python | 42.465649 | 98 | 0.429199 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/gcodelexer.py | """
pygments.lexers.gcodelexer
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the G Code Language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Name, Text, Keyword, Number
__all__ = ['GcodeLexer']
class GcodeLexer(RegexLexer):
"""
For gcode source code.
.. versionadded:: 2.9
"""
name = 'g-code'
aliases = ['gcode']
filenames = ['*.gcode']
tokens = {
'root': [
(r';.*\n', Comment),
(r'^[gmGM]\d{1,4}\s', Name.Builtin), # M or G commands
(r'([^gGmM])([+-]?\d*[.]?\d+)', bygroups(Keyword, Number)),
(r'\s', Text.Whitespace),
(r'.*\n', Text),
]
}
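# Illustrative usage sketch (an editorial addition): highlight a few lines of
# G-code with the lexer above. The toolpath snippet and the choice of terminal
# formatter are purely for demonstration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample = '; square toolpath\nG21 ; millimetres\nG0 X0 Y0\nG1 X10 Y0 F1500\n'
    print(highlight(sample, GcodeLexer(), TerminalFormatter()))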
| 826 | Python | 21.972222 | 71 | 0.520581 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ambient.py | """
pygments.lexers.ambient
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for AmbientTalk language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['AmbientTalkLexer']
class AmbientTalkLexer(RegexLexer):
"""
Lexer for AmbientTalk source code.
.. versionadded:: 2.0
"""
name = 'AmbientTalk'
url = 'https://code.google.com/p/ambienttalk'
filenames = ['*.at']
aliases = ['ambienttalk', 'ambienttalk/2', 'at']
mimetypes = ['text/x-ambienttalk']
flags = re.MULTILINE | re.DOTALL
builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:',
'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:',
'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:',
'mirroredBy:', 'is:'))
tokens = {
'root': [
(r'\s+', Whitespace),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(def|deftype|import|alias|exclude)\b', Keyword),
(builtin, Name.Builtin),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r'\|', Punctuation, 'arglist'),
(r'<:|[*^!%&<>+=,./?-]|:=', Operator),
(r"`[a-zA-Z_]\w*", String.Symbol),
(r"[a-zA-Z_]\w*:", Name.Function),
(r"[{}()\[\];`]", Punctuation),
(r'(self|super)\b', Name.Variable.Instance),
(r"[a-zA-Z_]\w*", Name.Variable),
(r"@[a-zA-Z_]\w*", Name.Class),
(r"@\[", Name.Class, 'annotations'),
include('numbers'),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'namespace': [
(r'[a-zA-Z_]\w*\.', Name.Namespace),
(r'[a-zA-Z_]\w*:', Name.Function, '#pop'),
(r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop')
],
'annotations': [
(r"(.*?)\]", Name.Class, '#pop')
],
'arglist': [
(r'\|', Punctuation, '#pop'),
(r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
(r'[a-zA-Z_]\w*', Name.Variable),
],
}
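# Illustrative usage sketch (an editorial addition): dump the raw token stream
# for a one-line AmbientTalk object definition. The snippet is invented for
# demonstration only.
if __name__ == '__main__':
    sample = 'def counter := object: { def n := 0; def inc() { n := n + 1 } };\n'
    for token, value in AmbientTalkLexer().get_tokens(sample):
        print(token, repr(value))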
| 2,606 | Python | 32.857142 | 83 | 0.465464 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/asc.py | """
pygments.lexers.asc
~~~~~~~~~~~~~~~~~~~
Lexer for various ASCII armored files.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Generic, Name, Operator, String, Whitespace
__all__ = ['AscLexer']
class AscLexer(RegexLexer):
"""
Lexer for ASCII armored files, containing `-----BEGIN/END ...-----` wrapped
base64 data.
.. versionadded:: 2.10
"""
name = 'ASCII armored'
aliases = ['asc', 'pem']
filenames = [
'*.asc', # PGP; *.gpg, *.pgp, and *.sig too, but those can be binary
'*.pem', # X.509; *.cer, *.crt, *.csr, and key etc too, but those can be binary
'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk',
'id_rsa', # SSH private keys
]
mimetypes = ['application/pgp-keys', 'application/pgp-encrypted',
'application/pgp-signature']
flags = re.MULTILINE
tokens = {
'root': [
(r'\s+', Whitespace),
(r'^-----BEGIN [^\n]+-----$', Generic.Heading, 'data'),
(r'\S+', Comment),
],
'data': [
(r'\s+', Whitespace),
(r'^([^:]+)(:)([ \t]+)(.*)',
bygroups(Name.Attribute, Operator, Whitespace, String)),
(r'^-----END [^\n]+-----$', Generic.Heading, 'root'),
(r'\S+', String),
],
}
def analyse_text(text):
if re.search(r'^-----BEGIN [^\n]+-----\r?\n', text):
return True
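# Illustrative usage sketch (an editorial addition): analyse_text is what lets
# pygments.lexers.guess_lexer pick this lexer for armored input. The header
# below is a generic placeholder, not real key material.
if __name__ == '__main__':
    armored = ('-----BEGIN PGP PUBLIC KEY BLOCK-----\n'
               'Version: Example 1.0\n\n'
               'AAAAxAAA\n'
               '-----END PGP PUBLIC KEY BLOCK-----\n')
    print(AscLexer.analyse_text(armored))           # True
    print(AscLexer.analyse_text('just some prose')) # None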
| 1,621 | Python | 27.964285 | 88 | 0.51203 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ml.py | """
pygments.lexers.ml
~~~~~~~~~~~~~~~~~~
Lexers for ML family languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['SMLLexer', 'OcamlLexer', 'OpaLexer', 'ReasonLexer', 'FStarLexer']
class SMLLexer(RegexLexer):
"""
For the Standard ML language.
.. versionadded:: 1.5
"""
name = 'Standard ML'
aliases = ['sml']
filenames = ['*.sml', '*.sig', '*.fun']
mimetypes = ['text/x-standardml', 'application/x-standardml']
alphanumid_reserved = {
# Core
'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
# Modules
'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
'struct', 'structure', 'where',
}
symbolicid_reserved = {
# Core
':', r'\|', '=', '=>', '->', '#',
# Modules
':>',
}
nonid_reserved = {'(', ')', '[', ']', '{', '}', ',', ';', '...', '_'}
alphanumid_re = r"[a-zA-Z][\w']*"
symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"
# A character constant is a sequence of the form #s, where s is a string
# constant denoting a string of size one character. This setup just parses
# the entire string as either a String.Double or a String.Char (depending
# on the argument), even if the String.Char is an erroneous
# multiple-character string.
def stringy(whatkind):
return [
(r'[^"\\]', whatkind),
(r'\\[\\"abtnvfr]', String.Escape),
# Control-character notation is used for codes < 32,
# where \^@ == \000
(r'\\\^[\x40-\x5e]', String.Escape),
# Docs say 'decimal digits'
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\\s+\\', String.Interpol),
(r'"', whatkind, '#pop'),
]
# Callbacks for distinguishing tokens and reserved words
def long_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
else:
token = Name.Namespace
yield match.start(1), token, match.group(1)
yield match.start(2), Punctuation, match.group(2)
def end_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved:
token = Error
elif match.group(1) in self.symbolicid_reserved:
token = Error
else:
token = Name
yield match.start(1), token, match.group(1)
def id_callback(self, match):
str = match.group(1)
if str in self.alphanumid_reserved:
token = Keyword.Reserved
elif str in self.symbolicid_reserved:
token = Punctuation
else:
token = Name
yield match.start(1), token, str
tokens = {
# Whitespace and comments are (almost) everywhere
'whitespace': [
(r'\s+', Text),
(r'\(\*', Comment.Multiline, 'comment'),
],
'delimiters': [
# This lexer treats these delimiters specially:
# Delimiters define scopes, and the scope is how the meaning of
# the `|' is resolved - is it a case/handle expression, or function
# definition by cases? (This is not how the Definition works, but
# it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
(r'\(|\[|\{', Punctuation, 'main'),
(r'\)|\]|\}', Punctuation, '#pop'),
(r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
(r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
(r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
],
'core': [
# Punctuation that doesn't overlap symbolic identifiers
(r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved),
Punctuation),
# Special constants: strings, floats, numbers in decimal and hex
(r'#"', String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'~?0x[0-9a-fA-F]+', Number.Hex),
(r'0wx[0-9a-fA-F]+', Number.Hex),
(r'0w\d+', Number.Integer),
(r'~?\d+\.\d+[eE]~?\d+', Number.Float),
(r'~?\d+\.\d+', Number.Float),
(r'~?\d+[eE]~?\d+', Number.Float),
(r'~?\d+', Number.Integer),
# Labels
(r'#\s*[1-9][0-9]*', Name.Label),
(r'#\s*(%s)' % alphanumid_re, Name.Label),
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
(r'\b(exception)\b(?!\')', Keyword.Reserved, 'ename'),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
# Regular identifiers, long and otherwise
(r'\'[\w\']*', Name.Decorator),
(r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
(r'(%s)' % alphanumid_re, id_callback),
(r'(%s)' % symbolicid_re, id_callback),
],
'dotted': [
(r'(%s)(\.)' % alphanumid_re, long_id_callback),
(r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
(r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
(r'\s+', Error),
(r'\S+', Error),
],
# Main parser (prevents errors in files that have scoping errors)
'root': [
default('main')
],
# In this scope, I expect '|' to not be followed by a function name,
# and I expect 'and' to be followed by a binding site
'main': [
include('whitespace'),
# Special behavior of val/and/fun
(r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
(r'\b(fun)\b(?!\')', Keyword.Reserved,
('#pop', 'main-fun', 'fname')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# In this scope, I expect '|' and 'and' to be followed by a function
'main-fun': [
include('whitespace'),
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
# Special behavior of val/and/fun
(r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
(r'\b(val)\b(?!\')', Keyword.Reserved,
('#pop', 'main', 'vname')),
# Special behavior of '|' and '|'-manipulating keywords
(r'\|', Punctuation, 'fname'),
(r'\b(case|handle)\b(?!\')', Keyword.Reserved,
('#pop', 'main')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# Character and string parsers
'char': stringy(String.Char),
'string': stringy(String.Double),
'breakout': [
(r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
],
# Dealing with what comes after module system keywords
'sname': [
include('whitespace'),
include('breakout'),
(r'(%s)' % alphanumid_re, Name.Namespace),
default('#pop'),
],
# Dealing with what comes after the 'fun' (or 'and' or '|') keyword
'fname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)' % alphanumid_re, Name.Function, '#pop'),
(r'(%s)' % symbolicid_re, Name.Function, '#pop'),
# Ignore interesting function declarations like "fun (x + y) = ..."
default('#pop'),
],
# Dealing with what comes after the 'val' (or 'and') keyword
'vname': [
include('whitespace'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
(r'(%s)' % symbolicid_re, Name.Variable, '#pop'),
# Ignore interesting patterns like 'val (x, y)'
default('#pop'),
],
# Dealing with what comes after the 'type' (or 'and') keyword
'tname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# A type binding includes most identifiers
'typbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
include('breakout'),
include('core'),
(r'\S+', Error, '#pop'),
],
# Dealing with what comes after the 'datatype' (or 'and') keyword
'dname': [
include('whitespace'),
include('breakout'),
(r'\'[\w\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(=)(\s*)(datatype)',
bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
(r'=(?!%s)' % symbolicid_re, Punctuation,
('#pop', 'datbind', 'datcon')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# common case - A | B | C of int
'datbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
(r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
(r'\b(of)\b(?!\')', Keyword.Reserved),
(r'(\|)(\s*)(%s)' % alphanumid_re,
bygroups(Punctuation, Text, Name.Class)),
(r'(\|)(\s+)(%s)' % symbolicid_re,
bygroups(Punctuation, Text, Name.Class)),
include('breakout'),
include('core'),
(r'\S+', Error),
],
# Dealing with what comes after an exception
'ename': [
include('whitespace'),
(r'(and\b)(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'(and\b)(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
(r'(%s)|(%s)' % (alphanumid_re, symbolicid_re), Name.Class),
default('#pop'),
],
'datcon': [
include('whitespace'),
(r'(%s)' % alphanumid_re, Name.Class, '#pop'),
(r'(%s)' % symbolicid_re, Name.Class, '#pop'),
(r'\S+', Error, '#pop'),
],
# Series of type variables
'tyvarseq': [
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
(r'\'[\w\']*', Name.Decorator),
(alphanumid_re, Name),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
(symbolicid_re, Name),
],
'comment': [
(r'[^(*)]', Comment.Multiline),
(r'\(\*', Comment.Multiline, '#push'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[(*)]', Comment.Multiline),
],
}
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
.. versionadded:: 0.7
"""
name = 'OCaml'
url = 'https://ocaml.org/'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = (
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
)
keyopts = (
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
'<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~'
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or')
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name.Class),
(r'\(\*(?![)])', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
default('#pop'),
],
}
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language.
.. versionadded:: 1.5
"""
name = 'Opa'
aliases = ['opa']
filenames = ['*.opa']
mimetypes = ['text/x-opa']
# most of these aren't strictly keywords
# but if you color only real keywords, you might just
# as well not color anything
keywords = (
'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
'else', 'end', 'external', 'forall', 'function', 'if', 'import',
'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
'type', 'val', 'with', 'xml_parser',
)
# matches both stuff and `stuff`
ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
op_re = r'[.=\-<>,@~%/+?*&^!]'
punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
# because they are also used for inserts
tokens = {
# copied from the caml lexer, should be adapted
'escape-sequence': [
(r'\\[\\"\'ntr}]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
# factorizing these rules, because they are inserted many times
'comments': [
(r'/\*', Comment, 'nested-comment'),
(r'//.*?$', Comment),
],
'comments-and-spaces': [
include('comments'),
(r'\s+', Text),
],
'root': [
include('comments-and-spaces'),
# keywords
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
# directives
# we could parse the actual set of directives instead of anything
# starting with @, but this is troublesome
# because it needs to be adjusted all the time
# and assuming we parse only sources that compile, it is useless
(r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
# number literals
(r'-?.[\d]+([eE][+\-]?\d+)', Number.Float),
(r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float),
(r'-?\d+[eE][+\-]?\d+', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'\d+', Number.Integer),
# color literals
(r'#[\da-fA-F]{3,6}', Number.Integer),
# string literals
(r'"', String.Double, 'string'),
# char literal, should be checked because this is the regexp from
# the caml lexer
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
String.Char),
# this is meant to deal with embedded exprs in strings
# every time we find a '}' we pop a state so that if we were
# inside a string, we are back in the string state
# as a consequence, we must also push a state every time we find a
# '{' or else we will have errors when parsing {} for instance
(r'\{', Operator, '#push'),
(r'\}', Operator, '#pop'),
# html literals
            # this is much stricter than the actual parser,
# since a<b would not be parsed as html
# but then again, the parser is way too lax, and we can't hope
# to have something as tolerant
(r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
# db path
# matching the '[_]' in '/a[_]' because it is a part
# of the syntax of the db path definition
# unfortunately, i don't know how to match the ']' in
# /a[1], so this is somewhat inconsistent
(r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
# putting the same color on <- as on db path, since
# it can be used only to mean Db.write
(r'<-(?!'+op_re+r')', Name.Variable),
# 'modules'
# although modules are not distinguished by their names as in caml
            # the standard library seems to follow the convention that
            # module names are capitalized
(r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
# operators
# = has a special role because this is the only
            # way to syntactically distinguish binding constructions
# unfortunately, this colors the equal in {x=2} too
(r'=(?!'+op_re+r')', Keyword),
(r'(%s)+' % op_re, Operator),
(r'(%s)+' % punc_re, Operator),
# coercions
(r':', Operator, 'type'),
# type variables
            # we need this rule because we don't specially parse type
            # definitions, so in "type t('a) = ...", "'a" is parsed by 'root'
("'"+ident_re, Keyword.Type),
# id literal, #something, or #{expr}
(r'#'+ident_re, String.Single),
(r'#(?=\{)', String.Single),
# identifiers
            # this avoids coloring the '2' in 'a2' as an integer
(ident_re, Text),
# default, not sure if that is needed or not
# (r'.', Text),
],
# it is quite painful to have to parse types to know where they end
# this is the general rule for a type
# a type is either:
# * -> ty
# * type-with-slash
# * type-with-slash -> ty
# * type-with-slash (, type-with-slash)+ -> ty
#
# the code is pretty funky in here, but this code would roughly
# translate in caml to:
# let rec type stream =
# match stream with
# | [< "->"; stream >] -> type stream
# | [< ""; stream >] ->
# type_with_slash stream
# type_lhs_1 stream;
# and type_1 stream = ...
'type': [
include('comments-and-spaces'),
(r'->', Keyword.Type),
default(('#pop', 'type-lhs-1', 'type-with-slash')),
],
# parses all the atomic or closed constructions in the syntax of type
# expressions: record types, tuple types, type constructors, basic type
# and type variables
'type-1': [
include('comments-and-spaces'),
(r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(r'~?\{', Keyword.Type, ('#pop', 'type-record')),
(ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(ident_re, Keyword.Type, '#pop'),
("'"+ident_re, Keyword.Type),
# this case is not in the syntax but sometimes
# we think we are parsing types when in fact we are parsing
# some css, so we just pop the states until we get back into
# the root state
default('#pop'),
],
# type-with-slash is either:
# * type-1
# * type-1 (/ type-1)+
'type-with-slash': [
include('comments-and-spaces'),
default(('#pop', 'slash-type-1', 'type-1')),
],
'slash-type-1': [
include('comments-and-spaces'),
('/', Keyword.Type, ('#pop', 'type-1')),
# same remark as above
default('#pop'),
],
# we go in this state after having parsed a type-with-slash
# while trying to parse a type
# and at this point we must determine if we are parsing an arrow
# type (in which case we must continue parsing) or not (in which
# case we stop)
'type-lhs-1': [
include('comments-and-spaces'),
(r'->', Keyword.Type, ('#pop', 'type')),
(r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
default('#pop'),
],
'type-arrow': [
include('comments-and-spaces'),
            # the lookahead here allows parsing f(x : int, y : float -> truc)
# correctly
(r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
(r'->', Keyword.Type, ('#pop', 'type')),
# same remark as above
default('#pop'),
],
# no need to do precise parsing for tuples and records
# because they are closed constructions, so we can simply
# find the closing delimiter
            # note that this approach would not work if the source
# contained identifiers like `{)` (although it could be patched
# to support it)
'type-tuple': [
include('comments-and-spaces'),
(r'[^()/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\(', Keyword.Type, '#push'),
(r'\)', Keyword.Type, '#pop'),
],
'type-record': [
include('comments-and-spaces'),
(r'[^{}/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\{', Keyword.Type, '#push'),
(r'\}', Keyword.Type, '#pop'),
],
# 'type-tuple': [
# include('comments-and-spaces'),
# (r'\)', Keyword.Type, '#pop'),
# default(('#pop', 'type-tuple-1', 'type-1')),
# ],
# 'type-tuple-1': [
# include('comments-and-spaces'),
# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
# (r',', Keyword.Type, 'type-1'),
# ],
# 'type-record':[
# include('comments-and-spaces'),
# (r'\}', Keyword.Type, '#pop'),
# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
# ],
# 'type-record-field-expr': [
#
# ],
'nested-comment': [
(r'[^/*]+', Comment),
(r'/\*', Comment, '#push'),
(r'\*/', Comment, '#pop'),
(r'[/*]', Comment),
],
# the copy pasting between string and single-string
# is kinda sad. Is there a way to avoid that??
'string': [
(r'[^\\"{]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
'single-string': [
(r'[^\\\'{]+', String.Double),
(r'\'', String.Double, '#pop'),
(r'\{', Operator, 'root'),
include('escape-sequence'),
],
# all the html stuff
# can't really reuse some existing html parser
# because we must be able to parse embedded expressions
# we are in this state after someone parsed the '<' that
# started the html literal
'html-open-tag': [
(r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
(r'>', String.Single, ('#pop', 'html-content')),
],
# we are in this state after someone parsed the '</' that
# started the end of the closing tag
'html-end-tag': [
# this is a star, because </> is allowed
(r'[\w\-:]*>', String.Single, '#pop'),
],
# we are in this state after having parsed '<ident(:ident)?'
# we thus parse a possibly empty list of attributes
'html-attr': [
(r'\s+', Text),
(r'[\w\-:]+=', String.Single, 'html-attr-value'),
(r'/>', String.Single, '#pop'),
(r'>', String.Single, ('#pop', 'html-content')),
],
'html-attr-value': [
(r"'", String.Single, ('#pop', 'single-string')),
(r'"', String.Single, ('#pop', 'string')),
(r'#'+ident_re, String.Single, '#pop'),
(r'#(?=\{)', String.Single, ('#pop', 'root')),
(r'[^"\'{`=<>]+', String.Single, '#pop'),
(r'\{', Operator, ('#pop', 'root')), # this is a tail call!
],
# we should probably deal with '\' escapes here
'html-content': [
(r'<!--', Comment, 'html-comment'),
(r'</', String.Single, ('#pop', 'html-end-tag')),
(r'<', String.Single, 'html-open-tag'),
(r'\{', Operator, 'root'),
(r'[^<{]+', String.Single),
],
'html-comment': [
(r'-->', Comment, '#pop'),
(r'[^\-]+|-', Comment),
],
}
class ReasonLexer(RegexLexer):
"""
For the ReasonML language.
.. versionadded:: 2.6
"""
name = 'ReasonML'
url = 'https://reasonml.github.io/'
aliases = ['reasonml', 'reason']
filenames = ['*.re', '*.rei']
mimetypes = ['text/x-reasonml']
keywords = (
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', 'downto',
'else', 'end', 'exception', 'external', 'false', 'for', 'fun', 'esfun',
'function', 'functor', 'if', 'in', 'include', 'inherit', 'initializer', 'lazy',
'let', 'switch', 'module', 'pub', 'mutable', 'new', 'nonrec', 'object', 'of',
'open', 'pri', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'val', 'virtual', 'when', 'while', 'with',
)
keyopts = (
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
r'-\.', '=>', r'\.', r'\.\.', r'\.\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
'<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|\|', r'\|]', r'\}', '~'
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lsr', 'lxor', 'mod', 'or')
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name.Class),
(r'//.*?\n', Comment.Single),
(r'\/\*(?!/)', Comment.Multiline, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator.Word),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword),
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'\/\*', Comment.Multiline, '#push'),
(r'\*\/', Comment.Multiline, '#pop'),
(r'\*', Comment.Multiline),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
default('#pop'),
],
}
class FStarLexer(RegexLexer):
"""
For the F* language.
.. versionadded:: 2.7
"""
name = 'FStar'
url = 'https://www.fstar-lang.org/'
aliases = ['fstar']
filenames = ['*.fst', '*.fsti']
mimetypes = ['text/x-fstar']
keywords = (
        'abstract', 'attributes', 'noeq', 'unopteq', 'and',
'begin', 'by', 'default', 'effect', 'else', 'end', 'ensures',
'exception', 'exists', 'false', 'forall', 'fun', 'function', 'if',
'in', 'include', 'inline', 'inline_for_extraction', 'irreducible',
'logic', 'match', 'module', 'mutable', 'new', 'new_effect', 'noextract',
'of', 'open', 'opaque', 'private', 'range_of', 'reifiable',
'reify', 'reflectable', 'requires', 'set_range_of', 'sub_effect',
'synth', 'then', 'total', 'true', 'try', 'type', 'unfold', 'unfoldable',
'val', 'when', 'with', 'not'
)
decl_keywords = ('let', 'rec')
assume_keywords = ('assume', 'admit', 'assert', 'calc')
keyopts = (
r'~', r'-', r'/\\', r'\\/', r'<:', r'<@', r'\(\|', r'\|\)', r'#', r'u#',
r'&', r'\(', r'\)', r'\(\)', r',', r'~>', r'->', r'<-', r'<--', r'<==>',
r'==>', r'\.', r'\?', r'\?\.', r'\.\[', r'\.\(', r'\.\(\|', r'\.\[\|',
r'\{:pattern', r':', r'::', r':=', r';', r';;', r'=', r'%\[', r'!\{',
r'\[', r'\[@', r'\[\|', r'\|>', r'\]', r'\|\]', r'\{', r'\|', r'\}', r'\$'
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|False|True|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name.Class),
(r'\(\*(?![)])', Comment, 'comment'),
(r'\/\/.+$', Comment),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'\b(%s)\b' % '|'.join(assume_keywords), Name.Exception),
(r'\b(%s)\b' % '|'.join(decl_keywords), Keyword.Declaration),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r"\`([\w\'.]+)\`", Operator.Word), # for infix applications
(r"\`", Keyword), # for quoting
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
default('#pop'),
],
}
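# Illustrative usage sketch (an editorial addition): run equivalent one-line
# factorial definitions through the OCaml and Standard ML lexers defined above
# and count the tokens each produces. Both snippets are invented examples.
if __name__ == '__main__':
    ocaml_src = 'let rec fact n = if n <= 1 then 1 else n * fact (n - 1)\n'
    sml_src = 'fun fact n = if n <= 1 then 1 else n * fact (n - 1)\n'
    print('OCaml tokens:', sum(1 for _ in OcamlLexer().get_tokens(ocaml_src)))
    print('SML tokens:  ', sum(1 for _ in SMLLexer().get_tokens(sml_src)))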
| 35,324 | Python | 35.758585 | 87 | 0.439729 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/go.py | """
pygments.lexers.go
~~~~~~~~~~~~~~~~~~
Lexers for the Google Go language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['GoLexer']
class GoLexer(RegexLexer):
"""
For Go source.
.. versionadded:: 1.2
"""
name = 'Go'
url = 'https://go.dev/'
filenames = ['*.go']
aliases = ['go', 'golang']
mimetypes = ['text/x-gosrc']
tokens = {
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuations
(r'//(.*?)$', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'(import|package)\b', Keyword.Namespace),
(r'(var|func|struct|map|chan|type|interface|const)\b',
Keyword.Declaration),
(words((
'break', 'default', 'select', 'case', 'defer', 'go',
'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
'continue', 'for', 'return'), suffix=r'\b'),
Keyword),
(r'(true|false|iota|nil)\b', Keyword.Constant),
# It seems the builtin types aren't actually keywords, but
# can be used as functions. So we need two declarations.
(words((
'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'int', 'int8', 'int16', 'int32', 'int64',
'float', 'float32', 'float64',
'complex64', 'complex128', 'byte', 'rune',
'string', 'bool', 'error', 'uintptr', 'any', 'comparable',
'print', 'println', 'panic', 'recover', 'close', 'complex',
'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
'new', 'make'), suffix=r'\b(\()'),
bygroups(Name.Builtin, Punctuation)),
(words((
'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'int', 'int8', 'int16', 'int32', 'int64',
'float', 'float32', 'float64',
'complex64', 'complex128', 'byte', 'rune',
'string', 'bool', 'error', 'uintptr', 'any', 'comparable'), suffix=r'\b'),
Keyword.Type),
# imaginary_lit
(r'\d+i', Number),
(r'\d+\.\d*([Ee][-+]\d+)?i', Number),
(r'\.\d+([Ee][-+]\d+)?i', Number),
(r'\d+[Ee][-+]\d+i', Number),
# float_lit
(r'\d+(\.\d+[eE][+\-]?\d+|'
r'\.\d*|[eE][+\-]?\d+)', Number.Float),
(r'\.\d+([eE][+\-]?\d+)?', Number.Float),
# int_lit
# -- octal_lit
(r'0[0-7]+', Number.Oct),
# -- hex_lit
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# -- decimal_lit
(r'(0|[1-9][0-9]*)', Number.Integer),
# char_lit
(r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
String.Char),
# StringLiteral
# -- raw_string_lit
(r'`[^`]*`', String),
# -- interpreted_string_lit
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
# Tokens
(r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&]'
r'|~|\|)', Operator),
(r'[|^<>=!()\[\]{}.,;:]', Punctuation),
# identifier
(r'[^\W\d]\w*', Name.Other),
]
}
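# Illustrative usage sketch (an editorial addition): render a minimal Go
# program as inline-styled HTML. The program text is invented for demonstration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    sample = 'package main\n\nimport "fmt"\n\nfunc main() {\n\tfmt.Println("hi")\n}\n'
    print(highlight(sample, GoLexer(), HtmlFormatter(noclasses=True)))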
| 3,761 | Python | 37 | 90 | 0.411593 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/boa.py | """
pygments.lexers.boa
~~~~~~~~~~~~~~~~~~~
Lexers for the Boa language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import String, Comment, Keyword, Name, Number, Operator, \
Punctuation, Whitespace
__all__ = ['BoaLexer']
class BoaLexer(RegexLexer):
"""
Lexer for the `Boa <http://boa.cs.iastate.edu/docs/>`_ language.
.. versionadded:: 2.4
"""
name = 'Boa'
aliases = ['boa']
filenames = ['*.boa']
reserved = words(
('input', 'output', 'of', 'weight', 'before', 'after', 'stop',
'ifall', 'foreach', 'exists', 'function', 'break', 'switch', 'case',
'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
suffix=r'\b', prefix=r'\b')
keywords = words(
('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum',
'top', 'string', 'int', 'bool', 'float', 'time', 'false', 'true',
'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
classes = words(
('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind',
'ChangedFile', 'FileKind', 'ASTRoot', 'Namespace', 'Declaration', 'Type',
'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility',
'TypeKind', 'Person', 'ChangeKind'),
suffix=r'\b', prefix=r'\b')
operators = ('->', ':=', ':', '=', '<<', '!', '++', '||',
'&&', '+', '-', '*', ">", "<")
string_sep = ('`', '\"')
built_in_functions = words(
(
# Array functions
'new', 'sort',
# Date & Time functions
'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now',
'addday', 'addmonth', 'addweek', 'addyear', 'dayofmonth', 'dayofweek',
'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
'trunctomonth', 'trunctosecond', 'trunctoyear',
# Map functions
'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
# Math functions
'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
'ceil', 'cos', 'cosh', 'exp', 'floor', 'highbit', 'isfinite', 'isinf',
'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow',
'rand', 'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
# Other functions
'def', 'hash', 'len',
# Set functions
'add', 'contains', 'remove',
# String functions
'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex',
'split', 'splitall', 'splitn', 'strfind', 'strreplace', 'strrfind',
'substring', 'trim', 'uppercase',
# Type Conversion functions
'bool', 'float', 'int', 'string', 'time',
# Domain-Specific functions
'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind',
'isliteral',
),
prefix=r'\b',
suffix=r'\(')
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(reserved, Keyword.Reserved),
(built_in_functions, Name.Function),
(keywords, Keyword.Type),
(classes, Name.Classes),
(words(operators), Operator),
(r'[][(),;{}\\.]', Punctuation),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"`(\\\\|\\[^\\]|[^`\\])*`", String.Backtick),
(words(string_sep), String.Delimiter),
(r'[a-zA-Z_]+', Name.Variable),
(r'[0-9]+', Number.Integer),
(r'\s+', Whitespace), # Whitespace
]
}
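# Illustrative usage sketch (an editorial addition): tokenize a tiny Boa query
# that declares an output aggregator and emits a value into it. The query is
# invented for demonstration.
if __name__ == '__main__':
    sample = 'counts: output sum of int;\ncounts << 1;\n'
    for token, value in BoaLexer().get_tokens(sample):
        print(token, repr(value))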
| 3,915 | Python | 38.959183 | 84 | 0.490421 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/graph.py | """
pygments.lexers.graph
~~~~~~~~~~~~~~~~~~~~~
Lexers for graph query languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this, words
from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
String, Number, Whitespace
__all__ = ['CypherLexer']
class CypherLexer(RegexLexer):
"""
For Cypher Query Language
For the Cypher version in Neo4j 3.3
.. versionadded:: 2.0
"""
name = 'Cypher'
url = 'https://neo4j.com/docs/developer-manual/3.3/cypher/'
aliases = ['cypher']
filenames = ['*.cyp', '*.cypher']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
include('comment'),
include('clauses'),
include('keywords'),
include('relations'),
include('strings'),
include('whitespace'),
include('barewords'),
],
'comment': [
(r'^.*//.*$', Comment.Single),
],
'keywords': [
(r'(create|order|match|limit|set|skip|start|return|with|where|'
r'delete|foreach|not|by|true|false)\b', Keyword),
],
'clauses': [
# based on https://neo4j.com/docs/cypher-refcard/3.3/
(r'(create)(\s+)(index|unique)\b',
bygroups(Keyword, Whitespace, Keyword)),
            (r'(drop)(\s+)(constraint|index)(\s+)(on)\b',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(r'(ends)(\s+)(with)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(is)(\s+)(node)(\s+)(key)\b',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(r'(is)(\s+)(null|unique)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(load)(\s+)(csv)(\s+)(from)\b',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(r'(on)(\s+)(match|create)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(optional)(\s+)(match)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(order)(\s+)(by)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(starts)(\s+)(with)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(union)(\s+)(all)\b',
bygroups(Keyword, Whitespace, Keyword)),
(r'(using)(\s+)(periodic)(\s+)(commit)\b',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(words((
'all', 'any', 'as', 'asc', 'ascending', 'assert', 'call', 'case', 'create',
'delete', 'desc', 'descending', 'distinct', 'end', 'fieldterminator',
'foreach', 'in', 'limit', 'match', 'merge', 'none', 'not', 'null',
'remove', 'return', 'set', 'skip', 'single', 'start', 'then', 'union',
'unwind', 'yield', 'where', 'when', 'with'), suffix=r'\b'), Keyword),
],
'relations': [
(r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
(r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'(-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
(r'-->|<--|\[|\]', Operator),
(r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
(r'[.*{}]', Punctuation),
],
'strings': [
(r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
(r'`(?:``|[^`])+`', Name.Variable),
],
'whitespace': [
(r'\s+', Whitespace),
],
'barewords': [
(r'[a-z]\w*', Name),
(r'\d+', Number),
],
}
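# Illustrative usage sketch (an editorial addition): highlight a small Cypher
# query in the terminal. The query text is invented for demonstration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    query = ('MATCH (a:Person)-[:KNOWS]->(b:Person)\n'
             'WHERE a.name = "Ada"\n'
             'RETURN b.name\n')
    print(highlight(query, CypherLexer(), TerminalFormatter()))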
| 3,861 | Python | 35.433962 | 91 | 0.47656 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/tal.py | """
pygments.lexers.tal
~~~~~~~~~~~~~~~~~~~
Lexer for Uxntal
.. versionadded:: 2.12
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Keyword, Name, String, Number, \
Punctuation, Whitespace, Literal
__all__ = ['TalLexer']
class TalLexer(RegexLexer):
"""
For `Uxntal <https://wiki.xxiivv.com/site/uxntal.html>`_ source code.
.. versionadded:: 2.12
"""
name = 'Tal'
aliases = ['tal', 'uxntal']
filenames = ['*.tal']
mimetypes = ['text/x-uxntal']
instructions = [
'BRK', 'LIT', 'INC', 'POP', 'DUP', 'NIP', 'SWP', 'OVR', 'ROT',
'EQU', 'NEQ', 'GTH', 'LTH', 'JMP', 'JCN', 'JSR', 'STH',
'LDZ', 'STZ', 'LDR', 'STR', 'LDA', 'STA', 'DEI', 'DEO',
'ADD', 'SUB', 'MUL', 'DIV', 'AND', 'ORA', 'EOR', 'SFT'
]
tokens = {
# the comment delimiters must not be adjacent to non-space characters.
# this means ( foo ) is a valid comment but (foo) is not. this also
# applies to nested comments.
'comment': [
(r'(?<!\S)\((?!\S)', Comment.Multiline, '#push'), # nested comments
(r'(?<!\S)\)(?!\S)', Comment.Multiline, '#pop'), # nested comments
(r'[^()]+', Comment.Multiline), # comments
(r'[()]+', Comment.Multiline), # comments
],
'root': [
(r'\s+', Whitespace), # spaces
(r'(?<!\S)\((?!\S)', Comment.Multiline, 'comment'), # comments
(words(instructions, prefix=r'(?<!\S)', suffix=r'2?k?r?(?!\S)'),
Keyword.Reserved), # instructions
(r'[][{}](?!\S)', Punctuation), # delimiters
(r'#([0-9a-f]{2}){1,2}(?!\S)', Number.Hex), # integer
(r'"\S+', String), # raw string
(r"'\S(?!\S)", String.Char), # raw char
(r'([0-9a-f]{2}){1,2}(?!\S)', Literal), # raw integer
(r'[|$][0-9a-f]{1,4}(?!\S)', Keyword.Declaration), # abs/rel pad
(r'%\S+', Name.Decorator), # macro
(r'@\S+', Name.Function), # label
(r'&\S+', Name.Label), # sublabel
(r'/\S+', Name.Tag), # spacer
(r'\.\S+', Name.Variable.Magic), # zero page addr
(r',\S+', Name.Variable.Instance), # rel addr
(r';\S+', Name.Variable.Global), # abs addr
(r':\S+', Literal), # raw addr
(r'~\S+', Keyword.Namespace), # include
(r'\S+', Name),
]
}
def analyse_text(text):
return '|0100' in text[:500]
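# Illustrative usage sketch (an editorial addition): analyse_text keys on the
# '|0100' reset-vector padding that opens most Uxntal programs. The one-line
# program below is invented for demonstration.
if __name__ == '__main__':
    sample = '( write one byte to the console ) |0100 #48 #18 DEO BRK\n'
    print(TalLexer.analyse_text(sample))  # True
    for token, value in TalLexer().get_tokens(sample):
        print(token, repr(value))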
| 2,639 | Python | 34.2 | 79 | 0.486927 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/dylan.py | """
pygments.lexers.dylan
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Dylan language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
default, line_re
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Literal, Whitespace
__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer']
class DylanLexer(RegexLexer):
"""
For the Dylan language.
.. versionadded:: 0.7
"""
name = 'Dylan'
url = 'http://www.opendylan.org/'
aliases = ['dylan']
filenames = ['*.dylan', '*.dyl', '*.intr']
mimetypes = ['text/x-dylan']
flags = re.IGNORECASE
builtins = {
'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
'each-subclass', 'exception', 'exclude', 'function', 'generic',
'handler', 'inherited', 'inline', 'inline-only', 'instance',
'interface', 'import', 'keyword', 'library', 'macro', 'method',
'module', 'open', 'primary', 'required', 'sealed', 'sideways',
'singleton', 'slot', 'thread', 'variable', 'virtual'}
keywords = {
'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
'while'}
operators = {
'~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
'>', '>=', '&', '|'}
functions = {
'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
'condition-format-arguments', 'condition-format-string', 'conjoin',
'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
'function-arguments', 'function-return-values',
'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
'generic-function-methods', 'head', 'head-setter', 'identity',
'initialize', 'instance?', 'integral?', 'intersection',
'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
'min', 'modulo', 'negative', 'negative?', 'next-method',
'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
'remove-duplicates', 'remove-duplicates!', 'remove-key!',
'remove-method', 'replace-elements!', 'replace-subsequence!',
'restart-query', 'return-allowed?', 'return-description',
'return-query', 'reverse', 'reverse!', 'round', 'round/',
'row-major-index', 'second', 'second-setter', 'shallow-copy',
'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
'vector', 'zero?'}
valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+'
def get_tokens_unprocessed(self, text):
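        # Post-process the plain RegexLexer stream: Name tokens whose
        # lower-cased text appears in one of the word sets above are
        # re-emitted with a more specific type (builtin, keyword, operator).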
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
lowercase_value = value.lower()
if lowercase_value in self.builtins:
yield index, Name.Builtin, value
continue
if lowercase_value in self.keywords:
yield index, Keyword, value
continue
if lowercase_value in self.functions:
yield index, Name.Builtin, value
continue
if lowercase_value in self.operators:
yield index, Operator, value
continue
yield index, token, value
tokens = {
'root': [
# Whitespace
(r'\s+', Whitespace),
# single line comment
(r'//.*?\n', Comment.Single),
# lid header
(r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Whitespace, String)),
default('code') # no header match, switch to code
],
'code': [
# Whitespace
(r'\s+', Whitespace),
# single line comment
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
# multi-line comment
(r'/\*', Comment.Multiline, 'comment'),
# strings and characters
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),
# binary integer
(r'#b[01]+', Number.Bin),
# octal integer
(r'#o[0-7]+', Number.Oct),
# floating point
(r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),
# decimal integer
(r'[-+]?\d+', Number.Integer),
# hex integer
(r'#x[0-9a-f]+', Number.Hex),
# Macro parameters
(r'(\?' + valid_name + ')(:)'
r'(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'\?' + valid_name, Name.Tag),
# Punctuation
(r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),
# Most operators are picked up as names and then re-flagged.
# This one isn't valid in a name though, so we pick it up now.
(r':=', Operator),
# Pick up #t / #f before we match other stuff with #.
(r'#[tf]', Literal),
# #"foo" style keywords
(r'#"', String.Symbol, 'keyword'),
# #rest, #key, #all-keys, etc.
(r'#[a-z0-9-]+', Keyword),
# required-init-keyword: style keywords.
(valid_name + ':', Keyword),
# class names
('<' + valid_name + '>', Name.Class),
# define variable forms.
(r'\*' + valid_name + r'\*', Name.Variable.Global),
# define constant forms.
(r'\$' + valid_name, Name.Constant),
# everything else. We re-flag some of these in the method above.
(valid_name, Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'keyword': [
(r'"', String.Symbol, '#pop'),
(r'[^\\"]+', String.Symbol), # all other characters
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
]
}
class DylanLidLexer(RegexLexer):
"""
For Dylan LID (Library Interchange Definition) files.
.. versionadded:: 1.6
"""
name = 'DylanLID'
aliases = ['dylan-lid', 'lid']
filenames = ['*.lid', '*.hdp']
mimetypes = ['text/x-dylan-lid']
flags = re.IGNORECASE
tokens = {
'root': [
# Whitespace
(r'\s+', Whitespace),
# single line comment
(r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
# lid header
(r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Whitespace, String)),
]
}
class DylanConsoleLexer(Lexer):
"""
For Dylan interactive console output like:
.. sourcecode:: dylan-console
? let a = 1;
=> 1
? a
=> 1
This is based on a copy of the RubyConsoleLexer.
.. versionadded:: 1.6
"""
name = 'Dylan session'
aliases = ['dylan-console', 'dylan-repl']
filenames = ['*.dylan-console']
mimetypes = ['text/x-dylan-console']
_prompt_re = re.compile(r'\?| ')
def get_tokens_unprocessed(self, text):
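        # Buffer consecutive prompt lines as one block of Dylan source,
        # recording where each prompt string must be spliced back in, then
        # feed the buffer to DylanLexer via do_insertions; non-prompt lines
        # are emitted directly as Generic.Output.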
dylexer = DylanLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
yield from do_insertions(insertions,
dylexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
yield from do_insertions(insertions,
dylexer.get_tokens_unprocessed(curcode))
| 10,380 | Python | 35.045139 | 85 | 0.495665 |
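A minimal sketch, assuming pygments is installed, of how the DylanConsoleLexer above splits a transcript into prompt, code, and output tokens; the session string is illustrative only:

from pygments.lexers import DylanConsoleLexer
from pygments.token import Generic

session = '? let a = 1;\n=> 1\n? a\n=> 1\n'
# Prompt-prefixed lines are lexed as Dylan source with the '?' re-inserted as
# Generic.Prompt; the '=>' lines fall through as Generic.Output.
for _, token_type, value in DylanConsoleLexer().get_tokens_unprocessed(session):
    label = 'prompt' if token_type is Generic.Prompt else str(token_type)
    print(label, repr(value))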
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/graphviz.py | """
pygments.lexers.graphviz
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for the DOT language (graphviz).
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Keyword, Operator, Name, String, Number, \
Punctuation, Whitespace
__all__ = ['GraphvizLexer']
class GraphvizLexer(RegexLexer):
"""
    For the graphviz DOT graph description language.
.. versionadded:: 2.8
"""
name = 'Graphviz'
url = 'https://www.graphviz.org/doc/info/lang.html'
aliases = ['graphviz', 'dot']
filenames = ['*.gv', '*.dot']
mimetypes = ['text/x-graphviz', 'text/vnd.graphviz']
tokens = {
'root': [
(r'\s+', Whitespace),
(r'(#|//).*?$', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'(?i)(node|edge|graph|digraph|subgraph|strict)\b', Keyword),
(r'--|->', Operator),
(r'[{}[\]:;,]', Punctuation),
(r'(\b\D\w*)(\s*)(=)(\s*)',
bygroups(Name.Attribute, Whitespace, Punctuation, Whitespace),
'attr_id'),
(r'\b(n|ne|e|se|s|sw|w|nw|c|_)\b', Name.Builtin),
(r'\b\D\w*', Name.Tag), # node
(r'[-]?((\.[0-9]+)|([0-9]+(\.[0-9]*)?))', Number),
(r'"(\\"|[^"])*?"', Name.Tag), # quoted node
(r'<', Punctuation, 'xml'),
],
'attr_id': [
(r'\b\D\w*', String, '#pop'),
(r'[-]?((\.[0-9]+)|([0-9]+(\.[0-9]*)?))', Number, '#pop'),
(r'"(\\"|[^"])*?"', String.Double, '#pop'),
(r'<', Punctuation, ('#pop', 'xml')),
],
'xml': [
(r'<', Punctuation, '#push'),
(r'>', Punctuation, '#pop'),
(r'\s+', Whitespace),
(r'[^<>\s]', Name.Tag),
]
}
| 1,935 | Python | 31.266666 | 78 | 0.449096 |
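A hedged sketch of exercising the GraphvizLexer above on a small DOT graph, assuming pygments is installed; HtmlFormatter is just one convenient output target and the graph text is illustrative:

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import GraphvizLexer

# Covers the keyword, attribute (attr_id state), edge operator, and node rules.
dot = 'digraph G { rankdir = LR; a -> b [label="edge"]; }'
print(highlight(dot, GraphvizLexer(), HtmlFormatter()))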