pascal-roth/orbit_envs/extensions/omni.isaac.carla/config/extension.toml

[package]
version = "0.0.1"
title = "CARLA extension"
description = "Extension to include 3D Datasets from the Carla Simulator."
authors = ["Pascal Roth"]
repository = "https://gitlab-master.nvidia.com/mmittal/omni_isaac_orbit"
category = "robotics"
keywords = ["kit", "robotics"]
readme = "docs/README.md"
[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
"omni.isaac.orbit" = {}
"omni.isaac.anymal" = {}
# Main python module this extension provides.
[[python.module]]
name = "omni.isaac.carla"
[[python.module]]
name = "omni.isaac.carla.scripts"
pascal-roth/orbit_envs/extensions/omni.isaac.carla/omni/isaac/carla/scripts/__init__.py

# Copyright (c) 2024 ETH Zurich (Robotic Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from .loader import CarlaLoader
__all__ = ["CarlaLoader"]
# EoF
pascal-roth/orbit_envs/extensions/omni.isaac.carla/omni/isaac/carla/scripts/loader.py

# Copyright (c) 2024 ETH Zurich (Robotic Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
from typing import List, Tuple
# omniverse
import carb
import omni
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.debug_draw._debug_draw as omni_debug_draw
import scipy.spatial.transform as tf
import yaml
# isaac-carla
from omni.isaac.carla.configs import CarlaLoaderConfig
# isaac-core
from omni.isaac.core.materials import PhysicsMaterial
from omni.isaac.core.objects.ground_plane import GroundPlane
from omni.isaac.core.prims import GeometryPrim
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.semantics import add_update_semantics, remove_all_semantics
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
# isaac-orbit
from omni.isaac.orbit.utils.configclass import class_to_dict
from pxr import Gf, PhysxSchema, Usd
class CarlaLoader:
debug: bool = False
def __init__(self, cfg: CarlaLoaderConfig) -> None:
self._cfg = cfg
# Load kit helper
self.sim = SimulationContext(
stage_units_in_meters=1.0,
physics_dt=self._cfg.sim_cfg.dt,
rendering_dt=self._cfg.sim_cfg.dt * self._cfg.sim_cfg.substeps,
backend="torch",
sim_params=class_to_dict(self._cfg.sim_cfg.physx),
device=self._cfg.sim_cfg.device,
)
# Set main camera
set_camera_view([20 / self._cfg.scale, 20 / self._cfg.scale, 20 / self._cfg.scale], [0.0, 0.0, 0.0])
# Acquire draw interface
self.draw_interface = omni_debug_draw.acquire_debug_draw_interface()
self.material: PhysicsMaterial = None
return
def load(self) -> None:
"""Load the scene."""
# design scene
assert os.path.isfile(self._cfg.usd_path), f"USD File not found: {self._cfg.usd_path}"
self._design_scene()
self.sim.reset()
# modify mesh
if self._cfg.cw_config_file:
self._multiply_crosswalks()
if self._cfg.people_config_file:
self._insert_people()
if self._cfg.vehicle_config_file:
self._insert_vehicles()
# assign semantic labels
if self._cfg.sem_mesh_to_class_map:
self._add_semantics()
return
""" Scene Helper Functions """
def _design_scene(self):
"""Add prims to the scene."""
self._xform_prim = prim_utils.create_prim(
prim_path=self._cfg.prim_path,
translation=(0.0, 0.0, 0.0),
usd_path=self._cfg.usd_path,
scale=(self._cfg.scale, self._cfg.scale, self._cfg.scale),
)
# physics material
self.material = PhysicsMaterial(
"/World/PhysicsMaterial", static_friction=0.7, dynamic_friction=0.7, restitution=0
)
# enable patch-friction: yields better results!
physx_material_api = PhysxSchema.PhysxMaterialAPI.Apply(self.material._prim)
physx_material_api.CreateImprovePatchFrictionAttr().Set(True)
physx_material_api.CreateFrictionCombineModeAttr().Set("max")
physx_material_api.CreateRestitutionCombineModeAttr().Set("max")
        # assign each submesh its own geometry prim --> important for raytracing to be able to identify the submesh
submeshes = prim_utils.get_prim_children(self._xform_prim)[1].GetAllChildren()
for submesh in submeshes:
submesh_path = submesh.GetPath().pathString
# create geometry prim
GeometryPrim(
prim_path=submesh_path,
name="collision",
position=None,
orientation=None,
collision=True,
).apply_physics_material(self.material)
# physx_utils.setCollider(submesh, approximationShape="None")
# "None" will use the base triangle mesh if available
# Lights-1
prim_utils.create_prim(
"/World/Light/GreySphere",
"SphereLight",
translation=(45 / self._cfg.scale, 100 / self._cfg.scale, 100 / self._cfg.scale),
attributes={"radius": 10, "intensity": 30000.0, "color": (0.75, 0.75, 0.75)},
)
# Lights-2
prim_utils.create_prim(
"/World/Light/WhiteSphere",
"SphereLight",
translation=(100 / self._cfg.scale, 100 / self._cfg.scale, 100 / self._cfg.scale),
attributes={"radius": 10, "intensity": 30000.0, "color": (1.0, 1.0, 1.0)},
)
if self._cfg.axis_up == "Y" or self._cfg.axis_up == "y":
world_prim = prim_utils.get_prim_at_path(self._cfg.prim_path)
rot_quat = tf.Rotation.from_euler("XYZ", [90, 90, 0], degrees=True).as_quat()
gf_quat = Gf.Quatf()
gf_quat.real = rot_quat[3]
gf_quat.imaginary = Gf.Vec3f(list(rot_quat[:3]))
world_prim.GetAttribute("xformOp:orient").Set(gf_quat)
if self._cfg.groundplane:
_ = GroundPlane("/World/GroundPlane", z_position=0.0, physics_material=self.material, visible=False)
return
""" Assign Semantic Labels """
def _add_semantics(self):
# remove all previous semantic labels
remove_all_semantics(prim_utils.get_prim_at_path(self._cfg.prim_path + self._cfg.suffix), recursive=True)
# get mesh prims
mesh_prims, mesh_prims_name = self.get_mesh_prims(self._cfg.prim_path + self._cfg.suffix)
carb.log_info(f"Total of {len(mesh_prims)} meshes in the scene, start assigning semantic class ...")
# mapping from prim name to class
with open(self._cfg.sem_mesh_to_class_map) as file:
class_keywords = yaml.safe_load(file)
# make all the string lower case
mesh_prims_name = [mesh_prim_single.lower() for mesh_prim_single in mesh_prims_name]
keywords_class_mapping_lower = {
key: [value_single.lower() for value_single in value] for key, value in class_keywords.items()
}
# assign class to mesh in ISAAC
def recursive_semUpdate(prim, sem_class_name: str, update_submesh: bool) -> bool:
# Necessary for Park Mesh
            # FIXME: including all meshes leads to an "OgnSdStageInstanceMapping does not support more than 65535 semantic entities (2718824 requested)" error, since entities are restricted to int16
if (
prim.GetName() == "HierarchicalInstancedStaticMesh"
): # or "FoliageInstancedStaticMeshComponent" in prim.GetName():
add_update_semantics(prim, sem_class_name)
update_submesh = True
children = prim.GetChildren()
if len(children) > 0:
for child in children:
update_submesh = recursive_semUpdate(child, sem_class_name, update_submesh)
return update_submesh
def recursive_meshInvestigator(mesh_idx, mesh_name, mesh_prim_list) -> bool:
success = False
for class_name, keywords in keywords_class_mapping_lower.items():
                if any(keyword in mesh_name for keyword in keywords):
update_submesh = recursive_semUpdate(mesh_prim_list[mesh_idx], class_name, False)
if not update_submesh:
add_update_semantics(mesh_prim_list[mesh_idx], class_name)
success = True
break
if not success:
success_child = []
mesh_prims_children, mesh_prims_name_children = self.get_mesh_prims(
mesh_prim_list[mesh_idx].GetPrimPath().pathString
)
mesh_prims_name_children = [mesh_prim_single.lower() for mesh_prim_single in mesh_prims_name_children]
for mesh_idx_child, mesh_name_child in enumerate(mesh_prims_name_children):
success_child.append(
recursive_meshInvestigator(mesh_idx_child, mesh_name_child, mesh_prims_children)
)
success = any(success_child)
return success
mesh_list = []
for mesh_idx, mesh_name in enumerate(mesh_prims_name):
success = recursive_meshInvestigator(mesh_idx=mesh_idx, mesh_name=mesh_name, mesh_prim_list=mesh_prims)
if success:
mesh_list.append(mesh_idx)
missing = [i for x, y in zip(mesh_list, mesh_list[1:]) for i in range(x + 1, y) if y - x > 1]
assert len(mesh_list) > 0, "No mesh is assigned a semantic class!"
        assert len(mesh_list) == len(
            mesh_prims_name
        ), f"Not all meshes are assigned a semantic class! The following mesh names are still missing: {[mesh_prims_name[miss_idx] for miss_idx in missing]}"
carb.log_info("Semantic mapping done.")
return
""" Modify Mesh """
def _multiply_crosswalks(self) -> None:
"""Increase number of crosswalks in the scene."""
with open(self._cfg.cw_config_file) as stream:
            multiply_cfg: dict = yaml.safe_load(stream)
# get the stage
stage = omni.usd.get_context().get_stage()
# get town prim
        town_prim = multiply_cfg.pop("town_prim")
# init counter
crosswalk_add_counter = 0
        for key, value in multiply_cfg.items():
print(f"Execute crosswalk multiplication '{key}'")
# iterate over the number of crosswalks to be created
for copy_idx in range(value["factor"]):
success = omni.usd.duplicate_prim(
stage=stage,
prim_path=os.path.join(self._cfg.prim_path, town_prim, value["cw_prim"]),
path_to=os.path.join(
self._cfg.prim_path, town_prim, value["cw_prim"] + f"_cp{copy_idx}" + value.get("suffix", "")
),
duplicate_layers=True,
)
assert success, f"Failed to duplicate crosswalk '{key}'"
# get crosswalk prim
prim_utils.get_prim_at_path(
os.path.join(
self._cfg.prim_path, town_prim, value["cw_prim"] + f"_cp{copy_idx}" + value.get("suffix", "")
)
).GetAttribute("xformOp:translate").Set(
Gf.Vec3d(value["translation"][0], value["translation"][1], value["translation"][2]) * (copy_idx + 1)
)
# update counter
crosswalk_add_counter += 1
carb.log_info(f"Number of crosswalks added: {crosswalk_add_counter}")
print(f"Number of crosswalks added: {crosswalk_add_counter}")
return
def _insert_vehicles(self):
# load vehicle config file
with open(self._cfg.vehicle_config_file) as file:
vehicle_cfg: dict = yaml.safe_load(file)
# get the stage
stage = omni.usd.get_context().get_stage()
# get town prim and all its meshes
town_prim = vehicle_cfg.pop("town_prim")
mesh_prims: dict = prim_utils.get_prim_at_path(f"{self._cfg.prim_path}/{town_prim}").GetChildren()
mesh_prims_name = [mesh_prim_single.GetName() for mesh_prim_single in mesh_prims]
# car counter
car_add_counter = 0
for key, vehicle in vehicle_cfg.items():
print(f"Execute vehicle multiplication '{key}'")
            # get all meshes whose prim name contains the key string
            meshes = [
mesh_prim_single for mesh_prim_single in mesh_prims_name if vehicle["prim_part"] in mesh_prim_single
]
# iterate over the number of vehicles to be created
for idx, translation in enumerate(vehicle["translation"]):
                for single_mesh in meshes:
success = omni.usd.duplicate_prim(
stage=stage,
prim_path=os.path.join(self._cfg.prim_path, town_prim, single_mesh),
path_to=os.path.join(self._cfg.prim_path, town_prim, single_mesh + key + f"_cp{idx}"),
duplicate_layers=True,
)
assert success, f"Failed to duplicate vehicle '{key}'"
# get vehicle prim
prim_utils.get_prim_at_path(
os.path.join(self._cfg.prim_path, town_prim, single_mesh + key + f"_cp{idx}")
).GetAttribute("xformOp:translate").Set(Gf.Vec3d(translation[0], translation[1], translation[2]))
car_add_counter += 1
carb.log_info(f"Number of vehicles added: {car_add_counter}")
print(f"Number of vehicles added: {car_add_counter}")
return
def _insert_people(self):
# load people config file
with open(self._cfg.people_config_file) as file:
people_cfg: dict = yaml.safe_load(file)
for key, person_cfg in people_cfg.items():
carb.log_verbose(f"Insert person '{key}'")
self.insert_single_person(
person_cfg["prim_name"],
person_cfg["translation"],
                scale_people=1,
usd_path=person_cfg.get("usd_path", "People/Characters/F_Business_02/F_Business_02.usd"),
)
# TODO: movement of the people
carb.log_info(f"Number of people added: {len(people_cfg)}")
print(f"Number of people added: {len(people_cfg)}")
return
@staticmethod
def insert_single_person(
prim_name: str,
translation: list,
scale_people: float = 1.0,
usd_path: str = "People/Characters/F_Business_02/F_Business_02.usd",
) -> None:
person_prim = prim_utils.create_prim(
prim_path=os.path.join("/World/People", prim_name),
translation=tuple(translation),
usd_path=os.path.join(ISAAC_NUCLEUS_DIR, usd_path),
scale=(scale_people, scale_people, scale_people),
)
if isinstance(person_prim.GetAttribute("xformOp:orient").Get(), Gf.Quatd):
person_prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(1.0, 0.0, 0.0, 0.0))
else:
person_prim.GetAttribute("xformOp:orient").Set(Gf.Quatf(1.0, 0.0, 0.0, 0.0))
add_update_semantics(person_prim, "person")
return
@staticmethod
def get_mesh_prims(env_prim: str) -> Tuple[List[Usd.Prim], List[str]]:
def recursive_search(start_prim: str, mesh_prims: list):
for curr_prim in prim_utils.get_prim_at_path(start_prim).GetChildren():
if curr_prim.GetTypeName() == "Xform" or curr_prim.GetTypeName() == "Mesh":
mesh_prims.append(curr_prim)
elif curr_prim.GetTypeName() == "Scope":
mesh_prims = recursive_search(start_prim=curr_prim.GetPath().pathString, mesh_prims=mesh_prims)
return mesh_prims
assert prim_utils.is_prim_path_valid(env_prim), f"Prim path '{env_prim}' is not valid"
mesh_prims = []
mesh_prims = recursive_search(env_prim, mesh_prims)
# mesh_prims: dict = prim_utils.get_prim_at_path(self._cfg.prim_path + "/" + self._cfg.usd_name.split(".")[0]).GetChildren()
mesh_prims_name = [mesh_prim_single.GetName() for mesh_prim_single in mesh_prims]
return mesh_prims, mesh_prims_name
# EoF
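For orientation, a minimal standalone usage of this loader might look like the sketch below. It is not part of the repository: it assumes Isaac Sim's `SimulationApp` has already been started with the `omni.isaac.carla` extension enabled, and the paths shown are placeholders.

```python
# Hypothetical driver script for CarlaLoader (not part of the repository).
from omni.isaac.carla.configs import CarlaLoaderConfig
from omni.isaac.carla.scripts import CarlaLoader

# root_path is a placeholder; it must point at the exported Unreal/Carla USD.
cfg = CarlaLoaderConfig(root_path="/data/carla_export", usd_name="Town01_Opt.usd")
loader = CarlaLoader(cfg)
loader.load()  # designs the scene, then adds crosswalks, people, vehicles, and semantics
```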
pascal-roth/orbit_envs/extensions/omni.isaac.carla/omni/isaac/carla/configs/__init__.py

# Copyright (c) 2024 ETH Zurich (Robotic Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# configs
from .configs import DATA_DIR, CarlaLoaderConfig
__all__ = [
# configs
"CarlaLoaderConfig",
# path
"DATA_DIR",
]
# EoF
pascal-roth/orbit_envs/extensions/omni.isaac.carla/omni/isaac/carla/configs/configs.py

# Copyright (c) 2024 ETH Zurich (Robotic Systems Lab)
# Author: Pascal Roth
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# python
import os
from dataclasses import dataclass
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../data"))
@dataclass
class SimCfg:
"""Simulation physics."""
dt = 0.005 # physics-dt:(s)
substeps = 8 # rendering-dt = physics-dt * substeps (s)
gravity = [0.0, 0.0, -9.81] # (m/s^2)
enable_scene_query_support = False # disable scene query for more speed-up
use_flatcache = True # output from simulation to flat cache
use_gpu_pipeline = True # direct GPU access functionality
device = "cpu" # device on which to run simulation/environment
@dataclass
class PhysxCfg:
"""PhysX solver parameters."""
worker_thread_count = 10 # note: unused
solver_position_iteration_count = 4 # note: unused
solver_velocity_iteration_count = 1 # note: unused
enable_sleeping = True # note: unused
max_depenetration_velocity = 1.0 # note: unused
contact_offset = 0.002 # note: unused
rest_offset = 0.0 # note: unused
use_gpu = True # GPU dynamics pipeline and broad-phase type
solver_type = 1 # 0: PGS, 1: TGS
enable_stabilization = True # additional stabilization pass in solver
# (m/s): contact with relative velocity below this will not bounce
bounce_threshold_velocity = 0.5
# (m): threshold for contact point to experience friction force
friction_offset_threshold = 0.04
# (m): used to decide if contacts are close enough to merge into a single friction anchor point
friction_correlation_distance = 0.025
# GPU buffers parameters
gpu_max_rigid_contact_count = 512 * 1024
gpu_max_rigid_patch_count = 80 * 1024 * 2
gpu_found_lost_pairs_capacity = 1024 * 1024 * 2
gpu_found_lost_aggregate_pairs_capacity = 1024 * 1024 * 32
gpu_total_aggregate_pairs_capacity = 1024 * 1024 * 2
gpu_max_soft_body_contacts = 1024 * 1024
gpu_max_particle_contacts = 1024 * 1024
gpu_heap_capacity = 128 * 1024 * 1024
gpu_temp_buffer_capacity = 32 * 1024 * 1024
gpu_max_num_partitions = 8
physx: PhysxCfg = PhysxCfg()
@dataclass
class CarlaLoaderConfig:
# carla map
root_path: str = "path_to_unreal_mesh"
usd_name: str = "Town01_Opt.usd"
suffix: str = "/Town01_Opt"
# prim path for the carla map
prim_path: str = "/World/Carla"
# SimCfg
sim_cfg: SimCfg = SimCfg()
# scale
scale: float = 0.01 # scale the scene to be in meters
# up axis
axis_up: str = "Y"
# multiply crosswalks
cw_config_file: str | None = os.path.join(
DATA_DIR, "town01", "cw_multiply_cfg.yml"
) # if None, no crosswalks are added
# mesh to semantic class mapping --> only if set, semantic classes will be added to the scene
sem_mesh_to_class_map: str | None = os.path.join(
DATA_DIR, "town01", "keyword_mapping.yml"
    ) # alternative: os.path.join(DATA_DIR, "park", "keyword_mapping.yml")
# add Groundplane to the scene
groundplane: bool = True
# add people to the scene
people_config_file: str | None = os.path.join(DATA_DIR, "town01", "people_cfg.yml") # if None, no people are added
# multiply vehicles
vehicle_config_file: str | None = os.path.join(
DATA_DIR, "town01", "vehicle_cfg.yml"
) # if None, no vehicles are added
@property
def usd_path(self) -> str:
return os.path.join(self.root_path, self.usd_name)
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/town02/cw_multiply_cfg.yml

# Definition of which crosswalks should be repeated how often along which axis
# Adjusted for: TOWN02
# each entry has the following format:
# name:
# cw_prim: [str] prim of the crosswalk in the loaded town file
# factor: [int] number how often the crosswalk should be repeated
# translation: [float, float, float] vector along which the crosswalk should be repeated; defines the position of the first
# repeated crosswalk; every following crosswalk will be placed at the position of the
# previous one plus the translation vector
# suffix: [str] optional, str will be added to the copied prim of the new crosswalk
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
# in the original data format of the town file, i.e. y-up and in cm
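# Example: an entry with factor: 2 and translation: [1500, 0, 0] creates two
# copies of cw_prim, offset by +1500 and +3000 along x from the original
# (each copy is placed at copy_idx + 1 times the translation vector).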
town_prim: "Town02"
cw_2:
cw_prim: "Road_Crosswalk_Town02_8"
factor: 4
translation: [+1500, 0, 0]
cw_3:
cw_prim: "Road_Crosswalk_Town02_10"
factor: 2
translation: [-1500, 0, 0]
cw_4:
cw_prim: "Road_Crosswalk_Town02_9"
factor: 4
translation: [+1500, 0, 0]
suffix: "_neg"
cw_5:
cw_prim: "Road_Crosswalk_Town02_11"
factor: 4
translation: [1500, 0, 0]
cw_6_pos:
cw_prim: "Road_Crosswalk_Town02_12"
factor: 1
translation: [0, 0, 1500]
cw_6_neg:
cw_prim: "Road_Crosswalk_Town02_12"
factor: 2
translation: [0, 0, -1500]
cw_7_neg:
cw_prim: "Road_Crosswalk_Town02_7"
factor: 1
translation: [-1500, 0, 0]
cw_7_pos:
cw_prim: "Road_Crosswalk_Town02_7"
factor: 1
translation: [1500, 0, 0]
cw_8:
cw_prim: "Road_Crosswalk_Town02_4"
factor: 2
translation: [1500, 0, 0]
cw_9:
cw_prim: "Road_Crosswalk_Town02_3"
factor: 4
translation: [1500, 0, 0]
cw_10:
cw_prim: "Road_Crosswalk_Town02_6"
factor: 2
translation: [-1500, 0, 0]
cw_11_neg:
cw_prim: "Road_Crosswalk_Town02_1"
factor: 4
translation: [-1500, 0, 0]
cw_11_pos:
cw_prim: "Road_Crosswalk_Town02_1"
factor: 2
translation: [+1500, 0, 0]
cw_12:
cw_prim: "Road_Crosswalk_Town02_2"
factor: 4
translation: [-1500, 0, 0]
cw_13:
cw_prim: "Road_Crosswalk_Town02_13"
factor: 2
translation: [0, 0, +1500]
cw_14_pos:
cw_prim: "Road_Crosswalk_Town02_15"
factor: 2
translation: [0, 0, +1500]
cw_14_neg:
cw_prim: "Road_Crosswalk_Town02_15"
factor: 1
translation: [0, 0, -1500]
cw_15:
cw_prim: "Road_Crosswalk_Town02_16"
factor: 2
translation: [0, 0, -1500]
cw_16_neg:
cw_prim: "Road_Crosswalk_Town02_17"
factor: 2
translation: [0, 0, -1500]
cw_16_pos:
cw_prim: "Road_Crosswalk_Town02_17"
factor: 4
translation: [0, 0, +1500]
cw_17_neg:
cw_prim: "Road_Crosswalk_Town02_19"
factor: 4
translation: [0, 0, -1500]
cw_17_pos:
cw_prim: "Road_Crosswalk_Town02_19"
factor: 1
translation: [0, 0, +1500]
cw_18:
cw_prim: "Road_Crosswalk_Town02_20"
factor: 3
translation: [0, 0, +1500]
# EoF
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/town02/vehicle_cfg.yml

# Definition of where additional vehicles should be added
# Adjusted for: TOWN02
# each entry has the following format:
# name:
# prim_part: [str] part of the prim of the vehicle that should be multiplied (every prim containing this string will be multiplied)
# translation: [[float, float, float]] list of translations of the vehicle
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
# in the original data format of the town file, i.e. y-up and in cm
# NOTE: for Town02, take "Vh_Car_SeatLeon_54" for vehicles along the x axis
town_prim: "Town02"
vehicle_1:
prim_part: "Vh_Car_SeatLeon_54"
translation:
# horizontal road low
- [3900, 0, 600]
- [3900, 0, 3000]
- [3900, 0, 3500]
- [3900, 0, 4000]
- [3900, 0, 6000]
- [3900, 0, -1500]
- [3900, 0, -4000]
- [3900, 0, -7500]
- [3900, 0, -8000]
- [3500, 0, -10000]
- [3500, 0, -7500]
- [3500, 0, -3000]
- [3500, 0, 1000]
- [3500, 0, 5000]
# horizontal road middle
- [-10800, 0, 1000]
- [-10800, 0, 5000]
- [-10800, 0, -2500]
# horizontal road high
- [-15800, 0, 2000]
- [-15800, 0, 4700]
- [-16200, 0, 3400]
- [-16200, 0, 0]
- [-16200, 0, -3000]
- [-16200, 0, -6000]
- [-16200, 0, -9000]
# EoF
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/town02/keyword_mapping.yml
# Mapping mesh keywords to VIPlanner semantic classes
road:
- Road_Road
- Road_Marking
- ManholeCover
- roadunique
sidewalk:
- Road_Sidewalk
- SideWalkCube
- Road_Grass # pedestrian terrain (between buildings, squares, ...)
crosswalk:
- Road_Crosswalk
floor:
- Pathwalk # way to the door of a building
- PathWay # way to the door of a building
- curb
- iron_plank
- Cube
- Floor
vehicle:
- Van
- Vehicle
- Car
building:
- NewBlueprint # roofs, windows, other parts of buildings
- CityBuilding
- Suburb
- House
- MergingBuilding
- BuildingWall
- garage
- airConditioner
- Office
- Block
- Apartment
- ConstructBuilding
- snacksStand
- doghouse
- streetCounter
- fountain
- container
- pergola
- GuardShelter
- atm
- awning
- bus_stop
- NewsStand
- ironplank
- kiosk
- TownHall
wall:
- GardenWall
- Wall
- RepSpline # fences or walls to limit residential areas
- RepeatedMeshesAlongSpline # should make the spline go around the building --> not working in isaac
fence:
- urbanFence
- chain_barrier
- picketFence
- fence
pole:
- bollard
- Lamppost
- Parklight
- CityLamp
- Traffic_Light_Base
- ElectricPole
- PoleCylinder
traffic_sign:
- streetBillboard
- RoundSign
- roadsigns
traffic_light:
- TLights
- TL_BotCover
- SM_Charger
- SM_FreewayLights
bench:
- bench
vegetation:
- tree
- Stone
- Cypress
- PlantPot
- TreePot
- Maple
- Beech
- FanPalm
- Sassafras
- Pine_Bush
- Hedge
- Bush
- palm
- acer
- plant_pit
- arbusto_pine
terrain:
- dirtDebris # roughness in the terrain, street or sidewalk (traversable but more difficult)
- GrassLeaf
- Grass
- LandscapeComponent
- Ash
water_surface:
- TileLake
sky:
- terrain2
- sky
dynamic:
- Trashbag
- advertise
- creased_box
- garbage
- trashcan
- clothes_line
- barbecue
- ConstructionCone
- box
- droppingasset
- barrel
static:
- firehydrant
- Gnome
- metroMap
- Bikeparking
- StaticMesh # gate barrier
- trampoline
- wheelbarrow
- NewspaperBox
- swing
- bin
- big_plane
- plane
- slide
- instancedfoliageactor
- roadbillboard
- prophitreacting_child # vending machines
- prop_wateringcan
furniture:
- Campingtable
- swingcouch
- table
- chair
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/town01/cw_multiply_cfg.yml

# Definition of which crosswalks should be repeated how often along which axis
# Adjusted for: TOWN01
# each entry has the following format:
# name:
# cw_prim: [str] prim of the crosswalk in the loaded town file
# factor: [int] number how often the crosswalk should be repeated
# translation: [float, float, float] vector along which the crosswalk should be repeated; defines the position of the first
# repeated crosswalk; every following crosswalk will be placed at the position of the
# previous one plus the translation vector
# suffix: [str] optional, str will be added to the copied prim of the new crosswalk
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
# in the original data format of the town file, i.e. y-up and in cm
town_prim: "Town01_Opt"
cw_2:
cw_prim: "Road_Crosswalk_Town01_2"
factor: 2
translation: [0, 0, -1500]
cw_3_pos:
cw_prim: "Road_Crosswalk_Town01_3"
factor: 6
translation: [1500, 0, 0]
cw_3_neg:
cw_prim: "Road_Crosswalk_Town01_3"
factor: 1
translation: [-1500, 0, 0]
suffix: "_neg"
cw_4:
cw_prim: "Road_Crosswalk_Town01_4"
factor: 1
translation: [1500, 0, 0]
cw_5:
cw_prim: "Road_Crosswalk_Town01_5"
factor: 3
translation: [1500, 0, 0]
cw_6:
cw_prim: "Road_Crosswalk_Town01_6"
factor: 3
translation: [0, 0, -1500]
cw_9:
cw_prim: "Road_Crosswalk_Town01_9"
factor: 2
translation: [0, 0, -1500]
cw_10:
cw_prim: "Road_Crosswalk_Town01_10"
factor: 1
translation: [0, 0, 1500]
cw_11:
cw_prim: "Road_Crosswalk_Town01_11"
factor: 1
translation: [0, 0, 1500]
cw_14:
cw_prim: "Road_Crosswalk_Town01_14"
factor: 1
translation: [0, 0, 1500]
cw_15:
cw_prim: "Road_Crosswalk_Town01_15"
factor: 2
translation: [0, 0, -1500]
cw_18:
cw_prim: "Road_Crosswalk_Town01_18"
factor: 5
translation: [1500, 0, 0]
cw_19:
cw_prim: "Road_Crosswalk_Town01_19"
factor: 2
translation: [1500, 0, 0]
cw_21:
cw_prim: "Road_Crosswalk_Town01_21"
factor: 3
translation: [1500, 0, 0]
cw_22:
cw_prim: "Road_Crosswalk_Town01_22"
factor: 5
translation: [1500, 0, 0]
cw_24:
cw_prim: "Road_Crosswalk_Town01_24"
factor: 3
translation: [-1500, 0, 0]
cw_26_pos:
cw_prim: "Road_Crosswalk_Town01_26"
factor: 5
translation: [1500, 0, 0]
cw_26_neg:
cw_prim: "Road_Crosswalk_Town01_26"
factor: 3
translation: [-1500, 0, 0]
suffix: "_neg"
cw_28:
cw_prim: "Road_Crosswalk_Town01_28"
factor: 4
translation: [0, 0, 1500]
cw_29:
cw_prim: "Road_Crosswalk_Town01_29"
factor: 4
translation: [0, 0, 1500]
cw_30:
cw_prim: "Road_Crosswalk_Town01_30"
factor: 4
translation: [0, 0, 1500]
cw_30_neg:
cw_prim: "Road_Crosswalk_Town01_31"
factor: 2
translation: [0, 0, -1500]
cw_32:
cw_prim: "Road_Crosswalk_Town01_32"
factor: 6
translation: [0, 0, -1500]
cw_33_pos:
cw_prim: "Road_Crosswalk_Town01_33"
factor: 4
translation: [1500, 0, 0]
cw_33_neg:
cw_prim: "Road_Crosswalk_Town01_33"
factor: 3
translation: [-2500, 0, 0]
suffix: "_neg"
cw_34:
cw_prim: "Road_Crosswalk_Town01_34"
factor: 7
translation: [1500, 0, 0]
cw_35:
cw_prim: "Road_Crosswalk_Town01_35"
factor: 1
translation: [1500, 0, 0]
cw_36_pos:
cw_prim: "Road_Crosswalk_Town01_36"
factor: 1
translation: [0, 0, 1500]
cw_36_neg:
cw_prim: "Road_Crosswalk_Town01_36"
factor: 5
translation: [0, 0, -1500]
suffix: "_neg"
cw_40:
cw_prim: "Road_Crosswalk_Town01_40"
factor: 4
translation: [1500, 0, 0]
# EoF
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/town01/vehicle_cfg.yml

# Definition of where additional vehicles should be added
# Adjusted for: TOWN01
# each entry has the following format:
# name:
# prim_part: [str] part of the prim of the vehicle that should be multiplied (every prim containing this string will be multiplied)
# translation: [[float, float, float]] list of translations of the vehicle
# NOTE: rotations and scales applied to the mesh are not applied to the translations given here, i.e. they have to be
# in the original data format of the town file, i.e. y-up and in cm
# NOTE: for Town01, take "ChevroletImpala_High_V4" for vehicles along the x axis and "JeepWranglerRubicon_36"
# for vehicles along the y axis
town_prim: "Town01_Opt"
vehicle_1:
prim_part: "ChevroletImpala_High_V4"
translation:
- [-15300, 0, -4000]
- [-15300, 0, 0]
- [-15300, 0, 15000]
- [-15600, 0, 21000]
- [9000, 0, 20500]
- [9400, 0, 15000]
- [9400, 0, 9000]
- [9400, 0, 7000]
- [9000, 0, 6000]
- [9000, 0, 500]
- [9000, 0, -4000]
vehicle_2:
prim_part: "JeepWranglerRubicon_36"
translation:
- [0, 0, -1500]
- [3500, 0, -1500]
- [5300, 0, -1900]
- [9000, 0, -1900]
- [16500, 0, -1500]
- [22500, 0, -1900]
- [25000, 0, 3800]
- [20000, 0, 4200]
- [17000, 0, 4200]
- [12000, 0, 3800]
- [7000, 0, 3800]
- [7000, 0, 11100]
- [11000, 0, 11500]
- [16000, 0, 11100]
- [20000, 0, 11100]
- [26000, 0, 11500]
- [26000, 0, 17800]
- [23000, 0, 18200]
- [18000, 0, 18200]
- [14000, 0, 17800]
- [13500, 0, 18200]
- [10000, 0, 18200]
- [9500, 0, 17800]
- [4000, 0, 17800]
- [2000, 0, 30800]
- [-1000, 0, 31300]
- [6000, 0, 31300]
- [12000, 0, 30800]
- [15000, 0, 30800]
- [15600, 0, 30800]
- [16400, 0, 30800]
- [21000, 0, 31300]
- [25000, 0, 31300]
# EoF
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/town01/area_filter_cfg.yaml

# Definition of which areas should not be explored and used to sample points
# Adjusted for: TOWN01
# each entry has the following format:
# name:
# x_low: [float] lower bound on the x axis
# x_high: [float] upper bound on the x axis
# y_low: [float] lower bound on the y axis
# y_high: [float] upper bound on the y axis
area_1:
x_low: 208.9
x_high: 317.8
y_low: 100.5
y_high: 325.5
area_2:
x_low: 190.3
x_high: 315.8
y_low: 12.7
y_high: 80.6
area_3:
x_low: 123.56
x_high: 139.37
y_low: 10
y_high: 80.0
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/town01/keyword_mapping.yml
road:
- Road_Road
- Road_Marking
- ManholeCover
- roadunique
sidewalk:
- Road_Sidewalk
- SideWalkCube
- Road_Grass # pedestrian terrain (between buildings, squares, ...)
crosswalk:
- Road_Crosswalk
floor:
- Pathwalk # way to the door of a building
- PathWay # way to the door of a building
- curb
- iron_plank
- Cube
vehicle:
- Van
- Vehicle
- Car
building:
- NewBlueprint # roofs, windows, other parts of buildings
- CityBuilding
- Suburb
- House
- MergingBuilding
- BuildingWall
- garage
- airConditioner
- Office
- Block
- Apartment
- ConstructBuilding
- snacksStand
- doghouse
- streetCounter
- fountain
- container
- pergola
- GuardShelter
- atm
- awning
- bus_stop
- NewsStand
- ironplank
- kiosk
wall:
- GardenWall
- Wall
- RepSpline # fences or walls to limit residential areas
- RepeatedMeshesAlongSpline # should make the spline go around the building --> not working in isaac
fence:
- urbanFence
- chain_barrier
- picketFence
- fence
pole:
- bollard
- Lamppost
- Parklight
- CityLamp
- Traffic_Light_Base
traffic_sign:
- streetBillboard
- RoundSign
- roadsigns
traffic_light:
- TLights
- TL_BotCover
bench:
- bench
vegetation:
- tree
- Stone
- Cypress
- PlantPot
- TreePot
- Maple
- Beech
- FanPalm
- Sassafras
- Pine_Bush
- Hedge
- Bush
- palm
- acer
terrain:
- dirtDebris # roughness in the terrain, street or sidewalk (traversable but more difficult)
- GrassLeaf
- Grass
- LandscapeComponent
- Ash
water_surface:
- TileLake
sky:
- terrain2
- sky
dynamic:
- Trashbag
- advertise
- creased_box
- garbage
- trashcan
- clothes_line
- barbecue
- ConstructionCone
- box
- droppingasset
- barrel
static:
- firehydrant
- Gnome
- metroMap
- Bikeparking
- StaticMesh # gate barrier
- trampoline
- wheelbarrow
- NewspaperBox
- swing
- bin
- big_plane
- slide
- instancedfoliageactor
- roadbillboard
- prophitreacting_child # vending machines
furniture:
- Campingtable
- swingcouch
- table
- chair
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/park/keyword_mapping.yml

sidewalk:
- Sidewalk
floor:
- SM_ParkSquare05_4HISMA
- SM_ParkSquare02_1HISMA
- SM_ParkSquare05_4HISMA
- SM_ParkSquare05_6HISMA
- SM_ParkSquare05_3HISMA
- SM_ParkSquare04_1HISMA
- SM_ParkSquare05_1HISMA
- SM_ParkSquare02_2HISMA
- SM_ParkSquare11_1HISMA
- SM_ParkSquare05_7HISMA
- SM_ParkSquare05_8HISMA
- SM_ParkSquare05_9HISMA
- SM_ParkSquare05_5HISMA
- SM_ParkSquare12_1HISMA
- SM_ParkSquare05_2HISMA
- TennisField
- BaseballField
- BasketballField
- Asphalt
- FootballField
- SM_ParkSquare03_7HISMA_598
- SM_PoolHISMA
- Border
- Manhole
- ParkPath
- RoadDecal
- MergedRoad
bridge:
- Bridge
tunnel:
- tunnel
building:
- CafeBuilding
- House
- Tribune
- Pier
- Bower
stairs:
- SM_ParkSquare03_3HISMA
- SM_ParkSquare05_3HISMA
- SM_ParkSquare07_1HISMA
- SM_ParkSquare05_12HISMA
- SM_ParkSquare03_2HISMA
- SM_ParkSquare03_5HISMA
- SM_ParkSquare03_5HISMA
- SM_ParkSquare03_7HISMA
- ParkSquare03_8HISMA
- ParkSquare13_7HISMA
- SM_ParkSquare03_2HISMA_687
- SM_ParkSquare03_1HISMA
- SM_ParkSquare05_2HISMA
wall:
- SM_ParkSquare02_4HISMA
- SM_ParkSquare01_5HISMA
- SM_ParkSquare06_1HISMA
- SM_ParkSquare02_8HISMA
- SM_ParkSquare06_4HISMA
- SM_ParkSquare10HISMA
- SM_ParkSquare06_5HISMA
- SM_ParkSquare06_3HISMA
- SM_ParkSquare06_2HISMA
- SM_ParkSquare02_7HISMA
- SM_ParkSquare02_1HISMA
- SM_ParkSquare03_6HISMA
- SM_ParkSquare06_6HISMA
- SM_ParkSquare12_2HISMA
- SM_ParkSquare07_2HISMA
- SM_ParkSquare01_3HISMA
- SM_ParkSquare01_1HISMA
- SM_ParkSquare07_3HISMA
- SM_ParkSquare05_12HISMA
- SM_ParkSquare02_6HISMA
- SM_ParkSquare01_10HISMA
- SM_ParkSquare02_3HISMA
- SM_ParkSquare02_5HISMA
- SM_ParkSquare02_5HISMA_209
- SM_ParkSquare12_3HISMA
- SM_ParkSquare01_2HISMA
- SM_ParkSquare01_9HISMA
- SM_ParkSquare03_4HISMA
- ParkSquare14_3HISMA
- ParkSquare13_5HISMA
- SM_ParkSquare02_2HISMA
- SM_ParkSquare01_7HISMA
- SM_ParkSquare01_4HISMA
- ParkSquare01_11HISMA
- SM_ParkSquare01_6HISMA
- SM_ParkSquare01_8HISMA
- ParkSquare13_7HISMA
- BaseballGate
- SM_Fountain01HISMA
- MergedParkSquare
fence:
- ParkSquare14_3HISMA
- ParkSquare13_1HISMA
- ParkSquare14_2HISMA
- ParkSquare13_3HISMA
- ParkSquare13_2HISMA
- Fence
- ParkSquare13_3HISMA_600
- ParkSquare13_4HISMA_603
- ParkSquare13_5HISMA_605
- MergedPark03_10
- ParkSquare14_1HISMA
- ParkSquare13_6HISMA
pole:
- LampPost
- TrafficBarrel
- TrashCan
traffic_sign:
- RoadSigns
traffic_light:
- TennisFloodlight
- TrafficLight
bench:
- Bench
vegetation:
- BP_SplineMeshes # all spline meshes
- Amur
- Elm
- Ivy
- Maple
- Amur
- Bush
- grass
- Weeping
- Rock
terrain:
- Landscape
- SM_ParkSquare11_3HISMA
- MergedGround
- Instancedfoliageactor_2
- SM_ParkSquare11_2HISMA
- MergedLeaks
water_surface:
- Plane
- PlanarReflection
ceiling:
- SM_ParkSquare09_1HISMA
- SM_ParkSquare09_3HISMA
- SM_ParkSquare09_4HISMA
dynamic:
- DryLeaves06HISMA
- DryLeaves07HISMA
- LeakDecal
- Newspaper
static:
- Statue
- PlayGround # all playground meshes
- TennisNet
- TennisUmpiresChair
- Umbrella
- BasketballHoop
- DrinkingFountain
- FoodStalls
- FoodballGate
- RoadBlock
- Sphere
- Tribune
- FootballGate
furniture:
- Table
- CafeChair
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/warehouse/people_cfg.yml

person_1:
prim_name: "Person_1"
translation: [4.23985, -2.42198, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_01_new/male_adult_construction_01_new.usd
person_2:
prim_name: "Person_2"
translation: [2.51653, 7.80822, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_03/male_adult_construction_03.usd
person_3:
prim_name: "Person_3"
translation: [5.07179, 3.8561, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_05_new/male_adult_construction_05_new.usd
person_4:
prim_name: "Person_4"
translation: [-3.2015, 11.79695, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_01/male_adult_construction_01.usd
person_5:
prim_name: "Person_5"
translation: [-6.70566, 7.58019, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_02/male_adult_construction_02.usd
person_6:
prim_name: "Person_6"
translation: [-5.12784, 2.43409, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_05/male_adult_construction_05.usd
person_7:
prim_name: "Person_7"
translation: [-6.98476, -9.47249, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_01_new/male_adult_construction_01_new.usd
person_8:
prim_name: "Person_8"
translation: [-1.63744, -3.43285, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_01_new/male_adult_construction_01_new.usd
person_9:
prim_name: "Person_9"
translation: [6.15617, -8.3114, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/original_male_adult_construction_05/male_adult_construction_05.usd
person_10:
prim_name: "Person_10"
translation: [5.34416, -7.47814, 0.0]
target: [0, 0, 0]
usd_path: People/Characters/male_adult_construction_05_new/male_adult_construction_05_new.usd
pascal-roth/orbit_envs/extensions/omni.isaac.carla/data/warehouse/keyword_mapping.yml

floor:
- SM_Floor1
- SM_Floor2
- SM_Floor3
- SM_Floor4
- SM_Floor5
- SM_Floor6
- groundplane
wall:
- FuseBox
- SM_PillarA
- SM_Sign
- SM_Wall
- S_Barcode
bench:
- Bench
ceiling:
- SM_Ceiling
- PillarPartA
- SM_Beam
- SM_Bracket
static:
- LampCeiling
- SM_FloorDecal
- SM_FireExtinguisher
furniture:
- SM_Rack
- SM_SignCVer
- S_AisleSign
- SM_Palette
- SM_CardBox
- SmallKLT
- SM_PushCarta
- SM_CratePlastic
swadaskar/Isaac_Sim_Folder/PACKAGE-INFO.yaml

Package: isaac-sim-standalone
Version: 2022.2.1-rc.14+2022.2.494.70497c06.tc.linux-x86_64.release
Commit: 70497c064272778b550d785b89e618821248d0cf
Time: Thu Mar 16 01:35:15 2023
CI Build ID: 14259040
Platform: linux-x86_64
CI Build Number: 2022.2.1-rc.14+2022.2.494.70497c06.tc
swadaskar/Isaac_Sim_Folder/environment.yml

name: isaac-sim
channels:
- defaults
- pytorch
- nvidia
dependencies:
- python=3.7
- pip
- pytorch
- torchvision
- torchaudio
- cuda-toolkit=11.7
- pip:
- stable-baselines3==1.6.2
- tensorboard==2.11.0
- tensorboard-plugin-wit==1.8.1
- protobuf==3.20.3
swadaskar/Isaac_Sim_Folder/launcher.toml

#displayed application name
name = "Isaac Sim"
#displayed before application name in launcher
productArea = "Omniverse"
version = "2022.2.1"
#unique identifier for component, all lower case, persists between versions
slug = "isaac_sim"
## install and launch instructions by environment
[defaults.windows-x86_64]
url = ""
entrypoint = "${productRoot}/isaac-sim.selector.bat"
args = []
[defaults.windows-x86_64.environment]
[defaults.windows-x86_64.install]
pre-install = ""
pre-install-args = []
install = ""
install-args = []
post-install = "${productRoot}/omni.isaac.sim.post.install.bat"
post-install-args = ">${productRoot}/omni.isaac.sim.post.install.log"
[defaults.windows-x86_64.uninstall]
pre-uninstall = ""
pre-uninstall-args = []
uninstall = ""
uninstall-args = []
post-uninstall = ""
post-uninstall-args = []
[defaults.linux-x86_64]
url = ""
entrypoint = "${productRoot}/isaac-sim.selector.sh"
args = []
[defaults.linux-x86_64.environment]
[defaults.linux-x86_64.install]
pre-install = ""
pre-install-args = []
install = ""
install-args = []
post-install = "${productRoot}/omni.isaac.sim.post.install.sh"
post-install-args = ">${productRoot}/omni.isaac.sim.post.install.log"
[defaults.linux-x86_64.uninstall]
pre-uninstall = ""
pre-uninstall-args = []
uninstall = ""
uninstall-args = []
post-uninstall = ""
post-uninstall-args = []
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/PACKAGE-LICENSES/omni.isaac.dofbot-LICENSE.md

Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/config/extension.toml

[core]
reloadable = true
order = 0
[package]
version = "0.3.0"
category = "Simulation"
title = "Isaac Dofbot Robot"
description = "Isaac Dofbot Robot Helper Class"
authors = ["NVIDIA"]
repository = ""
keywords = ["isaac"]
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
icon = "data/icon.png"
[dependencies]
"omni.isaac.core" = {}
"omni.isaac.motion_generation" = {}
"omni.isaac.manipulators" = {}
[[python.module]]
name = "omni.isaac.dofbot"
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/__init__.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.dofbot.dofbot import DofBot
from omni.isaac.dofbot.kinematics_solver import KinematicsSolver
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/kinematics_solver.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.motion_generation import ArticulationKinematicsSolver, interface_config_loader, LulaKinematicsSolver
from omni.isaac.core.articulations import Articulation
from typing import Optional
class KinematicsSolver(ArticulationKinematicsSolver):
"""Kinematics Solver for Dofbot robot. This class loads a LulaKinematicsSovler object
Args:
robot_articulation (Articulation): An initialized Articulation object representing this Dofbot
end_effector_frame_name (Optional[str]): The name of the Dofbot end effector. If None, an end effector link will
be automatically selected. Defaults to None.
"""
def __init__(self, robot_articulation: Articulation, end_effector_frame_name: Optional[str] = None) -> None:
kinematics_config = interface_config_loader.load_supported_lula_kinematics_solver_config("DofBot")
self._kinematics = LulaKinematicsSolver(**kinematics_config)
if end_effector_frame_name is None:
end_effector_frame_name = "link5"
ArticulationKinematicsSolver.__init__(self, robot_articulation, self._kinematics, end_effector_frame_name)
return
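As a rough, hypothetical usage sketch (not from the repository): the `compute_inverse_kinematics` call below comes from the `ArticulationKinematicsSolver` base class in `omni.isaac.motion_generation`, and the target pose values are made up.

```python
# Illustrative only: solve IK for a DofBot that already exists in the stage
# and has been initialized (e.g. after world.reset()).
import numpy as np
from omni.isaac.dofbot import DofBot, KinematicsSolver

robot = DofBot(prim_path="/World/DofBot")  # assumes the prim was added earlier
solver = KinematicsSolver(robot)

target_position = np.array([0.1, 0.1, 0.15])  # meters, in the robot base frame
action, success = solver.compute_inverse_kinematics(target_position)
if success:
    robot.apply_action(action)  # drive the articulation toward the IK solution
```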
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/dofbot.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Optional, List
import numpy as np
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
import carb
from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper
class DofBot(Robot):
"""[summary]
Args:
prim_path (str): [description]
name (str, optional): [description]. Defaults to "dofbot_robot".
usd_path (Optional[str], optional): [description]. Defaults to None.
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
end_effector_prim_name (Optional[str], optional): [description]. Defaults to None.
gripper_dof_names (Optional[List[str]], optional): [description]. Defaults to None.
gripper_open_position (Optional[np.ndarray], optional): [description]. Defaults to None.
gripper_closed_position (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "dofbot_robot",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
end_effector_prim_name: Optional[str] = None,
gripper_dof_names: Optional[List[str]] = None,
gripper_open_position: Optional[np.ndarray] = None,
gripper_closed_position: Optional[np.ndarray] = None,
deltas: Optional[np.ndarray] = None,
) -> None:
prim = get_prim_at_path(prim_path)
self._end_effector = None
self._gripper = None
self._end_effector_prim_name = end_effector_prim_name
if not prim.IsValid():
if usd_path:
add_reference_to_stage(usd_path=usd_path, prim_path=prim_path)
else:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
usd_path = assets_root_path + "/Isaac/Robots/Dofbot/dofbot.usd"
add_reference_to_stage(usd_path=usd_path, prim_path=prim_path)
if self._end_effector_prim_name is None:
self._end_effector_prim_path = prim_path + "/link5/Finger_Right_01"
else:
self._end_effector_prim_path = prim_path + "/" + end_effector_prim_name
if gripper_dof_names is None:
gripper_dof_names = ["Finger_Left_01_RevoluteJoint", "Finger_Right_01_RevoluteJoint"]
if gripper_open_position is None:
gripper_open_position = np.array([-0.67192185, 0.67192185])
if gripper_closed_position is None:
gripper_closed_position = np.array([0.523599, -0.523599])
else:
# TODO: change this
if self._end_effector_prim_name is None:
self._end_effector_prim_path = prim_path + "/link5/Finger_Right_01"
else:
self._end_effector_prim_path = prim_path + "/" + end_effector_prim_name
if gripper_dof_names is None:
gripper_dof_names = ["Finger_Left_01_RevoluteJoint", "Finger_Right_01_RevoluteJoint"]
if gripper_open_position is None:
gripper_open_position = np.array([-0.67192185, 0.67192185])
if gripper_closed_position is None:
gripper_closed_position = np.array([0.523599, -0.523599])
super().__init__(
prim_path=prim_path, name=name, position=position, orientation=orientation, articulation_controller=None
)
if gripper_dof_names is not None:
if deltas is None:
deltas = np.array([-0.1, 0.1])
self._gripper = ParallelGripper(
end_effector_prim_path=self._end_effector_prim_path,
joint_prim_names=gripper_dof_names,
joint_opened_positions=gripper_open_position,
joint_closed_positions=gripper_closed_position,
action_deltas=deltas,
)
return
@property
def end_effector(self) -> RigidPrim:
"""[summary]
Returns:
RigidPrim: [description]
"""
return self._end_effector
@property
def gripper(self) -> ParallelGripper:
"""[summary]
Returns:
ParallelGripper: [description]
"""
return self._gripper
def initialize(self, physics_sim_view=None) -> None:
"""[summary]
"""
super().initialize(physics_sim_view)
self._end_effector = RigidPrim(prim_path=self._end_effector_prim_path, name=self.name + "_end_effector")
self._end_effector.initialize(physics_sim_view)
self._gripper.initialize(
physics_sim_view=physics_sim_view,
articulation_apply_action_func=self.apply_action,
get_joint_positions_func=self.get_joint_positions,
set_joint_positions_func=self.set_joint_positions,
dof_names=self.dof_names,
)
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
self._gripper.post_reset()
self._articulation_controller.switch_dof_control_mode(
dof_index=self.gripper.joint_dof_indicies[0], mode="position"
)
self._articulation_controller.switch_dof_control_mode(
dof_index=self.gripper.joint_dof_indicies[1], mode="position"
)
return
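A hedged sketch of how this class is typically used from a standalone script (not part of this file; it assumes a running `SimulationApp` and uses the `World` helper from `omni.isaac.core` — `world.reset()` is what triggers `initialize()` above):

```python
# Illustrative only: add a DofBot to a scene and exercise its gripper.
from omni.isaac.core import World
from omni.isaac.dofbot import DofBot

world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
robot = world.scene.add(DofBot(prim_path="/World/DofBot", name="my_dofbot"))
world.reset()  # calls initialize(), which wires up the ParallelGripper

robot.gripper.open()   # moves the fingers to joint_opened_positions
robot.gripper.close()  # moves the fingers to joint_closed_positions
```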
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/tasks/pick_place.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.isaac.core.tasks as tasks
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dofbot import DofBot
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.utils.string import find_unique_string_name
import numpy as np
from typing import Optional
class PickPlace(tasks.PickPlace):
def __init__(
self,
name: str = "dofbot_pick_place",
cube_initial_position: Optional[np.ndarray] = None,
cube_initial_orientation: Optional[np.ndarray] = None,
target_position: Optional[np.ndarray] = None,
cube_size: Optional[np.ndarray] = None,
offset: Optional[np.ndarray] = None,
) -> None:
"""[summary]
Args:
name (str, optional): [description]. Defaults to "dofbot_pick_place".
cube_initial_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_initial_orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
if cube_initial_position is None:
cube_initial_position = np.array([0.31, 0, 0.025 / 2.0]) / get_stage_units()
if cube_size is None:
cube_size = np.array([0.025, 0.025, 0.025]) / get_stage_units()
if target_position is None:
target_position = np.array([-0.31, 0.31, 0.025]) / get_stage_units()
tasks.PickPlace.__init__(
self,
name=name,
cube_initial_position=cube_initial_position,
cube_initial_orientation=cube_initial_orientation,
target_position=target_position,
cube_size=cube_size,
offset=offset,
)
return
def set_robot(self) -> DofBot:
"""[summary]
Returns:
DofBot: [description]
"""
dofbot_prim_path = find_unique_string_name(
initial_name="/World/DofBot", is_unique_fn=lambda x: not is_prim_path_valid(x)
)
dofbot_robot_name = find_unique_string_name(
initial_name="my_dofbot", is_unique_fn=lambda x: not self.scene.object_exists(x)
)
return DofBot(prim_path=dofbot_prim_path, name=dofbot_robot_name)
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/tasks/__init__.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.dofbot.tasks.follow_target import FollowTarget
from omni.isaac.dofbot.tasks.pick_place import PickPlace
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/tasks/follow_target.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.isaac.core.tasks as tasks
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.dofbot import DofBot
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.utils.string import find_unique_string_name
import numpy as np
from typing import Optional
class FollowTarget(tasks.FollowTarget):
"""[summary]
Args:
name (str, optional): [description]. Defaults to "dofbot_follow_target".
target_prim_path (Optional[str], optional): [description]. Defaults to None.
target_name (Optional[str], optional): [description]. Defaults to None.
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
target_orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
dofbot_prim_path (Optional[str], optional): [description]. Defaults to None.
dofbot_robot_name (Optional[str], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "dofbot_follow_target",
target_prim_path: Optional[str] = None,
target_name: Optional[str] = None,
target_position: Optional[np.ndarray] = None,
target_orientation: Optional[np.ndarray] = None,
offset: Optional[np.ndarray] = None,
dofbot_prim_path: Optional[str] = None,
dofbot_robot_name: Optional[str] = None,
) -> None:
if target_position is None:
target_position = np.array([0, 0.1, 0.1]) / get_stage_units()
tasks.FollowTarget.__init__(
self,
name=name,
target_prim_path=target_prim_path,
target_name=target_name,
target_position=target_position,
target_orientation=target_orientation,
offset=offset,
)
self._dofbot_prim_path = dofbot_prim_path
self._dofbot_robot_name = dofbot_robot_name
return
def set_robot(self) -> DofBot:
"""[summary]
Returns:
DofBot: [description]
"""
if self._dofbot_prim_path is None:
self._dofbot_prim_path = find_unique_string_name(
initial_name="/World/DofBot", is_unique_fn=lambda x: not is_prim_path_valid(x)
)
if self._dofbot_robot_name is None:
self._dofbot_robot_name = find_unique_string_name(
initial_name="my_dofbot", is_unique_fn=lambda x: not self.scene.object_exists(x)
)
return DofBot(prim_path=self._dofbot_prim_path, name=self._dofbot_robot_name)
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/controllers/rmpflow_controller.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.isaac.motion_generation as mg
from omni.isaac.core.articulations import Articulation
class RMPFlowController(mg.MotionPolicyController):
"""[summary]
Args:
name (str): [description]
robot_articulation (Articulation): [description]
physics_dt (float, optional): [description]. Defaults to 1.0/60.0.
"""
def __init__(self, name: str, robot_articulation: Articulation, physics_dt: float = 1.0 / 60.0) -> None:
self.rmp_flow_config = mg.interface_config_loader.load_supported_motion_policy_config("DofBot", "RMPflow")
self.rmp_flow = mg.lula.motion_policies.RmpFlow(**self.rmp_flow_config)
self.articulation_rmp = mg.ArticulationMotionPolicy(robot_articulation, self.rmp_flow, physics_dt)
mg.MotionPolicyController.__init__(self, name=name, articulation_motion_policy=self.articulation_rmp)
self._default_position, self._default_orientation = (
self._articulation_motion_policy._robot_articulation.get_world_pose()
)
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
return
def reset(self):
mg.MotionPolicyController.reset(self)
self._motion_policy.set_robot_base_pose(
robot_position=self._default_position, robot_orientation=self._default_orientation
)
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/controllers/__init__.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.dofbot.controllers.rmpflow_controller import RMPFlowController
from omni.isaac.dofbot.controllers.pick_place_controller import PickPlaceController
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/omni/isaac/dofbot/controllers/pick_place_controller.py

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.core.utils.stage import get_stage_units
from omni.isaac.core.articulations import Articulation
from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper
import omni.isaac.manipulators.controllers as manipulators_controllers
from omni.isaac.dofbot.controllers import RMPFlowController
from typing import Optional, List
class PickPlaceController(manipulators_controllers.PickPlaceController):
"""[summary]
Args:
name (str): [description]
gripper (ParallelGripper): [description]
robot_articulation(Articulation): [description]
events_dt (Optional[List[float]], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str,
gripper: ParallelGripper,
robot_articulation: Articulation,
events_dt: Optional[List[float]] = None,
) -> None:
if events_dt is None:
events_dt = [0.01, 0.01, 1, 0.01, 0.01, 0.01, 0.01, 0.05, 0.01, 0.08]
manipulators_controllers.PickPlaceController.__init__(
self,
name=name,
cspace_controller=RMPFlowController(
name=name + "_cspace_controller", robot_articulation=robot_articulation
),
gripper=gripper,
events_dt=events_dt,
end_effector_initial_height=0.2 / get_stage_units(),
)
return
| 1,854 | Python | 38.468084 | 89 | 0.677994 |
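The ten entries of events_dt above pace the ten phases of the pick-and-place state machine implemented in omni.isaac.manipulators. A hedged usage sketch, assuming my_dofbot is a DofBot wrapper exposing a gripper attribute and that the simulation is stepping:
import numpy as np
from omni.isaac.dofbot.controllers import PickPlaceController
controller = PickPlaceController(
    name="pick_place", gripper=my_dofbot.gripper, robot_articulation=my_dofbot
)
# once per physics step:
action = controller.forward(
    picking_position=np.array([0.3, 0.1, 0.02]),   # illustrative cube position
    placing_position=np.array([0.3, -0.1, 0.02]),  # illustrative goal position
    current_joint_positions=my_dofbot.get_joint_positions(),
)
my_dofbot.apply_action(action)
if controller.is_done():
    print("pick-and-place finished")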
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/docs/CHANGELOG.md | # Changelog
## [0.3.0] - 2022-07-26
### Removed
- Removed GripperController class and used the new ParallelGripper class instead.
### Changed
- Changed gripper_dof_indices argument in PickPlaceController to gripper.
### Added
- Added deltas argument in DofBot class for the gripper action deltas when opening or closing.
## [0.2.1] - 2022-07-22
### Fixed
- Bug with adding a custom usd for manipulator
## [0.2.0] - 2022-05-02
### Changed
- Changed InverseKinematicsSolver class to KinematicsSolver class, using the new LulaKinematicsSolver class in motion_generation
## [0.1.4] - 2022-04-21
### Changed
- Updated RmpFlowController class init alongside modifying motion_generation extension
## [0.1.3] - 2022-03-25
### Changed
- Updated RmpFlowController class alongside changes to motion_generation extension
## [0.1.2] - 2022-03-16
### Changed
- Replaced find_nucleus_server() with get_assets_root_path()
## [0.1.1] - 2021-12-02
### Changed
- Propagation of core api changes
## [0.1.0] - 2021-09-01
### Added
- Added Dofbot class | 1,048 | Markdown | 21.319148 | 128 | 0.721374 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/docs/README.md | # Usage
To enable this extension, go to the Extension Manager menu and enable the omni.isaac.dofbot extension. | 106 | Markdown | 34.666655 | 97 | 0.811321 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.dofbot/docs/index.rst | Dofbot Robot [omni.isaac.dofbot]
################################
Dofbot
=============
.. automodule:: omni.isaac.dofbot.dofbot
:inherited-members:
:members:
:undoc-members:
:exclude-members:
Dofbot Kinematics Solver
=========================
.. automodule:: omni.isaac.dofbot.kinematics_solver
:inherited-members:
:members:
Dofbot Controllers
==================
.. automodule:: omni.isaac.dofbot.controllers
:inherited-members:
:imported-members:
:members:
:undoc-members:
:exclude-members:
Dofbot Tasks
==============
.. automodule:: omni.isaac.dofbot.tasks
:inherited-members:
:imported-members:
:members:
:undoc-members:
:exclude-members:
| 718 | reStructuredText | 16.119047 | 51 | 0.584958 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/PACKAGE-LICENSES/omni.isaac.repl-LICENSE.md | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited. | 412 | Markdown | 57.999992 | 74 | 0.839806 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/config/extension.toml | [core]
reloadable = true
order = 0
[package]
version = "1.0.3"
category = "Utility"
title = "Isaac Sim REPL"
description = "Extension that provides an interactive shell to a running omniverse app"
authors = ["NVIDIA"]
repository = ""
keywords = ["isaac", "python", "repl"]
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
icon = "data/icon.png"
writeTarget.kit = true
target.platform = ["linux-*"]
[dependencies]
"omni.kit.test" = {}
[[python.module]]
name = "prompt_toolkit"
path = "pip_prebundle"
[[python.module]]
name = "omni.isaac.repl"
[[python.module]]
name = "omni.isaac.repl.tests"
[settings]
exts."omni.isaac.repl".host = "127.0.0.1"
exts."omni.isaac.repl".port = 8223
| 695 | TOML | 18.885714 | 87 | 0.684892 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/docs/CHANGELOG.md | # Changelog
## [1.0.3] - 2022-04-16
### Fixed
- ptpython was not fixed
## [1.0.2] - 2022-04-08
### Fixed
- Fix incorrect windows platform check
## [1.0.1] - 2022-04-08
### Changed
- Extension only targets linux now due to asyncio add_reader limitation
## [1.0.0] - 2022-04-06
### Added
- Initial version of extension | 323 | Markdown | 14.428571 | 70 | 0.647059 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/docs/README.md | # Usage
To enable this extension, go to the Extension Manager menu and enable the omni.isaac.repl extension.
Then log in using `telnet localhost 8223`. See extension.toml for a full list of settings. | 197 | Markdown | 38.599992 | 96 | 0.786802 |
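The host and port come from the [settings] section of the extension.toml above. As a small sketch (assuming the standard Kit carb.settings API), they can be read back from inside a running app:
import carb.settings
settings = carb.settings.get_settings()
host = settings.get("/exts/omni.isaac.repl/host")  # "127.0.0.1" by default
port = settings.get("/exts/omni.isaac.repl/port")  # 8223 by default
print(f"connect with: telnet {host} {port}")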
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/docs/index.rst | Isaac Python REPL [omni.isaac.repl]
###################################
| 72 | reStructuredText | 23.333326 | 35 | 0.388889 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/util.py | """
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from io import TextIOWrapper
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*
(?: \s+ # optional in HTML5
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")?
)
[^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
re.IGNORECASE | re.DOTALL | re.MULTILINE)
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
class ClassNotFound(ValueError):
"""Raised if one of the lookup functions didn't find a matching class."""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, str):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, str):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.search(text)
if m is None:
return False
doctype = m.group(1)
return re.compile(regex, re.I).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""Check if the file looks like it has a html doctype."""
return doctype_matches(text, r'html')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.search(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
def surrogatepair(c):
"""Given a unicode character code with length greater than 16 bits,
return the two 16 bit surrogate pair.
"""
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
class Future:
"""Generic class to defer some work.
Handled specially in RegexLexerMeta, to support regex string construction at
first use.
"""
def get(self):
raise NotImplementedError
def guess_decode(text):
"""Decode *text* with guessed encoding.
First try UTF-8; this should fail for non-UTF-8 encodings.
Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
try:
text = text.decode('utf-8')
return text, 'utf-8'
except UnicodeDecodeError:
try:
import locale
prefencoding = locale.getpreferredencoding()
text = text.decode()
return text, prefencoding
except (UnicodeDecodeError, LookupError):
text = text.decode('latin1')
return text, 'latin1'
def guess_decode_from_terminal(text, term):
"""Decode *text* coming from terminal *term*.
First try the terminal encoding, if given.
Then try UTF-8. Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
if getattr(term, 'encoding', None):
try:
text = text.decode(term.encoding)
except UnicodeDecodeError:
pass
else:
return text, term.encoding
return guess_decode(text)
def terminal_encoding(term):
"""Return our best guess of encoding for the given *term*."""
if getattr(term, 'encoding', None):
return term.encoding
import locale
return locale.getpreferredencoding()
class UnclosingTextIOWrapper(TextIOWrapper):
# Don't close underlying buffer on destruction.
def close(self):
self.flush()
| 9,110 | Python | 28.485437 | 80 | 0.568825 |
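To make the option helpers above concrete, a small illustration (not part of the dump) of how string values are coerced:
from pygments.util import get_bool_opt, get_int_opt, get_list_opt
opts = {"stripnl": "yes", "tabsize": "4", "filters": "whitespace keyword"}
assert get_bool_opt(opts, "stripnl", False) is True   # '1'/'0', 'yes'/'no', 'true'/'false', 'on'/'off'
assert get_int_opt(opts, "tabsize", 0) == 4           # numeric strings are converted with int()
assert get_list_opt(opts, "filters", []) == ["whitespace", "keyword"]  # plain strings are split()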
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/cmdline.py | """
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import shutil
import argparse
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding, \
UnclosingTextIOWrapper
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
from pygments.lexers.special import TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
load_formatter_from_file, get_formatter_for_filename, find_formatter_class
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str.strip():
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
def _print_help(what, name):
try:
if what == 'lexer':
cls = get_lexer_by_name(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
return 0
except (AttributeError, ValueError):
print("%s not found!" % what, file=sys.stderr)
return 1
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
def _print_list_as_json(requested_items):
import json
result = {}
if 'lexer' in requested_items:
info = {}
for fullname, names, filenames, mimetypes in get_all_lexers():
info[fullname] = {
'aliases': names,
'filenames': filenames,
'mimetypes': mimetypes
}
result['lexers'] = info
if 'formatter' in requested_items:
info = {}
for cls in get_all_formatters():
doc = docstring_headline(cls)
info[cls.name] = {
'aliases': cls.aliases,
'filenames': cls.filenames,
'doc': doc
}
result['formatters'] = info
if 'filter' in requested_items:
info = {}
for name in get_all_filters():
cls = find_filter_class(name)
info[name] = {
'doc': docstring_headline(cls)
}
result['filters'] = info
if 'style' in requested_items:
info = {}
for name in get_all_styles():
cls = get_style_by_name(name)
info[name] = {
'doc': docstring_headline(cls)
}
result['styles'] = info
json.dump(result, sys.stdout)
def main_inner(parser, argns):
if argns.help:
parser.print_help()
return 0
if argns.V:
print('Pygments version %s, (c) 2006-2022 by Georg Brandl, Matthäus '
'Chajdas and contributors.' % __version__)
return 0
def is_only_option(opt):
return not any(v for (k, v) in vars(argns).items() if k != opt)
# handle ``pygmentize -L``
if argns.L is not None:
arg_set = set()
for k, v in vars(argns).items():
if v:
arg_set.add(k)
arg_set.discard('L')
arg_set.discard('json')
if arg_set:
parser.print_help(sys.stderr)
return 2
# print version
if not argns.json:
main(['', '-V'])
allowed_types = {'lexer', 'formatter', 'filter', 'style'}
largs = [arg.rstrip('s') for arg in argns.L]
if any(arg not in allowed_types for arg in largs):
parser.print_help(sys.stderr)
return 0
if not largs:
largs = allowed_types
if not argns.json:
for arg in largs:
_print_list(arg)
else:
_print_list_as_json(largs)
return 0
# handle ``pygmentize -H``
if argns.H:
if not is_only_option('H'):
parser.print_help(sys.stderr)
return 2
what, name = argns.H
if what not in ('lexer', 'formatter', 'filter'):
parser.print_help(sys.stderr)
return 2
return _print_help(what, name)
# parse -O options
parsed_opts = _parse_options(argns.O or [])
# parse -P options
for p_opt in argns.P or []:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
# encodings
inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
# handle ``pygmentize -N``
if argns.N:
lexer = find_lexer_class_for_filename(argns.N)
if lexer is None:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -C``
if argns.C:
inp = sys.stdin.buffer.read()
try:
lexer = guess_lexer(inp, inencoding=inencoding)
except ClassNotFound:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = argns.S
a_opt = argns.a
if S_opt is not None:
f_opt = argns.f
if not f_opt:
parser.print_help(sys.stderr)
return 2
if argns.l or argns.INPUTFILE:
parser.print_help(sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
print(fmter.get_style_defs(a_opt or ''))
return 0
# if no -S is given, -a is not allowed
if argns.a is not None:
parser.print_help(sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(argns.F or [])
# -x: allow custom (eXternal) lexers and formatters
allow_custom_lexer_formatter = bool(argns.x)
# select lexer
lexer = None
# given by name?
lexername = argns.l
if lexername:
# custom lexer, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in lexername:
try:
filename = None
name = None
if ':' in lexername:
filename, name = lexername.rsplit(':', 1)
if '.py' in name:
# This can happen on Windows: If the lexername is
# C:\lexer.py -- return to normal load path in that case
name = None
if filename and name:
lexer = load_lexer_from_file(filename, name,
**parsed_opts)
else:
lexer = load_lexer_from_file(lexername, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
lexer = get_lexer_by_name(lexername, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
# read input code
code = None
if argns.INPUTFILE:
if argns.s:
print('Error: -s option not usable when input file specified',
file=sys.stderr)
return 2
infn = argns.INPUTFILE
try:
with open(infn, 'rb') as infp:
code = infp.read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not inencoding:
code, inencoding = guess_decode(code)
# do we have to guess the lexer?
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if argns.g:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
elif not argns.s: # treat stdin as full file (-s support is later)
# read code from terminal, always in binary mode since we want to
# decode ourselves and be tolerant with it
code = sys.stdin.buffer.read() # use .buffer to get a binary stream
if not inencoding:
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
# else the lexer will do the decoding
if not lexer:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else: # -s option needs a lexer with -l
if not lexer:
print('Error: when using -s a lexer has to be selected with -l',
file=sys.stderr)
return 2
# process filters
for fname, fopts in F_opts:
try:
lexer.add_filter(fname, **fopts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
# select formatter
outfn = argns.o
fmter = argns.f
if fmter:
# custom formatter, located relative to user's cwd
if allow_custom_lexer_formatter and '.py' in fmter:
try:
filename = None
name = None
if ':' in fmter:
# Same logic as above for custom lexer
filename, name = fmter.rsplit(':', 1)
if '.py' in name:
name = None
if filename and name:
fmter = load_formatter_from_file(filename, name,
**parsed_opts)
else:
fmter = load_formatter_from_file(fmter, **parsed_opts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
else:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
if os.environ.get('COLORTERM','') in ('truecolor', '24bit'):
fmter = TerminalTrueColorFormatter(**parsed_opts)
elif '256' in os.environ.get('TERM', ''):
fmter = Terminal256Formatter(**parsed_opts)
else:
fmter = TerminalFormatter(**parsed_opts)
outfile = sys.stdout.buffer
# determine output encoding if not explicitly selected
if not outencoding:
if outfn:
# output file? use lexer encoding for now (can still be None)
fmter.encoding = inencoding
else:
# else use terminal encoding
fmter.encoding = terminal_encoding(sys.stdout)
# provide coloring under Windows, if possible
if not outfn and sys.platform in ('win32', 'cygwin') and \
fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
# unfortunately colorama doesn't support binary streams on Py3
outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
fmter.encoding = None
try:
import colorama.initialise
except ImportError:
pass
else:
outfile = colorama.initialise.wrap_stream(
outfile, convert=None, strip=None, autoreset=False, wrap=True)
# When using the LaTeX formatter and the option `escapeinside` is
# specified, we need a special lexer which collects escaped text
# before running the chosen language lexer.
escapeinside = parsed_opts.get('escapeinside', '')
if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
left = escapeinside[0]
right = escapeinside[1]
lexer = LatexEmbeddedLexer(left, right, lexer)
# ... and do it!
if not argns.s:
# process whole input as per normal...
try:
highlight(code, lexer, fmter, outfile)
finally:
if outfn:
outfile.close()
return 0
else:
# line by line processing of stdin (eg: for 'tail -f')...
try:
while 1:
line = sys.stdin.buffer.readline()
if not line:
break
if not inencoding:
line = guess_decode_from_terminal(line, sys.stdin)[0]
highlight(line, lexer, fmter, outfile)
if hasattr(outfile, 'flush'):
outfile.flush()
return 0
except KeyboardInterrupt: # pragma: no cover
return 0
finally:
if outfn:
outfile.close()
class HelpFormatter(argparse.HelpFormatter):
def __init__(self, prog, indent_increment=2, max_help_position=16, width=None):
if width is None:
try:
width = shutil.get_terminal_size().columns - 2
except Exception:
pass
argparse.HelpFormatter.__init__(self, prog, indent_increment,
max_help_position, width)
def main(args=sys.argv):
"""
Main command line entry point.
"""
desc = "Highlight an input file and write the result to an output file."
parser = argparse.ArgumentParser(description=desc, add_help=False,
formatter_class=HelpFormatter)
operation = parser.add_argument_group('Main operation')
lexersel = operation.add_mutually_exclusive_group()
lexersel.add_argument(
'-l', metavar='LEXER',
help='Specify the lexer to use. (Query names with -L.) If not '
'given and -g is not present, the lexer is guessed from the filename.')
lexersel.add_argument(
'-g', action='store_true',
help='Guess the lexer from the file contents, or pass through '
'as plain text if nothing can be guessed.')
operation.add_argument(
'-F', metavar='FILTER[:options]', action='append',
help='Add a filter to the token stream. (Query names with -L.) '
'Filter options are given after a colon if necessary.')
operation.add_argument(
'-f', metavar='FORMATTER',
help='Specify the formatter to use. (Query names with -L.) '
'If not given, the formatter is guessed from the output filename, '
'and defaults to the terminal formatter if the output is to the '
'terminal or an unknown file extension.')
operation.add_argument(
'-O', metavar='OPTION=value[,OPTION=value,...]', action='append',
help='Give options to the lexer and formatter as a comma-separated '
'list of key-value pairs. '
'Example: `-O bg=light,python=cool`.')
operation.add_argument(
'-P', metavar='OPTION=value', action='append',
help='Give a single option to the lexer and formatter - with this '
'you can pass options whose value contains commas and equal signs. '
'Example: `-P "heading=Pygments, the Python highlighter"`.')
operation.add_argument(
'-o', metavar='OUTPUTFILE',
help='Where to write the output. Defaults to standard output.')
operation.add_argument(
'INPUTFILE', nargs='?',
help='Where to read the input. Defaults to standard input.')
flags = parser.add_argument_group('Operation flags')
flags.add_argument(
'-v', action='store_true',
help='Print a detailed traceback on unhandled exceptions, which '
'is useful for debugging and bug reports.')
flags.add_argument(
'-s', action='store_true',
help='Process lines one at a time until EOF, rather than waiting to '
'process the entire file. This only works for stdin, only for lexers '
'with no line-spanning constructs, and is intended for streaming '
'input such as you get from `tail -f`. '
'Example usage: `tail -f sql.log | pygmentize -s -l sql`.')
flags.add_argument(
'-x', action='store_true',
help='Allow custom lexers and formatters to be loaded from a .py file '
'relative to the current working directory. For example, '
'`-l ./customlexer.py -x`. By default, this option expects a file '
'with a class named CustomLexer or CustomFormatter; you can also '
'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). '
'Users should be very careful not to use this option with untrusted '
'files, because it will import and run them.')
flags.add_argument('--json', help='Output as JSON. This can '
'be only used in conjunction with -L.',
default=False,
action='store_true')
special_modes_group = parser.add_argument_group(
'Special modes - do not do any highlighting')
special_modes = special_modes_group.add_mutually_exclusive_group()
special_modes.add_argument(
'-S', metavar='STYLE -f formatter',
help='Print style definitions for STYLE for a formatter '
'given with -f. The argument given by -a is formatter '
'dependent.')
special_modes.add_argument(
'-L', nargs='*', metavar='WHAT',
help='List lexers, formatters, styles or filters -- '
'give additional arguments for the thing(s) you want to list '
'(e.g. "styles"), or omit them to list everything.')
special_modes.add_argument(
'-N', metavar='FILENAME',
help='Guess and print out a lexer name based solely on the given '
'filename. Does not take input or highlight anything. If no specific '
'lexer can be determined, "text" is printed.')
special_modes.add_argument(
'-C', action='store_true',
help='Like -N, but print out a lexer name based solely on '
'a given content from standard input.')
special_modes.add_argument(
'-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
help='Print detailed help for the object <name> of type <type>, '
'where <type> is one of "lexer", "formatter" or "filter".')
special_modes.add_argument(
'-V', action='store_true',
help='Print the package version.')
special_modes.add_argument(
'-h', '--help', action='store_true',
help='Print this help.')
special_modes_group.add_argument(
'-a', metavar='ARG',
help='Formatter-specific additional argument for the -S (print '
'style sheet) mode.')
argns = parser.parse_args(args[1:])
try:
return main_inner(parser, argns)
except BrokenPipeError:
# someone closed our stdout, e.g. by quitting a pager.
return 0
except Exception:
if argns.v:
print(file=sys.stderr)
print('*' * 65, file=sys.stderr)
print('An unhandled exception occurred while highlighting.',
file=sys.stderr)
print('Please report the whole traceback to the issue tracker at',
file=sys.stderr)
print('<https://github.com/pygments/pygments/issues>.',
file=sys.stderr)
print('*' * 65, file=sys.stderr)
print(file=sys.stderr)
raise
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
print('*** If this is a bug you want to report, please rerun with -v.',
file=sys.stderr)
return 1
| 23,529 | Python | 34.171898 | 92 | 0.548302 |
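Because main() mirrors sys.argv, the first element stands in for the program name and is skipped by parse_args(args[1:]). A short programmatic invocation sketch (the file names are illustrative):
from pygments.cmdline import main
exit_code = main(["pygmentize", "-l", "python", "-f", "html", "-o", "out.html", "example.py"])
print("pygmentize returned", exit_code)  # 0 on success, 1 or 2 on errors, as in main_inner() above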
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/formatter.py | """
pygments.formatter
~~~~~~~~~~~~~~~~~~
Base formatter class.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
if isinstance(style, str):
return get_style_by_name(style)
return style
class Formatter:
"""
Converts a token stream to text.
Options accepted:
``style``
The style to use, can be a string or a Style subclass
(default: "default"). Not used by e.g. the
TerminalFormatter.
``full``
Tells the formatter to output a "full" document, i.e.
a complete self-contained document. This doesn't have
any effect for some formatters (default: false).
``title``
If ``full`` is true, the title that should be used to
caption the document (default: '').
``encoding``
If given, must be an encoding name. This will be used to
convert the Unicode token strings to byte strings in the
output. If it is "" or None, Unicode strings will be written
to the output file, which most file-like objects do not
support (default: None).
``outencoding``
Overrides ``encoding`` if given.
"""
#: Name of the formatter
name = None
#: Shortcuts for the formatter
aliases = []
#: fn match rules
filenames = []
#: If True, this formatter outputs Unicode strings when no encoding
#: option is given.
unicodeoutput = True
def __init__(self, **options):
self.style = _lookup_style(options.get('style', 'default'))
self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
if self.encoding in ('guess', 'chardet'):
# can happen for e.g. pygmentize -O encoding=guess
self.encoding = 'utf-8'
self.encoding = options.get('outencoding') or self.encoding
self.options = options
def get_style_defs(self, arg=''):
"""
Return the style definitions for the current style as a string.
``arg`` is an additional argument whose meaning depends on the
formatter used. Note that ``arg`` can also be a list or tuple
for some formatters like the html formatter.
"""
return ''
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
"""
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile)
| 2,893 | Python | 29.463158 | 75 | 0.614241 |
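Since format() installs the encoding wrapper and then delegates, a subclass normally only implements format_unencoded(). A toy formatter sketch (not a Pygments built-in) that drops all styling:
from pygments.formatter import Formatter
class BareFormatter(Formatter):
    """Writes token values verbatim, discarding token types."""
    name = 'Bare'
    aliases = ['bare']
    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            outfile.write(value)
# e.g. highlight(code, SomeLexer(), BareFormatter()) reproduces the input text unstyled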
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/style.py | """
pygments.style
~~~~~~~~~~~~~~
Basic style object.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Token, STANDARD_TYPES
# Default mapping of ansixxx to RGB colors.
_ansimap = {
# dark
'ansiblack': '000000',
'ansired': '7f0000',
'ansigreen': '007f00',
'ansiyellow': '7f7fe0',
'ansiblue': '00007f',
'ansimagenta': '7f007f',
'ansicyan': '007f7f',
'ansigray': 'e5e5e5',
# normal
'ansibrightblack': '555555',
'ansibrightred': 'ff0000',
'ansibrightgreen': '00ff00',
'ansibrightyellow': 'ffff00',
'ansibrightblue': '0000ff',
'ansibrightmagenta': 'ff00ff',
'ansibrightcyan': '00ffff',
'ansiwhite': 'ffffff',
}
# mapping of deprecated #ansixxx colors to new color names
_deprecated_ansicolors = {
# dark
'#ansiblack': 'ansiblack',
'#ansidarkred': 'ansired',
'#ansidarkgreen': 'ansigreen',
'#ansibrown': 'ansiyellow',
'#ansidarkblue': 'ansiblue',
'#ansipurple': 'ansimagenta',
'#ansiteal': 'ansicyan',
'#ansilightgray': 'ansigray',
# normal
'#ansidarkgray': 'ansibrightblack',
'#ansired': 'ansibrightred',
'#ansigreen': 'ansibrightgreen',
'#ansiyellow': 'ansibrightyellow',
'#ansiblue': 'ansibrightblue',
'#ansifuchsia': 'ansibrightmagenta',
'#ansiturquoise': 'ansibrightcyan',
'#ansiwhite': 'ansiwhite',
}
ansicolors = set(_ansimap)
class StyleMeta(type):
def __new__(mcs, name, bases, dct):
obj = type.__new__(mcs, name, bases, dct)
for token in STANDARD_TYPES:
if token not in obj.styles:
obj.styles[token] = ''
def colorformat(text):
if text in ansicolors:
return text
if text[0:1] == '#':
col = text[1:]
if len(col) == 6:
return col
elif len(col) == 3:
return col[0] * 2 + col[1] * 2 + col[2] * 2
elif text == '':
return ''
elif text.startswith('var') or text.startswith('calc'):
return text
assert False, "wrong color format %r" % text
_styles = obj._styles = {}
for ttype in obj.styles:
for token in ttype.split():
if token in _styles:
continue
ndef = _styles.get(token.parent, None)
styledefs = obj.styles.get(token, '').split()
if not ndef or token is None:
ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
elif 'noinherit' in styledefs and token is not Token:
ndef = _styles[Token][:]
else:
ndef = ndef[:]
_styles[token] = ndef
for styledef in obj.styles.get(token, '').split():
if styledef == 'noinherit':
pass
elif styledef == 'bold':
ndef[1] = 1
elif styledef == 'nobold':
ndef[1] = 0
elif styledef == 'italic':
ndef[2] = 1
elif styledef == 'noitalic':
ndef[2] = 0
elif styledef == 'underline':
ndef[3] = 1
elif styledef == 'nounderline':
ndef[3] = 0
elif styledef[:3] == 'bg:':
ndef[4] = colorformat(styledef[3:])
elif styledef[:7] == 'border:':
ndef[5] = colorformat(styledef[7:])
elif styledef == 'roman':
ndef[6] = 1
elif styledef == 'sans':
ndef[7] = 1
elif styledef == 'mono':
ndef[8] = 1
else:
ndef[0] = colorformat(styledef)
return obj
def style_for_token(cls, token):
t = cls._styles[token]
ansicolor = bgansicolor = None
color = t[0]
if color in _deprecated_ansicolors:
color = _deprecated_ansicolors[color]
if color in ansicolors:
ansicolor = color
color = _ansimap[color]
bgcolor = t[4]
if bgcolor in _deprecated_ansicolors:
bgcolor = _deprecated_ansicolors[bgcolor]
if bgcolor in ansicolors:
bgansicolor = bgcolor
bgcolor = _ansimap[bgcolor]
return {
'color': color or None,
'bold': bool(t[1]),
'italic': bool(t[2]),
'underline': bool(t[3]),
'bgcolor': bgcolor or None,
'border': t[5] or None,
'roman': bool(t[6]) or None,
'sans': bool(t[7]) or None,
'mono': bool(t[8]) or None,
'ansicolor': ansicolor,
'bgansicolor': bgansicolor,
}
def list_styles(cls):
return list(cls)
def styles_token(cls, ttype):
return ttype in cls._styles
def __iter__(cls):
for token in cls._styles:
yield token, cls.style_for_token(token)
def __len__(cls):
return len(cls._styles)
class Style(metaclass=StyleMeta):
#: overall background color (``None`` means transparent)
background_color = '#ffffff'
#: highlight background color
highlight_color = '#ffffcc'
#: line number font color
line_number_color = 'inherit'
#: line number background color
line_number_background_color = 'transparent'
#: special line number font color
line_number_special_color = '#000000'
#: special line number background color
line_number_special_background_color = '#ffffc0'
#: Style definitions for individual token types.
styles = {}
# Attribute for lexers defined within Pygments. If set
# to True, the style is not shown in the style gallery
# on the website. This is intended for language-specific
# styles.
web_style_gallery_exclude = False
| 6,245 | Python | 30.545454 | 70 | 0.501521 |
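Concretely, a user-defined style is just a Style subclass with a styles mapping; StyleMeta parses each definition string with the rules above ('bold', 'italic', 'bg:', bare colors, ...). A minimal sketch:
from pygments.style import Style
from pygments.token import Comment, Keyword, Name, String
class TinyStyle(Style):
    background_color = '#f8f8f8'
    styles = {
        Comment: 'italic #888888',
        Keyword: 'bold #005588',
        Name.Function: '#00aa00',
        String: 'bg:#eeeeee #dd2200',
    }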
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/sphinxext.py | """
pygments.sphinxext
~~~~~~~~~~~~~~~~~~
Sphinx extension to generate automatic documentation of lexers,
formatters and filters.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import Directive
from sphinx.util.nodes import nested_parse_with_titles
MODULEDOC = '''
.. module:: %s
%s
%s
'''
LEXERDOC = '''
.. class:: %s
:Short names: %s
:Filenames: %s
:MIME types: %s
%s
'''
FMTERDOC = '''
.. class:: %s
:Short names: %s
:Filenames: %s
%s
'''
FILTERDOC = '''
.. class:: %s
:Name: %s
%s
'''
class PygmentsDoc(Directive):
"""
A directive to collect all lexers/formatters/filters and generate
autoclass directives for them.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
self.filenames = set()
if self.arguments[0] == 'lexers':
out = self.document_lexers()
elif self.arguments[0] == 'formatters':
out = self.document_formatters()
elif self.arguments[0] == 'filters':
out = self.document_filters()
elif self.arguments[0] == 'lexers_overview':
out = self.document_lexers_overview()
else:
raise Exception('invalid argument for "pygmentsdoc" directive')
node = nodes.compound()
vl = ViewList(out.split('\n'), source='')
nested_parse_with_titles(self.state, vl, node)
for fn in self.filenames:
self.state.document.settings.record_dependencies.add(fn)
return node.children
def document_lexers_overview(self):
"""Generate a tabular overview of all lexers.
The columns are the lexer name, the extensions handled by this lexer
(or "None"), the aliases and a link to the lexer class."""
from pygments.lexers._mapping import LEXERS
import pygments.lexers
out = []
table = []
def format_link(name, url):
if url:
return f'`{name} <{url}>`_'
return name
for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
lexer_cls = pygments.lexers.find_lexer_class(data[1])
extensions = lexer_cls.filenames + lexer_cls.alias_filenames
table.append({
'name': format_link(data[1], lexer_cls.url),
                'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\_') or 'None',
'aliases': ', '.join(data[2]),
'class': f'{data[0]}.{classname}'
})
column_names = ['name', 'extensions', 'aliases', 'class']
column_lengths = [max([len(row[column]) for row in table if row[column]])
for column in column_names]
def write_row(*columns):
"""Format a table row"""
out = []
for l, c in zip(column_lengths, columns):
if c:
out.append(c.ljust(l))
else:
out.append(' '*l)
return ' '.join(out)
        def write_separator():
"""Write a table separator row"""
sep = ['='*c for c in column_lengths]
return write_row(*sep)
        out.append(write_separator())
out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
        out.append(write_separator())
for row in table:
out.append(write_row(
row['name'],
row['extensions'],
row['aliases'],
f':class:`~{row["class"]}`'))
        out.append(write_separator())
return '\n'.join(out)
def document_lexers(self):
from pygments.lexers._mapping import LEXERS
out = []
modules = {}
moduledocstrings = {}
for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
if not cls.__doc__:
print("Warning: %s does not have a docstring." % classname)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
modules.setdefault(module, []).append((
classname,
', '.join(data[2]) or 'None',
                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
', '.join(data[4]) or 'None',
docstring))
if module not in moduledocstrings:
moddoc = mod.__doc__
if isinstance(moddoc, bytes):
moddoc = moddoc.decode('utf8')
moduledocstrings[module] = moddoc
for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
if moduledocstrings[module] is None:
raise Exception("Missing docstring for %s" % (module,))
heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
out.append(MODULEDOC % (module, heading, '-'*len(heading)))
for data in lexers:
out.append(LEXERDOC % data)
return ''.join(out)
def document_formatters(self):
from pygments.formatters import FORMATTERS
out = []
for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
self.filenames.add(mod.__file__)
cls = getattr(mod, classname)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
heading = cls.__name__
out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
', '.join(data[3]).replace('*', '\\*') or 'None',
docstring))
return ''.join(out)
def document_filters(self):
from pygments.filters import FILTERS
out = []
for name, cls in FILTERS.items():
self.filenames.add(sys.modules[cls.__module__].__file__)
docstring = cls.__doc__
if isinstance(docstring, bytes):
docstring = docstring.decode('utf8')
out.append(FILTERDOC % (cls.__name__, name, docstring))
return ''.join(out)
def setup(app):
app.add_directive('pygmentsdoc', PygmentsDoc)
| 6,816 | Python | 30.270642 | 101 | 0.531397 |
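In a Sphinx project this module is enabled like any other extension; setup() above registers the pygmentsdoc directive, whose single argument selects what gets documented. A conf.py sketch (project layout is an assumption):
# conf.py
extensions = ['pygments.sphinxext']
# an .rst file can then contain, for example:
#   .. pygmentsdoc:: lexers_overview
#   .. pygmentsdoc:: formatters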
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexer.py | """
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import time
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, Whitespace, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator, Future, guess_decode
from pygments.regexopt import regex_opt
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
'default', 'words', 'line_re']
line_re = re.compile('.*?\n')
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
(b'\xff\xfe\0\0', 'utf-32'),
(b'\0\0\xfe\xff', 'utf-32be'),
(b'\xff\xfe', 'utf-16'),
(b'\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(mcs, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(mcs, name, bases, d)
class Lexer(metaclass=LexerMeta):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
.. versionadded:: 1.3
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
Latin1 detection. Can also be ``'chardet'`` to use the chardet
library, if it is installed.
``inencoding``
Overrides the ``encoding`` if given.
"""
#: Name of the lexer
name = None
#: URL of the language specification/definition
url = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'guess')
self.encoding = options.get('inencoding') or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return values was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if not isinstance(text, str):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
try:
import chardet
except ImportError as e:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/') from e
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = text.decode(enc.get('encoding') or 'utf-8',
'replace')
text = decoded
else:
text = text.decode(self.encoding)
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
else:
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for _, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (index, tokentype, value) pairs where "index"
is the starting position of the token within the input text.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
    This lexer takes two lexers as arguments. A root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str): # pylint: disable=invalid-name
"""
Indicates that a state should include rules from another state.
"""
pass
class _inherit:
"""
    Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit() # pylint: disable=invalid-name
class combined(tuple): # pylint: disable=invalid-name
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch:
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer,
_PseudoMatch(match.start(i + 1), data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
class _This:
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
class default:
"""
Indicates a state or state action (e.g. #pop) to apply.
For example default('#pop') is equivalent to ('', Token, '#pop')
Note that state tuples may be used as well.
.. versionadded:: 2.0
"""
def __init__(self, state):
self.state = state
class words(Future):
"""
Indicates a list of literal words that is transformed into an optimized
regex that matches any of the words.
.. versionadded:: 2.0
"""
def __init__(self, words, prefix='', suffix=''):
self.words = words
self.prefix = prefix
self.suffix = suffix
def get(self):
return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags, state):
"""Preprocess the regular expression component of a token definition."""
if isinstance(regex, Future):
regex = regex.get()
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err)) from err
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
for state, items in toks.items():
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
# N.b. this is the index in items (that is, the superclass
# copy), so offset required when storing below.
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
    #: At all times there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
#:
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: that the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the tuple.
#: ``new_state`` can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again. Note that if you push while in a combined state, the combined
#: state itself is pushed, and not only the state in which the rule is
#: defined.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
yield from action(self, m)
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(statestack) > 1:
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop, but keep at least one state on the stack
# (random code leading to unexpected pops should
# not allow exceptions)
if abs(new_state) >= len(statestack):
del statestack[1:]
else:
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
# We are here only if all state tokens have been considered
# and there was not a match on any of them.
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Whitespace, '\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
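# A self-contained sketch of a ``RegexLexer`` subclass for a hypothetical toy
# language (token types come from ``pygments.token``):
#
#     from pygments.token import Keyword, Name, Whitespace
#
#     class ToyLexer(RegexLexer):
#         name = 'Toy'
#         tokens = {
#             'root': [
#                 (r'\s+', Whitespace),
#                 (r'(?:let|in)\b', Keyword),
#                 (r'\w+', Name),
#             ],
#         }
#
#     # list(ToyLexer().get_tokens_unprocessed('let x')) yields
#     # (0, Token.Keyword, 'let'), (3, Token.Text.Whitespace, ' '),
#     # (4, Token.Name, 'x')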
class LexerContext:
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if action is not None:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
yield from action(self, m, ctx)
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(ctx.stack) > 1:
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# see RegexLexer for why this check is made
if abs(new_state) >= len(ctx.stack):
del ctx.stack[1:]
else:
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, '\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
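# Sketch of a context-aware callback for ``ExtendedRegexLexer`` (the token
# choice is illustrative; ``Generic`` lives in ``pygments.token``). Unlike
# plain ``RegexLexer`` callbacks, these receive the ``LexerContext`` and must
# advance ``ctx.pos`` themselves:
#
#     def header_callback(lexer, match, ctx):
#         yield match.start(), Generic.Heading, match.group()
#         ctx.pos = match.end()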
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
yield from tokens
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
if tmpval:
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
if oldi < len(v):
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
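# Illustrative sketch: console lexers typically lex prompts/output separately
# and splice them into the code token stream with ``do_insertions``. The
# insertion point below is hypothetical:
#
#     code_tokens = pylexer.get_tokens_unprocessed(code)
#     prompts = [(0, [(0, Generic.Prompt, '>>> ')])]
#     merged = do_insertions(prompts, code_tokens)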
class ProfilingRegexLexerMeta(RegexLexerMeta):
"""Metaclass for ProfilingRegexLexer, collects regex timing info."""
def _process_regex(cls, regex, rflags, state):
if isinstance(regex, words):
rex = regex_opt(regex.words, prefix=regex.prefix,
suffix=regex.suffix)
else:
rex = regex
compiled = re.compile(rex, rflags)
def match_func(text, pos, endpos=sys.maxsize):
info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
t0 = time.time()
res = compiled.match(text, pos, endpos)
t1 = time.time()
info[0] += 1
info[1] += t1 - t0
return res
return match_func
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
_prof_sort_index = 4 # defaults to time per call
def get_tokens_unprocessed(self, text, stack=('root',)):
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
n, 1000 * t, 1000 * t / n)
for ((s, r), (n, t)) in rawdata.items()),
key=lambda x: x[self._prof_sort_index],
reverse=True)
sum_total = sum(x[3] for x in data)
print()
print('Profiling result for %s lexing %d chars in %.3f ms' %
(self.__class__.__name__, len(text), sum_total))
print('=' * 110)
print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
print('-' * 110)
for d in data:
print('%-20s %-65s %5d %8.4f %8.4f' % d)
print('=' * 110)
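# Usage sketch (the target lexer is an assumption): mix ``ProfilingRegexLexer``
# into an existing lexer class; the profiling metaclass then recompiles the
# token regexes with timing wrappers and a report is printed after lexing:
#
#     from pygments.lexers.python import PythonLexer
#
#     class ProfilingPythonLexer(PythonLexer, ProfilingRegexLexer):
#         pass
#
#     list(ProfilingPythonLexer().get_tokens_unprocessed(source_text))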
| 31,987 | Python | 35.18552 | 86 | 0.518992 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/__init__.py | """
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments master branch:
https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from io import StringIO, BytesIO
__version__ = '2.14.0'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError:
# Heuristic to catch a common mistake.
from pygments.lexer import RegexLexer
if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError:
# Heuristic to catch a common mistake.
from pygments.formatter import Formatter
if isinstance(formatter, type) and issubclass(formatter, Formatter):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
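# Typical three-line usage of the API above (lexer and formatter classes are
# provided by subpackages; shown purely as an illustration):
#
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#     html = highlight('print("hi")', PythonLexer(), HtmlFormatter())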
| 2,975 | Python | 34.855421 | 90 | 0.654118 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/regexopt.py | """
pygments.regexopt
~~~~~~~~~~~~~~~~~
An algorithm that generates optimized regexes for matching long lists of
literal strings.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from re import escape
from os.path import commonprefix
from itertools import groupby
from operator import itemgetter
CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
FIRST_ELEMENT = itemgetter(0)
def make_charset(letters):
return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']'
def regex_opt_inner(strings, open_paren):
"""Return a regex that matches any string in the sorted list of strings."""
close_paren = open_paren and ')' or ''
# print strings, repr(open_paren)
if not strings:
# print '-> nothing left'
return ''
first = strings[0]
if len(strings) == 1:
# print '-> only 1 string'
return open_paren + escape(first) + close_paren
if not first:
# print '-> first string empty'
return open_paren + regex_opt_inner(strings[1:], '(?:') \
+ '?' + close_paren
if len(first) == 1:
# multiple one-char strings? make a charset
oneletter = []
rest = []
for s in strings:
if len(s) == 1:
oneletter.append(s)
else:
rest.append(s)
if len(oneletter) > 1: # do we have more than one oneletter string?
if rest:
# print '-> 1-character + rest'
return open_paren + regex_opt_inner(rest, '') + '|' \
+ make_charset(oneletter) + close_paren
# print '-> only 1-character'
return open_paren + make_charset(oneletter) + close_paren
prefix = commonprefix(strings)
if prefix:
plen = len(prefix)
# we have a prefix for all strings
# print '-> prefix:', prefix
return open_paren + escape(prefix) \
+ regex_opt_inner([s[plen:] for s in strings], '(?:') \
+ close_paren
# is there a suffix?
strings_rev = [s[::-1] for s in strings]
suffix = commonprefix(strings_rev)
if suffix:
slen = len(suffix)
# print '-> suffix:', suffix[::-1]
return open_paren \
+ regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
+ escape(suffix[::-1]) + close_paren
# recurse on common 1-string prefixes
# print '-> last resort'
return open_paren + \
'|'.join(regex_opt_inner(list(group[1]), '')
for group in groupby(strings, lambda s: s[0] == first[0])) \
+ close_paren
def regex_opt(strings, prefix='', suffix=''):
"""Return a compiled regex that matches any string in the given list.
The strings to match must be literal strings, not regexes. They will be
regex-escaped.
*prefix* and *suffix* are pre- and appended to the final regex.
"""
strings = sorted(strings)
return prefix + regex_opt_inner(strings, '(') + suffix
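# Illustrative example (the word list is made up):
#
#     >>> regex_opt(['if', 'elif', 'else'], prefix=r'\b', suffix=r'\b')
#     '\\b(el(?:if|se)|if)\\b'
#
# The literals are regex-escaped and merged on common prefixes and suffixes,
# so the result matches exactly the given words and nothing else.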
| 3,072 | Python | 32.402174 | 82 | 0.570638 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/scanner.py | """
pygments.scanner
~~~~~~~~~~~~~~~~
    This library implements a regex-based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
class Scanner:
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
    # pass the docstring as ``doc``, not as a setter
    eos = property(eos, doc=eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
        if it matches. Doesn't touch pos.
"""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
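# Minimal usage sketch (the input text is hypothetical):
#
#     >>> s = Scanner('const x = 1')
#     >>> s.scan(r'\w+')
#     True
#     >>> s.match
#     'const'
#     >>> s.test(r'\s')     # lookahead only; ``pos`` stays at 5
#     True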
| 3,092 | Python | 28.457143 | 70 | 0.578913 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/console.py | """
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "red", "green", "yellow", "blue",
"magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
"brightmagenta", "brightcyan", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%im" % (60 + x)
x += 1
del d, l, x
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
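# Illustrative examples of the helpers above:
#
#     >>> colorize('red', 'error')          # plain dark red
#     '\x1b[31merror\x1b[39;49;00m'
#     >>> ansiformat('*red*', 'error')      # bold red, per the markup table
#     '\x1b[01m\x1b[31merror\x1b[39;49;00m'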
| 1,697 | Python | 22.915493 | 88 | 0.542722 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/token.py | """
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
def __copy__(self):
# These instances are supposed to be singletons
return self
def __deepcopy__(self, memo):
# These instances are supposed to be singletons
return self
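# Token types are created on first attribute access and cached as singletons
# (``Token`` and its standard children are defined below), so subtype checks
# are cheap tuple-prefix tests:
#
#     >>> Name.Function in Name
#     True
#     >>> Name.Function is Token.Name.Function
#     True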
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
exists for backwards compatibility. use ``ttype in other`` now.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
        >>> string_to_tokentype('String.Double')
        Token.Literal.String.Double
        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token
    Tokens that are already tokens are returned unchanged:
        >>> string_to_tokentype(String)
        Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Escape: 'esc',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Function.Magic: 'fm',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Name.Variable.Magic: 'vm',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Affix: 'sa',
String.Backtick: 'sb',
String.Char: 'sc',
String.Delimiter: 'dl',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Bin: 'mb',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Punctuation.Marker: 'pm',
Comment: 'c',
Comment.Hashbang: 'ch',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.PreprocFile: 'cpf',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
| 6,184 | Python | 27.901869 | 70 | 0.466688 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/filter.py | """
pygments.filter
~~~~~~~~~~~~~~~
Module that implements the default filter.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
yield from filter_.filter(lexer, stream)
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
def simplefilter(f):
"""
Decorator that converts a function into a filter::
@simplefilter
def lowercase(self, lexer, stream, options):
for ttype, value in stream:
yield ttype, value.lower()
"""
return type(f.__name__, (FunctionFilter,), {
'__module__': getattr(f, '__module__'),
'__doc__': f.__doc__,
'function': f,
})
class Filter:
"""
Default filter. Subclass this class or use the `simplefilter`
decorator to create own filters.
"""
def __init__(self, **options):
self.options = options
def filter(self, lexer, stream):
raise NotImplementedError()
class FunctionFilter(Filter):
"""
Abstract class used by `simplefilter` to create simple
function filters on the fly. The `simplefilter` decorator
automatically creates subclasses of this class for
functions passed to it.
"""
function = None
def __init__(self, **options):
if not hasattr(self, 'function'):
raise TypeError('%r used without bound function' %
self.__class__.__name__)
Filter.__init__(self, **options)
def filter(self, lexer, stream):
# pylint: disable=not-callable
yield from self.function(lexer, stream, self.options)
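# Usage sketch tying the pieces together (the filter body is illustrative):
#
#     @simplefilter
#     def uppercase(self, lexer, stream, options):
#         for ttype, value in stream:
#             yield ttype, value.upper()
#
#     # ``uppercase`` is now a Filter subclass; instantiate and apply it:
#     filtered = apply_filters(token_stream, [uppercase()])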
| 1,938 | Python | 25.930555 | 70 | 0.603199 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/modeline.py | """
pygments.modeline
~~~~~~~~~~~~~~~~~
A simple modeline parser (based on pymodeline).
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
__all__ = ['get_filetype_from_buffer']
modeline_re = re.compile(r'''
(?: vi | vim | ex ) (?: [<=>]? \d* )? :
.* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)
def get_filetype_from_line(l):
m = modeline_re.search(l)
if m:
return m.group(1)
def get_filetype_from_buffer(buf, max_lines=5):
"""
Scan the buffer for modelines and return filetype if one is found.
"""
lines = buf.splitlines()
for l in lines[-1:-max_lines-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
for i in range(max_lines, -1, -1):
if i < len(lines):
ret = get_filetype_from_line(lines[i])
if ret:
return ret
return None
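# Example (the buffer contents are made up):
#
#     >>> get_filetype_from_buffer('#!/bin/sh\n# vim: ft=zsh\n')
#     'zsh'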
| 986 | Python | 21.431818 | 70 | 0.541582 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/plugin.py | """
pygments.plugin
~~~~~~~~~~~~~~~
Pygments plugin interface. By default, this tries to use
``importlib.metadata``, which is in the Python standard
library since Python 3.8, or its ``importlib_metadata``
backport for earlier versions of Python. It falls back on
``pkg_resources`` if not found. Finally, if ``pkg_resources``
is not found either, no plugins are loaded at all.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def iter_entry_points(group_name):
try:
from importlib.metadata import entry_points
except ImportError:
try:
from importlib_metadata import entry_points
except ImportError:
try:
from pkg_resources import iter_entry_points
except (ImportError, OSError):
return []
else:
return iter_entry_points(group_name)
groups = entry_points()
if hasattr(groups, 'select'):
# New interface in Python 3.10 and newer versions of the
# importlib_metadata backport.
return groups.select(group=group_name)
else:
# Older interface, deprecated in Python 3.10 and recent
# importlib_metadata, but we need it in Python 3.8 and 3.9.
return groups.get(group_name, [])
def find_plugin_lexers():
for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
| 2,579 | Python | 27.988764 | 70 | 0.658782 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/__main__.py | """
pygments.__main__
~~~~~~~~~~~~~~~~~
Main entry point for ``python -m pygments``.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import pygments.cmdline
try:
sys.exit(pygments.cmdline.main(sys.argv))
except KeyboardInterrupt:
sys.exit(1)
| 348 | Python | 18.388888 | 70 | 0.643678 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/filters/__init__.py | """
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""Lookup a filter by name. Return None if not found."""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""Return an instantiated filter.
Options are passed to the filter initializer if wanted.
Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""Return a generator of all filter names."""
yield from FILTERS
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
"""Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.
.. versionchanged:: 2.13
Now recognizes ``FIXME`` by default.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
yield from _replace_special(ttype, value, regex, Comment.Special)
else:
yield ttype, value
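# Sketch: filters are usually attached to a lexer by registry name, with
# options forwarded to the constructor (the lexer and tag list below are
# illustrative):
#
#     lexer = SomeLexer()
#     lexer.add_filter('codetagify', codetags=['TODO', 'HACK'])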
class SymbolFilter(Filter):
"""Convert mathematical symbols such as \\<longrightarrow> in Isabelle
or \\longrightarrow in LaTeX into Unicode characters.
This is mostly useful for HTML or console output when you want to
approximate the source rendering you'd see in an IDE.
Options accepted:
`lang` : string
The symbol language. Must be one of ``'isabelle'`` or
``'latex'``. The default is ``'isabelle'``.
"""
latex_symbols = {
'\\alpha' : '\U000003b1',
'\\beta' : '\U000003b2',
'\\gamma' : '\U000003b3',
'\\delta' : '\U000003b4',
'\\varepsilon' : '\U000003b5',
'\\zeta' : '\U000003b6',
'\\eta' : '\U000003b7',
'\\vartheta' : '\U000003b8',
'\\iota' : '\U000003b9',
'\\kappa' : '\U000003ba',
'\\lambda' : '\U000003bb',
'\\mu' : '\U000003bc',
'\\nu' : '\U000003bd',
'\\xi' : '\U000003be',
'\\pi' : '\U000003c0',
'\\varrho' : '\U000003c1',
'\\sigma' : '\U000003c3',
'\\tau' : '\U000003c4',
'\\upsilon' : '\U000003c5',
'\\varphi' : '\U000003c6',
'\\chi' : '\U000003c7',
'\\psi' : '\U000003c8',
'\\omega' : '\U000003c9',
'\\Gamma' : '\U00000393',
'\\Delta' : '\U00000394',
'\\Theta' : '\U00000398',
'\\Lambda' : '\U0000039b',
'\\Xi' : '\U0000039e',
'\\Pi' : '\U000003a0',
'\\Sigma' : '\U000003a3',
'\\Upsilon' : '\U000003a5',
'\\Phi' : '\U000003a6',
'\\Psi' : '\U000003a8',
'\\Omega' : '\U000003a9',
'\\leftarrow' : '\U00002190',
'\\longleftarrow' : '\U000027f5',
'\\rightarrow' : '\U00002192',
'\\longrightarrow' : '\U000027f6',
'\\Leftarrow' : '\U000021d0',
'\\Longleftarrow' : '\U000027f8',
'\\Rightarrow' : '\U000021d2',
'\\Longrightarrow' : '\U000027f9',
'\\leftrightarrow' : '\U00002194',
'\\longleftrightarrow' : '\U000027f7',
'\\Leftrightarrow' : '\U000021d4',
'\\Longleftrightarrow' : '\U000027fa',
'\\mapsto' : '\U000021a6',
'\\longmapsto' : '\U000027fc',
'\\relbar' : '\U00002500',
'\\Relbar' : '\U00002550',
'\\hookleftarrow' : '\U000021a9',
'\\hookrightarrow' : '\U000021aa',
'\\leftharpoondown' : '\U000021bd',
'\\rightharpoondown' : '\U000021c1',
'\\leftharpoonup' : '\U000021bc',
'\\rightharpoonup' : '\U000021c0',
'\\rightleftharpoons' : '\U000021cc',
'\\leadsto' : '\U0000219d',
'\\downharpoonleft' : '\U000021c3',
'\\downharpoonright' : '\U000021c2',
'\\upharpoonleft' : '\U000021bf',
'\\upharpoonright' : '\U000021be',
'\\restriction' : '\U000021be',
'\\uparrow' : '\U00002191',
'\\Uparrow' : '\U000021d1',
'\\downarrow' : '\U00002193',
'\\Downarrow' : '\U000021d3',
'\\updownarrow' : '\U00002195',
'\\Updownarrow' : '\U000021d5',
'\\langle' : '\U000027e8',
'\\rangle' : '\U000027e9',
'\\lceil' : '\U00002308',
'\\rceil' : '\U00002309',
'\\lfloor' : '\U0000230a',
'\\rfloor' : '\U0000230b',
'\\flqq' : '\U000000ab',
'\\frqq' : '\U000000bb',
'\\bot' : '\U000022a5',
'\\top' : '\U000022a4',
'\\wedge' : '\U00002227',
'\\bigwedge' : '\U000022c0',
'\\vee' : '\U00002228',
'\\bigvee' : '\U000022c1',
'\\forall' : '\U00002200',
'\\exists' : '\U00002203',
'\\nexists' : '\U00002204',
'\\neg' : '\U000000ac',
'\\Box' : '\U000025a1',
'\\Diamond' : '\U000025c7',
'\\vdash' : '\U000022a2',
'\\models' : '\U000022a8',
'\\dashv' : '\U000022a3',
'\\surd' : '\U0000221a',
'\\le' : '\U00002264',
'\\ge' : '\U00002265',
'\\ll' : '\U0000226a',
'\\gg' : '\U0000226b',
'\\lesssim' : '\U00002272',
'\\gtrsim' : '\U00002273',
'\\lessapprox' : '\U00002a85',
'\\gtrapprox' : '\U00002a86',
'\\in' : '\U00002208',
'\\notin' : '\U00002209',
'\\subset' : '\U00002282',
'\\supset' : '\U00002283',
'\\subseteq' : '\U00002286',
'\\supseteq' : '\U00002287',
'\\sqsubset' : '\U0000228f',
'\\sqsupset' : '\U00002290',
'\\sqsubseteq' : '\U00002291',
'\\sqsupseteq' : '\U00002292',
'\\cap' : '\U00002229',
'\\bigcap' : '\U000022c2',
'\\cup' : '\U0000222a',
'\\bigcup' : '\U000022c3',
'\\sqcup' : '\U00002294',
'\\bigsqcup' : '\U00002a06',
'\\sqcap' : '\U00002293',
'\\Bigsqcap' : '\U00002a05',
'\\setminus' : '\U00002216',
'\\propto' : '\U0000221d',
'\\uplus' : '\U0000228e',
'\\bigplus' : '\U00002a04',
'\\sim' : '\U0000223c',
'\\doteq' : '\U00002250',
'\\simeq' : '\U00002243',
'\\approx' : '\U00002248',
'\\asymp' : '\U0000224d',
'\\cong' : '\U00002245',
'\\equiv' : '\U00002261',
'\\Join' : '\U000022c8',
'\\bowtie' : '\U00002a1d',
'\\prec' : '\U0000227a',
'\\succ' : '\U0000227b',
'\\preceq' : '\U0000227c',
'\\succeq' : '\U0000227d',
'\\parallel' : '\U00002225',
'\\mid' : '\U000000a6',
'\\pm' : '\U000000b1',
'\\mp' : '\U00002213',
'\\times' : '\U000000d7',
'\\div' : '\U000000f7',
'\\cdot' : '\U000022c5',
'\\star' : '\U000022c6',
'\\circ' : '\U00002218',
'\\dagger' : '\U00002020',
'\\ddagger' : '\U00002021',
'\\lhd' : '\U000022b2',
'\\rhd' : '\U000022b3',
'\\unlhd' : '\U000022b4',
'\\unrhd' : '\U000022b5',
'\\triangleleft' : '\U000025c3',
'\\triangleright' : '\U000025b9',
'\\triangle' : '\U000025b3',
'\\triangleq' : '\U0000225c',
'\\oplus' : '\U00002295',
'\\bigoplus' : '\U00002a01',
'\\otimes' : '\U00002297',
'\\bigotimes' : '\U00002a02',
'\\odot' : '\U00002299',
'\\bigodot' : '\U00002a00',
'\\ominus' : '\U00002296',
'\\oslash' : '\U00002298',
'\\dots' : '\U00002026',
'\\cdots' : '\U000022ef',
'\\sum' : '\U00002211',
'\\prod' : '\U0000220f',
'\\coprod' : '\U00002210',
'\\infty' : '\U0000221e',
'\\int' : '\U0000222b',
'\\oint' : '\U0000222e',
'\\clubsuit' : '\U00002663',
'\\diamondsuit' : '\U00002662',
'\\heartsuit' : '\U00002661',
'\\spadesuit' : '\U00002660',
'\\aleph' : '\U00002135',
'\\emptyset' : '\U00002205',
'\\nabla' : '\U00002207',
'\\partial' : '\U00002202',
'\\flat' : '\U0000266d',
'\\natural' : '\U0000266e',
'\\sharp' : '\U0000266f',
'\\angle' : '\U00002220',
'\\copyright' : '\U000000a9',
'\\textregistered' : '\U000000ae',
'\\textonequarter' : '\U000000bc',
'\\textonehalf' : '\U000000bd',
'\\textthreequarters' : '\U000000be',
'\\textordfeminine' : '\U000000aa',
'\\textordmasculine' : '\U000000ba',
'\\euro' : '\U000020ac',
'\\pounds' : '\U000000a3',
'\\yen' : '\U000000a5',
'\\textcent' : '\U000000a2',
'\\textcurrency' : '\U000000a4',
'\\textdegree' : '\U000000b0',
}
isabelle_symbols = {
'\\<zero>' : '\U0001d7ec',
'\\<one>' : '\U0001d7ed',
'\\<two>' : '\U0001d7ee',
'\\<three>' : '\U0001d7ef',
'\\<four>' : '\U0001d7f0',
'\\<five>' : '\U0001d7f1',
'\\<six>' : '\U0001d7f2',
'\\<seven>' : '\U0001d7f3',
'\\<eight>' : '\U0001d7f4',
'\\<nine>' : '\U0001d7f5',
'\\<A>' : '\U0001d49c',
'\\<B>' : '\U0000212c',
'\\<C>' : '\U0001d49e',
'\\<D>' : '\U0001d49f',
'\\<E>' : '\U00002130',
'\\<F>' : '\U00002131',
'\\<G>' : '\U0001d4a2',
'\\<H>' : '\U0000210b',
'\\<I>' : '\U00002110',
'\\<J>' : '\U0001d4a5',
'\\<K>' : '\U0001d4a6',
'\\<L>' : '\U00002112',
'\\<M>' : '\U00002133',
'\\<N>' : '\U0001d4a9',
'\\<O>' : '\U0001d4aa',
'\\<P>' : '\U0001d4ab',
'\\<Q>' : '\U0001d4ac',
'\\<R>' : '\U0000211b',
'\\<S>' : '\U0001d4ae',
'\\<T>' : '\U0001d4af',
'\\<U>' : '\U0001d4b0',
'\\<V>' : '\U0001d4b1',
'\\<W>' : '\U0001d4b2',
'\\<X>' : '\U0001d4b3',
'\\<Y>' : '\U0001d4b4',
'\\<Z>' : '\U0001d4b5',
'\\<a>' : '\U0001d5ba',
'\\<b>' : '\U0001d5bb',
'\\<c>' : '\U0001d5bc',
'\\<d>' : '\U0001d5bd',
'\\<e>' : '\U0001d5be',
'\\<f>' : '\U0001d5bf',
'\\<g>' : '\U0001d5c0',
'\\<h>' : '\U0001d5c1',
'\\<i>' : '\U0001d5c2',
'\\<j>' : '\U0001d5c3',
'\\<k>' : '\U0001d5c4',
'\\<l>' : '\U0001d5c5',
'\\<m>' : '\U0001d5c6',
'\\<n>' : '\U0001d5c7',
'\\<o>' : '\U0001d5c8',
'\\<p>' : '\U0001d5c9',
'\\<q>' : '\U0001d5ca',
'\\<r>' : '\U0001d5cb',
'\\<s>' : '\U0001d5cc',
'\\<t>' : '\U0001d5cd',
'\\<u>' : '\U0001d5ce',
'\\<v>' : '\U0001d5cf',
'\\<w>' : '\U0001d5d0',
'\\<x>' : '\U0001d5d1',
'\\<y>' : '\U0001d5d2',
'\\<z>' : '\U0001d5d3',
'\\<AA>' : '\U0001d504',
'\\<BB>' : '\U0001d505',
'\\<CC>' : '\U0000212d',
'\\<DD>' : '\U0001d507',
'\\<EE>' : '\U0001d508',
'\\<FF>' : '\U0001d509',
'\\<GG>' : '\U0001d50a',
'\\<HH>' : '\U0000210c',
'\\<II>' : '\U00002111',
'\\<JJ>' : '\U0001d50d',
'\\<KK>' : '\U0001d50e',
'\\<LL>' : '\U0001d50f',
'\\<MM>' : '\U0001d510',
'\\<NN>' : '\U0001d511',
'\\<OO>' : '\U0001d512',
'\\<PP>' : '\U0001d513',
'\\<QQ>' : '\U0001d514',
'\\<RR>' : '\U0000211c',
'\\<SS>' : '\U0001d516',
'\\<TT>' : '\U0001d517',
'\\<UU>' : '\U0001d518',
'\\<VV>' : '\U0001d519',
'\\<WW>' : '\U0001d51a',
'\\<XX>' : '\U0001d51b',
'\\<YY>' : '\U0001d51c',
'\\<ZZ>' : '\U00002128',
'\\<aa>' : '\U0001d51e',
'\\<bb>' : '\U0001d51f',
'\\<cc>' : '\U0001d520',
'\\<dd>' : '\U0001d521',
'\\<ee>' : '\U0001d522',
'\\<ff>' : '\U0001d523',
'\\<gg>' : '\U0001d524',
'\\<hh>' : '\U0001d525',
'\\<ii>' : '\U0001d526',
'\\<jj>' : '\U0001d527',
'\\<kk>' : '\U0001d528',
'\\<ll>' : '\U0001d529',
'\\<mm>' : '\U0001d52a',
'\\<nn>' : '\U0001d52b',
'\\<oo>' : '\U0001d52c',
'\\<pp>' : '\U0001d52d',
'\\<qq>' : '\U0001d52e',
'\\<rr>' : '\U0001d52f',
'\\<ss>' : '\U0001d530',
'\\<tt>' : '\U0001d531',
'\\<uu>' : '\U0001d532',
'\\<vv>' : '\U0001d533',
'\\<ww>' : '\U0001d534',
'\\<xx>' : '\U0001d535',
'\\<yy>' : '\U0001d536',
'\\<zz>' : '\U0001d537',
'\\<alpha>' : '\U000003b1',
'\\<beta>' : '\U000003b2',
'\\<gamma>' : '\U000003b3',
'\\<delta>' : '\U000003b4',
'\\<epsilon>' : '\U000003b5',
'\\<zeta>' : '\U000003b6',
'\\<eta>' : '\U000003b7',
'\\<theta>' : '\U000003b8',
'\\<iota>' : '\U000003b9',
'\\<kappa>' : '\U000003ba',
'\\<lambda>' : '\U000003bb',
'\\<mu>' : '\U000003bc',
'\\<nu>' : '\U000003bd',
'\\<xi>' : '\U000003be',
'\\<pi>' : '\U000003c0',
'\\<rho>' : '\U000003c1',
'\\<sigma>' : '\U000003c3',
'\\<tau>' : '\U000003c4',
'\\<upsilon>' : '\U000003c5',
'\\<phi>' : '\U000003c6',
'\\<chi>' : '\U000003c7',
'\\<psi>' : '\U000003c8',
'\\<omega>' : '\U000003c9',
'\\<Gamma>' : '\U00000393',
'\\<Delta>' : '\U00000394',
'\\<Theta>' : '\U00000398',
'\\<Lambda>' : '\U0000039b',
'\\<Xi>' : '\U0000039e',
'\\<Pi>' : '\U000003a0',
'\\<Sigma>' : '\U000003a3',
'\\<Upsilon>' : '\U000003a5',
'\\<Phi>' : '\U000003a6',
'\\<Psi>' : '\U000003a8',
'\\<Omega>' : '\U000003a9',
'\\<bool>' : '\U0001d539',
'\\<complex>' : '\U00002102',
'\\<nat>' : '\U00002115',
'\\<rat>' : '\U0000211a',
'\\<real>' : '\U0000211d',
'\\<int>' : '\U00002124',
'\\<leftarrow>' : '\U00002190',
'\\<longleftarrow>' : '\U000027f5',
'\\<rightarrow>' : '\U00002192',
'\\<longrightarrow>' : '\U000027f6',
'\\<Leftarrow>' : '\U000021d0',
'\\<Longleftarrow>' : '\U000027f8',
'\\<Rightarrow>' : '\U000021d2',
'\\<Longrightarrow>' : '\U000027f9',
'\\<leftrightarrow>' : '\U00002194',
'\\<longleftrightarrow>' : '\U000027f7',
'\\<Leftrightarrow>' : '\U000021d4',
'\\<Longleftrightarrow>' : '\U000027fa',
'\\<mapsto>' : '\U000021a6',
'\\<longmapsto>' : '\U000027fc',
'\\<midarrow>' : '\U00002500',
'\\<Midarrow>' : '\U00002550',
'\\<hookleftarrow>' : '\U000021a9',
'\\<hookrightarrow>' : '\U000021aa',
'\\<leftharpoondown>' : '\U000021bd',
'\\<rightharpoondown>' : '\U000021c1',
'\\<leftharpoonup>' : '\U000021bc',
'\\<rightharpoonup>' : '\U000021c0',
'\\<rightleftharpoons>' : '\U000021cc',
'\\<leadsto>' : '\U0000219d',
'\\<downharpoonleft>' : '\U000021c3',
'\\<downharpoonright>' : '\U000021c2',
'\\<upharpoonleft>' : '\U000021bf',
'\\<upharpoonright>' : '\U000021be',
'\\<restriction>' : '\U000021be',
'\\<Colon>' : '\U00002237',
'\\<up>' : '\U00002191',
'\\<Up>' : '\U000021d1',
'\\<down>' : '\U00002193',
'\\<Down>' : '\U000021d3',
'\\<updown>' : '\U00002195',
'\\<Updown>' : '\U000021d5',
'\\<langle>' : '\U000027e8',
'\\<rangle>' : '\U000027e9',
'\\<lceil>' : '\U00002308',
'\\<rceil>' : '\U00002309',
'\\<lfloor>' : '\U0000230a',
'\\<rfloor>' : '\U0000230b',
'\\<lparr>' : '\U00002987',
'\\<rparr>' : '\U00002988',
'\\<lbrakk>' : '\U000027e6',
'\\<rbrakk>' : '\U000027e7',
'\\<lbrace>' : '\U00002983',
'\\<rbrace>' : '\U00002984',
'\\<guillemotleft>' : '\U000000ab',
'\\<guillemotright>' : '\U000000bb',
'\\<bottom>' : '\U000022a5',
'\\<top>' : '\U000022a4',
'\\<and>' : '\U00002227',
'\\<And>' : '\U000022c0',
'\\<or>' : '\U00002228',
'\\<Or>' : '\U000022c1',
'\\<forall>' : '\U00002200',
'\\<exists>' : '\U00002203',
'\\<nexists>' : '\U00002204',
'\\<not>' : '\U000000ac',
'\\<box>' : '\U000025a1',
'\\<diamond>' : '\U000025c7',
'\\<turnstile>' : '\U000022a2',
'\\<Turnstile>' : '\U000022a8',
'\\<tturnstile>' : '\U000022a9',
'\\<TTurnstile>' : '\U000022ab',
'\\<stileturn>' : '\U000022a3',
'\\<surd>' : '\U0000221a',
'\\<le>' : '\U00002264',
'\\<ge>' : '\U00002265',
'\\<lless>' : '\U0000226a',
'\\<ggreater>' : '\U0000226b',
'\\<lesssim>' : '\U00002272',
'\\<greatersim>' : '\U00002273',
'\\<lessapprox>' : '\U00002a85',
'\\<greaterapprox>' : '\U00002a86',
'\\<in>' : '\U00002208',
'\\<notin>' : '\U00002209',
'\\<subset>' : '\U00002282',
'\\<supset>' : '\U00002283',
'\\<subseteq>' : '\U00002286',
'\\<supseteq>' : '\U00002287',
'\\<sqsubset>' : '\U0000228f',
'\\<sqsupset>' : '\U00002290',
'\\<sqsubseteq>' : '\U00002291',
'\\<sqsupseteq>' : '\U00002292',
'\\<inter>' : '\U00002229',
'\\<Inter>' : '\U000022c2',
'\\<union>' : '\U0000222a',
'\\<Union>' : '\U000022c3',
'\\<squnion>' : '\U00002294',
'\\<Squnion>' : '\U00002a06',
'\\<sqinter>' : '\U00002293',
'\\<Sqinter>' : '\U00002a05',
'\\<setminus>' : '\U00002216',
'\\<propto>' : '\U0000221d',
'\\<uplus>' : '\U0000228e',
'\\<Uplus>' : '\U00002a04',
'\\<noteq>' : '\U00002260',
'\\<sim>' : '\U0000223c',
'\\<doteq>' : '\U00002250',
'\\<simeq>' : '\U00002243',
'\\<approx>' : '\U00002248',
'\\<asymp>' : '\U0000224d',
'\\<cong>' : '\U00002245',
'\\<smile>' : '\U00002323',
'\\<equiv>' : '\U00002261',
'\\<frown>' : '\U00002322',
'\\<Join>' : '\U000022c8',
'\\<bowtie>' : '\U00002a1d',
'\\<prec>' : '\U0000227a',
'\\<succ>' : '\U0000227b',
'\\<preceq>' : '\U0000227c',
'\\<succeq>' : '\U0000227d',
'\\<parallel>' : '\U00002225',
'\\<bar>' : '\U000000a6',
'\\<plusminus>' : '\U000000b1',
'\\<minusplus>' : '\U00002213',
'\\<times>' : '\U000000d7',
'\\<div>' : '\U000000f7',
'\\<cdot>' : '\U000022c5',
'\\<star>' : '\U000022c6',
'\\<bullet>' : '\U00002219',
'\\<circ>' : '\U00002218',
'\\<dagger>' : '\U00002020',
'\\<ddagger>' : '\U00002021',
'\\<lhd>' : '\U000022b2',
'\\<rhd>' : '\U000022b3',
'\\<unlhd>' : '\U000022b4',
'\\<unrhd>' : '\U000022b5',
'\\<triangleleft>' : '\U000025c3',
'\\<triangleright>' : '\U000025b9',
'\\<triangle>' : '\U000025b3',
'\\<triangleq>' : '\U0000225c',
'\\<oplus>' : '\U00002295',
'\\<Oplus>' : '\U00002a01',
'\\<otimes>' : '\U00002297',
'\\<Otimes>' : '\U00002a02',
'\\<odot>' : '\U00002299',
'\\<Odot>' : '\U00002a00',
'\\<ominus>' : '\U00002296',
'\\<oslash>' : '\U00002298',
'\\<dots>' : '\U00002026',
'\\<cdots>' : '\U000022ef',
'\\<Sum>' : '\U00002211',
'\\<Prod>' : '\U0000220f',
'\\<Coprod>' : '\U00002210',
'\\<infinity>' : '\U0000221e',
'\\<integral>' : '\U0000222b',
'\\<ointegral>' : '\U0000222e',
'\\<clubsuit>' : '\U00002663',
'\\<diamondsuit>' : '\U00002662',
'\\<heartsuit>' : '\U00002661',
'\\<spadesuit>' : '\U00002660',
'\\<aleph>' : '\U00002135',
'\\<emptyset>' : '\U00002205',
'\\<nabla>' : '\U00002207',
'\\<partial>' : '\U00002202',
'\\<flat>' : '\U0000266d',
'\\<natural>' : '\U0000266e',
'\\<sharp>' : '\U0000266f',
'\\<angle>' : '\U00002220',
'\\<copyright>' : '\U000000a9',
'\\<registered>' : '\U000000ae',
'\\<hyphen>' : '\U000000ad',
'\\<inverse>' : '\U000000af',
'\\<onequarter>' : '\U000000bc',
'\\<onehalf>' : '\U000000bd',
'\\<threequarters>' : '\U000000be',
'\\<ordfeminine>' : '\U000000aa',
'\\<ordmasculine>' : '\U000000ba',
'\\<section>' : '\U000000a7',
'\\<paragraph>' : '\U000000b6',
'\\<exclamdown>' : '\U000000a1',
'\\<questiondown>' : '\U000000bf',
'\\<euro>' : '\U000020ac',
'\\<pounds>' : '\U000000a3',
'\\<yen>' : '\U000000a5',
'\\<cent>' : '\U000000a2',
'\\<currency>' : '\U000000a4',
'\\<degree>' : '\U000000b0',
'\\<amalg>' : '\U00002a3f',
'\\<mho>' : '\U00002127',
'\\<lozenge>' : '\U000025ca',
'\\<wp>' : '\U00002118',
'\\<wrong>' : '\U00002240',
'\\<struct>' : '\U000022c4',
'\\<acute>' : '\U000000b4',
'\\<index>' : '\U00000131',
'\\<dieresis>' : '\U000000a8',
'\\<cedilla>' : '\U000000b8',
'\\<hungarumlaut>' : '\U000002dd',
'\\<some>' : '\U000003f5',
'\\<newline>' : '\U000023ce',
'\\<open>' : '\U00002039',
'\\<close>' : '\U0000203a',
'\\<here>' : '\U00002302',
'\\<^sub>' : '\U000021e9',
'\\<^sup>' : '\U000021e7',
'\\<^bold>' : '\U00002759',
'\\<^bsub>' : '\U000021d8',
'\\<^esub>' : '\U000021d9',
'\\<^bsup>' : '\U000021d7',
'\\<^esup>' : '\U000021d6',
}
lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
def __init__(self, **options):
Filter.__init__(self, **options)
lang = get_choice_opt(options, 'lang',
['isabelle', 'latex'], 'isabelle')
self.symbols = self.lang_map[lang]
def filter(self, lexer, stream):
for ttype, value in stream:
if value in self.symbols:
yield ttype, self.symbols[value]
else:
yield ttype, value
class KeywordCaseFilter(Filter):
"""Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
    code to your style guide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case',
['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(str, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in [('spaces', '·'),
('tabs', '»'),
('newlines', '¶')]:
opt = options.get(name, False)
if isinstance(opt, str) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' ' * (tabsize - 1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or ' '
tabs = self.tabs or '\t'
newlines = self.newlines or '\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
yield from _replace_special(ttype, value, regex, Whitespace,
replacefunc)
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
class GobbleFilter(Filter):
"""Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return '', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = '\n'.join(parts)
if value != '':
yield ttype, value
class TokenMergeFilter(Filter):
"""Merges consecutive tokens with the same token type in the output
stream of a lexer.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
'symbols': SymbolFilter,
}
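# Lookup sketch for the registry above (option values are illustrative):
#
#     >>> f = get_filter_by_name('keywordcase', case='upper')
#     >>> isinstance(f, KeywordCaseFilter)
#     True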
| 40,332 | Python | 41.861849 | 81 | 0.38017 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/python.py | """
pygments.lexers.python
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Python and related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import keyword
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
default, words, combined, do_insertions, this, line_re
from pygments.util import get_bool_opt, shebang_matches
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other, Error, Whitespace
from pygments import unistring as uni
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'Python2Lexer', 'Python2TracebackLexer',
'CythonLexer', 'DgLexer', 'NumPyLexer']
class PythonLexer(RegexLexer):
"""
For Python source code (version 3.x).
.. versionadded:: 0.10
.. versionchanged:: 2.5
This is now the default ``PythonLexer``. It is still available as the
alias ``Python3Lexer``.
"""
name = 'Python'
url = 'http://www.python.org'
aliases = ['python', 'py', 'sage', 'python3', 'py3']
filenames = [
'*.py',
'*.pyw',
# Type stubs
'*.pyi',
# Jython
'*.jy',
# Sage
'*.sage',
# SCons
'*.sc',
'SConstruct',
'SConscript',
# Skylark/Starlark (used by Bazel, Buck, and Pants)
'*.bzl',
'BUCK',
'BUILD',
'BUILD.bazel',
'WORKSPACE',
# Twisted Application infrastructure
'*.tac',
]
mimetypes = ['text/x-python', 'application/x-python',
'text/x-python3', 'application/x-python3']
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting (still valid in Py3)
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsaux%]', String.Interpol),
# the new style '{}'.format(...) string formatting
(r'\{'
r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
r'(\![sra])?' # conversion
r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
r'\}', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%{\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%|(\{{1,2})', ttype)
# newlines are an error (use "nl" state)
]
def fstring_rules(ttype):
return [
# Assuming that a '}' is the closing brace after format specifier.
# Sadly, this means that we won't detect syntax error. But it's
# more important to parse correct syntax correctly, than to
# highlight invalid syntax.
(r'\}', String.Interpol),
(r'\{', String.Interpol, 'expr-inside-fstring'),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"{}\n]+', ttype),
(r'[\'"\\]', ttype),
# newlines are an error (use "nl" state)
]
tokens = {
'root': [
(r'\n', Whitespace),
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
bygroups(Whitespace, String.Affix, String.Doc)),
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
bygroups(Whitespace, String.Affix, String.Doc)),
(r'\A#!.+$', Comment.Hashbang),
(r'#.*$', Comment.Single),
(r'\\\n', Text),
(r'\\', Text),
include('keywords'),
include('soft-keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('expr'),
],
'expr': [
# raw f-strings
('(?i)(rf|fr)(""")',
bygroups(String.Affix, String.Double),
combined('rfstringescape', 'tdqf')),
("(?i)(rf|fr)(''')",
bygroups(String.Affix, String.Single),
combined('rfstringescape', 'tsqf')),
('(?i)(rf|fr)(")',
bygroups(String.Affix, String.Double),
combined('rfstringescape', 'dqf')),
("(?i)(rf|fr)(')",
bygroups(String.Affix, String.Single),
combined('rfstringescape', 'sqf')),
# non-raw f-strings
('([fF])(""")', bygroups(String.Affix, String.Double),
combined('fstringescape', 'tdqf')),
("([fF])(''')", bygroups(String.Affix, String.Single),
combined('fstringescape', 'tsqf')),
('([fF])(")', bygroups(String.Affix, String.Double),
combined('fstringescape', 'dqf')),
("([fF])(')", bygroups(String.Affix, String.Single),
combined('fstringescape', 'sqf')),
# raw bytes and strings
('(?i)(rb|br|r)(""")',
bygroups(String.Affix, String.Double), 'tdqs'),
("(?i)(rb|br|r)(''')",
bygroups(String.Affix, String.Single), 'tsqs'),
('(?i)(rb|br|r)(")',
bygroups(String.Affix, String.Double), 'dqs'),
("(?i)(rb|br|r)(')",
bygroups(String.Affix, String.Single), 'sqs'),
# non-raw strings
('([uU]?)(""")', bygroups(String.Affix, String.Double),
combined('stringescape', 'tdqs')),
("([uU]?)(''')", bygroups(String.Affix, String.Single),
combined('stringescape', 'tsqs')),
('([uU]?)(")', bygroups(String.Affix, String.Double),
combined('stringescape', 'dqs')),
("([uU]?)(')", bygroups(String.Affix, String.Single),
combined('stringescape', 'sqs')),
# non-raw bytes
('([bB])(""")', bygroups(String.Affix, String.Double),
combined('bytesescape', 'tdqs')),
("([bB])(''')", bygroups(String.Affix, String.Single),
combined('bytesescape', 'tsqs')),
('([bB])(")', bygroups(String.Affix, String.Double),
combined('bytesescape', 'dqs')),
("([bB])(')", bygroups(String.Affix, String.Single),
combined('bytesescape', 'sqs')),
(r'[^\S\n]+', Text),
include('numbers'),
(r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
(r'[]{}:(),;[]', Punctuation),
(r'(in|is|and|or|not)\b', Operator.Word),
include('expr-keywords'),
include('builtins'),
include('magicfuncs'),
include('magicvars'),
include('name'),
],
'expr-inside-fstring': [
(r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
# without format specifier
(r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
r'(\![sraf])?' # conversion
r'\}', String.Interpol, '#pop'),
# with format specifier
# we'll catch the remaining '}' in the outer scope
(r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
r'(\![sraf])?' # conversion
r':', String.Interpol, '#pop'),
(r'\s+', Whitespace), # allow new lines
include('expr'),
],
'expr-inside-fstring-inner': [
(r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
(r'[])}]', Punctuation, '#pop'),
(r'\s+', Whitespace), # allow new lines
include('expr'),
],
'expr-keywords': [
# Based on https://docs.python.org/3/reference/expressions.html
(words((
'async for', 'await', 'else', 'for', 'if', 'lambda',
'yield', 'yield from'), suffix=r'\b'),
Keyword),
(words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
],
'keywords': [
(words((
'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with'), suffix=r'\b'),
Keyword),
(words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
],
'soft-keywords': [
# `match`, `case` and `_` soft keywords
(r'(^[ \t]*)' # at beginning of line + possible indentation
r'(match|case)\b' # a possible keyword
r'(?![ \t]*(?:' # not followed by...
r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't
r'|'.join(keyword.kwlist) + r')\b))', # pattern matching
bygroups(Text, Keyword), 'soft-keywords-inner'),
],
'soft-keywords-inner': [
# optional `_` keyword
(r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)),
default('#pop')
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray',
'breakpoint', 'bytes', 'chr', 'classmethod', 'compile', 'complex',
'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter',
'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr',
'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass',
'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview',
'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print',
'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple',
'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
'NotImplementedError', 'OSError', 'OverflowError',
'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning',
'RuntimeError', 'RuntimeWarning', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError',
'Warning', 'WindowsError', 'ZeroDivisionError',
# new builtin exceptions from PEP 3151
'BlockingIOError', 'ChildProcessError', 'ConnectionError',
'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
'PermissionError', 'ProcessLookupError', 'TimeoutError',
# others new in Python 3
'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError',
'EncodingWarning'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'magicfuncs': [
(words((
'__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__',
'__and__', '__anext__', '__await__', '__bool__', '__bytes__',
'__call__', '__complex__', '__contains__', '__del__', '__delattr__',
'__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__',
'__eq__', '__exit__', '__float__', '__floordiv__', '__format__',
'__ge__', '__get__', '__getattr__', '__getattribute__',
'__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__',
'__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__',
'__imul__', '__index__', '__init__', '__instancecheck__',
'__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__',
'__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__',
'__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
'__new__', '__next__', '__or__', '__pos__', '__pow__',
'__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__',
'__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__',
'__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__',
'__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
'__rxor__', '__set__', '__setattr__', '__setitem__', '__str__',
'__sub__', '__subclasscheck__', '__truediv__',
'__xor__'), suffix=r'\b'),
Name.Function.Magic),
],
'magicvars': [
(words((
'__annotations__', '__bases__', '__class__', '__closure__',
'__code__', '__defaults__', '__dict__', '__doc__', '__file__',
'__func__', '__globals__', '__kwdefaults__', '__module__',
'__mro__', '__name__', '__objclass__', '__qualname__',
'__self__', '__slots__', '__weakref__'), suffix=r'\b'),
Name.Variable.Magic),
],
'numbers': [
(r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)'
r'([eE][+-]?\d(?:_?\d)*)?', Number.Float),
(r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float),
(r'0[oO](?:_?[0-7])+', Number.Oct),
(r'0[bB](?:_?[01])+', Number.Bin),
(r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex),
(r'\d(?:_?\d)*', Number.Integer),
],
'name': [
(r'@' + uni_name, Name.Decorator),
(r'@', Operator), # new matrix multiplication operator
(uni_name, Name),
],
'funcname': [
include('magicfuncs'),
(uni_name, Name.Function, '#pop'),
default('#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'import': [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
default('#pop') # all else: go back
],
'fromimport': [
(r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
(r'\.', Name.Namespace),
# if None occurs here, it's "raise x from None", since None can
# never be a module name
(r'None\b', Name.Builtin.Pseudo, '#pop'),
(uni_name, Name.Namespace),
default('#pop'),
],
'rfstringescape': [
(r'\{\{', String.Escape),
(r'\}\}', String.Escape),
],
'fstringescape': [
include('rfstringescape'),
include('stringescape'),
],
'bytesescape': [
(r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'stringescape': [
(r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape),
include('bytesescape')
],
'fstrings-single': fstring_rules(String.Single),
'fstrings-double': fstring_rules(String.Double),
'strings-single': innerstring_rules(String.Single),
'strings-double': innerstring_rules(String.Double),
'dqf': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('fstrings-double')
],
'sqf': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('fstrings-single')
],
'dqs': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings-double')
],
'sqs': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings-single')
],
'tdqf': [
(r'"""', String.Double, '#pop'),
include('fstrings-double'),
(r'\n', String.Double)
],
'tsqf': [
(r"'''", String.Single, '#pop'),
include('fstrings-single'),
(r'\n', String.Single)
],
'tdqs': [
(r'"""', String.Double, '#pop'),
include('strings-double'),
(r'\n', String.Double)
],
'tsqs': [
(r"'''", String.Single, '#pop'),
include('strings-single'),
(r'\n', String.Single)
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \
'import ' in text[:1000]
Python3Lexer = PythonLexer
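
# Illustrative usage sketch (not part of the original source), assuming a
# standard pygments install:
#
#   >>> from pygments import highlight
#   >>> from pygments.formatters import TerminalFormatter
#   >>> code = 'def f(x):\n    return x * 2\n'
#   >>> print(highlight(code, PythonLexer(), TerminalFormatter()))
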
class Python2Lexer(RegexLexer):
"""
For Python 2.x source code.
.. versionchanged:: 2.5
This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
refers to the Python 3 variant. File name patterns like ``*.py`` have
been moved to Python 3 as well.
"""
name = 'Python 2.x'
url = 'http://www.python.org'
aliases = ['python2', 'py2']
filenames = [] # now taken over by PythonLexer (3.x)
mimetypes = ['text/x-python2', 'application/x-python2']
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%', ttype),
# newlines are an error (use "nl" state)
]
tokens = {
'root': [
(r'\n', Whitespace),
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
bygroups(Whitespace, String.Affix, String.Doc)),
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
bygroups(Whitespace, String.Affix, String.Doc)),
(r'[^\S\n]+', Text),
(r'\A#!.+$', Comment.Hashbang),
(r'#.*$', Comment.Single),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('builtins'),
include('magicfuncs'),
include('magicvars'),
include('backtick'),
('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
bygroups(String.Affix, String.Double), 'tdqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
bygroups(String.Affix, String.Single), 'tsqs'),
('([rR]|[uUbB][rR]|[rR][uUbB])(")',
bygroups(String.Affix, String.Double), 'dqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(')",
bygroups(String.Affix, String.Single), 'sqs'),
('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
combined('stringescape', 'tdqs')),
("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
combined('stringescape', 'tsqs')),
('([uUbB]?)(")', bygroups(String.Affix, String.Double),
combined('stringescape', 'dqs')),
("([uUbB]?)(')", bygroups(String.Affix, String.Single),
combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
'print', 'raise', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with'), suffix=r'\b'),
Keyword),
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
'unichr', 'unicode', 'vars', 'xrange', 'zip'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
r')\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
'MemoryError', 'NameError',
'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
'PendingDeprecationWarning', 'ReferenceError',
'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'magicfuncs': [
(words((
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
'__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
'__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
'__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
'__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
'__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
'__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
'__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
'__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
'__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
'__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
'__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
'__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
'__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
'__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
'__unicode__', '__xor__'), suffix=r'\b'),
Name.Function.Magic),
],
'magicvars': [
(words((
'__bases__', '__class__', '__closure__', '__code__', '__defaults__',
'__dict__', '__doc__', '__file__', '__func__', '__globals__',
'__metaclass__', '__module__', '__mro__', '__name__', '__self__',
'__slots__', '__weakref__'),
suffix=r'\b'),
Name.Variable.Magic),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[\w.]+', Name.Decorator),
(r'[a-zA-Z_]\w*', Name),
],
'funcname': [
include('magicfuncs'),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
default('#pop'),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'(?:[ \t]|\\\n)+', Text),
(r'as\b', Keyword.Namespace),
(r',', Operator),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
default('#pop') # all else: go back
],
'fromimport': [
(r'(?:[ \t]|\\\n)+', Text),
(r'import\b', Keyword.Namespace, '#pop'),
# if None occurs here, it's "raise x from None", since None can
# never be a module name
(r'None\b', Name.Builtin.Pseudo, '#pop'),
# sadly, in "raise x from y" y will be highlighted as namespace too
(r'[a-zA-Z_.][\w.]*', Name.Namespace),
# anything else here also means "raise x from y" and is therefore
# not an error
default('#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings-single': innerstring_rules(String.Single),
'strings-double': innerstring_rules(String.Double),
'dqs': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings-double')
],
'sqs': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings-single')
],
'tdqs': [
(r'"""', String.Double, '#pop'),
include('strings-double'),
(r'\n', String.Double)
],
'tsqs': [
(r"'''", String.Single, '#pop'),
include('strings-single'),
(r'\n', String.Single)
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?2(\.\d)?')
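
# Illustrative sketch (assuming a standard pygments install): this lexer no
# longer claims any file patterns, so it is normally requested explicitly by
# alias.
#
#   >>> from pygments.lexers import get_lexer_by_name
#   >>> get_lexer_by_name('python2').name
#   'Python 2.x'
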
class PythonConsoleLexer(Lexer):
"""
    For Python console output or doctests, such as:

    .. sourcecode:: pycon

        >>> a = 'foo'
        >>> print a
        foo
        >>> 1 / 0
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        ZeroDivisionError: integer division or modulo by zero

    Additional options:

    `python3`
        Use Python 3 lexer for code. Default is ``True``.

        .. versionadded:: 1.0
        .. versionchanged:: 2.5
           Now defaults to ``True``.
"""
name = 'Python console session'
aliases = ['pycon']
mimetypes = ['text/x-python-doctest']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', True)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
if self.python3:
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
else:
pylexer = Python2Lexer(**self.options)
tblexer = Python2TracebackLexer(**self.options)
curcode = ''
insertions = []
curtb = ''
tbindex = 0
tb = 0
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>> ') or line.startswith('... '):
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:4])]))
curcode += line[4:]
elif line.rstrip() == '...' and not tb:
# only a new >>> prompt can end an exception block
# otherwise an ellipsis in place of the traceback frames
# will be mishandled
insertions.append((len(curcode),
[(0, Generic.Prompt, '...')]))
curcode += line[3:]
else:
if curcode:
yield from do_insertions(
insertions, pylexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
if (line.startswith('Traceback (most recent call last):') or
re.match(' File "[^"]+", line \\d+\\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
elif line == 'KeyboardInterrupt\n':
yield match.start(), Name.Class, line
elif tb:
curtb += line
if not (line.startswith(' ') or line.strip() == '...'):
tb = 0
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
curtb = ''
else:
yield match.start(), Generic.Output, line
if curcode:
yield from do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode))
if curtb:
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
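
# Illustrative usage sketch (assuming a standard pygments install): console
# sessions are usually lexed via the ``pycon`` alias; pass ``python3=False``
# as an option to highlight Python 2 sessions.
#
#   >>> from pygments import highlight
#   >>> from pygments.lexers import get_lexer_by_name
#   >>> from pygments.formatters import HtmlFormatter
#   >>> html = highlight('>>> 1 + 1\n2\n', get_lexer_by_name('pycon'),
#   ...                  HtmlFormatter())
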
class PythonTracebackLexer(RegexLexer):
"""
For Python 3.x tracebacks, with support for chained exceptions.
.. versionadded:: 1.0
.. versionchanged:: 2.5
This is now the default ``PythonTracebackLexer``. It is still available
as the alias ``Python3TracebackLexer``.
"""
name = 'Python Traceback'
aliases = ['pytb', 'py3tb']
filenames = ['*.pytb', '*.py3tb']
mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']
tokens = {
'root': [
(r'\n', Whitespace),
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
(r'^During handling of the above exception, another '
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
(r'^.*\n', Other),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
(r'^( )(.+)(\n)',
bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Whitespace, Comment, Whitespace)), # for doctests...
(r'^([^:]+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
(r'^([a-zA-Z_][\w.]*)(:?\n)',
bygroups(Generic.Error, Whitespace), '#pop')
],
'markers': [
# Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>`
# error locations in Python 3.11+, or single-caret markers
# for syntax errors before that.
(r'^( {4,})([~^]+)(\n)',
bygroups(Whitespace, Punctuation.Marker, Whitespace),
'#pop'),
default('#pop'),
],
}
Python3TracebackLexer = PythonTracebackLexer
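
# Illustrative sketch (assuming a standard pygments install): a captured
# traceback string can be fed straight to this lexer.
#
#   >>> tb = ('Traceback (most recent call last):\n'
#   ...       '  File "<stdin>", line 1, in <module>\n'
#   ...       'ZeroDivisionError: division by zero\n')
#   >>> tokens = list(PythonTracebackLexer().get_tokens(tb))
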
class Python2TracebackLexer(RegexLexer):
"""
For Python tracebacks.
.. versionadded:: 0.7
.. versionchanged:: 2.5
This class has been renamed from ``PythonTracebackLexer``.
``PythonTracebackLexer`` now refers to the Python 3 variant.
"""
name = 'Python 2.x Traceback'
aliases = ['py2tb']
filenames = ['*.py2tb']
mimetypes = ['text/x-python2-traceback']
tokens = {
'root': [
# Cover both (most recent call last) and (innermost last)
# The optional ^C allows us to catch keyboard interrupt signals.
(r'^(\^C)?(Traceback.*\n)',
bygroups(Text, Generic.Traceback), 'intb'),
# SyntaxError starts with this.
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
(r'^.*\n', Other),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python2Lexer), Whitespace), 'marker'),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Whitespace)), # for doctests...
(r'^([^:]+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
(r'^([a-zA-Z_]\w*)(:?\n)',
bygroups(Generic.Error, Whitespace), '#pop')
],
'marker': [
# For syntax errors.
(r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
default('#pop'),
],
}
class CythonLexer(RegexLexer):
"""
For Pyrex and Cython source code.
.. versionadded:: 1.1
"""
name = 'Cython'
url = 'http://cython.org'
aliases = ['cython', 'pyx', 'pyrex']
filenames = ['*.pyx', '*.pxd', '*.pxi']
mimetypes = ['text/x-cython', 'application/x-cython']
tokens = {
'root': [
(r'\n', Whitespace),
(r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)),
(r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Whitespace),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'(<)([a-zA-Z0-9.?]+)(>)',
bygroups(Punctuation, Keyword.Type, Punctuation)),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
(r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
bygroups(Keyword, Number.Integer, Operator, Name, Operator,
Name, Punctuation)),
include('keywords'),
(r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
# (should actually start a block with only cdefs)
(r'(cdef)(:)', bygroups(Keyword, Punctuation)),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
(r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
Keyword),
(r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t',
'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned',
'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
r')\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError',
'OSError', 'OverflowError', 'OverflowWarning',
'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError',
'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError',
'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'numbers': [
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@\w+', Name.Decorator),
(r'[a-zA-Z_]\w*', Name),
],
'funcname': [
(r'[a-zA-Z_]\w*', Name.Function, '#pop')
],
'cdef': [
(r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
(r'(struct|enum|union|class)\b', Keyword),
(r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
bygroups(Name.Function, Text), '#pop'),
(r'([a-zA-Z_]\w*)(\s*)(,)',
bygroups(Name.Function, Text, Punctuation)),
(r'from\b', Keyword, '#pop'),
(r'as\b', Keyword),
(r':', Punctuation, '#pop'),
(r'(?=["\'])', Text, '#pop'),
(r'[a-zA-Z_]\w*', Keyword.Type),
(r'.', Text),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
default('#pop') # all else: go back
],
'fromimport': [
(r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
(r'[a-zA-Z_.][\w.]*', Name.Namespace),
# ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
default('#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
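
# Illustrative sketch (assuming a standard pygments install): Cython sources
# are picked up by file extension.
#
#   >>> from pygments.lexers import get_lexer_for_filename
#   >>> get_lexer_for_filename('wrapper.pyx').name
#   'Cython'
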
class DgLexer(RegexLexer):
"""
Lexer for dg,
a functional and object-oriented programming language
running on the CPython 3 VM.
.. versionadded:: 1.6
"""
name = 'dg'
aliases = ['dg']
filenames = ['*.dg']
mimetypes = ['text/x-dg']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment.Single),
(r'(?i)0b[01]+', Number.Bin),
(r'(?i)0o[0-7]+', Number.Oct),
(r'(?i)0x[0-9a-f]+', Number.Hex),
(r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
(r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
(r'(?i)[+-]?[0-9]+j?', Number.Integer),
(r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
(r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
(r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
(r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),
(r"`\w+'*`", Operator),
(r'\b(and|in|is|or|where)\b', Operator.Word),
(r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),
(words((
'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str',
'super', 'tuple', 'tuple\'', 'type'),
prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst',
'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init',
'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len',
'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow',
'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd',
'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'),
prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
Name.Builtin.Pseudo),
(r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
Name.Exception),
(r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
r"SystemExit)(?!['\w])", Name.Exception),
(r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|"
r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),
(r"[A-Z_]+'*(?!['\w])", Name),
(r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
(r"\w+'*", Name),
(r'[()]', Punctuation),
(r'.', Error),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'string': [
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String),
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop')
],
'sqs': [
(r"'", String, '#pop')
],
'tdqs': [
(r'"""', String, '#pop')
],
'tsqs': [
(r"'''", String, '#pop')
],
}
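
# Illustrative sketch (assuming a standard pygments install): like any
# RegexLexer, DgLexer can be driven directly to inspect its token stream.
#
#   >>> tokens = list(DgLexer().get_tokens("print 'hello'"))
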
class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
.. versionadded:: 0.10
"""
name = 'NumPy'
url = 'https://numpy.org/'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = {
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
def analyse_text(text):
ltext = text[:1000]
return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or
'import ' in ltext) \
and ('import numpy' in ltext or 'from numpy import' in ltext)
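
# Illustrative check (assuming a standard pygments install): names listed in
# EXTRA_KEYWORDS are re-tagged as Keyword.Pseudo instead of plain Name.
#
#   >>> list(NumPyLexer().get_tokens('zeros'))[0]
#   (Token.Keyword.Pseudo, 'zeros')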
| 53,524 | Python | 43.419087 | 97 | 0.455347 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/smithy.py | """
pygments.lexers.smithy
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Smithy IDL.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Keyword, Name, String, \
Number, Whitespace, Punctuation
__all__ = ['SmithyLexer']
class SmithyLexer(RegexLexer):
"""
For Smithy IDL
.. versionadded:: 2.10
"""
name = 'Smithy'
url = 'https://awslabs.github.io/smithy/'
filenames = ['*.smithy']
aliases = ['smithy']
unquoted = r'[A-Za-z0-9_\.#$-]+'
identifier = r"[A-Za-z0-9_\.#$-]+"
simple_shapes = (
'use', 'byte', 'short', 'integer', 'long', 'float', 'document',
'double', 'bigInteger', 'bigDecimal', 'boolean', 'blob', 'string',
'timestamp',
)
aggregate_shapes = (
'apply', 'list', 'map', 'set', 'structure', 'union', 'resource',
'operation', 'service', 'trait'
)
tokens = {
'root': [
(r'///.*$', Comment.Multiline),
(r'//.*$', Comment),
(r'@[0-9a-zA-Z\.#-]*', Name.Decorator),
(r'(=)', Name.Decorator),
(r'^(\$version)(:)(.+)',
bygroups(Keyword.Declaration, Name.Decorator, Name.Class)),
(r'^(namespace)(\s+' + identifier + r')\b',
bygroups(Keyword.Declaration, Name.Class)),
(words(simple_shapes,
prefix=r'^', suffix=r'(\s+' + identifier + r')\b'),
bygroups(Keyword.Declaration, Name.Class)),
(words(aggregate_shapes,
prefix=r'^', suffix=r'(\s+' + identifier + r')'),
bygroups(Keyword.Declaration, Name.Class)),
(r'^(metadata)(\s+)((?:\S+)|(?:\"[^"]+\"))(\s*)(=)',
bygroups(Keyword.Declaration, Whitespace, Name.Class,
Whitespace, Name.Decorator)),
(r"(true|false|null)", Keyword.Constant),
(r"(-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)", Number),
(identifier + ":", Name.Label),
(identifier, Name.Variable.Class),
(r'\[', Text, "#push"),
(r'\]', Text, "#pop"),
(r'\(', Text, "#push"),
(r'\)', Text, "#pop"),
(r'\{', Text, "#push"),
(r'\}', Text, "#pop"),
(r'"{3}(\\\\|\n|\\")*"{3}', String.Doc),
(r'"(\\\\|\n|\\"|[^"])*"', String.Double),
(r"'(\\\\|\n|\\'|[^'])*'", String.Single),
(r'[:,]+', Punctuation),
(r'\s+', Whitespace),
]
}
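
# Illustrative usage sketch (assuming a standard pygments install; the sample
# model below is hypothetical):
#
#   >>> from pygments import highlight
#   >>> from pygments.formatters import HtmlFormatter
#   >>> src = '$version: "2"\nnamespace example.weather\n'
#   >>> html = highlight(src, SmithyLexer(), HtmlFormatter())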
| 2,660 | Python | 32.683544 | 75 | 0.457143 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/procfile.py | """
pygments.lexers.procfile
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Procfile file format.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Name, Number, String, Text, Punctuation
__all__ = ["ProcfileLexer"]
class ProcfileLexer(RegexLexer):
"""
Lexer for Procfile file format.
    The format is used to run processes on Heroku and by the Foreman and
    Honcho tools.
.. versionadded:: 2.10
"""
name = 'Procfile'
url = 'https://devcenter.heroku.com/articles/procfile#procfile-format'
aliases = ['procfile']
filenames = ['Procfile']
tokens = {
'root': [
(r'^([a-z]+)(:)', bygroups(Name.Label, Punctuation)),
(r'\s+', Text.Whitespace),
(r'"[^"]*"', String),
(r"'[^']*'", String),
(r'[0-9]+', Number.Integer),
(r'\$[a-zA-Z_][\w]*', Name.Variable),
(r'(\w+)(=)(\w+)', bygroups(Name.Variable, Punctuation, String)),
(r'([\w\-\./]+)', Text),
],
}
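
# Illustrative sketch (assuming a standard pygments install): Procfiles have
# no extension, so lexer lookup goes by the exact file name.
#
#   >>> from pygments.lexers import get_lexer_for_filename
#   >>> get_lexer_for_filename('Procfile').name
#   'Procfile'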
| 1,156 | Python | 25.906976 | 77 | 0.544118 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/roboconf.py | """
pygments.lexers.roboconf
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Roboconf DSL.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, re
from pygments.token import Text, Operator, Keyword, Name, Comment
__all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer']
class RoboconfGraphLexer(RegexLexer):
"""
Lexer for Roboconf graph files.
.. versionadded:: 2.1
"""
name = 'Roboconf Graph'
aliases = ['roboconf-graph']
filenames = ['*.graph']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
# Skip white spaces
(r'\s+', Text),
# There is one operator
(r'=', Operator),
# Keywords
(words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
(words((
'installer', 'extends', 'exports', 'imports', 'facets',
'children'), suffix=r'\s*:?', prefix=r'\b'), Name),
# Comments
(r'#.*\n', Comment),
# Default
(r'[^#]', Text),
(r'.*\n', Text)
]
}
class RoboconfInstancesLexer(RegexLexer):
"""
Lexer for Roboconf instances files.
.. versionadded:: 2.1
"""
name = 'Roboconf Instances'
aliases = ['roboconf-instances']
filenames = ['*.instances']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
# Skip white spaces
(r'\s+', Text),
# Keywords
(words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
            (words(('name', 'count'), suffix=r'\s*:?', prefix=r'\b'), Name),
(r'\s*[\w.-]+\s*:', Name),
# Comments
(r'#.*\n', Comment),
# Default
(r'[^#]', Text),
(r'.*\n', Text)
]
}
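
# Illustrative sketch (assuming a standard pygments install and that no other
# lexer claims these extensions): both Roboconf lexers are selected by file
# extension.
#
#   >>> from pygments.lexers import get_lexer_for_filename
#   >>> get_lexer_for_filename('app.graph').name
#   'Roboconf Graph'
#   >>> get_lexer_for_filename('app.instances').name
#   'Roboconf Instances'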
| 1,962 | Python | 22.939024 | 87 | 0.487258 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_scheme_builtins.py | """
pygments.lexers._scheme_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Scheme builtins.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated by external/scheme-builtins-generator.scm
# using Guile 3.0.5.130-5a1e7.
scheme_keywords = {
"*unspecified*",
"...",
"=>",
"@",
"@@",
"_",
"add-to-load-path",
"and",
"begin",
"begin-deprecated",
"case",
"case-lambda",
"case-lambda*",
"cond",
"cond-expand",
"current-filename",
"current-source-location",
"debug-set!",
"define",
"define*",
"define-inlinable",
"define-library",
"define-macro",
"define-module",
"define-once",
"define-option-interface",
"define-private",
"define-public",
"define-record-type",
"define-syntax",
"define-syntax-parameter",
"define-syntax-rule",
"define-values",
"defmacro",
"defmacro-public",
"delay",
"do",
"else",
"eval-when",
"export",
"export!",
"export-syntax",
"false-if-exception",
"identifier-syntax",
"if",
"import",
"include",
"include-ci",
"include-from-path",
"include-library-declarations",
"lambda",
"lambda*",
"let",
"let*",
"let*-values",
"let-syntax",
"let-values",
"letrec",
"letrec*",
"letrec-syntax",
"library",
"load",
"match",
"match-lambda",
"match-lambda*",
"match-let",
"match-let*",
"match-letrec",
"or",
"parameterize",
"print-set!",
"quasiquote",
"quasisyntax",
"quote",
"quote-syntax",
"re-export",
"re-export-syntax",
"read-set!",
"require-extension",
"set!",
"start-stack",
"syntax",
"syntax-case",
"syntax-error",
"syntax-parameterize",
"syntax-rules",
"unless",
"unquote",
"unquote-splicing",
"unsyntax",
"unsyntax-splicing",
"use-modules",
"when",
"while",
"with-ellipsis",
"with-fluids",
"with-syntax",
"λ",
}
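
# Illustrative check (not part of the generated data): these sets are consumed
# by pygments' Scheme lexer to classify identifiers as keywords vs. builtins.
#
#   >>> from pygments.lexers._scheme_builtins import scheme_keywords
#   >>> 'define' in scheme_keywords
#   True
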
scheme_builtins = {
"$sc-dispatch",
"%char-set-dump",
"%get-pre-modules-obarray",
"%get-stack-size",
"%global-site-dir",
"%init-rdelim-builtins",
"%init-rw-builtins",
"%library-dir",
"%load-announce",
"%load-hook",
"%make-void-port",
"%package-data-dir",
"%port-property",
"%print-module",
"%resolve-variable",
"%search-load-path",
"%set-port-property!",
"%site-ccache-dir",
"%site-dir",
"%start-stack",
"%string-dump",
"%symbol-dump",
"%warn-auto-compilation-enabled",
"*",
"+",
"-",
"->bool",
"->char-set",
"/",
"1+",
"1-",
"<",
"<=",
"=",
">",
">=",
"abort-to-prompt",
"abort-to-prompt*",
"abs",
"absolute-file-name?",
"accept",
"access?",
"acons",
"acos",
"acosh",
"add-hook!",
"addrinfo:addr",
"addrinfo:canonname",
"addrinfo:fam",
"addrinfo:flags",
"addrinfo:protocol",
"addrinfo:socktype",
"adjust-port-revealed!",
"alarm",
"alist-cons",
"alist-copy",
"alist-delete",
"alist-delete!",
"allocate-struct",
"and-map",
"and=>",
"angle",
"any",
"append",
"append!",
"append-map",
"append-map!",
"append-reverse",
"append-reverse!",
"apply",
"array->list",
"array-cell-ref",
"array-cell-set!",
"array-contents",
"array-copy!",
"array-copy-in-order!",
"array-dimensions",
"array-equal?",
"array-fill!",
"array-for-each",
"array-in-bounds?",
"array-index-map!",
"array-length",
"array-map!",
"array-map-in-order!",
"array-rank",
"array-ref",
"array-set!",
"array-shape",
"array-slice",
"array-slice-for-each",
"array-slice-for-each-in-order",
"array-type",
"array-type-code",
"array?",
"ash",
"asin",
"asinh",
"assert-load-verbosity",
"assoc",
"assoc-ref",
"assoc-remove!",
"assoc-set!",
"assq",
"assq-ref",
"assq-remove!",
"assq-set!",
"assv",
"assv-ref",
"assv-remove!",
"assv-set!",
"atan",
"atanh",
"autoload-done!",
"autoload-done-or-in-progress?",
"autoload-in-progress!",
"backtrace",
"basename",
"batch-mode?",
"beautify-user-module!",
"bind",
"bind-textdomain-codeset",
"bindtextdomain",
"bit-count",
"bit-count*",
"bit-extract",
"bit-invert!",
"bit-position",
"bit-set*!",
"bitvector",
"bitvector->list",
"bitvector-bit-clear?",
"bitvector-bit-set?",
"bitvector-clear-all-bits!",
"bitvector-clear-bit!",
"bitvector-clear-bits!",
"bitvector-count",
"bitvector-count-bits",
"bitvector-fill!",
"bitvector-flip-all-bits!",
"bitvector-length",
"bitvector-position",
"bitvector-ref",
"bitvector-set!",
"bitvector-set-all-bits!",
"bitvector-set-bit!",
"bitvector-set-bits!",
"bitvector?",
"boolean?",
"bound-identifier=?",
"break",
"break!",
"caaaar",
"caaadr",
"caaar",
"caadar",
"caaddr",
"caadr",
"caar",
"cadaar",
"cadadr",
"cadar",
"caddar",
"cadddr",
"caddr",
"cadr",
"call-with-blocked-asyncs",
"call-with-current-continuation",
"call-with-deferred-observers",
"call-with-include-port",
"call-with-input-file",
"call-with-input-string",
"call-with-module-autoload-lock",
"call-with-output-file",
"call-with-output-string",
"call-with-port",
"call-with-prompt",
"call-with-unblocked-asyncs",
"call-with-values",
"call/cc",
"canonicalize-path",
"car",
"car+cdr",
"catch",
"cdaaar",
"cdaadr",
"cdaar",
"cdadar",
"cdaddr",
"cdadr",
"cdar",
"cddaar",
"cddadr",
"cddar",
"cdddar",
"cddddr",
"cdddr",
"cddr",
"cdr",
"ceiling",
"ceiling-quotient",
"ceiling-remainder",
"ceiling/",
"centered-quotient",
"centered-remainder",
"centered/",
"char->integer",
"char-alphabetic?",
"char-ci<=?",
"char-ci<?",
"char-ci=?",
"char-ci>=?",
"char-ci>?",
"char-downcase",
"char-general-category",
"char-is-both?",
"char-lower-case?",
"char-numeric?",
"char-ready?",
"char-set",
"char-set->list",
"char-set->string",
"char-set-adjoin",
"char-set-adjoin!",
"char-set-any",
"char-set-complement",
"char-set-complement!",
"char-set-contains?",
"char-set-copy",
"char-set-count",
"char-set-cursor",
"char-set-cursor-next",
"char-set-delete",
"char-set-delete!",
"char-set-diff+intersection",
"char-set-diff+intersection!",
"char-set-difference",
"char-set-difference!",
"char-set-every",
"char-set-filter",
"char-set-filter!",
"char-set-fold",
"char-set-for-each",
"char-set-hash",
"char-set-intersection",
"char-set-intersection!",
"char-set-map",
"char-set-ref",
"char-set-size",
"char-set-unfold",
"char-set-unfold!",
"char-set-union",
"char-set-union!",
"char-set-xor",
"char-set-xor!",
"char-set<=",
"char-set=",
"char-set?",
"char-titlecase",
"char-upcase",
"char-upper-case?",
"char-whitespace?",
"char<=?",
"char<?",
"char=?",
"char>=?",
"char>?",
"char?",
"chdir",
"chmod",
"chown",
"chroot",
"circular-list",
"circular-list?",
"close",
"close-fdes",
"close-input-port",
"close-output-port",
"close-port",
"closedir",
"command-line",
"complex?",
"compose",
"concatenate",
"concatenate!",
"cond-expand-provide",
"connect",
"cons",
"cons*",
"cons-source",
"const",
"convert-assignment",
"copy-file",
"copy-random-state",
"copy-tree",
"cos",
"cosh",
"count",
"crypt",
"ctermid",
"current-dynamic-state",
"current-error-port",
"current-input-port",
"current-language",
"current-load-port",
"current-module",
"current-output-port",
"current-time",
"current-warning-port",
"datum->random-state",
"datum->syntax",
"debug-disable",
"debug-enable",
"debug-options",
"debug-options-interface",
"default-duplicate-binding-handler",
"default-duplicate-binding-procedures",
"default-prompt-tag",
"define!",
"define-module*",
"defined?",
"delete",
"delete!",
"delete-duplicates",
"delete-duplicates!",
"delete-file",
"delete1!",
"delq",
"delq!",
"delq1!",
"delv",
"delv!",
"delv1!",
"denominator",
"directory-stream?",
"dirname",
"display",
"display-application",
"display-backtrace",
"display-error",
"dotted-list?",
"doubly-weak-hash-table?",
"drain-input",
"drop",
"drop-right",
"drop-right!",
"drop-while",
"dup",
"dup->fdes",
"dup->inport",
"dup->outport",
"dup->port",
"dup2",
"duplicate-port",
"dynamic-call",
"dynamic-func",
"dynamic-link",
"dynamic-object?",
"dynamic-pointer",
"dynamic-state?",
"dynamic-unlink",
"dynamic-wind",
"effective-version",
"eighth",
"end-of-char-set?",
"endgrent",
"endhostent",
"endnetent",
"endprotoent",
"endpwent",
"endservent",
"ensure-batch-mode!",
"environ",
"eof-object?",
"eq?",
"equal?",
"eqv?",
"error",
"euclidean-quotient",
"euclidean-remainder",
"euclidean/",
"eval",
"eval-string",
"even?",
"every",
"exact->inexact",
"exact-integer-sqrt",
"exact-integer?",
"exact?",
"exception-accessor",
"exception-args",
"exception-kind",
"exception-predicate",
"exception-type?",
"exception?",
"execl",
"execle",
"execlp",
"exit",
"exp",
"expt",
"f32vector",
"f32vector->list",
"f32vector-length",
"f32vector-ref",
"f32vector-set!",
"f32vector?",
"f64vector",
"f64vector->list",
"f64vector-length",
"f64vector-ref",
"f64vector-set!",
"f64vector?",
"fcntl",
"fdes->inport",
"fdes->outport",
"fdes->ports",
"fdopen",
"fifth",
"file-encoding",
"file-exists?",
"file-is-directory?",
"file-name-separator?",
"file-port?",
"file-position",
"file-set-position",
"fileno",
"filter",
"filter!",
"filter-map",
"find",
"find-tail",
"finite?",
"first",
"flock",
"floor",
"floor-quotient",
"floor-remainder",
"floor/",
"fluid->parameter",
"fluid-bound?",
"fluid-ref",
"fluid-ref*",
"fluid-set!",
"fluid-thread-local?",
"fluid-unset!",
"fluid?",
"flush-all-ports",
"fold",
"fold-right",
"for-each",
"force",
"force-output",
"format",
"fourth",
"frame-address",
"frame-arguments",
"frame-dynamic-link",
"frame-instruction-pointer",
"frame-previous",
"frame-procedure-name",
"frame-return-address",
"frame-source",
"frame-stack-pointer",
"frame?",
"free-identifier=?",
"fsync",
"ftell",
"gai-strerror",
"gc",
"gc-disable",
"gc-dump",
"gc-enable",
"gc-run-time",
"gc-stats",
"gcd",
"generate-temporaries",
"gensym",
"get-internal-real-time",
"get-internal-run-time",
"get-output-string",
"get-print-state",
"getaddrinfo",
"getaffinity",
"getcwd",
"getegid",
"getenv",
"geteuid",
"getgid",
"getgr",
"getgrent",
"getgrgid",
"getgrnam",
"getgroups",
"gethost",
"gethostbyaddr",
"gethostbyname",
"gethostent",
"gethostname",
"getitimer",
"getlogin",
"getnet",
"getnetbyaddr",
"getnetbyname",
"getnetent",
"getpass",
"getpeername",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getproto",
"getprotobyname",
"getprotobynumber",
"getprotoent",
"getpw",
"getpwent",
"getpwnam",
"getpwuid",
"getrlimit",
"getserv",
"getservbyname",
"getservbyport",
"getservent",
"getsid",
"getsockname",
"getsockopt",
"gettext",
"gettimeofday",
"getuid",
"gmtime",
"group:gid",
"group:mem",
"group:name",
"group:passwd",
"hash",
"hash-clear!",
"hash-count",
"hash-create-handle!",
"hash-fold",
"hash-for-each",
"hash-for-each-handle",
"hash-get-handle",
"hash-map->list",
"hash-ref",
"hash-remove!",
"hash-set!",
"hash-table?",
"hashq",
"hashq-create-handle!",
"hashq-get-handle",
"hashq-ref",
"hashq-remove!",
"hashq-set!",
"hashv",
"hashv-create-handle!",
"hashv-get-handle",
"hashv-ref",
"hashv-remove!",
"hashv-set!",
"hashx-create-handle!",
"hashx-get-handle",
"hashx-ref",
"hashx-remove!",
"hashx-set!",
"hook->list",
"hook-empty?",
"hook?",
"hostent:addr-list",
"hostent:addrtype",
"hostent:aliases",
"hostent:length",
"hostent:name",
"identifier?",
"identity",
"imag-part",
"in-vicinity",
"include-deprecated-features",
"inet-lnaof",
"inet-makeaddr",
"inet-netof",
"inet-ntop",
"inet-pton",
"inexact->exact",
"inexact?",
"inf",
"inf?",
"inherit-print-state",
"input-port?",
"install-r6rs!",
"install-r7rs!",
"integer->char",
"integer-expt",
"integer-length",
"integer?",
"interaction-environment",
"iota",
"isatty?",
"issue-deprecation-warning",
"keyword->symbol",
"keyword-like-symbol->keyword",
"keyword?",
"kill",
"kw-arg-ref",
"last",
"last-pair",
"lcm",
"length",
"length+",
"link",
"list",
"list->array",
"list->bitvector",
"list->char-set",
"list->char-set!",
"list->f32vector",
"list->f64vector",
"list->s16vector",
"list->s32vector",
"list->s64vector",
"list->s8vector",
"list->string",
"list->symbol",
"list->typed-array",
"list->u16vector",
"list->u32vector",
"list->u64vector",
"list->u8vector",
"list->vector",
"list-cdr-ref",
"list-cdr-set!",
"list-copy",
"list-head",
"list-index",
"list-ref",
"list-set!",
"list-tabulate",
"list-tail",
"list=",
"list?",
"listen",
"load-compiled",
"load-extension",
"load-from-path",
"load-in-vicinity",
"load-user-init",
"local-define",
"local-define-module",
"local-ref",
"local-ref-module",
"local-remove",
"local-set!",
"localtime",
"log",
"log10",
"logand",
"logbit?",
"logcount",
"logior",
"lognot",
"logtest",
"logxor",
"lookup-duplicates-handlers",
"lset-adjoin",
"lset-diff+intersection",
"lset-diff+intersection!",
"lset-difference",
"lset-difference!",
"lset-intersection",
"lset-intersection!",
"lset-union",
"lset-union!",
"lset-xor",
"lset-xor!",
"lset<=",
"lset=",
"lstat",
"macro-binding",
"macro-name",
"macro-transformer",
"macro-type",
"macro?",
"macroexpand",
"macroexpanded?",
"magnitude",
"major-version",
"make-array",
"make-autoload-interface",
"make-bitvector",
"make-doubly-weak-hash-table",
"make-exception",
"make-exception-from-throw",
"make-exception-type",
"make-f32vector",
"make-f64vector",
"make-fluid",
"make-fresh-user-module",
"make-generalized-vector",
"make-guardian",
"make-hash-table",
"make-hook",
"make-list",
"make-module",
"make-modules-in",
"make-mutable-parameter",
"make-object-property",
"make-parameter",
"make-polar",
"make-procedure-with-setter",
"make-promise",
"make-prompt-tag",
"make-record-type",
"make-rectangular",
"make-regexp",
"make-s16vector",
"make-s32vector",
"make-s64vector",
"make-s8vector",
"make-shared-array",
"make-socket-address",
"make-soft-port",
"make-srfi-4-vector",
"make-stack",
"make-string",
"make-struct-layout",
"make-struct/no-tail",
"make-struct/simple",
"make-symbol",
"make-syntax-transformer",
"make-thread-local-fluid",
"make-typed-array",
"make-u16vector",
"make-u32vector",
"make-u64vector",
"make-u8vector",
"make-unbound-fluid",
"make-undefined-variable",
"make-variable",
"make-variable-transformer",
"make-vector",
"make-vtable",
"make-weak-key-hash-table",
"make-weak-value-hash-table",
"map",
"map!",
"map-in-order",
"max",
"member",
"memoize-expression",
"memoized-typecode",
"memq",
"memv",
"merge",
"merge!",
"micro-version",
"min",
"minor-version",
"mkdir",
"mkdtemp",
"mknod",
"mkstemp",
"mkstemp!",
"mktime",
"module-add!",
"module-autoload!",
"module-binder",
"module-bound?",
"module-call-observers",
"module-clear!",
"module-constructor",
"module-declarative?",
"module-defer-observers",
"module-define!",
"module-define-submodule!",
"module-defined?",
"module-duplicates-handlers",
"module-ensure-local-variable!",
"module-export!",
"module-export-all!",
"module-filename",
"module-for-each",
"module-generate-unique-id!",
"module-gensym",
"module-import-interface",
"module-import-obarray",
"module-kind",
"module-local-variable",
"module-locally-bound?",
"module-make-local-var!",
"module-map",
"module-modified",
"module-name",
"module-next-unique-id",
"module-obarray",
"module-obarray-get-handle",
"module-obarray-ref",
"module-obarray-remove!",
"module-obarray-set!",
"module-observe",
"module-observe-weak",
"module-observers",
"module-public-interface",
"module-re-export!",
"module-ref",
"module-ref-submodule",
"module-remove!",
"module-replace!",
"module-replacements",
"module-reverse-lookup",
"module-search",
"module-set!",
"module-submodule-binder",
"module-submodules",
"module-symbol-binding",
"module-symbol-interned?",
"module-symbol-local-binding",
"module-symbol-locally-interned?",
"module-transformer",
"module-unobserve",
"module-use!",
"module-use-interfaces!",
"module-uses",
"module-variable",
"module-version",
"module-weak-observers",
"module?",
"modulo",
"modulo-expt",
"move->fdes",
"nan",
"nan?",
"negate",
"negative?",
"nested-define!",
"nested-define-module!",
"nested-ref",
"nested-ref-module",
"nested-remove!",
"nested-set!",
"netent:addrtype",
"netent:aliases",
"netent:name",
"netent:net",
"newline",
"ngettext",
"nice",
"nil?",
"ninth",
"noop",
"not",
"not-pair?",
"null-environment",
"null-list?",
"null?",
"number->string",
"number?",
"numerator",
"object->string",
"object-address",
"object-properties",
"object-property",
"odd?",
"open",
"open-fdes",
"open-file",
"open-input-file",
"open-input-string",
"open-io-file",
"open-output-file",
"open-output-string",
"opendir",
"or-map",
"output-port?",
"pair-fold",
"pair-fold-right",
"pair-for-each",
"pair?",
"parameter-converter",
"parameter-fluid",
"parameter?",
"parse-path",
"parse-path-with-ellipsis",
"partition",
"partition!",
"passwd:dir",
"passwd:gecos",
"passwd:gid",
"passwd:name",
"passwd:passwd",
"passwd:shell",
"passwd:uid",
"pause",
"peek",
"peek-char",
"pipe",
"pk",
"port->fdes",
"port-closed?",
"port-column",
"port-conversion-strategy",
"port-encoding",
"port-filename",
"port-for-each",
"port-line",
"port-mode",
"port-revealed",
"port-with-print-state",
"port?",
"positive?",
"primitive-_exit",
"primitive-eval",
"primitive-exit",
"primitive-fork",
"primitive-load",
"primitive-load-path",
"primitive-move->fdes",
"primitive-read",
"print-disable",
"print-enable",
"print-exception",
"print-options",
"print-options-interface",
"procedure",
"procedure-documentation",
"procedure-minimum-arity",
"procedure-name",
"procedure-properties",
"procedure-property",
"procedure-source",
"procedure-with-setter?",
"procedure?",
"process-use-modules",
"program-arguments",
"promise?",
"proper-list?",
"protoent:aliases",
"protoent:name",
"protoent:proto",
"provide",
"provided?",
"purify-module!",
"putenv",
"quit",
"quotient",
"raise",
"raise-exception",
"random",
"random-state->datum",
"random-state-from-platform",
"random:exp",
"random:hollow-sphere!",
"random:normal",
"random:normal-vector!",
"random:solid-sphere!",
"random:uniform",
"rational?",
"rationalize",
"read",
"read-char",
"read-disable",
"read-enable",
"read-hash-extend",
"read-hash-procedure",
"read-hash-procedures",
"read-options",
"read-options-interface",
"read-syntax",
"readdir",
"readlink",
"real-part",
"real?",
"record-accessor",
"record-constructor",
"record-modifier",
"record-predicate",
"record-type-constructor",
"record-type-descriptor",
"record-type-extensible?",
"record-type-fields",
"record-type-has-parent?",
"record-type-mutable-fields",
"record-type-name",
"record-type-opaque?",
"record-type-parent",
"record-type-parents",
"record-type-properties",
"record-type-uid",
"record-type?",
"record?",
"recv!",
"recvfrom!",
"redirect-port",
"reduce",
"reduce-right",
"regexp-exec",
"regexp?",
"release-port-handle",
"reload-module",
"remainder",
"remove",
"remove!",
"remove-hook!",
"rename-file",
"repl-reader",
"reset-hook!",
"resolve-interface",
"resolve-module",
"resolve-r6rs-interface",
"restore-signals",
"restricted-vector-sort!",
"reverse",
"reverse!",
"reverse-list->string",
"rewinddir",
"rmdir",
"round",
"round-ash",
"round-quotient",
"round-remainder",
"round/",
"run-hook",
"s16vector",
"s16vector->list",
"s16vector-length",
"s16vector-ref",
"s16vector-set!",
"s16vector?",
"s32vector",
"s32vector->list",
"s32vector-length",
"s32vector-ref",
"s32vector-set!",
"s32vector?",
"s64vector",
"s64vector->list",
"s64vector-length",
"s64vector-ref",
"s64vector-set!",
"s64vector?",
"s8vector",
"s8vector->list",
"s8vector-length",
"s8vector-ref",
"s8vector-set!",
"s8vector?",
"save-module-excursion",
"scheme-report-environment",
"scm-error",
"search-path",
"second",
"seed->random-state",
"seek",
"select",
"self-evaluating?",
"send",
"sendfile",
"sendto",
"servent:aliases",
"servent:name",
"servent:port",
"servent:proto",
"set-autoloaded!",
"set-car!",
"set-cdr!",
"set-current-dynamic-state",
"set-current-error-port",
"set-current-input-port",
"set-current-module",
"set-current-output-port",
"set-exception-printer!",
"set-module-binder!",
"set-module-declarative?!",
"set-module-duplicates-handlers!",
"set-module-filename!",
"set-module-kind!",
"set-module-name!",
"set-module-next-unique-id!",
"set-module-obarray!",
"set-module-observers!",
"set-module-public-interface!",
"set-module-submodule-binder!",
"set-module-submodules!",
"set-module-transformer!",
"set-module-uses!",
"set-module-version!",
"set-object-properties!",
"set-object-property!",
"set-port-column!",
"set-port-conversion-strategy!",
"set-port-encoding!",
"set-port-filename!",
"set-port-line!",
"set-port-revealed!",
"set-procedure-minimum-arity!",
"set-procedure-properties!",
"set-procedure-property!",
"set-program-arguments",
"set-source-properties!",
"set-source-property!",
"set-struct-vtable-name!",
"set-symbol-property!",
"set-tm:gmtoff",
"set-tm:hour",
"set-tm:isdst",
"set-tm:mday",
"set-tm:min",
"set-tm:mon",
"set-tm:sec",
"set-tm:wday",
"set-tm:yday",
"set-tm:year",
"set-tm:zone",
"setaffinity",
"setegid",
"setenv",
"seteuid",
"setgid",
"setgr",
"setgrent",
"setgroups",
"sethost",
"sethostent",
"sethostname",
"setitimer",
"setlocale",
"setnet",
"setnetent",
"setpgid",
"setpriority",
"setproto",
"setprotoent",
"setpw",
"setpwent",
"setrlimit",
"setserv",
"setservent",
"setsid",
"setsockopt",
"setter",
"setuid",
"setvbuf",
"seventh",
"shared-array-increments",
"shared-array-offset",
"shared-array-root",
"shutdown",
"sigaction",
"simple-exceptions",
"simple-format",
"sin",
"sinh",
"sixth",
"sleep",
"sloppy-assoc",
"sloppy-assq",
"sloppy-assv",
"sockaddr:addr",
"sockaddr:fam",
"sockaddr:flowinfo",
"sockaddr:path",
"sockaddr:port",
"sockaddr:scopeid",
"socket",
"socketpair",
"sort",
"sort!",
"sort-list",
"sort-list!",
"sorted?",
"source-properties",
"source-property",
"span",
"span!",
"split-at",
"split-at!",
"sqrt",
"stable-sort",
"stable-sort!",
"stack-id",
"stack-length",
"stack-ref",
"stack?",
"stat",
"stat:atime",
"stat:atimensec",
"stat:blksize",
"stat:blocks",
"stat:ctime",
"stat:ctimensec",
"stat:dev",
"stat:gid",
"stat:ino",
"stat:mode",
"stat:mtime",
"stat:mtimensec",
"stat:nlink",
"stat:perms",
"stat:rdev",
"stat:size",
"stat:type",
"stat:uid",
"status:exit-val",
"status:stop-sig",
"status:term-sig",
"strerror",
"strftime",
"string",
"string->char-set",
"string->char-set!",
"string->list",
"string->number",
"string->symbol",
"string-any",
"string-any-c-code",
"string-append",
"string-append/shared",
"string-bytes-per-char",
"string-capitalize",
"string-capitalize!",
"string-ci->symbol",
"string-ci<",
"string-ci<=",
"string-ci<=?",
"string-ci<>",
"string-ci<?",
"string-ci=",
"string-ci=?",
"string-ci>",
"string-ci>=",
"string-ci>=?",
"string-ci>?",
"string-compare",
"string-compare-ci",
"string-concatenate",
"string-concatenate-reverse",
"string-concatenate-reverse/shared",
"string-concatenate/shared",
"string-contains",
"string-contains-ci",
"string-copy",
"string-copy!",
"string-count",
"string-delete",
"string-downcase",
"string-downcase!",
"string-drop",
"string-drop-right",
"string-every",
"string-every-c-code",
"string-fill!",
"string-filter",
"string-fold",
"string-fold-right",
"string-for-each",
"string-for-each-index",
"string-hash",
"string-hash-ci",
"string-index",
"string-index-right",
"string-join",
"string-length",
"string-map",
"string-map!",
"string-normalize-nfc",
"string-normalize-nfd",
"string-normalize-nfkc",
"string-normalize-nfkd",
"string-null?",
"string-pad",
"string-pad-right",
"string-prefix-ci?",
"string-prefix-length",
"string-prefix-length-ci",
"string-prefix?",
"string-ref",
"string-replace",
"string-reverse",
"string-reverse!",
"string-rindex",
"string-set!",
"string-skip",
"string-skip-right",
"string-split",
"string-suffix-ci?",
"string-suffix-length",
"string-suffix-length-ci",
"string-suffix?",
"string-tabulate",
"string-take",
"string-take-right",
"string-titlecase",
"string-titlecase!",
"string-tokenize",
"string-trim",
"string-trim-both",
"string-trim-right",
"string-unfold",
"string-unfold-right",
"string-upcase",
"string-upcase!",
"string-utf8-length",
"string-xcopy!",
"string<",
"string<=",
"string<=?",
"string<>",
"string<?",
"string=",
"string=?",
"string>",
"string>=",
"string>=?",
"string>?",
"string?",
"strptime",
"struct-layout",
"struct-ref",
"struct-ref/unboxed",
"struct-set!",
"struct-set!/unboxed",
"struct-vtable",
"struct-vtable-name",
"struct-vtable?",
"struct?",
"substring",
"substring-fill!",
"substring-move!",
"substring/copy",
"substring/read-only",
"substring/shared",
"supports-source-properties?",
"symbol",
"symbol->keyword",
"symbol->string",
"symbol-append",
"symbol-fref",
"symbol-fset!",
"symbol-hash",
"symbol-interned?",
"symbol-pref",
"symbol-prefix-proc",
"symbol-property",
"symbol-property-remove!",
"symbol-pset!",
"symbol?",
"symlink",
"sync",
"syntax->datum",
"syntax-source",
"syntax-violation",
"system",
"system*",
"system-async-mark",
"system-error-errno",
"system-file-name-convention",
"take",
"take!",
"take-right",
"take-while",
"take-while!",
"tan",
"tanh",
"tcgetpgrp",
"tcsetpgrp",
"tenth",
"textdomain",
"third",
"throw",
"thunk?",
"times",
"tm:gmtoff",
"tm:hour",
"tm:isdst",
"tm:mday",
"tm:min",
"tm:mon",
"tm:sec",
"tm:wday",
"tm:yday",
"tm:year",
"tm:zone",
"tmpfile",
"tmpnam",
"tms:clock",
"tms:cstime",
"tms:cutime",
"tms:stime",
"tms:utime",
"transpose-array",
"truncate",
"truncate-file",
"truncate-quotient",
"truncate-remainder",
"truncate/",
"try-load-module",
"try-module-autoload",
"ttyname",
"typed-array?",
"tzset",
"u16vector",
"u16vector->list",
"u16vector-length",
"u16vector-ref",
"u16vector-set!",
"u16vector?",
"u32vector",
"u32vector->list",
"u32vector-length",
"u32vector-ref",
"u32vector-set!",
"u32vector?",
"u64vector",
"u64vector->list",
"u64vector-length",
"u64vector-ref",
"u64vector-set!",
"u64vector?",
"u8vector",
"u8vector->list",
"u8vector-length",
"u8vector-ref",
"u8vector-set!",
"u8vector?",
"ucs-range->char-set",
"ucs-range->char-set!",
"umask",
"uname",
"unfold",
"unfold-right",
"unmemoize-expression",
"unread-char",
"unread-string",
"unsetenv",
"unspecified?",
"unzip1",
"unzip2",
"unzip3",
"unzip4",
"unzip5",
"use-srfis",
"user-modules-declarative?",
"using-readline?",
"usleep",
"utime",
"utsname:machine",
"utsname:nodename",
"utsname:release",
"utsname:sysname",
"utsname:version",
"values",
"variable-bound?",
"variable-ref",
"variable-set!",
"variable-unset!",
"variable?",
"vector",
"vector->list",
"vector-copy",
"vector-fill!",
"vector-length",
"vector-move-left!",
"vector-move-right!",
"vector-ref",
"vector-set!",
"vector?",
"version",
"version-matches?",
"waitpid",
"warn",
"weak-key-hash-table?",
"weak-value-hash-table?",
"with-continuation-barrier",
"with-dynamic-state",
"with-error-to-file",
"with-error-to-port",
"with-error-to-string",
"with-exception-handler",
"with-fluid*",
"with-fluids*",
"with-input-from-file",
"with-input-from-port",
"with-input-from-string",
"with-output-to-file",
"with-output-to-port",
"with-output-to-string",
"with-throw-handler",
"write",
"write-char",
"xcons",
"xsubstring",
"zero?",
"zip",
}
| 32,563 | Python | 19.226087 | 70 | 0.539907 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/grammar_notation.py | """
pygments.lexers.grammar_notation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for grammar notations like BNF.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, this, using, words
from pygments.token import Comment, Keyword, Literal, Name, Number, \
Operator, Punctuation, String, Text, Whitespace
__all__ = ['BnfLexer', 'AbnfLexer', 'JsgfLexer', 'PegLexer']
class BnfLexer(RegexLexer):
"""
    This lexer is for grammar notations that are similar to the
    original BNF.
    To maximize the number of notations this lexer can target, the
    following design decisions were made:
    * Terminal symbols are not distinguished.
    * Nonterminal symbols are assumed to always be enclosed in angle
      brackets.
    * Nonterminal symbols may include any printable characters except
      angle brackets and ASCII 0x20. This assumption accommodates
      `RBNF <http://www.rfc-base.org/txt/rfc-5511.txt>`_.
    * The target notation is assumed not to support comments.
    * No operators or punctuation are distinguished except `::=`.
    These decisions can result in fairly minimal highlighting, but they
    keep the lexer applicable to many BNF dialects.
.. versionadded:: 2.1
"""
name = 'BNF'
aliases = ['bnf']
filenames = ['*.bnf']
mimetypes = ['text/x-bnf']
tokens = {
'root': [
(r'(<)([ -;=?-~]+)(>)',
bygroups(Punctuation, Name.Class, Punctuation)),
# an only operator
(r'::=', Operator),
# fallback
(r'[^<>:]+', Text), # for performance
(r'.', Text),
],
}
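# Editorial usage sketch, not part of the upstream module: a minimal check of
# the design decisions documented above. The sample grammar line below is an
# illustrative assumption; angle-bracketed nonterminals should be emitted as
# Name.Class and '::=' as Operator.
if __name__ == '__main__':
    _sample_bnf = '<expr> ::= <term> "+" <expr>'
    for _ttype, _value in BnfLexer().get_tokens(_sample_bnf):
        print(_ttype, repr(_value))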
class AbnfLexer(RegexLexer):
"""
    Lexer for IETF RFC 7405 ABNF grammars
    (which updates RFC `5234 <http://www.ietf.org/rfc/rfc5234.txt>`_).
.. versionadded:: 2.1
"""
name = 'ABNF'
url = 'http://www.ietf.org/rfc/rfc7405.txt'
aliases = ['abnf']
filenames = ['*.abnf']
mimetypes = ['text/x-abnf']
_core_rules = (
'ALPHA', 'BIT', 'CHAR', 'CR', 'CRLF', 'CTL', 'DIGIT',
'DQUOTE', 'HEXDIG', 'HTAB', 'LF', 'LWSP', 'OCTET',
'SP', 'VCHAR', 'WSP')
tokens = {
'root': [
# comment
(r';.*$', Comment.Single),
            # quoted string; a double quote cannot appear inside it and
            # must be written as '%x22' instead.
(r'(%[si])?"[^"]*"', Literal),
            # binary (rarely seen in practice)
(r'%b[01]+\-[01]+\b', Literal), # range
(r'%b[01]+(\.[01]+)*\b', Literal), # concat
# decimal
(r'%d[0-9]+\-[0-9]+\b', Literal), # range
(r'%d[0-9]+(\.[0-9]+)*\b', Literal), # concat
# hexadecimal
(r'%x[0-9a-fA-F]+\-[0-9a-fA-F]+\b', Literal), # range
(r'%x[0-9a-fA-F]+(\.[0-9a-fA-F]+)*\b', Literal), # concat
# repetition (<a>*<b>element) including nRule
(r'\b[0-9]+\*[0-9]+', Operator),
(r'\b[0-9]+\*', Operator),
(r'\b[0-9]+', Operator),
(r'\*', Operator),
            # Strictly speaking, these are not keywords but the
            # so-called `Core Rules'.
(words(_core_rules, suffix=r'\b'), Keyword),
# nonterminals (ALPHA *(ALPHA / DIGIT / "-"))
(r'[a-zA-Z][a-zA-Z0-9-]*\b', Name.Class),
# operators
(r'(=/|=|/)', Operator),
# punctuation
(r'[\[\]()]', Punctuation),
# fallback
(r'\s+', Whitespace),
(r'.', Text),
],
}
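# Editorial usage sketch, not part of the upstream module: the sample rule is
# an illustrative assumption. 'HEXDIG' and 'DIGIT' should be emitted as Core
# Rules (Keyword) and the '%x' range as Literal.
if __name__ == '__main__':
    _sample_abnf = 'HEXDIG = DIGIT / %x41-46  ; A-F'
    for _ttype, _value in AbnfLexer().get_tokens(_sample_abnf):
        print(_ttype, repr(_value))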
class JsgfLexer(RegexLexer):
"""
For JSpeech Grammar Format grammars.
.. versionadded:: 2.2
"""
name = 'JSGF'
url = 'https://www.w3.org/TR/jsgf/'
aliases = ['jsgf']
filenames = ['*.jsgf']
mimetypes = ['application/jsgf', 'application/x-jsgf', 'text/jsgf']
tokens = {
'root': [
include('comments'),
include('non-comments'),
],
'comments': [
(r'/\*\*(?!/)', Comment.Multiline, 'documentation comment'),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*$', Comment.Single),
],
'non-comments': [
(r'\A#JSGF[^;]*', Comment.Preproc),
(r'\s+', Whitespace),
(r';', Punctuation),
(r'[=|()\[\]*+]', Operator),
(r'/[^/]+/', Number.Float),
(r'"', String.Double, 'string'),
(r'\{', String.Other, 'tag'),
(words(('import', 'public'), suffix=r'\b'), Keyword.Reserved),
(r'grammar\b', Keyword.Reserved, 'grammar name'),
(r'(<)(NULL|VOID)(>)',
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r'<', Punctuation, 'rulename'),
(r'\w+|[^\s;=|()\[\]*+/"{<\w]+', Text),
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\.', String.Escape),
(r'[^\\"]+', String.Double),
],
'tag': [
(r'\}', String.Other, '#pop'),
(r'\\.', String.Escape),
(r'[^\\}]+', String.Other),
],
'grammar name': [
(r';', Punctuation, '#pop'),
(r'\s+', Whitespace),
(r'\.', Punctuation),
(r'[^;\s.]+', Name.Namespace),
],
'rulename': [
(r'>', Punctuation, '#pop'),
(r'\*', Punctuation),
(r'\s+', Whitespace),
(r'([^.>]+)(\s*)(\.)', bygroups(Name.Namespace, Text, Punctuation)),
(r'[^.>]+', Name.Constant),
],
'documentation comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'^(\s*)(\*?)(\s*)(@(?:example|see))(\s+)'
r'([\w\W]*?(?=(?:^\s*\*?\s*@|\*/)))',
bygroups(Whitespace, Comment.Multiline, Whitespace, Comment.Special,
Whitespace, using(this, state='example'))),
(r'(^\s*\*?\s*)(@\S*)',
bygroups(Comment.Multiline, Comment.Special)),
(r'[^*\n@]+|\w|\W', Comment.Multiline),
],
'example': [
(r'(\n\s*)(\*)', bygroups(Whitespace, Comment.Multiline)),
include('non-comments'),
(r'.', Comment.Multiline),
],
}
class PegLexer(RegexLexer):
"""
This lexer is for Parsing Expression Grammars (PEG).
Various implementations of PEG have made different decisions
regarding the syntax, so let's try to be accommodating:
* `<-`, `←`, `:`, and `=` are all accepted as rule operators.
* Both `|` and `/` are choice operators.
* `^`, `↑`, and `~` are cut operators.
* A single `a-z` character immediately before a string, or
multiple `a-z` characters following a string, are part of the
string (e.g., `r"..."` or `"..."ilmsuxa`).
.. versionadded:: 2.6
"""
name = 'PEG'
url = 'https://bford.info/pub/lang/peg.pdf'
aliases = ['peg']
filenames = ['*.peg']
mimetypes = ['text/x-peg']
tokens = {
'root': [
# Comments
(r'#.*$', Comment.Single),
# All operators
(r'<-|[←:=/|&!?*+^↑~]', Operator),
# Other punctuation
(r'[()]', Punctuation),
# Keywords
(r'\.', Keyword),
# Character classes
(r'(\[)([^\]]*(?:\\.[^\]\\]*)*)(\])',
bygroups(Punctuation, String, Punctuation)),
# Single and double quoted strings (with optional modifiers)
(r'[a-z]?"[^"\\]*(?:\\.[^"\\]*)*"[a-z]*', String.Double),
(r"[a-z]?'[^'\\]*(?:\\.[^'\\]*)*'[a-z]*", String.Single),
# Nonterminals are not whitespace, operators, or punctuation
(r'[^\s<←:=/|&!?*+\^↑~()\[\]"\'#]+', Name.Class),
# Fallback
(r'.', Text),
],
}
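# Editorial usage sketch, not part of the upstream module: the grammar line is
# an illustrative assumption. Per the accommodations listed in the docstring,
# both '<-' and '/' should be emitted as Operator, and Expr/Term as Name.Class.
if __name__ == '__main__':
    _sample_peg = "Expr <- Term ('+' Term)* / Term"
    for _ttype, _value in PegLexer().get_tokens(_sample_peg):
        print(_ttype, repr(_value))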
| 7,968 | Python | 28.958647 | 81 | 0.467369 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/qvt.py | """
pygments.lexers.qvt
~~~~~~~~~~~~~~~~~~~
Lexer for QVT Operational language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, combined, default, \
words
from pygments.token import Text, Comment, Operator, Keyword, Punctuation, \
Name, String, Number
__all__ = ['QVToLexer']
class QVToLexer(RegexLexer):
"""
For the `QVT Operational Mapping language <http://www.omg.org/spec/QVT/1.1/>`_.
Reference for implementing this: «Meta Object Facility (MOF) 2.0
Query/View/Transformation Specification», Version 1.1 - January 2011
(http://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in
particular.
Notable tokens assignments:
- Name.Class is assigned to the identifier following any of the following
keywords: metamodel, class, exception, primitive, enum, transformation
or library
- Name.Function is assigned to the names of mappings and queries
- Name.Builtin.Pseudo is assigned to the pre-defined variables 'this',
'self' and 'result'.
"""
# With obvious borrowings & inspiration from the Java, Python and C lexers
name = 'QVTO'
aliases = ['qvto', 'qvt']
filenames = ['*.qvto']
tokens = {
'root': [
(r'\n', Text),
(r'[^\S\n]+', Text),
(r'(--|//)(\s*)(directive:)?(.*)$',
bygroups(Comment, Comment, Comment.Preproc, Comment)),
# Uncomment the following if you want to distinguish between
# '/*' and '/**', à la javadoc
# (r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'\\\n', Text),
(r'(and|not|or|xor|##?)\b', Operator.Word),
(r'(:{1,2}=|[-+]=)\b', Operator.Word),
(r'(@|<<|>>)\b', Keyword), # stereotypes
(r'!=|<>|==|=|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator),
(r'[]{}:(),;[]', Punctuation),
(r'(true|false|unlimited|null)\b', Keyword.Constant),
(r'(this|self|result)\b', Name.Builtin.Pseudo),
(r'(var)\b', Keyword.Declaration),
(r'(from|import)\b', Keyword.Namespace, 'fromimport'),
(r'(metamodel|class|exception|primitive|enum|transformation|'
r'library)(\s+)(\w+)',
bygroups(Keyword.Word, Text, Name.Class)),
(r'(exception)(\s+)(\w+)',
bygroups(Keyword.Word, Text, Name.Exception)),
(r'(main)\b', Name.Function),
(r'(mapping|helper|query)(\s+)',
bygroups(Keyword.Declaration, Text), 'operation'),
(r'(assert)(\s+)\b', bygroups(Keyword, Text), 'assert'),
(r'(Bag|Collection|Dict|OrderedSet|Sequence|Set|Tuple|List)\b',
Keyword.Type),
include('keywords'),
('"', String, combined('stringescape', 'dqs')),
("'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
# (r'([a-zA-Z_]\w*)(::)([a-zA-Z_]\w*)',
# bygroups(Text, Text, Text)),
],
'fromimport': [
(r'(?:[ \t]|\\\n)+', Text),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
default('#pop'),
],
'operation': [
(r'::', Text),
(r'(.*::)([a-zA-Z_]\w*)([ \t]*)(\()',
bygroups(Text, Name.Function, Text, Punctuation), '#pop')
],
'assert': [
(r'(warning|error|fatal)\b', Keyword, '#pop'),
default('#pop'), # all else: go back
],
'keywords': [
(words((
'abstract', 'access', 'any', 'assert', 'blackbox', 'break',
'case', 'collect', 'collectNested', 'collectOne', 'collectselect',
'collectselectOne', 'composes', 'compute', 'configuration',
'constructor', 'continue', 'datatype', 'default', 'derived',
'disjuncts', 'do', 'elif', 'else', 'end', 'endif', 'except',
'exists', 'extends', 'forAll', 'forEach', 'forOne', 'from', 'if',
'implies', 'in', 'inherits', 'init', 'inout', 'intermediate',
'invresolve', 'invresolveIn', 'invresolveone', 'invresolveoneIn',
'isUnique', 'iterate', 'late', 'let', 'literal', 'log', 'map',
'merges', 'modeltype', 'new', 'object', 'one', 'ordered', 'out',
'package', 'population', 'property', 'raise', 'readonly',
'references', 'refines', 'reject', 'resolve', 'resolveIn',
'resolveone', 'resolveoneIn', 'return', 'select', 'selectOne',
'sortedBy', 'static', 'switch', 'tag', 'then', 'try', 'typedef',
'unlimited', 'uses', 'when', 'where', 'while', 'with', 'xcollect',
'xmap', 'xselect'), suffix=r'\b'), Keyword),
],
# There is no need to distinguish between String.Single and
# String.Double: 'strings' is factorised for 'dqs' and 'sqs'
'strings': [
(r'[^\\\'"\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
],
'stringescape': [
(r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape)
],
'dqs': [ # double-quoted string
(r'"', String, '#pop'),
(r'\\\\|\\"', String.Escape),
include('strings')
],
'sqs': [ # single-quoted string
(r"'", String, '#pop'),
(r"\\\\|\\'", String.Escape),
include('strings')
],
'name': [
(r'[a-zA-Z_]\w*', Name),
],
# numbers: excerpt taken from the python lexer
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer)
],
}
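# Editorial usage sketch, not part of the upstream module: a minimal QVTO
# mapping header, chosen as an illustrative assumption. 'mapping' should be
# emitted as Keyword.Declaration and 'class2table' as Name.Function via the
# 'operation' state.
if __name__ == '__main__':
    _sample_qvto = 'mapping UML::Class::class2table() : RDBMS::Table {\n}\n'
    for _ttype, _value in QVToLexer().get_tokens(_sample_qvto):
        print(_ttype, repr(_value))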
| 6,066 | Python | 38.914473 | 83 | 0.48846 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/bare.py | """
pygments.lexers.bare
~~~~~~~~~~~~~~~~~~~~
Lexer for the BARE schema.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, bygroups
from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace
__all__ = ['BareLexer']
class BareLexer(RegexLexer):
"""
For BARE schema source.
.. versionadded:: 2.7
"""
name = 'BARE'
url = 'https://baremessages.org'
filenames = ['*.bare']
aliases = ['bare']
keywords = [
'type',
'enum',
'u8',
'u16',
'u32',
'u64',
'uint',
'i8',
'i16',
'i32',
'i64',
'int',
'f32',
'f64',
'bool',
'void',
'data',
'string',
'optional',
'map',
]
tokens = {
'root': [
(r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\{)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'struct'),
(r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\()',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'union'),
(r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)',
bygroups(Keyword, Whitespace, Name, Whitespace), 'typedef'),
(r'(enum)(\s+)([A-Z][a-zA-Z0-9]+)(\s+\{)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace), 'enum'),
(r'#.*?$', Comment),
(r'\s+', Whitespace),
],
'struct': [
(r'\{', Text, '#push'),
(r'\}', Text, '#pop'),
(r'([a-zA-Z0-9]+)(:)(\s*)',
bygroups(Name.Attribute, Text, Whitespace), 'typedef'),
(r'\s+', Whitespace),
],
'union': [
(r'\)', Text, '#pop'),
(r'(\s*)(\|)(\s*)', bygroups(Whitespace, Text, Whitespace)),
(r'[A-Z][a-zA-Z0-9]+', Name.Class),
(words(keywords), Keyword),
(r'\s+', Whitespace),
],
'typedef': [
(r'\[\]', Text),
(r'#.*?$', Comment, '#pop'),
(r'(\[)(\d+)(\])', bygroups(Text, Literal, Text)),
(r'<|>', Text),
(r'\(', Text, 'union'),
            (r'(\[)([a-z][a-zA-Z0-9-]+)(\])', bygroups(Text, Keyword, Text)),
            (r'(\[)([A-Z][a-zA-Z0-9-]+)(\])', bygroups(Text, Name.Class, Text)),
            (r'([A-Z][a-zA-Z0-9-]+)', Name.Class),
(words(keywords), Keyword),
(r'\n', Text, '#pop'),
(r'\{', Text, 'struct'),
(r'\s+', Whitespace),
(r'\d+', Literal),
],
'enum': [
(r'\{', Text, '#push'),
(r'\}', Text, '#pop'),
(r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)',
bygroups(Name.Attribute, Text, Literal)),
(r'([A-Z][A-Z0-9_]*)', bygroups(Name.Attribute)),
(r'#.*?$', Comment),
(r'\s+', Whitespace),
],
}
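# Editorial usage sketch, not part of the upstream module: a small BARE schema,
# chosen as an illustrative assumption. 'type' should be emitted as Keyword,
# 'Customer' as Name.Class, and the field names as Name.Attribute.
if __name__ == '__main__':
    _sample_bare = 'type Customer {\n  name: string\n  age: u8\n}\n'
    for _ttype, _value in BareLexer().get_tokens(_sample_bare):
        print(_ttype, repr(_value))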
| 3,021 | Python | 28.339806 | 84 | 0.406157 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/felix.py | """
pygments.lexers.felix
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Felix language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default, words, \
combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['FelixLexer']
class FelixLexer(RegexLexer):
"""
For Felix source code.
.. versionadded:: 1.2
"""
name = 'Felix'
url = 'http://www.felix-lang.org'
aliases = ['felix', 'flx']
filenames = ['*.flx', '*.flxh']
mimetypes = ['text/x-felix']
preproc = (
'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
)
keywords = (
'_', '_deref', 'all', 'as',
'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
'when', 'whilst', 'with', 'yield',
)
keyword_directives = (
'_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
'package', 'private', 'pod', 'property', 'public', 'publish',
'requires', 'todo', 'virtual', 'use',
)
keyword_declarations = (
'def', 'let', 'ref', 'val', 'var',
)
keyword_types = (
'unit', 'void', 'any', 'bool',
'byte', 'offset',
'address', 'caddress', 'cvaddress', 'vaddress',
'tiny', 'short', 'int', 'long', 'vlong',
'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float', 'double', 'ldouble',
'complex', 'dcomplex', 'lcomplex',
'imaginary', 'dimaginary', 'limaginary',
'char', 'wchar', 'uchar',
'charp', 'charcp', 'ucharp', 'ucharcp',
'string', 'wstring', 'ustring',
'cont',
'array', 'varray', 'list',
'lvalue', 'opt', 'slice',
)
keyword_constants = (
'false', 'true',
)
operator_words = (
'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
)
name_builtins = (
'_svc', 'while',
)
name_pseudo = (
'root', 'self', 'this',
)
decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
tokens = {
'root': [
include('whitespace'),
# Keywords
(words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce',
'union'), suffix=r'\b'),
Keyword, 'funcname'),
(words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'),
Keyword, 'classname'),
(r'(instance|module|typeclass)\b', Keyword, 'modulename'),
(words(keywords, suffix=r'\b'), Keyword),
(words(keyword_directives, suffix=r'\b'), Name.Decorator),
(words(keyword_declarations, suffix=r'\b'), Keyword.Declaration),
(words(keyword_types, suffix=r'\b'), Keyword.Type),
(words(keyword_constants, suffix=r'\b'), Keyword.Constant),
# Operators
include('operators'),
# Float Literal
# -- Hex Float
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin),
# -- Octal
(r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),
# Strings
('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
('([rR][cC]?|[cC][rR])"', String, 'dqs'),
("([rR][cC]?|[cC][rR])'", String, 'sqs'),
('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),
# Punctuation
(r'[\[\]{}:(),;?]', Punctuation),
# Labels
(r'[a-zA-Z_]\w*:>', Name.Label),
# Identifiers
(r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
(r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
],
'whitespace': [
(r'\s+', Whitespace),
include('comment'),
# Preprocessor
(r'(#)(\s*)(if)(\s+)(0)',
bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
Whitespace, Comment.Preproc), 'if0'),
(r'#', Comment.Preproc, 'macro'),
],
'operators': [
(r'(%s)\b' % '|'.join(operator_words), Operator.Word),
(r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
],
'comment': [
(r'//(.*?)$', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment2'),
],
'comment2': [
(r'[^/*]', Comment.Multiline),
(r'/[*]', Comment.Multiline, '#push'),
(r'[*]/', Comment.Multiline, '#pop'),
(r'[/*]', Comment.Multiline),
],
'if0': [
(r'^(\s*)(#if.*?(?<!\\))(\n)',
bygroups(Whitespace, Comment, Whitespace), '#push'),
(r'^(\s*)(#endif.*?(?<!\\))(\n)',
bygroups(Whitespace, Comment, Whitespace), '#pop'),
(r'(.*?)(\n)', bygroups(Comment, Whitespace)),
],
'macro': [
include('comment'),
(r'(import|include)(\s+)(<[^>]*?>)',
bygroups(Comment.Preproc, Whitespace, String), '#pop'),
(r'(import|include)(\s+)("[^"]*?")',
bygroups(Comment.Preproc, Whitespace, String), '#pop'),
(r"(import|include)(\s+)('[^']*?')",
bygroups(Comment.Preproc, Whitespace, String), '#pop'),
(r'[^/\n]+', Comment.Preproc),
# (r'/[*](.|\n)*?[*]/', Comment),
# (r'//.*?\n', Comment, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Whitespace, '#pop'),
],
'funcname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
# anonymous functions
(r'(?=\()', Text, '#pop'),
],
'classname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# anonymous classes
(r'(?=\{)', Text, '#pop'),
],
'modulename': [
include('whitespace'),
(r'\[', Punctuation, ('modulename2', 'tvarlist')),
default('modulename2'),
],
'modulename2': [
include('whitespace'),
(r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
],
'tvarlist': [
include('whitespace'),
include('operators'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r',', Punctuation),
(r'(with|where)\b', Keyword),
(r'[a-zA-Z_]\w*', Name),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
# included here again for raw strings
(r'\\\\|\\"|\\\n', String.Escape),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
# included here again for raw strings
(r"\\\\|\\'|\\\n", String.Escape),
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
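# Editorial usage sketch, not part of the upstream module: the one-line Felix
# function is an illustrative assumption. 'fun' should be emitted as Keyword
# and 'square' as Name.Function via the 'funcname' state.
if __name__ == '__main__':
    _sample_flx = 'fun square (x: int) => x * x;\n'
    for _ttype, _value in FelixLexer().get_tokens(_sample_flx):
        print(_ttype, repr(_value))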
| 9,646 | Python | 33.826715 | 82 | 0.431578 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/rebol.py | """
pygments.lexers.rebol
~~~~~~~~~~~~~~~~~~~~~
Lexers for the REBOL and related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Whitespace
__all__ = ['RebolLexer', 'RedLexer']
class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
.. versionadded:: 1.1
"""
name = 'REBOL'
aliases = ['rebol']
filenames = ['*.r', '*.r3', '*.reb']
mimetypes = ['text/x-rebol']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(
r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
r'while|compress|decompress|secure|open|close|read|read-io|'
r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
r'browse|launch|stats|get-modes|set-modes|to-local-file|'
r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
r'hide|draw|show|size-text|textinfo|offset-to-caret|'
r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
r'rsa-encrypt)$', word):
yield match.start(), Name.Builtin, word
elif re.match(
r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
r'minimum|maximum|negate|complement|absolute|random|head|tail|'
r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
r'copy)$', word):
yield match.start(), Name.Function, word
elif re.match(
r'(error|source|input|license|help|install|echo|Usage|with|func|'
r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
r'write-user|save-user|set-user-name|protect-system|parse-xml|'
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
r'request-dir|center-face|do-events|net-error|decode-url|'
r'parse-header|parse-header-date|parse-email-addrs|import-email|'
r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
r'find-key-face|do-face|viewtop|confine|find-window|'
r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
r'read-thru|load-thru|do-thru|launch-thru|load-image|'
r'request-download|do-face-alt|set-font|set-para|get-style|'
r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
r'resize-face|load-stock|load-stock-block|notify|request|flash|'
r'request-color|request-pass|request-text|request-list|'
r'request-date|request-file|dbug|editor|link-relative-path|'
r'emailer|parse-error)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
r'return|exit|break)$', word):
yield match.start(), Name.Exception, word
elif re.match('REBOL$', word):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
word):
yield match.start(), Operator, word
elif re.match(r".*\?$", word):
yield match.start(), Keyword, word
elif re.match(r".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'REBOL\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{")\s/[\]]*', Name.Attribute),
(r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
def analyse_text(text):
"""
        Check if the code contains a REBOL header, which means it is
        probably not R code.
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code starts with a REBOL header
return 1.0
elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code contains a REBOL header but also some text before it
return 0.5
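# Editorial sketch, not part of the upstream module: analyse_text feeds
# Pygments' lexer guessing (guess_lexer). A leading REBOL header should score
# 1.0, and a header preceded by other text 0.5. The sample strings below are
# illustrative assumptions.
if __name__ == '__main__':
    print(RebolLexer.analyse_text('REBOL [Title: "demo"]\nprint "hi"'))  # 1.0
    print(RebolLexer.analyse_text('; a comment first\nREBOL []'))        # 0.5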
class RedLexer(RegexLexer):
"""
A `Red-language <http://www.red-lang.org/>`_ lexer.
.. versionadded:: 2.0
"""
name = 'Red'
aliases = ['red', 'red/system']
filenames = ['*.red', '*.reds']
mimetypes = ['text/x-red', 'text/x-red-system']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
r'foreach|forall|func|function|does|has|switch|'
r'case|reduce|compose|get|set|print|prin|equal\?|'
r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
r'greater-or-equal\?|same\?|not|type\?|stats|'
r'bind|union|replace|charset|routine)$', word):
yield match.start(), Name.Builtin, word
elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
r'update|write)$', word):
yield match.start(), Name.Function, word
elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
r'none|crlf|dot|null-byte)$', word):
yield match.start(), Name.Builtin.Pseudo, word
elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
r'#switch|#default|#get-definition)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
r'quote|forever)$', word):
yield match.start(), Name.Exception, word
elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
r'any-struct\?|none\?|word\?|any-series\?)$', word):
yield match.start(), Keyword, word
elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
r'<<<|>>>|<<|>>|<|>%)$', word):
yield match.start(), Operator, word
elif re.match(r".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
elif re.match(":.*", word):
yield match.start(), Generic.Subheading, word # get-word
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'Red/System\s+\[', Generic.Strong, 'script'),
(r'Red\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f\s]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
bygroups(Number.Hex, Name.Variable, Whitespace)),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{^")\s/[\]]*', Name.Attribute),
(r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
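# Editorial usage sketch, not part of the upstream module: a minimal Red
# script, chosen as an illustrative assumption. The 'Red [...]' header must
# come first; it switches the lexer into the 'script' state.
if __name__ == '__main__':
    _sample_red = 'Red [Title: "demo"]\nprint 1 + 2\n'
    for _ttype, _value in RedLexer().get_tokens(_sample_red):
        print(_ttype, repr(_value))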
| 18,600 | Python | 42.157773 | 98 | 0.456989 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/rita.py | """
pygments.lexers.rita
~~~~~~~~~~~~~~~~~~~~
Lexers for RITA language
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Comment, Operator, Keyword, Name, Literal, \
Punctuation, Whitespace
__all__ = ['RitaLexer']
class RitaLexer(RegexLexer):
"""
Lexer for RITA.
.. versionadded:: 2.11
"""
name = 'Rita'
url = 'https://github.com/zaibacu/rita-dsl'
filenames = ['*.rita']
aliases = ['rita']
mimetypes = ['text/rita']
tokens = {
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'#(.*?)\n', Comment.Single),
(r'@(.*?)\n', Operator), # Yes, whole line as an operator
(r'"(\w|\d|\s|(\\")|[\'_\-./,\?\!])+?"', Literal),
(r'\'(\w|\d|\s|(\\\')|["_\-./,\?\!])+?\'', Literal),
(r'([A-Z_]+)', Keyword),
(r'([a-z0-9_]+)', Name),
(r'((->)|[!?+*|=])', Operator),
(r'[\(\),\{\}]', Punctuation)
]
}
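# Editorial usage sketch, not part of the upstream module: the rule below is
# an illustrative assumption loosely modeled on rita-dsl examples. Uppercase
# words should be emitted as Keyword and '->' as Operator.
if __name__ == '__main__':
    _sample_rita = '{WORD("hello"), WORD("world")} -> MARK("GREETING")\n'
    for _ttype, _value in RitaLexer().get_tokens(_sample_rita):
        print(_ttype, repr(_value))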
| 1,128 | Python | 24.65909 | 71 | 0.460993 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/csound.py | """
pygments.lexers.csound
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Csound languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, include, using, words
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, Punctuation, \
String, Text, Whitespace
from pygments.lexers._csound_builtins import OPCODES, DEPRECATED_OPCODES, REMOVED_OPCODES
from pygments.lexers.html import HtmlLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.scripting import LuaLexer
__all__ = ['CsoundScoreLexer', 'CsoundOrchestraLexer', 'CsoundDocumentLexer']
newline = (r'((?:(?:;|//).*)*)(\n)', bygroups(Comment.Single, Text))
class CsoundLexer(RegexLexer):
url = 'https://csound.com/'
tokens = {
'whitespace': [
(r'[ \t]+', Whitespace),
(r'/[*](?:.|\n)*?[*]/', Comment.Multiline),
(r'(?:;|//).*$', Comment.Single),
(r'(\\)(\n)', bygroups(Text, Whitespace))
],
'preprocessor directives': [
(r'#(?:e(?:nd(?:if)?|lse)\b|##)|@@?[ \t]*\d+', Comment.Preproc),
(r'#includestr', Comment.Preproc, 'includestr directive'),
(r'#include', Comment.Preproc, 'include directive'),
(r'#[ \t]*define', Comment.Preproc, 'define directive'),
(r'#(?:ifn?def|undef)\b', Comment.Preproc, 'macro directive')
],
'include directive': [
include('whitespace'),
(r'([^ \t]).*?\1', String, '#pop')
],
'includestr directive': [
include('whitespace'),
(r'"', String, ('#pop', 'quoted string'))
],
'define directive': [
(r'\n', Whitespace),
include('whitespace'),
(r'([A-Z_a-z]\w*)(\()', bygroups(Comment.Preproc, Punctuation),
('#pop', 'macro parameter name list')),
(r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'before macro body'))
],
'macro parameter name list': [
include('whitespace'),
(r'[A-Z_a-z]\w*', Comment.Preproc),
(r"['#]", Punctuation),
(r'\)', Punctuation, ('#pop', 'before macro body'))
],
'before macro body': [
(r'\n', Whitespace),
include('whitespace'),
(r'#', Punctuation, ('#pop', 'macro body'))
],
'macro body': [
(r'(?:\\(?!#)|[^#\\]|\n)+', Comment.Preproc),
(r'\\#', Comment.Preproc),
(r'(?<!\\)#', Punctuation, '#pop')
],
'macro directive': [
include('whitespace'),
(r'[A-Z_a-z]\w*', Comment.Preproc, '#pop')
],
'macro uses': [
(r'(\$[A-Z_a-z]\w*\.?)(\()', bygroups(Comment.Preproc, Punctuation),
'macro parameter value list'),
(r'\$[A-Z_a-z]\w*(?:\.|\b)', Comment.Preproc)
],
'macro parameter value list': [
(r'(?:[^\'#"{()]|\{(?!\{))+', Comment.Preproc),
(r"['#]", Punctuation),
(r'"', String, 'macro parameter value quoted string'),
(r'\{\{', String, 'macro parameter value braced string'),
(r'\(', Comment.Preproc, 'macro parameter value parenthetical'),
(r'\)', Punctuation, '#pop')
],
'macro parameter value quoted string': [
(r"\\[#'()]", Comment.Preproc),
(r"[#'()]", Error),
include('quoted string')
],
'macro parameter value braced string': [
(r"\\[#'()]", Comment.Preproc),
(r"[#'()]", Error),
include('braced string')
],
'macro parameter value parenthetical': [
(r'(?:[^\\()]|\\\))+', Comment.Preproc),
(r'\(', Comment.Preproc, '#push'),
(r'\)', Comment.Preproc, '#pop')
],
'whitespace and macro uses': [
include('whitespace'),
include('macro uses')
],
'numbers': [
(r'\d+[Ee][+-]?\d+|(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?', Number.Float),
(r'(0[Xx])([0-9A-Fa-f]+)', bygroups(Keyword.Type, Number.Hex)),
(r'\d+', Number.Integer)
],
'quoted string': [
(r'"', String, '#pop'),
(r'[^"$]+', String),
include('macro uses'),
(r'[$]', String)
],
'braced string': [
# Do nothing. This must be defined in subclasses.
]
}
class CsoundScoreLexer(CsoundLexer):
"""
For `Csound <https://csound.com>`_ scores.
.. versionadded:: 2.1
"""
name = 'Csound Score'
aliases = ['csound-score', 'csound-sco']
filenames = ['*.sco']
tokens = {
'root': [
(r'\n', Whitespace),
include('whitespace and macro uses'),
include('preprocessor directives'),
(r'[aBbCdefiqstvxy]', Keyword),
# There is also a w statement that is generated internally and should not be
# used; see https://github.com/csound/csound/issues/750.
(r'z', Keyword.Constant),
# z is a constant equal to 800,000,000,000. 800 billion seconds is about
# 25,367.8 years. See also
# https://csound.com/docs/manual/ScoreTop.html and
# https://github.com/csound/csound/search?q=stof+path%3AEngine+filename%3Asread.c.
(r'([nNpP][pP])(\d+)', bygroups(Keyword, Number.Integer)),
(r'[mn]', Keyword, 'mark statement'),
include('numbers'),
(r'[!+\-*/^%&|<>#~.]', Operator),
(r'[()\[\]]', Punctuation),
(r'"', String, 'quoted string'),
(r'\{', Comment.Preproc, 'loop after left brace'),
],
'mark statement': [
include('whitespace and macro uses'),
(r'[A-Z_a-z]\w*', Name.Label),
(r'\n', Whitespace, '#pop')
],
'loop after left brace': [
include('whitespace and macro uses'),
(r'\d+', Number.Integer, ('#pop', 'loop after repeat count')),
],
'loop after repeat count': [
include('whitespace and macro uses'),
(r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'loop'))
],
'loop': [
(r'\}', Comment.Preproc, '#pop'),
include('root')
],
# Braced strings are not allowed in Csound scores, but this is needed because the
# superclass includes it.
'braced string': [
(r'\}\}', String, '#pop'),
(r'[^}]|\}(?!\})', String)
]
}
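# Editorial usage sketch, not part of the upstream module: a two-statement
# score, chosen as an illustrative assumption. The 'i' and 'e' statements
# should be emitted as Keyword, the p-fields as Number, and the ';' comment
# as Comment.Single.
if __name__ == '__main__':
    _sample_sco = 'i 1 0 4 440 ; play instr 1 for 4 seconds\ne\n'
    for _ttype, _value in CsoundScoreLexer().get_tokens(_sample_sco):
        print(_ttype, repr(_value))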
class CsoundOrchestraLexer(CsoundLexer):
"""
For `Csound <https://csound.com>`_ orchestras.
.. versionadded:: 2.1
"""
name = 'Csound Orchestra'
aliases = ['csound', 'csound-orc']
filenames = ['*.orc', '*.udo']
user_defined_opcodes = set()
def opcode_name_callback(lexer, match):
opcode = match.group(0)
lexer.user_defined_opcodes.add(opcode)
yield match.start(), Name.Function, opcode
def name_callback(lexer, match):
type_annotation_token = Keyword.Type
name = match.group(1)
if name in OPCODES or name in DEPRECATED_OPCODES or name in REMOVED_OPCODES:
yield match.start(), Name.Builtin, name
elif name in lexer.user_defined_opcodes:
yield match.start(), Name.Function, name
else:
type_annotation_token = Name
name_match = re.search(r'^(g?[afikSw])(\w+)', name)
if name_match:
yield name_match.start(1), Keyword.Type, name_match.group(1)
yield name_match.start(2), Name, name_match.group(2)
else:
yield match.start(), Name, name
if match.group(2):
yield match.start(2), Punctuation, match.group(2)
yield match.start(3), type_annotation_token, match.group(3)
tokens = {
'root': [
(r'\n', Whitespace),
(r'^([ \t]*)(\w+)(:)([ \t]+|$)', bygroups(Whitespace, Name.Label, Punctuation, Whitespace)),
include('whitespace and macro uses'),
include('preprocessor directives'),
(r'\binstr\b', Keyword.Declaration, 'instrument numbers and identifiers'),
(r'\bopcode\b', Keyword.Declaration, 'after opcode keyword'),
(r'\b(?:end(?:in|op))\b', Keyword.Declaration),
include('partial statements')
],
'partial statements': [
(r'\b(?:0dbfs|A4|k(?:r|smps)|nchnls(?:_i)?|sr)\b', Name.Variable.Global),
include('numbers'),
(r'\+=|-=|\*=|/=|<<|>>|<=|>=|==|!=|&&|\|\||[~¬]|[=!+\-*/^%&|<>#?:]', Operator),
(r'[(),\[\]]', Punctuation),
(r'"', String, 'quoted string'),
(r'\{\{', String, 'braced string'),
(words((
'do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', 'kthen',
'od', 'then', 'until', 'while',
), prefix=r'\b', suffix=r'\b'), Keyword),
(words(('return', 'rireturn'), prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(r'\b[ik]?goto\b', Keyword, 'goto label'),
(r'\b(r(?:einit|igoto)|tigoto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
'goto label'),
(r'\b(c(?:g|in?|k|nk?)goto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
('goto label', 'goto argument')),
(r'\b(timout)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
('goto label', 'goto argument', 'goto argument')),
(r'\b(loop_[gl][et])(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
('goto label', 'goto argument', 'goto argument', 'goto argument')),
(r'\bprintk?s\b', Name.Builtin, 'prints opcode'),
(r'\b(?:readscore|scoreline(?:_i)?)\b', Name.Builtin, 'Csound score opcode'),
(r'\bpyl?run[it]?\b', Name.Builtin, 'Python opcode'),
(r'\blua_(?:exec|opdef)\b', Name.Builtin, 'Lua opcode'),
(r'\bp\d+\b', Name.Variable.Instance),
(r'\b([A-Z_a-z]\w*)(?:(:)([A-Za-z]))?\b', name_callback)
],
'instrument numbers and identifiers': [
include('whitespace and macro uses'),
(r'\d+|[A-Z_a-z]\w*', Name.Function),
(r'[+,]', Punctuation),
(r'\n', Whitespace, '#pop')
],
'after opcode keyword': [
include('whitespace and macro uses'),
(r'[A-Z_a-z]\w*', opcode_name_callback, ('#pop', 'opcode type signatures')),
(r'\n', Whitespace, '#pop')
],
'opcode type signatures': [
include('whitespace and macro uses'),
# https://github.com/csound/csound/search?q=XIDENT+path%3AEngine+filename%3Acsound_orc.lex
(r'0|[afijkKoOpPStV\[\]]+', Keyword.Type),
(r',', Punctuation),
(r'\n', Whitespace, '#pop')
],
'quoted string': [
(r'"', String, '#pop'),
(r'[^\\"$%)]+', String),
include('macro uses'),
include('escape sequences'),
include('format specifiers'),
(r'[\\$%)]', String)
],
'braced string': [
(r'\}\}', String, '#pop'),
(r'(?:[^\\%)}]|\}(?!\}))+', String),
include('escape sequences'),
include('format specifiers'),
(r'[\\%)]', String)
],
'escape sequences': [
# https://github.com/csound/csound/search?q=unquote_string+path%3AEngine+filename%3Acsound_orc_compile.c
(r'\\(?:[\\abnrt"]|[0-7]{1,3})', String.Escape)
],
# Format specifiers are highlighted in all strings, even though only
# fprintks https://csound.com/docs/manual/fprintks.html
# fprints https://csound.com/docs/manual/fprints.html
# printf/printf_i https://csound.com/docs/manual/printf.html
# printks https://csound.com/docs/manual/printks.html
# prints https://csound.com/docs/manual/prints.html
# sprintf https://csound.com/docs/manual/sprintf.html
# sprintfk https://csound.com/docs/manual/sprintfk.html
# work with strings that contain format specifiers. In addition, these opcodes’
# handling of format specifiers is inconsistent:
# - fprintks and fprints accept %a and %A specifiers, and accept %s specifiers
# starting in Csound 6.15.0.
# - printks and prints accept %a and %A specifiers, but don’t accept %s
# specifiers.
# - printf, printf_i, sprintf, and sprintfk don’t accept %a and %A specifiers,
# but accept %s specifiers.
# See https://github.com/csound/csound/issues/747 for more information.
'format specifiers': [
(r'%[#0\- +]*\d*(?:\.\d+)?[AE-GXac-giosux]', String.Interpol),
(r'%%', String.Escape)
],
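        # For example, '%6.2f' and '%s' match the String.Interpol rule above,
        # while a literal percent sign is written '%%' and lexed as an escape.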
'goto argument': [
include('whitespace and macro uses'),
(r',', Punctuation, '#pop'),
include('partial statements')
],
'goto label': [
include('whitespace and macro uses'),
(r'\w+', Name.Label, '#pop'),
default('#pop')
],
'prints opcode': [
include('whitespace and macro uses'),
(r'"', String, 'prints quoted string'),
default('#pop')
],
'prints quoted string': [
(r'\\\\[aAbBnNrRtT]', String.Escape),
(r'%[!nNrRtT]|[~^]{1,2}', String.Escape),
include('quoted string')
],
'Csound score opcode': [
include('whitespace and macro uses'),
(r'"', String, 'quoted string'),
(r'\{\{', String, 'Csound score'),
(r'\n', Whitespace, '#pop')
],
'Csound score': [
(r'\}\}', String, '#pop'),
(r'([^}]+)|\}(?!\})', using(CsoundScoreLexer))
],
'Python opcode': [
include('whitespace and macro uses'),
(r'"', String, 'quoted string'),
(r'\{\{', String, 'Python'),
(r'\n', Whitespace, '#pop')
],
'Python': [
(r'\}\}', String, '#pop'),
(r'([^}]+)|\}(?!\})', using(PythonLexer))
],
'Lua opcode': [
include('whitespace and macro uses'),
(r'"', String, 'quoted string'),
(r'\{\{', String, 'Lua'),
(r'\n', Whitespace, '#pop')
],
'Lua': [
(r'\}\}', String, '#pop'),
(r'([^}]+)|\}(?!\})', using(LuaLexer))
]
}
class CsoundDocumentLexer(RegexLexer):
"""
For `Csound <https://csound.com>`_ documents.
.. versionadded:: 2.1
"""
name = 'Csound Document'
aliases = ['csound-document', 'csound-csd']
filenames = ['*.csd']
# These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making
# CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a
# better idea, since Csound Document files look like XML files. However, Csound
# Documents can contain Csound comments (preceded by //, for example) before and
# after the root element, unescaped bitwise AND & and less than < operators, etc. In
# other words, while Csound Document files look like XML files, they may not actually
# be XML files.
tokens = {
'root': [
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'(?:;|//).*$', Comment.Single),
(r'[^/;<]+|/(?!/)', Text),
(r'<\s*CsInstruments', Name.Tag, ('orchestra', 'tag')),
(r'<\s*CsScore', Name.Tag, ('score', 'tag')),
(r'<\s*[Hh][Tt][Mm][Ll]', Name.Tag, ('HTML', 'tag')),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag)
],
'orchestra': [
(r'<\s*/\s*CsInstruments\s*>', Name.Tag, '#pop'),
(r'(.|\n)+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer))
],
'score': [
(r'<\s*/\s*CsScore\s*>', Name.Tag, '#pop'),
(r'(.|\n)+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer))
],
'HTML': [
(r'<\s*/\s*[Hh][Tt][Mm][Ll]\s*>', Name.Tag, '#pop'),
(r'(.|\n)+?(?=<\s*/\s*[Hh][Tt][Mm][Ll]\s*>)', using(HtmlLexer))
],
'tag': [
(r'\s+', Whitespace),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop')
],
'attr': [
(r'\s+', Whitespace),
(r'".*?"', String, '#pop'),
(r"'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop')
]
}
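# A minimal usage sketch (the CSD snippet is illustrative): the delegation
# states above hand <CsInstruments> content to CsoundOrchestraLexer and
# <CsScore> content to CsoundScoreLexer via using().
def _csound_document_demo():
    csd = ('<CsoundSynthesizer>\n'
           '<CsInstruments>\n'
           'instr 1\n'
           'endin\n'
           '</CsInstruments>\n'
           '<CsScore>\n'
           'i 1 0 1\n'
           '</CsScore>\n'
           '</CsoundSynthesizer>\n')
    return list(CsoundDocumentLexer().get_tokens(csd))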
| 16,987 | Python | 35.221748 | 116 | 0.483487 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/other.py | """
pygments.lexers.other
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer
from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \
TcshLexer
from pygments.lexers.robotframework import RobotFrameworkLexer
from pygments.lexers.testing import GherkinLexer
from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer
from pygments.lexers.prolog import LogtalkLexer
from pygments.lexers.snobol import SnobolLexer
from pygments.lexers.rebol import RebolLexer
from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer
from pygments.lexers.modeling import ModelicaLexer
from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \
HybrisLexer
from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \
AsymptoteLexer, PovrayLexer
from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \
GoodDataCLLexer, MaqlLexer
from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer
from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \
MscgenLexer, VGLLexer
from pygments.lexers.basic import CbmBasicV2Lexer
from pygments.lexers.pawn import SourcePawnLexer, PawnLexer
from pygments.lexers.ecl import ECLLexer
from pygments.lexers.urbi import UrbiscriptLexer
from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer
from pygments.lexers.installers import NSISLexer, RPMSpecLexer
from pygments.lexers.textedit import AwkLexer
from pygments.lexers.smv import NuSMVLexer
__all__ = []
| 1,744 | Python | 41.560975 | 79 | 0.818234 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/automation.py | """
pygments.lexers.automation
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for automation scripting languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, combined
from pygments.token import Text, Comment, Operator, Name, String, \
Number, Punctuation, Generic
__all__ = ['AutohotkeyLexer', 'AutoItLexer']
class AutohotkeyLexer(RegexLexer):
"""
For autohotkey source code.
.. versionadded:: 1.4
"""
name = 'autohotkey'
url = 'http://www.autohotkey.com/'
aliases = ['autohotkey', 'ahk']
filenames = ['*.ahk', '*.ahkl']
mimetypes = ['text/x-autohotkey']
tokens = {
'root': [
(r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'),
(r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'),
(r'\s+;.*?$', Comment.Single),
(r'^;.*?$', Comment.Single),
(r'[]{}(),;[]', Punctuation),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInVariables'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
include('garbage'),
],
'incomment': [
(r'^\s*\*/', Comment.Multiline, '#pop'),
(r'[^*]+', Comment.Multiline),
(r'\*', Comment.Multiline)
],
'incontinuation': [
(r'^\s*\)', Generic, '#pop'),
(r'[^)]', Generic),
(r'[)]', Generic),
],
'commands': [
(r'(?i)^(\s*)(global|local|static|'
r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|'
r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|'
r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|'
r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|'
r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|'
r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|'
r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|'
r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|'
r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|'
r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|'
r'ControlSendRaw|ControlSetText|CoordMode|Critical|'
r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|'
r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|'
r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|'
r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|'
r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|'
r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|'
r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|'
r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|'
r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|'
r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|'
r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|'
r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|'
r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|'
r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|'
r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|'
r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|'
r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|'
r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|'
r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|'
r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|'
r'SetBatchLines|SetCapslockState|SetControlDelay|'
r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|'
r'SetMouseDelay|SetNumlockState|SetScrollLockState|'
r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|'
r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|'
r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|'
r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|'
r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|'
r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|'
r'StringReplace|StringRight|StringSplit|StringTrimLeft|'
r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|'
r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|'
r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|'
r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|'
r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|'
r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|'
r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|'
r'WinWait)\b', bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|'
r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|'
r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|'
r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|'
r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|'
r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|'
r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|'
r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|'
r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|'
r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|'
r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|'
r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|'
r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|'
r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|'
r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|'
r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b',
Name.Function),
],
'builtInVariables': [
(r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|'
r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|'
r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|'
r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|'
r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|'
r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|'
r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|'
r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|'
r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|'
r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|'
r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|'
r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|'
r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|'
r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|'
r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|'
r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|'
r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|'
r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|'
r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|'
r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|'
r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|'
r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|'
r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|'
r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|'
r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|'
r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|'
r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|'
r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|'
r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|'
r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b',
Name.Variable),
],
'labels': [
# hotkeys and labels
# technically, hotkey names are limited to named keys and buttons
(r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)),
(r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
# (r'.', Text), # no cheating
],
}
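# A minimal usage sketch (the three-line script is illustrative): a hotkey
# label, a built-in command, and a %...% variable reference exercise the
# 'labels', 'commands', and variable rules above.
def _autohotkey_demo():
    script = '^j::\nMsgBox, %A_UserName%\nreturn\n'
    return list(AutohotkeyLexer().get_tokens(script))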
class AutoItLexer(RegexLexer):
"""
For AutoIt files.
AutoIt is a freeware BASIC-like scripting language
    designed for automating the Windows GUI and general scripting.
.. versionadded:: 1.6
"""
name = 'AutoIt'
url = 'http://www.autoitscript.com/site/autoit/'
aliases = ['autoit']
filenames = ['*.au3']
mimetypes = ['text/x-autoit']
# Keywords, functions, macros from au3.keywords.properties
# which can be found in AutoIt installed directory, e.g.
# c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties
keywords = """\
#include-once #include #endregion #forcedef #forceref #region
and byref case continueloop dim do else elseif endfunc endif
endselect exit exitloop for func global
if local next not or return select step
then to until wend while exit""".split()
functions = """\
abs acos adlibregister adlibunregister asc ascw asin assign atan
autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen
binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor
blockinput break call cdtray ceiling chr chrw clipget clipput consoleread
consolewrite consolewriteerror controlclick controlcommand controldisable
controlenable controlfocus controlgetfocus controlgethandle controlgetpos
controlgettext controlhide controllistview controlmove controlsend
controlsettext controlshow controltreeview cos dec dircopy dircreate
dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree
dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate
dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata
drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype
drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree
drivespacetotal drivestatus envget envset envupdate eval execute exp
filechangedir fileclose filecopy filecreatentfslink filecreateshortcut
filedelete fileexists filefindfirstfile filefindnextfile fileflush
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut
filegetshortname filegetsize filegettime filegetversion fileinstall filemove
fileopen fileopendialog fileread filereadline filerecycle filerecycleempty
filesavedialog fileselectfolder filesetattrib filesetpos filesettime
filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi
guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo
guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy
guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon
guictrlcreateinput guictrlcreatelabel guictrlcreatelist
guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu
guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj
guictrlcreatepic guictrlcreateprogress guictrlcreateradio
guictrlcreateslider guictrlcreatetab guictrlcreatetabitem
guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown
guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg
guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy
guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata
guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic
guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos
guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete
guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators
guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon
guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset
httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize
inetread inidelete iniread inireadsection inireadsectionnames
inirenamesection iniwrite iniwritesection inputbox int isadmin isarray
isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword
isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag
mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox
number objcreate objcreateinterface objevent objevent objget objname
onautoitexitregister onautoitexitunregister opt ping pixelchecksum
pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists
processgetstats processlist processsetpriority processwait processwaitclose
progressoff progresson progressset ptr random regdelete regenumkey
regenumval regread regwrite round run runas runaswait runwait send
sendkeepactive seterror setextended shellexecute shellexecutewait shutdown
sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton
sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread
string stringaddcr stringcompare stringformat stringfromasciiarray
stringinstr stringisalnum stringisalpha stringisascii stringisdigit
stringisfloat stringisint stringislower stringisspace stringisupper
stringisxdigit stringleft stringlen stringlower stringmid stringregexp
stringregexpreplace stringreplace stringright stringsplit stringstripcr
stringstripws stringtoasciiarray stringtobinary stringtrimleft
stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect
tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff
timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete
trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent
trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent
traysetpauseicon traysetstate traysettooltip traytip ubound udpbind
udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype
winactivate winactive winclose winexists winflash wingetcaretpos
wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess
wingetstate wingettext wingettitle winkill winlist winmenuselectitem
winminimizeall winminimizeallundo winmove winsetontop winsetstate
winsettitle winsettrans winwait winwaitactive winwaitclose
winwaitnotactive""".split()
macros = """\
@appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion
@autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec
@cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir
@desktopheight @desktoprefresh @desktopwidth @documentscommondir @error
@exitcode @exitmethod @extended @favoritescommondir @favoritesdir
@gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid
@gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour
@ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf
@logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang
@mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype
@osversion @programfilesdir @programscommondir @programsdir @scriptdir
@scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir
@startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide
@sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault
@sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna
@sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir
@tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday
@windowsdir @workingdir @yday @year""".split()
tokens = {
'root': [
(r';.*\n', Comment.Single),
(r'(#comments-start|#cs)(.|\n)*?(#comments-end|#ce)',
Comment.Multiline),
(r'[\[\]{}(),;]', Punctuation),
(r'(and|or|not)\b', Operator.Word),
            (r'[$@][a-zA-Z_]\w*', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
            include('builtInMacros'),
(r'"', String, combined('stringescape', 'dqs')),
(r"'", String, 'sqs'),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
(r'_\n', Text), # Line continuation
include('garbage'),
],
'commands': [
(r'(?i)(\s*)(%s)\b' % '|'.join(keywords),
bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(%s)\b' % '|'.join(functions),
Name.Function),
],
        'builtInMacros': [
(r'(?i)(%s)\b' % '|'.join(macros),
Name.Variable.Global),
],
'labels': [
# sendkeys
(r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'sqs': [
(r'\'\'|\`([,%`abfnrtv])', String.Escape),
(r"'", String, '#pop'),
(r"[^'\n]+", String)
],
'garbage': [
(r'[^\S\n]', Text),
],
}
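# A minimal usage sketch (the script is illustrative): the keyword, function,
# and macro lists above are joined into single case-insensitive alternations,
# so 'Local', 'MsgBox', and '@ScriptName' each hit their dedicated rule.
def _autoit_demo():
    src = 'Local $msg = "hi"\nMsgBox(0, @ScriptName, $msg)\n'
    return list(AutoItLexer().get_tokens(src))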
| 19,815 | Python | 50.874345 | 80 | 0.661519 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/meson.py | """
pygments.lexers.meson
~~~~~~~~~~~~~~~~~~~~~
Pygments lexer for the Meson build system
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include
from pygments.token import Comment, Name, Number, Punctuation, Operator, \
Keyword, String, Whitespace
__all__ = ['MesonLexer']
class MesonLexer(RegexLexer):
"""Meson language lexer.
    The grammar definition used to transcribe the syntax was retrieved from
https://mesonbuild.com/Syntax.html#grammar for version 0.58.
Some of those definitions are improperly transcribed, so the Meson++
implementation was also checked: https://github.com/dcbaker/meson-plus-plus.
.. versionadded:: 2.10
"""
# TODO String interpolation @VARNAME@ inner matches
# TODO keyword_arg: value inner matches
name = 'Meson'
url = 'https://mesonbuild.com/'
aliases = ['meson', 'meson.build']
filenames = ['meson.build', 'meson_options.txt']
mimetypes = ['text/x-meson']
tokens = {
'root': [
(r'#.*?$', Comment),
(r"'''.*'''", String.Single),
(r'[1-9][0-9]*', Number.Integer),
(r'0o[0-7]+', Number.Oct),
(r'0x[a-fA-F0-9]+', Number.Hex),
include('string'),
include('keywords'),
include('expr'),
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
(r'\s+', Whitespace),
],
'string': [
(r"[']{3}([']{0,2}([^\\']|\\(.|\n)))*[']{3}", String),
(r"'.*?(?<!\\)(\\\\)*?'", String),
],
'keywords': [
(words((
'if',
'elif',
'else',
'endif',
'foreach',
'endforeach',
'break',
'continue',
),
suffix=r'\b'), Keyword),
],
'expr': [
(r'(in|and|or|not)\b', Operator.Word),
            (r'(\*=|/=|%=|\+=|-=|==|!=|\+|-|=)', Operator),
(r'[\[\]{}:().,?]', Punctuation),
(words(('true', 'false'), suffix=r'\b'), Keyword.Constant),
include('builtins'),
(words((
'meson',
'build_machine',
'host_machine',
'target_machine',
),
suffix=r'\b'), Name.Variable.Magic),
],
'builtins': [
# This list was extracted from the v0.58 reference manual
(words((
'add_global_arguments',
'add_global_link_arguments',
'add_languages',
'add_project_arguments',
'add_project_link_arguments',
'add_test_setup',
'assert',
'benchmark',
'both_libraries',
'build_target',
'configuration_data',
'configure_file',
'custom_target',
'declare_dependency',
'dependency',
'disabler',
'environment',
'error',
'executable',
'files',
'find_library',
'find_program',
'generator',
'get_option',
'get_variable',
'include_directories',
'install_data',
'install_headers',
'install_man',
'install_subdir',
'is_disabler',
'is_variable',
'jar',
'join_paths',
'library',
'message',
'project',
'range',
'run_command',
'set_variable',
'shared_library',
'shared_module',
'static_library',
'subdir',
'subdir_done',
'subproject',
'summary',
'test',
'vcs_tag',
'warning',
),
prefix=r'(?<!\.)',
suffix=r'\b'), Name.Builtin),
(r'(?<!\.)import\b', Name.Namespace),
],
}
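# A minimal usage sketch (the meson.build snippet is illustrative): 'project'
# and 'executable' come from the 'builtins' word list above.
def _meson_demo():
    build = "project('demo', 'c')\nexecutable('demo', 'main.c')\n"
    return list(MesonLexer().get_tokens(build))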
| 4,337 | Python | 29.765957 | 80 | 0.414803 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/jslt.py | """
pygments.lexers.jslt
~~~~~~~~~~~~~~~~~~~~
Lexers for the JSLT language
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, combined, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Whitespace
__all__ = ['JSLTLexer']
_WORD_END = r'(?=[^0-9A-Z_a-z-])'
class JSLTLexer(RegexLexer):
"""
For JSLT source.
.. versionadded:: 2.10
"""
name = 'JSLT'
url = 'https://github.com/schibsted/jslt'
filenames = ['*.jslt']
aliases = ['jslt']
mimetypes = ['text/x-jslt']
tokens = {
'root': [
(r'[\t\n\f\r ]+', Whitespace),
(r'//.*(\n|\Z)', Comment.Single),
            # Floats (with a fraction and/or exponent) must precede plain
            # integers so that '1.5e3' is not split into '1' '.' '5e3'.
            (r'-?(0|[1-9][0-9]*)(\.[0-9]+([Ee][+-]?[0-9]+)?|[Ee][+-]?[0-9]+)', Number.Float),
            (r'-?(0|[1-9][0-9]*)', Number.Integer),
(r'"([^"\\]|\\.)*"', String.Double),
(r'[(),:\[\]{}]', Punctuation),
(r'(!=|[<=>]=?)', Operator),
(r'[*+/|-]', Operator),
(r'\.', Operator),
(words(('import',), suffix=_WORD_END), Keyword.Namespace, combined('import-path', 'whitespace')),
(words(('as',), suffix=_WORD_END), Keyword.Namespace, combined('import-alias', 'whitespace')),
(words(('let',), suffix=_WORD_END), Keyword.Declaration, combined('constant', 'whitespace')),
(words(('def',), suffix=_WORD_END), Keyword.Declaration, combined('function', 'whitespace')),
(words(('false', 'null', 'true'), suffix=_WORD_END), Keyword.Constant),
(words(('else', 'for', 'if'), suffix=_WORD_END), Keyword),
(words(('and', 'or'), suffix=_WORD_END), Operator.Word),
(words((
'all', 'any', 'array', 'boolean', 'capture', 'ceiling',
'contains', 'ends-with', 'error', 'flatten', 'floor',
'format-time', 'from-json', 'get-key', 'hash-int', 'index-of',
'is-array', 'is-boolean', 'is-decimal', 'is-integer',
'is-number', 'is-object', 'is-string', 'join', 'lowercase',
'max', 'min', 'mod', 'not', 'now', 'number', 'parse-time',
'parse-url', 'random', 'replace', 'round', 'sha256-hex', 'size',
'split', 'starts-with', 'string', 'sum', 'test', 'to-json',
'trim', 'uppercase', 'zip', 'zip-with-index', 'fallback'), suffix=_WORD_END),
Name.Builtin),
(r'[A-Z_a-z][0-9A-Z_a-z-]*:[A-Z_a-z][0-9A-Z_a-z-]*', Name.Function),
(r'[A-Z_a-z][0-9A-Z_a-z-]*', Name),
(r'\$[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable),
],
'constant': [
(r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable, 'root'),
],
'function': [
(r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Function, combined('function-parameter-list', 'whitespace')),
],
'function-parameter-list': [
(r'\(', Punctuation, combined('function-parameters', 'whitespace')),
],
'function-parameters': [
(r',', Punctuation),
(r'\)', Punctuation, 'root'),
(r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Variable),
],
'import-path': [
(r'"([^"]|\\.)*"', String.Symbol, 'root'),
],
'import-alias': [
(r'[A-Z_a-z][0-9A-Z_a-z-]*', Name.Namespace, 'root'),
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\.', String.Escape),
],
'whitespace': [
(r'[\t\n\f\r ]+', Whitespace),
(r'//.*(\n|\Z)', Comment.Single),
]
}
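# A minimal usage sketch (the transform is illustrative): because keywords are
# suffixed with the _WORD_END lookahead, 'if' matches here but would not match
# inside a hyphenated name such as 'if-absent' (JSLT names may contain '-').
def _jslt_demo():
    transform = 'if (.published) {"title": .title} else null\n'
    return list(JSLTLexer().get_tokens(transform))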
| 3,701 | Python | 37.5625 | 109 | 0.46555 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/x10.py | """
pygments.lexers.x10
~~~~~~~~~~~~~~~~~~~
Lexers for the X10 programming language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Keyword, String
__all__ = ['X10Lexer']
class X10Lexer(RegexLexer):
"""
For the X10 language.
.. versionadded:: 2.2
"""
name = 'X10'
url = 'http://x10-lang.org/'
aliases = ['x10', 'xten']
filenames = ['*.x10']
mimetypes = ['text/x-x10']
keywords = (
'as', 'assert', 'async', 'at', 'athome', 'ateach', 'atomic',
'break', 'case', 'catch', 'class', 'clocked', 'continue',
'def', 'default', 'do', 'else', 'final', 'finally', 'finish',
'for', 'goto', 'haszero', 'here', 'if', 'import', 'in',
'instanceof', 'interface', 'isref', 'new', 'offer',
'operator', 'package', 'return', 'struct', 'switch', 'throw',
'try', 'type', 'val', 'var', 'when', 'while'
)
    types = (
        'void',  # trailing comma keeps this a tuple; a bare string would make
                 # '|'.join(types) produce 'v|o|i|d' instead of 'void'
    )
values = (
'false', 'null', 'self', 'super', 'this', 'true'
)
modifiers = (
'abstract', 'extends', 'implements', 'native', 'offers',
'private', 'property', 'protected', 'public', 'static',
'throws', 'transient'
)
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'\b(%s)\b' % '|'.join(types), Keyword.Type),
(r'\b(%s)\b' % '|'.join(values), Keyword.Constant),
(r'\b(%s)\b' % '|'.join(modifiers), Keyword.Declaration),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'.', Text)
],
}
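# A minimal usage sketch (the class body is illustrative): 'class', 'def', and
# 'val' come from the keyword alternation built above.
def _x10_demo():
    src = 'class Hello { def main() { val x = 1; } }\n'
    return list(X10Lexer().get_tokens(src))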
| 1,920 | Python | 27.25 | 70 | 0.471354 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/theorem.py | """
pygments.lexers.theorem
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for theorem-proving languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
__all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer']
class CoqLexer(RegexLexer):
"""
For the Coq theorem prover.
.. versionadded:: 1.5
"""
name = 'Coq'
url = 'http://coq.inria.fr/'
aliases = ['coq']
filenames = ['*.v']
mimetypes = ['text/x-coq']
flags = 0 # no re.MULTILINE
keywords1 = (
# Vernacular commands
'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable',
'Variables', 'Parameter', 'Parameters', 'Axiom', 'Axioms', 'Hypothesis',
'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope',
'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Example', 'Let',
'Ltac', 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit',
'Arguments', 'Types', 'Unset', 'Contextual', 'Strict', 'Prenex',
'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure',
'Variant', 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Fact',
'Remark', 'Corollary', 'Proposition', 'Property', 'Goal',
'Proof', 'Restart', 'Save', 'Qed', 'Defined', 'Abort', 'Admitted',
'Hint', 'Resolve', 'Rewrite', 'View', 'Search', 'Compute', 'Eval',
'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing',
'Universe', 'Polymorphic', 'Monomorphic', 'Context', 'Scheme', 'From',
'Undo', 'Fail', 'Function',
)
keywords2 = (
# Gallina
'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct',
'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else',
'for', 'of', 'nosimpl', 'with', 'as',
)
keywords3 = (
# Sorts
'Type', 'Prop', 'SProp', 'Set',
)
keywords4 = (
# Tactics
'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro',
'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct',
'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite',
'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold',
'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite',
'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity',
'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute',
'native_compute', 'subst',
)
keywords5 = (
# Terminators
'by', 'now', 'done', 'exact', 'reflexivity',
'tauto', 'romega', 'omega', 'lia', 'nia', 'lra', 'nra', 'psatz',
'assumption', 'solve', 'contradiction', 'discriminate',
'congruence', 'admit'
)
keywords6 = (
# Control
'do', 'last', 'first', 'try', 'idtac', 'repeat',
)
# 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
# 'downto', 'else', 'end', 'exception', 'external', 'false',
# 'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
# 'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
# 'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
# 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
# 'type', 'val', 'virtual', 'when', 'while', 'with'
keyopts = (
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.',
'->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-',
'<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>',
r'/\\', r'\\/', r'\{\|', r'\|\}',
# 'Π', 'Σ', # Not defined in the standard library
'λ', '¬', '∧', '∨', '∀', '∃', '→', '↔', '≠', '≤', '≥',
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
tokens = {
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\(\*', Comment, 'comment'),
(r'\b(?:[^\W\d][\w\']*\.)+[^\W\d][\w\']*\b', Name),
(r'\bEquations\b\??', Keyword.Namespace),
# Very weak heuristic to distinguish the Set vernacular from the Set sort
(r'\bSet(?=[ \t]+[A-Z][a-z][^\n]*?\.)', Keyword.Namespace),
(words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type),
(words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
# (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name),
(r'\S', Name.Builtin.Pseudo),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^"]+', String.Double),
(r'""', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z][a-z0-9_\']*', Name, '#pop'),
default('#pop')
],
}
def analyse_text(text):
if 'Qed' in text and 'Proof' in text:
return 1
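# A minimal sketch of the heuristic above (the proof text is illustrative):
# LexerMeta wraps analyse_text, so it can be called through the class and
# returns 1.0 whenever both 'Proof' and 'Qed' occur in the sample.
def _coq_analyse_demo():
    proof = 'Lemma one_eq_one : 1 = 1.\nProof. reflexivity. Qed.\n'
    return CoqLexer.analyse_text(proof)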
class IsabelleLexer(RegexLexer):
"""
For the Isabelle proof assistant.
.. versionadded:: 2.0
"""
name = 'Isabelle'
url = 'https://isabelle.in.tum.de/'
aliases = ['isabelle']
filenames = ['*.thy']
mimetypes = ['text/x-isabelle']
keyword_minor = (
'and', 'assumes', 'attach', 'avoids', 'binder', 'checking',
'class_instance', 'class_relation', 'code_module', 'congs',
'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes',
'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in',
'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy',
'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes',
'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive',
'pervasive', 'rep_compat', 'shows', 'structure', 'type_class',
'type_constructor', 'unchecked', 'unsafe', 'where',
)
keyword_diag = (
'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms',
'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms',
'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf',
'print_abbrevs', 'print_antiquotations', 'print_attributes',
'print_binds', 'print_bnfs', 'print_bundles',
'print_case_translations', 'print_cases', 'print_claset',
'print_classes', 'print_codeproc', 'print_codesetup',
'print_coercions', 'print_commands', 'print_context',
'print_defn_rules', 'print_dependencies', 'print_facts',
'print_induct_rules', 'print_inductives', 'print_interps',
'print_locale', 'print_locales', 'print_methods', 'print_options',
'print_orders', 'print_quot_maps', 'print_quotconsts',
'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3',
'print_rules', 'print_simpset', 'print_state', 'print_statement',
'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules',
'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status',
'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps',
'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome',
'print_ML_antiquotations', 'print_term_bindings', 'values_prolog',
)
keyword_thy = ('theory', 'begin', 'end')
keyword_section = ('header', 'chapter')
keyword_subsection = (
'section', 'subsection', 'subsubsection', 'sect', 'subsect',
'subsubsect',
)
keyword_theory_decl = (
'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities',
'atom_decl', 'attribute_setup', 'axiomatization', 'bundle',
'case_of_simps', 'class', 'classes', 'classrel', 'codatatype',
'code_abort', 'code_class', 'code_const', 'code_datatype',
'code_identifier', 'code_include', 'code_instance', 'code_modulename',
'code_monad', 'code_printing', 'code_reflect', 'code_reserved',
'code_type', 'coinductive', 'coinductive_set', 'consts', 'context',
'datatype', 'datatype_new', 'datatype_new_compat', 'declaration',
'declare', 'default_sort', 'defer_recdef', 'definition', 'defs',
'domain', 'domain_isomorphism', 'domaindef', 'equivariance',
'export_code', 'extract', 'extract_type', 'fixrec', 'fun',
'fun_cases', 'hide_class', 'hide_const', 'hide_fact', 'hide_type',
'import_const_map', 'import_file', 'import_tptp', 'import_type_map',
'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas',
'lifting_forget', 'lifting_update', 'local_setup', 'locale',
'method_setup', 'nitpick_params', 'no_adhoc_overloading',
'no_notation', 'no_syntax', 'no_translations', 'no_type_notation',
'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle',
'overloading', 'parse_ast_translation', 'parse_translation',
'partial_function', 'primcorec', 'primrec', 'primrec_new',
'print_ast_translation', 'print_translation', 'quickcheck_generator',
'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record',
'refute_params', 'setup', 'setup_lifting', 'simproc_setup',
'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open',
'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions',
'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text',
'text_raw', 'theorems', 'translations', 'type_notation',
'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate',
'install_C_file', 'install_C_types', 'wpc_setup', 'c_defs', 'c_types',
'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate',
'bnf_axiomatization', 'cartouche', 'datatype_compat',
'free_constructors', 'functor', 'nominal_function',
'nominal_termination', 'permanent_interpretation',
'binds', 'defining', 'smt2_status', 'term_cartouche',
'boogie_file', 'text_cartouche',
)
keyword_theory_script = ('inductive_cases', 'inductive_simps')
keyword_theory_goal = (
'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef',
'crunch', 'crunch_ignore',
'enriched_type', 'function', 'instance', 'interpretation', 'lemma',
'lift_definition', 'nominal_inductive', 'nominal_inductive2',
'nominal_primrec', 'pcpodef', 'primcorecursive',
'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype',
'schematic_corollary', 'schematic_lemma', 'schematic_theorem',
'spark_vc', 'specification', 'subclass', 'sublocale', 'termination',
'theorem', 'typedef', 'wrap_free_constructors',
)
keyword_qed = ('by', 'done', 'qed')
keyword_abandon_proof = ('sorry', 'oops')
keyword_proof_goal = ('have', 'hence', 'interpret')
keyword_proof_block = ('next', 'proof')
keyword_proof_chain = (
'finally', 'from', 'then', 'ultimately', 'with',
)
keyword_proof_decl = (
'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note',
'txt', 'txt_raw', 'unfolding', 'using', 'write',
)
keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume')
keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus')
keyword_proof_script = (
'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer',
)
operators = (
'::', ':', '(', ')', '[', ']', '_', '=', ',', '|',
'+', '-', '!', '?',
)
proof_operators = ('{', '}', '.', '..')
tokens = {
'root': [
(r'\s+', Whitespace),
(r'\(\*', Comment, 'comment'),
(r'\\<open>', String.Symbol, 'cartouche'),
(r'\{\*|‹', String, 'cartouche'),
(words(operators), Operator),
(words(proof_operators), Operator.Word),
(words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(words(keyword_diag, prefix=r'\b', suffix=r'\b'), Keyword.Type),
(words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading),
(words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading),
(words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error),
(words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(r'\\<(\w|\^)*>', Text.Symbol),
(r"'[^\W\d][.\w']*", Name.Type),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'"', String, 'string'),
(r'`', String.Other, 'fact'),
(r'[^\s:|\[\]\-()=,+!?{}._][^\s:|\[\]\-()=,+!?{}]*', Name),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'cartouche': [
(r'[^{*}\\‹›]+', String),
(r'\\<open>', String.Symbol, '#push'),
(r'\{\*|‹', String, '#push'),
(r'\\<close>', String.Symbol, '#pop'),
(r'\*\}|›', String, '#pop'),
(r'\\<(\w|\^)*>', String.Symbol),
(r'[{*}\\]', String),
],
'string': [
(r'[^"\\]+', String),
(r'\\<(\w|\^)*>', String.Symbol),
(r'\\"', String),
(r'\\', String),
(r'"', String, '#pop'),
],
'fact': [
(r'[^`\\]+', String.Other),
(r'\\<(\w|\^)*>', String.Symbol),
(r'\\`', String.Other),
(r'\\', String.Other),
(r'`', String.Other, '#pop'),
],
}
class LeanLexer(RegexLexer):
"""
For the Lean theorem prover.
.. versionadded:: 2.0
"""
name = 'Lean'
url = 'https://github.com/leanprover/lean'
aliases = ['lean']
filenames = ['*.lean']
mimetypes = ['text/x-lean']
tokens = {
'root': [
(r'\s+', Text),
(r'/--', String.Doc, 'docstring'),
(r'/-', Comment, 'comment'),
(r'--.*?$', Comment.Single),
(words((
'import', 'renaming', 'hiding',
'namespace',
'local',
'private', 'protected', 'section',
'include', 'omit', 'section',
'protected', 'export',
'open',
'attribute',
), prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words((
'lemma', 'theorem', 'def', 'definition', 'example',
'axiom', 'axioms', 'constant', 'constants',
'universe', 'universes',
'inductive', 'coinductive', 'structure', 'extends',
'class', 'instance',
'abbreviation',
'noncomputable theory',
'noncomputable', 'mutual', 'meta',
'attribute',
'parameter', 'parameters',
'variable', 'variables',
'reserve', 'precedence',
'postfix', 'prefix', 'notation', 'infix', 'infixl', 'infixr',
'begin', 'by', 'end',
'set_option',
'run_cmd',
), prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
(r'@\[[^\]]*\]', Keyword.Declaration),
(words((
'forall', 'fun', 'Pi', 'from', 'have', 'show', 'assume', 'suffices',
'let', 'if', 'else', 'then', 'in', 'with', 'calc', 'match',
'do'
), prefix=r'\b', suffix=r'\b'), Keyword),
(words(('sorry', 'admit'), prefix=r'\b', suffix=r'\b'), Generic.Error),
(words(('Sort', 'Prop', 'Type'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
(words((
'#eval', '#check', '#reduce', '#exit',
'#print', '#help',
), suffix=r'\b'), Keyword),
(words((
'(', ')', ':', '{', '}', '[', ']', '⟨', '⟩', '‹', '›', '⦃', '⦄', ':=', ',',
)), Operator),
(r'[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]'
r'[.A-Za-z_\'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079'
r'\u207f-\u2089\u2090-\u209c\u2100-\u214f0-9]*', Name),
(r'0x[A-Za-z0-9]+', Number.Integer),
(r'0b[01]+', Number.Integer),
(r'\d+', Number.Integer),
(r'"', String.Double, 'string'),
(r"'(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4})|.)'", String.Char),
(r'[~?][a-z][\w\']*:', Name.Variable),
(r'\S', Name.Builtin.Pseudo),
],
'comment': [
(r'[^/-]', Comment.Multiline),
(r'/-', Comment.Multiline, '#push'),
(r'-/', Comment.Multiline, '#pop'),
(r'[/-]', Comment.Multiline)
],
'docstring': [
(r'[^/-]', String.Doc),
(r'-/', String.Doc, '#pop'),
(r'[/-]', String.Doc)
],
'string': [
(r'[^\\"]+', String.Double),
(r"(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4}))", String.Escape),
('"', String.Double, '#pop'),
],
}
| 20,113 | Python | 40.472165 | 92 | 0.480684 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/robotframework.py | """
pygments.lexers.robotframework
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Robot Framework.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Copyright 2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pygments.lexer import Lexer
from pygments.token import Token
__all__ = ['RobotFrameworkLexer']
HEADING = Token.Generic.Heading
SETTING = Token.Keyword.Namespace
IMPORT = Token.Name.Namespace
TC_KW_NAME = Token.Generic.Subheading
KEYWORD = Token.Name.Function
ARGUMENT = Token.String
VARIABLE = Token.Name.Variable
COMMENT = Token.Comment
SEPARATOR = Token.Punctuation
SYNTAX = Token.Punctuation
GHERKIN = Token.Generic.Emph
ERROR = Token.Error
def normalize(string, remove=''):
string = string.lower()
for char in remove + ' ':
if char in string:
string = string.replace(char, '')
return string
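# For example, normalize('*** Test Cases ***', remove='*') == 'testcases',
# which is how table headers and setting names are matched below.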
class RobotFrameworkLexer(Lexer):
"""
For Robot Framework test data.
Supports both space and pipe separated plain text formats.
.. versionadded:: 1.6
"""
name = 'RobotFramework'
url = 'http://robotframework.org'
aliases = ['robotframework']
filenames = ['*.robot', '*.resource']
mimetypes = ['text/x-robotframework']
def __init__(self, **options):
options['tabsize'] = 2
options['encoding'] = 'UTF-8'
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
row_tokenizer = RowTokenizer()
var_tokenizer = VariableTokenizer()
index = 0
for row in text.splitlines():
for value, token in row_tokenizer.tokenize(row):
for value, token in var_tokenizer.tokenize(value, token):
if value:
yield index, token, str(value)
index += len(value)
class VariableTokenizer:
def tokenize(self, string, token):
var = VariableSplitter(string, identifiers='$@%&')
if var.start < 0 or token in (COMMENT, ERROR):
yield string, token
return
for value, token in self._tokenize(var, string, token):
if value:
yield value, token
def _tokenize(self, var, string, orig_token):
before = string[:var.start]
yield before, orig_token
yield var.identifier + '{', SYNTAX
yield from self.tokenize(var.base, VARIABLE)
yield '}', SYNTAX
if var.index is not None:
yield '[', SYNTAX
yield from self.tokenize(var.index, VARIABLE)
yield ']', SYNTAX
yield from self.tokenize(string[var.end:], orig_token)
class RowTokenizer:
def __init__(self):
self._table = UnknownTable()
self._splitter = RowSplitter()
testcases = TestCaseTable()
settings = SettingTable(testcases.set_default_template)
variables = VariableTable()
keywords = KeywordTable()
self._tables = {'settings': settings, 'setting': settings,
'metadata': settings,
'variables': variables, 'variable': variables,
'testcases': testcases, 'testcase': testcases,
'tasks': testcases, 'task': testcases,
'keywords': keywords, 'keyword': keywords,
'userkeywords': keywords, 'userkeyword': keywords}
def tokenize(self, row):
commented = False
heading = False
for index, value in enumerate(self._splitter.split(row)):
# First value, and every second after that, is a separator.
index, separator = divmod(index-1, 2)
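            # divmod(index - 1, 2): enumerate positions 0, 2, 4, ... yield a
            # remainder of 1 (separator cells), while positions 1, 3, 5, ...
            # become data cells numbered 0, 1, 2, ...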
if value.startswith('#'):
commented = True
elif index == 0 and value.startswith('*'):
self._table = self._start_table(value)
heading = True
yield from self._tokenize(value, index, commented,
separator, heading)
self._table.end_row()
def _start_table(self, header):
name = normalize(header, remove='*')
return self._tables.get(name, UnknownTable())
def _tokenize(self, value, index, commented, separator, heading):
if commented:
yield value, COMMENT
elif separator:
yield value, SEPARATOR
elif heading:
yield value, HEADING
else:
yield from self._table.tokenize(value, index)
class RowSplitter:
_space_splitter = re.compile('( {2,})')
_pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')
def split(self, row):
splitter = (row.startswith('| ') and self._split_from_pipes
or self._split_from_spaces)
yield from splitter(row)
yield '\n'
def _split_from_spaces(self, row):
yield '' # Start with (pseudo)separator similarly as with pipes
yield from self._space_splitter.split(row)
def _split_from_pipes(self, row):
_, separator, rest = self._pipe_splitter.split(row, 1)
yield separator
while self._pipe_splitter.search(rest):
cell, separator, rest = self._pipe_splitter.split(rest, 1)
yield cell
yield separator
yield rest
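# A minimal sketch of the two separator styles (the rows are illustrative):
# runs of two or more spaces split the first row, pipes split the second, and
# each result interleaves cell values with the separators that divide them.
def _row_splitter_demo():
    splitter = RowSplitter()
    spaces = list(splitter.split('Log    message'))
    pipes = list(splitter.split('| Log | message |'))
    return spaces, pipes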
class Tokenizer:
_tokens = None
def __init__(self):
self._index = 0
def tokenize(self, value):
values_and_tokens = self._tokenize(value, self._index)
self._index += 1
if isinstance(values_and_tokens, type(Token)):
values_and_tokens = [(value, values_and_tokens)]
return values_and_tokens
def _tokenize(self, value, index):
index = min(index, len(self._tokens) - 1)
return self._tokens[index]
def _is_assign(self, value):
if value.endswith('='):
value = value[:-1].strip()
var = VariableSplitter(value, identifiers='$@&')
return var.start == 0 and var.end == len(value)
class Comment(Tokenizer):
_tokens = (COMMENT,)
class Setting(Tokenizer):
_tokens = (SETTING, ARGUMENT)
_keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition',
                         'testteardown', 'taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate')
_import_settings = ('library', 'resource', 'variables')
_other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
                       'testtimeout', 'tasktimeout')
_custom_tokenizer = None
def __init__(self, template_setter=None):
Tokenizer.__init__(self)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 1 and self._template_setter:
self._template_setter(value)
if index == 0:
normalized = normalize(value)
if normalized in self._keyword_settings:
self._custom_tokenizer = KeywordCall(support_assign=False)
elif normalized in self._import_settings:
self._custom_tokenizer = ImportSetting()
elif normalized not in self._other_settings:
return ERROR
elif self._custom_tokenizer:
return self._custom_tokenizer.tokenize(value)
return Tokenizer._tokenize(self, value, index)
class ImportSetting(Tokenizer):
_tokens = (IMPORT, ARGUMENT)
class TestCaseSetting(Setting):
_keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
'template')
_import_settings = ()
_other_settings = ('documentation', 'tags', 'timeout')
def _tokenize(self, value, index):
if index == 0:
type = Setting._tokenize(self, value[1:-1], index)
return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)]
return Setting._tokenize(self, value, index)
class KeywordSetting(TestCaseSetting):
_keyword_settings = ('teardown',)
_other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags')
class Variable(Tokenizer):
_tokens = (SYNTAX, ARGUMENT)
def _tokenize(self, value, index):
if index == 0 and not self._is_assign(value):
return ERROR
return Tokenizer._tokenize(self, value, index)
class KeywordCall(Tokenizer):
_tokens = (KEYWORD, ARGUMENT)
def __init__(self, support_assign=True):
Tokenizer.__init__(self)
self._keyword_found = not support_assign
self._assigns = 0
def _tokenize(self, value, index):
if not self._keyword_found and self._is_assign(value):
self._assigns += 1
return SYNTAX # VariableTokenizer tokenizes this later.
if self._keyword_found:
return Tokenizer._tokenize(self, value, index - self._assigns)
self._keyword_found = True
return GherkinTokenizer().tokenize(value, KEYWORD)
class GherkinTokenizer:
_gherkin_prefix = re.compile('^(Given|When|Then|And|But) ', re.IGNORECASE)
def tokenize(self, value, token):
match = self._gherkin_prefix.match(value)
if not match:
return [(value, token)]
end = match.end()
return [(value[:end], GHERKIN), (value[end:], token)]
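# Illustrative note (not part of the original source): for example,
# GherkinTokenizer().tokenize('Given user logs in', KEYWORD) returns
# [('Given ', GHERKIN), ('user logs in', KEYWORD)].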
class TemplatedKeywordCall(Tokenizer):
_tokens = (ARGUMENT,)
class ForLoop(Tokenizer):
def __init__(self):
Tokenizer.__init__(self)
self._in_arguments = False
def _tokenize(self, value, index):
token = self._in_arguments and ARGUMENT or SYNTAX
if value.upper() in ('IN', 'IN RANGE'):
self._in_arguments = True
return token
class _Table:
_tokenizer_class = None
def __init__(self, prev_tokenizer=None):
self._tokenizer = self._tokenizer_class()
self._prev_tokenizer = prev_tokenizer
self._prev_values_on_row = []
def tokenize(self, value, index):
if self._continues(value, index):
self._tokenizer = self._prev_tokenizer
yield value, SYNTAX
else:
yield from self._tokenize(value, index)
self._prev_values_on_row.append(value)
def _continues(self, value, index):
return value == '...' and all(self._is_empty(t)
for t in self._prev_values_on_row)
def _is_empty(self, value):
return value in ('', '\\')
def _tokenize(self, value, index):
return self._tokenizer.tokenize(value)
def end_row(self):
self.__init__(prev_tokenizer=self._tokenizer)
class UnknownTable(_Table):
_tokenizer_class = Comment
def _continues(self, value, index):
return False
class VariableTable(_Table):
_tokenizer_class = Variable
class SettingTable(_Table):
_tokenizer_class = Setting
def __init__(self, template_setter, prev_tokenizer=None):
_Table.__init__(self, prev_tokenizer)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 0 and normalize(value) == 'testtemplate':
self._tokenizer = Setting(self._template_setter)
return _Table._tokenize(self, value, index)
def end_row(self):
self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
class TestCaseTable(_Table):
_setting_class = TestCaseSetting
_test_template = None
_default_template = None
@property
def _tokenizer_class(self):
if self._test_template or (self._default_template and
self._test_template is not False):
return TemplatedKeywordCall
return KeywordCall
def _continues(self, value, index):
return index > 0 and _Table._continues(self, value, index)
def _tokenize(self, value, index):
if index == 0:
if value:
self._test_template = None
return GherkinTokenizer().tokenize(value, TC_KW_NAME)
if index == 1 and self._is_setting(value):
if self._is_template(value):
self._test_template = False
self._tokenizer = self._setting_class(self.set_test_template)
else:
self._tokenizer = self._setting_class()
if index == 1 and self._is_for_loop(value):
self._tokenizer = ForLoop()
if index == 1 and self._is_empty(value):
return [(value, SYNTAX)]
return _Table._tokenize(self, value, index)
def _is_setting(self, value):
return value.startswith('[') and value.endswith(']')
def _is_template(self, value):
return normalize(value) == '[template]'
def _is_for_loop(self, value):
return value.startswith(':') and normalize(value, remove=':') == 'for'
def set_test_template(self, template):
self._test_template = self._is_template_set(template)
def set_default_template(self, template):
self._default_template = self._is_template_set(template)
def _is_template_set(self, template):
return normalize(template) not in ('', '\\', 'none', '${empty}')
class KeywordTable(TestCaseTable):
_tokenizer_class = KeywordCall
_setting_class = KeywordSetting
def _is_template(self, value):
return False
# Following code copied directly from Robot Framework 2.7.5.
class VariableSplitter:
def __init__(self, string, identifiers):
self.identifier = None
self.base = None
self.index = None
self.start = -1
self.end = -1
self._identifiers = identifiers
self._may_have_internal_variables = False
try:
self._split(string)
except ValueError:
pass
else:
self._finalize()
def get_replaced_base(self, variables):
if self._may_have_internal_variables:
return variables.replace_string(self.base)
return self.base
def _finalize(self):
self.identifier = self._variable_chars[0]
self.base = ''.join(self._variable_chars[2:-1])
self.end = self.start + len(self._variable_chars)
if self._has_list_or_dict_variable_index():
self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1])
self.end += len(self._list_and_dict_variable_index_chars)
def _has_list_or_dict_variable_index(self):
return self._list_and_dict_variable_index_chars\
and self._list_and_dict_variable_index_chars[-1] == ']'
def _split(self, string):
start_index, max_index = self._find_variable(string)
self.start = start_index
self._open_curly = 1
self._state = self._variable_state
self._variable_chars = [string[start_index], '{']
self._list_and_dict_variable_index_chars = []
self._string = string
start_index += 2
for index, char in enumerate(string[start_index:]):
index += start_index # Giving start to enumerate only in Py 2.6+
try:
self._state(char, index)
except StopIteration:
return
if index == max_index and not self._scanning_list_variable_index():
return
def _scanning_list_variable_index(self):
return self._state in [self._waiting_list_variable_index_state,
self._list_variable_index_state]
def _find_variable(self, string):
max_end_index = string.rfind('}')
if max_end_index == -1:
raise ValueError('No variable end found')
if self._is_escaped(string, max_end_index):
return self._find_variable(string[:max_end_index])
start_index = self._find_start_index(string, 1, max_end_index)
if start_index == -1:
raise ValueError('No variable start found')
return start_index, max_end_index
def _find_start_index(self, string, start, end):
index = string.find('{', start, end) - 1
if index < 0:
return -1
if self._start_index_is_ok(string, index):
return index
return self._find_start_index(string, index+2, end)
def _start_index_is_ok(self, string, index):
return string[index] in self._identifiers\
and not self._is_escaped(string, index)
def _is_escaped(self, string, index):
escaped = False
while index > 0 and string[index-1] == '\\':
index -= 1
escaped = not escaped
return escaped
def _variable_state(self, char, index):
self._variable_chars.append(char)
if char == '}' and not self._is_escaped(self._string, index):
self._open_curly -= 1
if self._open_curly == 0:
if not self._is_list_or_dict_variable():
raise StopIteration
self._state = self._waiting_list_variable_index_state
elif char in self._identifiers:
self._state = self._internal_variable_start_state
def _is_list_or_dict_variable(self):
        return self._variable_chars[0] in ('@', '&')
def _internal_variable_start_state(self, char, index):
self._state = self._variable_state
if char == '{':
self._variable_chars.append(char)
self._open_curly += 1
self._may_have_internal_variables = True
else:
self._variable_state(char, index)
def _waiting_list_variable_index_state(self, char, index):
if char != '[':
raise StopIteration
self._list_and_dict_variable_index_chars.append(char)
self._state = self._list_variable_index_state
def _list_variable_index_state(self, char, index):
self._list_and_dict_variable_index_chars.append(char)
if char == ']':
raise StopIteration
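if __name__ == '__main__':  # pragma: no cover
    # Minimal sketch (not part of the original module; the sample string is
    # made up): how VariableSplitter locates a list variable and its index.
    _demo = VariableSplitter('@{items}[0]', identifiers='$@&')
    print(_demo.identifier, _demo.base, _demo.index)  # -> @ items 0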
| 18,449 | Python | 32.363472 | 108 | 0.593962 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/cddl.py | """
pygments.lexers.cddl
~~~~~~~~~~~~~~~~~~~~
Lexer for the Concise data definition language (CDDL), a notational
convention to express CBOR and JSON data structures.
More information:
https://datatracker.ietf.org/doc/rfc8610/
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, words
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
Punctuation, String, Whitespace
__all__ = ['CddlLexer']
class CddlLexer(RegexLexer):
"""
Lexer for CDDL definitions.
.. versionadded:: 2.8
"""
name = "CDDL"
url = 'https://datatracker.ietf.org/doc/rfc8610/'
aliases = ["cddl"]
filenames = ["*.cddl"]
mimetypes = ["text/x-cddl"]
_prelude_types = [
"any",
"b64legacy",
"b64url",
"bigfloat",
"bigint",
"bignint",
"biguint",
"bool",
"bstr",
"bytes",
"cbor-any",
"decfrac",
"eb16",
"eb64legacy",
"eb64url",
"encoded-cbor",
"false",
"float",
"float16",
"float16-32",
"float32",
"float32-64",
"float64",
"int",
"integer",
"mime-message",
"nil",
"nint",
"null",
"number",
"regexp",
"tdate",
"text",
"time",
"true",
"tstr",
"uint",
"undefined",
"unsigned",
"uri",
]
_controls = [
".and",
".bits",
".cbor",
".cborseq",
".default",
".eq",
".ge",
".gt",
".le",
".lt",
".ne",
".regexp",
".size",
".within",
]
_re_id = (
r"[$@A-Z_a-z]"
r"(?:[\-\.]+(?=[$@0-9A-Z_a-z])|[$@0-9A-Z_a-z])*"
)
    # While the spec reads more like "an int must not start with 0", we use a
# lookahead here that says "after a 0 there must be no digit". This makes the
# '0' the invalid character in '01', which looks nicer when highlighted.
_re_uint = r"(?:0b[01]+|0x[0-9a-fA-F]+|[1-9]\d*|0(?!\d))"
_re_int = r"-?" + _re_uint
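    # For example (illustrative): "0", "7", "0b101" and "0x1F" each match
    # _re_uint in full, but in "01" the "0" fails the (?!\d) lookahead and
    # is flagged while "1" still matches as a number.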
tokens = {
"commentsandwhitespace": [(r"\s+", Whitespace), (r";.+$", Comment.Single)],
"root": [
include("commentsandwhitespace"),
# tag types
(r"#(\d\.{uint})?".format(uint=_re_uint), Keyword.Type), # type or any
# occurrence
(
r"({uint})?(\*)({uint})?".format(uint=_re_uint),
bygroups(Number, Operator, Number),
),
(r"\?|\+", Operator), # occurrence
(r"\^", Operator), # cuts
(r"(\.\.\.|\.\.)", Operator), # rangeop
(words(_controls, suffix=r"\b"), Operator.Word), # ctlops
# into choice op
(r"&(?=\s*({groupname}|\())".format(groupname=_re_id), Operator),
(r"~(?=\s*{})".format(_re_id), Operator), # unwrap op
(r"//|/(?!/)", Operator), # double und single slash
(r"=>|/==|/=|=", Operator),
(r"[\[\]{}\(\),<>:]", Punctuation),
# Bytestrings
(r"(b64)(')", bygroups(String.Affix, String.Single), "bstrb64url"),
(r"(h)(')", bygroups(String.Affix, String.Single), "bstrh"),
(r"'", String.Single, "bstr"),
# Barewords as member keys (must be matched before values, types, typenames,
# groupnames).
# Token type is String as barewords are always interpreted as such.
(r"({bareword})(\s*)(:)".format(bareword=_re_id),
bygroups(String, Whitespace, Punctuation)),
# predefined types
(words(_prelude_types, prefix=r"(?![\-_$@])\b", suffix=r"\b(?![\-_$@])"),
Name.Builtin),
# user-defined groupnames, typenames
(_re_id, Name.Class),
# values
(r"0b[01]+", Number.Bin),
(r"0o[0-7]+", Number.Oct),
(r"0x[0-9a-fA-F]+(\.[0-9a-fA-F]+)?p[+-]?\d+", Number.Hex), # hexfloat
(r"0x[0-9a-fA-F]+", Number.Hex), # hex
# Float
(r"{int}(?=(\.\d|e[+-]?\d))(?:\.\d+)?(?:e[+-]?\d+)?".format(int=_re_int),
Number.Float),
# Int
(_re_int, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
"bstrb64url": [
(r"'", String.Single, "#pop"),
include("commentsandwhitespace"),
(r"\\.", String.Escape),
(r"[0-9a-zA-Z\-_=]+", String.Single),
(r".", Error),
# (r";.+$", Token.Other),
],
"bstrh": [
(r"'", String.Single, "#pop"),
include("commentsandwhitespace"),
(r"\\.", String.Escape),
(r"[0-9a-fA-F]+", String.Single),
(r".", Error),
],
"bstr": [
(r"'", String.Single, "#pop"),
(r"\\.", String.Escape),
(r"[^'\\]+", String.Single),
],
}
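if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch (assumption: run directly; not part of upstream
    # Pygments). The CDDL sample is made up.
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    print(highlight("person = {name: tstr, ? age: uint}", CddlLexer(),
                    TerminalFormatter()))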
| 5,182 | Python | 28.787356 | 88 | 0.443458 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/markup.py | """
pygments.lexers.markup
~~~~~~~~~~~~~~~~~~~~~~
Lexers for non-HTML markup languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.html import XmlLexer
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.css import CssLexer
from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \
using, this, do_insertions, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other, Whitespace
from pygments.util import get_bool_opt, ClassNotFound
__all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer',
'MozPreprocHashLexer', 'MozPreprocPercentLexer',
'MozPreprocXulLexer', 'MozPreprocJavascriptLexer',
'MozPreprocCssLexer', 'MarkdownLexer', 'TiddlyWiki5Lexer']
class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
.. versionadded:: 0.6
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
.. versionadded:: 0.7
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'\}\}\}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'\{\{\{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
class RstLexer(RegexLexer):
"""
For reStructuredText markup.
.. versionadded:: 0.7
Additional options accepted:
`handlecodeblocks`
Highlight the contents of ``.. sourcecode:: language``,
``.. code:: language`` and ``.. code-block:: language``
directives with a lexer for the given language (default:
``True``).
.. versionadded:: 0.8
"""
name = 'reStructuredText'
url = 'https://docutils.sourceforge.io/rst.html'
aliases = ['restructuredtext', 'rst', 'rest']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
yield from do_insertions(ins, lexer.get_tokens_unprocessed(code))
# from docutils.parsers.rst.states
closers = '\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0'
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*)?\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote/citation target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list marker
(r'^( *)(:(?:\\\\|\\:|[^:\n])+:(?=\s))([ \t]*)',
bygroups(Text, Name.Class, Text)),
# Definition list
(r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*)?\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`]+', String),
(r'``' + end_string_suffix, String, '#pop'),
(r'`', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line both starts and ends with
text[p1+1] == text[p2-1]): # ...a sufficiently high header
return 0.5
class TexLexer(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
(r'\\$', Keyword),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
default('#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
class GroffLexer(RegexLexer):
"""
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
.. versionadded:: 0.6
"""
name = 'Groff'
aliases = ['groff', 'nroff', 'man']
filenames = ['*.[1-9]', '*.man', '*.1p', '*.3pm']
mimetypes = ['application/x-troff', 'text/troff']
tokens = {
'root': [
(r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
(r'\.', Punctuation, 'request'),
# Regular characters, slurp till we find a backslash or newline
(r'[^\\\n]+', Text, 'textline'),
default('textline'),
],
'textline': [
include('escapes'),
(r'[^\\\n]+', Text),
(r'\n', Text, '#pop'),
],
'escapes': [
# groff has many ways to write escapes.
(r'\\"[^\n]*', Comment),
(r'\\[fn]\w', String.Escape),
(r'\\\(.{2}', String.Escape),
(r'\\.\[.*\]', String.Escape),
(r'\\.', String.Escape),
(r'\\\n', Text, 'request'),
],
'request': [
(r'\n', Text, '#pop'),
include('escapes'),
(r'"[^\n"]+"', String.Double),
(r'\d+', Number),
(r'\S+', String),
(r'\s+', Text),
],
}
def analyse_text(text):
if text[:1] != '.':
return False
if text[:3] == '.\\"':
return True
if text[:4] == '.TH ':
return True
if text[1:3].isalnum() and text[3].isspace():
return 0.9
class MozPreprocHashLexer(RegexLexer):
"""
Lexer for Mozilla Preprocessor files (with '#' as the marker).
Other data is left untouched.
.. versionadded:: 2.0
"""
name = 'mozhashpreproc'
aliases = [name]
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^#', Comment.Preproc, ('expr', 'exprstart')),
(r'.+', Other),
],
'exprstart': [
(r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'),
(words((
'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif',
'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter',
'include', 'includesubst', 'error')),
Comment.Preproc, '#pop'),
],
'expr': [
(words(('!', '!=', '==', '&&', '||')), Operator),
(r'(defined)(\()', bygroups(Keyword, Punctuation)),
(r'\)', Punctuation),
(r'[0-9]+', Number.Decimal),
(r'__\w+?__', Name.Variable),
(r'@\w+?@', Name.Class),
(r'\w+', Name),
(r'\n', Text, '#pop'),
(r'\s+', Text),
(r'\S', Punctuation),
],
}
class MozPreprocPercentLexer(MozPreprocHashLexer):
"""
Lexer for Mozilla Preprocessor files (with '%' as the marker).
Other data is left untouched.
.. versionadded:: 2.0
"""
name = 'mozpercentpreproc'
aliases = [name]
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^%', Comment.Preproc, ('expr', 'exprstart')),
(r'.+', Other),
],
}
class MozPreprocXulLexer(DelegatingLexer):
"""
Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
`XmlLexer`.
.. versionadded:: 2.0
"""
name = "XUL+mozpreproc"
aliases = ['xul+mozpreproc']
filenames = ['*.xul.in']
mimetypes = []
def __init__(self, **options):
super().__init__(XmlLexer, MozPreprocHashLexer, **options)
class MozPreprocJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
`JavascriptLexer`.
.. versionadded:: 2.0
"""
name = "Javascript+mozpreproc"
aliases = ['javascript+mozpreproc']
filenames = ['*.js.in']
mimetypes = []
def __init__(self, **options):
super().__init__(JavascriptLexer, MozPreprocHashLexer, **options)
class MozPreprocCssLexer(DelegatingLexer):
"""
Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
`CssLexer`.
.. versionadded:: 2.0
"""
name = "CSS+mozpreproc"
aliases = ['css+mozpreproc']
filenames = ['*.css.in']
mimetypes = []
def __init__(self, **options):
super().__init__(CssLexer, MozPreprocPercentLexer, **options)
class MarkdownLexer(RegexLexer):
"""
For Markdown markup.
.. versionadded:: 2.2
"""
name = 'Markdown'
url = 'https://daringfireball.net/projects/markdown/'
aliases = ['markdown', 'md']
filenames = ['*.md', '*.markdown']
mimetypes = ["text/x-markdown"]
flags = re.MULTILINE
def _handle_codeblock(self, match):
"""
match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
"""
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), String.Backtick, match.group(1)
yield match.start(2), String.Backtick, match.group(2)
        yield match.start(3), Text, match.group(3)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
                lexer = get_lexer_by_name(match.group(2).strip())
except ClassNotFound:
pass
code = match.group(4)
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(4), String, code
else:
yield from do_insertions([], lexer.get_tokens_unprocessed(code))
yield match.start(5), String.Backtick, match.group(5)
tokens = {
'root': [
# heading with '#' prefix (atx-style)
(r'(^#[^#].+)(\n)', bygroups(Generic.Heading, Text)),
# subheading with '#' prefix (atx-style)
(r'(^#{2,6}[^#].+)(\n)', bygroups(Generic.Subheading, Text)),
# heading with '=' underlines (Setext-style)
(r'^(.+)(\n)(=+)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# subheading with '-' underlines (Setext-style)
(r'^(.+)(\n)(-+)(\n)', bygroups(Generic.Subheading, Text, Generic.Subheading, Text)),
# task list
(r'^(\s*)([*-] )(\[[ xX]\])( .+\n)',
bygroups(Whitespace, Keyword, Keyword, using(this, state='inline'))),
# bulleted list
(r'^(\s*)([*-])(\s)(.+\n)',
bygroups(Whitespace, Keyword, Whitespace, using(this, state='inline'))),
# numbered list
(r'^(\s*)([0-9]+\.)( .+\n)',
bygroups(Whitespace, Keyword, using(this, state='inline'))),
# quote
(r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
# code block fenced by 3 backticks
(r'^(\s*```\n[\w\W]*?^\s*```$\n)', String.Backtick),
# code block with language
(r'^(\s*```)(\w+)(\n)([\w\W]*?)(^\s*```$\n)', _handle_codeblock),
include('inline'),
],
'inline': [
# escape
(r'\\.', Text),
# inline code
(r'([^`]?)(`[^`\n]+`)', bygroups(Text, String.Backtick)),
# warning: the following rules eat outer tags.
# eg. **foo _bar_ baz** => foo and baz are not recognized as bold
# bold fenced by '**'
(r'([^\*]?)(\*\*[^* \n][^*\n]*\*\*)', bygroups(Text, Generic.Strong)),
# bold fenced by '__'
(r'([^_]?)(__[^_ \n][^_\n]*__)', bygroups(Text, Generic.Strong)),
# italics fenced by '*'
(r'([^\*]?)(\*[^* \n][^*\n]*\*)', bygroups(Text, Generic.Emph)),
# italics fenced by '_'
(r'([^_]?)(_[^_ \n][^_\n]*_)', bygroups(Text, Generic.Emph)),
# strikethrough
(r'([^~]?)(~~[^~ \n][^~\n]*~~)', bygroups(Text, Generic.Deleted)),
# mentions and topics (twitter and github stuff)
(r'[@#][\w/:]+', Name.Entity),
# (image?) links eg: 
(r'(!?\[)([^]]+)(\])(\()([^)]+)(\))',
bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)),
# reference-style links, e.g.:
# [an example][id]
# [id]: http://example.com/
(r'(\[)([^]]+)(\])(\[)([^]]*)(\])',
bygroups(Text, Name.Tag, Text, Text, Name.Label, Text)),
(r'^(\s*\[)([^]]*)(\]:\s*)(.+)',
bygroups(Text, Name.Label, Text, Name.Attribute)),
# general text, must come last!
(r'[^\\\s]+', Text),
(r'.', Text),
],
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
class TiddlyWiki5Lexer(RegexLexer):
"""
For TiddlyWiki5 markup.
.. versionadded:: 2.7
"""
name = 'tiddler'
url = 'https://tiddlywiki.com/#TiddlerFiles'
aliases = ['tid']
filenames = ['*.tid']
mimetypes = ["text/vnd.tiddlywiki"]
flags = re.MULTILINE
def _handle_codeblock(self, match):
"""
match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
"""
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), String, match.group(1)
yield match.start(2), String, match.group(2)
yield match.start(3), Text, match.group(3)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(2).strip())
except ClassNotFound:
pass
code = match.group(4)
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(4), String, code
return
yield from do_insertions([], lexer.get_tokens_unprocessed(code))
yield match.start(5), String, match.group(5)
def _handle_cssblock(self, match):
"""
match args: 1:style tag 2:newline, 3:code, 4:closing style tag
"""
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), String, match.group(1)
yield match.start(2), String, match.group(2)
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name('css')
except ClassNotFound:
pass
code = match.group(3)
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(3), String, code
return
yield from do_insertions([], lexer.get_tokens_unprocessed(code))
yield match.start(4), String, match.group(4)
tokens = {
'root': [
# title in metadata section
(r'^(title)(:\s)(.+\n)', bygroups(Keyword, Text, Generic.Heading)),
# headings
(r'^(!)([^!].+\n)', bygroups(Generic.Heading, Text)),
(r'^(!{2,6})(.+\n)', bygroups(Generic.Subheading, Text)),
# bulleted or numbered lists or single-line block quotes
# (can be mixed)
(r'^(\s*)([*#>]+)(\s*)(.+\n)',
bygroups(Text, Keyword, Text, using(this, state='inline'))),
# multi-line block quotes
(r'^(<<<.*\n)([\w\W]*?)(^<<<.*$)', bygroups(String, Text, String)),
# table header
(r'^(\|.*?\|h)$', bygroups(Generic.Strong)),
# table footer or caption
(r'^(\|.*?\|[cf])$', bygroups(Generic.Emph)),
# table class
(r'^(\|.*?\|k)$', bygroups(Name.Tag)),
# definitions
(r'^(;.*)$', bygroups(Generic.Strong)),
# text block
(r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
# code block with language
(r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock),
# CSS style block
(r'^(<style>)(\n)([\w\W]*?)(^</style>$)', _handle_cssblock),
include('keywords'),
include('inline'),
],
'keywords': [
(words((
'\\define', '\\end', 'caption', 'created', 'modified', 'tags',
'title', 'type'), prefix=r'^', suffix=r'\b'),
Keyword),
],
'inline': [
# escape
(r'\\.', Text),
# created or modified date
(r'\d{17}', Number.Integer),
# italics
(r'(\s)(//[^/]+//)((?=\W|\n))',
bygroups(Text, Generic.Emph, Text)),
# superscript
(r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)),
# subscript
(r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)),
# underscore
(r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)),
# bold
(r"(\s)(''[^']+'')((?=\W|\n))",
bygroups(Text, Generic.Strong, Text)),
# strikethrough
(r'(\s)(~~[^~]+~~)((?=\W|\n))',
bygroups(Text, Generic.Deleted, Text)),
# TiddlyWiki variables
(r'<<[^>]+>>', Name.Tag),
(r'\$\$[^$]+\$\$', Name.Tag),
(r'\$\([^)]+\)\$', Name.Tag),
# TiddlyWiki style or class
(r'^@@.*$', Name.Tag),
# HTML tags
(r'</?[^>]+>', Name.Tag),
# inline code
(r'`[^`]+`', String.Backtick),
# HTML escaped symbols
(r'&\S*?;', String.Regex),
# Wiki links
(r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text)),
# External links
(r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})',
bygroups(Text, Name.Tag, Text, Name.Attribute, Text)),
# Transclusion
(r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)),
# URLs
(r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)),
# general text, must come last!
(r'[\w]+', Text),
(r'.', Text)
],
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
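if __name__ == '__main__':  # pragma: no cover
    # Minimal token-dump sketch (assumption: run directly; not part of
    # upstream Pygments). The Markdown sample is made up.
    for pos, token, text in MarkdownLexer().get_tokens_unprocessed(
            '# Title\n**bold** and *italic* text\n'):
        print(pos, token, repr(text))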
| 26,797 | Python | 33.984334 | 101 | 0.462589 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/unicon.py | """
pygments.lexers.unicon
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Icon and Unicon languages, including ucode VM.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['IconLexer', 'UcodeLexer', 'UniconLexer']
class UniconLexer(RegexLexer):
"""
For Unicon source code.
.. versionadded:: 2.4
"""
name = 'Unicon'
aliases = ['unicon']
filenames = ['*.icn']
mimetypes = ['text/unicon']
flags = re.MULTILINE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'#.*?\n', Comment.Single),
(r'[^\S\n]+', Text),
(r'class|method|procedure', Keyword.Declaration, 'subprogram'),
(r'(record)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
(r'(#line|\$C|\$Cend|\$define|\$else|\$endif|\$error|\$ifdef|'
r'\$ifndef|\$include|\$line|\$undef)\b', Keyword.PreProc),
(r'(&null|&fail)\b', Keyword.Constant),
(r'&allocated|&ascii|&clock|&collections|&column|&col|&control|'
             r'&cset|&current|&dateline|&date|&digits|&dump|'
r'&errno|&errornumber|&errortext|&errorvalue|&error|&errout|'
r'&eventcode|&eventvalue|&eventsource|&e|'
r'&features|&file|&host|&input|&interval|&lcase|&letters|'
r'&level|&line|&ldrag|&lpress|&lrelease|'
r'&main|&mdrag|&meta|&mpress|&mrelease|&now|&output|'
r'&phi|&pick|&pi|&pos|&progname|'
             r'&random|&rdrag|&regions|&resize|&row|&rpress|&rrelease|'
r'&shift|&source|&storage|&subject|'
r'&time|&trace|&ucase|&version|'
r'&window|&x|&y', Keyword.Reserved),
(r'(by|of|not|to)\b', Keyword.Reserved),
(r'(global|local|static|abstract)\b', Keyword.Reserved),
(r'package|link|import', Keyword.Declaration),
(words((
'break', 'case', 'create', 'critical', 'default', 'end', 'all',
'do', 'else', 'every', 'fail', 'if', 'import', 'initial',
'initially', 'invocable', 'next',
'repeat', 'return', 'suspend',
'then', 'thread', 'until', 'while'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(words((
'Abort', 'abs', 'acos', 'Active', 'Alert', 'any', 'Any', 'Arb',
'Arbno', 'args', 'array', 'asin', 'atan', 'atanh', 'Attrib',
'Bal', 'bal', 'Bg', 'Break', 'Breakx',
'callout', 'center', 'char', 'chdir', 'chmod', 'chown', 'chroot',
'classname', 'Clip', 'Clone', 'close', 'cofail', 'collect',
'Color', 'ColorValue', 'condvar', 'constructor', 'copy',
'CopyArea', 'cos', 'Couple', 'crypt', 'cset', 'ctime',
'dbcolumns', 'dbdriver', 'dbkeys', 'dblimits', 'dbproduct',
'dbtables', 'delay', 'delete', 'detab', 'display', 'DrawArc',
'DrawCircle', 'DrawCube', 'DrawCurve', 'DrawCylinder',
'DrawDisk', 'DrawImage', 'DrawLine', 'DrawPoint', 'DrawPolygon',
'DrawRectangle', 'DrawSegment', 'DrawSphere', 'DrawString',
'DrawTorus', 'dtor',
'entab', 'EraseArea', 'errorclear', 'Event', 'eventmask',
'EvGet', 'EvSend', 'exec', 'exit', 'exp', 'Eye',
'Fail', 'fcntl', 'fdup', 'Fence', 'fetch', 'Fg', 'fieldnames',
'filepair', 'FillArc', 'FillCircle', 'FillPolygon',
'FillRectangle', 'find', 'flock', 'flush', 'Font', 'fork',
'FreeColor', 'FreeSpace', 'function',
'get', 'getch', 'getche', 'getegid', 'getenv', 'geteuid',
'getgid', 'getgr', 'gethost', 'getpgrp', 'getpid', 'getppid',
'getpw', 'getrusage', 'getserv', 'GetSpace', 'gettimeofday',
'getuid', 'globalnames', 'GotoRC', 'GotoXY', 'gtime', 'hardlink',
'iand', 'icom', 'IdentityMatrix', 'image', 'InPort', 'insert',
'Int86', 'integer', 'ioctl', 'ior', 'ishift', 'istate', 'ixor',
'kbhit', 'key', 'keyword', 'kill',
'left', 'Len', 'list', 'load', 'loadfunc', 'localnames',
'lock', 'log', 'Lower', 'lstat',
'many', 'map', 'match', 'MatrixMode', 'max', 'member',
'membernames', 'methodnames', 'methods', 'min', 'mkdir', 'move',
'MultMatrix', 'mutex',
'name', 'NewColor', 'Normals', 'NotAny', 'numeric',
'open', 'opencl', 'oprec', 'ord', 'OutPort',
'PaletteChars', 'PaletteColor', 'PaletteKey', 'paramnames',
'parent', 'Pattern', 'Peek', 'Pending', 'pipe', 'Pixel',
'PlayAudio', 'Poke', 'pop', 'PopMatrix', 'Pos', 'pos',
'proc', 'pull', 'push', 'PushMatrix', 'PushRotate', 'PushScale',
'PushTranslate', 'put',
'QueryPointer',
'Raise', 'read', 'ReadImage', 'readlink', 'reads', 'ready',
'real', 'receive', 'Refresh', 'Rem', 'remove', 'rename',
'repl', 'reverse', 'right', 'rmdir', 'Rotate', 'Rpos',
'Rtab', 'rtod', 'runerr',
'save', 'Scale', 'seek', 'select', 'send', 'seq',
'serial', 'set', 'setenv', 'setgid', 'setgrent',
'sethostent', 'setpgrp', 'setpwent', 'setservent',
'setuid', 'signal', 'sin', 'sort', 'sortf', 'Span',
'spawn', 'sql', 'sqrt', 'stat', 'staticnames', 'stop',
'StopAudio', 'string', 'structure', 'Succeed', 'Swi',
'symlink', 'sys_errstr', 'system', 'syswrite',
'Tab', 'tab', 'table', 'tan',
'Texcoord', 'Texture', 'TextWidth', 'Translate',
'trap', 'trim', 'truncate', 'trylock', 'type',
'umask', 'Uncouple', 'unlock', 'upto', 'utime',
'variable', 'VAttrib',
'wait', 'WAttrib', 'WDefault', 'WFlush', 'where',
'WinAssociate', 'WinButton', 'WinColorDialog', 'WindowContents',
'WinEditRegion', 'WinFontDialog', 'WinMenuBar', 'WinOpenDialog',
'WinPlayMedia', 'WinSaveDialog', 'WinScrollBar', 'WinSelectDialog',
'write', 'WriteImage', 'writes', 'WSection',
'WSync'), prefix=r'\b', suffix=r'\b'),
Name.Function),
include('numbers'),
(r'<@|<<@|>@|>>@|\.>|->|===|~===|\*\*|\+\+|--|\.|~==|~=|<=|>=|==|'
r'=|<<=|<<|>>=|>>|:=:|:=|->|<->|\+:=|\|', Operator),
(r'"(?:[^\\"]|\\.)*"', String),
(r"'(?:[^\\']|\\.)*'", String.Character),
(r'[*<>+=/&!?@~\\-]', Operator),
(r'\^', Operator),
(r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
(r"[\[\]]", Punctuation),
(r"<>|=>|[()|:;,.'`{}%&?]", Punctuation),
(r'\n+', Text),
],
'numbers': [
(r'\b([+-]?([2-9]|[12][0-9]|3[0-6])[rR][0-9a-zA-Z]+)\b', Number.Hex),
(r'[+-]?[0-9]*\.([0-9]*)([Ee][+-]?[0-9]*)?', Number.Float),
(r'\b([+-]?[0-9]+[KMGTPkmgtp]?)\b', Number.Integer),
],
'subprogram': [
(r'\(', Punctuation, ('#pop', 'formal_part')),
(r';', Punctuation, '#pop'),
(r'"[^"]+"|\w+', Name.Function),
include('root'),
],
'type_def': [
(r'\(', Punctuation, 'formal_part'),
],
'formal_part': [
(r'\)', Punctuation, '#pop'),
(r'\w+', Name.Variable),
(r',', Punctuation),
(r'(:string|:integer|:real)\b', Keyword.Reserved),
include('root'),
],
}
class IconLexer(RegexLexer):
"""
Lexer for Icon.
.. versionadded:: 1.6
"""
name = 'Icon'
aliases = ['icon']
filenames = ['*.icon', '*.ICON']
mimetypes = []
flags = re.MULTILINE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'#.*?\n', Comment.Single),
(r'[^\S\n]+', Text),
(r'class|method|procedure', Keyword.Declaration, 'subprogram'),
(r'(record)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
(r'(#line|\$C|\$Cend|\$define|\$else|\$endif|\$error|\$ifdef|'
r'\$ifndef|\$include|\$line|\$undef)\b', Keyword.PreProc),
(r'(&null|&fail)\b', Keyword.Constant),
(r'&allocated|&ascii|&clock|&collections|&column|&col|&control|'
             r'&cset|&current|&dateline|&date|&digits|&dump|'
r'&errno|&errornumber|&errortext|&errorvalue|&error|&errout|'
r'&eventcode|&eventvalue|&eventsource|&e|'
r'&features|&file|&host|&input|&interval|&lcase|&letters|'
r'&level|&line|&ldrag|&lpress|&lrelease|'
r'&main|&mdrag|&meta|&mpress|&mrelease|&now|&output|'
r'&phi|&pick|&pi|&pos|&progname|'
             r'&random|&rdrag|&regions|&resize|&row|&rpress|&rrelease|'
r'&shift|&source|&storage|&subject|'
r'&time|&trace|&ucase|&version|'
r'&window|&x|&y', Keyword.Reserved),
(r'(by|of|not|to)\b', Keyword.Reserved),
(r'(global|local|static)\b', Keyword.Reserved),
(r'link', Keyword.Declaration),
(words((
'break', 'case', 'create', 'default', 'end', 'all',
'do', 'else', 'every', 'fail', 'if', 'initial',
'invocable', 'next',
'repeat', 'return', 'suspend',
'then', 'until', 'while'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(words((
'abs', 'acos', 'Active', 'Alert', 'any',
'args', 'array', 'asin', 'atan', 'atanh', 'Attrib',
'bal', 'Bg',
'callout', 'center', 'char', 'chdir', 'chmod', 'chown', 'chroot',
'Clip', 'Clone', 'close', 'cofail', 'collect',
'Color', 'ColorValue', 'condvar', 'copy',
'CopyArea', 'cos', 'Couple', 'crypt', 'cset', 'ctime',
'delay', 'delete', 'detab', 'display', 'DrawArc',
'DrawCircle', 'DrawCube', 'DrawCurve', 'DrawCylinder',
'DrawDisk', 'DrawImage', 'DrawLine', 'DrawPoint', 'DrawPolygon',
'DrawRectangle', 'DrawSegment', 'DrawSphere', 'DrawString',
'DrawTorus', 'dtor',
'entab', 'EraseArea', 'errorclear', 'Event', 'eventmask',
'EvGet', 'EvSend', 'exec', 'exit', 'exp', 'Eye',
'fcntl', 'fdup', 'fetch', 'Fg', 'fieldnames',
'FillArc', 'FillCircle', 'FillPolygon',
'FillRectangle', 'find', 'flock', 'flush', 'Font',
'FreeColor', 'FreeSpace', 'function',
'get', 'getch', 'getche', 'getenv',
'GetSpace', 'gettimeofday',
'getuid', 'globalnames', 'GotoRC', 'GotoXY', 'gtime', 'hardlink',
'iand', 'icom', 'IdentityMatrix', 'image', 'InPort', 'insert',
'Int86', 'integer', 'ioctl', 'ior', 'ishift', 'istate', 'ixor',
'kbhit', 'key', 'keyword', 'kill',
'left', 'Len', 'list', 'load', 'loadfunc', 'localnames',
'lock', 'log', 'Lower', 'lstat',
'many', 'map', 'match', 'MatrixMode', 'max', 'member',
'membernames', 'methodnames', 'methods', 'min', 'mkdir', 'move',
'MultMatrix', 'mutex',
'name', 'NewColor', 'Normals', 'numeric',
'open', 'opencl', 'oprec', 'ord', 'OutPort',
'PaletteChars', 'PaletteColor', 'PaletteKey', 'paramnames',
'parent', 'Pattern', 'Peek', 'Pending', 'pipe', 'Pixel',
'Poke', 'pop', 'PopMatrix', 'Pos', 'pos',
'proc', 'pull', 'push', 'PushMatrix', 'PushRotate', 'PushScale',
'PushTranslate', 'put',
'QueryPointer',
'Raise', 'read', 'ReadImage', 'readlink', 'reads', 'ready',
'real', 'receive', 'Refresh', 'Rem', 'remove', 'rename',
'repl', 'reverse', 'right', 'rmdir', 'Rotate', 'Rpos',
'rtod', 'runerr',
'save', 'Scale', 'seek', 'select', 'send', 'seq',
'serial', 'set', 'setenv',
'setuid', 'signal', 'sin', 'sort', 'sortf',
'spawn', 'sql', 'sqrt', 'stat', 'staticnames', 'stop',
'string', 'structure', 'Swi',
'symlink', 'sys_errstr', 'system', 'syswrite',
'tab', 'table', 'tan',
'Texcoord', 'Texture', 'TextWidth', 'Translate',
'trap', 'trim', 'truncate', 'trylock', 'type',
'umask', 'Uncouple', 'unlock', 'upto', 'utime',
'variable',
'wait', 'WAttrib', 'WDefault', 'WFlush', 'where',
'WinAssociate', 'WinButton', 'WinColorDialog', 'WindowContents',
'WinEditRegion', 'WinFontDialog', 'WinMenuBar', 'WinOpenDialog',
'WinPlayMedia', 'WinSaveDialog', 'WinScrollBar', 'WinSelectDialog',
'write', 'WriteImage', 'writes', 'WSection',
'WSync'), prefix=r'\b', suffix=r'\b'),
Name.Function),
include('numbers'),
(r'===|~===|\*\*|\+\+|--|\.|==|~==|<=|>=|=|~=|<<=|<<|>>=|>>|'
r':=:|:=|<->|<-|\+:=|\|\||\|', Operator),
(r'"(?:[^\\"]|\\.)*"', String),
(r"'(?:[^\\']|\\.)*'", String.Character),
(r'[*<>+=/&!?@~\\-]', Operator),
(r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
(r"[\[\]]", Punctuation),
(r"<>|=>|[()|:;,.'`{}%\^&?]", Punctuation),
(r'\n+', Text),
],
'numbers': [
(r'\b([+-]?([2-9]|[12][0-9]|3[0-6])[rR][0-9a-zA-Z]+)\b', Number.Hex),
(r'[+-]?[0-9]*\.([0-9]*)([Ee][+-]?[0-9]*)?', Number.Float),
(r'\b([+-]?[0-9]+[KMGTPkmgtp]?)\b', Number.Integer),
],
'subprogram': [
(r'\(', Punctuation, ('#pop', 'formal_part')),
(r';', Punctuation, '#pop'),
(r'"[^"]+"|\w+', Name.Function),
include('root'),
],
'type_def': [
(r'\(', Punctuation, 'formal_part'),
],
'formal_part': [
(r'\)', Punctuation, '#pop'),
(r'\w+', Name.Variable),
(r',', Punctuation),
(r'(:string|:integer|:real)\b', Keyword.Reserved),
include('root'),
],
}
class UcodeLexer(RegexLexer):
"""
Lexer for Icon ucode files.
.. versionadded:: 2.4
"""
name = 'ucode'
aliases = ['ucode']
filenames = ['*.u', '*.u1', '*.u2']
mimetypes = []
flags = re.MULTILINE
tokens = {
'root': [
(r'(#.*\n)', Comment),
(words((
'con', 'declend', 'end',
'global',
'impl', 'invocable',
'lab', 'link', 'local',
'record',
'uid', 'unions',
'version'),
prefix=r'\b', suffix=r'\b'),
Name.Function),
(words((
'colm', 'filen', 'line', 'synt'),
prefix=r'\b', suffix=r'\b'),
Comment),
(words((
'asgn',
'bang', 'bscan',
'cat', 'ccase', 'chfail',
'coact', 'cofail', 'compl',
'coret', 'create', 'cset',
'diff', 'div', 'dup',
'efail', 'einit', 'end', 'eqv', 'eret',
'error', 'escan', 'esusp',
'field',
'goto',
'init', 'int', 'inter',
'invoke',
'keywd',
'lconcat', 'lexeq', 'lexge',
'lexgt', 'lexle', 'lexlt', 'lexne',
'limit', 'llist', 'lsusp',
'mark', 'mark0', 'minus', 'mod', 'mult',
'neg', 'neqv', 'nonnull', 'noop', 'null',
'number', 'numeq', 'numge', 'numgt',
'numle', 'numlt', 'numne',
'pfail', 'plus', 'pnull', 'pop', 'power',
'pret', 'proc', 'psusp', 'push1', 'pushn1',
'random', 'rasgn', 'rcv', 'rcvbk', 'real',
'refresh', 'rswap',
'sdup', 'sect', 'size', 'snd', 'sndbk',
'str', 'subsc', 'swap',
'tabmat', 'tally', 'toby', 'trace',
'unmark',
'value', 'var'), prefix=r'\b', suffix=r'\b'),
Keyword.Declaration),
(words((
'any',
'case',
'endcase', 'endevery', 'endif',
'endifelse', 'endrepeat', 'endsuspend',
'enduntil', 'endwhile', 'every',
'if', 'ifelse',
'repeat',
'suspend',
'until',
'while'),
prefix=r'\b', suffix=r'\b'),
Name.Constant),
(r'\d+(\s*|\.$|$)', Number.Integer),
(r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float),
(r"(<>|=>|[()|:;,.'`]|[{}]|[%^]|[&?])", Punctuation),
(r'\s+\b', Text),
(r'[\w-]+', Text),
],
}
def analyse_text(text):
"""endsuspend and endrepeat are unique to this language, and
\\self, /self doesn't seem to get used anywhere else either."""
result = 0
if 'endsuspend' in text:
result += 0.1
if 'endrepeat' in text:
result += 0.1
if ':=' in text:
result += 0.01
if 'procedure' in text and 'end' in text:
result += 0.01
# This seems quite unique to unicon -- doesn't appear in any other
# example source we have (A quick search reveals that \SELF appears in
# Perl/Raku code)
if r'\self' in text and r'/self' in text:
result += 0.5
return result
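if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch (assumption: run directly; not part of upstream
    # Pygments). The additive heuristic above scores about 0.21 for this
    # made-up snippet: both unique keywords plus 'procedure' and 'end'.
    print(UcodeLexer.analyse_text('procedure main(); endrepeat endsuspend end'))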
| 18,512 | Python | 43.934466 | 83 | 0.43723 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/thingsdb.py | """
pygments.lexers.thingsdb
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the ThingsDB language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Comment, Keyword, Name, Number, String, Text, \
Operator, Punctuation, Whitespace
__all__ = ['ThingsDBLexer']
class ThingsDBLexer(RegexLexer):
"""
Lexer for the ThingsDB programming language.
.. versionadded:: 2.9
"""
name = 'ThingsDB'
aliases = ['ti', 'thingsdb']
filenames = ['*.ti']
tokens = {
'root': [
include('expression'),
],
'expression': [
include('comments'),
include('whitespace'),
# numbers
(r'[-+]?0b[01]+', Number.Bin),
(r'[-+]?0o[0-8]+', Number.Oct),
(r'([-+]?0x[0-9a-fA-F]+)', Number.Hex),
(r'[-+]?[0-9]+', Number.Integer),
(r'[-+]?((inf|nan)([^0-9A-Za-z_]|$)|[0-9]*\.[0-9]+(e[+-][0-9]+)?)',
Number.Float),
# strings
(r'(?:"(?:[^"]*)")+', String.Double),
(r"(?:'(?:[^']*)')+", String.Single),
# literals
(r'(true|false|nil)\b', Keyword.Constant),
# regular expressions
(r'(/[^/\\]*(?:\\.[^/\\]*)*/i?)', String.Regex),
# thing id's
(r'#[0-9]+', Comment.Preproc),
# name, assignments and functions
include('names'),
(r'[(){}\[\],;]', Punctuation),
(r'[+\-*/%&|<>^!~@=:?]', Operator),
],
'names': [
(r'(\.)'
r'(add|call|contains|del|endswith|extend|filter|find|findindex|'
r'get|has|id|indexof|keys|len|lower|map|pop|push|remove|set|sort|'
r'splice|startswith|test|unwrap|upper|values|wrap)'
r'(\()',
bygroups(Name.Function, Name.Function, Punctuation), 'arguments'),
(r'(array|assert|assert_err|auth_err|backup_info|backups_info|'
r'bad_data_err|bool|closure|collection_info|collections_info|'
r'counters|deep|del_backup|del_collection|del_expired|del_node|'
r'del_procedure|del_token|del_type|del_user|err|float|'
r'forbidden_err|grant|int|isarray|isascii|isbool|isbytes|iserr|'
r'isfloat|isinf|isint|islist|isnan|isnil|israw|isset|isstr|'
r'isthing|istuple|isutf8|lookup_err|max_quota_err|mod_type|new|'
r'new_backup|new_collection|new_node|new_procedure|new_token|'
r'new_type|new_user|node_err|node_info|nodes_info|now|'
r'num_arguments_err|operation_err|overflow_err|procedure_doc|'
r'procedure_info|procedures_info|raise|refs|rename_collection|'
r'rename_user|reset_counters|return|revoke|run|set_log_level|set|'
r'set_quota|set_type|shutdown|str|syntax_err|thing|try|type|'
r'type_err|type_count|type_info|types_info|user_info|users_info|'
r'value_err|wse|zero_div_err)'
r'(\()',
bygroups(Name.Function, Punctuation),
'arguments'),
(r'(\.[A-Za-z_][0-9A-Za-z_]*)'
r'(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'\.[A-Za-z_][0-9A-Za-z_]*', Name.Attribute),
(r'([A-Za-z_][0-9A-Za-z_]*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator)),
(r'[A-Za-z_][0-9A-Za-z_]*', Name.Variable),
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
],
'comments': [
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'arguments': [
include('expression'),
(',', Punctuation),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
],
}
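if __name__ == '__main__':  # pragma: no cover
    # Minimal sketch (assumption: run directly; not part of upstream
    # Pygments). Dumps the token stream for a made-up ThingsDB snippet.
    for token, text in ThingsDBLexer().get_tokens('x = 0x2A; // answer\n'):
        print(token, repr(text))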
| 4,228 | Python | 35.145299 | 79 | 0.494087 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/functional.py | """
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \
NewLispLexer, ShenLexer
from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \
KokaLexer
from pygments.lexers.theorem import CoqLexer
from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \
ElixirConsoleLexer, ElixirLexer
from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer
__all__ = []
| 674 | Python | 31.142856 | 77 | 0.738872 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/qlik.py | """
pygments.lexers.qlik
~~~~~~~~~~~~~~~~~~~~
Lexer for the qlik scripting language
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text
from pygments.lexers._qlik_builtins import OPERATORS_LIST, STATEMENT_LIST, \
SCRIPT_FUNCTIONS, CONSTANT_LIST
__all__ = ["QlikLexer"]
class QlikLexer(RegexLexer):
"""
Lexer for qlik code, including .qvs files
.. versionadded:: 2.12
"""
name = "Qlik"
aliases = ["qlik", "qlikview", "qliksense", "qlikscript"]
filenames = ["*.qvs", "*.qvw"]
flags = re.IGNORECASE
tokens = {
# Handle multi-line comments
"comment": [
(r"\*/", Comment.Multiline, "#pop"),
(r"[^*]+", Comment.Multiline),
],
# Handle numbers
"numerics": [
(r"\b\d+\.\d+(e\d+)?[fd]?\b", Number.Float),
(r"\b\d+\b", Number.Integer),
],
# Handle variable names in things
"interp": [
(
r"(\$\()(\w+)(\))",
bygroups(String.Interpol, Name.Variable, String.Interpol),
),
],
# Handle strings
"string": [
(r"'", String, "#pop"),
include("interp"),
(r"[^'$]+", String),
(r"\$", String),
],
        # Handle the right-hand side of a let/set assignment, up to ';'
"assignment": [
(r";", Punctuation, "#pop"),
include("root"),
],
"field_name_quote": [
(r'"', String.Symbol, "#pop"),
include("interp"),
(r"[^\"$]+", String.Symbol),
(r"\$", String.Symbol),
],
"field_name_bracket": [
(r"\]", String.Symbol, "#pop"),
include("interp"),
(r"[^\]$]+", String.Symbol),
(r"\$", String.Symbol),
],
"function": [(r"\)", Punctuation, "#pop"), include("root")],
"root": [
# Whitespace and comments
(r"\s+", Text.Whitespace),
(r"/\*", Comment.Multiline, "comment"),
(r"//.*\n", Comment.Single),
# variable assignment
(r"(let|set)(\s+)", bygroups(Keyword.Declaration, Text.Whitespace),
"assignment"),
# Word operators
(words(OPERATORS_LIST["words"], prefix=r"\b", suffix=r"\b"),
Operator.Word),
# Statements
(words(STATEMENT_LIST, suffix=r"\b"), Keyword),
# Table names
(r"[a-z]\w*:", Keyword.Declaration),
# Constants
(words(CONSTANT_LIST, suffix=r"\b"), Keyword.Constant),
# Functions
(words(SCRIPT_FUNCTIONS, suffix=r"(?=\s*\()"), Name.Builtin,
"function"),
# interpolation - e.g. $(variableName)
include("interp"),
# Quotes denote a field/file name
(r'"', String.Symbol, "field_name_quote"),
# Square brackets denote a field/file name
(r"\[", String.Symbol, "field_name_bracket"),
# Strings
(r"'", String, "string"),
# Numbers
include("numerics"),
# Operator symbols
(words(OPERATORS_LIST["symbols"]), Operator),
# Strings denoted by single quotes
(r"'.+?'", String),
# Words as text
(r"\b\w+\b", Text),
# Basic punctuation
(r"[,;.()\\/]", Punctuation),
],
}
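if __name__ == "__main__":  # pragma: no cover
    # Minimal sketch (assumption: run directly; not part of upstream
    # Pygments). The Qlik script line is made up.
    from pygments import highlight
    from pygments.formatters import NullFormatter
    print(highlight("let vToday = Today();", QlikLexer(), NullFormatter()))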
| 3,665 | Python | 30.067796 | 79 | 0.47176 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_openedge_builtins.py | """
pygments.lexers._openedge_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = (
'ABS',
'ABSO',
'ABSOL',
'ABSOLU',
'ABSOLUT',
'ABSOLUTE',
'ABSTRACT',
'ACCELERATOR',
'ACCUM',
'ACCUMU',
'ACCUMUL',
'ACCUMULA',
'ACCUMULAT',
'ACCUMULATE',
'ACTIVE-FORM',
'ACTIVE-WINDOW',
'ADD',
'ADD-BUFFER',
'ADD-CALC-COLUMN',
'ADD-COLUMNS-FROM',
'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM',
'ADD-FIRST',
'ADD-INDEX-FIELD',
'ADD-LAST',
'ADD-LIKE-COLUMN',
'ADD-LIKE-FIELD',
'ADD-LIKE-INDEX',
'ADD-NEW-FIELD',
'ADD-NEW-INDEX',
'ADD-SCHEMA-LOCATION',
'ADD-SUPER-PROCEDURE',
'ADM-DATA',
'ADVISE',
'ALERT-BOX',
'ALIAS',
'ALL',
'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION',
'ALTER',
'ALWAYS-ON-TOP',
'AMBIG',
'AMBIGU',
'AMBIGUO',
'AMBIGUOU',
'AMBIGUOUS',
'ANALYZ',
'ANALYZE',
'AND',
'ANSI-ONLY',
'ANY',
'ANYWHERE',
'APPEND',
'APPL-ALERT',
'APPL-ALERT-',
'APPL-ALERT-B',
'APPL-ALERT-BO',
'APPL-ALERT-BOX',
'APPL-ALERT-BOXE',
'APPL-ALERT-BOXES',
'APPL-CONTEXT-ID',
'APPLICATION',
'APPLY',
'APPSERVER-INFO',
'APPSERVER-PASSWORD',
'APPSERVER-USERID',
'ARRAY-MESSAGE',
'AS',
'ASC',
'ASCE',
'ASCEN',
'ASCEND',
'ASCENDI',
'ASCENDIN',
'ASCENDING',
'ASK-OVERWRITE',
'ASSEMBLY',
'ASSIGN',
'ASYNC-REQUEST-COUNT',
'ASYNC-REQUEST-HANDLE',
'ASYNCHRONOUS',
'AT',
'ATTACHED-PAIRLIST',
'ATTR',
'ATTR-SPACE',
'ATTRI',
'ATTRIB',
'ATTRIBU',
'ATTRIBUT',
'AUDIT-CONTROL',
'AUDIT-ENABLED',
'AUDIT-EVENT-CONTEXT',
'AUDIT-POLICY',
'AUTHENTICATION-FAILED',
'AUTHORIZATION',
'AUTO-COMP',
'AUTO-COMPL',
'AUTO-COMPLE',
'AUTO-COMPLET',
'AUTO-COMPLETI',
'AUTO-COMPLETIO',
'AUTO-COMPLETION',
'AUTO-END-KEY',
'AUTO-ENDKEY',
'AUTO-GO',
'AUTO-IND',
'AUTO-INDE',
'AUTO-INDEN',
'AUTO-INDENT',
'AUTO-RESIZE',
'AUTO-RET',
'AUTO-RETU',
'AUTO-RETUR',
'AUTO-RETURN',
'AUTO-SYNCHRONIZE',
'AUTO-Z',
'AUTO-ZA',
'AUTO-ZAP',
'AUTOMATIC',
'AVAIL',
'AVAILA',
'AVAILAB',
'AVAILABL',
'AVAILABLE',
'AVAILABLE-FORMATS',
'AVE',
'AVER',
'AVERA',
'AVERAG',
'AVERAGE',
'AVG',
'BACK',
'BACKG',
'BACKGR',
'BACKGRO',
'BACKGROU',
'BACKGROUN',
'BACKGROUND',
'BACKWARD',
'BACKWARDS',
'BASE64-DECODE',
'BASE64-ENCODE',
'BASE-ADE',
'BASE-KEY',
'BATCH',
'BATCH-',
'BATCH-M',
'BATCH-MO',
'BATCH-MOD',
'BATCH-MODE',
'BATCH-SIZE',
'BEFORE-H',
'BEFORE-HI',
'BEFORE-HID',
'BEFORE-HIDE',
'BEGIN-EVENT-GROUP',
'BEGINS',
'BELL',
'BETWEEN',
'BGC',
'BGCO',
'BGCOL',
'BGCOLO',
'BGCOLOR',
'BIG-ENDIAN',
'BINARY',
'BIND',
'BIND-WHERE',
'BLANK',
'BLOCK-ITERATION-DISPLAY',
'BLOCK-LEVEL',
'BORDER-B',
'BORDER-BO',
'BORDER-BOT',
'BORDER-BOTT',
'BORDER-BOTTO',
'BORDER-BOTTOM-CHARS',
'BORDER-BOTTOM-P',
'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX',
'BORDER-BOTTOM-PIXE',
'BORDER-BOTTOM-PIXEL',
'BORDER-BOTTOM-PIXELS',
'BORDER-L',
'BORDER-LE',
'BORDER-LEF',
'BORDER-LEFT',
'BORDER-LEFT-',
'BORDER-LEFT-C',
'BORDER-LEFT-CH',
'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR',
'BORDER-LEFT-CHARS',
'BORDER-LEFT-P',
'BORDER-LEFT-PI',
'BORDER-LEFT-PIX',
'BORDER-LEFT-PIXE',
'BORDER-LEFT-PIXEL',
'BORDER-LEFT-PIXELS',
'BORDER-R',
'BORDER-RI',
'BORDER-RIG',
'BORDER-RIGH',
'BORDER-RIGHT',
'BORDER-RIGHT-',
'BORDER-RIGHT-C',
'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA',
'BORDER-RIGHT-CHAR',
'BORDER-RIGHT-CHARS',
'BORDER-RIGHT-P',
'BORDER-RIGHT-PI',
'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE',
'BORDER-RIGHT-PIXEL',
'BORDER-RIGHT-PIXELS',
'BORDER-T',
'BORDER-TO',
'BORDER-TOP',
'BORDER-TOP-',
'BORDER-TOP-C',
'BORDER-TOP-CH',
'BORDER-TOP-CHA',
'BORDER-TOP-CHAR',
'BORDER-TOP-CHARS',
'BORDER-TOP-P',
'BORDER-TOP-PI',
'BORDER-TOP-PIX',
'BORDER-TOP-PIXE',
'BORDER-TOP-PIXEL',
'BORDER-TOP-PIXELS',
'BOX',
'BOX-SELECT',
'BOX-SELECTA',
'BOX-SELECTAB',
'BOX-SELECTABL',
'BOX-SELECTABLE',
'BREAK',
'BROWSE',
'BUFFER',
'BUFFER-CHARS',
'BUFFER-COMPARE',
'BUFFER-COPY',
'BUFFER-CREATE',
'BUFFER-DELETE',
'BUFFER-FIELD',
'BUFFER-HANDLE',
'BUFFER-LINES',
'BUFFER-NAME',
'BUFFER-PARTITION-ID',
'BUFFER-RELEASE',
'BUFFER-VALUE',
'BUTTON',
'BUTTONS',
'BY',
'BY-POINTER',
'BY-VARIANT-POINTER',
'CACHE',
'CACHE-SIZE',
'CALL',
'CALL-NAME',
'CALL-TYPE',
'CAN-CREATE',
'CAN-DELETE',
'CAN-DO',
'CAN-DO-DOMAIN-SUPPORT',
'CAN-FIND',
'CAN-QUERY',
'CAN-READ',
'CAN-SET',
'CAN-WRITE',
'CANCEL-BREAK',
'CANCEL-BUTTON',
'CAPS',
'CAREFUL-PAINT',
'CASE',
'CASE-SEN',
'CASE-SENS',
'CASE-SENSI',
'CASE-SENSIT',
'CASE-SENSITI',
'CASE-SENSITIV',
'CASE-SENSITIVE',
'CAST',
'CATCH',
'CDECL',
'CENTER',
'CENTERE',
'CENTERED',
'CHAINED',
'CHARACTER',
'CHARACTER_LENGTH',
'CHARSET',
'CHECK',
'CHECKED',
'CHOOSE',
'CHR',
'CLASS',
'CLASS-TYPE',
'CLEAR',
'CLEAR-APPL-CONTEXT',
'CLEAR-LOG',
'CLEAR-SELECT',
'CLEAR-SELECTI',
'CLEAR-SELECTIO',
'CLEAR-SELECTION',
'CLEAR-SORT-ARROW',
'CLEAR-SORT-ARROWS',
'CLIENT-CONNECTION-ID',
'CLIENT-PRINCIPAL',
'CLIENT-TTY',
'CLIENT-TYPE',
'CLIENT-WORKSTATION',
'CLIPBOARD',
'CLOSE',
'CLOSE-LOG',
'CODE',
'CODEBASE-LOCATOR',
'CODEPAGE',
'CODEPAGE-CONVERT',
'COL',
'COL-OF',
'COLLATE',
'COLON',
'COLON-ALIGN',
'COLON-ALIGNE',
'COLON-ALIGNED',
'COLOR',
'COLOR-TABLE',
'COLU',
'COLUM',
'COLUMN',
'COLUMN-BGCOLOR',
'COLUMN-DCOLOR',
'COLUMN-FGCOLOR',
'COLUMN-FONT',
'COLUMN-LAB',
'COLUMN-LABE',
'COLUMN-LABEL',
'COLUMN-MOVABLE',
'COLUMN-OF',
'COLUMN-PFCOLOR',
'COLUMN-READ-ONLY',
'COLUMN-RESIZABLE',
'COLUMN-SCROLLING',
'COLUMNS',
'COM-HANDLE',
'COM-SELF',
'COMBO-BOX',
'COMMAND',
'COMPARES',
'COMPILE',
'COMPILER',
'COMPLETE',
'CONFIG-NAME',
'CONNECT',
'CONNECTED',
'CONSTRUCTOR',
'CONTAINS',
'CONTENTS',
'CONTEXT',
'CONTEXT-HELP',
'CONTEXT-HELP-FILE',
'CONTEXT-HELP-ID',
'CONTEXT-POPUP',
'CONTROL',
'CONTROL-BOX',
'CONTROL-FRAME',
'CONVERT',
'CONVERT-3D-COLORS',
'CONVERT-TO-OFFS',
'CONVERT-TO-OFFSE',
'CONVERT-TO-OFFSET',
'COPY-DATASET',
'COPY-LOB',
'COPY-SAX-ATTRIBUTES',
'COPY-TEMP-TABLE',
'COUNT',
'COUNT-OF',
'CPCASE',
'CPCOLL',
'CPINTERNAL',
'CPLOG',
'CPPRINT',
'CPRCODEIN',
'CPRCODEOUT',
'CPSTREAM',
'CPTERM',
'CRC-VALUE',
'CREATE',
'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL',
'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY',
'CREATE-TEST-FILE',
'CURRENT',
'CURRENT-CHANGED',
'CURRENT-COLUMN',
'CURRENT-ENV',
'CURRENT-ENVI',
'CURRENT-ENVIR',
'CURRENT-ENVIRO',
'CURRENT-ENVIRON',
'CURRENT-ENVIRONM',
'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN',
'CURRENT-ENVIRONMENT',
'CURRENT-ITERATION',
'CURRENT-LANG',
'CURRENT-LANGU',
'CURRENT-LANGUA',
'CURRENT-LANGUAG',
'CURRENT-LANGUAGE',
'CURRENT-QUERY',
'CURRENT-REQUEST-INFO',
'CURRENT-RESPONSE-INFO',
'CURRENT-RESULT-ROW',
'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE',
'CURRENT-WINDOW',
'CURRENT_DATE',
'CURS',
'CURSO',
'CURSOR',
'CURSOR-CHAR',
'CURSOR-LINE',
'CURSOR-OFFSET',
'DATA-BIND',
'DATA-ENTRY-RET',
'DATA-ENTRY-RETU',
'DATA-ENTRY-RETUR',
'DATA-ENTRY-RETURN',
'DATA-REL',
'DATA-RELA',
'DATA-RELAT',
'DATA-RELATI',
'DATA-RELATIO',
'DATA-RELATION',
'DATA-SOURCE',
'DATA-SOURCE-COMPLETE-MAP',
'DATA-SOURCE-MODIFIED',
'DATA-SOURCE-ROWID',
'DATA-T',
'DATA-TY',
'DATA-TYP',
'DATA-TYPE',
'DATABASE',
'DATASERVERS',
'DATASET',
'DATASET-HANDLE',
'DATE',
'DATE-F',
'DATE-FO',
'DATE-FOR',
'DATE-FORM',
'DATE-FORMA',
'DATE-FORMAT',
'DAY',
'DB-CONTEXT',
'DB-REFERENCES',
'DBCODEPAGE',
'DBCOLLATION',
'DBNAME',
'DBPARAM',
'DBREST',
'DBRESTR',
'DBRESTRI',
'DBRESTRIC',
'DBRESTRICT',
'DBRESTRICTI',
'DBRESTRICTIO',
'DBRESTRICTION',
'DBRESTRICTIONS',
'DBTASKID',
'DBTYPE',
'DBVERS',
'DBVERSI',
'DBVERSIO',
'DBVERSION',
'DCOLOR',
'DDE',
'DDE-ERROR',
'DDE-I',
'DDE-ID',
'DDE-ITEM',
'DDE-NAME',
'DDE-TOPIC',
'DEBLANK',
'DEBU',
'DEBUG',
'DEBUG-ALERT',
'DEBUG-LIST',
'DEBUGGER',
'DECIMAL',
'DECIMALS',
'DECLARE',
'DECLARE-NAMESPACE',
'DECRYPT',
'DEFAULT',
'DEFAULT-B',
'DEFAULT-BU',
'DEFAULT-BUFFER-HANDLE',
'DEFAULT-BUT',
'DEFAULT-BUTT',
'DEFAULT-BUTTO',
'DEFAULT-BUTTON',
'DEFAULT-COMMIT',
'DEFAULT-EX',
'DEFAULT-EXT',
'DEFAULT-EXTE',
'DEFAULT-EXTEN',
'DEFAULT-EXTENS',
'DEFAULT-EXTENSI',
'DEFAULT-EXTENSIO',
'DEFAULT-EXTENSION',
'DEFAULT-NOXL',
'DEFAULT-NOXLA',
'DEFAULT-NOXLAT',
'DEFAULT-NOXLATE',
'DEFAULT-VALUE',
'DEFAULT-WINDOW',
'DEFINE',
'DEFINE-USER-EVENT-MANAGER',
'DEFINED',
'DEL',
'DELE',
'DELEGATE',
'DELET',
'DELETE PROCEDURE',
'DELETE',
'DELETE-CHAR',
'DELETE-CHARA',
'DELETE-CHARAC',
'DELETE-CHARACT',
'DELETE-CHARACTE',
'DELETE-CHARACTER',
'DELETE-CURRENT-ROW',
'DELETE-LINE',
'DELETE-RESULT-LIST-ENTRY',
'DELETE-SELECTED-ROW',
'DELETE-SELECTED-ROWS',
'DELIMITER',
'DESC',
'DESCE',
'DESCEN',
'DESCEND',
'DESCENDI',
'DESCENDIN',
'DESCENDING',
'DESELECT-FOCUSED-ROW',
'DESELECT-ROWS',
'DESELECT-SELECTED-ROW',
'DESELECTION',
'DESTRUCTOR',
'DIALOG-BOX',
'DICT',
'DICTI',
'DICTIO',
'DICTION',
'DICTIONA',
'DICTIONAR',
'DICTIONARY',
'DIR',
'DISABLE',
'DISABLE-AUTO-ZAP',
'DISABLE-DUMP-TRIGGERS',
'DISABLE-LOAD-TRIGGERS',
'DISABLED',
'DISCON',
'DISCONN',
'DISCONNE',
'DISCONNEC',
'DISCONNECT',
'DISP',
'DISPL',
'DISPLA',
'DISPLAY',
'DISPLAY-MESSAGE',
'DISPLAY-T',
'DISPLAY-TY',
'DISPLAY-TYP',
'DISPLAY-TYPE',
'DISTINCT',
'DO',
'DOMAIN-DESCRIPTION',
'DOMAIN-NAME',
'DOMAIN-TYPE',
'DOS',
'DOUBLE',
'DOWN',
'DRAG-ENABLED',
'DROP',
'DROP-DOWN',
'DROP-DOWN-LIST',
'DROP-FILE-NOTIFY',
'DROP-TARGET',
'DS-CLOSE-CURSOR',
'DSLOG-MANAGER',
'DUMP',
'DYNAMIC',
'DYNAMIC-ENUM',
'DYNAMIC-FUNCTION',
'DYNAMIC-INVOKE',
'EACH',
'ECHO',
'EDGE',
'EDGE-',
'EDGE-C',
'EDGE-CH',
'EDGE-CHA',
'EDGE-CHAR',
'EDGE-CHARS',
'EDGE-P',
'EDGE-PI',
'EDGE-PIX',
'EDGE-PIXE',
'EDGE-PIXEL',
'EDGE-PIXELS',
'EDIT-CAN-PASTE',
'EDIT-CAN-UNDO',
'EDIT-CLEAR',
'EDIT-COPY',
'EDIT-CUT',
'EDIT-PASTE',
'EDIT-UNDO',
'EDITING',
'EDITOR',
'ELSE',
'EMPTY',
'EMPTY-TEMP-TABLE',
'ENABLE',
'ENABLED-FIELDS',
'ENCODE',
'ENCRYPT',
'ENCRYPT-AUDIT-MAC-KEY',
'ENCRYPTION-SALT',
'END',
'END-DOCUMENT',
'END-ELEMENT',
'END-EVENT-GROUP',
'END-FILE-DROP',
'END-KEY',
'END-MOVE',
'END-RESIZE',
'END-ROW-RESIZE',
'END-USER-PROMPT',
'ENDKEY',
'ENTERED',
'ENTITY-EXPANSION-LIMIT',
'ENTRY',
'ENUM',
'EQ',
'ERROR',
'ERROR-COL',
'ERROR-COLU',
'ERROR-COLUM',
'ERROR-COLUMN',
'ERROR-ROW',
'ERROR-STACK-TRACE',
'ERROR-STAT',
'ERROR-STATU',
'ERROR-STATUS',
'ESCAPE',
'ETIME',
'EVENT',
'EVENT-GROUP-ID',
'EVENT-PROCEDURE',
'EVENT-PROCEDURE-CONTEXT',
'EVENT-T',
'EVENT-TY',
'EVENT-TYP',
'EVENT-TYPE',
'EVENTS',
'EXCEPT',
'EXCLUSIVE',
'EXCLUSIVE-',
'EXCLUSIVE-ID',
'EXCLUSIVE-L',
'EXCLUSIVE-LO',
'EXCLUSIVE-LOC',
'EXCLUSIVE-LOCK',
'EXCLUSIVE-WEB-USER',
'EXECUTE',
'EXISTS',
'EXP',
'EXPAND',
'EXPANDABLE',
'EXPLICIT',
'EXPORT',
'EXPORT-PRINCIPAL',
'EXTENDED',
'EXTENT',
'EXTERNAL',
'FALSE',
'FETCH',
'FETCH-SELECTED-ROW',
'FGC',
'FGCO',
'FGCOL',
'FGCOLO',
'FGCOLOR',
'FIELD',
'FIELDS',
'FILE',
'FILE-CREATE-DATE',
'FILE-CREATE-TIME',
'FILE-INFO',
'FILE-INFOR',
'FILE-INFORM',
'FILE-INFORMA',
'FILE-INFORMAT',
'FILE-INFORMATI',
'FILE-INFORMATIO',
'FILE-INFORMATION',
'FILE-MOD-DATE',
'FILE-MOD-TIME',
'FILE-NAME',
'FILE-OFF',
'FILE-OFFS',
'FILE-OFFSE',
'FILE-OFFSET',
'FILE-SIZE',
'FILE-TYPE',
'FILENAME',
'FILL',
'FILL-IN',
'FILLED',
'FILTERS',
'FINAL',
'FINALLY',
'FIND',
'FIND-BY-ROWID',
'FIND-CASE-SENSITIVE',
'FIND-CURRENT',
'FIND-FIRST',
'FIND-GLOBAL',
'FIND-LAST',
'FIND-NEXT-OCCURRENCE',
'FIND-PREV-OCCURRENCE',
'FIND-SELECT',
'FIND-UNIQUE',
'FIND-WRAP-AROUND',
'FINDER',
'FIRST',
'FIRST-ASYNCH-REQUEST',
'FIRST-CHILD',
'FIRST-COLUMN',
'FIRST-FORM',
'FIRST-OBJECT',
'FIRST-OF',
'FIRST-PROC',
'FIRST-PROCE',
'FIRST-PROCED',
'FIRST-PROCEDU',
'FIRST-PROCEDUR',
'FIRST-PROCEDURE',
'FIRST-SERVER',
'FIRST-TAB-I',
'FIRST-TAB-IT',
'FIRST-TAB-ITE',
'FIRST-TAB-ITEM',
'FIT-LAST-COLUMN',
'FIXED-ONLY',
'FLAT-BUTTON',
'FLOAT',
'FOCUS',
'FOCUSED-ROW',
'FOCUSED-ROW-SELECTED',
'FONT',
'FONT-TABLE',
'FOR',
'FORCE-FILE',
'FORE',
'FOREG',
'FOREGR',
'FOREGRO',
'FOREGROU',
'FOREGROUN',
'FOREGROUND',
'FORM INPUT',
'FORM',
'FORM-LONG-INPUT',
'FORMA',
'FORMAT',
'FORMATTE',
'FORMATTED',
'FORWARD',
'FORWARDS',
'FRAGMEN',
'FRAGMENT',
'FRAM',
'FRAME',
'FRAME-COL',
'FRAME-DB',
'FRAME-DOWN',
'FRAME-FIELD',
'FRAME-FILE',
'FRAME-INDE',
'FRAME-INDEX',
'FRAME-LINE',
'FRAME-NAME',
'FRAME-ROW',
'FRAME-SPA',
'FRAME-SPAC',
'FRAME-SPACI',
'FRAME-SPACIN',
'FRAME-SPACING',
'FRAME-VAL',
'FRAME-VALU',
'FRAME-VALUE',
'FRAME-X',
'FRAME-Y',
'FREQUENCY',
'FROM',
'FROM-C',
'FROM-CH',
'FROM-CHA',
'FROM-CHAR',
'FROM-CHARS',
'FROM-CUR',
'FROM-CURR',
'FROM-CURRE',
'FROM-CURREN',
'FROM-CURRENT',
'FROM-P',
'FROM-PI',
'FROM-PIX',
'FROM-PIXE',
'FROM-PIXEL',
'FROM-PIXELS',
'FULL-HEIGHT',
'FULL-HEIGHT-',
'FULL-HEIGHT-C',
'FULL-HEIGHT-CH',
'FULL-HEIGHT-CHA',
'FULL-HEIGHT-CHAR',
'FULL-HEIGHT-CHARS',
'FULL-HEIGHT-P',
'FULL-HEIGHT-PI',
'FULL-HEIGHT-PIX',
'FULL-HEIGHT-PIXE',
'FULL-HEIGHT-PIXEL',
'FULL-HEIGHT-PIXELS',
'FULL-PATHN',
'FULL-PATHNA',
'FULL-PATHNAM',
'FULL-PATHNAME',
'FULL-WIDTH',
'FULL-WIDTH-',
'FULL-WIDTH-C',
'FULL-WIDTH-CH',
'FULL-WIDTH-CHA',
'FULL-WIDTH-CHAR',
'FULL-WIDTH-CHARS',
'FULL-WIDTH-P',
'FULL-WIDTH-PI',
'FULL-WIDTH-PIX',
'FULL-WIDTH-PIXE',
'FULL-WIDTH-PIXEL',
'FULL-WIDTH-PIXELS',
'FUNCTION',
'FUNCTION-CALL-TYPE',
'GATEWAY',
'GATEWAYS',
'GE',
'GENERATE-MD5',
'GENERATE-PBE-KEY',
'GENERATE-PBE-SALT',
'GENERATE-RANDOM-KEY',
'GENERATE-UUID',
'GET',
'GET-ATTR-CALL-TYPE',
'GET-ATTRIBUTE-NODE',
'GET-BINARY-DATA',
'GET-BLUE',
'GET-BLUE-',
'GET-BLUE-V',
'GET-BLUE-VA',
'GET-BLUE-VAL',
'GET-BLUE-VALU',
'GET-BLUE-VALUE',
'GET-BROWSE-COLUMN',
'GET-BUFFER-HANDLE',
'GET-BYTE',
'GET-CALLBACK-PROC-CONTEXT',
'GET-CALLBACK-PROC-NAME',
'GET-CGI-LIST',
'GET-CGI-LONG-VALUE',
'GET-CGI-VALUE',
'GET-CLASS',
'GET-CODEPAGES',
'GET-COLLATIONS',
'GET-CONFIG-VALUE',
'GET-CURRENT',
'GET-DOUBLE',
'GET-DROPPED-FILE',
'GET-DYNAMIC',
'GET-ERROR-COLUMN',
'GET-ERROR-ROW',
'GET-FILE',
'GET-FILE-NAME',
'GET-FILE-OFFSE',
'GET-FILE-OFFSET',
'GET-FIRST',
'GET-FLOAT',
'GET-GREEN',
'GET-GREEN-',
'GET-GREEN-V',
'GET-GREEN-VA',
'GET-GREEN-VAL',
'GET-GREEN-VALU',
'GET-GREEN-VALUE',
'GET-INDEX-BY-NAMESPACE-NAME',
'GET-INDEX-BY-QNAME',
'GET-INT64',
'GET-ITERATION',
'GET-KEY-VAL',
'GET-KEY-VALU',
'GET-KEY-VALUE',
'GET-LAST',
'GET-LOCALNAME-BY-INDEX',
'GET-LONG',
'GET-MESSAGE',
'GET-NEXT',
'GET-NUMBER',
'GET-POINTER-VALUE',
'GET-PREV',
'GET-PRINTERS',
'GET-PROPERTY',
'GET-QNAME-BY-INDEX',
'GET-RED',
'GET-RED-',
'GET-RED-V',
'GET-RED-VA',
'GET-RED-VAL',
'GET-RED-VALU',
'GET-RED-VALUE',
'GET-REPOSITIONED-ROW',
'GET-RGB-VALUE',
'GET-SELECTED',
'GET-SELECTED-',
'GET-SELECTED-W',
'GET-SELECTED-WI',
'GET-SELECTED-WID',
'GET-SELECTED-WIDG',
'GET-SELECTED-WIDGE',
'GET-SELECTED-WIDGET',
'GET-SHORT',
'GET-SIGNATURE',
'GET-SIZE',
'GET-STRING',
'GET-TAB-ITEM',
'GET-TEXT-HEIGHT',
'GET-TEXT-HEIGHT-',
'GET-TEXT-HEIGHT-C',
'GET-TEXT-HEIGHT-CH',
'GET-TEXT-HEIGHT-CHA',
'GET-TEXT-HEIGHT-CHAR',
'GET-TEXT-HEIGHT-CHARS',
'GET-TEXT-HEIGHT-P',
'GET-TEXT-HEIGHT-PI',
'GET-TEXT-HEIGHT-PIX',
'GET-TEXT-HEIGHT-PIXE',
'GET-TEXT-HEIGHT-PIXEL',
'GET-TEXT-HEIGHT-PIXELS',
'GET-TEXT-WIDTH',
'GET-TEXT-WIDTH-',
'GET-TEXT-WIDTH-C',
'GET-TEXT-WIDTH-CH',
'GET-TEXT-WIDTH-CHA',
'GET-TEXT-WIDTH-CHAR',
'GET-TEXT-WIDTH-CHARS',
'GET-TEXT-WIDTH-P',
'GET-TEXT-WIDTH-PI',
'GET-TEXT-WIDTH-PIX',
'GET-TEXT-WIDTH-PIXE',
'GET-TEXT-WIDTH-PIXEL',
'GET-TEXT-WIDTH-PIXELS',
'GET-TYPE-BY-INDEX',
'GET-TYPE-BY-NAMESPACE-NAME',
'GET-TYPE-BY-QNAME',
'GET-UNSIGNED-LONG',
'GET-UNSIGNED-SHORT',
'GET-URI-BY-INDEX',
'GET-VALUE-BY-INDEX',
'GET-VALUE-BY-NAMESPACE-NAME',
'GET-VALUE-BY-QNAME',
'GET-WAIT-STATE',
'GETBYTE',
'GLOBAL',
'GO-ON',
'GO-PEND',
'GO-PENDI',
'GO-PENDIN',
'GO-PENDING',
'GRANT',
'GRAPHIC-E',
'GRAPHIC-ED',
'GRAPHIC-EDG',
'GRAPHIC-EDGE',
'GRID-FACTOR-H',
'GRID-FACTOR-HO',
'GRID-FACTOR-HOR',
'GRID-FACTOR-HORI',
'GRID-FACTOR-HORIZ',
'GRID-FACTOR-HORIZO',
'GRID-FACTOR-HORIZON',
'GRID-FACTOR-HORIZONT',
'GRID-FACTOR-HORIZONTA',
'GRID-FACTOR-HORIZONTAL',
'GRID-FACTOR-V',
'GRID-FACTOR-VE',
'GRID-FACTOR-VER',
'GRID-FACTOR-VERT',
'GRID-FACTOR-VERTI',
'GRID-FACTOR-VERTIC',
'GRID-FACTOR-VERTICA',
'GRID-FACTOR-VERTICAL',
'GRID-SNAP',
'GRID-UNIT-HEIGHT',
'GRID-UNIT-HEIGHT-',
'GRID-UNIT-HEIGHT-C',
'GRID-UNIT-HEIGHT-CH',
'GRID-UNIT-HEIGHT-CHA',
'GRID-UNIT-HEIGHT-CHARS',
'GRID-UNIT-HEIGHT-P',
'GRID-UNIT-HEIGHT-PI',
'GRID-UNIT-HEIGHT-PIX',
'GRID-UNIT-HEIGHT-PIXE',
'GRID-UNIT-HEIGHT-PIXEL',
'GRID-UNIT-HEIGHT-PIXELS',
'GRID-UNIT-WIDTH',
'GRID-UNIT-WIDTH-',
'GRID-UNIT-WIDTH-C',
'GRID-UNIT-WIDTH-CH',
'GRID-UNIT-WIDTH-CHA',
'GRID-UNIT-WIDTH-CHAR',
'GRID-UNIT-WIDTH-CHARS',
'GRID-UNIT-WIDTH-P',
'GRID-UNIT-WIDTH-PI',
'GRID-UNIT-WIDTH-PIX',
'GRID-UNIT-WIDTH-PIXE',
'GRID-UNIT-WIDTH-PIXEL',
'GRID-UNIT-WIDTH-PIXELS',
'GRID-VISIBLE',
'GROUP',
'GT',
'GUID',
'HANDLE',
'HANDLER',
'HAS-RECORDS',
'HAVING',
'HEADER',
'HEIGHT',
'HEIGHT-',
'HEIGHT-C',
'HEIGHT-CH',
'HEIGHT-CHA',
'HEIGHT-CHAR',
'HEIGHT-CHARS',
'HEIGHT-P',
'HEIGHT-PI',
'HEIGHT-PIX',
'HEIGHT-PIXE',
'HEIGHT-PIXEL',
'HEIGHT-PIXELS',
'HELP',
'HEX-DECODE',
'HEX-ENCODE',
'HIDDEN',
'HIDE',
'HORI',
'HORIZ',
'HORIZO',
'HORIZON',
'HORIZONT',
'HORIZONTA',
'HORIZONTAL',
'HOST-BYTE-ORDER',
'HTML-CHARSET',
'HTML-END-OF-LINE',
'HTML-END-OF-PAGE',
'HTML-FRAME-BEGIN',
'HTML-FRAME-END',
'HTML-HEADER-BEGIN',
'HTML-HEADER-END',
'HTML-TITLE-BEGIN',
'HTML-TITLE-END',
'HWND',
'ICON',
'IF',
'IMAGE',
'IMAGE-DOWN',
'IMAGE-INSENSITIVE',
'IMAGE-SIZE',
'IMAGE-SIZE-C',
'IMAGE-SIZE-CH',
'IMAGE-SIZE-CHA',
'IMAGE-SIZE-CHAR',
'IMAGE-SIZE-CHARS',
'IMAGE-SIZE-P',
'IMAGE-SIZE-PI',
'IMAGE-SIZE-PIX',
'IMAGE-SIZE-PIXE',
'IMAGE-SIZE-PIXEL',
'IMAGE-SIZE-PIXELS',
'IMAGE-UP',
'IMMEDIATE-DISPLAY',
'IMPLEMENTS',
'IMPORT',
'IMPORT-PRINCIPAL',
'IN',
'IN-HANDLE',
'INCREMENT-EXCLUSIVE-ID',
'INDEX',
'INDEX-HINT',
'INDEX-INFORMATION',
'INDEXED-REPOSITION',
'INDICATOR',
'INFO',
'INFOR',
'INFORM',
'INFORMA',
'INFORMAT',
'INFORMATI',
'INFORMATIO',
'INFORMATION',
'INHERIT-BGC',
'INHERIT-BGCO',
'INHERIT-BGCOL',
'INHERIT-BGCOLO',
'INHERIT-BGCOLOR',
'INHERIT-FGC',
'INHERIT-FGCO',
'INHERIT-FGCOL',
'INHERIT-FGCOLO',
'INHERIT-FGCOLOR',
'INHERITS',
'INIT',
'INITI',
'INITIA',
'INITIAL',
'INITIAL-DIR',
'INITIAL-FILTER',
'INITIALIZE-DOCUMENT-TYPE',
'INITIATE',
'INNER-CHARS',
'INNER-LINES',
'INPUT',
'INPUT-O',
'INPUT-OU',
'INPUT-OUT',
'INPUT-OUTP',
'INPUT-OUTPU',
'INPUT-OUTPUT',
'INPUT-VALUE',
'INSERT',
'INSERT-ATTRIBUTE',
'INSERT-B',
'INSERT-BA',
'INSERT-BAC',
'INSERT-BACK',
'INSERT-BACKT',
'INSERT-BACKTA',
'INSERT-BACKTAB',
'INSERT-FILE',
'INSERT-ROW',
'INSERT-STRING',
'INSERT-T',
'INSERT-TA',
'INSERT-TAB',
'INT64',
'INT',
'INTEGER',
'INTERFACE',
'INTERNAL-ENTRIES',
'INTO',
'INVOKE',
'IS',
'IS-ATTR',
'IS-ATTR-',
'IS-ATTR-S',
'IS-ATTR-SP',
'IS-ATTR-SPA',
'IS-ATTR-SPAC',
'IS-ATTR-SPACE',
'IS-CLASS',
'IS-JSON',
'IS-LEAD-BYTE',
'IS-OPEN',
'IS-PARAMETER-SET',
'IS-PARTITIONED',
'IS-ROW-SELECTED',
'IS-SELECTED',
'IS-XML',
'ITEM',
'ITEMS-PER-ROW',
'JOIN',
'JOIN-BY-SQLDB',
'KBLABEL',
'KEEP-CONNECTION-OPEN',
'KEEP-FRAME-Z',
'KEEP-FRAME-Z-',
'KEEP-FRAME-Z-O',
'KEEP-FRAME-Z-OR',
'KEEP-FRAME-Z-ORD',
'KEEP-FRAME-Z-ORDE',
'KEEP-FRAME-Z-ORDER',
'KEEP-MESSAGES',
'KEEP-SECURITY-CACHE',
'KEEP-TAB-ORDER',
'KEY',
'KEY-CODE',
'KEY-FUNC',
'KEY-FUNCT',
'KEY-FUNCTI',
'KEY-FUNCTIO',
'KEY-FUNCTION',
'KEY-LABEL',
'KEYCODE',
'KEYFUNC',
'KEYFUNCT',
'KEYFUNCTI',
'KEYFUNCTIO',
'KEYFUNCTION',
'KEYLABEL',
'KEYS',
'KEYWORD',
'KEYWORD-ALL',
'LABEL',
'LABEL-BGC',
'LABEL-BGCO',
'LABEL-BGCOL',
'LABEL-BGCOLO',
'LABEL-BGCOLOR',
'LABEL-DC',
'LABEL-DCO',
'LABEL-DCOL',
'LABEL-DCOLO',
'LABEL-DCOLOR',
'LABEL-FGC',
'LABEL-FGCO',
'LABEL-FGCOL',
'LABEL-FGCOLO',
'LABEL-FGCOLOR',
'LABEL-FONT',
'LABEL-PFC',
'LABEL-PFCO',
'LABEL-PFCOL',
'LABEL-PFCOLO',
'LABEL-PFCOLOR',
'LABELS',
'LABELS-HAVE-COLONS',
'LANDSCAPE',
'LANGUAGE',
'LANGUAGES',
'LARGE',
'LARGE-TO-SMALL',
'LAST',
'LAST-ASYNCH-REQUEST',
'LAST-BATCH',
'LAST-CHILD',
'LAST-EVEN',
'LAST-EVENT',
'LAST-FORM',
'LAST-KEY',
'LAST-OBJECT',
'LAST-OF',
'LAST-PROCE',
'LAST-PROCED',
'LAST-PROCEDU',
'LAST-PROCEDUR',
'LAST-PROCEDURE',
'LAST-SERVER',
'LAST-TAB-I',
'LAST-TAB-IT',
'LAST-TAB-ITE',
'LAST-TAB-ITEM',
'LASTKEY',
'LC',
'LDBNAME',
'LE',
'LEAVE',
'LEFT-ALIGN',
'LEFT-ALIGNE',
'LEFT-ALIGNED',
'LEFT-TRIM',
'LENGTH',
'LIBRARY',
'LIKE',
'LIKE-SEQUENTIAL',
'LINE',
'LINE-COUNT',
'LINE-COUNTE',
'LINE-COUNTER',
'LIST-EVENTS',
'LIST-ITEM-PAIRS',
'LIST-ITEMS',
'LIST-PROPERTY-NAMES',
'LIST-QUERY-ATTRS',
'LIST-SET-ATTRS',
'LIST-WIDGETS',
'LISTI',
'LISTIN',
'LISTING',
'LITERAL-QUESTION',
'LITTLE-ENDIAN',
'LOAD',
'LOAD-DOMAINS',
'LOAD-ICON',
'LOAD-IMAGE',
'LOAD-IMAGE-DOWN',
'LOAD-IMAGE-INSENSITIVE',
'LOAD-IMAGE-UP',
'LOAD-MOUSE-P',
'LOAD-MOUSE-PO',
'LOAD-MOUSE-POI',
'LOAD-MOUSE-POIN',
'LOAD-MOUSE-POINT',
'LOAD-MOUSE-POINTE',
'LOAD-MOUSE-POINTER',
'LOAD-PICTURE',
'LOAD-SMALL-ICON',
'LOCAL-NAME',
'LOCAL-VERSION-INFO',
'LOCATOR-COLUMN-NUMBER',
'LOCATOR-LINE-NUMBER',
'LOCATOR-PUBLIC-ID',
'LOCATOR-SYSTEM-ID',
'LOCATOR-TYPE',
'LOCK-REGISTRATION',
'LOCKED',
'LOG',
'LOG-AUDIT-EVENT',
'LOG-MANAGER',
'LOGICAL',
'LOGIN-EXPIRATION-TIMESTAMP',
'LOGIN-HOST',
'LOGIN-STATE',
'LOGOUT',
'LONGCHAR',
'LOOKAHEAD',
'LOOKUP',
'LT',
'MACHINE-CLASS',
'MANDATORY',
'MANUAL-HIGHLIGHT',
'MAP',
'MARGIN-EXTRA',
'MARGIN-HEIGHT',
'MARGIN-HEIGHT-',
'MARGIN-HEIGHT-C',
'MARGIN-HEIGHT-CH',
'MARGIN-HEIGHT-CHA',
'MARGIN-HEIGHT-CHAR',
'MARGIN-HEIGHT-CHARS',
'MARGIN-HEIGHT-P',
'MARGIN-HEIGHT-PI',
'MARGIN-HEIGHT-PIX',
'MARGIN-HEIGHT-PIXE',
'MARGIN-HEIGHT-PIXEL',
'MARGIN-HEIGHT-PIXELS',
'MARGIN-WIDTH',
'MARGIN-WIDTH-',
'MARGIN-WIDTH-C',
'MARGIN-WIDTH-CH',
'MARGIN-WIDTH-CHA',
'MARGIN-WIDTH-CHAR',
'MARGIN-WIDTH-CHARS',
'MARGIN-WIDTH-P',
'MARGIN-WIDTH-PI',
'MARGIN-WIDTH-PIX',
'MARGIN-WIDTH-PIXE',
'MARGIN-WIDTH-PIXEL',
'MARGIN-WIDTH-PIXELS',
'MARK-NEW',
'MARK-ROW-STATE',
'MATCHES',
'MAX',
'MAX-BUTTON',
'MAX-CHARS',
'MAX-DATA-GUESS',
'MAX-HEIGHT',
'MAX-HEIGHT-C',
'MAX-HEIGHT-CH',
'MAX-HEIGHT-CHA',
'MAX-HEIGHT-CHAR',
'MAX-HEIGHT-CHARS',
'MAX-HEIGHT-P',
'MAX-HEIGHT-PI',
'MAX-HEIGHT-PIX',
'MAX-HEIGHT-PIXE',
'MAX-HEIGHT-PIXEL',
'MAX-HEIGHT-PIXELS',
'MAX-ROWS',
'MAX-SIZE',
'MAX-VAL',
'MAX-VALU',
'MAX-VALUE',
'MAX-WIDTH',
'MAX-WIDTH-',
'MAX-WIDTH-C',
'MAX-WIDTH-CH',
'MAX-WIDTH-CHA',
'MAX-WIDTH-CHAR',
'MAX-WIDTH-CHARS',
'MAX-WIDTH-P',
'MAX-WIDTH-PI',
'MAX-WIDTH-PIX',
'MAX-WIDTH-PIXE',
'MAX-WIDTH-PIXEL',
'MAX-WIDTH-PIXELS',
'MAXI',
'MAXIM',
'MAXIMIZE',
'MAXIMU',
'MAXIMUM',
'MAXIMUM-LEVEL',
'MD5-DIGEST',
'MEMBER',
'MEMPTR-TO-NODE-VALUE',
'MENU',
'MENU-BAR',
'MENU-ITEM',
'MENU-K',
'MENU-KE',
'MENU-KEY',
'MENU-M',
'MENU-MO',
'MENU-MOU',
'MENU-MOUS',
'MENU-MOUSE',
'MENUBAR',
'MERGE-BY-FIELD',
'MESSAGE',
'MESSAGE-AREA',
'MESSAGE-AREA-FONT',
'MESSAGE-LINES',
'METHOD',
'MIN',
'MIN-BUTTON',
'MIN-COLUMN-WIDTH-C',
'MIN-COLUMN-WIDTH-CH',
'MIN-COLUMN-WIDTH-CHA',
'MIN-COLUMN-WIDTH-CHAR',
'MIN-COLUMN-WIDTH-CHARS',
'MIN-COLUMN-WIDTH-P',
'MIN-COLUMN-WIDTH-PI',
'MIN-COLUMN-WIDTH-PIX',
'MIN-COLUMN-WIDTH-PIXE',
'MIN-COLUMN-WIDTH-PIXEL',
'MIN-COLUMN-WIDTH-PIXELS',
'MIN-HEIGHT',
'MIN-HEIGHT-',
'MIN-HEIGHT-C',
'MIN-HEIGHT-CH',
'MIN-HEIGHT-CHA',
'MIN-HEIGHT-CHAR',
'MIN-HEIGHT-CHARS',
'MIN-HEIGHT-P',
'MIN-HEIGHT-PI',
'MIN-HEIGHT-PIX',
'MIN-HEIGHT-PIXE',
'MIN-HEIGHT-PIXEL',
'MIN-HEIGHT-PIXELS',
'MIN-SIZE',
'MIN-VAL',
'MIN-VALU',
'MIN-VALUE',
'MIN-WIDTH',
'MIN-WIDTH-',
'MIN-WIDTH-C',
'MIN-WIDTH-CH',
'MIN-WIDTH-CHA',
'MIN-WIDTH-CHAR',
'MIN-WIDTH-CHARS',
'MIN-WIDTH-P',
'MIN-WIDTH-PI',
'MIN-WIDTH-PIX',
'MIN-WIDTH-PIXE',
'MIN-WIDTH-PIXEL',
'MIN-WIDTH-PIXELS',
'MINI',
'MINIM',
'MINIMU',
'MINIMUM',
'MOD',
'MODIFIED',
'MODU',
'MODUL',
'MODULO',
'MONTH',
'MOUSE',
'MOUSE-P',
'MOUSE-PO',
'MOUSE-POI',
'MOUSE-POIN',
'MOUSE-POINT',
'MOUSE-POINTE',
'MOUSE-POINTER',
'MOVABLE',
'MOVE-AFTER',
'MOVE-AFTER-',
'MOVE-AFTER-T',
'MOVE-AFTER-TA',
'MOVE-AFTER-TAB',
'MOVE-AFTER-TAB-',
'MOVE-AFTER-TAB-I',
'MOVE-AFTER-TAB-IT',
'MOVE-AFTER-TAB-ITE',
'MOVE-AFTER-TAB-ITEM',
'MOVE-BEFOR',
'MOVE-BEFORE',
'MOVE-BEFORE-',
'MOVE-BEFORE-T',
'MOVE-BEFORE-TA',
'MOVE-BEFORE-TAB',
'MOVE-BEFORE-TAB-',
'MOVE-BEFORE-TAB-I',
'MOVE-BEFORE-TAB-IT',
'MOVE-BEFORE-TAB-ITE',
'MOVE-BEFORE-TAB-ITEM',
'MOVE-COL',
'MOVE-COLU',
'MOVE-COLUM',
'MOVE-COLUMN',
'MOVE-TO-B',
'MOVE-TO-BO',
'MOVE-TO-BOT',
'MOVE-TO-BOTT',
'MOVE-TO-BOTTO',
'MOVE-TO-BOTTOM',
'MOVE-TO-EOF',
'MOVE-TO-T',
'MOVE-TO-TO',
'MOVE-TO-TOP',
'MPE',
'MTIME',
'MULTI-COMPILE',
'MULTIPLE',
'MULTIPLE-KEY',
'MULTITASKING-INTERVAL',
'MUST-EXIST',
'NAME',
'NAMESPACE-PREFIX',
'NAMESPACE-URI',
'NATIVE',
'NE',
'NEEDS-APPSERVER-PROMPT',
'NEEDS-PROMPT',
'NEW',
'NEW-INSTANCE',
'NEW-ROW',
'NEXT',
'NEXT-COLUMN',
'NEXT-PROMPT',
'NEXT-ROWID',
'NEXT-SIBLING',
'NEXT-TAB-I',
'NEXT-TAB-IT',
'NEXT-TAB-ITE',
'NEXT-TAB-ITEM',
'NEXT-VALUE',
'NO',
'NO-APPLY',
'NO-ARRAY-MESSAGE',
'NO-ASSIGN',
'NO-ATTR',
'NO-ATTR-',
'NO-ATTR-L',
'NO-ATTR-LI',
'NO-ATTR-LIS',
'NO-ATTR-LIST',
'NO-ATTR-S',
'NO-ATTR-SP',
'NO-ATTR-SPA',
'NO-ATTR-SPAC',
'NO-ATTR-SPACE',
'NO-AUTO-VALIDATE',
'NO-BIND-WHERE',
'NO-BOX',
'NO-CONSOLE',
'NO-CONVERT',
'NO-CONVERT-3D-COLORS',
'NO-CURRENT-VALUE',
'NO-DEBUG',
'NO-DRAG',
'NO-ECHO',
'NO-EMPTY-SPACE',
'NO-ERROR',
'NO-F',
'NO-FI',
'NO-FIL',
'NO-FILL',
'NO-FOCUS',
'NO-HELP',
'NO-HIDE',
'NO-INDEX-HINT',
'NO-INHERIT-BGC',
'NO-INHERIT-BGCO',
'NO-INHERIT-BGCOLOR',
'NO-INHERIT-FGC',
'NO-INHERIT-FGCO',
'NO-INHERIT-FGCOL',
'NO-INHERIT-FGCOLO',
'NO-INHERIT-FGCOLOR',
'NO-JOIN-BY-SQLDB',
'NO-LABE',
'NO-LABELS',
'NO-LOBS',
'NO-LOCK',
'NO-LOOKAHEAD',
'NO-MAP',
'NO-MES',
'NO-MESS',
'NO-MESSA',
'NO-MESSAG',
'NO-MESSAGE',
'NO-PAUSE',
'NO-PREFE',
'NO-PREFET',
'NO-PREFETC',
'NO-PREFETCH',
'NO-ROW-MARKERS',
'NO-SCROLLBAR-VERTICAL',
'NO-SEPARATE-CONNECTION',
'NO-SEPARATORS',
'NO-TAB-STOP',
'NO-UND',
'NO-UNDE',
'NO-UNDER',
'NO-UNDERL',
'NO-UNDERLI',
'NO-UNDERLIN',
'NO-UNDERLINE',
'NO-UNDO',
'NO-VAL',
'NO-VALI',
'NO-VALID',
'NO-VALIDA',
'NO-VALIDAT',
'NO-VALIDATE',
'NO-WAIT',
'NO-WORD-WRAP',
'NODE-VALUE-TO-MEMPTR',
'NONAMESPACE-SCHEMA-LOCATION',
'NONE',
'NORMALIZE',
'NOT',
'NOT-ACTIVE',
'NOW',
'NULL',
'NUM-ALI',
'NUM-ALIA',
'NUM-ALIAS',
'NUM-ALIASE',
'NUM-ALIASES',
'NUM-BUFFERS',
'NUM-BUT',
'NUM-BUTT',
'NUM-BUTTO',
'NUM-BUTTON',
'NUM-BUTTONS',
'NUM-COL',
'NUM-COLU',
'NUM-COLUM',
'NUM-COLUMN',
'NUM-COLUMNS',
'NUM-COPIES',
'NUM-DBS',
'NUM-DROPPED-FILES',
'NUM-ENTRIES',
'NUM-FIELDS',
'NUM-FORMATS',
'NUM-ITEMS',
'NUM-ITERATIONS',
'NUM-LINES',
'NUM-LOCKED-COL',
'NUM-LOCKED-COLU',
'NUM-LOCKED-COLUM',
'NUM-LOCKED-COLUMN',
'NUM-LOCKED-COLUMNS',
'NUM-MESSAGES',
'NUM-PARAMETERS',
'NUM-REFERENCES',
'NUM-REPLACED',
'NUM-RESULTS',
'NUM-SELECTED',
'NUM-SELECTED-',
'NUM-SELECTED-ROWS',
'NUM-SELECTED-W',
'NUM-SELECTED-WI',
'NUM-SELECTED-WID',
'NUM-SELECTED-WIDG',
'NUM-SELECTED-WIDGE',
'NUM-SELECTED-WIDGET',
'NUM-SELECTED-WIDGETS',
'NUM-TABS',
'NUM-TO-RETAIN',
'NUM-VISIBLE-COLUMNS',
'NUMERIC',
'NUMERIC-F',
'NUMERIC-FO',
'NUMERIC-FOR',
'NUMERIC-FORM',
'NUMERIC-FORMA',
'NUMERIC-FORMAT',
'OCTET-LENGTH',
'OF',
'OFF',
'OK',
'OK-CANCEL',
'OLD',
'ON',
'ON-FRAME',
'ON-FRAME-',
'ON-FRAME-B',
'ON-FRAME-BO',
'ON-FRAME-BOR',
'ON-FRAME-BORD',
'ON-FRAME-BORDE',
'ON-FRAME-BORDER',
'OPEN',
'OPSYS',
'OPTION',
'OR',
'ORDERED-JOIN',
'ORDINAL',
'OS-APPEND',
'OS-COMMAND',
'OS-COPY',
'OS-CREATE-DIR',
'OS-DELETE',
'OS-DIR',
'OS-DRIVE',
'OS-DRIVES',
'OS-ERROR',
'OS-GETENV',
'OS-RENAME',
'OTHERWISE',
'OUTPUT',
'OVERLAY',
'OVERRIDE',
'OWNER',
'PAGE',
'PAGE-BOT',
'PAGE-BOTT',
'PAGE-BOTTO',
'PAGE-BOTTOM',
'PAGE-NUM',
'PAGE-NUMB',
'PAGE-NUMBE',
'PAGE-NUMBER',
'PAGE-SIZE',
'PAGE-TOP',
'PAGE-WID',
'PAGE-WIDT',
'PAGE-WIDTH',
'PAGED',
'PARAM',
'PARAME',
'PARAMET',
'PARAMETE',
'PARAMETER',
'PARENT',
'PARSE-STATUS',
'PARTIAL-KEY',
'PASCAL',
'PASSWORD-FIELD',
'PATHNAME',
'PAUSE',
'PBE-HASH-ALG',
'PBE-HASH-ALGO',
'PBE-HASH-ALGOR',
'PBE-HASH-ALGORI',
'PBE-HASH-ALGORIT',
'PBE-HASH-ALGORITH',
'PBE-HASH-ALGORITHM',
'PBE-KEY-ROUNDS',
'PDBNAME',
'PERSIST',
'PERSISTE',
'PERSISTEN',
'PERSISTENT',
'PERSISTENT-CACHE-DISABLED',
'PFC',
'PFCO',
'PFCOL',
'PFCOLO',
'PFCOLOR',
'PIXELS',
'PIXELS-PER-COL',
'PIXELS-PER-COLU',
'PIXELS-PER-COLUM',
'PIXELS-PER-COLUMN',
'PIXELS-PER-ROW',
'POPUP-M',
'POPUP-ME',
'POPUP-MEN',
'POPUP-MENU',
'POPUP-O',
'POPUP-ON',
'POPUP-ONL',
'POPUP-ONLY',
'PORTRAIT',
'POSITION',
'PRECISION',
'PREFER-DATASET',
'PREPARE-STRING',
'PREPARED',
'PREPROC',
'PREPROCE',
'PREPROCES',
'PREPROCESS',
'PRESEL',
'PRESELE',
'PRESELEC',
'PRESELECT',
'PREV',
'PREV-COLUMN',
'PREV-SIBLING',
'PREV-TAB-I',
'PREV-TAB-IT',
'PREV-TAB-ITE',
'PREV-TAB-ITEM',
'PRIMARY',
'PRINTER',
'PRINTER-CONTROL-HANDLE',
'PRINTER-HDC',
'PRINTER-NAME',
'PRINTER-PORT',
'PRINTER-SETUP',
'PRIVATE',
'PRIVATE-D',
'PRIVATE-DA',
'PRIVATE-DAT',
'PRIVATE-DATA',
'PRIVILEGES',
'PROC-HA',
'PROC-HAN',
'PROC-HAND',
'PROC-HANDL',
'PROC-HANDLE',
'PROC-ST',
'PROC-STA',
'PROC-STAT',
'PROC-STATU',
'PROC-STATUS',
'PROC-TEXT',
'PROC-TEXT-BUFFER',
'PROCE',
'PROCED',
'PROCEDU',
'PROCEDUR',
'PROCEDURE',
'PROCEDURE-CALL-TYPE',
'PROCEDURE-TYPE',
'PROCESS',
'PROFILER',
'PROGRAM-NAME',
'PROGRESS',
'PROGRESS-S',
'PROGRESS-SO',
'PROGRESS-SOU',
'PROGRESS-SOUR',
'PROGRESS-SOURC',
'PROGRESS-SOURCE',
'PROMPT',
'PROMPT-F',
'PROMPT-FO',
'PROMPT-FOR',
'PROMSGS',
'PROPATH',
'PROPERTY',
'PROTECTED',
'PROVERS',
'PROVERSI',
'PROVERSIO',
'PROVERSION',
'PROXY',
'PROXY-PASSWORD',
'PROXY-USERID',
'PUBLIC',
'PUBLIC-ID',
'PUBLISH',
'PUBLISHED-EVENTS',
'PUT',
'PUT-BYTE',
'PUT-DOUBLE',
'PUT-FLOAT',
'PUT-INT64',
'PUT-KEY-VAL',
'PUT-KEY-VALU',
'PUT-KEY-VALUE',
'PUT-LONG',
'PUT-SHORT',
'PUT-STRING',
'PUT-UNSIGNED-LONG',
'PUTBYTE',
'QUERY',
'QUERY-CLOSE',
'QUERY-OFF-END',
'QUERY-OPEN',
'QUERY-PREPARE',
'QUERY-TUNING',
'QUESTION',
'QUIT',
'QUOTER',
'R-INDEX',
'RADIO-BUTTONS',
'RADIO-SET',
'RANDOM',
'RAW',
'RAW-TRANSFER',
'RCODE-INFO',
'RCODE-INFOR',
'RCODE-INFORM',
'RCODE-INFORMA',
'RCODE-INFORMAT',
'RCODE-INFORMATI',
'RCODE-INFORMATIO',
'RCODE-INFORMATION',
'READ-AVAILABLE',
'READ-EXACT-NUM',
'READ-FILE',
'READ-JSON',
'READ-ONLY',
'READ-XML',
'READ-XMLSCHEMA',
'READKEY',
'REAL',
'RECID',
'RECORD-LENGTH',
'RECT',
'RECTA',
'RECTAN',
'RECTANG',
'RECTANGL',
'RECTANGLE',
'RECURSIVE',
'REFERENCE-ONLY',
'REFRESH',
'REFRESH-AUDIT-POLICY',
'REFRESHABLE',
'REGISTER-DOMAIN',
'RELEASE',
'REMOTE',
'REMOVE-EVENTS-PROCEDURE',
'REMOVE-SUPER-PROCEDURE',
'REPEAT',
'REPLACE',
'REPLACE-SELECTION-TEXT',
'REPOSITION',
'REPOSITION-BACKWARD',
'REPOSITION-FORWARD',
'REPOSITION-MODE',
'REPOSITION-TO-ROW',
'REPOSITION-TO-ROWID',
'REQUEST',
'REQUEST-INFO',
'RESET',
'RESIZA',
'RESIZAB',
'RESIZABL',
'RESIZABLE',
'RESIZE',
'RESPONSE-INFO',
'RESTART-ROW',
'RESTART-ROWID',
'RETAIN',
'RETAIN-SHAPE',
'RETRY',
'RETRY-CANCEL',
'RETURN',
'RETURN-ALIGN',
'RETURN-ALIGNE',
'RETURN-INS',
'RETURN-INSE',
'RETURN-INSER',
'RETURN-INSERT',
'RETURN-INSERTE',
'RETURN-INSERTED',
'RETURN-TO-START-DI',
'RETURN-TO-START-DIR',
'RETURN-VAL',
'RETURN-VALU',
'RETURN-VALUE',
'RETURN-VALUE-DATA-TYPE',
'RETURNS',
'REVERSE-FROM',
'REVERT',
'REVOKE',
'RGB-VALUE',
'RIGHT-ALIGNED',
'RIGHT-TRIM',
'ROLES',
'ROUND',
'ROUTINE-LEVEL',
'ROW',
'ROW-HEIGHT-CHARS',
'ROW-HEIGHT-PIXELS',
'ROW-MARKERS',
'ROW-OF',
'ROW-RESIZABLE',
'ROWID',
'RULE',
'RUN',
'RUN-PROCEDURE',
'SAVE CACHE',
'SAVE',
'SAVE-AS',
'SAVE-FILE',
'SAX-COMPLE',
'SAX-COMPLET',
'SAX-COMPLETE',
'SAX-PARSE',
'SAX-PARSE-FIRST',
'SAX-PARSE-NEXT',
'SAX-PARSER-ERROR',
'SAX-RUNNING',
'SAX-UNINITIALIZED',
'SAX-WRITE-BEGIN',
'SAX-WRITE-COMPLETE',
'SAX-WRITE-CONTENT',
'SAX-WRITE-ELEMENT',
'SAX-WRITE-ERROR',
'SAX-WRITE-IDLE',
'SAX-WRITE-TAG',
'SAX-WRITER',
'SCHEMA',
'SCHEMA-LOCATION',
'SCHEMA-MARSHAL',
'SCHEMA-PATH',
'SCREEN',
'SCREEN-IO',
'SCREEN-LINES',
'SCREEN-VAL',
'SCREEN-VALU',
'SCREEN-VALUE',
'SCROLL',
'SCROLL-BARS',
'SCROLL-DELTA',
'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW',
'SCROLL-TO-I',
'SCROLL-TO-IT',
'SCROLL-TO-ITE',
'SCROLL-TO-ITEM',
'SCROLL-TO-SELECTED-ROW',
'SCROLLABLE',
'SCROLLBAR-H',
'SCROLLBAR-HO',
'SCROLLBAR-HOR',
'SCROLLBAR-HORI',
'SCROLLBAR-HORIZ',
'SCROLLBAR-HORIZO',
'SCROLLBAR-HORIZON',
'SCROLLBAR-HORIZONT',
'SCROLLBAR-HORIZONTA',
'SCROLLBAR-HORIZONTAL',
'SCROLLBAR-V',
'SCROLLBAR-VE',
'SCROLLBAR-VER',
'SCROLLBAR-VERT',
'SCROLLBAR-VERTI',
'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA',
'SCROLLBAR-VERTICAL',
'SCROLLED-ROW-POS',
'SCROLLED-ROW-POSI',
'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI',
'SCROLLED-ROW-POSITIO',
'SCROLLED-ROW-POSITION',
'SCROLLING',
'SDBNAME',
'SEAL',
'SEAL-TIMESTAMP',
'SEARCH',
'SEARCH-SELF',
'SEARCH-TARGET',
'SECTION',
'SECURITY-POLICY',
'SEEK',
'SELECT',
'SELECT-ALL',
'SELECT-FOCUSED-ROW',
'SELECT-NEXT-ROW',
'SELECT-PREV-ROW',
'SELECT-ROW',
'SELECTABLE',
'SELECTED',
'SELECTION',
'SELECTION-END',
'SELECTION-LIST',
'SELECTION-START',
'SELECTION-TEXT',
'SELF',
'SEND',
'SEND-SQL-STATEMENT',
'SENSITIVE',
'SEPARATE-CONNECTION',
'SEPARATOR-FGCOLOR',
'SEPARATORS',
'SERIALIZABLE',
'SERIALIZE-HIDDEN',
'SERIALIZE-NAME',
'SERVER',
'SERVER-CONNECTION-BOUND',
'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT',
'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE',
'SESSION',
'SESSION-ID',
'SET',
'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE',
'SET-ATTRIBUTE-NODE',
'SET-BLUE',
'SET-BLUE-',
'SET-BLUE-V',
'SET-BLUE-VA',
'SET-BLUE-VAL',
'SET-BLUE-VALU',
'SET-BLUE-VALUE',
'SET-BREAK',
'SET-BUFFERS',
'SET-CALLBACK',
'SET-CLIENT',
'SET-COMMIT',
'SET-CONTENTS',
'SET-CURRENT-VALUE',
'SET-DB-CLIENT',
'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION',
'SET-GREEN',
'SET-GREEN-',
'SET-GREEN-V',
'SET-GREEN-VA',
'SET-GREEN-VAL',
'SET-GREEN-VALU',
'SET-GREEN-VALUE',
'SET-INPUT-SOURCE',
'SET-OPTION',
'SET-OUTPUT-DESTINATION',
'SET-PARAMETER',
'SET-POINTER-VALUE',
'SET-PROPERTY',
'SET-RED',
'SET-RED-',
'SET-RED-V',
'SET-RED-VA',
'SET-RED-VAL',
'SET-RED-VALU',
'SET-RED-VALUE',
'SET-REPOSITIONED-ROW',
'SET-RGB-VALUE',
'SET-ROLLBACK',
'SET-SELECTION',
'SET-SIZE',
'SET-SORT-ARROW',
'SET-WAIT-STATE',
'SETUSER',
'SETUSERI',
'SETUSERID',
'SHA1-DIGEST',
'SHARE',
'SHARE-',
'SHARE-L',
'SHARE-LO',
'SHARE-LOC',
'SHARE-LOCK',
'SHARED',
'SHOW-IN-TASKBAR',
'SHOW-STAT',
'SHOW-STATS',
'SIDE-LAB',
'SIDE-LABE',
'SIDE-LABEL',
'SIDE-LABEL-H',
'SIDE-LABEL-HA',
'SIDE-LABEL-HAN',
'SIDE-LABEL-HAND',
'SIDE-LABEL-HANDL',
'SIDE-LABEL-HANDLE',
'SIDE-LABELS',
'SIGNATURE',
'SILENT',
'SIMPLE',
'SINGLE',
'SINGLE-RUN',
'SINGLETON',
'SIZE',
'SIZE-C',
'SIZE-CH',
'SIZE-CHA',
'SIZE-CHAR',
'SIZE-CHARS',
'SIZE-P',
'SIZE-PI',
'SIZE-PIX',
'SIZE-PIXE',
'SIZE-PIXEL',
'SIZE-PIXELS',
'SKIP',
'SKIP-DELETED-RECORD',
'SLIDER',
'SMALL-ICON',
'SMALL-TITLE',
'SMALLINT',
'SOME',
'SORT',
'SORT-ASCENDING',
'SORT-NUMBER',
'SOURCE',
'SOURCE-PROCEDURE',
'SPACE',
'SQL',
'SQRT',
'SSL-SERVER-NAME',
'STANDALONE',
'START',
'START-DOCUMENT',
'START-ELEMENT',
'START-MOVE',
'START-RESIZE',
'START-ROW-RESIZE',
'STATE-DETAIL',
'STATIC',
'STATUS',
'STATUS-AREA',
'STATUS-AREA-FONT',
'STDCALL',
'STOP',
'STOP-AFTER',
'STOP-PARSING',
'STOPPE',
'STOPPED',
'STORED-PROC',
'STORED-PROCE',
'STORED-PROCED',
'STORED-PROCEDU',
'STORED-PROCEDUR',
'STORED-PROCEDURE',
'STREAM',
'STREAM-HANDLE',
'STREAM-IO',
'STRETCH-TO-FIT',
'STRICT',
'STRICT-ENTITY-RESOLUTION',
'STRING',
'STRING-VALUE',
'STRING-XREF',
'SUB-AVE',
'SUB-AVER',
'SUB-AVERA',
'SUB-AVERAG',
'SUB-AVERAGE',
'SUB-COUNT',
'SUB-MAXIMUM',
'SUB-MENU',
'SUB-MIN',
'SUB-MINIMUM',
'SUB-TOTAL',
'SUBSCRIBE',
'SUBST',
'SUBSTI',
'SUBSTIT',
'SUBSTITU',
'SUBSTITUT',
'SUBSTITUTE',
'SUBSTR',
'SUBSTRI',
'SUBSTRIN',
'SUBSTRING',
'SUBTYPE',
'SUM',
'SUM-MAX',
'SUM-MAXI',
'SUM-MAXIM',
'SUM-MAXIMU',
'SUPER',
'SUPER-PROCEDURES',
'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-W',
'SUPPRESS-WA',
'SUPPRESS-WAR',
'SUPPRESS-WARN',
'SUPPRESS-WARNI',
'SUPPRESS-WARNIN',
'SUPPRESS-WARNING',
'SUPPRESS-WARNINGS',
'SYMMETRIC-ENCRYPTION-ALGORITHM',
'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY',
'SYMMETRIC-SUPPORT',
'SYSTEM-ALERT',
'SYSTEM-ALERT-',
'SYSTEM-ALERT-B',
'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX',
'SYSTEM-ALERT-BOXE',
'SYSTEM-ALERT-BOXES',
'SYSTEM-DIALOG',
'SYSTEM-HELP',
'SYSTEM-ID',
'TAB-POSITION',
'TAB-STOP',
'TABLE',
'TABLE-HANDLE',
'TABLE-NUMBER',
'TABLE-SCAN',
'TARGET',
'TARGET-PROCEDURE',
'TEMP-DIR',
'TEMP-DIRE',
'TEMP-DIREC',
'TEMP-DIRECT',
'TEMP-DIRECTO',
'TEMP-DIRECTOR',
'TEMP-DIRECTORY',
'TEMP-TABLE',
'TEMP-TABLE-PREPARE',
'TERM',
'TERMI',
'TERMIN',
'TERMINA',
'TERMINAL',
'TERMINATE',
'TEXT',
'TEXT-CURSOR',
'TEXT-SEG-GROW',
'TEXT-SELECTED',
'THEN',
'THIS-OBJECT',
'THIS-PROCEDURE',
'THREAD-SAFE',
'THREE-D',
'THROUGH',
'THROW',
'THRU',
'TIC-MARKS',
'TIME',
'TIME-SOURCE',
'TITLE',
'TITLE-BGC',
'TITLE-BGCO',
'TITLE-BGCOL',
'TITLE-BGCOLO',
'TITLE-BGCOLOR',
'TITLE-DC',
'TITLE-DCO',
'TITLE-DCOL',
'TITLE-DCOLO',
'TITLE-DCOLOR',
'TITLE-FGC',
'TITLE-FGCO',
'TITLE-FGCOL',
'TITLE-FGCOLO',
'TITLE-FGCOLOR',
'TITLE-FO',
'TITLE-FON',
'TITLE-FONT',
'TO',
'TO-ROWID',
'TODAY',
'TOGGLE-BOX',
'TOOLTIP',
'TOOLTIPS',
'TOP-NAV-QUERY',
'TOP-ONLY',
'TOPIC',
'TOTAL',
'TRAILING',
'TRANS',
'TRANS-INIT-PROCEDURE',
'TRANSACTION',
'TRANSACTION-MODE',
'TRANSPARENT',
'TRIGGER',
'TRIGGERS',
'TRIM',
'TRUE',
'TRUNC',
'TRUNCA',
'TRUNCAT',
'TRUNCATE',
'TYPE',
'TYPE-OF',
'UNBOX',
'UNBUFF',
'UNBUFFE',
'UNBUFFER',
'UNBUFFERE',
'UNBUFFERED',
'UNDERL',
'UNDERLI',
'UNDERLIN',
'UNDERLINE',
'UNDO',
'UNFORM',
'UNFORMA',
'UNFORMAT',
'UNFORMATT',
'UNFORMATTE',
'UNFORMATTED',
'UNION',
'UNIQUE',
'UNIQUE-ID',
'UNIQUE-MATCH',
'UNIX',
'UNLESS-HIDDEN',
'UNLOAD',
'UNSIGNED-LONG',
'UNSUBSCRIBE',
'UP',
'UPDATE',
'UPDATE-ATTRIBUTE',
'URL',
'URL-DECODE',
'URL-ENCODE',
'URL-PASSWORD',
'URL-USERID',
'USE',
'USE-DICT-EXPS',
'USE-FILENAME',
'USE-INDEX',
'USE-REVVIDEO',
'USE-TEXT',
'USE-UNDERLINE',
'USE-WIDGET-POOL',
'USER',
'USER-ID',
'USERID',
'USING',
'V6DISPLAY',
'V6FRAME',
'VALID-EVENT',
'VALID-HANDLE',
'VALID-OBJECT',
'VALIDATE',
'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE',
'VALIDATE-SEAL',
'VALIDATION-ENABLED',
'VALUE',
'VALUE-CHANGED',
'VALUES',
'VAR',
'VARI',
'VARIA',
'VARIAB',
'VARIABL',
'VARIABLE',
'VERBOSE',
'VERSION',
'VERT',
'VERTI',
'VERTIC',
'VERTICA',
'VERTICAL',
'VIEW',
'VIEW-AS',
'VIEW-FIRST-COLUMN-ON-REOPEN',
'VIRTUAL-HEIGHT',
'VIRTUAL-HEIGHT-',
'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH',
'VIRTUAL-HEIGHT-CHA',
'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT-P',
'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX',
'VIRTUAL-HEIGHT-PIXE',
'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-HEIGHT-PIXELS',
'VIRTUAL-WIDTH',
'VIRTUAL-WIDTH-',
'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH',
'VIRTUAL-WIDTH-CHA',
'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-CHARS',
'VIRTUAL-WIDTH-P',
'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX',
'VIRTUAL-WIDTH-PIXE',
'VIRTUAL-WIDTH-PIXEL',
'VIRTUAL-WIDTH-PIXELS',
'VISIBLE',
'VOID',
'WAIT',
'WAIT-FOR',
'WARNING',
'WEB-CONTEXT',
'WEEKDAY',
'WHEN',
'WHERE',
'WHILE',
'WIDGET',
'WIDGET-E',
'WIDGET-EN',
'WIDGET-ENT',
'WIDGET-ENTE',
'WIDGET-ENTER',
'WIDGET-ID',
'WIDGET-L',
'WIDGET-LE',
'WIDGET-LEA',
'WIDGET-LEAV',
'WIDGET-LEAVE',
'WIDGET-POOL',
'WIDTH',
'WIDTH-',
'WIDTH-C',
'WIDTH-CH',
'WIDTH-CHA',
'WIDTH-CHAR',
'WIDTH-CHARS',
'WIDTH-P',
'WIDTH-PI',
'WIDTH-PIX',
'WIDTH-PIXE',
'WIDTH-PIXEL',
'WIDTH-PIXELS',
'WINDOW',
'WINDOW-MAXIM',
'WINDOW-MAXIMI',
'WINDOW-MAXIMIZ',
'WINDOW-MAXIMIZE',
'WINDOW-MAXIMIZED',
'WINDOW-MINIM',
'WINDOW-MINIMI',
'WINDOW-MINIMIZ',
'WINDOW-MINIMIZE',
'WINDOW-MINIMIZED',
'WINDOW-NAME',
'WINDOW-NORMAL',
'WINDOW-STA',
'WINDOW-STAT',
'WINDOW-STATE',
'WINDOW-SYSTEM',
'WITH',
'WORD-INDEX',
'WORD-WRAP',
'WORK-AREA-HEIGHT-PIXELS',
'WORK-AREA-WIDTH-PIXELS',
'WORK-AREA-X',
'WORK-AREA-Y',
'WORK-TAB',
'WORK-TABL',
'WORK-TABLE',
'WORKFILE',
'WRITE',
'WRITE-CDATA',
'WRITE-CHARACTERS',
'WRITE-COMMENT',
'WRITE-DATA-ELEMENT',
'WRITE-EMPTY-ELEMENT',
'WRITE-ENTITY-REF',
'WRITE-EXTERNAL-DTD',
'WRITE-FRAGMENT',
'WRITE-JSON',
'WRITE-MESSAGE',
'WRITE-PROCESSING-INSTRUCTION',
'WRITE-STATUS',
'WRITE-XML',
'WRITE-XMLSCHEMA',
'X',
'X-OF',
'XCODE',
'XML-DATA-TYPE',
'XML-ENTITY-EXPANSION-LIMIT',
'XML-NODE-TYPE',
'XML-SCHEMA-PATH',
'XML-STRICT-ENTITY-RESOLUTION',
'XML-SUPPRESS-NAMESPACE-PROCESSING',
'XREF',
'XREF-XML',
'Y',
'Y-OF',
'YEAR',
'YEAR-OFFSET',
'YES',
'YES-NO',
'YES-NO-CANCEL'
)
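# Usage sketch (not part of the original module; the tuple's real name is
# defined above this excerpt, so OPENEDGEKEYWORDS below is a hypothetical
# stand-in). A Pygments lexer typically folds such a keyword list into a
# single alternation regex:
#
#   from pygments.lexer import RegexLexer, words
#   from pygments.token import Keyword, Text
#
#   class MiniOpenEdgeLexer(RegexLexer):
#       tokens = {
#           'root': [
#               (words(OPENEDGEKEYWORDS, suffix=r'\b'), Keyword.Reserved),
#               (r'\s+', Text),
#               (r'\S+', Text),
#           ],
#       }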
| 49,398 | Python | 17.992311 | 70 | 0.52405 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/haskell.py | """
pygments.lexers.haskell
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Haskell and related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
default, include, inherit, line_re
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
from pygments import unistring as uni
__all__ = ['HaskellLexer', 'HspecLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer',
'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer',
'LiterateCryptolLexer', 'KokaLexer']
class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
.. versionadded:: 0.8
"""
name = 'Haskell'
url = 'https://www.haskell.org/'
aliases = ['haskell', 'hs']
filenames = ['*.hs']
mimetypes = ['text/x-haskell']
reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else',
'family', 'if', 'in', 'infix[lr]?', 'instance',
'let', 'newtype', 'of', 'then', 'type', 'where', '_')
ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
tokens = {
'root': [
# Whitespace:
(r'\s+', Whitespace),
# (r'--\s*|.*$', Comment.Doc),
(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r"'[^\\]'", String.Char), # this has to come before the TH quote
(r'^[_' + uni.Ll + r'][\w\']*', Name.Function),
(r"'?[_" + uni.Ll + r"][\w']*", Name),
(r"('')?[" + uni.Lu + r"][\w\']*", Keyword.Type),
(r"(')[" + uni.Lu + r"][\w\']*", Keyword.Type),
(r"(')\[[^\]]*\]", Keyword.Type), # tuples and lists get special treatment in GHC
(r"(')\([^)]*\)", Keyword.Type), # ..
(r"(')[:!#$%&*+.\\/<=>?@^|~-]+", Keyword.Type), # promoted type operators
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*_*[pP][+-]?\d(_*\d)*', Number.Float),
(r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*\.[\da-fA-F](_*[\da-fA-F])*'
r'(_*[pP][+-]?\d(_*\d)*)?', Number.Float),
(r'\d(_*\d)*_*[eE][+-]?\d(_*\d)*', Number.Float),
(r'\d(_*\d)*\.\d(_*\d)*(_*[eE][+-]?\d(_*\d)*)?', Number.Float),
(r'0[bB]_*[01](_*[01])*', Number.Bin),
(r'0[oO]_*[0-7](_*[0-7])*', Number.Oct),
(r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*', Number.Hex),
(r'\d(_*\d)*', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Whitespace),
(r'"', String, 'string'),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([' + uni.Lu + r'][\w.]*)(\s+)(as)(\s+)([' + uni.Lu + r'][\w.]*)',
bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Name), '#pop'),
# import X hiding (functions)
(r'([' + uni.Lu + r'][\w.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Punctuation), 'funclist'),
# import X (functions)
(r'([' + uni.Lu + r'][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
# import X
(r'[\w.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Whitespace),
(r'([' + uni.Lu + r'][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
(r'[' + uni.Lu + r'][\w.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Whitespace),
(r'[' + uni.Lu + r']\w*', Keyword.Type),
(r'(_[\w\']+|[' + uni.Ll + r'][\w\']*)', Name.Function),
(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
# NOTE: the next four states are shared in the AgdaLexer; make sure
# any change is compatible with Agda as well or copy over and change
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'\{-', Comment.Multiline, '#push'),
(r'-\}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']'", String.Char, '#pop'),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'),
],
}
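# Minimal usage sketch for the lexer above (illustrative, not upstream code),
# using the standard high-level Pygments API:
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#
#   src = 'main :: IO ()\nmain = putStrLn "hello"'
#   print(highlight(src, HaskellLexer(), TerminalFormatter()))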
class HspecLexer(HaskellLexer):
"""
A Haskell lexer with support for Hspec constructs.
.. versionadded:: 2.4.0
"""
name = 'Hspec'
aliases = ['hspec']
filenames = ['*Spec.hs']
mimetypes = []
tokens = {
'root': [
(r'(it)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)),
(r'(describe)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)),
(r'(context)(\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)),
inherit,
],
}
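# Because of `inherit`, the three Hspec-specific rules above are tried first
# and everything else falls through to the HaskellLexer rules. Illustrative
# check (not upstream code):
#
#   spec = 'describe "addition" $ do\n  it "adds" $ (1 + 1) `shouldBe` 2\n'
#   for tok, val in HspecLexer().get_tokens(spec):
#       print(tok, repr(val))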
class IdrisLexer(RegexLexer):
"""
A lexer for the dependently typed programming language Idris.
Based on the Haskell and Agda Lexer.
.. versionadded:: 2.0
"""
name = 'Idris'
url = 'https://www.idris-lang.org/'
aliases = ['idris', 'idr']
filenames = ['*.idr']
mimetypes = ['text/x-idris']
reserved = ('case', 'class', 'data', 'default', 'using', 'do', 'else',
'if', 'in', 'infix[lr]?', 'instance', 'rewrite', 'auto',
'namespace', 'codata', 'mutual', 'private', 'public', 'abstract',
'total', 'partial',
'interface', 'implementation', 'export', 'covering', 'constructor',
'let', 'proof', 'of', 'then', 'static', 'where', '_', 'with',
'pattern', 'term', 'syntax', 'prefix',
'postulate', 'parameters', 'record', 'dsl', 'impossible', 'implicit',
'tactics', 'intros', 'intro', 'compute', 'refine', 'exact', 'trivial')
ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
directives = ('lib', 'link', 'flag', 'include', 'hide', 'freeze', 'access',
'default', 'logging', 'dynamic', 'name', 'error_handlers', 'language')
tokens = {
'root': [
# Comments
(r'^(\s*)(%%(%s))' % '|'.join(directives),
bygroups(Whitespace, Keyword.Reserved)),
(r'(\s*)(--(?![!#$%&*+./<=>?@^|_~:\\]).*?)$', bygroups(Whitespace, Comment.Single)),
(r'(\s*)(\|{3}.*?)$', bygroups(Whitespace, Comment.Single)),
(r'(\s*)(\{-)', bygroups(Whitespace, Comment.Multiline), 'comment'),
# Declaration
(r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)',
bygroups(Whitespace, Name.Function, Whitespace, Operator.Word, Whitespace)),
# Identifiers
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Whitespace), 'module'),
(r"('')?[A-Z][\w\']*", Keyword.Type),
(r'[a-z][\w\']*', Text),
# Special Symbols
(r'(<-|::|->|=>|=)', Operator.Word), # specials
(r'([(){}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Strings
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
(r'[^\s(){}]+', Text),
(r'\s+?', Whitespace), # Whitespace
],
'module': [
(r'\s+', Whitespace),
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
(r'[A-Z][\w.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Whitespace),
(r'[A-Z]\w*', Keyword.Type),
(r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
(r'--.*$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
# NOTE: the next four states are shared in the AgdaLexer; make sure
# any change is compatible with Agda as well or copy over and change
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'\{-', Comment.Multiline, '#push'),
(r'-\}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']", String.Char),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop')
],
}
class AgdaLexer(RegexLexer):
"""
For the Agda dependently typed functional programming language and
proof assistant.
.. versionadded:: 2.0
"""
name = 'Agda'
url = 'http://wiki.portal.chalmers.se/agda/pmwiki.php'
aliases = ['agda']
filenames = ['*.agda']
mimetypes = ['text/x-agda']
reserved = (
'abstract', 'codata', 'coinductive', 'constructor', 'data', 'do',
'eta-equality', 'field', 'forall', 'hiding', 'in', 'inductive', 'infix',
'infixl', 'infixr', 'instance', 'interleaved', 'let', 'macro', 'mutual',
'no-eta-equality', 'open', 'overlap', 'pattern', 'postulate', 'primitive',
'private', 'quote', 'quoteTerm', 'record', 'renaming', 'rewrite',
'syntax', 'tactic', 'unquote', 'unquoteDecl', 'unquoteDef', 'using',
'variable', 'where', 'with',
)
tokens = {
'root': [
# Declaration
(r'^(\s*)([^\s(){}]+)(\s*)(:)(\s*)',
bygroups(Whitespace, Name.Function, Whitespace,
Operator.Word, Whitespace)),
# Comments
(r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
(r'\{-', Comment.Multiline, 'comment'),
# Holes
(r'\{!', Comment.Directive, 'hole'),
# Lexemes:
# Identifiers
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Whitespace),
'module'),
(r'\b(Set|Prop)[\u2080-\u2089]*\b', Keyword.Type),
# Special Symbols
(r'(\(|\)|\{|\})', Operator),
(r'(\.{1,3}|\||\u03BB|\u2200|\u2192|:|=|->)', Operator.Word),
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Strings
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
(r'[^\s(){}]+', Text),
(r'\s+?', Whitespace), # Whitespace
],
'hole': [
# Holes
(r'[^!{}]+', Comment.Directive),
(r'\{!', Comment.Directive, '#push'),
(r'!\}', Comment.Directive, '#pop'),
(r'[!{}]', Comment.Directive),
],
'module': [
(r'\{-', Comment.Multiline, 'comment'),
(r'[a-zA-Z][\w.\']*', Name, '#pop'),
(r'[\W0-9_]+', Text)
],
'comment': HaskellLexer.tokens['comment'],
'character': HaskellLexer.tokens['character'],
'string': HaskellLexer.tokens['string'],
'escape': HaskellLexer.tokens['escape']
}
class CryptolLexer(RegexLexer):
"""
FIXME: A Cryptol2 lexer based on the lexemes defined in the Haskell 98 Report.
.. versionadded:: 2.0
"""
name = 'Cryptol'
aliases = ['cryptol', 'cry']
filenames = ['*.cry']
mimetypes = ['text/x-cryptol']
reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else',
'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2',
'max', 'min', 'module', 'newtype', 'pragma', 'property',
'then', 'type', 'where', 'width')
ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
tokens = {
'root': [
# Whitespace:
(r'\s+', Whitespace),
# (r'--\s*|.*$', Comment.Doc),
(r'//.*$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'^[_a-z][\w\']*', Name.Function),
(r"'?[_a-z][\w']*", Name),
(r"('')?[A-Z][\w\']*", Keyword.Type),
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Whitespace),
(r'"', String, 'string'),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([A-Z][\w.]*)(\s+)(as)(\s+)([A-Z][\w.]*)',
bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Name), '#pop'),
# import X hiding (functions)
(r'([A-Z][\w.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Whitespace, Keyword, Whitespace, Punctuation), 'funclist'),
# import X (functions)
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
# import X
(r'[\w.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Whitespace),
(r'([A-Z][\w.]*)(\s+)(\()',
bygroups(Name.Namespace, Whitespace, Punctuation), 'funclist'),
(r'[A-Z][\w.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Whitespace),
(r'[A-Z]\w*', Keyword.Type),
(r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
# TODO: these don't match the comments in docs, remove.
# (r'--(?![!#$%&*+./<=>?@^|_~:\\]).*?$', Comment.Single),
# (r'{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
'comment': [
# Multiline Comments
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']'", String.Char, '#pop'),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'),
],
}
EXTRA_KEYWORDS = {'join', 'split', 'reverse', 'transpose', 'width',
'length', 'tail', '<<', '>>', '<<<', '>>>', 'const',
'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error',
'trace'}
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Name.Builtin, value
else:
yield index, token, value
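# The override above post-processes the inherited token stream, promoting a
# plain Name token to Name.Builtin whenever its value appears in
# EXTRA_KEYWORDS. Illustrative check (not upstream code):
#
#   toks = list(CryptolLexer().get_tokens('f xs = reverse xs'))
#   # 'reverse' is now tagged Token.Name.Builtin instead of Token.Name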
class LiterateLexer(Lexer):
"""
Base class for lexers of literate file formats based on LaTeX or Bird-style
(prefixing each code line with ">").
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
"""
bird_re = re.compile(r'(>[ \t]*)(.*\n)')
def __init__(self, baselexer, **options):
self.baselexer = baselexer
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
style = self.options.get('litstyle')
if style is None:
style = 'latex' if text.lstrip()[0:1] in '%\\' else 'bird'
code = ''
insertions = []
if style == 'bird':
# bird-style
for match in line_re.finditer(text):
line = match.group()
m = self.bird_re.match(line)
if m:
insertions.append((len(code),
[(0, Comment.Special, m.group(1))]))
code += m.group(2)
else:
insertions.append((len(code), [(0, Text, line)]))
else:
# latex-style
from pygments.lexers.markup import TexLexer
lxlexer = TexLexer(**self.options)
codelines = 0
latex = ''
for match in line_re.finditer(text):
line = match.group()
if codelines:
if line.lstrip().startswith('\\end{code}'):
codelines = 0
latex += line
else:
code += line
elif line.lstrip().startswith('\\begin{code}'):
codelines = 1
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
latex = ''
else:
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
yield from do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code))
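# Sketch of the autodetection above (illustrative, not upstream code): a
# source whose first non-whitespace character is '\' or '%' is lexed as
# LaTeX-style, otherwise each line prefixed with '>' is treated as code and
# everything else as plain text.
#
#   lhs = 'Some prose.\n> main = return ()\nMore prose.\n'
#   lx = LiterateHaskellLexer()   # concrete subclass defined just below
#   for tok, val in lx.get_tokens(lhs):
#       print(tok, repr(val))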
class LiterateHaskellLexer(LiterateLexer):
"""
For Literate Haskell (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 0.9
"""
name = 'Literate Haskell'
aliases = ['literate-haskell', 'lhaskell', 'lhs']
filenames = ['*.lhs']
mimetypes = ['text/x-literate-haskell']
def __init__(self, **options):
hslexer = HaskellLexer(**options)
LiterateLexer.__init__(self, hslexer, **options)
class LiterateIdrisLexer(LiterateLexer):
"""
For Literate Idris (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Idris'
aliases = ['literate-idris', 'lidris', 'lidr']
filenames = ['*.lidr']
mimetypes = ['text/x-literate-idris']
def __init__(self, **options):
hslexer = IdrisLexer(**options)
LiterateLexer.__init__(self, hslexer, **options)
class LiterateAgdaLexer(LiterateLexer):
"""
For Literate Agda source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Agda'
aliases = ['literate-agda', 'lagda']
filenames = ['*.lagda']
mimetypes = ['text/x-literate-agda']
def __init__(self, **options):
agdalexer = AgdaLexer(**options)
LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)
class LiterateCryptolLexer(LiterateLexer):
"""
For Literate Cryptol (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Cryptol'
aliases = ['literate-cryptol', 'lcryptol', 'lcry']
filenames = ['*.lcry']
mimetypes = ['text/x-literate-cryptol']
def __init__(self, **options):
crylexer = CryptolLexer(**options)
LiterateLexer.__init__(self, crylexer, **options)
class KokaLexer(RegexLexer):
"""
Lexer for the Koka language.
.. versionadded:: 1.6
"""
name = 'Koka'
url = 'https://koka-lang.github.io/koka/doc/index.html'
aliases = ['koka']
filenames = ['*.kk', '*.kki']
mimetypes = ['text/x-koka']
keywords = [
'infix', 'infixr', 'infixl',
'type', 'cotype', 'rectype', 'alias',
'struct', 'con',
'fun', 'function', 'val', 'var',
'external',
'if', 'then', 'else', 'elif', 'return', 'match',
'private', 'public',
'module', 'import', 'as',
'include', 'inline',
'rec',
'try', 'yield', 'enum',
'interface', 'instance',
]
# keywords that are followed by a type
typeStartKeywords = [
'type', 'cotype', 'rectype', 'alias', 'struct', 'enum',
]
# keywords valid in a type
typekeywords = [
'forall', 'exists', 'some', 'with',
]
# builtin names and special names
builtin = [
'for', 'while', 'repeat',
'foreach', 'foreach-indexed',
'error', 'catch', 'finally',
'cs', 'js', 'file', 'ref', 'assigned',
]
# symbols that can be in an operator
symbols = r'[$%&*+@!/\\^~=.:\-?|<>]+'
# symbol boundary: an operator keyword should not be followed by any of these
sboundary = '(?!' + symbols + ')'
# name boundary: a keyword should not be followed by any of these
boundary = r'(?![\w/])'
# koka token abstractions
tokenType = Name.Attribute
tokenTypeDef = Name.Class
tokenConstructor = Generic.Emph
# main lexer
tokens = {
'root': [
include('whitespace'),
# go into type mode
(r'::?' + sboundary, tokenType, 'type'),
(r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef),
'alias-type'),
(r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef),
'struct-type'),
((r'(%s)' % '|'.join(typeStartKeywords)) +
r'(\s+)([a-z]\w*)?', bygroups(Keyword, Whitespace, tokenTypeDef),
'type'),
# special sequences of tokens (we use ?: for non-capturing group as
# required by 'bygroups')
(r'(module)(\s+)(interface(?=\s))?(\s+)?((?:[a-z]\w*/)*[a-z]\w*)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Namespace)),
(r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)'
r'(?:(\s*)(=)(\s*)(qualified)?(\s*)'
r'((?:[a-z]\w*/)*[a-z]\w*))?',
bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, Keyword, Whitespace,
Keyword, Whitespace, Name.Namespace)),
(r'^(public|private)?(\s+)?(function|fun|val)'
r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function)),
(r'^(?:(public|private)(?=\s+external))?((?<!^)\s+)?(external)(\s+)(inline(?=\s))?(\s+)?'
r'([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Name.Function)),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
(r'(%s)' % '|'.join(keywords) + boundary, Keyword),
(r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),
(r'::?|:=|\->|[=.]' + sboundary, Keyword),
# names
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenConstructor)),
(r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)),
(r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))',
bygroups(Name.Namespace, Name)),
(r'_\w*', Name.Variable),
# literal string
(r'@"', String.Double, 'litstring'),
# operators
(symbols + "|/(?![*/])", Operator),
(r'`', Operator),
(r'[{}()\[\];,]', Punctuation),
# literals. No check for literal characters with len > 1
(r'[0-9]+\.[0-9]+([eE][\-+]?[0-9]+)?', Number.Float),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r"'", String.Char, 'char'),
(r'"', String.Double, 'string'),
],
# type started by alias
'alias-type': [
(r'=', Keyword),
include('type')
],
# type started by struct
'struct-type': [
(r'(?=\((?!,*\)))', Punctuation, '#pop'),
include('type')
],
# type started by colon
'type': [
(r'[(\[<]', tokenType, 'type-nested'),
include('type-content')
],
# type nested in brackets: can contain parameters, comma etc.
'type-nested': [
(r'[)\]>]', tokenType, '#pop'),
(r'[(\[<]', tokenType, 'type-nested'),
(r',', tokenType),
(r'([a-z]\w*)(\s*)(:)(?!:)',
bygroups(Name, Whitespace, tokenType)), # parameter name
include('type-content')
],
# shared contents of a type
'type-content': [
include('whitespace'),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),
(r'(?=((%s)' % '|'.join(keywords) + boundary + '))',
Keyword, '#pop'), # need to match because names overlap...
# kinds
(r'[EPHVX]' + boundary, tokenType),
# type names
(r'[a-z][0-9]*(?![\w/])', tokenType),
(r'_\w*', tokenType.Variable), # Generic.Emph
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenType)),
(r'((?:[a-z]\w*/)*)([a-z]\w+)',
bygroups(Name.Namespace, tokenType)),
# type keyword operators
(r'::|->|[.:|]', tokenType),
# catchall
default('#pop')
],
# comments and literals
'whitespace': [
(r'(\n\s*)(#.*)$', bygroups(Whitespace, Comment.Preproc)),
(r'\s+', Whitespace),
(r'/\*', Comment.Multiline, 'comment'),
(r'//.*$', Comment.Single)
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'litstring': [
(r'[^"]+', String.Double),
(r'""', String.Escape),
(r'"', String.Double, '#pop'),
],
'string': [
(r'[^\\"\n]+', String.Double),
include('escape-sequence'),
(r'["\n]', String.Double, '#pop'),
],
'char': [
(r'[^\\\'\n]+', String.Char),
include('escape-sequence'),
(r'[\'\n]', String.Char, '#pop'),
],
'escape-sequence': [
(r'\\[nrt\\"\']', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
# Yes, \U literals are 6 hex digits.
(r'\\U[0-9a-fA-F]{6}', String.Escape)
]
}
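# A quick sanity check for the state machine above; a sketch only, driven
# through the public ``get_tokens`` API (``_demo_koka`` is an illustrative
# name, not part of pygments).
def _demo_koka():
    code = 'fun main() { println("hi koka") }'
    for tok, val in KokaLexer().get_tokens(code):
        print(tok, repr(val))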
| 32,898 | Python | 36.728211 | 101 | 0.449875 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/resource.py | """
pygments.lexers.resource
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for resource definition files.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Comment, String, Number, Operator, Text, \
Keyword, Name
__all__ = ['ResourceLexer']
class ResourceLexer(RegexLexer):
"""Lexer for `ICU Resource bundles
<http://userguide.icu-project.org/locale/resources>`_.
.. versionadded:: 2.0
"""
name = 'ResourceBundle'
aliases = ['resourcebundle', 'resource']
filenames = []
_types = (':table', ':array', ':string', ':bin', ':import', ':intvector',
':int', ':alias')
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'//.*?$', Comment),
(r'"', String, 'string'),
(r'-?\d+', Number.Integer),
(r'[,{}]', Operator),
(r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types),
bygroups(Name, Text, Keyword)),
(r'\s+', Text),
(words(_types), Keyword),
],
'string': [
(r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|'
r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String),
(r'\{', String.Escape, 'msgname'),
(r'"', String, '#pop')
],
'msgname': [
(r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message'))
],
'message': [
(r'\{', String.Escape, 'msgname'),
(r'\}', String.Escape, '#pop'),
(r'(,)(\s*)([a-z]+)(\s*\})',
bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'),
(r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)',
bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
String.Escape, Operator.Word, String.Escape, Operator,
String.Escape, Number.Integer, String.Escape), 'choice'),
(r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)',
bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
String.Escape), 'choice'),
(r'\s+', String.Escape)
],
'choice': [
(r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)',
bygroups(Operator, Number.Integer, String.Escape), 'message'),
(r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'),
(r'\}', String.Escape, ('#pop', '#pop')),
(r'\s+', String.Escape)
],
'str': [
(r'\}', String.Escape, '#pop'),
(r'\{', String.Escape, 'msgname'),
(r'[^{}]+', String)
]
}
def analyse_text(text):
if text.startswith('root:table'):
return 1.0
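# A short sketch covering both entry points of this lexer: ``analyse_text``
# (consulted by ``pygments.lexers.guess_lexer``) and plain tokenization.
# ``_demo_resource`` is an illustrative name only.
def _demo_resource():
    src = 'root:table {\n    usage:string { "Hello" }\n}\n'
    print(ResourceLexer.analyse_text(src))  # 1.0: text starts with 'root:table'
    for tok, val in ResourceLexer().get_tokens(src):
        print(tok, repr(val))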
| 2,902 | Python | 33.152941 | 83 | 0.454514 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/ada.py | """
pygments.lexers.ada
~~~~~~~~~~~~~~~~~~~
Lexers for Ada family languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words, using, this, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers._ada_builtins import KEYWORD_LIST, BUILTIN_LIST
__all__ = ['AdaLexer']
class AdaLexer(RegexLexer):
"""
For Ada source code.
.. versionadded:: 1.3
"""
name = 'Ada'
aliases = ['ada', 'ada95', 'ada2005']
filenames = ['*.adb', '*.ads', '*.ada']
mimetypes = ['text/x-ada']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'--.*?\n', Comment.Single),
(r'[^\S\n]+', Text),
(r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
(r'(subtype|type)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
(r'task|protected', Keyword.Declaration),
(r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
(r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
(r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text,
Comment.Preproc)),
(r'(true|false|null)\b', Keyword.Constant),
# builtin types
(words(BUILTIN_LIST, suffix=r'\b'), Keyword.Type),
            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
(r'generic|private', Keyword.Declaration),
(r'package', Keyword.Declaration, 'package'),
(r'array\b', Keyword.Reserved, 'array_def'),
(r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'(\w+)(\s*)(:)(\s*)(constant)',
bygroups(Name.Constant, Text, Punctuation, Text,
Keyword.Reserved)),
(r'<<\w+>>', Name.Label),
(r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
# keywords
(words(KEYWORD_LIST, prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(r'"[^"]*"', String),
include('attribute'),
include('numbers'),
(r"'[^']'", String.Character),
(r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
(r"(<>|=>|:=|@|[\[\]]|[()|:;,.'])", Punctuation),
(r'[*<>+=/&-]', Operator),
(r'\n+', Text),
],
'numbers': [
(r'[0-9_]+#[0-9a-f_\.]+#', Number.Hex),
(r'[0-9_]+\.[0-9_]*', Number.Float),
(r'[0-9_]+', Number.Integer),
],
'attribute': [
(r"(')(\w+)", bygroups(Punctuation, Name.Attribute)),
],
'subprogram': [
(r'\(', Punctuation, ('#pop', 'formal_part')),
(r';', Punctuation, '#pop'),
(r'is\b', Keyword.Reserved, '#pop'),
(r'"[^"]+"|\w+', Name.Function),
include('root'),
],
'end': [
('(if|case|record|loop|select)', Keyword.Reserved),
(r'"[^"]+"|[\w.]+', Name.Function),
(r'\s+', Text),
(';', Punctuation, '#pop'),
],
'type_def': [
(r';', Punctuation, '#pop'),
(r'\(', Punctuation, 'formal_part'),
(r'\[', Punctuation, 'formal_part'),
(r'with|and|use', Keyword.Reserved),
(r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
(r'record\b', Keyword.Reserved, ('record_def')),
(r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'),
include('root'),
],
'array_def': [
(r';', Punctuation, '#pop'),
(r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)),
include('root'),
],
'record_def': [
(r'end record', Keyword.Reserved, '#pop'),
include('root'),
],
'import': [
# TODO: use Name.Namespace if appropriate. This needs
            # work to distinguish imports from aspects.
(r'[\w.]+', Name, '#pop'),
default('#pop'),
],
'formal_part': [
(r'\)', Punctuation, '#pop'),
(r'\]', Punctuation, '#pop'),
(r'\w+', Name.Variable),
(r',|:[^=]', Punctuation),
(r'(in|not|null|out|access)\b', Keyword.Reserved),
include('root'),
],
'package': [
('body', Keyword.Declaration),
(r'is\s+new|renames', Keyword.Reserved),
('is', Keyword.Reserved, '#pop'),
(';', Punctuation, '#pop'),
(r'\(', Punctuation, 'package_instantiation'),
(r'([\w.]+)', Name.Class),
include('root'),
],
'package_instantiation': [
(r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)),
(r'[\w.\'"]', Text),
(r'\)', Punctuation, '#pop'),
include('root'),
],
}
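# A brief sketch exercising the 'subprogram' and 'end' states above via the
# public ``get_tokens`` API (``_demo_ada`` is an illustrative name).
def _demo_ada():
    src = "procedure Hello is\nbegin\n   null;\nend Hello;\n"
    for tok, val in AdaLexer().get_tokens(src):
        print(tok, repr(val))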
| 5,320 | Python | 35.696551 | 84 | 0.457519 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/mime.py | """
pygments.lexers.mime
~~~~~~~~~~~~~~~~~~~~
Lexer for Multipurpose Internet Mail Extensions (MIME) data.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.lexers import get_lexer_for_mimetype
from pygments.token import Text, Name, String, Operator, Comment, Other
from pygments.util import get_int_opt, ClassNotFound
__all__ = ["MIMELexer"]
class MIMELexer(RegexLexer):
"""
Lexer for Multipurpose Internet Mail Extensions (MIME) data. This lexer is
designed to process nested multipart data.
It assumes that the given data contains both header and body (and is
split at an empty line). If no valid header is found, then the entire data
will be treated as body.
Additional options accepted:
`MIME-max-level`
Max recursion level for nested MIME structure. Any negative number
        would be treated as unlimited. (default: -1)
    `Content_Type`
Treat the data as a specific content type. Useful when header is
missing, or this lexer would try to parse from header. (default:
`text/plain`)
`Multipart-Boundary`
Set the default multipart boundary delimiter. This option is only used
when `Content-Type` is `multipart` and header is missing. This lexer
would try to parse from header by default. (default: None)
    `Content_Transfer_Encoding`
Treat the data as a specific encoding. Or this lexer would try to parse
from header by default. (default: None)
.. versionadded:: 2.5
"""
name = "MIME"
aliases = ["mime"]
mimetypes = ["multipart/mixed",
"multipart/related",
"multipart/alternative"]
def __init__(self, **options):
super().__init__(**options)
self.boundary = options.get("Multipart-Boundary")
self.content_transfer_encoding = options.get("Content_Transfer_Encoding")
self.content_type = options.get("Content_Type", "text/plain")
self.max_nested_level = get_int_opt(options, "MIME-max-level", -1)
def get_header_tokens(self, match):
field = match.group(1)
if field.lower() in self.attention_headers:
yield match.start(1), Name.Tag, field + ":"
yield match.start(2), Text.Whitespace, match.group(2)
pos = match.end(2)
body = match.group(3)
for i, t, v in self.get_tokens_unprocessed(body, ("root", field.lower())):
yield pos + i, t, v
else:
yield match.start(), Comment, match.group()
def get_body_tokens(self, match):
pos_body_start = match.start()
entire_body = match.group()
# skip first newline
if entire_body[0] == '\n':
yield pos_body_start, Text.Whitespace, '\n'
pos_body_start = pos_body_start + 1
entire_body = entire_body[1:]
# if it is not a multipart
if not self.content_type.startswith("multipart") or not self.boundary:
for i, t, v in self.get_bodypart_tokens(entire_body):
yield pos_body_start + i, t, v
return
# find boundary
bdry_pattern = r"^--%s(--)?\n" % re.escape(self.boundary)
bdry_matcher = re.compile(bdry_pattern, re.MULTILINE)
# some data has prefix text before first boundary
m = bdry_matcher.search(entire_body)
if m:
pos_part_start = pos_body_start + m.end()
pos_iter_start = lpos_end = m.end()
yield pos_body_start, Text, entire_body[:m.start()]
yield pos_body_start + lpos_end, String.Delimiter, m.group()
else:
pos_part_start = pos_body_start
pos_iter_start = 0
# process tokens of each body part
for m in bdry_matcher.finditer(entire_body, pos_iter_start):
# bodypart
lpos_start = pos_part_start - pos_body_start
lpos_end = m.start()
part = entire_body[lpos_start:lpos_end]
for i, t, v in self.get_bodypart_tokens(part):
yield pos_part_start + i, t, v
# boundary
yield pos_body_start + lpos_end, String.Delimiter, m.group()
pos_part_start = pos_body_start + m.end()
# some data has suffix text after last boundary
lpos_start = pos_part_start - pos_body_start
if lpos_start != len(entire_body):
yield pos_part_start, Text, entire_body[lpos_start:]
def get_bodypart_tokens(self, text):
        # return the text unprocessed if:
        # * there is no content
        # * no content type is specified
        # * the content encoding is not readable
        # * the max recursion level is exceeded
if not text.strip() or not self.content_type:
return [(0, Other, text)]
cte = self.content_transfer_encoding
if cte and cte not in {"8bit", "7bit", "quoted-printable"}:
return [(0, Other, text)]
if self.max_nested_level == 0:
return [(0, Other, text)]
# get lexer
try:
lexer = get_lexer_for_mimetype(self.content_type)
except ClassNotFound:
return [(0, Other, text)]
if isinstance(lexer, type(self)):
lexer.max_nested_level = self.max_nested_level - 1
return lexer.get_tokens_unprocessed(text)
def store_content_type(self, match):
self.content_type = match.group(1)
prefix_len = match.start(1) - match.start(0)
yield match.start(0), Text.Whitespace, match.group(0)[:prefix_len]
yield match.start(1), Name.Label, match.group(2)
yield match.end(2), String.Delimiter, '/'
yield match.start(3), Name.Label, match.group(3)
def get_content_type_subtokens(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Text.Whitespace, match.group(2)
yield match.start(3), Name.Attribute, match.group(3)
yield match.start(4), Operator, match.group(4)
yield match.start(5), String, match.group(5)
if match.group(3).lower() == "boundary":
boundary = match.group(5).strip()
if boundary[0] == '"' and boundary[-1] == '"':
boundary = boundary[1:-1]
self.boundary = boundary
def store_content_transfer_encoding(self, match):
self.content_transfer_encoding = match.group(0).lower()
yield match.start(0), Name.Constant, match.group(0)
attention_headers = {"content-type", "content-transfer-encoding"}
tokens = {
"root": [
(r"^([\w-]+):( *)([\s\S]*?\n)(?![ \t])", get_header_tokens),
(r"^$[\s\S]+", get_body_tokens),
],
"header": [
# folding
(r"\n[ \t]", Text.Whitespace),
(r"\n(?![ \t])", Text.Whitespace, "#pop"),
],
"content-type": [
include("header"),
(
r"^\s*((multipart|application|audio|font|image|model|text|video"
r"|message)/([\w-]+))",
store_content_type,
),
(r'(;)((?:[ \t]|\n[ \t])*)([\w:-]+)(=)([\s\S]*?)(?=;|\n(?![ \t]))',
get_content_type_subtokens),
(r';[ \t]*\n(?![ \t])', Text, '#pop'),
],
"content-transfer-encoding": [
include("header"),
(r"([\w-]+)", store_content_transfer_encoding),
],
}
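# A minimal sketch of this lexer on a complete message; ``_demo_mime`` is an
# illustrative name. The boundary is parsed from the Content-Type header; for
# headerless data, the ``Content_Type`` and ``Multipart-Boundary`` options
# documented above supply the same metadata.
def _demo_mime():
    msg = ('Content-Type: multipart/mixed; boundary="BND"\n'
           '\n'
           '--BND\n'
           'Content-Type: text/plain\n'
           '\n'
           'hello world\n'
           '--BND--\n')
    for tok, val in MIMELexer().get_tokens(msg):
        print(tok, repr(val))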
| 7,538 | Python | 34.729858 | 86 | 0.571239 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/tlb.py | """
pygments.lexers.tlb
~~~~~~~~~~~~~~~~~~~
Lexers for TL-b.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Operator, Name, \
Number, Whitespace, Punctuation, Comment
__all__ = ['TlbLexer']
class TlbLexer(RegexLexer):
"""
For TL-b source code.
"""
name = 'Tl-b'
aliases = ['tlb']
filenames = ['*.tlb']
tokens = {
'root': [
(r'\s+', Whitespace),
include('comments'),
(r'[0-9]+', Number),
(words((
'+', '-', '*', '=', '?', '~', '.',
'^', '==', '<', '>', '<=', '>=', '!='
)), Operator),
(words(('##', '#<', '#<=')), Name.Tag),
(r'#[0-9a-f]*_?', Name.Tag),
(r'\$[01]*_?', Name.Tag),
(r'[a-zA-Z_][0-9a-zA-Z_]*', Name),
(r'[;():\[\]{}]', Punctuation)
],
'comments': [
            (r'//.*', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
}
| 1,377 | Python | 22.75862 | 70 | 0.404503 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/capnproto.py | """
pygments.lexers.capnproto
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Cap'n Proto schema language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, default
from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace
__all__ = ['CapnProtoLexer']
class CapnProtoLexer(RegexLexer):
"""
For Cap'n Proto source.
.. versionadded:: 2.2
"""
name = 'Cap\'n Proto'
url = 'https://capnproto.org'
filenames = ['*.capnp']
aliases = ['capnp']
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|'
r'extends|in|of|on|as|with|from|fixed)\b',
Keyword),
(r'[\w.]+', Name),
(r'[^#@=:$\w\s]+', Text),
(r'\s+', Whitespace),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[\[(]', Name.Class, 'parentype'),
default('#pop'),
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[\[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
default('#pop'),
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[\[(]', Literal, 'parenexp'),
default('#pop'),
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[\[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
default('#pop'),
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[\[(]', Name.Attribute, 'annexp'),
default('#pop'),
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[\[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
default('#pop'),
],
}
| 2,175 | Python | 27.631579 | 76 | 0.415632 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/zig.py | """
pygments.lexers.zig
~~~~~~~~~~~~~~~~~~~
Lexers for Zig.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ZigLexer']
class ZigLexer(RegexLexer):
"""
Lexer for the Zig language.
grammar: https://ziglang.org/documentation/master/#Grammar
"""
name = 'Zig'
url = 'http://www.ziglang.org'
aliases = ['zig']
filenames = ['*.zig']
mimetypes = ['text/zig']
type_keywords = (
words(('bool', 'f16', 'f32', 'f64', 'f128', 'void', 'noreturn', 'type',
'anyerror', 'promise', 'i0', 'u0', 'isize', 'usize', 'comptime_int',
'comptime_float', 'c_short', 'c_ushort', 'c_int', 'c_uint', 'c_long',
               'c_ulong', 'c_longlong', 'c_ulonglong', 'c_longdouble', 'c_void',
'i8', 'u8', 'i16', 'u16', 'i32', 'u32', 'i64', 'u64', 'i128',
'u128'), suffix=r'\b'),
Keyword.Type)
storage_keywords = (
words(('const', 'var', 'extern', 'packed', 'export', 'pub', 'noalias',
'inline', 'comptime', 'nakedcc', 'stdcallcc', 'volatile', 'allowzero',
'align', 'linksection', 'threadlocal'), suffix=r'\b'),
Keyword.Reserved)
structure_keywords = (
words(('struct', 'enum', 'union', 'error'), suffix=r'\b'),
Keyword)
statement_keywords = (
words(('break', 'return', 'continue', 'asm', 'defer', 'errdefer',
'unreachable', 'try', 'catch', 'async', 'await', 'suspend',
'resume', 'cancel'), suffix=r'\b'),
Keyword)
conditional_keywords = (
words(('if', 'else', 'switch', 'and', 'or', 'orelse'), suffix=r'\b'),
Keyword)
repeat_keywords = (
words(('while', 'for'), suffix=r'\b'),
Keyword)
other_keywords = (
words(('fn', 'usingnamespace', 'test'), suffix=r'\b'),
Keyword)
constant_keywords = (
words(('true', 'false', 'null', 'undefined'), suffix=r'\b'),
Keyword.Constant)
tokens = {
'root': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'//.*?\n', Comment.Single),
# Keywords
statement_keywords,
storage_keywords,
structure_keywords,
repeat_keywords,
type_keywords,
constant_keywords,
conditional_keywords,
other_keywords,
# Floats
(r'0x[0-9a-fA-F]+\.[0-9a-fA-F]+([pP][\-+]?[0-9a-fA-F]+)?', Number.Float),
(r'0x[0-9a-fA-F]+\.?[pP][\-+]?[0-9a-fA-F]+', Number.Float),
(r'[0-9]+\.[0-9]+([eE][-+]?[0-9]+)?', Number.Float),
(r'[0-9]+\.?[eE][-+]?[0-9]+', Number.Float),
# Integers
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
# Identifier
(r'@[a-zA-Z_]\w*', Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
# Characters
(r'\'\\\'\'', String.Escape),
(r'\'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])\'',
String.Escape),
(r'\'[^\\\']\'', String),
# Strings
(r'\\\\[^\n]*', String.Heredoc),
(r'c\\\\[^\n]*', String.Heredoc),
(r'c?"', String, 'string'),
# Operators, Punctuation
(r'[+%=><|^!?/\-*&~:]', Operator),
(r'[{}()\[\],.;]', Punctuation)
],
'string': [
(r'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])',
String.Escape),
(r'[^\\"\n]+', String),
(r'"', String, '#pop')
]
}
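# A quick sketch for the rules above; floats, builtins and strings all come
# from the 'root' state (``_demo_zig`` is an illustrative name).
def _demo_zig():
    src = 'const std = @import("std");\nvar x: f32 = 1.5e2;\n'
    for tok, val in ZigLexer().get_tokens(src):
        print(tok, repr(val))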
| 3,953 | Python | 30.632 | 85 | 0.453833 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/modeling.py | """
pygments.lexers.modeling
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for modeling languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
from pygments.lexers.html import HtmlLexer
from pygments.lexers import _stan_builtins
__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer']
class ModelicaLexer(RegexLexer):
"""
For Modelica source code.
.. versionadded:: 1.1
"""
name = 'Modelica'
url = 'http://www.modelica.org/'
aliases = ['modelica']
filenames = ['*.mo']
mimetypes = ['text/x-modelica']
flags = re.DOTALL | re.MULTILINE
_name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
tokens = {
'whitespace': [
(r'[\s\ufeff]+', Text),
(r'//[^\n]*\n?', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'root': [
include('whitespace'),
(r'"', String.Double, 'string'),
(r'[()\[\]{},;]+', Punctuation),
(r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
(r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
(r'\d+', Number.Integer),
(r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
r'transpose|vector|zeros)\b', Name.Builtin),
(r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
r'equation|exit|expandable|extends|external|firstTick|final|flow|for|if|'
r'import|impure|in|initial|inner|input|interval|loop|nondiscrete|outer|'
r'output|parameter|partial|protected|public|pure|redeclare|'
r'replaceable|return|stream|then|when|while)\b',
Keyword.Reserved),
(r'(and|not|or)\b', Operator.Word),
(r'(block|class|connector|end|function|model|operator|package|'
r'record|type)\b', Keyword.Reserved, 'class'),
(r'(false|true)\b', Keyword.Constant),
(r'within\b', Keyword.Reserved, 'package-prefix'),
(_name, Name)
],
'class': [
include('whitespace'),
(r'(function|record)\b', Keyword.Reserved),
(r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
(_name, Name.Class, '#pop'),
default('#pop')
],
'package-prefix': [
include('whitespace'),
(_name, Name.Namespace, '#pop'),
default('#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\[\'"?\\abfnrtv]', String.Escape),
(r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
using(HtmlLexer)),
(r'<|\\?[^"\\<]+', String.Double)
]
}
class BugsLexer(RegexLexer):
"""
Pygments Lexer for OpenBugs and WinBugs
models.
.. versionadded:: 1.6
"""
name = 'BUGS'
aliases = ['bugs', 'winbugs', 'openbugs']
filenames = ['*.bug']
_FUNCTIONS = (
# Scalar functions
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
'trunc',
# Vector functions
'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
'sd', 'sort', 'sum',
# Special
'D', 'I', 'F', 'T', 'C')
""" OpenBUGS built-in functions
From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
This also includes
- T, C, I : Truncation and censoring.
``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
- D : ODE
- F : Functional http://www.openbugs.info/Examples/Functionals.html
"""
_DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
'dmt', 'dwish')
""" OpenBUGS built-in distributions
Functions from
http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
"""
tokens = {
'whitespace': [
(r"\s+", Text),
],
'comments': [
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Built-in Functions
(r'(%s)(?=\s*\()'
% r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
Name.Builtin),
# Regular variable names
(r'[A-Za-z][\w.]*', Name),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
# Punctuation
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators
(r'\+|-|\*|/', Operator),
# Block
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r"^\s*model\s*{", text, re.M):
return 0.7
else:
return 0.0
class JagsLexer(RegexLexer):
"""
Pygments Lexer for JAGS.
.. versionadded:: 1.6
"""
name = 'JAGS'
aliases = ['jags']
filenames = ['*.jag', '*.bug']
# JAGS
_FUNCTIONS = (
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cos', 'cosh', 'cloglog',
'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
# Truncation/Censoring (should I include)
'T', 'I')
# Distributions with density, probability and quartile functions
_DISTRIBUTIONS = tuple('[dpq]%s' % x for x in
('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp',
'df', 'gamma', 'gen.gamma', 'logis', 'lnorm',
'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib'))
# Other distributions without density and probability
_OTHER_DISTRIBUTIONS = (
'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
'dnbinom', 'dweibull', 'ddirich')
tokens = {
'whitespace': [
(r"\s+", Text),
],
'names': [
# Regular variable names
(r'[a-zA-Z][\w.]*\b', Name),
],
'comments': [
# do not use stateful comments
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model|data)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
(r'var(?![\w.])', Keyword.Declaration),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Builtins
# Need to use lookahead because . is a valid char
(r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
+ _DISTRIBUTIONS
+ _OTHER_DISTRIBUTIONS),
Name.Builtin),
# Names
include('names'),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
(r'<-|~', Operator),
            # JAGS includes many more operators than OpenBUGS
            (r'\+|-|\*|\/|\|\||[&]{2}|[<>=]=?|\^|%.*?%', Operator),
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*model\s*\{', text, re.M):
if re.search(r'^\s*data\s*\{', text, re.M):
return 0.9
elif re.search(r'^\s*var', text, re.M):
return 0.9
else:
return 0.3
else:
return 0
class StanLexer(RegexLexer):
"""Pygments Lexer for Stan models.
The Stan modeling language is specified in the *Stan Modeling Language
User's Guide and Reference Manual, v2.17.0*,
`pdf <https://github.com/stan-dev/stan/releases/download/v2.17.0/stan-reference-2.17.0.pdf>`__.
.. versionadded:: 1.6
"""
name = 'Stan'
aliases = ['stan']
filenames = ['*.stan']
tokens = {
'whitespace': [
(r"\s+", Text),
],
'comments': [
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'(//|#).*$', Comment.Single),
],
'root': [
(r'"[^"]*"', String),
# Comments
include('comments'),
# block start
include('whitespace'),
# Block start
(r'(%s)(\s*)(\{)' %
r'|'.join(('functions', 'data', r'transformed\s+?data',
'parameters', r'transformed\s+parameters',
'model', r'generated\s+quantities')),
bygroups(Keyword.Namespace, Text, Punctuation)),
# target keyword
(r'target\s*\+=', Keyword),
# Reserved Words
(r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword),
# Truncation
(r'T(?=\s*\[)', Keyword),
# Data types
(r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type),
# < should be punctuation, but elsewhere I can't tell if it is in
# a range constraint
(r'(<)(\s*)(upper|lower|offset|multiplier)(\s*)(=)',
bygroups(Operator, Whitespace, Keyword, Whitespace, Punctuation)),
(r'(,)(\s*)(upper)(\s*)(=)',
bygroups(Punctuation, Whitespace, Keyword, Whitespace, Punctuation)),
# Punctuation
(r"[;,\[\]()]", Punctuation),
# Builtin
(r'(%s)(?=\s*\()' % '|'.join(_stan_builtins.FUNCTIONS), Name.Builtin),
(r'(~)(\s*)(%s)(?=\s*\()' % '|'.join(_stan_builtins.DISTRIBUTIONS),
bygroups(Operator, Whitespace, Name.Builtin)),
# Special names ending in __, like lp__
(r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo),
(r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved),
# user-defined functions
            (r'[A-Za-z]\w*(?=\s*\()', Name.Function),
# Imaginary Literals
(r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?i', Number.Float),
(r'\.[0-9]+([eE][+-]?[0-9]+)?i', Number.Float),
(r'[0-9]+i', Number.Float),
# Real Literals
(r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?', Number.Float),
(r'\.[0-9]+([eE][+-]?[0-9]+)?', Number.Float),
# Integer Literals
(r'[0-9]+', Number.Integer),
# Regular variable names
(r'[A-Za-z]\w*\b', Name),
# Assignment operators
(r'<-|(?:\+|-|\.?/|\.?\*|=)?=|~', Operator),
# Infix, prefix and postfix operators (and = )
(r"\+|-|\.?\*|\.?/|\\|'|\.?\^|!=?|<=?|>=?|\|\||&&|%|\?|:|%/%|!", Operator),
# Block delimiters
(r'[{}]', Punctuation),
# Distribution |
(r'\|', Punctuation)
]
}
def analyse_text(text):
if re.search(r'^\s*parameters\s*\{', text, re.M):
return 1.0
else:
return 0.0
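# A short sketch pairing ``analyse_text`` with tokenization; a ``parameters``
# block is the strongest signal in the heuristic above (``_demo_stan`` is an
# illustrative name).
def _demo_stan():
    src = 'parameters {\n  real mu;\n}\nmodel {\n  y ~ normal(mu, 1);\n}\n'
    print(StanLexer.analyse_text(src))  # 1.0
    for tok, val in StanLexer().get_tokens(src):
        print(tok, repr(val))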
| 13,524 | Python | 35.554054 | 99 | 0.474933 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/webidl.py | """
pygments.lexers.webidl
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Web IDL, including some extensions.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, default, include, words
from pygments.token import Comment, Keyword, Name, Number, Punctuation, \
String, Text
__all__ = ['WebIDLLexer']
_builtin_types = (
# primitive types
'byte', 'octet', 'boolean',
r'(?:unsigned\s+)?(?:short|long(?:\s+long)?)',
r'(?:unrestricted\s+)?(?:float|double)',
# string types
'DOMString', 'ByteString', 'USVString',
# exception types
'Error', 'DOMException',
# typed array types
'Uint8Array', 'Uint16Array', 'Uint32Array', 'Uint8ClampedArray',
'Float32Array', 'Float64Array',
# buffer source types
'ArrayBuffer', 'DataView', 'Int8Array', 'Int16Array', 'Int32Array',
# other
'any', 'void', 'object', 'RegExp',
)
_identifier = r'_?[A-Za-z][a-zA-Z0-9_-]*'
_keyword_suffix = r'(?![\w-])'
_string = r'"[^"]*"'
class WebIDLLexer(RegexLexer):
"""
For Web IDL.
.. versionadded:: 2.6
"""
name = 'Web IDL'
url = 'https://www.w3.org/wiki/Web_IDL'
aliases = ['webidl']
filenames = ['*.webidl']
tokens = {
'common': [
(r'\s+', Text),
(r'(?s)/\*.*?\*/', Comment.Multiline),
(r'//.*', Comment.Single),
(r'^#.*', Comment.Preproc),
],
'root': [
include('common'),
(r'\[', Punctuation, 'extended_attributes'),
(r'partial' + _keyword_suffix, Keyword),
(r'typedef' + _keyword_suffix, Keyword, ('typedef', 'type')),
(r'interface' + _keyword_suffix, Keyword, 'interface_rest'),
(r'enum' + _keyword_suffix, Keyword, 'enum_rest'),
(r'callback' + _keyword_suffix, Keyword, 'callback_rest'),
(r'dictionary' + _keyword_suffix, Keyword, 'dictionary_rest'),
(r'namespace' + _keyword_suffix, Keyword, 'namespace_rest'),
(_identifier, Name.Class, 'implements_rest'),
],
'extended_attributes': [
include('common'),
(r',', Punctuation),
(_identifier, Name.Decorator),
(r'=', Punctuation, 'extended_attribute_rest'),
(r'\(', Punctuation, 'argument_list'),
(r'\]', Punctuation, '#pop'),
],
'extended_attribute_rest': [
include('common'),
(_identifier, Name, 'extended_attribute_named_rest'),
(_string, String),
(r'\(', Punctuation, 'identifier_list'),
default('#pop'),
],
'extended_attribute_named_rest': [
include('common'),
(r'\(', Punctuation, 'argument_list'),
default('#pop'),
],
'argument_list': [
include('common'),
(r'\)', Punctuation, '#pop'),
default('argument'),
],
'argument': [
include('common'),
(r'optional' + _keyword_suffix, Keyword),
(r'\[', Punctuation, 'extended_attributes'),
(r',', Punctuation, '#pop'),
(r'\)', Punctuation, '#pop:2'),
default(('argument_rest', 'type'))
],
'argument_rest': [
include('common'),
(_identifier, Name.Variable),
(r'\.\.\.', Punctuation),
(r'=', Punctuation, 'default_value'),
default('#pop'),
],
'identifier_list': [
include('common'),
(_identifier, Name.Class),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
],
'type': [
include('common'),
(r'(?:' + r'|'.join(_builtin_types) + r')' + _keyword_suffix,
Keyword.Type, 'type_null'),
(words(('sequence', 'Promise', 'FrozenArray'),
suffix=_keyword_suffix), Keyword.Type, 'type_identifier'),
(_identifier, Name.Class, 'type_identifier'),
(r'\(', Punctuation, 'union_type'),
],
'union_type': [
include('common'),
(r'or' + _keyword_suffix, Keyword),
(r'\)', Punctuation, ('#pop', 'type_null')),
default('type'),
],
'type_identifier': [
(r'<', Punctuation, 'type_list'),
default(('#pop', 'type_null'))
],
'type_null': [
(r'\?', Punctuation),
default('#pop:2'),
],
'default_value': [
include('common'),
include('const_value'),
(_string, String, '#pop'),
(r'\[\s*\]', Punctuation, '#pop'),
],
'const_value': [
include('common'),
(words(('true', 'false', '-Infinity', 'Infinity', 'NaN', 'null'),
suffix=_keyword_suffix), Keyword.Constant, '#pop'),
(r'-?(?:(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:[Ee][+-]?[0-9]+)?' +
r'|[0-9]+[Ee][+-]?[0-9]+)', Number.Float, '#pop'),
(r'-?[1-9][0-9]*', Number.Integer, '#pop'),
(r'-?0[Xx][0-9A-Fa-f]+', Number.Hex, '#pop'),
(r'-?0[0-7]*', Number.Oct, '#pop'),
],
'typedef': [
include('common'),
(_identifier, Name.Class),
(r';', Punctuation, '#pop'),
],
'namespace_rest': [
include('common'),
(_identifier, Name.Namespace),
(r'\{', Punctuation, 'namespace_body'),
(r';', Punctuation, '#pop'),
],
'namespace_body': [
include('common'),
(r'\[', Punctuation, 'extended_attributes'),
(r'readonly' + _keyword_suffix, Keyword),
(r'attribute' + _keyword_suffix,
Keyword, ('attribute_rest', 'type')),
(r'const' + _keyword_suffix, Keyword, ('const_rest', 'type')),
(r'\}', Punctuation, '#pop'),
default(('operation_rest', 'type')),
],
'interface_rest': [
include('common'),
(_identifier, Name.Class),
(r':', Punctuation),
(r'\{', Punctuation, 'interface_body'),
(r';', Punctuation, '#pop'),
],
'interface_body': [
(words(('iterable', 'maplike', 'setlike'), suffix=_keyword_suffix),
Keyword, 'iterable_maplike_setlike_rest'),
(words(('setter', 'getter', 'creator', 'deleter', 'legacycaller',
'inherit', 'static', 'stringifier', 'jsonifier'),
suffix=_keyword_suffix), Keyword),
(r'serializer' + _keyword_suffix, Keyword, 'serializer_rest'),
(r';', Punctuation),
include('namespace_body'),
],
'attribute_rest': [
include('common'),
(_identifier, Name.Variable),
(r';', Punctuation, '#pop'),
],
'const_rest': [
include('common'),
(_identifier, Name.Constant),
(r'=', Punctuation, 'const_value'),
(r';', Punctuation, '#pop'),
],
'operation_rest': [
include('common'),
(r';', Punctuation, '#pop'),
default('operation'),
],
'operation': [
include('common'),
(_identifier, Name.Function),
(r'\(', Punctuation, 'argument_list'),
(r';', Punctuation, '#pop:2'),
],
'iterable_maplike_setlike_rest': [
include('common'),
(r'<', Punctuation, 'type_list'),
(r';', Punctuation, '#pop'),
],
'type_list': [
include('common'),
(r',', Punctuation),
(r'>', Punctuation, '#pop'),
default('type'),
],
'serializer_rest': [
include('common'),
(r'=', Punctuation, 'serialization_pattern'),
(r';', Punctuation, '#pop'),
default('operation'),
],
'serialization_pattern': [
include('common'),
(_identifier, Name.Variable, '#pop'),
(r'\{', Punctuation, 'serialization_pattern_map'),
(r'\[', Punctuation, 'serialization_pattern_list'),
],
'serialization_pattern_map': [
include('common'),
(words(('getter', 'inherit', 'attribute'),
suffix=_keyword_suffix), Keyword),
(r',', Punctuation),
(_identifier, Name.Variable),
(r'\}', Punctuation, '#pop:2'),
],
'serialization_pattern_list': [
include('common'),
(words(('getter', 'attribute'), suffix=_keyword_suffix), Keyword),
(r',', Punctuation),
(_identifier, Name.Variable),
(r']', Punctuation, '#pop:2'),
],
'enum_rest': [
include('common'),
(_identifier, Name.Class),
(r'\{', Punctuation, 'enum_body'),
(r';', Punctuation, '#pop'),
],
'enum_body': [
include('common'),
(_string, String),
(r',', Punctuation),
(r'\}', Punctuation, '#pop'),
],
'callback_rest': [
include('common'),
(r'interface' + _keyword_suffix,
Keyword, ('#pop', 'interface_rest')),
(_identifier, Name.Class),
(r'=', Punctuation, ('operation', 'type')),
(r';', Punctuation, '#pop'),
],
'dictionary_rest': [
include('common'),
(_identifier, Name.Class),
(r':', Punctuation),
(r'\{', Punctuation, 'dictionary_body'),
(r';', Punctuation, '#pop'),
],
'dictionary_body': [
include('common'),
(r'\[', Punctuation, 'extended_attributes'),
(r'required' + _keyword_suffix, Keyword),
(r'\}', Punctuation, '#pop'),
default(('dictionary_item', 'type')),
],
'dictionary_item': [
include('common'),
(_identifier, Name.Variable),
(r'=', Punctuation, 'default_value'),
(r';', Punctuation, '#pop'),
],
'implements_rest': [
include('common'),
(r'implements' + _keyword_suffix, Keyword),
(_identifier, Name.Class),
(r';', Punctuation, '#pop'),
],
}
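# A minimal sketch driving the interface and attribute states above
# (``_demo_webidl`` is an illustrative name, not part of pygments).
def _demo_webidl():
    src = 'interface Point {\n  readonly attribute double x;\n};\n'
    for tok, val in WebIDLLexer().get_tokens(src):
        print(tok, repr(val))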
| 10,517 | Python | 34.06 | 79 | 0.456024 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/_php_builtins.py | """
pygments.lexers._php_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file loads the function names and their modules from the
php webpage and generates itself.
Run with `python -I` to regenerate.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'APCu': ('apcu_add',
'apcu_cache_info',
'apcu_cas',
'apcu_clear_cache',
'apcu_dec',
'apcu_delete',
'apcu_enabled',
'apcu_entry',
'apcu_exists',
'apcu_fetch',
'apcu_inc',
'apcu_key_info',
'apcu_sma_info',
'apcu_store'),
'Aliases and deprecated Mysqli': ('mysqli_connect',
'mysqli_execute',
'mysqli_get_client_stats',
'mysqli_get_links_stats',
'mysqli_report'),
'Apache': ('apache_child_terminate',
'apache_get_modules',
'apache_get_version',
'apache_getenv',
'apache_lookup_uri',
'apache_note',
'apache_request_headers',
'apache_response_headers',
'apache_setenv',
'getallheaders',
'virtual'),
'Array': ('array_change_key_case',
'array_chunk',
'array_column',
'array_combine',
'array_count_values',
'array_diff_assoc',
'array_diff_key',
'array_diff_uassoc',
'array_diff_ukey',
'array_diff',
'array_fill_keys',
'array_fill',
'array_filter',
'array_flip',
'array_intersect_assoc',
'array_intersect_key',
'array_intersect_uassoc',
'array_intersect_ukey',
'array_intersect',
'array_is_list',
'array_key_exists',
'array_key_first',
'array_key_last',
'array_keys',
'array_map',
'array_merge_recursive',
'array_merge',
'array_multisort',
'array_pad',
'array_pop',
'array_product',
'array_push',
'array_rand',
'array_reduce',
'array_replace_recursive',
'array_replace',
'array_reverse',
'array_search',
'array_shift',
'array_slice',
'array_splice',
'array_sum',
'array_udiff_assoc',
'array_udiff_uassoc',
'array_udiff',
'array_uintersect_assoc',
'array_uintersect_uassoc',
'array_uintersect',
'array_unique',
'array_unshift',
'array_values',
'array_walk_recursive',
'array_walk',
'array',
'arsort',
'asort',
'compact',
'count',
'current',
'each',
'end',
'extract',
'in_array',
'key_exists',
'key',
'krsort',
'ksort',
'list',
'natcasesort',
'natsort',
'next',
'pos',
'prev',
'range',
'reset',
'rsort',
'shuffle',
'sizeof',
'sort',
'uasort',
'uksort',
'usort'),
'BC Math': ('bcadd',
'bccomp',
'bcdiv',
'bcmod',
'bcmul',
'bcpow',
'bcpowmod',
'bcscale',
'bcsqrt',
'bcsub'),
'Bzip2': ('bzclose',
'bzcompress',
'bzdecompress',
'bzerrno',
'bzerror',
'bzerrstr',
'bzflush',
'bzopen',
'bzread',
'bzwrite'),
'COM': ('com_create_guid',
'com_event_sink',
'com_get_active_object',
'com_load_typelib',
'com_message_pump',
'com_print_typeinfo',
'variant_abs',
'variant_add',
'variant_and',
'variant_cast',
'variant_cat',
'variant_cmp',
'variant_date_from_timestamp',
'variant_date_to_timestamp',
'variant_div',
'variant_eqv',
'variant_fix',
'variant_get_type',
'variant_idiv',
'variant_imp',
'variant_int',
'variant_mod',
'variant_mul',
'variant_neg',
'variant_not',
'variant_or',
'variant_pow',
'variant_round',
'variant_set_type',
'variant_set',
'variant_sub',
'variant_xor'),
'CSPRNG': ('random_bytes', 'random_int'),
'CUBRID': ('cubrid_bind',
'cubrid_close_prepare',
'cubrid_close_request',
'cubrid_col_get',
'cubrid_col_size',
'cubrid_column_names',
'cubrid_column_types',
'cubrid_commit',
'cubrid_connect_with_url',
'cubrid_connect',
'cubrid_current_oid',
'cubrid_disconnect',
'cubrid_drop',
'cubrid_error_code_facility',
'cubrid_error_code',
'cubrid_error_msg',
'cubrid_execute',
'cubrid_fetch',
'cubrid_free_result',
'cubrid_get_autocommit',
'cubrid_get_charset',
'cubrid_get_class_name',
'cubrid_get_client_info',
'cubrid_get_db_parameter',
'cubrid_get_query_timeout',
'cubrid_get_server_info',
'cubrid_get',
'cubrid_insert_id',
'cubrid_is_instance',
'cubrid_lob_close',
'cubrid_lob_export',
'cubrid_lob_get',
'cubrid_lob_send',
'cubrid_lob_size',
'cubrid_lob2_bind',
'cubrid_lob2_close',
'cubrid_lob2_export',
'cubrid_lob2_import',
'cubrid_lob2_new',
'cubrid_lob2_read',
'cubrid_lob2_seek64',
'cubrid_lob2_seek',
'cubrid_lob2_size64',
'cubrid_lob2_size',
'cubrid_lob2_tell64',
'cubrid_lob2_tell',
'cubrid_lob2_write',
'cubrid_lock_read',
'cubrid_lock_write',
'cubrid_move_cursor',
'cubrid_next_result',
'cubrid_num_cols',
'cubrid_num_rows',
'cubrid_pconnect_with_url',
'cubrid_pconnect',
'cubrid_prepare',
'cubrid_put',
'cubrid_rollback',
'cubrid_schema',
'cubrid_seq_drop',
'cubrid_seq_insert',
'cubrid_seq_put',
'cubrid_set_add',
'cubrid_set_autocommit',
'cubrid_set_db_parameter',
'cubrid_set_drop',
'cubrid_set_query_timeout',
'cubrid_version'),
'Calendar': ('cal_days_in_month',
'cal_from_jd',
'cal_info',
'cal_to_jd',
'easter_date',
'easter_days',
'frenchtojd',
'gregoriantojd',
'jddayofweek',
'jdmonthname',
'jdtofrench',
'jdtogregorian',
'jdtojewish',
'jdtojulian',
'jdtounix',
'jewishtojd',
'juliantojd',
'unixtojd'),
'Classes/Object': ('__autoload',
'class_alias',
'class_exists',
'enum_exists',
'get_called_class',
'get_class_methods',
'get_class_vars',
'get_class',
'get_declared_classes',
'get_declared_interfaces',
'get_declared_traits',
'get_mangled_object_vars',
'get_object_vars',
'get_parent_class',
'interface_exists',
'is_a',
'is_subclass_of',
'method_exists',
'property_exists',
'trait_exists'),
'Ctype': ('ctype_alnum',
'ctype_alpha',
'ctype_cntrl',
'ctype_digit',
'ctype_graph',
'ctype_lower',
'ctype_print',
'ctype_punct',
'ctype_space',
'ctype_upper',
'ctype_xdigit'),
'DBA': ('dba_close',
'dba_delete',
'dba_exists',
'dba_fetch',
'dba_firstkey',
'dba_handlers',
'dba_insert',
'dba_key_split',
'dba_list',
'dba_nextkey',
'dba_open',
'dba_optimize',
'dba_popen',
'dba_replace',
'dba_sync'),
'DOM': ('dom_import_simplexml',),
'Date/Time': ('checkdate',
'date_add',
'date_create_from_format',
'date_create_immutable_from_format',
'date_create_immutable',
'date_create',
'date_date_set',
'date_default_timezone_get',
'date_default_timezone_set',
'date_diff',
'date_format',
'date_get_last_errors',
'date_interval_create_from_date_string',
'date_interval_format',
'date_isodate_set',
'date_modify',
'date_offset_get',
'date_parse_from_format',
'date_parse',
'date_sub',
'date_sun_info',
'date_sunrise',
'date_sunset',
'date_time_set',
'date_timestamp_get',
'date_timestamp_set',
'date_timezone_get',
'date_timezone_set',
'date',
'getdate',
'gettimeofday',
'gmdate',
'gmmktime',
'gmstrftime',
'idate',
'localtime',
'microtime',
'mktime',
'strftime',
'strptime',
'strtotime',
'time',
'timezone_abbreviations_list',
'timezone_identifiers_list',
'timezone_location_get',
'timezone_name_from_abbr',
'timezone_name_get',
'timezone_offset_get',
'timezone_open',
'timezone_transitions_get',
'timezone_version_get'),
'Direct IO': ('dio_close',
'dio_fcntl',
'dio_open',
'dio_read',
'dio_seek',
'dio_stat',
'dio_tcsetattr',
'dio_truncate',
'dio_write'),
'Directory': ('chdir',
'chroot',
'closedir',
'dir',
'getcwd',
'opendir',
'readdir',
'rewinddir',
'scandir'),
'Eio': ('eio_busy',
'eio_cancel',
'eio_chmod',
'eio_chown',
'eio_close',
'eio_custom',
'eio_dup2',
'eio_event_loop',
'eio_fallocate',
'eio_fchmod',
'eio_fchown',
'eio_fdatasync',
'eio_fstat',
'eio_fstatvfs',
'eio_fsync',
'eio_ftruncate',
'eio_futime',
'eio_get_event_stream',
'eio_get_last_error',
'eio_grp_add',
'eio_grp_cancel',
'eio_grp_limit',
'eio_grp',
'eio_init',
'eio_link',
'eio_lstat',
'eio_mkdir',
'eio_mknod',
'eio_nop',
'eio_npending',
'eio_nready',
'eio_nreqs',
'eio_nthreads',
'eio_open',
'eio_poll',
'eio_read',
'eio_readahead',
'eio_readdir',
'eio_readlink',
'eio_realpath',
'eio_rename',
'eio_rmdir',
'eio_seek',
'eio_sendfile',
'eio_set_max_idle',
'eio_set_max_parallel',
'eio_set_max_poll_reqs',
'eio_set_max_poll_time',
'eio_set_min_parallel',
'eio_stat',
'eio_statvfs',
'eio_symlink',
'eio_sync_file_range',
'eio_sync',
'eio_syncfs',
'eio_truncate',
'eio_unlink',
'eio_utime',
'eio_write'),
'Enchant': ('enchant_broker_describe',
'enchant_broker_dict_exists',
'enchant_broker_free_dict',
'enchant_broker_free',
'enchant_broker_get_dict_path',
'enchant_broker_get_error',
'enchant_broker_init',
'enchant_broker_list_dicts',
'enchant_broker_request_dict',
'enchant_broker_request_pwl_dict',
'enchant_broker_set_dict_path',
'enchant_broker_set_ordering',
'enchant_dict_add_to_personal',
'enchant_dict_add_to_session',
'enchant_dict_add',
'enchant_dict_check',
'enchant_dict_describe',
'enchant_dict_get_error',
'enchant_dict_is_added',
'enchant_dict_is_in_session',
'enchant_dict_quick_check',
'enchant_dict_store_replacement',
'enchant_dict_suggest'),
'Error Handling': ('debug_backtrace',
'debug_print_backtrace',
'error_clear_last',
'error_get_last',
'error_log',
'error_reporting',
'restore_error_handler',
'restore_exception_handler',
'set_error_handler',
'set_exception_handler',
'trigger_error',
'user_error'),
'Exif': ('exif_imagetype',
'exif_read_data',
'exif_tagname',
'exif_thumbnail',
'read_exif_data'),
'Expect': ('expect_expectl', 'expect_popen'),
'FDF': ('fdf_add_doc_javascript',
'fdf_add_template',
'fdf_close',
'fdf_create',
'fdf_enum_values',
'fdf_errno',
'fdf_error',
'fdf_get_ap',
'fdf_get_attachment',
'fdf_get_encoding',
'fdf_get_file',
'fdf_get_flags',
'fdf_get_opt',
'fdf_get_status',
'fdf_get_value',
'fdf_get_version',
'fdf_header',
'fdf_next_field_name',
'fdf_open_string',
'fdf_open',
'fdf_remove_item',
'fdf_save_string',
'fdf_save',
'fdf_set_ap',
'fdf_set_encoding',
'fdf_set_file',
'fdf_set_flags',
'fdf_set_javascript_action',
'fdf_set_on_import_javascript',
'fdf_set_opt',
'fdf_set_status',
'fdf_set_submit_form_action',
'fdf_set_target_frame',
'fdf_set_value',
'fdf_set_version'),
'FPM': ('fastcgi_finish_request',),
'FTP': ('ftp_alloc',
'ftp_append',
'ftp_cdup',
'ftp_chdir',
'ftp_chmod',
'ftp_close',
'ftp_connect',
'ftp_delete',
'ftp_exec',
'ftp_fget',
'ftp_fput',
'ftp_get_option',
'ftp_get',
'ftp_login',
'ftp_mdtm',
'ftp_mkdir',
'ftp_mlsd',
'ftp_nb_continue',
'ftp_nb_fget',
'ftp_nb_fput',
'ftp_nb_get',
'ftp_nb_put',
'ftp_nlist',
'ftp_pasv',
'ftp_put',
'ftp_pwd',
'ftp_quit',
'ftp_raw',
'ftp_rawlist',
'ftp_rename',
'ftp_rmdir',
'ftp_set_option',
'ftp_site',
'ftp_size',
'ftp_ssl_connect',
'ftp_systype'),
'Fann': ('fann_cascadetrain_on_data',
'fann_cascadetrain_on_file',
'fann_clear_scaling_params',
'fann_copy',
'fann_create_from_file',
'fann_create_shortcut_array',
'fann_create_shortcut',
'fann_create_sparse_array',
'fann_create_sparse',
'fann_create_standard_array',
'fann_create_standard',
'fann_create_train_from_callback',
'fann_create_train',
'fann_descale_input',
'fann_descale_output',
'fann_descale_train',
'fann_destroy_train',
'fann_destroy',
'fann_duplicate_train_data',
'fann_get_activation_function',
'fann_get_activation_steepness',
'fann_get_bias_array',
'fann_get_bit_fail_limit',
'fann_get_bit_fail',
'fann_get_cascade_activation_functions_count',
'fann_get_cascade_activation_functions',
'fann_get_cascade_activation_steepnesses_count',
'fann_get_cascade_activation_steepnesses',
'fann_get_cascade_candidate_change_fraction',
'fann_get_cascade_candidate_limit',
'fann_get_cascade_candidate_stagnation_epochs',
'fann_get_cascade_max_cand_epochs',
'fann_get_cascade_max_out_epochs',
'fann_get_cascade_min_cand_epochs',
'fann_get_cascade_min_out_epochs',
'fann_get_cascade_num_candidate_groups',
'fann_get_cascade_num_candidates',
'fann_get_cascade_output_change_fraction',
'fann_get_cascade_output_stagnation_epochs',
'fann_get_cascade_weight_multiplier',
'fann_get_connection_array',
'fann_get_connection_rate',
'fann_get_errno',
'fann_get_errstr',
'fann_get_layer_array',
'fann_get_learning_momentum',
'fann_get_learning_rate',
'fann_get_MSE',
'fann_get_network_type',
'fann_get_num_input',
'fann_get_num_layers',
'fann_get_num_output',
'fann_get_quickprop_decay',
'fann_get_quickprop_mu',
'fann_get_rprop_decrease_factor',
'fann_get_rprop_delta_max',
'fann_get_rprop_delta_min',
'fann_get_rprop_delta_zero',
'fann_get_rprop_increase_factor',
'fann_get_sarprop_step_error_shift',
'fann_get_sarprop_step_error_threshold_factor',
'fann_get_sarprop_temperature',
'fann_get_sarprop_weight_decay_shift',
'fann_get_total_connections',
'fann_get_total_neurons',
'fann_get_train_error_function',
'fann_get_train_stop_function',
'fann_get_training_algorithm',
'fann_init_weights',
'fann_length_train_data',
'fann_merge_train_data',
'fann_num_input_train_data',
'fann_num_output_train_data',
'fann_print_error',
'fann_randomize_weights',
'fann_read_train_from_file',
'fann_reset_errno',
'fann_reset_errstr',
'fann_reset_MSE',
'fann_run',
'fann_save_train',
'fann_save',
'fann_scale_input_train_data',
'fann_scale_input',
'fann_scale_output_train_data',
'fann_scale_output',
'fann_scale_train_data',
'fann_scale_train',
'fann_set_activation_function_hidden',
'fann_set_activation_function_layer',
'fann_set_activation_function_output',
'fann_set_activation_function',
'fann_set_activation_steepness_hidden',
'fann_set_activation_steepness_layer',
'fann_set_activation_steepness_output',
'fann_set_activation_steepness',
'fann_set_bit_fail_limit',
'fann_set_callback',
'fann_set_cascade_activation_functions',
'fann_set_cascade_activation_steepnesses',
'fann_set_cascade_candidate_change_fraction',
'fann_set_cascade_candidate_limit',
'fann_set_cascade_candidate_stagnation_epochs',
'fann_set_cascade_max_cand_epochs',
'fann_set_cascade_max_out_epochs',
'fann_set_cascade_min_cand_epochs',
'fann_set_cascade_min_out_epochs',
'fann_set_cascade_num_candidate_groups',
'fann_set_cascade_output_change_fraction',
'fann_set_cascade_output_stagnation_epochs',
'fann_set_cascade_weight_multiplier',
'fann_set_error_log',
'fann_set_input_scaling_params',
'fann_set_learning_momentum',
'fann_set_learning_rate',
'fann_set_output_scaling_params',
'fann_set_quickprop_decay',
'fann_set_quickprop_mu',
'fann_set_rprop_decrease_factor',
'fann_set_rprop_delta_max',
'fann_set_rprop_delta_min',
'fann_set_rprop_delta_zero',
'fann_set_rprop_increase_factor',
'fann_set_sarprop_step_error_shift',
'fann_set_sarprop_step_error_threshold_factor',
'fann_set_sarprop_temperature',
'fann_set_sarprop_weight_decay_shift',
'fann_set_scaling_params',
'fann_set_train_error_function',
'fann_set_train_stop_function',
'fann_set_training_algorithm',
'fann_set_weight_array',
'fann_set_weight',
'fann_shuffle_train_data',
'fann_subset_train_data',
'fann_test_data',
'fann_test',
'fann_train_epoch',
'fann_train_on_data',
'fann_train_on_file',
'fann_train'),
'Fileinfo': ('finfo_buffer',
'finfo_close',
'finfo_file',
'finfo_open',
'finfo_set_flags',
'mime_content_type'),
'Filesystem': ('basename',
'chgrp',
'chmod',
'chown',
'clearstatcache',
'copy',
'dirname',
'disk_free_space',
'disk_total_space',
'diskfreespace',
'fclose',
'fdatasync',
'feof',
'fflush',
'fgetc',
'fgetcsv',
'fgets',
'fgetss',
'file_exists',
'file_get_contents',
'file_put_contents',
'file',
'fileatime',
'filectime',
'filegroup',
'fileinode',
'filemtime',
'fileowner',
'fileperms',
'filesize',
'filetype',
'flock',
'fnmatch',
'fopen',
'fpassthru',
'fputcsv',
'fputs',
'fread',
'fscanf',
'fseek',
'fstat',
'fsync',
'ftell',
'ftruncate',
'fwrite',
'glob',
'is_dir',
'is_executable',
'is_file',
'is_link',
'is_readable',
'is_uploaded_file',
'is_writable',
'is_writeable',
'lchgrp',
'lchown',
'link',
'linkinfo',
'lstat',
'mkdir',
'move_uploaded_file',
'parse_ini_file',
'parse_ini_string',
'pathinfo',
'pclose',
'popen',
'readfile',
'readlink',
'realpath_cache_get',
'realpath_cache_size',
'realpath',
'rename',
'rewind',
'rmdir',
'set_file_buffer',
'stat',
'symlink',
'tempnam',
'tmpfile',
'touch',
'umask',
'unlink'),
'Filter': ('filter_has_var',
'filter_id',
'filter_input_array',
'filter_input',
'filter_list',
'filter_var_array',
'filter_var'),
'Firebird/InterBase': ('fbird_add_user',
'fbird_affected_rows',
'fbird_backup',
'fbird_blob_add',
'fbird_blob_cancel',
'fbird_blob_close',
'fbird_blob_create',
'fbird_blob_echo',
'fbird_blob_get',
'fbird_blob_import',
'fbird_blob_info',
'fbird_blob_open',
'fbird_close',
'fbird_commit_ret',
'fbird_commit',
'fbird_connect',
'fbird_db_info',
'fbird_delete_user',
'fbird_drop_db',
'fbird_errcode',
'fbird_errmsg',
'fbird_execute',
'fbird_fetch_assoc',
'fbird_fetch_object',
'fbird_fetch_row',
'fbird_field_info',
'fbird_free_event_handler',
'fbird_free_query',
'fbird_free_result',
'fbird_gen_id',
'fbird_maintain_db',
'fbird_modify_user',
'fbird_name_result',
'fbird_num_fields',
'fbird_num_params',
'fbird_param_info',
'fbird_pconnect',
'fbird_prepare',
'fbird_query',
'fbird_restore',
'fbird_rollback_ret',
'fbird_rollback',
'fbird_server_info',
'fbird_service_attach',
'fbird_service_detach',
'fbird_set_event_handler',
'fbird_trans',
'fbird_wait_event',
'ibase_add_user',
'ibase_affected_rows',
'ibase_backup',
'ibase_blob_add',
'ibase_blob_cancel',
'ibase_blob_close',
'ibase_blob_create',
'ibase_blob_echo',
'ibase_blob_get',
'ibase_blob_import',
'ibase_blob_info',
'ibase_blob_open',
'ibase_close',
'ibase_commit_ret',
'ibase_commit',
'ibase_connect',
'ibase_db_info',
'ibase_delete_user',
'ibase_drop_db',
'ibase_errcode',
'ibase_errmsg',
'ibase_execute',
'ibase_fetch_assoc',
'ibase_fetch_object',
'ibase_fetch_row',
'ibase_field_info',
'ibase_free_event_handler',
'ibase_free_query',
'ibase_free_result',
'ibase_gen_id',
'ibase_maintain_db',
'ibase_modify_user',
'ibase_name_result',
'ibase_num_fields',
'ibase_num_params',
'ibase_param_info',
'ibase_pconnect',
'ibase_prepare',
'ibase_query',
'ibase_restore',
'ibase_rollback_ret',
'ibase_rollback',
'ibase_server_info',
'ibase_service_attach',
'ibase_service_detach',
'ibase_set_event_handler',
'ibase_trans',
'ibase_wait_event'),
'Function handling': ('call_user_func_array',
'call_user_func',
'create_function',
'forward_static_call_array',
'forward_static_call',
'func_get_arg',
'func_get_args',
'func_num_args',
'function_exists',
'get_defined_functions',
'register_shutdown_function',
'register_tick_function',
'unregister_tick_function'),
'GD and Image': ('gd_info',
'getimagesize',
'getimagesizefromstring',
'image_type_to_extension',
'image_type_to_mime_type',
'image2wbmp',
'imageaffine',
'imageaffinematrixconcat',
'imageaffinematrixget',
'imagealphablending',
'imageantialias',
'imagearc',
'imageavif',
'imagebmp',
'imagechar',
'imagecharup',
'imagecolorallocate',
'imagecolorallocatealpha',
'imagecolorat',
'imagecolorclosest',
'imagecolorclosestalpha',
'imagecolorclosesthwb',
'imagecolordeallocate',
'imagecolorexact',
'imagecolorexactalpha',
'imagecolormatch',
'imagecolorresolve',
'imagecolorresolvealpha',
'imagecolorset',
'imagecolorsforindex',
'imagecolorstotal',
'imagecolortransparent',
'imageconvolution',
'imagecopy',
'imagecopymerge',
'imagecopymergegray',
'imagecopyresampled',
'imagecopyresized',
'imagecreate',
'imagecreatefromavif',
'imagecreatefrombmp',
'imagecreatefromgd2',
'imagecreatefromgd2part',
'imagecreatefromgd',
'imagecreatefromgif',
'imagecreatefromjpeg',
'imagecreatefrompng',
'imagecreatefromstring',
'imagecreatefromtga',
'imagecreatefromwbmp',
'imagecreatefromwebp',
'imagecreatefromxbm',
'imagecreatefromxpm',
'imagecreatetruecolor',
'imagecrop',
'imagecropauto',
'imagedashedline',
'imagedestroy',
'imageellipse',
'imagefill',
'imagefilledarc',
'imagefilledellipse',
'imagefilledpolygon',
'imagefilledrectangle',
'imagefilltoborder',
'imagefilter',
'imageflip',
'imagefontheight',
'imagefontwidth',
'imageftbbox',
'imagefttext',
'imagegammacorrect',
'imagegd2',
'imagegd',
'imagegetclip',
'imagegetinterpolation',
'imagegif',
'imagegrabscreen',
'imagegrabwindow',
'imageinterlace',
'imageistruecolor',
'imagejpeg',
'imagelayereffect',
'imageline',
'imageloadfont',
'imageopenpolygon',
'imagepalettecopy',
'imagepalettetotruecolor',
'imagepng',
'imagepolygon',
'imagerectangle',
'imageresolution',
'imagerotate',
'imagesavealpha',
'imagescale',
'imagesetbrush',
'imagesetclip',
'imagesetinterpolation',
'imagesetpixel',
'imagesetstyle',
'imagesetthickness',
'imagesettile',
'imagestring',
'imagestringup',
'imagesx',
'imagesy',
'imagetruecolortopalette',
'imagettfbbox',
'imagettftext',
'imagetypes',
'imagewbmp',
'imagewebp',
'imagexbm',
'iptcembed',
'iptcparse',
'jpeg2wbmp',
'png2wbmp'),
'GMP': ('gmp_abs',
'gmp_add',
'gmp_and',
'gmp_binomial',
'gmp_clrbit',
'gmp_cmp',
'gmp_com',
'gmp_div_q',
'gmp_div_qr',
'gmp_div_r',
'gmp_div',
'gmp_divexact',
'gmp_export',
'gmp_fact',
'gmp_gcd',
'gmp_gcdext',
'gmp_hamdist',
'gmp_import',
'gmp_init',
'gmp_intval',
'gmp_invert',
'gmp_jacobi',
'gmp_kronecker',
'gmp_lcm',
'gmp_legendre',
'gmp_mod',
'gmp_mul',
'gmp_neg',
'gmp_nextprime',
'gmp_or',
'gmp_perfect_power',
'gmp_perfect_square',
'gmp_popcount',
'gmp_pow',
'gmp_powm',
'gmp_prob_prime',
'gmp_random_bits',
'gmp_random_range',
'gmp_random_seed',
'gmp_random',
'gmp_root',
'gmp_rootrem',
'gmp_scan0',
'gmp_scan1',
'gmp_setbit',
'gmp_sign',
'gmp_sqrt',
'gmp_sqrtrem',
'gmp_strval',
'gmp_sub',
'gmp_testbit',
'gmp_xor'),
'GeoIP': ('geoip_asnum_by_name',
'geoip_continent_code_by_name',
'geoip_country_code_by_name',
'geoip_country_code3_by_name',
'geoip_country_name_by_name',
'geoip_database_info',
'geoip_db_avail',
'geoip_db_filename',
'geoip_db_get_all_info',
'geoip_domain_by_name',
'geoip_id_by_name',
'geoip_isp_by_name',
'geoip_netspeedcell_by_name',
'geoip_org_by_name',
'geoip_record_by_name',
'geoip_region_by_name',
'geoip_region_name_by_code',
'geoip_setup_custom_directory',
'geoip_time_zone_by_country_and_region'),
'Gettext': ('bind_textdomain_codeset',
'bindtextdomain',
'dcgettext',
'dcngettext',
'dgettext',
'dngettext',
'gettext',
'ngettext',
'textdomain'),
'GnuPG': ('gnupg_adddecryptkey',
'gnupg_addencryptkey',
'gnupg_addsignkey',
'gnupg_cleardecryptkeys',
'gnupg_clearencryptkeys',
'gnupg_clearsignkeys',
'gnupg_decrypt',
'gnupg_decryptverify',
'gnupg_encrypt',
'gnupg_encryptsign',
'gnupg_export',
'gnupg_getengineinfo',
'gnupg_geterror',
'gnupg_geterrorinfo',
'gnupg_getprotocol',
'gnupg_import',
'gnupg_init',
'gnupg_keyinfo',
'gnupg_setarmor',
'gnupg_seterrormode',
'gnupg_setsignmode',
'gnupg_sign',
'gnupg_verify'),
'Grapheme': ('grapheme_extract',
'grapheme_stripos',
'grapheme_stristr',
'grapheme_strlen',
'grapheme_strpos',
'grapheme_strripos',
'grapheme_strrpos',
'grapheme_strstr',
'grapheme_substr'),
'Hash': ('hash_algos',
'hash_copy',
'hash_equals',
'hash_file',
'hash_final',
'hash_hkdf',
'hash_hmac_algos',
'hash_hmac_file',
'hash_hmac',
'hash_init',
'hash_pbkdf2',
'hash_update_file',
'hash_update_stream',
'hash_update',
'hash'),
'IBM DB2': ('db2_autocommit',
'db2_bind_param',
'db2_client_info',
'db2_close',
'db2_column_privileges',
'db2_columns',
'db2_commit',
'db2_conn_error',
'db2_conn_errormsg',
'db2_connect',
'db2_cursor_type',
'db2_escape_string',
'db2_exec',
'db2_execute',
'db2_fetch_array',
'db2_fetch_assoc',
'db2_fetch_both',
'db2_fetch_object',
'db2_fetch_row',
'db2_field_display_size',
'db2_field_name',
'db2_field_num',
'db2_field_precision',
'db2_field_scale',
'db2_field_type',
'db2_field_width',
'db2_foreign_keys',
'db2_free_result',
'db2_free_stmt',
'db2_get_option',
'db2_last_insert_id',
'db2_lob_read',
'db2_next_result',
'db2_num_fields',
'db2_num_rows',
'db2_pclose',
'db2_pconnect',
'db2_prepare',
'db2_primary_keys',
'db2_procedure_columns',
'db2_procedures',
'db2_result',
'db2_rollback',
'db2_server_info',
'db2_set_option',
'db2_special_columns',
'db2_statistics',
'db2_stmt_error',
'db2_stmt_errormsg',
'db2_table_privileges',
'db2_tables'),
'IDN': ('idn_to_ascii', 'idn_to_utf8'),
'IMAP': ('imap_8bit',
'imap_alerts',
'imap_append',
'imap_base64',
'imap_binary',
'imap_body',
'imap_bodystruct',
'imap_check',
'imap_clearflag_full',
'imap_close',
'imap_create',
'imap_createmailbox',
'imap_delete',
'imap_deletemailbox',
'imap_errors',
'imap_expunge',
'imap_fetch_overview',
'imap_fetchbody',
'imap_fetchheader',
'imap_fetchmime',
'imap_fetchstructure',
'imap_fetchtext',
'imap_gc',
'imap_get_quota',
'imap_get_quotaroot',
'imap_getacl',
'imap_getmailboxes',
'imap_getsubscribed',
'imap_header',
'imap_headerinfo',
'imap_headers',
'imap_last_error',
'imap_list',
'imap_listmailbox',
'imap_listscan',
'imap_listsubscribed',
'imap_lsub',
'imap_mail_compose',
'imap_mail_copy',
'imap_mail_move',
'imap_mail',
'imap_mailboxmsginfo',
'imap_mime_header_decode',
'imap_msgno',
'imap_mutf7_to_utf8',
'imap_num_msg',
'imap_num_recent',
'imap_open',
'imap_ping',
'imap_qprint',
'imap_rename',
'imap_renamemailbox',
'imap_reopen',
'imap_rfc822_parse_adrlist',
'imap_rfc822_parse_headers',
'imap_rfc822_write_address',
'imap_savebody',
'imap_scan',
'imap_scanmailbox',
'imap_search',
'imap_set_quota',
'imap_setacl',
'imap_setflag_full',
'imap_sort',
'imap_status',
'imap_subscribe',
'imap_thread',
'imap_timeout',
'imap_uid',
'imap_undelete',
'imap_unsubscribe',
'imap_utf7_decode',
'imap_utf7_encode',
'imap_utf8_to_mutf7',
'imap_utf8'),
'Igbinary': ('igbinary_serialize', 'igbinary_unserialize'),
'Inotify': ('inotify_add_watch',
'inotify_init',
'inotify_queue_len',
'inotify_read',
'inotify_rm_watch'),
'JSON': ('json_decode',
'json_encode',
'json_last_error_msg',
'json_last_error'),
'LDAP': ('ldap_8859_to_t61',
'ldap_add_ext',
'ldap_add',
'ldap_bind_ext',
'ldap_bind',
'ldap_close',
'ldap_compare',
'ldap_connect',
'ldap_control_paged_result_response',
'ldap_control_paged_result',
'ldap_count_entries',
'ldap_count_references',
'ldap_delete_ext',
'ldap_delete',
'ldap_dn2ufn',
'ldap_err2str',
'ldap_errno',
'ldap_error',
'ldap_escape',
'ldap_exop_passwd',
'ldap_exop_refresh',
'ldap_exop_whoami',
'ldap_exop',
'ldap_explode_dn',
'ldap_first_attribute',
'ldap_first_entry',
'ldap_first_reference',
'ldap_free_result',
'ldap_get_attributes',
'ldap_get_dn',
'ldap_get_entries',
'ldap_get_option',
'ldap_get_values_len',
'ldap_get_values',
'ldap_list',
'ldap_mod_add_ext',
'ldap_mod_add',
'ldap_mod_del_ext',
'ldap_mod_del',
'ldap_mod_replace_ext',
'ldap_mod_replace',
'ldap_modify_batch',
'ldap_modify',
'ldap_next_attribute',
'ldap_next_entry',
'ldap_next_reference',
'ldap_parse_exop',
'ldap_parse_reference',
'ldap_parse_result',
'ldap_read',
'ldap_rename_ext',
'ldap_rename',
'ldap_sasl_bind',
'ldap_search',
'ldap_set_option',
'ldap_set_rebind_proc',
'ldap_sort',
'ldap_start_tls',
'ldap_t61_to_8859',
'ldap_unbind'),
'LZF': ('lzf_compress', 'lzf_decompress', 'lzf_optimized_for'),
'Mail': ('ezmlm_hash', 'mail'),
'Mailparse': ('mailparse_determine_best_xfer_encoding',
'mailparse_msg_create',
'mailparse_msg_extract_part_file',
'mailparse_msg_extract_part',
'mailparse_msg_extract_whole_part_file',
'mailparse_msg_free',
'mailparse_msg_get_part_data',
'mailparse_msg_get_part',
'mailparse_msg_get_structure',
'mailparse_msg_parse_file',
'mailparse_msg_parse',
'mailparse_rfc822_parse_addresses',
'mailparse_stream_encode',
'mailparse_uudecode_all'),
'Math': ('abs',
'acos',
'acosh',
'asin',
'asinh',
'atan2',
'atan',
'atanh',
'base_convert',
'bindec',
'ceil',
'cos',
'cosh',
'decbin',
'dechex',
'decoct',
'deg2rad',
'exp',
'expm1',
'fdiv',
'floor',
'fmod',
'getrandmax',
'hexdec',
'hypot',
'intdiv',
'is_finite',
'is_infinite',
'is_nan',
'lcg_value',
'log10',
'log1p',
'log',
'max',
'min',
'mt_getrandmax',
'mt_rand',
'mt_srand',
'octdec',
'pi',
'pow',
'rad2deg',
'rand',
'round',
'sin',
'sinh',
'sqrt',
'srand',
'tan',
'tanh'),
'Mcrypt': ('mcrypt_create_iv',
'mcrypt_decrypt',
'mcrypt_enc_get_algorithms_name',
'mcrypt_enc_get_block_size',
'mcrypt_enc_get_iv_size',
'mcrypt_enc_get_key_size',
'mcrypt_enc_get_modes_name',
'mcrypt_enc_get_supported_key_sizes',
'mcrypt_enc_is_block_algorithm_mode',
'mcrypt_enc_is_block_algorithm',
'mcrypt_enc_is_block_mode',
'mcrypt_enc_self_test',
'mcrypt_encrypt',
'mcrypt_generic_deinit',
'mcrypt_generic_init',
'mcrypt_generic',
'mcrypt_get_block_size',
'mcrypt_get_cipher_name',
'mcrypt_get_iv_size',
'mcrypt_get_key_size',
'mcrypt_list_algorithms',
'mcrypt_list_modes',
'mcrypt_module_close',
'mcrypt_module_get_algo_block_size',
'mcrypt_module_get_algo_key_size',
'mcrypt_module_get_supported_key_sizes',
'mcrypt_module_is_block_algorithm_mode',
'mcrypt_module_is_block_algorithm',
'mcrypt_module_is_block_mode',
'mcrypt_module_open',
'mcrypt_module_self_test',
'mdecrypt_generic'),
'Memcache': ('memcache_debug',),
'Mhash': ('mhash_count',
'mhash_get_block_size',
'mhash_get_hash_name',
'mhash_keygen_s2k',
'mhash'),
'Misc.': ('connection_aborted',
'connection_status',
'constant',
'define',
'defined',
'die',
'eval',
'exit',
'get_browser',
'__halt_compiler',
'highlight_file',
'highlight_string',
'hrtime',
'ignore_user_abort',
'pack',
'php_strip_whitespace',
'sapi_windows_cp_conv',
'sapi_windows_cp_get',
'sapi_windows_cp_is_utf8',
'sapi_windows_cp_set',
'sapi_windows_generate_ctrl_event',
'sapi_windows_set_ctrl_handler',
'sapi_windows_vt100_support',
'show_source',
'sleep',
'sys_getloadavg',
'time_nanosleep',
'time_sleep_until',
'uniqid',
'unpack',
'usleep'),
'Multibyte String': ('mb_check_encoding',
'mb_chr',
'mb_convert_case',
'mb_convert_encoding',
'mb_convert_kana',
'mb_convert_variables',
'mb_decode_mimeheader',
'mb_decode_numericentity',
'mb_detect_encoding',
'mb_detect_order',
'mb_encode_mimeheader',
'mb_encode_numericentity',
'mb_encoding_aliases',
'mb_ereg_match',
'mb_ereg_replace_callback',
'mb_ereg_replace',
'mb_ereg_search_getpos',
'mb_ereg_search_getregs',
'mb_ereg_search_init',
'mb_ereg_search_pos',
'mb_ereg_search_regs',
'mb_ereg_search_setpos',
'mb_ereg_search',
'mb_ereg',
'mb_eregi_replace',
'mb_eregi',
'mb_get_info',
'mb_http_input',
'mb_http_output',
'mb_internal_encoding',
'mb_language',
'mb_list_encodings',
'mb_ord',
'mb_output_handler',
'mb_parse_str',
'mb_preferred_mime_name',
'mb_regex_encoding',
'mb_regex_set_options',
'mb_scrub',
'mb_send_mail',
'mb_split',
'mb_str_split',
'mb_strcut',
'mb_strimwidth',
'mb_stripos',
'mb_stristr',
'mb_strlen',
'mb_strpos',
'mb_strrchr',
'mb_strrichr',
'mb_strripos',
'mb_strrpos',
'mb_strstr',
'mb_strtolower',
'mb_strtoupper',
'mb_strwidth',
'mb_substitute_character',
'mb_substr_count',
'mb_substr'),
'MySQL': ('mysql_affected_rows',
'mysql_client_encoding',
'mysql_close',
'mysql_connect',
'mysql_create_db',
'mysql_data_seek',
'mysql_db_name',
'mysql_db_query',
'mysql_drop_db',
'mysql_errno',
'mysql_error',
'mysql_escape_string',
'mysql_fetch_array',
'mysql_fetch_assoc',
'mysql_fetch_field',
'mysql_fetch_lengths',
'mysql_fetch_object',
'mysql_fetch_row',
'mysql_field_flags',
'mysql_field_len',
'mysql_field_name',
'mysql_field_seek',
'mysql_field_table',
'mysql_field_type',
'mysql_free_result',
'mysql_get_client_info',
'mysql_get_host_info',
'mysql_get_proto_info',
'mysql_get_server_info',
'mysql_info',
'mysql_insert_id',
'mysql_list_dbs',
'mysql_list_fields',
'mysql_list_processes',
'mysql_list_tables',
'mysql_num_fields',
'mysql_num_rows',
'mysql_pconnect',
'mysql_ping',
'mysql_query',
'mysql_real_escape_string',
'mysql_result',
'mysql_select_db',
'mysql_set_charset',
'mysql_stat',
'mysql_tablename',
'mysql_thread_id',
'mysql_unbuffered_query'),
'Mysql_xdevapi': ('expression', 'getSession'),
'Network': ('checkdnsrr',
'closelog',
'dns_check_record',
'dns_get_mx',
'dns_get_record',
'fsockopen',
'gethostbyaddr',
'gethostbyname',
'gethostbynamel',
'gethostname',
'getmxrr',
'getprotobyname',
'getprotobynumber',
'getservbyname',
'getservbyport',
'header_register_callback',
'header_remove',
'header',
'headers_list',
'headers_sent',
'http_response_code',
'inet_ntop',
'inet_pton',
'ip2long',
'long2ip',
'net_get_interfaces',
'openlog',
'pfsockopen',
'setcookie',
'setrawcookie',
'socket_get_status',
'socket_set_blocking',
'socket_set_timeout',
'syslog'),
'OAuth': ('oauth_get_sbs', 'oauth_urlencode'),
'OCI8': ('oci_bind_array_by_name',
'oci_bind_by_name',
'oci_cancel',
'oci_client_version',
'oci_close',
'oci_commit',
'oci_connect',
'oci_define_by_name',
'oci_error',
'oci_execute',
'oci_fetch_all',
'oci_fetch_array',
'oci_fetch_assoc',
'oci_fetch_object',
'oci_fetch_row',
'oci_fetch',
'oci_field_is_null',
'oci_field_name',
'oci_field_precision',
'oci_field_scale',
'oci_field_size',
'oci_field_type_raw',
'oci_field_type',
'oci_free_descriptor',
'oci_free_statement',
'oci_get_implicit_resultset',
'oci_lob_copy',
'oci_lob_is_equal',
'oci_new_collection',
'oci_new_connect',
'oci_new_cursor',
'oci_new_descriptor',
'oci_num_fields',
'oci_num_rows',
'oci_parse',
'oci_password_change',
'oci_pconnect',
'oci_register_taf_callback',
'oci_result',
'oci_rollback',
'oci_server_version',
'oci_set_action',
'oci_set_call_timeout',
'oci_set_client_identifier',
'oci_set_client_info',
'oci_set_db_operation',
'oci_set_edition',
'oci_set_module_name',
'oci_set_prefetch_lob',
'oci_set_prefetch',
'oci_statement_type',
'oci_unregister_taf_callback'),
'ODBC': ('odbc_autocommit',
'odbc_binmode',
'odbc_close_all',
'odbc_close',
'odbc_columnprivileges',
'odbc_columns',
'odbc_commit',
'odbc_connect',
'odbc_cursor',
'odbc_data_source',
'odbc_do',
'odbc_error',
'odbc_errormsg',
'odbc_exec',
'odbc_execute',
'odbc_fetch_array',
'odbc_fetch_into',
'odbc_fetch_object',
'odbc_fetch_row',
'odbc_field_len',
'odbc_field_name',
'odbc_field_num',
'odbc_field_precision',
'odbc_field_scale',
'odbc_field_type',
'odbc_foreignkeys',
'odbc_free_result',
'odbc_gettypeinfo',
'odbc_longreadlen',
'odbc_next_result',
'odbc_num_fields',
'odbc_num_rows',
'odbc_pconnect',
'odbc_prepare',
'odbc_primarykeys',
'odbc_procedurecolumns',
'odbc_procedures',
'odbc_result_all',
'odbc_result',
'odbc_rollback',
'odbc_setoption',
'odbc_specialcolumns',
'odbc_statistics',
'odbc_tableprivileges',
'odbc_tables'),
'OPcache': ('opcache_compile_file',
'opcache_get_configuration',
'opcache_get_status',
'opcache_invalidate',
'opcache_is_script_cached',
'opcache_reset'),
'OpenAL': ('openal_buffer_create',
'openal_buffer_data',
'openal_buffer_destroy',
'openal_buffer_get',
'openal_buffer_loadwav',
'openal_context_create',
'openal_context_current',
'openal_context_destroy',
'openal_context_process',
'openal_context_suspend',
'openal_device_close',
'openal_device_open',
'openal_listener_get',
'openal_listener_set',
'openal_source_create',
'openal_source_destroy',
'openal_source_get',
'openal_source_pause',
'openal_source_play',
'openal_source_rewind',
'openal_source_set',
'openal_source_stop',
'openal_stream'),
'OpenSSL': ('openssl_cipher_iv_length',
'openssl_cms_decrypt',
'openssl_cms_encrypt',
'openssl_cms_read',
'openssl_cms_sign',
'openssl_cms_verify',
'openssl_csr_export_to_file',
'openssl_csr_export',
'openssl_csr_get_public_key',
'openssl_csr_get_subject',
'openssl_csr_new',
'openssl_csr_sign',
'openssl_decrypt',
'openssl_dh_compute_key',
'openssl_digest',
'openssl_encrypt',
'openssl_error_string',
'openssl_free_key',
'openssl_get_cert_locations',
'openssl_get_cipher_methods',
'openssl_get_curve_names',
'openssl_get_md_methods',
'openssl_get_privatekey',
'openssl_get_publickey',
'openssl_open',
'openssl_pbkdf2',
'openssl_pkcs12_export_to_file',
'openssl_pkcs12_export',
'openssl_pkcs12_read',
'openssl_pkcs7_decrypt',
'openssl_pkcs7_encrypt',
'openssl_pkcs7_read',
'openssl_pkcs7_sign',
'openssl_pkcs7_verify',
'openssl_pkey_derive',
'openssl_pkey_export_to_file',
'openssl_pkey_export',
'openssl_pkey_free',
'openssl_pkey_get_details',
'openssl_pkey_get_private',
'openssl_pkey_get_public',
'openssl_pkey_new',
'openssl_private_decrypt',
'openssl_private_encrypt',
'openssl_public_decrypt',
'openssl_public_encrypt',
'openssl_random_pseudo_bytes',
'openssl_seal',
'openssl_sign',
'openssl_spki_export_challenge',
'openssl_spki_export',
'openssl_spki_new',
'openssl_spki_verify',
'openssl_verify',
'openssl_x509_check_private_key',
'openssl_x509_checkpurpose',
'openssl_x509_export_to_file',
'openssl_x509_export',
'openssl_x509_fingerprint',
'openssl_x509_free',
'openssl_x509_parse',
'openssl_x509_read',
'openssl_x509_verify'),
'Output Control': ('flush',
'ob_clean',
'ob_end_clean',
'ob_end_flush',
'ob_flush',
'ob_get_clean',
'ob_get_contents',
'ob_get_flush',
'ob_get_length',
'ob_get_level',
'ob_get_status',
'ob_gzhandler',
'ob_implicit_flush',
'ob_list_handlers',
'ob_start',
'output_add_rewrite_var',
'output_reset_rewrite_vars'),
'PCNTL': ('pcntl_alarm',
'pcntl_async_signals',
'pcntl_errno',
'pcntl_exec',
'pcntl_fork',
'pcntl_get_last_error',
'pcntl_getpriority',
'pcntl_setpriority',
'pcntl_signal_dispatch',
'pcntl_signal_get_handler',
'pcntl_signal',
'pcntl_sigprocmask',
'pcntl_sigtimedwait',
'pcntl_sigwaitinfo',
'pcntl_strerror',
'pcntl_wait',
'pcntl_waitpid',
'pcntl_wexitstatus',
'pcntl_wifexited',
'pcntl_wifsignaled',
'pcntl_wifstopped',
'pcntl_wstopsig',
'pcntl_wtermsig'),
'PCRE': ('preg_filter',
'preg_grep',
'preg_last_error_msg',
'preg_last_error',
'preg_match_all',
'preg_match',
'preg_quote',
'preg_replace_callback_array',
'preg_replace_callback',
'preg_replace',
'preg_split'),
'PHP Options/Info': ('assert_options',
'assert',
'cli_get_process_title',
'cli_set_process_title',
'dl',
'extension_loaded',
'gc_collect_cycles',
'gc_disable',
'gc_enable',
'gc_enabled',
'gc_mem_caches',
'gc_status',
'get_cfg_var',
'get_current_user',
'get_defined_constants',
'get_extension_funcs',
'get_include_path',
'get_included_files',
'get_loaded_extensions',
'get_magic_quotes_gpc',
'get_magic_quotes_runtime',
'get_required_files',
'get_resources',
'getenv',
'getlastmod',
'getmygid',
'getmyinode',
'getmypid',
'getmyuid',
'getopt',
'getrusage',
'ini_alter',
'ini_get_all',
'ini_get',
'ini_restore',
'ini_set',
'memory_get_peak_usage',
'memory_get_usage',
'php_ini_loaded_file',
'php_ini_scanned_files',
'php_sapi_name',
'php_uname',
'phpcredits',
'phpinfo',
'phpversion',
'putenv',
'restore_include_path',
'set_include_path',
'set_time_limit',
'sys_get_temp_dir',
'version_compare',
'zend_thread_id',
'zend_version'),
'POSIX': ('posix_access',
'posix_ctermid',
'posix_errno',
'posix_get_last_error',
'posix_getcwd',
'posix_getegid',
'posix_geteuid',
'posix_getgid',
'posix_getgrgid',
'posix_getgrnam',
'posix_getgroups',
'posix_getlogin',
'posix_getpgid',
'posix_getpgrp',
'posix_getpid',
'posix_getppid',
'posix_getpwnam',
'posix_getpwuid',
'posix_getrlimit',
'posix_getsid',
'posix_getuid',
'posix_initgroups',
'posix_isatty',
'posix_kill',
'posix_mkfifo',
'posix_mknod',
'posix_setegid',
'posix_seteuid',
'posix_setgid',
'posix_setpgid',
'posix_setrlimit',
'posix_setsid',
'posix_setuid',
'posix_strerror',
'posix_times',
'posix_ttyname',
'posix_uname'),
'PS': ('ps_add_bookmark',
'ps_add_launchlink',
'ps_add_locallink',
'ps_add_note',
'ps_add_pdflink',
'ps_add_weblink',
'ps_arc',
'ps_arcn',
'ps_begin_page',
'ps_begin_pattern',
'ps_begin_template',
'ps_circle',
'ps_clip',
'ps_close_image',
'ps_close',
'ps_closepath_stroke',
'ps_closepath',
'ps_continue_text',
'ps_curveto',
'ps_delete',
'ps_end_page',
'ps_end_pattern',
'ps_end_template',
'ps_fill_stroke',
'ps_fill',
'ps_findfont',
'ps_get_buffer',
'ps_get_parameter',
'ps_get_value',
'ps_hyphenate',
'ps_include_file',
'ps_lineto',
'ps_makespotcolor',
'ps_moveto',
'ps_new',
'ps_open_file',
'ps_open_image_file',
'ps_open_image',
'ps_open_memory_image',
'ps_place_image',
'ps_rect',
'ps_restore',
'ps_rotate',
'ps_save',
'ps_scale',
'ps_set_border_color',
'ps_set_border_dash',
'ps_set_border_style',
'ps_set_info',
'ps_set_parameter',
'ps_set_text_pos',
'ps_set_value',
'ps_setcolor',
'ps_setdash',
'ps_setflat',
'ps_setfont',
'ps_setgray',
'ps_setlinecap',
'ps_setlinejoin',
'ps_setlinewidth',
'ps_setmiterlimit',
'ps_setoverprintmode',
'ps_setpolydash',
'ps_shading_pattern',
'ps_shading',
'ps_shfill',
'ps_show_boxed',
'ps_show_xy2',
'ps_show_xy',
'ps_show2',
'ps_show',
'ps_string_geometry',
'ps_stringwidth',
'ps_stroke',
'ps_symbol_name',
'ps_symbol_width',
'ps_symbol',
'ps_translate'),
'Password Hashing': ('password_algos',
'password_get_info',
'password_hash',
'password_needs_rehash',
'password_verify'),
'PostgreSQL': ('pg_affected_rows',
'pg_cancel_query',
'pg_client_encoding',
'pg_close',
'pg_connect_poll',
'pg_connect',
'pg_connection_busy',
'pg_connection_reset',
'pg_connection_status',
'pg_consume_input',
'pg_convert',
'pg_copy_from',
'pg_copy_to',
'pg_dbname',
'pg_delete',
'pg_end_copy',
'pg_escape_bytea',
'pg_escape_identifier',
'pg_escape_literal',
'pg_escape_string',
'pg_execute',
'pg_fetch_all_columns',
'pg_fetch_all',
'pg_fetch_array',
'pg_fetch_assoc',
'pg_fetch_object',
'pg_fetch_result',
'pg_fetch_row',
'pg_field_is_null',
'pg_field_name',
'pg_field_num',
'pg_field_prtlen',
'pg_field_size',
'pg_field_table',
'pg_field_type_oid',
'pg_field_type',
'pg_flush',
'pg_free_result',
'pg_get_notify',
'pg_get_pid',
'pg_get_result',
'pg_host',
'pg_insert',
'pg_last_error',
'pg_last_notice',
'pg_last_oid',
'pg_lo_close',
'pg_lo_create',
'pg_lo_export',
'pg_lo_import',
'pg_lo_open',
'pg_lo_read_all',
'pg_lo_read',
'pg_lo_seek',
'pg_lo_tell',
'pg_lo_truncate',
'pg_lo_unlink',
'pg_lo_write',
'pg_meta_data',
'pg_num_fields',
'pg_num_rows',
'pg_options',
'pg_parameter_status',
'pg_pconnect',
'pg_ping',
'pg_port',
'pg_prepare',
'pg_put_line',
'pg_query_params',
'pg_query',
'pg_result_error_field',
'pg_result_error',
'pg_result_seek',
'pg_result_status',
'pg_select',
'pg_send_execute',
'pg_send_prepare',
'pg_send_query_params',
'pg_send_query',
'pg_set_client_encoding',
'pg_set_error_verbosity',
'pg_socket',
'pg_trace',
'pg_transaction_status',
'pg_tty',
'pg_unescape_bytea',
'pg_untrace',
'pg_update',
'pg_version'),
'Program execution': ('escapeshellarg',
'escapeshellcmd',
'exec',
'passthru',
'proc_close',
'proc_get_status',
'proc_nice',
'proc_open',
'proc_terminate',
'shell_exec',
'system'),
'Pspell': ('pspell_add_to_personal',
'pspell_add_to_session',
'pspell_check',
'pspell_clear_session',
'pspell_config_create',
'pspell_config_data_dir',
'pspell_config_dict_dir',
'pspell_config_ignore',
'pspell_config_mode',
'pspell_config_personal',
'pspell_config_repl',
'pspell_config_runtogether',
'pspell_config_save_repl',
'pspell_new_config',
'pspell_new_personal',
'pspell_new',
'pspell_save_wordlist',
'pspell_store_replacement',
'pspell_suggest'),
'RRD': ('rrd_create',
'rrd_error',
'rrd_fetch',
'rrd_first',
'rrd_graph',
'rrd_info',
'rrd_last',
'rrd_lastupdate',
'rrd_restore',
'rrd_tune',
'rrd_update',
'rrd_version',
'rrd_xport',
'rrdc_disconnect'),
'Radius': ('radius_acct_open',
'radius_add_server',
'radius_auth_open',
'radius_close',
'radius_config',
'radius_create_request',
'radius_cvt_addr',
'radius_cvt_int',
'radius_cvt_string',
'radius_demangle_mppe_key',
'radius_demangle',
'radius_get_attr',
'radius_get_tagged_attr_data',
'radius_get_tagged_attr_tag',
'radius_get_vendor_attr',
'radius_put_addr',
'radius_put_attr',
'radius_put_int',
'radius_put_string',
'radius_put_vendor_addr',
'radius_put_vendor_attr',
'radius_put_vendor_int',
'radius_put_vendor_string',
'radius_request_authenticator',
'radius_salt_encrypt_attr',
'radius_send_request',
'radius_server_secret',
'radius_strerror'),
'Rar': ('rar_wrapper_cache_stats',),
'Readline': ('readline_add_history',
'readline_callback_handler_install',
'readline_callback_handler_remove',
'readline_callback_read_char',
'readline_clear_history',
'readline_completion_function',
'readline_info',
'readline_list_history',
'readline_on_new_line',
'readline_read_history',
'readline_redisplay',
'readline_write_history',
'readline'),
'Recode': ('recode_file', 'recode_string', 'recode'),
'RpmInfo': ('rpmaddtag', 'rpmdbinfo', 'rpmdbsearch', 'rpminfo', 'rpmvercmp'),
'SNMP': ('snmp_get_quick_print',
'snmp_get_valueretrieval',
'snmp_read_mib',
'snmp_set_enum_print',
'snmp_set_oid_numeric_print',
'snmp_set_oid_output_format',
'snmp_set_quick_print',
'snmp_set_valueretrieval',
'snmp2_get',
'snmp2_getnext',
'snmp2_real_walk',
'snmp2_set',
'snmp2_walk',
'snmp3_get',
'snmp3_getnext',
'snmp3_real_walk',
'snmp3_set',
'snmp3_walk',
'snmpget',
'snmpgetnext',
'snmprealwalk',
'snmpset',
'snmpwalk',
'snmpwalkoid'),
'SOAP': ('is_soap_fault', 'use_soap_error_handler'),
'SPL': ('class_implements',
'class_parents',
'class_uses',
'iterator_apply',
'iterator_count',
'iterator_to_array',
'spl_autoload_call',
'spl_autoload_extensions',
'spl_autoload_functions',
'spl_autoload_register',
'spl_autoload_unregister',
'spl_autoload',
'spl_classes',
'spl_object_hash',
'spl_object_id'),
'SQLSRV': ('sqlsrv_begin_transaction',
'sqlsrv_cancel',
'sqlsrv_client_info',
'sqlsrv_close',
'sqlsrv_commit',
'sqlsrv_configure',
'sqlsrv_connect',
'sqlsrv_errors',
'sqlsrv_execute',
'sqlsrv_fetch_array',
'sqlsrv_fetch_object',
'sqlsrv_fetch',
'sqlsrv_field_metadata',
'sqlsrv_free_stmt',
'sqlsrv_get_config',
'sqlsrv_get_field',
'sqlsrv_has_rows',
'sqlsrv_next_result',
'sqlsrv_num_fields',
'sqlsrv_num_rows',
'sqlsrv_prepare',
'sqlsrv_query',
'sqlsrv_rollback',
'sqlsrv_rows_affected',
'sqlsrv_send_stream_data',
'sqlsrv_server_info'),
'SSH2': ('ssh2_auth_agent',
'ssh2_auth_hostbased_file',
'ssh2_auth_none',
'ssh2_auth_password',
'ssh2_auth_pubkey_file',
'ssh2_connect',
'ssh2_disconnect',
'ssh2_exec',
'ssh2_fetch_stream',
'ssh2_fingerprint',
'ssh2_forward_accept',
'ssh2_forward_listen',
'ssh2_methods_negotiated',
'ssh2_poll',
'ssh2_publickey_add',
'ssh2_publickey_init',
'ssh2_publickey_list',
'ssh2_publickey_remove',
'ssh2_scp_recv',
'ssh2_scp_send',
'ssh2_send_eof',
'ssh2_sftp_chmod',
'ssh2_sftp_lstat',
'ssh2_sftp_mkdir',
'ssh2_sftp_readlink',
'ssh2_sftp_realpath',
'ssh2_sftp_rename',
'ssh2_sftp_rmdir',
'ssh2_sftp_stat',
'ssh2_sftp_symlink',
'ssh2_sftp_unlink',
'ssh2_sftp',
'ssh2_shell',
'ssh2_tunnel'),
'SVN': ('svn_add',
'svn_auth_get_parameter',
'svn_auth_set_parameter',
'svn_blame',
'svn_cat',
'svn_checkout',
'svn_cleanup',
'svn_client_version',
'svn_commit',
'svn_delete',
'svn_diff',
'svn_export',
'svn_fs_abort_txn',
'svn_fs_apply_text',
'svn_fs_begin_txn2',
'svn_fs_change_node_prop',
'svn_fs_check_path',
'svn_fs_contents_changed',
'svn_fs_copy',
'svn_fs_delete',
'svn_fs_dir_entries',
'svn_fs_file_contents',
'svn_fs_file_length',
'svn_fs_is_dir',
'svn_fs_is_file',
'svn_fs_make_dir',
'svn_fs_make_file',
'svn_fs_node_created_rev',
'svn_fs_node_prop',
'svn_fs_props_changed',
'svn_fs_revision_prop',
'svn_fs_revision_root',
'svn_fs_txn_root',
'svn_fs_youngest_rev',
'svn_import',
'svn_log',
'svn_ls',
'svn_mkdir',
'svn_repos_create',
'svn_repos_fs_begin_txn_for_commit',
'svn_repos_fs_commit_txn',
'svn_repos_fs',
'svn_repos_hotcopy',
'svn_repos_open',
'svn_repos_recover',
'svn_revert',
'svn_status',
'svn_update'),
'Scoutapm': ('scoutapm_get_calls', 'scoutapm_list_instrumented_functions'),
'Seaslog': ('seaslog_get_author', 'seaslog_get_version'),
'Semaphore': ('ftok',
'msg_get_queue',
'msg_queue_exists',
'msg_receive',
'msg_remove_queue',
'msg_send',
'msg_set_queue',
'msg_stat_queue',
'sem_acquire',
'sem_get',
'sem_release',
'sem_remove',
'shm_attach',
'shm_detach',
'shm_get_var',
'shm_has_var',
'shm_put_var',
'shm_remove_var',
'shm_remove'),
'Session': ('session_abort',
'session_cache_expire',
'session_cache_limiter',
'session_commit',
'session_create_id',
'session_decode',
'session_destroy',
'session_encode',
'session_gc',
'session_get_cookie_params',
'session_id',
'session_module_name',
'session_name',
'session_regenerate_id',
'session_register_shutdown',
'session_reset',
'session_save_path',
'session_set_cookie_params',
'session_set_save_handler',
'session_start',
'session_status',
'session_unset',
'session_write_close'),
'Shared Memory': ('shmop_close',
'shmop_delete',
'shmop_open',
'shmop_read',
'shmop_size',
'shmop_write'),
'SimpleXML': ('simplexml_import_dom',
'simplexml_load_file',
'simplexml_load_string'),
'Socket': ('socket_accept',
'socket_addrinfo_bind',
'socket_addrinfo_connect',
'socket_addrinfo_explain',
'socket_addrinfo_lookup',
'socket_bind',
'socket_clear_error',
'socket_close',
'socket_cmsg_space',
'socket_connect',
'socket_create_listen',
'socket_create_pair',
'socket_create',
'socket_export_stream',
'socket_get_option',
'socket_getopt',
'socket_getpeername',
'socket_getsockname',
'socket_import_stream',
'socket_last_error',
'socket_listen',
'socket_read',
'socket_recv',
'socket_recvfrom',
'socket_recvmsg',
'socket_select',
'socket_send',
'socket_sendmsg',
'socket_sendto',
'socket_set_block',
'socket_set_nonblock',
'socket_set_option',
'socket_setopt',
'socket_shutdown',
'socket_strerror',
'socket_write',
'socket_wsaprotocol_info_export',
'socket_wsaprotocol_info_import',
'socket_wsaprotocol_info_release'),
'Sodium': ('sodium_add',
'sodium_base642bin',
'sodium_bin2base64',
'sodium_bin2hex',
'sodium_compare',
'sodium_crypto_aead_aes256gcm_decrypt',
'sodium_crypto_aead_aes256gcm_encrypt',
'sodium_crypto_aead_aes256gcm_is_available',
'sodium_crypto_aead_aes256gcm_keygen',
'sodium_crypto_aead_chacha20poly1305_decrypt',
'sodium_crypto_aead_chacha20poly1305_encrypt',
'sodium_crypto_aead_chacha20poly1305_ietf_decrypt',
'sodium_crypto_aead_chacha20poly1305_ietf_encrypt',
'sodium_crypto_aead_chacha20poly1305_ietf_keygen',
'sodium_crypto_aead_chacha20poly1305_keygen',
'sodium_crypto_aead_xchacha20poly1305_ietf_decrypt',
'sodium_crypto_aead_xchacha20poly1305_ietf_encrypt',
'sodium_crypto_aead_xchacha20poly1305_ietf_keygen',
'sodium_crypto_auth_keygen',
'sodium_crypto_auth_verify',
'sodium_crypto_auth',
'sodium_crypto_box_keypair_from_secretkey_and_publickey',
'sodium_crypto_box_keypair',
'sodium_crypto_box_open',
'sodium_crypto_box_publickey_from_secretkey',
'sodium_crypto_box_publickey',
'sodium_crypto_box_seal_open',
'sodium_crypto_box_seal',
'sodium_crypto_box_secretkey',
'sodium_crypto_box_seed_keypair',
'sodium_crypto_box',
'sodium_crypto_generichash_final',
'sodium_crypto_generichash_init',
'sodium_crypto_generichash_keygen',
'sodium_crypto_generichash_update',
'sodium_crypto_generichash',
'sodium_crypto_kdf_derive_from_key',
'sodium_crypto_kdf_keygen',
'sodium_crypto_kx_client_session_keys',
'sodium_crypto_kx_keypair',
'sodium_crypto_kx_publickey',
'sodium_crypto_kx_secretkey',
'sodium_crypto_kx_seed_keypair',
'sodium_crypto_kx_server_session_keys',
'sodium_crypto_pwhash_scryptsalsa208sha256_str_verify',
'sodium_crypto_pwhash_scryptsalsa208sha256_str',
'sodium_crypto_pwhash_scryptsalsa208sha256',
'sodium_crypto_pwhash_str_needs_rehash',
'sodium_crypto_pwhash_str_verify',
'sodium_crypto_pwhash_str',
'sodium_crypto_pwhash',
'sodium_crypto_scalarmult_base',
'sodium_crypto_scalarmult',
'sodium_crypto_secretbox_keygen',
'sodium_crypto_secretbox_open',
'sodium_crypto_secretbox',
'sodium_crypto_secretstream_xchacha20poly1305_init_pull',
'sodium_crypto_secretstream_xchacha20poly1305_init_push',
'sodium_crypto_secretstream_xchacha20poly1305_keygen',
'sodium_crypto_secretstream_xchacha20poly1305_pull',
'sodium_crypto_secretstream_xchacha20poly1305_push',
'sodium_crypto_secretstream_xchacha20poly1305_rekey',
'sodium_crypto_shorthash_keygen',
'sodium_crypto_shorthash',
'sodium_crypto_sign_detached',
'sodium_crypto_sign_ed25519_pk_to_curve25519',
'sodium_crypto_sign_ed25519_sk_to_curve25519',
'sodium_crypto_sign_keypair_from_secretkey_and_publickey',
'sodium_crypto_sign_keypair',
'sodium_crypto_sign_open',
'sodium_crypto_sign_publickey_from_secretkey',
'sodium_crypto_sign_publickey',
'sodium_crypto_sign_secretkey',
'sodium_crypto_sign_seed_keypair',
'sodium_crypto_sign_verify_detached',
'sodium_crypto_sign',
'sodium_crypto_stream_keygen',
'sodium_crypto_stream_xor',
'sodium_crypto_stream',
'sodium_hex2bin',
'sodium_increment',
'sodium_memcmp',
'sodium_memzero',
'sodium_pad',
'sodium_unpad'),
'Solr': ('solr_get_version',),
'Stomp': ('stomp_connect_error', 'stomp_version'),
'Stream': ('stream_bucket_append',
'stream_bucket_make_writeable',
'stream_bucket_new',
'stream_bucket_prepend',
'stream_context_create',
'stream_context_get_default',
'stream_context_get_options',
'stream_context_get_params',
'stream_context_set_default',
'stream_context_set_option',
'stream_context_set_params',
'stream_copy_to_stream',
'stream_filter_append',
'stream_filter_prepend',
'stream_filter_register',
'stream_filter_remove',
'stream_get_contents',
'stream_get_filters',
'stream_get_line',
'stream_get_meta_data',
'stream_get_transports',
'stream_get_wrappers',
'stream_is_local',
'stream_isatty',
'stream_notification_callback',
'stream_register_wrapper',
'stream_resolve_include_path',
'stream_select',
'stream_set_blocking',
'stream_set_chunk_size',
'stream_set_read_buffer',
'stream_set_timeout',
'stream_set_write_buffer',
'stream_socket_accept',
'stream_socket_client',
'stream_socket_enable_crypto',
'stream_socket_get_name',
'stream_socket_pair',
'stream_socket_recvfrom',
'stream_socket_sendto',
'stream_socket_server',
'stream_socket_shutdown',
'stream_supports_lock',
'stream_wrapper_register',
'stream_wrapper_restore',
'stream_wrapper_unregister'),
'String': ('addcslashes',
'addslashes',
'bin2hex',
'chop',
'chr',
'chunk_split',
'convert_cyr_string',
'convert_uudecode',
'convert_uuencode',
'count_chars',
'crc32',
'crypt',
'echo',
'explode',
'fprintf',
'get_html_translation_table',
'hebrev',
'hebrevc',
'hex2bin',
'html_entity_decode',
'htmlentities',
'htmlspecialchars_decode',
'htmlspecialchars',
'implode',
'join',
'lcfirst',
'levenshtein',
'localeconv',
'ltrim',
'md5_file',
'md5',
'metaphone',
'money_format',
'nl_langinfo',
'nl2br',
'number_format',
'ord',
'parse_str',
'print',
'printf',
'quoted_printable_decode',
'quoted_printable_encode',
'quotemeta',
'rtrim',
'setlocale',
'sha1_file',
'sha1',
'similar_text',
'soundex',
'sprintf',
'sscanf',
'str_contains',
'str_ends_with',
'str_getcsv',
'str_ireplace',
'str_pad',
'str_repeat',
'str_replace',
'str_rot13',
'str_shuffle',
'str_split',
'str_starts_with',
'str_word_count',
'strcasecmp',
'strchr',
'strcmp',
'strcoll',
'strcspn',
'strip_tags',
'stripcslashes',
'stripos',
'stripslashes',
'stristr',
'strlen',
'strnatcasecmp',
'strnatcmp',
'strncasecmp',
'strncmp',
'strpbrk',
'strpos',
'strrchr',
'strrev',
'strripos',
'strrpos',
'strspn',
'strstr',
'strtok',
'strtolower',
'strtoupper',
'strtr',
'substr_compare',
'substr_count',
'substr_replace',
'substr',
'trim',
'ucfirst',
'ucwords',
'vfprintf',
'vprintf',
'vsprintf',
'wordwrap'),
'Swoole': ('swoole_async_dns_lookup',
'swoole_async_read',
'swoole_async_readfile',
'swoole_async_set',
'swoole_async_write',
'swoole_async_writefile',
'swoole_clear_error',
'swoole_client_select',
'swoole_cpu_num',
'swoole_errno',
'swoole_error_log',
'swoole_event_add',
'swoole_event_defer',
'swoole_event_del',
'swoole_event_exit',
'swoole_event_set',
'swoole_event_wait',
'swoole_event_write',
'swoole_get_local_ip',
'swoole_last_error',
'swoole_load_module',
'swoole_select',
'swoole_set_process_name',
'swoole_strerror',
'swoole_timer_after',
'swoole_timer_exists',
'swoole_timer_tick',
'swoole_version'),
'TCP': ('tcpwrap_check',),
'Taint': ('is_tainted', 'taint', 'untaint'),
'Tidy': ('ob_tidyhandler',
'tidy_access_count',
'tidy_config_count',
'tidy_error_count',
'tidy_get_output',
'tidy_warning_count'),
'Tokenizer': ('token_get_all', 'token_name'),
'Trader': ('trader_acos',
'trader_ad',
'trader_add',
'trader_adosc',
'trader_adx',
'trader_adxr',
'trader_apo',
'trader_aroon',
'trader_aroonosc',
'trader_asin',
'trader_atan',
'trader_atr',
'trader_avgprice',
'trader_bbands',
'trader_beta',
'trader_bop',
'trader_cci',
'trader_cdl2crows',
'trader_cdl3blackcrows',
'trader_cdl3inside',
'trader_cdl3linestrike',
'trader_cdl3outside',
'trader_cdl3starsinsouth',
'trader_cdl3whitesoldiers',
'trader_cdlabandonedbaby',
'trader_cdladvanceblock',
'trader_cdlbelthold',
'trader_cdlbreakaway',
'trader_cdlclosingmarubozu',
'trader_cdlconcealbabyswall',
'trader_cdlcounterattack',
'trader_cdldarkcloudcover',
'trader_cdldoji',
'trader_cdldojistar',
'trader_cdldragonflydoji',
'trader_cdlengulfing',
'trader_cdleveningdojistar',
'trader_cdleveningstar',
'trader_cdlgapsidesidewhite',
'trader_cdlgravestonedoji',
'trader_cdlhammer',
'trader_cdlhangingman',
'trader_cdlharami',
'trader_cdlharamicross',
'trader_cdlhighwave',
'trader_cdlhikkake',
'trader_cdlhikkakemod',
'trader_cdlhomingpigeon',
'trader_cdlidentical3crows',
'trader_cdlinneck',
'trader_cdlinvertedhammer',
'trader_cdlkicking',
'trader_cdlkickingbylength',
'trader_cdlladderbottom',
'trader_cdllongleggeddoji',
'trader_cdllongline',
'trader_cdlmarubozu',
'trader_cdlmatchinglow',
'trader_cdlmathold',
'trader_cdlmorningdojistar',
'trader_cdlmorningstar',
'trader_cdlonneck',
'trader_cdlpiercing',
'trader_cdlrickshawman',
'trader_cdlrisefall3methods',
'trader_cdlseparatinglines',
'trader_cdlshootingstar',
'trader_cdlshortline',
'trader_cdlspinningtop',
'trader_cdlstalledpattern',
'trader_cdlsticksandwich',
'trader_cdltakuri',
'trader_cdltasukigap',
'trader_cdlthrusting',
'trader_cdltristar',
'trader_cdlunique3river',
'trader_cdlupsidegap2crows',
'trader_cdlxsidegap3methods',
'trader_ceil',
'trader_cmo',
'trader_correl',
'trader_cos',
'trader_cosh',
'trader_dema',
'trader_div',
'trader_dx',
'trader_ema',
'trader_errno',
'trader_exp',
'trader_floor',
'trader_get_compat',
'trader_get_unstable_period',
'trader_ht_dcperiod',
'trader_ht_dcphase',
'trader_ht_phasor',
'trader_ht_sine',
'trader_ht_trendline',
'trader_ht_trendmode',
'trader_kama',
'trader_linearreg_angle',
'trader_linearreg_intercept',
'trader_linearreg_slope',
'trader_linearreg',
'trader_ln',
'trader_log10',
'trader_ma',
'trader_macd',
'trader_macdext',
'trader_macdfix',
'trader_mama',
'trader_mavp',
'trader_max',
'trader_maxindex',
'trader_medprice',
'trader_mfi',
'trader_midpoint',
'trader_midprice',
'trader_min',
'trader_minindex',
'trader_minmax',
'trader_minmaxindex',
'trader_minus_di',
'trader_minus_dm',
'trader_mom',
'trader_mult',
'trader_natr',
'trader_obv',
'trader_plus_di',
'trader_plus_dm',
'trader_ppo',
'trader_roc',
'trader_rocp',
'trader_rocr100',
'trader_rocr',
'trader_rsi',
'trader_sar',
'trader_sarext',
'trader_set_compat',
'trader_set_unstable_period',
'trader_sin',
'trader_sinh',
'trader_sma',
'trader_sqrt',
'trader_stddev',
'trader_stoch',
'trader_stochf',
'trader_stochrsi',
'trader_sub',
'trader_sum',
'trader_t3',
'trader_tan',
'trader_tanh',
'trader_tema',
'trader_trange',
'trader_trima',
'trader_trix',
'trader_tsf',
'trader_typprice',
'trader_ultosc',
'trader_var',
'trader_wclprice',
'trader_willr',
'trader_wma'),
'URL': ('base64_decode',
'base64_encode',
'get_headers',
'get_meta_tags',
'http_build_query',
'parse_url',
'rawurldecode',
'rawurlencode',
'urldecode',
'urlencode'),
'Uopz': ('uopz_add_function',
'uopz_allow_exit',
'uopz_backup',
'uopz_compose',
'uopz_copy',
'uopz_del_function',
'uopz_delete',
'uopz_extend',
'uopz_flags',
'uopz_function',
'uopz_get_exit_status',
'uopz_get_hook',
'uopz_get_mock',
'uopz_get_property',
'uopz_get_return',
'uopz_get_static',
'uopz_implement',
'uopz_overload',
'uopz_redefine',
'uopz_rename',
'uopz_restore',
'uopz_set_hook',
'uopz_set_mock',
'uopz_set_property',
'uopz_set_return',
'uopz_set_static',
'uopz_undefine',
'uopz_unset_hook',
'uopz_unset_mock',
'uopz_unset_return'),
'Variable handling': ('boolval',
'debug_zval_dump',
'doubleval',
'empty',
'floatval',
'get_debug_type',
'get_defined_vars',
'get_resource_id',
'get_resource_type',
'gettype',
'intval',
'is_array',
'is_bool',
'is_callable',
'is_countable',
'is_double',
'is_float',
'is_int',
'is_integer',
'is_iterable',
'is_long',
'is_null',
'is_numeric',
'is_object',
'is_real',
'is_resource',
'is_scalar',
'is_string',
'isset',
'print_r',
'serialize',
'settype',
'strval',
'unserialize',
'unset',
'var_dump',
'var_export'),
'WDDX': ('wddx_add_vars',
'wddx_deserialize',
'wddx_packet_end',
'wddx_packet_start',
'wddx_serialize_value',
'wddx_serialize_vars'),
'WinCache': ('wincache_fcache_fileinfo',
'wincache_fcache_meminfo',
'wincache_lock',
'wincache_ocache_fileinfo',
'wincache_ocache_meminfo',
'wincache_refresh_if_changed',
'wincache_rplist_fileinfo',
'wincache_rplist_meminfo',
'wincache_scache_info',
'wincache_scache_meminfo',
'wincache_ucache_add',
'wincache_ucache_cas',
'wincache_ucache_clear',
'wincache_ucache_dec',
'wincache_ucache_delete',
'wincache_ucache_exists',
'wincache_ucache_get',
'wincache_ucache_inc',
'wincache_ucache_info',
'wincache_ucache_meminfo',
'wincache_ucache_set',
'wincache_unlock'),
'XML Parser': ('utf8_decode',
'utf8_encode',
'xml_error_string',
'xml_get_current_byte_index',
'xml_get_current_column_number',
'xml_get_current_line_number',
'xml_get_error_code',
'xml_parse_into_struct',
'xml_parse',
'xml_parser_create_ns',
'xml_parser_create',
'xml_parser_free',
'xml_parser_get_option',
'xml_parser_set_option',
'xml_set_character_data_handler',
'xml_set_default_handler',
'xml_set_element_handler',
'xml_set_end_namespace_decl_handler',
'xml_set_external_entity_ref_handler',
'xml_set_notation_decl_handler',
'xml_set_object',
'xml_set_processing_instruction_handler',
'xml_set_start_namespace_decl_handler',
'xml_set_unparsed_entity_decl_handler'),
'XML-RPC': ('xmlrpc_decode_request',
'xmlrpc_decode',
'xmlrpc_encode_request',
'xmlrpc_encode',
'xmlrpc_get_type',
'xmlrpc_is_fault',
'xmlrpc_parse_method_descriptions',
'xmlrpc_server_add_introspection_data',
'xmlrpc_server_call_method',
'xmlrpc_server_create',
'xmlrpc_server_destroy',
'xmlrpc_server_register_introspection_callback',
'xmlrpc_server_register_method',
'xmlrpc_set_type'),
'Xhprof': ('xhprof_disable',
'xhprof_enable',
'xhprof_sample_disable',
'xhprof_sample_enable'),
'YAZ': ('yaz_addinfo',
'yaz_ccl_conf',
'yaz_ccl_parse',
'yaz_close',
'yaz_connect',
'yaz_database',
'yaz_element',
'yaz_errno',
'yaz_error',
'yaz_es_result',
'yaz_es',
'yaz_get_option',
'yaz_hits',
'yaz_itemorder',
'yaz_present',
'yaz_range',
'yaz_record',
'yaz_scan_result',
'yaz_scan',
'yaz_schema',
'yaz_search',
'yaz_set_option',
'yaz_sort',
'yaz_syntax',
'yaz_wait'),
'Yaml': ('yaml_emit_file',
'yaml_emit',
'yaml_parse_file',
'yaml_parse_url',
'yaml_parse'),
'Zip': ('zip_close',
'zip_entry_close',
'zip_entry_compressedsize',
'zip_entry_compressionmethod',
'zip_entry_filesize',
'zip_entry_name',
'zip_entry_open',
'zip_entry_read',
'zip_open',
'zip_read'),
'Zlib': ('deflate_add',
'deflate_init',
'gzclose',
'gzcompress',
'gzdecode',
'gzdeflate',
'gzencode',
'gzeof',
'gzfile',
'gzgetc',
'gzgets',
'gzgetss',
'gzinflate',
'gzopen',
'gzpassthru',
'gzputs',
'gzread',
'gzrewind',
'gzseek',
'gztell',
'gzuncompress',
'gzwrite',
'inflate_add',
'inflate_get_read_len',
'inflate_get_status',
'inflate_init',
'readgzfile',
'zlib_decode',
'zlib_encode',
'zlib_get_coding_type'),
'ZooKeeper': ('zookeeper_dispatch',),
'cURL': ('curl_close',
'curl_copy_handle',
'curl_errno',
'curl_error',
'curl_escape',
'curl_exec',
'curl_file_create',
'curl_getinfo',
'curl_init',
'curl_multi_add_handle',
'curl_multi_close',
'curl_multi_errno',
'curl_multi_exec',
'curl_multi_getcontent',
'curl_multi_info_read',
'curl_multi_init',
'curl_multi_remove_handle',
'curl_multi_select',
'curl_multi_setopt',
'curl_multi_strerror',
'curl_pause',
'curl_reset',
'curl_setopt_array',
'curl_setopt',
'curl_share_close',
'curl_share_errno',
'curl_share_init',
'curl_share_setopt',
'curl_share_strerror',
'curl_strerror',
'curl_unescape',
'curl_version'),
'dBase': ('dbase_add_record',
'dbase_close',
'dbase_create',
'dbase_delete_record',
'dbase_get_header_info',
'dbase_get_record_with_names',
'dbase_get_record',
'dbase_numfields',
'dbase_numrecords',
'dbase_open',
'dbase_pack',
'dbase_replace_record'),
'iconv': ('iconv_get_encoding',
'iconv_mime_decode_headers',
'iconv_mime_decode',
'iconv_mime_encode',
'iconv_set_encoding',
'iconv_strlen',
'iconv_strpos',
'iconv_strrpos',
'iconv_substr',
'iconv',
'ob_iconv_handler'),
'intl': ('intl_error_name',
'intl_get_error_code',
'intl_get_error_message',
'intl_is_failure'),
'libxml': ('libxml_clear_errors',
'libxml_disable_entity_loader',
'libxml_get_errors',
'libxml_get_last_error',
'libxml_set_external_entity_loader',
'libxml_set_streams_context',
'libxml_use_internal_errors'),
'mqseries': ('mqseries_back',
'mqseries_begin',
'mqseries_close',
'mqseries_cmit',
'mqseries_conn',
'mqseries_connx',
'mqseries_disc',
'mqseries_get',
'mqseries_inq',
'mqseries_open',
'mqseries_put1',
'mqseries_put',
'mqseries_set',
'mqseries_strerror'),
'phpdbg': ('phpdbg_break_file',
'phpdbg_break_function',
'phpdbg_break_method',
'phpdbg_break_next',
'phpdbg_clear',
'phpdbg_color',
'phpdbg_end_oplog',
'phpdbg_exec',
'phpdbg_get_executable',
'phpdbg_prompt',
'phpdbg_start_oplog'),
'runkit7': ('runkit7_constant_add',
'runkit7_constant_redefine',
'runkit7_constant_remove',
'runkit7_function_add',
'runkit7_function_copy',
'runkit7_function_redefine',
'runkit7_function_remove',
'runkit7_function_rename',
'runkit7_import',
'runkit7_method_add',
'runkit7_method_copy',
'runkit7_method_redefine',
'runkit7_method_remove',
'runkit7_method_rename',
'runkit7_object_id',
'runkit7_superglobals',
'runkit7_zval_inspect'),
'ssdeep': ('ssdeep_fuzzy_compare',
'ssdeep_fuzzy_hash_filename',
'ssdeep_fuzzy_hash'),
'var_representation': ('var_representation',),
'win32service': ('win32_continue_service',
'win32_create_service',
'win32_delete_service',
'win32_get_last_control_message',
'win32_pause_service',
'win32_query_service_status',
'win32_send_custom_control',
'win32_set_service_exit_code',
'win32_set_service_exit_mode',
'win32_set_service_status',
'win32_start_service_ctrl_dispatcher',
'win32_start_service',
'win32_stop_service'),
'xattr': ('xattr_get',
'xattr_list',
'xattr_remove',
'xattr_set',
'xattr_supported'),
'xdiff': ('xdiff_file_bdiff_size',
'xdiff_file_bdiff',
'xdiff_file_bpatch',
'xdiff_file_diff_binary',
'xdiff_file_diff',
'xdiff_file_merge3',
'xdiff_file_patch_binary',
'xdiff_file_patch',
'xdiff_file_rabdiff',
'xdiff_string_bdiff_size',
'xdiff_string_bdiff',
'xdiff_string_bpatch',
'xdiff_string_diff_binary',
'xdiff_string_diff',
'xdiff_string_merge3',
'xdiff_string_patch_binary',
'xdiff_string_patch',
'xdiff_string_rabdiff')}
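
# Sketch of a typical consumer (the helper below is illustrative, not an
# upstream API): the PHP lexer flattens MODULES into a single set so that,
# while highlighting, "is this identifier a built-in?" is an O(1) lookup.
def _builtin_function_names(disabled_modules=()):
    """Return every built-in function name, skipping disabled modules."""
    names = set()
    for module, functions in MODULES.items():
        if module not in disabled_modules:
            names.update(functions)
    return names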
if __name__ == '__main__': # pragma: no cover
import glob
import os
import pprint
import re
import shutil
import tarfile
from urllib.request import urlretrieve
PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
PHP_MANUAL_DIR = './php-chunked-xhtml/'
PHP_REFERENCE_GLOB = 'ref.*'
PHP_FUNCTION_RE = r'<a href="function\..*?\.html">(.*?)</a>'
PHP_MODULE_RE = '<title>(.*?) Functions</title>'
def get_php_functions():
function_re = re.compile(PHP_FUNCTION_RE)
module_re = re.compile(PHP_MODULE_RE)
modules = {}
for file in get_php_references():
module = ''
with open(file) as f:
for line in f:
if not module:
search = module_re.search(line)
if search:
module = search.group(1)
modules[module] = []
elif 'href="function.' in line:
for match in function_re.finditer(line):
fn = match.group(1)
if '»' not in fn and '«' not in fn and \
'::' not in fn and '\\' not in fn and \
fn not in modules[module]:
modules[module].append(fn)
if module:
# These are dummy manual pages, not actual functions
if module == 'Filesystem':
modules[module].remove('delete')
if not modules[module]:
del modules[module]
for key in modules:
modules[key] = tuple(modules[key])
return modules
def get_php_references():
download = urlretrieve(PHP_MANUAL_URL)
with tarfile.open(download[0]) as tar:
tar.extractall()
yield from glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB))
os.remove(download[0])
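    # Note: the tarball is deleted only after the generator above has been
    # fully exhausted; get_php_functions() always iterates it to the end,
    # so the downloaded archive does not linger on disk.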
def regenerate(filename, modules):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(filename, 'w') as fp:
fp.write(header)
fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
fp.write(footer)
def run():
print('>> Downloading Function Index')
modules = get_php_functions()
total = sum(len(v) for v in modules.values())
print('%d functions found' % total)
regenerate(__file__, modules)
shutil.rmtree(PHP_MANUAL_DIR)
run()
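    # Regeneration sketch (network access and write permission to this file
    # are assumed): running the module directly downloads the chunked PHP
    # manual, scrapes each ref.* page for function names, rewrites the
    # MODULES literal above in place, and deletes the extracted manual:
    #
    #     python _php_builtins.py
    #
    # The file name on the command line above is an assumption.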
| 107,874 | Python | 31.433854 | 87 | 0.438335 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/smalltalk.py | """
pygments.lexers.smalltalk
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Smalltalk and related languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SmalltalkLexer', 'NewspeakLexer']
class SmalltalkLexer(RegexLexer):
"""
For Smalltalk syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
.. versionadded:: 0.10
"""
name = 'Smalltalk'
url = 'http://www.smalltalk.org/'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak', 'st']
mimetypes = ['text/x-smalltalk']
tokens = {
'root': [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition': [
            # Not perfect: these rules can't allow whitespace at the
            # beginning without breaking everything else.
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables': [
include('whitespaces'),
(r'(:)(\s*)(\w+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
default('#pop'), # else pop
],
'literals': [
(r"'(''|[^'])*'", String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper': [
include('whitespaces'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
# literals
(r"'(''|[^'])*'", String),
(r'\$.', String.Char),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth': [
            # This state is a bit tricky: it can be entered from several
            # places, so we can't just pop it; matching ')' instead pushes
            # 'root' and 'afterobject' to resynchronize.
(r'\)', String.Symbol, ('root', 'afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces': [
# skip whitespace and comments
(r'\s+', Text),
(r'"(""|[^"])*"', Comment),
],
'objects': [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject': [
(r'! !$', Keyword, '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout': [
# Squeak fileout format (optional)
(r'^"(""|[^"])*"!', Keyword),
(r"^'(''|[^'])*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
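
# The grammar above alternates between two modes: rules in 'objects' match
# the start of an expression (literals, variables, blocks, pseudo-variables)
# and shift into 'afterobject', which then consumes the unary, binary and
# keyword messages, cascades (';') and statement periods that may follow a
# receiver.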
class NewspeakLexer(RegexLexer):
"""
For Newspeak syntax.
.. versionadded:: 1.1
"""
name = 'Newspeak'
url = 'http://newspeaklanguage.org/'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root': [
(r'\b(Newsqueak2)\b', Keyword.Declaration),
(r"'[^']*'", String),
(r'\b(class)(\s+)(\w+)(\s*)',
bygroups(Keyword.Declaration, Text, Name.Class, Text)),
(r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
Keyword),
(r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'<\w+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
(r':\w+', Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"#'[^']*'", String.Symbol),
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace': [
(r'\s+', Text),
(r'"[^"]*"', Comment)
],
}
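
# Usage sketch (the sample snippet below is an illustrative assumption, not
# a canonical test input):
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    st_code = "Transcript showCr: 'Hello, world'."
    print(highlight(st_code, SmalltalkLexer(), TerminalFormatter()))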
| 7,206 | Python | 35.583756 | 88 | 0.425618 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/foxpro.py | """
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
"""Lexer for Microsoft Visual FoxPro language.
FoxPro syntax allows to shorten all keywords and function names
to 4 characters. Shortened forms are not recognized by this lexer.
.. versionadded:: 1.6
"""
name = 'FoxPro'
aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
filenames = ['*.PRG', '*.prg']
    mimetypes = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r';\s*\n', Punctuation), # consume newline
(r'(^|\n)\s*', Text, 'newline'),
            # Square brackets may be used for array indices
            # and for string literals. Match arrays
            # before string literals.
(r'(?<=\w)\[[0-9, ]+\]', Text),
(r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
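            # e.g. in ``x = aRay[1, 2]`` the bracketed part is an array
            # index (Text), while a standalone ``[hello]`` is a String.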
            (r'(^\s*\*|&&).*?\n', Comment.Single),
(r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
             r'COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
r'YEAR)(?=\s*\()', Name.Function),
(r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
(r'THISFORMSET|THISFORM|THIS', Name.Builtin),
(r'Application|CheckBox|Collection|Column|ComboBox|'
r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
r'Project|Relation|ReportListener|Separator|Servers|Server|'
r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
r'XMLAdapter|XMLField|XMLTable', Name.Class),
(r'm\.[a-z_]\w*', Name.Variable),
(r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
(r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
r'BreakOnError|BufferModeOverride|BufferMode|'
r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
r'ContinuousScroll|ControlBox|ControlCount|Controls|'
r'ControlSource|ConversionFunc|Count|CurrentControl|'
r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
r'CursorSchema|CursorSource|CursorStatus|Curvature|'
r'Database|DataSessionID|DataSession|DataSourceType|'
r'DataSource|DataType|DateFormat|DateMark|Debug|'
r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
r'DeleteCmd|DeleteMark|Description|Desktop|'
r'Details|DisabledBackColor|DisabledForeColor|'
r'DisabledItemBackColor|DisabledItemForeColor|'
r'DisabledPicture|DisableEncode|DisplayCount|'
r'DisplayValue|Dockable|Docked|DockPosition|'
r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
r'DynamicFontItalic|DynamicFontStrikethru|'
r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
r'DynamicLineHeight|EditorOptions|Enabled|'
r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
r'FetchMemoDataSource|FetchMemo|FetchSize|'
r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
r'HelpContextID|HideSelection|HighlightBackColor|'
r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
r'InsertCmdDataSource|InsertCmdRefreshCmd|'
r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
r'InsertCmd|Instancing|IntegralHeight|'
r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
             r'IsDiffGram|IsLoaded|ItemBackColor|ItemData|ItemIDData|'
r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
r'NumberOfElements|Object|OLEClass|OLEDragMode|'
r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
r'OutputPageCount|OutputType|PageCount|PageHeight|'
r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
r'Parent|Partition|PasswordChar|PictureMargin|'
r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
r'PictureVal|Picture|Prepared|'
r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
r'ProjectHookLibrary|ProjectHook|QuietMode|'
r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
r'RecordSource|RefreshAlias|'
r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
r'Rotation|RowColChange|RowHeight|RowSourceType|'
r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
r'Seconds|SelectCmd|SelectedID|'
r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
r'ServerClass|ServerHelpFile|ServerName|'
r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
r'VersionCompany|VersionCopyright|VersionDescription|'
r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
r'XSDtype|ZoomBox)', Name.Attribute),
(r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
r'AddProperty|AddTableSchema|AddToSCC|Add|'
r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
(r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
r'AfterCursorUpdate|AfterDelete|AfterInsert|'
r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
r'BeforeInsert|BeforeDock|BeforeOpenTables|'
r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
r'dbc_AfterDropOffline|dbc_AfterDropTable|'
r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
r'dbc_AfterRenameTable|dbc_AfterRenameView|'
r'dbc_AfterValidateData|dbc_BeforeAddTable|'
r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
(r'\s+', Text),
# everything else is not colored
(r'.', Text),
],
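        # Commands are only recognized at the start of a logical line: 'root'
        # pushes 'newline' after every line break, and 'newline' pops back to
        # 'root' as soon as it has classified the first word. For example,
        # ``DELETE`` at the start of a line becomes Keyword.Reserved; the same
        # word later in a line never reaches this state.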
'newline': [
(r'\*.*?$', Comment.Single, '#pop'),
(r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
             r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N|RUNSCRIPT|'
r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
Keyword.Reserved, '#pop'),
(r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
Comment.Preproc, '#pop'),
(r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
(r'.', Text, '#pop'),
],
}
| 26,212 | Python | 60.245327 | 80 | 0.657752 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/fantom.py | """
pygments.lexers.fantom
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Fantom language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from string import Template
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Whitespace
__all__ = ['FantomLexer']
class FantomLexer(RegexLexer):
"""
For Fantom source code.
.. versionadded:: 1.5
"""
name = 'Fantom'
aliases = ['fan']
filenames = ['*.fan']
mimetypes = ['application/x-fantom']
# often used regexes
def s(str):
return Template(str).substitute(
dict(
pod=r'[\"\w\.]+',
eos=r'\n|;',
id=r'[a-zA-Z_]\w*',
                # all chars which can be part of a type definition. It starts
                # with either a letter, [ (maps), or | (funcs)
type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?',
)
)
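    # For example, s(r'($id)(\s*)(:=)') expands to
    # r'([a-zA-Z_]\w*)(\s*)(:=)' before being compiled in the rules below.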
tokens = {
'comments': [
(r'(?s)/\*.*?\*/', Comment.Multiline), # Multiline
(r'//.*?$', Comment.Single), # Single line
# TODO: highlight references in fandocs
(r'\*\*.*?$', Comment.Special), # Fandoc
(r'#.*$', Comment.Single) # Shell-style
],
'literals': [
(r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration
(r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration with dot
(r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), # Float/Decimal
(r'\b-?0x[0-9a-fA-F_]+', Number.Hex), # Hex
(r'\b-?[\d_]+', Number.Integer), # Int
(r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), # Char
(r'"', Punctuation, 'insideStr'), # Opening quote
            (r'`', Punctuation, 'insideUri'), # Opening backtick
(r'\b(true|false|null)\b', Keyword.Constant), # Bool & null
(r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', # DSL
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, String, Punctuation)),
(r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', # Type/slot literal
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, Name.Function)),
(r'\[,\]', Literal), # Empty list
(s(r'($type)(\[,\])'), # Typed empty list
bygroups(using(this, state='inType'), Literal)),
(r'\[:\]', Literal), # Empty Map
(s(r'($type)(\[:\])'),
bygroups(using(this, state='inType'), Literal)),
],
'insideStr': [
(r'\\\\', String.Escape), # Escaped backslash
(r'\\"', String.Escape), # Escaped "
(r'\\`', String.Escape), # Escaped `
(r'\$\w+', String.Interpol), # Subst var
(r'\$\{.*?\}', String.Interpol), # Subst expr
(r'"', Punctuation, '#pop'), # Closing quot
(r'.', String) # String content
],
'insideUri': [ # TODO: remove copy/paste str/uri
(r'\\\\', String.Escape), # Escaped backslash
(r'\\"', String.Escape), # Escaped "
(r'\\`', String.Escape), # Escaped `
(r'\$\w+', String.Interpol), # Subst var
(r'\$\{.*?\}', String.Interpol), # Subst expr
            (r'`', Punctuation, '#pop'), # Closing backtick
(r'.', String.Backtick) # URI content
],
'protectionKeywords': [
(r'\b(public|protected|private|internal)\b', Keyword),
],
'typeKeywords': [
(r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
],
'methodKeywords': [
(r'\b(abstract|native|once|override|static|virtual|final)\b',
Keyword),
],
'fieldKeywords': [
(r'\b(abstract|const|final|native|override|static|virtual|'
r'readonly)\b', Keyword)
],
'otherKeywords': [
(words((
'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while',
'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue',
'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'\b(it|this|super)\b', Name.Builtin.Pseudo),
],
'operators': [
(r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
],
'inType': [
(r'[\[\]|\->:?]', Punctuation),
(s(r'$id'), Name.Class),
default('#pop'),
],
'root': [
include('comments'),
include('protectionKeywords'),
include('typeKeywords'),
include('methodKeywords'),
include('fieldKeywords'),
include('literals'),
include('otherKeywords'),
include('operators'),
(r'using\b', Keyword.Namespace, 'using'), # Using stmt
(r'@\w+', Name.Decorator, 'facet'), # Symbol
(r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Class),
'inheritance'), # Inheritance list
# Type var := val
(s(r'($type)([ \t]+)($id)(\s*)(:=)'),
bygroups(using(this, state='inType'), Whitespace,
Name.Variable, Whitespace, Operator)),
# var := val
(s(r'($id)(\s*)(:=)'),
bygroups(Name.Variable, Whitespace, Operator)),
            # .someId( or ->someId(
(s(r'(\.|(?:\->))($id)(\s*)(\()'),
bygroups(Operator, Name.Function, Whitespace, Punctuation),
'insideParen'),
# .someId or ->someId
(s(r'(\.|(?:\->))($id)'),
bygroups(Operator, Name.Function)),
# new makeXXX (
(r'(new)(\s+)(make\w*)(\s*)(\()',
bygroups(Keyword, Whitespace, Name.Function, Whitespace, Punctuation),
'insideMethodDeclArgs'),
# Type name (
(s(r'($type)([ \t]+)' # Return type and whitespace
r'($id)(\s*)(\()'), # method name + open brace
bygroups(using(this, state='inType'), Whitespace,
Name.Function, Whitespace, Punctuation),
'insideMethodDeclArgs'),
# ArgType argName,
(s(r'($type)(\s+)($id)(\s*)(,)'),
bygroups(using(this, state='inType'), Whitespace, Name.Variable,
Whitespace, Punctuation)),
# ArgType argName)
# Covered in 'insideParen' state
# ArgType argName -> ArgType|
(s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
bygroups(using(this, state='inType'), Whitespace, Name.Variable,
Whitespace, Punctuation, Whitespace, using(this, state='inType'),
Punctuation)),
# ArgType argName|
(s(r'($type)(\s+)($id)(\s*)(\|)'),
bygroups(using(this, state='inType'), Whitespace, Name.Variable,
Whitespace, Punctuation)),
# Type var
(s(r'($type)([ \t]+)($id)'),
bygroups(using(this, state='inType'), Whitespace,
Name.Variable)),
(r'\(', Punctuation, 'insideParen'),
(r'\{', Punctuation, 'insideBrace'),
(r'\s+', Whitespace),
(r'.', Text)
],
'insideParen': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'insideMethodDeclArgs': [
(r'\)', Punctuation, '#pop'),
(s(r'($type)(\s+)($id)(\s*)(\))'),
bygroups(using(this, state='inType'), Whitespace, Name.Variable,
Whitespace, Punctuation), '#pop'),
include('root'),
],
'insideBrace': [
(r'\}', Punctuation, '#pop'),
include('root'),
],
'inheritance': [
(r'\s+', Whitespace), # Whitespace
(r':|,', Punctuation),
(r'(?:(\w+)(::))?(\w+)',
bygroups(Name.Namespace, Punctuation, Name.Class)),
(r'\{', Punctuation, '#pop')
],
'using': [
            (r'[ \t]+', Whitespace), # consume whitespace
(r'(\[)(\w+)(\])',
bygroups(Punctuation, Comment.Special, Punctuation)), # ffi
(r'(\")?([\w.]+)(\")?',
bygroups(Punctuation, Name.Namespace, Punctuation)), # podname
(r'::', Punctuation, 'usingClass'),
default('#pop')
],
'usingClass': [
            (r'[ \t]+', Whitespace), # consume whitespace
(r'(as)(\s+)(\w+)',
bygroups(Keyword.Declaration, Whitespace, Name.Class), '#pop:2'),
(r'[\w$]+', Name.Class),
default('#pop:2') # jump out to root state
],
'facet': [
(r'\s+', Whitespace),
(r'\{', Punctuation, 'facetFields'),
default('#pop')
],
'facetFields': [
include('comments'),
include('literals'),
include('operators'),
(r'\s+', Whitespace),
(r'(\s*)(\w+)(\s*)(=)', bygroups(Whitespace, Name, Whitespace, Operator)),
(r'\}', Punctuation, '#pop'),
(r'\s+', Whitespace),
(r'.', Text)
],
}
| 10,197 | Python | 39.468254 | 87 | 0.419143 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/kuin.py | """
pygments.lexers.kuin
~~~~~~~~~~~~~~~~~~~~
Lexers for the Kuin language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, using, this, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['KuinLexer']
class KuinLexer(RegexLexer):
"""
For Kuin source code.
.. versionadded:: 2.9
"""
name = 'Kuin'
url = 'https://github.com/kuina/Kuin'
aliases = ['kuin']
filenames = ['*.kn']
tokens = {
'root': [
include('statement'),
],
'statement': [
# Whitespace / Comment
include('whitespace'),
# Block-statement
(r'(\+?)([ \t]*)(\*?)([ \t]*)(\bfunc)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword,
using(this), Name.Function), 'func_'),
(r'\b(class)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)',
bygroups(Keyword, using(this), Name.Class), 'class_'),
(r'\b(enum)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)',
bygroups(Keyword, using(this), Name.Constant), 'enum_'),
(r'\b(block)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'block_'),
(r'\b(ifdef)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'ifdef_'),
(r'\b(if)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'if_'),
(r'\b(switch)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'switch_'),
(r'\b(while)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'while_'),
(r'\b(for)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'for_'),
(r'\b(foreach)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'foreach_'),
(r'\b(try)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?',
bygroups(Keyword, using(this), Name.Other), 'try_'),
# Line-statement
(r'\b(do)\b', Keyword, 'do'),
(r'(\+?[ \t]*\bvar)\b', Keyword, 'var'),
(r'\b(const)\b', Keyword, 'const'),
(r'\b(ret)\b', Keyword, 'ret'),
(r'\b(throw)\b', Keyword, 'throw'),
(r'\b(alias)\b', Keyword, 'alias'),
(r'\b(assert)\b', Keyword, 'assert'),
(r'\|', Text, 'continued_line'),
(r'[ \t]*\n', Whitespace),
],
# Whitespace / Comment
'whitespace': [
            (r'^([ \t]*)(;.*)', bygroups(Whitespace, Comment.Single)),
(r'[ \t]+(?![; \t])', Whitespace),
(r'\{', Comment.Multiline, 'multiline_comment'),
],
'multiline_comment': [
(r'\{', Comment.Multiline, 'multiline_comment'),
(r'(?:\s*;.*|[^{}\n]+)', Comment.Multiline),
(r'\n', Comment.Multiline),
(r'\}', Comment.Multiline, '#pop'),
],
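        # Naming convention for the block states below: a name with a trailing
        # underscore (e.g. 'func_') lexes the header expression up to the end
        # of the line, then enters the body state of the same name without the
        # underscore (e.g. 'func'), which runs until the matching 'end func'
        # pops both states ('#pop:2').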
# Block-statement
'func_': [
include('expr'),
(r'\n', Whitespace, 'func'),
],
'func': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(func)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
include('statement'),
],
'class_': [
include('expr'),
(r'\n', Whitespace, 'class'),
],
'class': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(class)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
include('statement'),
],
'enum_': [
include('expr'),
(r'\n', Whitespace, 'enum'),
],
'enum': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(enum)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
include('expr'),
(r'\n', Whitespace),
],
'block_': [
include('expr'),
(r'\n', Whitespace, 'block'),
],
'block': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(block)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
include('statement'),
include('break'),
include('skip'),
],
'ifdef_': [
include('expr'),
(r'\n', Whitespace, 'ifdef'),
],
'ifdef': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(ifdef)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
(words(('rls', 'dbg'), prefix=r'\b', suffix=r'\b'),
Keyword.Constant, 'ifdef_sp'),
include('statement'),
include('break'),
include('skip'),
],
'ifdef_sp': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'if_': [
include('expr'),
(r'\n', Whitespace, 'if'),
],
'if': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(if)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
(words(('elif', 'else'), prefix=r'\b', suffix=r'\b'), Keyword, 'if_sp'),
include('statement'),
include('break'),
include('skip'),
],
'if_sp': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'switch_': [
include('expr'),
(r'\n', Whitespace, 'switch'),
],
'switch': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(switch)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
(words(('case', 'default', 'to'), prefix=r'\b', suffix=r'\b'),
Keyword, 'switch_sp'),
include('statement'),
include('break'),
include('skip'),
],
'switch_sp': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'while_': [
include('expr'),
(r'\n', Whitespace, 'while'),
],
'while': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(while)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
include('statement'),
include('break'),
include('skip'),
],
'for_': [
include('expr'),
(r'\n', Whitespace, 'for'),
],
'for': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(for)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
include('statement'),
include('break'),
include('skip'),
],
'foreach_': [
include('expr'),
(r'\n', Whitespace, 'foreach'),
],
'foreach': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(foreach)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
include('statement'),
include('break'),
include('skip'),
],
'try_': [
include('expr'),
(r'\n', Whitespace, 'try'),
],
'try': [
(r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(try)\b',
bygroups(Keyword, using(this), Keyword), '#pop:2'),
(words(('catch', 'finally', 'to'), prefix=r'\b', suffix=r'\b'),
Keyword, 'try_sp'),
include('statement'),
include('break'),
include('skip'),
],
'try_sp': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
# Line-statement
'break': [
(r'\b(break)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)',
bygroups(Keyword, using(this), Name.Other)),
],
'skip': [
(r'\b(skip)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)',
bygroups(Keyword, using(this), Name.Other)),
],
'alias': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'assert': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'const': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'do': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'ret': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'throw': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'var': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'continued_line': [
include('expr'),
(r'\n', Whitespace, '#pop'),
],
'expr': [
# Whitespace / Comment
include('whitespace'),
# Punctuation
(r'\(', Punctuation,),
(r'\)', Punctuation,),
(r'\[', Punctuation,),
(r'\]', Punctuation,),
(r',', Punctuation),
# Keyword
(words((
'true', 'false', 'null', 'inf'
), prefix=r'\b', suffix=r'\b'), Keyword.Constant),
(words((
'me'
), prefix=r'\b', suffix=r'\b'), Keyword),
(words((
'bit16', 'bit32', 'bit64', 'bit8', 'bool',
'char', 'class', 'dict', 'enum', 'float', 'func',
'int', 'list', 'queue', 'stack'
), prefix=r'\b', suffix=r'\b'), Keyword.Type),
# Number
            (r'\b[0-9]+\.[0-9]+(?!\.)(?:e[\+-][0-9]+)?\b', Number.Float),
(r'\b2#[01]+(?:b(?:8|16|32|64))?\b', Number.Bin),
(r'\b8#[0-7]+(?:b(?:8|16|32|64))?\b', Number.Oct),
(r'\b16#[0-9A-F]+(?:b(?:8|16|32|64))?\b', Number.Hex),
(r'\b[0-9]+(?:b(?:8|16|32|64))?\b', Number.Decimal),
# String / Char
(r'"', String.Double, 'string'),
(r"'(?:\\.|.)+?'", String.Char),
# Operator
(r'(?:\.|\$(?:>|<)?)', Operator),
(r'(?:\^)', Operator),
(r'(?:\+|-|!|##?)', Operator),
(r'(?:\*|/|%)', Operator),
(r'(?:~)', Operator),
(r'(?:(?:=|<>)(?:&|\$)?|<=?|>=?)', Operator),
(r'(?:&)', Operator),
(r'(?:\|)', Operator),
(r'(?:\?)', Operator),
(r'(?::(?::|\+|-|\*|/|%|\^|~)?)', Operator),
# Identifier
(r"\b([a-zA-Z_][0-9a-zA-Z_]*)(?=@)\b", Name),
(r"(@)?\b([a-zA-Z_][0-9a-zA-Z_]*)\b",
bygroups(Name.Other, Name.Variable)),
],
# String
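        # e.g. in "x is \{x + 1}" the '\{' switches from 'string' into
        # 'toStrInString', so 'x + 1' is lexed as an ordinary expression
        # before '}' returns to the string body.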
'string': [
(r'(?:\\[^{\n]|[^"\\])+', String.Double),
(r'\\\{', String.Double, 'toStrInString'),
(r'"', String.Double, '#pop'),
],
'toStrInString': [
include('expr'),
(r'\}', String.Double, '#pop'),
],
}
| 11,406 | Python | 33.152695 | 101 | 0.376995 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/webmisc.py | """
pygments.lexers.webmisc
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for misc. web stuff.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Whitespace
from pygments.lexers.css import _indentation, _starts_block
from pygments.lexers.html import HtmlLexer
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.ruby import RubyLexer
__all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer']
class DuelLexer(RegexLexer):
"""
Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
.. versionadded:: 1.4
"""
name = 'Duel'
url = 'http://duelengine.org/'
aliases = ['duel', 'jbst', 'jsonml+bst']
filenames = ['*.duel', '*.jbst']
mimetypes = ['text/x-duel', 'text/x-jbst']
flags = re.DOTALL
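    # Everything between '<%' ... '%>' (including variants such as '<%=')
    # is delegated to JavascriptLexer; for example, in
    #   <div><%= model.name %></div>
    # 'model.name' is lexed as JavaScript and the surrounding markup as HTML.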
tokens = {
'root': [
(r'(<%[@=#!:]?)(.*?)(%>)',
bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
(r'(<%\$)(.*?)(:)(.*?)(%>)',
bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
(r'(<%--)(.*?)(--%>)',
bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)',
bygroups(using(HtmlLexer),
using(JavascriptLexer), using(HtmlLexer))),
(r'(.+?)(?=<)', using(HtmlLexer)),
(r'.+', using(HtmlLexer)),
],
}
class XQueryLexer(ExtendedRegexLexer):
"""
    An XQuery lexer, parsing a stream and outputting the tokens needed to
    highlight XQuery code.
.. versionadded:: 1.4
"""
name = 'XQuery'
url = 'https://www.w3.org/XML/Query/'
aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
mimetypes = ['text/xquery', 'application/xquery']
xquery_parse_state = []
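    # ``xquery_parse_state`` is a second, hand-managed stack that runs in
    # parallel to the pygments context stack: the pushstate_* callbacks below
    # record the state to return to, and the popstate_* callbacks restore it.
    # For example, on '{' pushstate_root_callback saves the current state and
    # restarts lexing at 'root'; the matching '}' (popstate_callback) resumes
    # the saved state.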
# FIX UNICODE LATER
# ncnamestartchar = (
# r"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
# r"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
# r"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
# r"[\u10000-\uEFFFF]"
# )
ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
# FIX UNICODE LATER
# ncnamechar = ncnamestartchar + (r"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
# r"[\u203F-\u2040]")
ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
pitarget_namestartchar = r"(?:[A-KN-WYZ]|_|:|[a-kn-wyz])"
pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
prefixedname = "%s:%s" % (ncname, ncname)
unprefixedname = ncname
qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
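    # e.g. stringdouble matches "say ""hi"" &amp; leave" - doubled quotes and
    # entity/character references are allowed inside, but a bare '&' is not.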
# FIX UNICODE LATER
# elementcontentchar = (r'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# r'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
# quotattrcontentchar = (r'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
# r'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
# aposattrcontentchar = (r'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# r'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_`|~]'
# CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
# aposattrcontentchar
    # #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
flags = re.DOTALL | re.MULTILINE
def punctuation_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def operator_root_callback(lexer, match, ctx):
yield match.start(), Operator, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def popstate_tag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
if lexer.xquery_parse_state:
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
next_state = lexer.xquery_parse_state.pop()
if next_state == 'occurrenceindicator':
if re.match("[?*+]+", match.group(2)):
yield match.start(), Punctuation, match.group(2)
ctx.stack.append('operator')
ctx.pos = match.end()
else:
ctx.stack.append('operator')
ctx.pos = match.end(1)
else:
ctx.stack.append(next_state)
ctx.pos = match.end(1)
def popstate_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# if we have run out of our state stack, pop whatever is on the pygments
# state stack
if len(lexer.xquery_parse_state) == 0:
ctx.stack.pop()
if not ctx.stack:
# make sure we have at least the root state on invalid inputs
ctx.stack = ['root']
elif len(ctx.stack) > 1:
ctx.stack.append(lexer.xquery_parse_state.pop())
else:
            # otherwise, default back to the root state
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_element_content_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('element_content')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.pos = match.end()
def pushstate_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_order_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_map_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate_withmode(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Keyword, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('kindtest')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtestforpi')
ctx.pos = match.end()
def pushstate_operator_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('occurrenceindicator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_operator_root_construct_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Whitespace, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
cur_state = ctx.stack.pop()
lexer.xquery_parse_state.append(cur_state)
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_operator_attribute_callback(lexer, match, ctx):
yield match.start(), Name.Attribute, match.group(1)
ctx.stack.append('operator')
ctx.pos = match.end()
tokens = {
'comment': [
# xquery comments
(r'[^:()]+', Comment),
(r'\(:', Comment, '#push'),
(r':\)', Comment, '#pop'),
(r'[:()]', Comment),
],
'whitespace': [
(r'\s+', Whitespace),
],
'operator': [
include('whitespace'),
(r'(\})', popstate_callback),
(r'\(:', Comment, 'comment'),
(r'(\{)', pushstate_root_callback),
(r'then|else|external|at|div|except', Keyword, 'root'),
(r'order by', Keyword, 'root'),
(r'group by', Keyword, 'root'),
(r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
(r'return|satisfies|to|union|where|count|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=|!)',
operator_root_callback),
(r'(::|:|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Whitespace, Keyword), 'singletype'),
(r'(instance)(\s+)(of)\b',
bygroups(Keyword, Whitespace, Keyword), 'itemtype'),
(r'(treat)(\s+)(as)\b',
bygroups(Keyword, Whitespace, Keyword), 'itemtype'),
(r'(case)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Whitespace, String.Double), 'itemtype'),
(r'(case)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Whitespace, String.Single), 'itemtype'),
(r'(case|as)\b', Keyword, 'itemtype'),
(r'(\))(\s*)(as)',
bygroups(Punctuation, Whitespace, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
(r'(for|let|previous|next)(\s+)(\$)',
bygroups(Keyword, Whitespace, Name.Variable), 'varname'),
(r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword,
Whitespace, Name.Variable),
'varname'),
# (r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)',
bygroups(Keyword, Whitespace, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
(r'(allowing)(\s+)(empty)',
bygroups(Keyword, Whitespace, Keyword)),
(r'external', Keyword),
(r'(start|when|end)', Keyword, 'root'),
(r'(only)(\s+)(end)', bygroups(Keyword, Whitespace, Keyword),
'root'),
(r'collation', Keyword, 'uritooperator'),
# eXist specific XQUF
(r'(into|following|preceding|with)', Keyword, 'root'),
# support for current context on rhs of Simple Map Operator
(r'\.', Operator),
# finally catch all string literals and stay in operator state
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'(catch)(\s*)', bygroups(Keyword, Whitespace), 'root'),
],
'uritooperator': [
(stringdouble, String.Double, '#pop'),
(stringsingle, String.Single, '#pop'),
],
'namespacedecl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'(at)(\s+)('+stringdouble+')',
bygroups(Keyword, Whitespace, String.Double)),
(r"(at)(\s+)("+stringsingle+')',
bygroups(Keyword, Whitespace, String.Single)),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r',', Punctuation),
(r'=', Operator),
(r';', Punctuation, 'root'),
(ncname, Name.Namespace),
],
'namespacekeyword': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double, 'namespacedecl'),
(stringsingle, String.Single, 'namespacedecl'),
(r'inherit|no-inherit', Keyword, 'root'),
(r'namespace', Keyword, 'namespacedecl'),
(r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
(r'preserve|no-preserve', Keyword),
(r',', Punctuation),
],
'annotationname': [
(r'\(:', Comment, 'comment'),
(qname, Name.Decorator),
(r'(\()(' + stringdouble + ')', bygroups(Punctuation, String.Double)),
(r'(\()(' + stringsingle + ')', bygroups(Punctuation, String.Single)),
(r'(\,)(\s+)(' + stringdouble + ')',
bygroups(Punctuation, Text, String.Double)),
(r'(\,)(\s+)(' + stringsingle + ')',
bygroups(Punctuation, Text, String.Single)),
(r'\)', Punctuation),
(r'(\s+)(\%)', bygroups(Text, Name.Decorator), 'annotationname'),
(r'(\s+)(variable)(\s+)(\$)',
bygroups(Text, Keyword.Declaration, Text, Name.Variable), 'varname'),
(r'(\s+)(function)(\s+)',
bygroups(Text, Keyword.Declaration, Text), 'root')
],
'varname': [
(r'\(:', Comment, 'comment'),
(r'(' + qname + r')(\()?', bygroups(Name, Punctuation), 'operator'),
],
'singletype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
],
'itemtype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\$', Name.Variable, 'varname'),
(r'(void)(\s*)(\()(\s*)(\))',
bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
(r'(element|attribute|schema-element|schema-attribute|comment|text|'
r'node|binary|document-node|empty-sequence)(\s*)(\()',
pushstate_occurrenceindicator_kindtest_callback),
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
(r'(\(\#)(\s*)', bygroups(Punctuation, Text), 'pragma'),
(r';', Punctuation, '#pop'),
(r'then|else', Keyword, '#pop'),
(r'(at)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
(r'except|intersect|in|is|return|satisfies|to|union|where|count',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'),
(r'external|at', Keyword, 'root'),
(r'(stable)(\s+)(order)(\s+)(by)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
(r'(castable|cast)(\s+)(as)',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
(r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
(r'(case)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'itemtype'),
(r'(case)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'itemtype'),
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
(r'(function|map|array)(\()', bygroups(Keyword.Type, Punctuation)),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
(r'\(:', Comment, 'comment'),
(r'\{', Punctuation, 'root'),
(r'(\))([*+?]?)', popstate_kindtest_callback),
(r'\*', Name, 'closekindtest'),
(qname, Name, 'closekindtest'),
(r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
],
'kindtestforpi': [
(r'\(:', Comment, 'comment'),
(r'\)', Punctuation, '#pop'),
(ncname, Name.Variable),
(stringdouble, String.Double),
(stringsingle, String.Single),
],
'closekindtest': [
(r'\(:', Comment, 'comment'),
(r'(\))', popstate_callback),
(r',', Punctuation),
(r'(\{)', pushstate_operator_root_callback),
(r'\?', Punctuation),
],
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
(r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
Literal),
],
'processing_instruction': [
(r'\s+', Text, 'processing_instruction_content'),
(r'\?>', String.Doc, '#pop'),
(pitarget, Name),
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
(r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
(r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
Literal),
],
'start_tag': [
include('whitespace'),
(r'(/>)', popstate_tag_callback),
(r'>', Name.Tag, 'element_content'),
(r'"', Punctuation, 'quot_attribute_content'),
(r"'", Punctuation, 'apos_attribute_content'),
(r'=', Operator),
(qname, Name.Tag),
],
'quot_attribute_content': [
(r'"', Punctuation, 'start_tag'),
(r'(\{)', pushstate_root_callback),
(r'""', Name.Attribute),
(quotattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'apos_attribute_content': [
(r"'", Punctuation, 'start_tag'),
(r'\{', Punctuation, 'root'),
(r"''", Name.Attribute),
(aposattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'element_content': [
(r'</', Name.Tag, 'end_tag'),
(r'(\{)', pushstate_root_callback),
(r'(<!--)', pushstate_element_content_xmlcomment_callback),
(r'(<\?)', pushstate_element_content_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
(r'(<)', pushstate_element_content_starttag_callback),
(elementcontentchar, Literal),
(entityref, Literal),
(charref, Literal),
(r'\{\{|\}\}', Literal),
],
'end_tag': [
include('whitespace'),
(r'(>)', popstate_tag_callback),
(qname, Name.Tag),
],
'xmlspace_decl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
'declareordering': [
(r'\(:', Comment, 'comment'),
include('whitespace'),
(r'ordered|unordered', Keyword, '#pop'),
],
'xqueryversion': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'encoding', Keyword),
(r';', Punctuation, '#pop'),
],
'pragma': [
(qname, Name.Variable, 'pragmacontents'),
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
(r'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]',
Literal),
(r'(\s+)', Whitespace),
],
'occurrenceindicator': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\*|\?|\+', Operator, 'operator'),
(r':=', Operator, 'root'),
default('operator'),
],
'option': [
include('whitespace'),
(qname, Name.Variable, '#pop'),
],
'qname_braren': [
include('whitespace'),
(r'(\{)', pushstate_operator_root_callback),
(r'(\()', Punctuation, 'root'),
],
'element_qname': [
(qname, Name.Variable, 'root'),
],
'attribute_qname': [
(qname, Name.Variable, 'root'),
],
'root': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
# handle operator state
# order on numbers matters - handle most complex first
(r'\d+(\.\d*)?[eE][+-]?\d+', Number.Float, 'operator'),
(r'(\.\d+)[eE][+-]?\d+', Number.Float, 'operator'),
(r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'),
(r'(\d+)', Number.Integer, 'operator'),
(r'(\.\.|\.|\))', Punctuation, 'operator'),
(r'(declare)(\s+)(construction)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(r'(declare)(\s+)(context)(\s+)(item)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(ncname + r':\*', Name, 'operator'),
(r'\*:'+ncname, Name.Tag, 'operator'),
(r'\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
(r'(\}|\])', popstate_callback),
# NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
Whitespace, Keyword.Declaration)),
(r'(module|declare)(\s+)(namespace)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
'namespacedecl'),
(r'(declare)(\s+)(base-uri)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
'namespacedecl'),
# NAMESPACE KEYWORD
(r'(declare)(\s+)(default)(\s+)(element|function)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
Whitespace, Keyword.Declaration),
'namespacekeyword'),
(r'(import)(\s+)(schema|module)',
bygroups(Keyword.Pseudo, Whitespace, Keyword.Pseudo),
'namespacekeyword'),
(r'(declare)(\s+)(copy-namespaces)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
'namespacekeyword'),
# VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Whitespace, Name.Variable), 'varname'),
(r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword,
Whitespace, Name.Variable),
'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
Whitespace, Name.Variable),
'varname'),
# ANNOTATED GLOBAL VARIABLES AND FUNCTIONS
(r'(declare)(\s+)(\%)', bygroups(Keyword.Declaration, Whitespace,
Name.Decorator),
'annotationname'),
# ITEMTYPE
(r'(\))(\s+)(as)', bygroups(Operator, Whitespace, Keyword),
'itemtype'),
(r'(element|attribute|schema-element|schema-attribute|comment|'
r'text|node|document-node|empty-sequence)(\s+)(\()',
pushstate_operator_kindtest_callback),
(r'(processing-instruction)(\s+)(\()',
pushstate_operator_kindtestforpi_callback),
(r'(<!--)', pushstate_operator_xmlcomment_callback),
(r'(<\?)', pushstate_operator_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
# (r'</', Name.Tag, 'end_tag'),
(r'(<)', pushstate_operator_starttag_callback),
(r'(declare)(\s+)(boundary-space)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'xmlspace_decl'),
(r'(validate)(\s+)(lax|strict)',
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Whitespace,
Punctuation)),
(r'(switch)(\s*)(\()', bygroups(Keyword, Whitespace, Punctuation)),
(r'(element|attribute|namespace)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
pushstate_operator_root_construct_callback),
# ATTRIBUTE
(r'(attribute)(\s+)(?=' + qname + r')',
bygroups(Keyword, Whitespace), 'attribute_qname'),
# ELEMENT
(r'(element)(\s+)(?=' + qname + r')',
bygroups(Keyword, Whitespace), 'element_qname'),
# PROCESSING_INSTRUCTION
(r'(processing-instruction|namespace)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration)),
(r'(\{|\[)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
(r'(map|array)(\s*)(\{)',
pushstate_operator_map_callback),
(r'(declare)(\s+)(ordering)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration),
'declareordering'),
(r'(xquery)(\s+)(version)',
bygroups(Keyword.Pseudo, Whitespace, Keyword.Pseudo),
'xqueryversion'),
(r'(\(#)(\s*)', bygroups(Punctuation, Whitespace), 'pragma'),
# sometimes return can occur in root state
(r'return', Keyword),
(r'(declare)(\s+)(option)', bygroups(Keyword.Declaration,
Whitespace,
Keyword.Declaration),
'option'),
# URI LITERALS - single and double quoted
(r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
(r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
bygroups(Keyword, Punctuation)),
(r'(descendant|following-sibling|following|parent|preceding-sibling'
r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
(r'(if)(\s*)(\()', bygroups(Keyword, Whitespace, Punctuation)),
(r'then|else', Keyword),
# eXist specific XQUF
(r'(update)(\s*)(insert|delete|replace|value|rename)',
bygroups(Keyword, Whitespace, Keyword)),
(r'(into|following|preceding|with)', Keyword),
# Marklogic specific
(r'(try)(\s*)', bygroups(Keyword, Whitespace), 'root'),
(r'(catch)(\s*)(\()(\$)',
bygroups(Keyword, Whitespace, Punctuation, Name.Variable),
'varname'),
(r'(@'+qname+')', Name.Attribute, 'operator'),
(r'(@'+ncname+')', Name.Attribute, 'operator'),
(r'@\*:'+ncname, Name.Attribute, 'operator'),
(r'@\*', Name.Attribute, 'operator'),
(r'(@)', Name.Attribute, 'operator'),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
# STANDALONE QNAMES
(qname + r'(?=\s*\{)', Name.Tag, 'qname_braren'),
(qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
(r'(' + qname + ')(#)([0-9]+)', bygroups(Name.Function, Keyword.Type, Number.Integer)),
(qname, Name.Tag, 'operator'),
]
}
class QmlLexer(RegexLexer):
"""
For QML files.
.. versionadded:: 1.6
"""
    # QML is based on JavaScript, so much of this is taken from the
    # JavascriptLexer above.
name = 'QML'
url = 'https://doc.qt.io/qt-6/qmlapplications.html'
aliases = ['qml', 'qbs']
filenames = ['*.qml', '*.qbs']
mimetypes = ['application/x-qml', 'application/x-qt.qbs+qml']
# pasted from JavascriptLexer, with some additions
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
# QML insertions
(r'\bid\s*:\s*[A-Za-z][\w.]*', Keyword.Declaration,
'slashstartsregex'),
(r'\b[A-Za-z][\w.]*\s*:', Keyword, 'slashstartsregex'),
# the rest from JavascriptLexer
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
]
}
class CirruLexer(RegexLexer):
r"""
    * using ``()`` for expressions, but restricted to a single line
* using ``""`` for strings, with ``\`` for escaping chars
* using ``$`` as folding operator
* using ``,`` as unfolding operator
* using indentations for nested blocks
.. versionadded:: 2.0
"""
name = 'Cirru'
url = 'http://cirru.org/'
aliases = ['cirru']
filenames = ['*.cirru']
mimetypes = ['text/x-cirru']
flags = re.MULTILINE
tokens = {
'string': [
(r'[^"\\\n]+', String),
(r'\\', String.Escape, 'escape'),
(r'"', String, '#pop'),
],
'escape': [
(r'.', String.Escape, '#pop'),
],
'function': [
(r'\,', Operator, '#pop'),
(r'[^\s"()]+', Name.Function, '#pop'),
(r'\)', Operator, '#pop'),
(r'(?=\n)', Text, '#pop'),
(r'\(', Operator, '#push'),
(r'"', String, ('#pop', 'string')),
(r'[ ]+', Text.Whitespace),
],
'line': [
(r'(?<!\w)\$(?!\w)', Operator, 'function'),
(r'\(', Operator, 'function'),
(r'\)', Operator),
(r'\n', Text, '#pop'),
(r'"', String, 'string'),
(r'[ ]+', Text.Whitespace),
(r'[+-]?[\d.]+\b', Number),
(r'[^\s"()]+', Name.Variable)
],
'root': [
(r'^\n+', Text.Whitespace),
default(('line', 'function')),
]
}
class SlimLexer(ExtendedRegexLexer):
"""
For Slim markup.
.. versionadded:: 2.0
"""
name = 'Slim'
aliases = ['slim']
filenames = ['*.slim']
mimetypes = ['text/x-slim']
flags = re.IGNORECASE
_dot = r'(?: \|\n(?=.* \|)|.)'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'([ \t]*==?)(.*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
(r'[ \t]+[\w:-]+(?==)', Name.Attribute, 'html-attributes'),
default('plain'),
],
'content': [
include('css'),
(r'[\w:-]+:[ \t]*\n', Text, 'plain'),
(r'(-)(.*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r'\|' + _dot + r'*\n', _starts_block(Text, 'plain'), '#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment.Preproc, 'slim-comment-block'), '#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
(r'[ \t]+\n', Punctuation, '#pop:2'),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(.*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'=', Punctuation),
(r'"[^"]+"', using(RubyLexer), 'tag'),
(r'\'[^\']+\'', using(RubyLexer), 'tag'),
(r'\w+', Text, 'tag'),
],
'slim-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
}
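# Illustrative usage sketch (not part of the upstream module): lexers defined
# here are normally resolved through the registry by one of their alias
# strings rather than instantiated directly. 'qml' comes from QmlLexer's
# `aliases` attribute above.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers import get_lexer_by_name
    demo = 'Rectangle { id: root; width: 360 }\n'
    print(highlight(demo, get_lexer_by_name('qml'), TerminalFormatter()))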
| 40,549 | Python | 39.108803 | 110 | 0.508397 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/solidity.py | """
pygments.lexers.solidity
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Solidity.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['SolidityLexer']
class SolidityLexer(RegexLexer):
"""
For Solidity source code.
.. versionadded:: 2.5
"""
name = 'Solidity'
aliases = ['solidity']
filenames = ['*.sol']
mimetypes = []
datatype = (
r'\b(address|bool|(?:(?:bytes|hash|int|string|uint)(?:8|16|24|32|40|48|56|64'
r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208'
r'|216|224|232|240|248|256)?))\b'
)
tokens = {
'root': [
include('whitespace'),
include('comments'),
(r'\bpragma\s+solidity\b', Keyword, 'pragma'),
(r'\b(contract)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword, Whitespace, Name.Entity)),
(datatype + r'(\s+)((?:external|public|internal|private)\s+)?' +
r'([a-zA-Z_]\w*)',
bygroups(Keyword.Type, Whitespace, Keyword, Name.Variable)),
(r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Type, Whitespace, Name.Variable)),
(r'\b(msg|block|tx)\.([A-Za-z_][a-zA-Z0-9_]*)\b', Keyword),
(words((
'block', 'break', 'constant', 'constructor', 'continue',
'contract', 'do', 'else', 'external', 'false', 'for',
'function', 'if', 'import', 'inherited', 'internal', 'is',
'library', 'mapping', 'memory', 'modifier', 'msg', 'new',
'payable', 'private', 'public', 'require', 'return',
'returns', 'struct', 'suicide', 'throw', 'this', 'true',
'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin),
(datatype, Keyword.Type),
include('constants'),
(r'[a-zA-Z_]\w*', Text),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[.;{}(),\[\]]', Punctuation)
],
'comments': [
(r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
(r'/(\\\n)?[*][\w\W]*', Comment.Multiline)
],
'constants': [
(r'("(\\"|.)*?")', String.Double),
(r"('(\\'|.)*?')", String.Single),
(r'\b0[xX][0-9a-fA-F]+\b', Number.Hex),
(r'\b\d+\b', Number.Decimal),
],
'pragma': [
include('whitespace'),
include('comments'),
(r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)',
bygroups(Operator, Whitespace, Keyword)),
(r';', Punctuation, '#pop')
],
'whitespace': [
(r'\s+', Whitespace),
(r'\n', Whitespace)
]
}
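# Minimal usage sketch (illustrative only, not part of the upstream file):
# highlight a small Solidity snippet with the lexer defined above.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    src = 'pragma solidity ^0.8.0;\ncontract Counter { uint256 count; }\n'
    print(highlight(src, SolidityLexer(), TerminalFormatter()))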
| 3,127 | Python | 34.545454 | 85 | 0.464663 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/whiley.py | """
pygments.lexers.whiley
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Whiley language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text
__all__ = ['WhileyLexer']
class WhileyLexer(RegexLexer):
"""
Lexer for the Whiley programming language.
.. versionadded:: 2.2
"""
name = 'Whiley'
url = 'http://whiley.org/'
filenames = ['*.whiley']
aliases = ['whiley']
mimetypes = ['text/x-whiley']
# See the language specification:
# http://whiley.org/download/WhileyLanguageSpec.pdf
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# Comments
(r'//.*', Comment.Single),
# don't parse empty comment as doc comment
(r'/\*\*/', Comment.Multiline),
(r'(?s)/\*\*.*?\*/', String.Doc),
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Keywords
(words((
'if', 'else', 'while', 'for', 'do', 'return',
'switch', 'case', 'default', 'break', 'continue',
'requires', 'ensures', 'where', 'assert', 'assume',
'all', 'no', 'some', 'in', 'is', 'new',
'throw', 'try', 'catch', 'debug', 'skip', 'fail',
'finite', 'total'), suffix=r'\b'), Keyword.Reserved),
(words((
'function', 'method', 'public', 'private', 'protected',
'export', 'native'), suffix=r'\b'), Keyword.Declaration),
# "constant" & "type" are not keywords unless used in declarations
(r'(constant|type)(\s+)([a-zA-Z_]\w*)(\s+)(is)\b',
bygroups(Keyword.Declaration, Text, Name, Text, Keyword.Reserved)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(bool|byte|int|real|any|void)\b', Keyword.Type),
# "from" is not a keyword unless used with import
(r'(import)(\s+)(\*)([^\S\n]+)(from)\b',
bygroups(Keyword.Namespace, Text, Punctuation, Text, Keyword.Namespace)),
(r'(import)(\s+)([a-zA-Z_]\w*)([^\S\n]+)(from)\b',
bygroups(Keyword.Namespace, Text, Name, Text, Keyword.Namespace)),
(r'(package|import)\b', Keyword.Namespace),
# standard library: https://github.com/Whiley/WhileyLibs/
(words((
# types defined in whiley.lang.Int
'i8', 'i16', 'i32', 'i64',
'u8', 'u16', 'u32', 'u64',
'uint', 'nat',
# whiley.lang.Any
'toString'), suffix=r'\b'), Name.Builtin),
# byte literal
(r'[01]+b', Number.Bin),
# decimal literal
(r'[0-9]+\.[0-9]+', Number.Float),
# match "1." but not ranges like "3..5"
(r'[0-9]+\.(?!\.)', Number.Float),
# integer literal
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
# character literal
(r"""'[^\\]'""", String.Char),
(r"""(')(\\['"\\btnfr])(')""",
bygroups(String.Char, String.Escape, String.Char)),
# string literal
(r'"', String, 'string'),
# operators and punctuation
(r'[{}()\[\],.;]', Punctuation),
(r'[+\-*/%&|<>^!~@=:?'
# unicode operators
r'\u2200\u2203\u2205\u2282\u2286\u2283\u2287'
r'\u222A\u2229\u2264\u2265\u2208\u2227\u2228'
r']', Operator),
# identifier
(r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\[btnfr]', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\.', String),
(r'[^\\"]+', String),
],
}
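# Illustrative sketch (not part of the upstream file): `get_tokens` yields
# (token_type, value) pairs, which is handy for checking how the rules above
# classify a given input.
if __name__ == '__main__':
    code = 'function id(int x) -> int:\n    return x\n'
    for ttype, value in WhileyLexer().get_tokens(code):
        if value.strip():
            print(ttype, repr(value))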
| 4,018 | Python | 33.350427 | 86 | 0.46665 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/text.py | """
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.configs import ApacheConfLexer, NginxConfLexer, \
SquidConfLexer, LighttpdConfLexer, IniLexer, RegeditLexer, PropertiesLexer, \
UnixConfigLexer
from pygments.lexers.console import PyPyLogLexer
from pygments.lexers.textedit import VimLexer
from pygments.lexers.markup import BBCodeLexer, MoinWikiLexer, RstLexer, \
TexLexer, GroffLexer
from pygments.lexers.installers import DebianControlLexer, SourcesListLexer
from pygments.lexers.make import MakefileLexer, BaseMakefileLexer, CMakeLexer
from pygments.lexers.haxe import HxmlLexer
from pygments.lexers.sgf import SmartGameFormatLexer
from pygments.lexers.diff import DiffLexer, DarcsPatchLexer
from pygments.lexers.data import YamlLexer
from pygments.lexers.textfmts import IrcLogsLexer, GettextLexer, HttpLexer
__all__ = []
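# Note: this module defines no lexers of its own; the imports above re-export
# classes that historically lived here, presumably so that old import paths
# keep working, e.g.:
#
#     from pygments.lexers.text import IniLexer
#
# `__all__` stays empty so these re-exports are not advertised as part of
# this module's public API.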
| 1,029 | Python | 37.148147 | 81 | 0.790087 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/c_like.py | """
pygments.lexers.c_like
~~~~~~~~~~~~~~~~~~~~~~
Lexers for other C-like languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers import _mql_builtins
__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer',
'CudaLexer', 'SwigLexer', 'MqlLexer', 'ArduinoLexer', 'CharmciLexer',
'OmgIdlLexer']
class PikeLexer(CppLexer):
"""
For `Pike <http://pike.lysator.liu.se/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Pike'
aliases = ['pike']
filenames = ['*.pike', '*.pmod']
mimetypes = ['text/x-pike']
tokens = {
'statements': [
(words((
'catch', 'new', 'private', 'protected', 'public', 'gauge',
'throw', 'throws', 'class', 'interface', 'implement', 'abstract',
'extends', 'from', 'this', 'super', 'constant', 'final', 'static',
'import', 'use', 'extern', 'inline', 'proto', 'break', 'continue',
'if', 'else', 'for', 'while', 'do', 'switch', 'case', 'as', 'in',
'version', 'return', 'true', 'false', 'null',
'__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__',
'__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__',
'__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__',
'__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'),
Keyword),
(r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
r'array|multiset|program|function|lambda|mixed|'
r'[a-z_][a-z0-9_]*_t)\b',
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
(r'[~!%^&*+=|?:<>/@-]', Operator),
inherit,
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Whitespace, '#pop'),
],
}
class NesCLexer(CLexer):
"""
For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
directives.
.. versionadded:: 2.0
"""
name = 'nesC'
aliases = ['nesc']
filenames = ['*.nc']
mimetypes = ['text/x-nescsrc']
tokens = {
'statements': [
(words((
'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component',
'components', 'configuration', 'event', 'extends', 'generic',
'implementation', 'includes', 'interface', 'module', 'new', 'norace',
'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'),
Keyword),
(words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t',
'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t',
'nx_uint64_t'), suffix=r'\b'),
Keyword.Type),
inherit,
],
}
class ClayLexer(RegexLexer):
"""
For `Clay <http://claylabs.com/clay/>`_ source.
.. versionadded:: 2.0
"""
name = 'Clay'
filenames = ['*.clay']
aliases = ['clay']
mimetypes = ['text/x-clay']
tokens = {
'root': [
(r'\s+', Whitespace),
(r'//.*?$', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\b(public|private|import|as|record|variant|instance'
r'|define|overload|default|external|alias'
r'|rvalue|ref|forward|inline|noinline|forceinline'
r'|enum|var|and|or|not|if|else|goto|return|while'
r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
r'|finally|onerror|staticassert|eval|when|newtype'
r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
r')\b', Keyword),
(r'[~!%^&*+=|:<>/-]', Operator),
(r'[#(){}\[\],;.]', Punctuation),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'\d+[LlUu]*', Number.Integer),
(r'\b(true|false)\b', Name.Builtin),
(r'(?i)[a-z_?][\w?]*', Name),
(r'"""', String, 'tdqs'),
(r'"', String, 'dqs'),
],
'strings': [
(r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
(r'[^\\"]+', String),
],
'nl': [
(r'\n', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings'),
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl'),
],
}
class ECLexer(CLexer):
"""
For eC source code with preprocessor directives.
.. versionadded:: 1.5
"""
name = 'eC'
aliases = ['ec']
filenames = ['*.ec', '*.eh']
mimetypes = ['text/x-echdr', 'text/x-ecsrc']
tokens = {
'statements': [
(words((
'virtual', 'class', 'private', 'public', 'property', 'import',
'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get',
'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass',
'__on_register_module', 'namespace', 'using', 'typed_object',
'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers',
'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset',
'class_default_property', 'property_category', 'class_data',
'class_property', 'thisclass', 'dbtable', 'dbindex',
'database_open', 'dbfield'), suffix=r'\b'), Keyword),
(words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte',
'unichar', 'int64'), suffix=r'\b'),
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
(r'(null|value|this)\b', Name.Builtin),
inherit,
]
}
class ValaLexer(RegexLexer):
"""
For Vala source code with preprocessor directives.
.. versionadded:: 1.1
"""
name = 'Vala'
aliases = ['vala', 'vapi']
filenames = ['*.vala', '*.vapi']
mimetypes = ['text/x-vala']
tokens = {
'whitespace': [
(r'^\s*#if\s+0', Comment.Preproc, 'if0'),
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'[L@]?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'(?s)""".*?"""', String), # verbatim strings
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
bygroups(Punctuation, Name.Decorator, Punctuation)),
# TODO: "correctly" parse complex code attributes
(r'(\[)(CCode|(?:Integer|Floating)Type)',
bygroups(Punctuation, Name.Decorator)),
(r'[()\[\],.]', Punctuation),
(words((
'as', 'base', 'break', 'case', 'catch', 'construct', 'continue',
'default', 'delete', 'do', 'else', 'enum', 'finally', 'for',
'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params',
'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try',
'typeof', 'while', 'yield'), suffix=r'\b'),
Keyword),
(words((
'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern',
'inline', 'internal', 'override', 'owned', 'private', 'protected',
'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned',
'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'),
Keyword.Declaration),
(r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Whitespace),
'namespace'),
(r'(class|errordomain|interface|struct)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'class'),
(r'(\.)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
# void is an actual keyword, others are in glib-2.0.vapi
(words((
'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16',
'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string',
'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'ulong', 'unichar', 'ushort'), suffix=r'\b'),
Keyword.Type),
(r'(true|false|null)\b', Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
],
}
class CudaLexer(CLexer):
"""
For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
source.
.. versionadded:: 1.6
"""
name = 'CUDA'
filenames = ['*.cu', '*.cuh']
aliases = ['cuda', 'cu']
mimetypes = ['text/x-cuda']
function_qualifiers = {'__device__', '__global__', '__host__',
'__noinline__', '__forceinline__'}
variable_qualifiers = {'__device__', '__constant__', '__shared__',
'__restrict__'}
vector_types = {'char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
'ulonglong2', 'float1', 'float2', 'float3', 'float4',
'double1', 'double2', 'dim3'}
variables = {'gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'}
functions = {'__threadfence_block', '__threadfence', '__threadfence_system',
'__syncthreads', '__syncthreads_count', '__syncthreads_and',
'__syncthreads_or'}
execution_confs = {'<<<', '>>>'}
def get_tokens_unprocessed(self, text, stack=('root',)):
for index, token, value in CLexer.get_tokens_unprocessed(self, text, stack):
if token is Name:
if value in self.variable_qualifiers:
token = Keyword.Type
elif value in self.vector_types:
token = Keyword.Type
elif value in self.variables:
token = Name.Builtin
elif value in self.execution_confs:
token = Keyword.Pseudo
elif value in self.function_qualifiers:
token = Keyword.Reserved
elif value in self.functions:
token = Name.Function
yield index, token, value
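# The override above post-processes the parent CLexer's stream: plain Name
# tokens whose text appears in one of the CUDA vocabularies are re-tagged
# with a more specific token type. A sketch of the intended effect (assumed
# behaviour, shown for illustration only):
#
#     >>> lx = CudaLexer()
#     >>> [t for t, v in lx.get_tokens('__global__ void k();')
#     ...  if v == '__global__']
#     [Token.Keyword.Reserved]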
class SwigLexer(CppLexer):
"""
For `SWIG <http://www.swig.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'SWIG'
aliases = ['swig']
filenames = ['*.swg', '*.i']
mimetypes = ['text/swig']
priority = 0.04 # Lower than C/C++ and Objective C/C++
tokens = {
'root': [
# Match it here so it won't be matched as a function in the rest of root
(r'\$\**\&?\w+', Name),
inherit
],
'statements': [
# SWIG directives
(r'(%[a-z_][a-z0-9_]*)', Name.Function),
# Special variables
(r'\$\**\&?\w+', Name),
# Stringification / additional preprocessor directives
(r'##*[a-zA-Z_]\w*', Comment.Preproc),
inherit,
],
}
# This is a far from complete set of SWIG directives
swig_directives = {
# Most common directives
'%apply', '%define', '%director', '%enddef', '%exception', '%extend',
'%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
'%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
'%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
# Less common directives
'%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
'%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
'%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
'%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
'%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
'%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
'%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
'%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
'%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
'%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
'%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
'%trackobjects', '%types', '%unrefobject', '%varargs', '%warn',
'%warnfilter'}
def analyse_text(text):
rv = 0
# Search for SWIG directives, which are conventionally at the beginning of
# a line. The probability of them being within a line is low, so let another
# lexer win in this case.
matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
for m in matches:
if m in SwigLexer.swig_directives:
rv = 0.98
break
else:
rv = 0.91 # Fraction higher than MatlabLexer
return rv
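# analyse_text scores feed pygments.lexers.guess_lexer(), which picks the
# lexer reporting the highest value. A hedged sketch (the exact winner can
# vary with the installed lexer set):
#
#     >>> from pygments.lexers import guess_lexer
#     >>> guess_lexer('%module demo\n%include "std_string.i"\n')
#     <pygments.lexers.SwigLexer>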
class MqlLexer(CppLexer):
"""
For `MQL4 <http://docs.mql4.com/>`_ and
`MQL5 <http://www.mql5.com/en/docs>`_ source code.
.. versionadded:: 2.0
"""
name = 'MQL'
aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
filenames = ['*.mq4', '*.mq5', '*.mqh']
mimetypes = ['text/x-mql']
tokens = {
'statements': [
(words(_mql_builtins.keywords, suffix=r'\b'), Keyword),
(words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type),
(words(_mql_builtins.types, suffix=r'\b'), Name.Function),
(words(_mql_builtins.constants, suffix=r'\b'), Name.Constant),
(words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'),
Name.Constant),
inherit,
],
}
class ArduinoLexer(CppLexer):
"""
For `Arduino(tm) <https://arduino.cc/>`_ source.
This is an extension of the CppLexer, as the Arduino® Language is a superset
of C++
.. versionadded:: 2.1
"""
name = 'Arduino'
aliases = ['arduino']
filenames = ['*.ino']
mimetypes = ['text/x-arduino']
    # Main structure functions of an Arduino sketch
structure = {'setup', 'loop'}
# Language operators
operators = {'not', 'or', 'and', 'xor'}
# Language 'variables'
variables = {
'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL',
'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET',
'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH',
'LOW', 'INPUT', 'OUTPUT', 'INPUT_PULLUP', 'LED_BUILTIN', 'true', 'false',
'void', 'boolean', 'char', 'unsigned char', 'byte', 'int', 'unsigned int',
'word', 'long', 'unsigned long', 'short', 'float', 'double', 'string', 'String',
'array', 'static', 'volatile', 'const', 'boolean', 'byte', 'word', 'string',
'String', 'array', 'int', 'float', 'private', 'char', 'virtual', 'operator',
'sizeof', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int8_t', 'int16_t',
'int32_t', 'int64_t', 'dynamic_cast', 'typedef', 'const_cast', 'const',
'struct', 'static_cast', 'union', 'unsigned', 'long', 'volatile', 'static',
'protected', 'bool', 'public', 'friend', 'auto', 'void', 'enum', 'extern',
'class', 'short', 'reinterpret_cast', 'double', 'register', 'explicit',
'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary',
'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
'atomic_llong', 'atomic_ullong', 'PROGMEM'}
    # Functions and classes shipped with the language
functions = {
'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer',
'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall',
'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient',
'GSMScanner', 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer',
'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet', 'Console',
'GSMBand', 'Esplora', 'Stepper', 'Process', 'WiFiUDP', 'GSM_SMS', 'Mailbox',
'USBHost', 'Firmata', 'PImage', 'Client', 'Server', 'GSMPIN', 'FileIO',
'Bridge', 'Serial', 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File',
'Task', 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD',
'runShellCommandAsynchronously', 'analogWriteResolution',
'retrieveCallingNumber', 'printFirmwareVersion', 'analogReadResolution',
'sendDigitalPortPair', 'noListenOnLocalhost', 'readJoystickButton',
'setFirmwareVersion', 'readJoystickSwitch', 'scrollDisplayRight',
'getVoiceCallStatus', 'scrollDisplayLeft', 'writeMicroseconds',
'delayMicroseconds', 'beginTransmission', 'getSignalStrength',
'runAsynchronously', 'getAsynchronously', 'listenOnLocalhost',
'getCurrentCarrier', 'readAccelerometer', 'messageAvailable',
'sendDigitalPorts', 'lineFollowConfig', 'countryNameWrite', 'runShellCommand',
'readStringUntil', 'rewindDirectory', 'readTemperature', 'setClockDivider',
'readLightSensor', 'endTransmission', 'analogReference', 'detachInterrupt',
'countryNameRead', 'attachInterrupt', 'encryptionType', 'readBytesUntil',
'robotNameWrite', 'readMicrophone', 'robotNameRead', 'cityNameWrite',
'userNameWrite', 'readJoystickY', 'readJoystickX', 'mouseReleased',
'openNextFile', 'scanNetworks', 'noInterrupts', 'digitalWrite', 'beginSpeaker',
'mousePressed', 'isActionDone', 'mouseDragged', 'displayLogos', 'noAutoscroll',
'addParameter', 'remoteNumber', 'getModifiers', 'keyboardRead', 'userNameRead',
'waitContinue', 'processInput', 'parseCommand', 'printVersion', 'readNetworks',
'writeMessage', 'blinkVersion', 'cityNameRead', 'readMessage', 'setDataMode',
'parsePacket', 'isListening', 'setBitOrder', 'beginPacket', 'isDirectory',
'motorsWrite', 'drawCompass', 'digitalRead', 'clearScreen', 'serialEvent',
'rightToLeft', 'setTextSize', 'leftToRight', 'requestFrom', 'keyReleased',
'compassRead', 'analogWrite', 'interrupts', 'WiFiServer', 'disconnect',
'playMelody', 'parseFloat', 'autoscroll', 'getPINUsed', 'setPINUsed',
'setTimeout', 'sendAnalog', 'readSlider', 'analogRead', 'beginWrite',
'createChar', 'motorsStop', 'keyPressed', 'tempoWrite', 'readButton',
'subnetMask', 'debugPrint', 'macAddress', 'writeGreen', 'randomSeed',
'attachGPRS', 'readString', 'sendString', 'remotePort', 'releaseAll',
'mouseMoved', 'background', 'getXChange', 'getYChange', 'answerCall',
'getResult', 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON',
'getButton', 'available', 'connected', 'findUntil', 'readBytes', 'exitValue',
'readGreen', 'writeBlue', 'startLoop', 'IPAddress', 'isPressed', 'sendSysex',
'pauseMode', 'gatewayIP', 'setCursor', 'getOemKey', 'tuneWrite', 'noDisplay',
'loadImage', 'switchPIN', 'onRequest', 'onReceive', 'changePIN', 'playFile',
'noBuffer', 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT',
'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB', 'highByte',
'writeRed', 'setSpeed', 'readBlue', 'noStroke', 'remoteIP', 'transfer',
'shutdown', 'hangCall', 'beginSMS', 'endWrite', 'attached', 'maintain',
'noCursor', 'checkReg', 'checkPUK', 'shiftOut', 'isValid', 'shiftIn', 'pulseIn',
'connect', 'println', 'localIP', 'pinMode', 'getIMEI', 'display', 'noBlink',
'process', 'getBand', 'running', 'beginSD', 'drawBMP', 'lowByte', 'setBand',
'release', 'bitRead', 'prepare', 'pointTo', 'readRed', 'setMode', 'noFill',
'remove', 'listen', 'stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer',
'height', 'bitSet', 'circle', 'config', 'cursor', 'random', 'IRread', 'setDNS',
'endSMS', 'getKey', 'micros', 'millis', 'begin', 'print', 'write', 'ready',
'flush', 'width', 'isPIN', 'blink', 'clear', 'press', 'mkdir', 'rmdir', 'close',
'point', 'yield', 'image', 'BSSID', 'click', 'delay', 'read', 'text', 'move',
'peek', 'beep', 'rect', 'line', 'open', 'seek', 'fill', 'size', 'turn', 'stop',
'home', 'find', 'step', 'tone', 'sqrt', 'RSSI', 'SSID', 'end', 'bit', 'tan',
'cos', 'sin', 'pow', 'map', 'abs', 'max', 'min', 'get', 'run', 'put',
'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit',
'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase',
'isHexadecimalDigit'}
# do not highlight
suppress_highlight = {
'namespace', 'template', 'mutable', 'using', 'asm', 'typeid',
'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept',
'static_assert', 'thread_local', 'restrict'}
def get_tokens_unprocessed(self, text, stack=('root',)):
for index, token, value in CppLexer.get_tokens_unprocessed(self, text, stack):
if value in self.structure:
yield index, Name.Builtin, value
elif value in self.operators:
yield index, Operator, value
elif value in self.variables:
yield index, Keyword.Reserved, value
elif value in self.suppress_highlight:
yield index, Name, value
elif value in self.functions:
yield index, Name.Function, value
else:
yield index, token, value
class CharmciLexer(CppLexer):
"""
For `Charm++ <https://charm.cs.illinois.edu>`_ interface files (.ci).
.. versionadded:: 2.4
"""
name = 'Charmci'
aliases = ['charmci']
filenames = ['*.ci']
mimetypes = []
tokens = {
'keywords': [
(r'(module)(\s+)', bygroups(Keyword, Text), 'classname'),
(words(('mainmodule', 'mainchare', 'chare', 'array', 'group',
'nodegroup', 'message', 'conditional')), Keyword),
(words(('entry', 'aggregate', 'threaded', 'sync', 'exclusive',
'nokeep', 'notrace', 'immediate', 'expedited', 'inline',
'local', 'python', 'accel', 'readwrite', 'writeonly',
'accelblock', 'memcritical', 'packed', 'varsize',
'initproc', 'initnode', 'initcall', 'stacksize',
'createhere', 'createhome', 'reductiontarget', 'iget',
'nocopy', 'mutable', 'migratable', 'readonly')), Keyword),
inherit,
],
}
class OmgIdlLexer(CLexer):
"""
Lexer for Object Management Group Interface Definition Language.
.. versionadded:: 2.9
"""
name = 'OMG Interface Definition Language'
url = 'https://www.omg.org/spec/IDL/About-IDL/'
aliases = ['omg-idl']
filenames = ['*.idl', '*.pidl']
mimetypes = []
scoped_name = r'((::)?\w+)+'
tokens = {
'values': [
(words(('true', 'false'), prefix=r'(?i)', suffix=r'\b'), Number),
(r'([Ll]?)(")', bygroups(String.Affix, String.Double), 'string'),
(r'([Ll]?)(\')(\\[^\']+)(\')',
bygroups(String.Affix, String.Char, String.Escape, String.Char)),
(r'([Ll]?)(\')(\\\')(\')',
bygroups(String.Affix, String.Char, String.Escape, String.Char)),
(r'([Ll]?)(\'.\')', bygroups(String.Affix, String.Char)),
(r'[+-]?\d+(\.\d*)?[Ee][+-]?\d+', Number.Float),
(r'[+-]?(\d+\.\d*)|(\d*\.\d+)([Ee][+-]?\d+)?', Number.Float),
(r'(?i)[+-]?0x[0-9a-f]+', Number.Hex),
(r'[+-]?[1-9]\d*', Number.Integer),
(r'[+-]?0[0-7]*', Number.Oct),
(r'[\+\-\*\/%^&\|~]', Operator),
(words(('<<', '>>')), Operator),
(scoped_name, Name),
(r'[{};:,<>\[\]]', Punctuation),
],
'annotation_params': [
include('whitespace'),
(r'\(', Punctuation, '#push'),
include('values'),
(r'=', Punctuation),
(r'\)', Punctuation, '#pop'),
],
'annotation_params_maybe': [
(r'\(', Punctuation, 'annotation_params'),
include('whitespace'),
default('#pop'),
],
'annotation_appl': [
(r'@' + scoped_name, Name.Decorator, 'annotation_params_maybe'),
],
'enum': [
include('whitespace'),
(r'[{,]', Punctuation),
(r'\w+', Name.Constant),
include('annotation_appl'),
(r'\}', Punctuation, '#pop'),
],
'root': [
include('whitespace'),
(words((
'typedef', 'const',
'in', 'out', 'inout', 'local',
), prefix=r'(?i)', suffix=r'\b'), Keyword.Declaration),
(words((
'void', 'any', 'native', 'bitfield',
'unsigned', 'boolean', 'char', 'wchar', 'octet', 'short', 'long',
'int8', 'uint8', 'int16', 'int32', 'int64', 'uint16', 'uint32', 'uint64',
'float', 'double', 'fixed',
'sequence', 'string', 'wstring', 'map',
), prefix=r'(?i)', suffix=r'\b'), Keyword.Type),
(words((
'@annotation', 'struct', 'union', 'bitset', 'interface',
'exception', 'valuetype', 'eventtype', 'component',
), prefix=r'(?i)', suffix=r'(\s+)(\w+)'), bygroups(Keyword, Whitespace, Name.Class)),
(words((
'abstract', 'alias', 'attribute', 'case', 'connector',
'consumes', 'context', 'custom', 'default', 'emits', 'factory',
'finder', 'getraises', 'home', 'import', 'manages', 'mirrorport',
'multiple', 'Object', 'oneway', 'primarykey', 'private', 'port',
'porttype', 'provides', 'public', 'publishes', 'raises',
'readonly', 'setraises', 'supports', 'switch', 'truncatable',
'typeid', 'typename', 'typeprefix', 'uses', 'ValueBase',
), prefix=r'(?i)', suffix=r'\b'), Keyword),
(r'(?i)(enum|bitmask)(\s+)(\w+)',
bygroups(Keyword, Whitespace, Name.Class), 'enum'),
(r'(?i)(module)(\s+)(\w+)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
(r'(\w+)(\s*)(=)', bygroups(Name.Constant, Whitespace, Operator)),
(r'[\(\)]', Punctuation),
include('values'),
include('annotation_appl'),
],
}
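# Minimal usage sketch (illustrative only, not part of the upstream file):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sketch = 'void setup() { pinMode(LED_BUILTIN, OUTPUT); }\n'
    print(highlight(sketch, ArduinoLexer(), TerminalFormatter()))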
| 29,203 | Python | 42.784108 | 97 | 0.506763 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/installers.py | """
pygments.lexers.installers
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for installer/packager DSLs and formats.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation, Generic, Number, Whitespace
__all__ = ['NSISLexer', 'RPMSpecLexer', 'SourcesListLexer',
'DebianControlLexer']
class NSISLexer(RegexLexer):
"""
For NSIS scripts.
.. versionadded:: 1.6
"""
name = 'NSIS'
url = 'http://nsis.sourceforge.net/'
aliases = ['nsis', 'nsi', 'nsh']
filenames = ['*.nsi', '*.nsh']
mimetypes = ['text/x-nsis']
flags = re.IGNORECASE
tokens = {
'root': [
(r'([;#].*)(\n)', bygroups(Comment, Whitespace)),
(r"'.*?'", String.Single),
(r'"', String.Double, 'str_double'),
(r'`', String.Backtick, 'str_backtick'),
include('macro'),
include('interpol'),
include('basic'),
(r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo),
(r'/[a-z_]\w*', Name.Attribute),
(r'\s+', Whitespace),
(r'[\w.]+', Text),
],
'basic': [
(r'(\n)(Function)(\s+)([._a-z][.\w]*)\b',
bygroups(Whitespace, Keyword, Whitespace, Name.Function)),
(r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b',
bygroups(Keyword.Namespace, Punctuation, Name.Function)),
(r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)),
(r'(\b[ULS]|\B)([!<>=]?=|\<\>?|\>)\B', Operator),
(r'[|+-]', Operator),
(r'\\', Punctuation),
(r'\b(Abort|Add(?:BrandingImage|Size)|'
r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|'
r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|'
r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|'
r'ComponentText|CopyFiles|CRCCheck|'
r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|'
r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|'
r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|'
r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|'
r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|'
r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|'
r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|'
r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|'
r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|'
r'InstDirError|LabelAddress|TempFileName)|'
r'Goto|HideWindow|Icon|'
r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|'
r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|'
r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|'
r'IsWindow|LangString(?:UP)?|'
r'License(?:BkColor|Data|ForceSelection|LangString|Text)|'
r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|'
r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|'
r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|'
r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|'
r'Return|RMDir|SearchPath|Section(?:Divider|End|'
r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|'
r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|'
r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|'
r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|'
r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|'
r'Silent|StaticBkColor)|'
r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|'
r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|'
r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|'
r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|'
r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|'
r'XPStyle)\b', Keyword),
(r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?'
r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|'
r'HK(CC|CR|CU|DD|LM|PD|U)|'
r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|'
r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|'
r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|'
r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|'
r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|'
r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|'
r'YESNO(?:CANCEL)?)|SET|SHCTX|'
r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|'
r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|'
r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|'
r'listonly|lzma|nevershow|none|normal|off|on|pop|push|'
r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|'
r'true|try|user|zlib)\b', Name.Constant),
],
'macro': [
(r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|'
r'delfilefile|echo(?:message)?|else|endif|error|execute|'
r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|'
r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|'
r'warning)\b', Comment.Preproc),
],
'interpol': [
(r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers
(r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|'
r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|'
r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|'
r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|'
r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|'
r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})',
Name.Builtin),
(r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global),
(r'\$[a-z_]\w*', Name.Variable),
],
'str_double': [
(r'"', String.Double, '#pop'),
(r'\$(\\[nrt"]|\$)', String.Escape),
include('interpol'),
(r'[^"]+', String.Double),
],
'str_backtick': [
(r'`', String.Double, '#pop'),
(r'\$(\\[nrt"]|\$)', String.Escape),
include('interpol'),
(r'[^`]+', String.Double),
],
}
class RPMSpecLexer(RegexLexer):
"""
For RPM ``.spec`` files.
.. versionadded:: 1.6
"""
name = 'RPMSpec'
aliases = ['spec']
filenames = ['*.spec']
mimetypes = ['text/x-rpm-spec']
_directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|'
'post[a-z]*|trigger[a-z]*|files)')
tokens = {
'root': [
(r'#.*$', Comment),
include('basic'),
],
'description': [
(r'^(%' + _directives + ')(.*)$',
bygroups(Name.Decorator, Text), '#pop'),
(r'\s+', Whitespace),
(r'.', Text),
],
'changelog': [
(r'\*.*$', Generic.Subheading),
(r'^(%' + _directives + ')(.*)$',
bygroups(Name.Decorator, Text), '#pop'),
(r'\s+', Whitespace),
(r'.', Text),
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
include('interpol'),
(r'.', String.Double),
],
'basic': [
include('macro'),
(r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|'
r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|'
r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|'
r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$',
bygroups(Generic.Heading, Punctuation, using(this))),
(r'^%description', Name.Decorator, 'description'),
(r'^%changelog', Name.Decorator, 'changelog'),
(r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)),
(r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|'
r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)',
Keyword),
include('interpol'),
(r"'.*?'", String.Single),
(r'"', String.Double, 'string'),
(r'\s+', Whitespace),
(r'.', Text),
],
'macro': [
(r'%define.*$', Comment.Preproc),
(r'%\{\!\?.*%define.*\}', Comment.Preproc),
(r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$',
bygroups(Comment.Preproc, Text)),
],
'interpol': [
(r'%\{?__[a-z_]+\}?', Name.Function),
(r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo),
(r'%\{\?\w+\}', Name.Variable),
(r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global),
(r'%\{[a-zA-Z]\w+\}', Keyword.Constant),
]
}
class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
.. versionadded:: 0.7
"""
name = 'Debian Sourcelist'
aliases = ['debsources', 'sourceslist', 'sources.list']
filenames = ['sources.list']
    mimetypes = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
bygroups(Keyword, Whitespace), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Whitespace, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Whitespace),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.splitlines():
line = line.strip()
if line.startswith('deb ') or line.startswith('deb-src '):
return True
class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
.. versionadded:: 0.9
"""
name = 'Debian Control file'
url = 'https://www.debian.org/doc/debian-policy/ch-controlfields.html'
aliases = ['debcontrol', 'control']
filenames = ['control']
tokens = {
'root': [
(r'^(Description)', Keyword, 'description'),
(r'^(Maintainer|Uploaders)(:\s*)', bygroups(Keyword, Text),
'maintainer'),
(r'^((?:Build-|Pre-)?Depends(?:-Indep|-Arch)?)(:\s*)',
bygroups(Keyword, Text), 'depends'),
(r'^(Recommends|Suggests|Enhances)(:\s*)', bygroups(Keyword, Text),
'depends'),
(r'^((?:Python-)?Version)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^((?:Installed-)?Size)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
bygroups(Keyword, Whitespace, String)),
],
'maintainer': [
(r'<[^>]+>$', Generic.Strong, '#pop'),
(r'<[^>]+>', Generic.Strong),
(r',\n?', Text),
(r'[^,<]+$', Text, '#pop'),
(r'[^,<]+', Text),
],
'description': [
(r'(.*)(Homepage)(: )(\S+)',
bygroups(Text, String, Name, Name.Class)),
(r':.*\n', Generic.Strong),
(r' .*\n', Text),
default('#pop'),
],
'depends': [
(r'(\$)(\{)(\w+\s*:\s*\w+)(\})',
bygroups(Operator, Text, Name.Entity, Text)),
(r'\(', Text, 'depend_vers'),
(r'\|', Operator),
(r',\n', Text),
(r'\n', Text, '#pop'),
(r'[,\s]', Text),
(r'[+.a-zA-Z0-9-]+', Name.Function),
(r'\[.*?\]', Name.Entity),
],
'depend_vers': [
(r'\)', Text, '#pop'),
(r'([><=]+)(\s*)([^)]+)', bygroups(Operator, Text, Number)),
]
}
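# Illustrative sketch (not part of the upstream file): tokenize a fragment of
# a Debian control file with the lexer defined above.
if __name__ == '__main__':
    sample = 'Package: pygments\nDepends: python3 (>= 3.6)\n'
    for ttype, value in DebianControlLexer().get_tokens(sample):
        if value.strip():
            print(ttype, repr(value))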
| 13,178 | Python | 39.179878 | 85 | 0.492563 |
swadaskar/Isaac_Sim_Folder/exts/omni.isaac.repl/pip_prebundle/pygments/lexers/hdl.py | """
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
.. versionadded:: 1.4
"""
name = 'verilog'
aliases = ['verilog', 'v']
filenames = ['*.v']
mimetypes = ['text/x-verilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[01]+', Number.Bin),
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Text),
'import'),
(words((
'always', 'always_comb', 'always_ff', 'always_latch', 'and',
'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1',
'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign',
'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase',
'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive',
'endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for',
'force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0',
'highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large',
'localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge',
'nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed',
'parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1',
'pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return',
'rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed',
'small', 'specify', 'specparam', 'strength', 'string', 'strong0',
'strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1',
'type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait',
'weak0', 'weak1', 'while', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
(words((
'accelerate', 'autoexpand_vectornets', 'celldefine', 'default_nettype',
'else', 'elsif', 'endcelldefine', 'endif', 'endprotect', 'endprotected',
'expand_vectornets', 'ifdef', 'ifndef', 'include', 'noaccelerate',
'noexpand_vectornets', 'noremove_gatenames', 'noremove_netnames',
'nounconnected_drive', 'protect', 'protected', 'remove_gatenames',
'remove_netnames', 'resetall', 'timescale', 'unconnected_drive',
'undef'), prefix=r'`', suffix=r'\b'),
Comment.Preproc),
(words((
'bits', 'bitstoreal', 'bitstoshortreal', 'countdrivers', 'display', 'fclose',
'fdisplay', 'finish', 'floor', 'fmonitor', 'fopen', 'fstrobe', 'fwrite',
'getpattern', 'history', 'incsave', 'input', 'itor', 'key', 'list', 'log',
'monitor', 'monitoroff', 'monitoron', 'nokey', 'nolog', 'printtimescale',
'random', 'readmemb', 'readmemh', 'realtime', 'realtobits', 'reset',
'reset_count', 'reset_value', 'restart', 'rtoi', 'save', 'scale', 'scope',
'shortrealtobits', 'showscopes', 'showvariables', 'showvars', 'sreadmemb',
'sreadmemh', 'stime', 'stop', 'strobe', 'time', 'timeformat', 'write'),
prefix=r'\$', suffix=r'\b'),
Name.Builtin),
(words((
'byte', 'shortint', 'int', 'longint', 'integer', 'time',
'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
                'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wor',
                'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'\$?[a-zA-Z_]\w*', Name),
(r'\\(\S+)', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Whitespace, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
def analyse_text(text):
"""Verilog code will use one of reg/wire/assign for sure, and that
is not common elsewhere."""
result = 0
if 'reg' in text:
result += 0.1
if 'wire' in text:
result += 0.1
if 'assign' in text:
result += 0.1
return result
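# Each keyword hit above adds 0.1, so a file containing all of reg, wire and
# assign scores 0.3: enough to beat lexers that return 0, yet deliberately
# far from a certain match. Rough sketch of the scoring (note the usual
# floating-point rounding):
#
#     >>> VerilogLexer.analyse_text('wire w; reg r; assign w = r;')
#     0.30000000000000004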
class SystemVerilogLexer(RegexLexer):
"""
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
.. versionadded:: 1.5
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
filenames = ['*.sv', '*.svh']
mimetypes = ['text/x-systemverilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^(\s*)(`define)', bygroups(Whitespace, Comment.Preproc), 'macro'),
(r'^(\s*)(package)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace)),
(r'^(\s*)(import)(\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'import'),
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([1-9][_0-9]*)?\s*\'[sS]?[bB]\s*[xXzZ?01][_xXzZ?01]*',
Number.Bin),
(r'([1-9][_0-9]*)?\s*\'[sS]?[oO]\s*[xXzZ?0-7][_xXzZ?0-7]*',
Number.Oct),
(r'([1-9][_0-9]*)?\s*\'[sS]?[dD]\s*[xXzZ?0-9][_xXzZ?0-9]*',
Number.Integer),
(r'([1-9][_0-9]*)?\s*\'[sS]?[hH]\s*[xXzZ?0-9a-fA-F][_xXzZ?0-9a-fA-F]*',
Number.Hex),
(r'\'[01xXzZ]', Number),
(r'[0-9][_0-9]*', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(words(('inside', 'dist'), suffix=r'\b'), Operator.Word),
(r'[()\[\],.;\'$]', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(words((
'accept_on', 'alias', 'always', 'always_comb', 'always_ff',
'always_latch', 'and', 'assert', 'assign', 'assume', 'automatic',
'before', 'begin', 'bind', 'bins', 'binsof', 'break', 'buf',
'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cell',
'checker', 'clocking', 'cmos', 'config',
'constraint', 'context', 'continue', 'cover', 'covergroup',
'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design',
'disable', 'do', 'edge', 'else', 'end', 'endcase',
'endchecker', 'endclocking', 'endconfig', 'endfunction',
'endgenerate', 'endgroup', 'endinterface', 'endmodule', 'endpackage',
'endprimitive', 'endprogram', 'endproperty', 'endsequence',
'endspecify', 'endtable', 'endtask', 'enum', 'eventually',
'expect', 'export', 'extern', 'final', 'first_match',
'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function',
'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff',
'ifnone', 'ignore_bins', 'illegal_bins', 'implies', 'implements', 'import',
'incdir', 'include', 'initial', 'inout', 'input',
'instance', 'interconnect', 'interface', 'intersect', 'join',
'join_any', 'join_none', 'large', 'let', 'liblist', 'library',
'local', 'localparam', 'macromodule', 'matches',
'medium', 'modport', 'module', 'nand', 'negedge', 'nettype', 'new', 'nexttime',
'nmos', 'nor', 'noshowcancelled', 'not', 'notif0', 'notif1', 'null',
'or', 'output', 'package', 'packed', 'parameter', 'pmos', 'posedge',
'primitive', 'priority', 'program', 'property', 'protected', 'pull0',
'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect',
'pulsestyle_onevent', 'pure', 'rand', 'randc', 'randcase',
'randsequence', 'rcmos', 'ref',
'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually',
's_nexttime', 's_until', 's_until_with', 'scalared', 'sequence',
'showcancelled', 'small', 'soft', 'solve',
'specify', 'specparam', 'static', 'strong', 'strong0',
'strong1', 'struct', 'super', 'sync_accept_on',
'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1',
'typedef', 'union', 'unique', 'unique0', 'until',
'until_with', 'untyped', 'use', 'vectored',
'virtual', 'wait', 'wait_order', 'weak', 'weak0',
'weak1', 'while', 'wildcard', 'with', 'within',
'xnor', 'xor'),
suffix=r'\b'),
Keyword),
(r'(class)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(extends)(\s+)([a-zA-Z_]\w*)',
bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(endclass\b)(?:(\s*)(:)(\s*)([a-zA-Z_]\w*))?',
bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Class)),
(words((
# Variable types
'bit', 'byte', 'chandle', 'const', 'event', 'int', 'integer',
'logic', 'longint', 'real', 'realtime', 'reg', 'shortint',
'shortreal', 'signed', 'string', 'time', 'type', 'unsigned',
'var', 'void',
# Net types
'supply0', 'supply1', 'tri', 'triand', 'trior', 'trireg',
'tri0', 'tri1', 'uwire', 'wand', 'wire', 'wor'),
suffix=r'\b'),
Keyword.Type),
(words((
'`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine',
'`default_nettype', '`define', '`else', '`elsif', '`end_keywords',
'`endcelldefine', '`endif', '`ifdef', '`ifndef', '`include',
'`line', '`nounconnected_drive', '`pragma', '`resetall',
'`timescale', '`unconnected_drive', '`undef', '`undefineall'),
suffix=r'\b'),
Comment.Preproc),
(words((
# Simulation control tasks (20.2)
'$exit', '$finish', '$stop',
# Simulation time functions (20.3)
'$realtime', '$stime', '$time',
# Timescale tasks (20.4)
'$printtimescale', '$timeformat',
# Conversion functions
'$bitstoreal', '$bitstoshortreal', '$cast', '$itor',
'$realtobits', '$rtoi', '$shortrealtobits', '$signed',
'$unsigned',
# Data query functions (20.6)
'$bits', '$isunbounded', '$typename',
# Array query functions (20.7)
'$dimensions', '$high', '$increment', '$left', '$low', '$right',
'$size', '$unpacked_dimensions',
# Math functions (20.8)
'$acos', '$acosh', '$asin', '$asinh', '$atan', '$atan2',
'$atanh', '$ceil', '$clog2', '$cos', '$cosh', '$exp', '$floor',
'$hypot', '$ln', '$log10', '$pow', '$sin', '$sinh', '$sqrt',
'$tan', '$tanh',
# Bit vector system functions (20.9)
'$countbits', '$countones', '$isunknown', '$onehot', '$onehot0',
# Severity tasks (20.10)
'$info', '$error', '$fatal', '$warning',
# Assertion control tasks (20.12)
'$assertcontrol', '$assertfailoff', '$assertfailon',
'$assertkill', '$assertnonvacuouson', '$assertoff', '$asserton',
'$assertpassoff', '$assertpasson', '$assertvacuousoff',
# Sampled value system functions (20.13)
'$changed', '$changed_gclk', '$changing_gclk', '$falling_gclk',
'$fell', '$fell_gclk', '$future_gclk', '$past', '$past_gclk',
'$rising_gclk', '$rose', '$rose_gclk', '$sampled', '$stable',
'$stable_gclk', '$steady_gclk',
# Coverage control functions (20.14)
'$coverage_control', '$coverage_get', '$coverage_get_max',
'$coverage_merge', '$coverage_save', '$get_coverage',
'$load_coverage_db', '$set_coverage_db_name',
# Probabilistic distribution functions (20.15)
'$dist_chi_square', '$dist_erlang', '$dist_exponential',
'$dist_normal', '$dist_poisson', '$dist_t', '$dist_uniform',
'$random',
# Stochastic analysis tasks and functions (20.16)
'$q_add', '$q_exam', '$q_full', '$q_initialize', '$q_remove',
# PLA modeling tasks (20.17)
'$async$and$array', '$async$and$plane', '$async$nand$array',
'$async$nand$plane', '$async$nor$array', '$async$nor$plane',
'$async$or$array', '$async$or$plane', '$sync$and$array',
'$sync$and$plane', '$sync$nand$array', '$sync$nand$plane',
'$sync$nor$array', '$sync$nor$plane', '$sync$or$array',
'$sync$or$plane',
# Miscellaneous tasks and functions (20.18)
'$system',
# Display tasks (21.2)
'$display', '$displayb', '$displayh', '$displayo', '$monitor',
'$monitorb', '$monitorh', '$monitoro', '$monitoroff',
'$monitoron', '$strobe', '$strobeb', '$strobeh', '$strobeo',
'$write', '$writeb', '$writeh', '$writeo',
# File I/O tasks and functions (21.3)
'$fclose', '$fdisplay', '$fdisplayb', '$fdisplayh',
'$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', '$fgets',
'$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', '$fopen',
'$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb',
'$fstrobeh', '$fstrobeo', '$ftell', '$fwrite', '$fwriteb',
'$fwriteh', '$fwriteo', '$rewind', '$sformat', '$sformatf',
'$sscanf', '$swrite', '$swriteb', '$swriteh', '$swriteo',
'$ungetc',
# Memory load tasks (21.4)
'$readmemb', '$readmemh',
# Memory dump tasks (21.5)
'$writememb', '$writememh',
# Command line input (21.6)
'$test$plusargs', '$value$plusargs',
# VCD tasks (21.7)
'$dumpall', '$dumpfile', '$dumpflush', '$dumplimit', '$dumpoff',
'$dumpon', '$dumpports', '$dumpportsall', '$dumpportsflush',
'$dumpportslimit', '$dumpportsoff', '$dumpportson', '$dumpvars',
), suffix=r'\b'),
Name.Builtin),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'\$?[a-zA-Z_]\w*', Name),
(r'\\(\S+)', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?$', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Whitespace, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
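
# --- Illustrative sketch (an addition, not upstream code): tokenising a
# small SystemVerilog fragment with the lexer defined above.  get_tokens()
# is the standard pygments Lexer API; the fragment itself is an assumed
# example.
def _demo_systemverilog_tokens():
    lexer = SystemVerilogLexer()
    code = 'typedef logic [7:0] byte_t;\n'
    # Yields (token_type, value) pairs: 'typedef' comes out as Keyword and
    # 'logic' as Keyword.Type, per the rules above.
    return list(lexer.get_tokens(code))
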
class VhdlLexer(RegexLexer):
"""
For VHDL source code.
.. versionadded:: 1.5
"""
name = 'vhdl'
aliases = ['vhdl']
filenames = ['*.vhdl', '*.vhd']
mimetypes = ['text/x-vhdl']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(String.Escape, Whitespace)), # line continuation
(r'--.*?$', Comment.Single),
(r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r"'[a-z_]\w*", Name.Attribute),
(r'[()\[\],.;\']', Punctuation),
(r'"[^\n\\"]*"', String),
(r'(library)(\s+)([a-z_]\w*)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'(use)(\s+)(entity)', bygroups(Keyword, Whitespace, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*\.)(all)',
bygroups(Keyword, Whitespace, Name.Namespace, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Whitespace, Name.Namespace)),
(r'(std|ieee)(\.[a-z_]\w*)',
bygroups(Name.Namespace, Name.Namespace)),
(words(('std', 'ieee', 'work'), suffix=r'\b'),
Name.Namespace),
(r'(entity|component)(\s+)([a-z_]\w*)',
bygroups(Keyword, Whitespace, Name.Class)),
(r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)'
r'(of)(\s+)([a-z_]\w*)(\s+)(is)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword, Whitespace,
Name.Class, Whitespace, Keyword)),
(r'([a-z_]\w*)(:)(\s+)(process|for)',
bygroups(Name.Class, Operator, Whitespace, Keyword)),
(r'(end)(\s+)', bygroups(using(this), Whitespace), 'endblock'),
include('types'),
include('keywords'),
include('numbers'),
(r'[a-z_]\w*', Name),
],
'endblock': [
include('keywords'),
(r'[a-z_]\w*', Name.Class),
(r'\s+', Whitespace),
(r';', Punctuation, '#pop'),
],
'types': [
(words((
'boolean', 'bit', 'character', 'severity_level', 'integer', 'time',
'delay_length', 'natural', 'positive', 'string', 'bit_vector',
'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector',
'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'),
Keyword.Type),
],
'keywords': [
(words((
'abs', 'access', 'after', 'alias', 'all', 'and',
'architecture', 'array', 'assert', 'attribute', 'begin', 'block',
'body', 'buffer', 'bus', 'case', 'component', 'configuration',
'constant', 'disconnect', 'downto', 'else', 'elsif', 'end',
'entity', 'exit', 'file', 'for', 'function', 'generate',
'generic', 'group', 'guarded', 'if', 'impure', 'in',
'inertial', 'inout', 'is', 'label', 'library', 'linkage',
'literal', 'loop', 'map', 'mod', 'nand', 'new',
'next', 'nor', 'not', 'null', 'of', 'on',
'open', 'or', 'others', 'out', 'package', 'port',
'postponed', 'procedure', 'process', 'pure', 'range', 'record',
'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select',
'severity', 'signal', 'shared', 'sla', 'sll', 'sra',
'srl', 'subtype', 'then', 'to', 'transport', 'type',
'units', 'until', 'use', 'variable', 'wait', 'when',
'while', 'with', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
],
'numbers': [
(r'\d{1,2}#[0-9a-f_]+#?', Number.Integer),
(r'\d+', Number.Integer),
(r'(\d+\.\d*|\.\d+|\d+)E[+-]?\d+', Number.Float),
(r'X"[0-9a-f_]+"', Number.Hex),
(r'O"[0-7_]+"', Number.Oct),
(r'B"[01_]+"', Number.Bin),
],
}
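
# --- Hedged usage sketch (added for illustration; not part of the module):
# how VhdlLexer plugs into the usual highlight() pipeline.  Only the public
# pygments.highlight and TerminalFormatter APIs are assumed; the VHDL
# fragment is invented.
def _demo_vhdl_highlight():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = 'entity counter is\nend entity;\n'
    # 'entity counter' matches the (entity|component) rule above, so
    # 'counter' is emitted as Name.Class in the highlighted output.
    return highlight(code, VhdlLexer(), TerminalFormatter())
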
| 22,520 | Python | 47.328326 | 100 | 0.452043 |