file_path
stringlengths 21
202
| content
stringlengths 19
1.02M
| size
int64 19
1.02M
| lang
stringclasses 8
values | avg_line_length
float64 5.88
100
| max_line_length
int64 12
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
omniverse-code/kit/exts/omni.kit.hydra_texture/omni/hydratexture/__init__.py | from ._hydra_texture import *
| 30 | Python | 14.499993 | 29 | 0.733333 |
omniverse-code/kit/exts/omni.kit.hydra_texture/omni/hydratexture/_hydra_texture.pyi | from __future__ import annotations
import omni.hydratexture._hydra_texture
import typing
import carb._carb
import carb.events._events
import omni.gpu_foundation_factory._gpu_foundation_factory
import omni.usd._usd
__all__ = [
"EVENT_TYPE_DRAWABLE_CHANGED",
"EVENT_TYPE_HYDRA_ENGINE_CHANGED",
"EVENT_TYPE_RENDER_SETTINGS_CHANGED",
"IHydraTexture",
"IHydraTextureFactory",
"TextureGpuReference",
"acquire_hydra_texture_factory_interface"
]
class IHydraTexture():
    """Auto-generated pybind11 stub for a texture rendered by a HydraEngine.

    NOTE(review): indentation reconstructed; signatures mirror the C++ binding
    in ``omni.hydratexture._hydra_texture`` and must not be changed here.
    """
    def cancel_all_picking(self) -> None:
        """
        Cancel any picking or query requests that are in flight or queued.
        """
    def get_aov_info(self, result_handle: int = 0, aov_name: typing.Optional[str] = None, include_texture: bool = False) -> typing.List[dict]:
        """
        Get AOV data during EVENT_TYPE_DRAWABLE_CHANGED as a list of dictionaries: {
            'name' : str,
            'texture': dict(),
        }
        Args:
            include_texture: bool Include a dictionary for the AOV's texture, under the key 'texture'.
        """
    def get_async(self) -> bool:
        """
        Returns whether it is desirable to perform rendering on another thread.
        """
    def get_camera_path(self) -> str:
        """
        Returns the USD camera prim path that will be used by the HydraEngine.
        """
    def get_drawable_ldr_resource(self, result_handle: int = 0) -> omni.gpu_foundation_factory._gpu_foundation_factory.RpResource:
        """
        Get the drawable resource for the low-dynamic-range color buffer.
        """
    def get_drawable_resource(self, result_handle: int = 0, aov_name: str = '') -> omni.gpu_foundation_factory._gpu_foundation_factory.RpResource:
        """
        Get the drawable resource for an AOV.
        Args:
            aov_name: str The name of the AOV for the resource to return, defaults to the low-dynamic-range color AOV.
        """
    def get_event_stream(self) -> carb.events._events.IEventStream:
        """
        Returns the event stream where events like drawable change are pumped.
        """
    def get_frame_info(self, result_handle: int = 0, include_aov_list: bool = False) -> dict:
        """
        Get additional data during EVENT_TYPE_DRAWABLE_CHANGED in dictionary form : {
            'view' : double[16],
            'projection' : double[16],
            'fps' : float,
            'resolution' : (uint32_t, uint32_t),
            'progression' : uint32_t
            'frame_number': size_t
            'subframe_count': uint32_t
            'aovs' : list
        }
        """
    def get_height(self) -> int:
        """
        Returns the texture height.
        """
    def get_hydra_engine(self) -> str:
        """
        Returns HydraEngine that is used currently to render to the associated texture.
        """
    # NOTE(review): the generated stub declared ``-> capsule`` (a PyCapsule);
    # 'capsule' is not a valid Python name, so annotate as Any.
    def get_imgui_reference(self, result_handle: int = 0, aov_name: str = '') -> typing.Any:
        """
        Returns a reference for an AOV that can be used in ImGui-based UI widgets.
        Args:
            aov_name: str The name of the AOV for the reference to return, defaults to the low-dynamic-range color AOV.
        """
    def get_name(self) -> str:
        """
        Returns name of the HydraTexture.
        """
    def get_render_product_path(self) -> str:
        """
        Returns the prim path for the render product
        """
    def get_settings_path(self) -> str:
        """
        Returns string path to the settings section where this HydraTexture tracks its state.
        """
    def get_updates_enabled(self) -> bool:
        """
        Returns viewport updates state.
        """
    def get_usd_context_name(self) -> str:
        """
        Returns name of a USD context associated with the HydraTexture.
        """
    def get_width(self) -> int:
        """
        Returns the texture width.
        """
    def pick(self, x_left: int, y_top: int, x_right: int = 0, y_bottom: int = 0, mode: omni.usd._usd.PickingMode = PickingMode.TRACK, pick_name: str = '', y_down: bool = True) -> None:
        """
        Pick a pixel in the HydraTexture.
        """
    def query(self, x: int, y: int, callback: typing.Optional[typing.Callable[[str, carb._carb.Double3, carb._carb.Uint2], None]] = None, add_outline: bool = False, query_name: str = '', y_down: bool = True) -> None:
        """
        Query a pixel in the HydraTexture.
        """
    def request_pick(self, p0: carb._carb.Uint2, p1: carb._carb.Uint2, mode: omni.usd._usd.PickingMode = PickingMode.TRACK, pick_name: str = '', y_down: bool = True) -> bool:
        """
        Pick a pixel in the HydraTexture.
        """
    @typing.overload
    def request_query(self, pixel: carb._carb.Uint2, callback: typing.Optional[typing.Callable[[str, carb._carb.Double3, carb._carb.Uint2], None]] = None, query_name: str = '', add_outline: bool = False, y_down: bool = True) -> bool:
        """
        Query a pixel in the HydraTexture.
        """
    # NOTE(review): 'handle' in the generated stub is not a valid Python name;
    # annotated as Any. Presumably a raw view/projection matrix handle — verify
    # against the C++ binding.
    @typing.overload
    def request_query(self, pixel: carb._carb.Uint2, callback: typing.Optional[typing.Callable[[str, carb._carb.Double3, carb._carb.Uint2], None]] = None, query_name: str = '', view: typing.Any = None, projection: typing.Any = None, add_outline: bool = False, y_down: bool = True) -> bool: ...
    def set_async(self, is_async: bool) -> None:
        """
        Sets whether it is desirable to perform rendering on another thread.
        """
    def set_camera_path(self, usd_camera_path: str = '/OmniverseKit_Persp') -> None:
        """
        Sets the USD camera prim path that will be used by the HydraEngine.
        """
    def set_height(self, height: int) -> None:
        """
        Sets the texture height.
        """
    def set_hydra_engine(self, hydra_engine_name: str = 'rtx') -> None:
        """
        Sets the desired HydraEngine that should render to the associated texture.
        """
    def set_render_product_path(self, prim_path: str, keep_camera: bool = False, keep_resolution: bool = False) -> bool:
        """
        Sets the prim path for the render product.
        Args:
            prim_path (str): The prim path to a valid UsdRenderProduct.
            keep_camera (bool) = False: Keep the viewport's current camera.
            keep_resolution (bool) = False: Keep the viewport's current resolution.
        """
    def set_updates_enabled(self, updates_enabled: bool = True) -> None:
        """
        Allows to pause/resume viewport updates. When paused, calls to associated HydraEngine are not made.
        """
    def set_width(self, width: int) -> None:
        """
        Sets the texture width.
        """
    @property
    def camera_path(self) -> str:
        """
        Gets/sets the USD camera prim path that will be used by the HydraEngine.
        :type: str
        """
    @camera_path.setter
    def camera_path(self, arg1: str) -> None:
        """
        Gets/sets the USD camera prim path that will be used by the HydraEngine.
        """
    @property
    def height(self) -> int:
        """
        Gets/sets the texture height.
        :type: int
        """
    @height.setter
    def height(self, arg1: int) -> None:
        """
        Gets/sets the texture height.
        """
    @property
    def hydra_engine(self) -> str:
        """
        Gets/sets the desired HydraEngine that should render to the associated texture.
        :type: str
        """
    @hydra_engine.setter
    def hydra_engine(self, arg1: str) -> None:
        """
        Gets/sets the desired HydraEngine that should render to the associated texture.
        """
    @property
    def is_async(self) -> bool:
        """
        Gets/sets whether it is desirable to perform rendering on another thread.
        :type: bool
        """
    @is_async.setter
    def is_async(self, arg1: bool) -> None:
        """
        Gets/sets whether it is desirable to perform rendering on another thread.
        """
    @property
    def updates_enabled(self) -> bool:
        """
        Gets/sets viewport updates state. Allows to pause/resume viewport updates. When paused, calls to associated HydraEngine are not made.
        :type: bool
        """
    @updates_enabled.setter
    def updates_enabled(self, arg1: bool) -> None:
        """
        Gets/sets viewport updates state. Allows to pause/resume viewport updates. When paused, calls to associated HydraEngine are not made.
        """
    @property
    def width(self) -> int:
        """
        Gets/sets the texture width.
        :type: int
        """
    @width.setter
    def width(self, arg1: int) -> None:
        """
        Gets/sets the texture width.
        """
    pass
class IHydraTextureFactory():
    """Auto-generated stub for the factory that creates and tracks IHydraTexture instances."""
    # NOTE(review): both snake_case and camelCase spellings of the async/tick-rate
    # arguments appear; presumably the camelCase ones are deprecated aliases kept
    # for binary compatibility — verify against the C++ binding before removal.
    def create_hydra_texture(self, name: str, width: int, height: int, usd_context_name: str = '', usd_camera_path: str = '/OmniverseKit_Persp', hydra_engine_name: str = 'rtx', is_async: bool = True, is_async_low_latency: bool = False, hydra_tick_rate: int = 0, engine_creation_flags: int = 0, is_asyncLowLatency: bool = False, hydraTickRate: int = 0) -> IHydraTexture:
        """Create a new HydraTexture rendering through ``hydra_engine_name`` into the given USD context."""
    def get_hydra_texture_from_handle(self, handle: int) -> IHydraTexture:
        """Look up an existing HydraTexture by its integer handle."""
    def shutdown(self) -> bool:
        """Tear down the factory; returns success."""
    def startup(self) -> bool:
        """Initialize the factory; returns success."""
    pass
class TextureGpuReference():
    """Auto-generated stub describing a GPU-side reference to a texture."""
    @property
    def gpu_index(self) -> int:
        """
        :type: int
        """
    pass
def acquire_hydra_texture_factory_interface(*args, **kwargs) -> typing.Any:
    """Acquire the ``IHydraTextureFactory`` carb interface (stub; implemented in the native module)."""
    pass
EVENT_TYPE_DRAWABLE_CHANGED = 11183105052706112355
EVENT_TYPE_HYDRA_ENGINE_CHANGED = 13015938697279081123
EVENT_TYPE_RENDER_SETTINGS_CHANGED = 5475677776228743414
| 9,678 | unknown | 36.808594 | 373 | 0.595061 |
omniverse-code/kit/exts/omni.kit.hydra_texture/omni/kit/hydra_texture/scripts/extension.py | import omni.ext
import omni.hydratexture
class Extension(omni.ext.IExt):
    """Extension entry point: owns the HydraTexture factory lifecycle.

    ``on_startup`` acquires and initializes the factory interface;
    ``on_shutdown`` releases it.
    """

    def __init__(self):
        super().__init__()
        # Initialize eagerly so on_shutdown() is safe even if on_startup()
        # never ran or failed before assigning the factory.
        self._hydra_texture_factory = None

    def on_startup(self):
        """Acquire the HydraTexture factory interface and start it up."""
        self._hydra_texture_factory = omni.hydratexture.acquire_hydra_texture_factory_interface()
        self._hydra_texture_factory.startup()

    def on_shutdown(self):
        """Shut down the factory (if it was started) and drop the reference."""
        if self._hydra_texture_factory is not None:
            self._hydra_texture_factory.shutdown()
        self._hydra_texture_factory = None
| 427 | Python | 25.749998 | 97 | 0.653396 |
omniverse-code/kit/exts/omni.kit.hydra_texture/omni/kit/hydra_texture/tests/test_hydra_texture.py | import pathlib
import asyncio
import carb
import carb.settings
import carb.tokens
import omni.kit.app
import omni.kit.test
import omni.hydratexture
import omni.usd
from typing import Callable
# FIXME: omni.ui.ImageProvider holds the carb.Format conversion routine
import omni.ui
from pxr import Gf, UsdRender
EXTENSION_FOLDER_PATH = pathlib.Path(omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__))
DATA_DIR = EXTENSION_FOLDER_PATH.joinpath("data/tests")
class HydraTextureTest(omni.kit.test.AsyncTestCase):
    """Async tests for HydraTexture creation, drawable events, and render products.

    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """

    async def setUp(self):
        # Fresh settings/factory handles and a new empty stage per test.
        self._settings = carb.settings.acquire_settings_interface()
        self._hydra_texture_factory = omni.hydratexture.acquire_hydra_texture_factory_interface()
        self._usd_context_name = ''
        self._usd_context = omni.usd.get_context(self._usd_context_name)
        await self._usd_context.new_stage_async()

    async def tearDown(self):
        self._hydra_texture_factory = None
        self._settings = None
        # Let a few frames tick so pending renderer work drains before the next test.
        wait_iterations = 6
        for i in range(wait_iterations):
            await omni.kit.app.get_app().next_update_async()

    async def _create_hydra_texture_test(self, filename: str, texture_test: Callable,
                                         renderer: str = 'pxr', res_x: int = 320, res_y: int = 320,
                                         engine_options: dict = None):
        """Open a test stage, create a HydraTexture, and run ``texture_test`` against it.

        Always waits a few update frames afterwards (in ``finally``) so the
        texture can be torn down cleanly even if the test body raises.
        """
        wait_iterations = 6
        try:
            if renderer not in self._usd_context.get_attached_hydra_engine_names():
                omni.usd.add_hydra_engine(renderer, self._usd_context)
            test_usd_asset = DATA_DIR.joinpath(filename)
            print("Opening '%s'" % (test_usd_asset))
            await self._usd_context.open_stage_async(str(test_usd_asset))
            if engine_options is None:
                engine_options = {"is_async": False}
            hydra_texture = self._hydra_texture_factory.create_hydra_texture(
                "test_viewport",
                res_x,
                res_y,
                self._usd_context_name,
                "/test_cam",
                renderer,
                **engine_options
            )
            return await texture_test(hydra_texture)
        finally:
            for i in range(wait_iterations):
                await omni.kit.app.get_app().next_update_async()

    # test1 - test1g simulate possible failure on low memory Linux configs from
    # bad gpu-foundation shutdown and startup.
    async def test_1_simple_context_attach(self):
        async def no_more_test(hydra_texture):
            pass
        await self._create_hydra_texture_test('simple_cubes_mat.usda', no_more_test)

    async def test_1b_simple_context_attach(self):
        async def no_more_test(hydra_texture):
            pass
        await self._create_hydra_texture_test('simple_cubes_mat.usda', no_more_test)

    async def test_1c_simple_context_attach(self):
        async def no_more_test(hydra_texture):
            pass
        await self._create_hydra_texture_test('simple_cubes_mat.usda', no_more_test)

    async def test_1d_simple_context_attach(self):
        async def no_more_test(hydra_texture):
            pass
        await self._create_hydra_texture_test('simple_cubes_mat.usda', no_more_test)

    async def test_1e_simple_context_attach(self):
        async def no_more_test(hydra_texture):
            pass
        await self._create_hydra_texture_test('simple_cubes_mat.usda', no_more_test)

    async def test_1f_simple_context_attach(self):
        async def no_more_test(hydra_texture):
            pass
        await self._create_hydra_texture_test('simple_cubes_mat.usda', no_more_test)

    async def test_1g_simple_context_attach(self):
        async def no_more_test(hydra_texture):
            pass
        await self._create_hydra_texture_test('simple_cubes_mat.usda', no_more_test)

    async def test_2_drawable_changed(self):
        """Subscribe to EVENT_TYPE_DRAWABLE_CHANGED and validate AOV/frame payloads."""
        drawable_result = asyncio.Future()

        async def drawable_changed_test(hydra_texture):
            def on_drawable_changed(event: carb.events.IEvent):
                if event.type != omni.hydratexture.EVENT_TYPE_DRAWABLE_CHANGED:
                    carb.log_error("Wrong event captured for DRAWABLE_CHANGED!")
                    return
                result_handle = event.payload['result_handle']
                aov_info = hydra_texture.get_aov_info(result_handle)
                self.assertEqual(aov_info[0]['name'], 'LdrColor')
                ldr_info = hydra_texture.get_aov_info(result_handle, 'LdrColor')
                self.assertEqual(ldr_info[0]['name'], 'LdrColor')
                ldr_info = hydra_texture.get_aov_info(result_handle, 'LdrColor', include_texture=True)
                self.assertEqual(ldr_info[0]['name'], 'LdrColor')
                ldr_tex = ldr_info[0]['texture']
                self.assertEqual(ldr_tex['resolution'][0], 640)
                self.assertEqual(ldr_tex['resolution'][1], 320)
                self.assertEqual(ldr_tex['format'], omni.ui.TextureFormat.RGBA8_UNORM)
                self.assertIsNotNone(ldr_tex['rp_resource'])
                frame_info = hydra_texture.get_frame_info(result_handle)
                self.assertIsNotNone(frame_info['view'])
                self.assertIsNotNone(frame_info['projection'])
                self.assertIsNotNone(frame_info['fps'])
                self.assertIsNotNone(frame_info['resolution'])
                self.assertIsNotNone(frame_info['progression'])
                self.assertIsNotNone(frame_info['frame_number'])
                self.assertIsNotNone(frame_info['device_mask'])
                nonlocal drawable_result
                # EVENT_TYPE_DRAWABLE_CHANGED can be called multiple times before yielding for async loop
                if not drawable_result.done():
                    drawable_result.set_result(True)
            # Return the subscription so the caller keeps it alive until the event fires.
            return hydra_texture.get_event_stream().create_subscription_to_push_by_type(
                omni.hydratexture.EVENT_TYPE_DRAWABLE_CHANGED,
                on_drawable_changed,
                name="Viewport Texture drawable change",
            )

        drawable_change_sub = await self._create_hydra_texture_test('simple_cubes_mat.usda', drawable_changed_test,
                                                                    res_x = 640, res_y = 320)
        result = await drawable_result
        drawable_change_sub = None
        self.assertTrue(result)

    async def test_3_custom_product(self):
        """Switch the HydraTexture between the default and a custom UsdRender.Product."""
        # Build up the custom RenderProduct
        custom_prod_path = '/Render/renderproduct_custom'
        custom_var_path = '/Render/rendervar_custom'
        custom_cam_path = '/test_cam_custom'
        custom_resolution = Gf.Vec2i(512, 512)

        # XXX: This used to be handled entirely by set_render_product_path and usd-abi
        # But usd-abi now validates portions of the RenderProduct.
        # So just set up the complete UsdRender.Product and UsdRender.Var and validate it ourselves along the way.
        async def setup_custom_product(stage):
            custom_prod = UsdRender.Product.Define(stage, custom_prod_path)
            self.assertIsNotNone(custom_prod)
            custom_var = UsdRender.Var.Define(stage, custom_var_path)
            self.assertIsNotNone(custom_var)
            ordered_vars = custom_prod.GetOrderedVarsRel()
            self.assertIsNotNone(ordered_vars)
            ordered_vars.SetTargets([custom_var_path])
            self.assertEqual([sdf_path.pathString for sdf_path in ordered_vars.GetForwardedTargets()], [custom_var_path])
            camera_rel = custom_prod.GetCameraRel()
            self.assertIsNotNone(camera_rel)
            camera_rel.SetTargets([custom_cam_path])
            self.assertEqual([sdf_path.pathString for sdf_path in camera_rel.GetForwardedTargets()], [custom_cam_path])
            res_attr = custom_prod.GetResolutionAttr()
            self.assertIsNotNone(res_attr)
            res_attr.Set(custom_resolution)
            source_name = custom_var.GetSourceNameAttr()
            self.assertIsNotNone(source_name)
            source_name.Set('LdrColor')
            # Wait one frame to make sure Usd edits are absorbed
            return await omni.kit.app.get_app().next_update_async()

        async def product_test(hydra_texture):
            dflt_path = hydra_texture.get_render_product_path()
            await setup_custom_product(self._usd_context.get_stage())
            # Move the product to a custom one keeping width, height, and camera
            success = hydra_texture.set_render_product_path(custom_prod_path, keep_camera=True, keep_resolution=True)
            cstm_path = hydra_texture.get_render_product_path()
            self.assertTrue(success)
            self.assertNotEqual(dflt_path, cstm_path)
            self.assertEqual(hydra_texture.get_camera_path(), '/test_cam')
            self.assertEqual(hydra_texture.get_width(), 320)
            self.assertEqual(hydra_texture.get_height(), 320)
            # Move the product to a custom one, reading width, height, and camera
            success = hydra_texture.set_render_product_path(custom_prod_path)
            cstm_path = hydra_texture.get_render_product_path()
            self.assertTrue(success)
            self.assertNotEqual(dflt_path, cstm_path)
            # XXX: Can't test this here, needs to process the Viewport
            # self.assertEqual(hydra_texture.get_camera_path(), custom_cam_path)
            self.assertEqual(hydra_texture.get_width(), custom_resolution[0])
            self.assertEqual(hydra_texture.get_height(), custom_resolution[1])
            # Move it back to the default
            success = hydra_texture.set_render_product_path(dflt_path)
            self.assertTrue(success)
            self.assertEqual(dflt_path, hydra_texture.get_render_product_path())

        await self._create_hydra_texture_test('custom_product.usda', product_test)

    async def test_tick_rate_creation(self):
        """Test arguments passed to create_hydra_texture"""
        engine_options = {
            "is_async": False,
            "hydra_tick_rate": 30
        }
        async def check_engine_tick_rate(hydra_texture):
            settings = carb.settings.get_settings()
            tick_rate = settings.get(f"{hydra_texture.get_settings_path()}hydraTickRate")
            is_async = settings.get(f"{hydra_texture.get_settings_path()}async")
            self.assertEqual(tick_rate, engine_options.get("hydra_tick_rate"))
            self.assertEqual(is_async, engine_options.get("is_async"))
        await self._create_hydra_texture_test('simple_cubes_mat.usda', check_engine_tick_rate, engine_options=engine_options)

    async def test_async_engine_creation(self):
        """Test arguments passed to create_hydra_texture"""
        engine_options = {
            "is_async": True,
        }
        async def check_engine_async(hydra_texture):
            settings = carb.settings.get_settings()
            is_async = settings.get(f"{hydra_texture.get_settings_path()}async")
            self.assertEqual(is_async, engine_options.get("is_async"))
        await self._create_hydra_texture_test('simple_cubes_mat.usda', check_engine_async, engine_options=engine_options)

    # There isn't a great mechanism for having a test fail mean success; which would allow
    # these two tests that a failure won't result in a crash.
    # Until then, hopefully these can be run on new test additions or manually.
    #
    # async def test_0_disabled_test_failure_wont_crash(self):
    #     self.assertTrue(False)
    # async def test_z_disabled_test_failure_wont_crash(self):
    #     self.assertTrue(False)
| 11,710 | Python | 43.359848 | 125 | 0.624765 |
omniverse-code/kit/exts/omni.kit.hydra_texture/omni/kit/hydra_texture/tests/__init__.py | from .test_hydra_texture import *
| 34 | Python | 16.499992 | 33 | 0.764706 |
omniverse-code/kit/exts/omni.kit.widget.layers/config/extension.toml | [package]
title = "Omni::UI Layer View"
category = "Internal"
feature = true
description = "Omniverse Kit Layer Window"
version = "1.6.11"
keywords = ["layers"]
changelog = "docs/CHANGELOG.md"
authors = ["NVIDIA"]
repository = ""
[dependencies]
"omni.activity.core" = {}
"omni.usd" = {}
"omni.kit.clipboard" = {}
"omni.kit.context_menu" = {}
"omni.client" = {}
"omni.ui" = {}
"omni.kit.window.filepicker" = {}
"omni.kit.window.content_browser" = { optional = true }
"omni.kit.notification_manager" = {}
"omni.kit.window.drop_support" = {}
"omni.kit.widget.stage" = {}
"omni.kit.widget.prompt" = {}
"omni.kit.window.file" = {}
"omni.kit.menu.utils" = {}
"omni.kit.usd.layers" = {}
"omni.kit.collaboration.channel_manager" = {}
"omni.kit.widget.live_session_management" = {}
"omni.kit.actions.core" = {}
"omni.kit.hotkeys.core" = { optional = true }
[[python.module]]
name = "omni.kit.widget.layers"
[[test]]
args = [
"--/rtx/materialDb/syncLoads=true",
"--/omni.kit.plugin/syncUsdLoads=true",
"--/rtx/hydra/materialSyncLoads=true",
"--/app/file/ignoreUnsavedOnExit=true",
"--/renderer/enabled=pxr",
"--/renderer/active=pxr",
"--/renderer/multiGpu/enabled=false",
"--/renderer/multiGpu/autoEnable=false", # Disable mGPU with PXR due to OM-51026, OM-53611
"--/renderer/multiGpu/maxGpuCount=1",
"--/app/asyncRendering=false",
"--/app/file/ignoreUnsavedOnExit=true",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
"--/persistent/app/omniverse/filepicker/options_menu/show_details=false",
"--no-window"
]
dependencies = [
"omni.hydra.rtx",
"omni.kit.hydra_texture",
"omni.kit.commands",
"omni.kit.selection",
"omni.kit.renderer.capture",
"omni.kit.mainwindow",
"omni.kit.window.content_browser",
"omni.kit.ui_test",
"omni.kit.test_suite.helpers"
]
stdoutFailPatterns.include = []
stdoutFailPatterns.exclude = [
"*Failed to acquire interface*while unloading all plugins*",
]
[settings]
exts."omni.kit.widget.layers".enable_server_tests = false | 2,071 | TOML | 27.383561 | 94 | 0.664413 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_link_window.py | import asyncio
import re
from typing import List
import carb
import carb.settings
import omni.kit.app
import omni.kit.ui
import omni.ui as ui
from .models.prim_model import PrimModel
from .layer_model import LayerModel
from .link_delegate import LayerLinkDelegate, PrimLinkDelegate
from omni.kit.window.file import DialogOptions
class LayerLinkWindow(): # pragma: no cover
    """Window that hosts the prim/layer linking UI, toggled from the Window menu.

    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """
    WINDOW_NAME = "Layer Linking"
    MENU_PATH = f"Window/{WINDOW_NAME}"

    def __init__(self):
        self._window = None
        self._prims_widget = None
        self._layerlink_widget = None
        # True: prims panel on the left (stage centric); False: layers on the left.
        self._stage_centric = True
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            self._menu = editor_menu.add_item(LayerLinkWindow.MENU_PATH, self.show_window, toggle=True, value=False)

    def __del__(self):
        self.destroy()

    def destroy(self):
        self._menu = None
        self._clear()

    def _clear(self):
        # Tear down child widgets before the window itself.
        if self._prims_widget:
            self._prims_widget.destroy()
            self._prims_widget = None
        if self._layerlink_widget:
            self._layerlink_widget.destroy()
            self._layerlink_widget = None
        if self._window:
            self._window.destroy()
            self._window = None

    def _set_menu(self, value):
        """Set the menu to create this window on and off"""
        editor_menu = omni.kit.ui.get_editor_menu()
        if editor_menu:
            editor_menu.set_value(LayerLinkWindow.MENU_PATH, value)

    async def _destroy_window_async(self):
        # wait one frame, this is due to the one frame defer
        # in Window::_moveToMainOSWindow()
        await omni.kit.app.get_app().next_update_async()
        self._clear()

    def _visiblity_changed_fn(self, visible):
        self._set_menu(visible)
        if not visible:
            # Destroy the window, since we are creating new window
            # in show_window
            asyncio.ensure_future(self._destroy_window_async())

    def show_window(self, menu, value):
        """Menu callback: build the window on toggle-on, hide it on toggle-off."""
        if value:
            self._build_ui()
            self._window.set_visibility_changed_fn(self._visiblity_changed_fn)
        elif self._window:
            self._window.visible = False

    def _set_centric(self, value):
        # Switching centric mode swaps panel order; rebuild the frame to apply.
        self._stage_centric = value
        self._window.frame.rebuild()

    def _build_ui(self):
        self._window = ui.Window(
            LayerLinkWindow.WINDOW_NAME, width=600, height=400,
            flags=ui.WINDOW_FLAGS_NO_SCROLLBAR | ui.WINDOW_FLAGS_MENU_BAR,
        )
        with self._window.menu_bar:
            with ui.Menu("File"):
                ui.MenuItem(
                    "Load",
                    triggered_fn=lambda: omni.kit.window.file.open()
                )
                ui.MenuItem(
                    "Save",
                    triggered_fn=lambda: omni.kit.window.file.save(dialog_options=DialogOptions.HIDE),
                )
            with ui.Menu("Window"):
                ui.MenuItem(
                    "Stage centric linking",
                    triggered_fn=lambda: self._set_centric(True)
                )
                ui.MenuItem(
                    "Layer centric linking",
                    triggered_fn=lambda: self._set_centric(False),
                )
        """Creates all the widgets in the window"""
        self._style = {
            "Button::filter": {"background_color": 0x0, "margin": 0},
            "Button::options": {"background_color": 0x0, "margin": 0},
            "Button::visibility": {"background_color": 0x0, "margin": 0, "margin_width": 1},
            "Button::visibility:checked": {"background_color": 0x0},
            "Button::visibility:hovered": {"background_color": 0x0},
            "Button::visibility:pressed": {"background_color": 0x0},
            "Label::search": {"color": 0xFF808080, "margin_width": 4},
            "TreeView": {
                "background_color": 0xFF23211F,
                "background_selected_color": 0x664F4D43,
                "secondary_selected_color": 0x0,
                "border_width": 1.5,
            },
            "TreeView.ScrollingFrame": {"background_color": 0xFF23211F},
            "TreeView.Header": {"background_color": 0xFF343432, "color": 0xFFCCCCCC, "font_size": 12},
            "TreeView.Image::object_icon_grey": {"color": 0x80FFFFFF},
            "TreeView.Image:disabled": {"color": 0x60FFFFFF},
            "TreeView.Item": {"color": 0xFF8A8777},
            "TreeView.Item:disabled": {"color": 0x608A8777},
            "TreeView.Item::object_name_grey": {"color": 0xFF4D4B42},
            "TreeView.Item::object_name_missing": {"color": 0xFF6F72FF},
            "TreeView.Item:selected": {"color": 0xFF23211F},
            "TreeView:selected": {"background_color": 0xFF8A8777},
            "TreeView:drop": {
                "background_color": ui.color.shade(ui.color("#34C7FF3B")),
                "background_selected_color": ui.color.shade(ui.color("#34C7FF3B")),
                "border_color": ui.color.shade(ui.color("#2B87AA")),
            },
            "Splitter": {"background_color": 0xFFE0E0E0, "margin_width": 2},
            "Splitter:hovered": {"background_color": 0xFF00707B},
            "Splitter:pressed": {"background_color": 0xFF00003B},
        }
        self._window.frame.set_build_fn(self._on_frame_built)
        self._window.frame.rebuild()

    def _on_frame_built(self):
        # Frame build callback: lay out the two panels split by a draggable splitter.
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        with self._window.frame:
            with ui.HStack(style=self._style):
                with ui.ZStack(width=0):
                    with ui.HStack():
                        if self._stage_centric:
                            self._prims_widget = PrimsWidget(usd_context, stage)
                        else:
                            self._layerlink_widget = LayerLinkWidget(usd_context, stage)
                        ui.Spacer(width=2)
                    with ui.Placer(offset_x=300, draggable=True, drag_axis=ui.Axis.X):
                        ui.Rectangle(width=4, style_type_name_override="Splitter")
                if self._stage_centric:
                    self._layerlink_widget = LayerLinkWidget(usd_context, stage)
                else:
                    self._prims_widget = PrimsWidget(usd_context, stage)
                # Cross-wire the two panels so selection can be synchronized.
                self._prims_widget.set_layerlink_widget(self._layerlink_widget)
                self._layerlink_widget.set_prim_widget(self._prims_widget)
class PrimsWidget(): # pragma: no cover
    """Tree view of stage prims with a search filter, used in the link window.

    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """

    def __init__(self, usd_context, stage):
        self._model = PrimModel(stage)
        self._delegate = PrimLinkDelegate(usd_context)
        self.build_ui()
        # The filtering logic
        self._begin_filter_subscription = self._search.subscribe_begin_edit_fn(
            lambda _: PrimsWidget._set_widget_visible(self._search_label, False)
        )
        self._end_filter_subscription = self._search.subscribe_end_edit_fn(
            lambda m: self._filter_by_text(m.as_string)
            or PrimsWidget._set_widget_visible(self._search_label, not m.as_string)
        )
        self._stage_subscription = usd_context.get_stage_event_stream().create_subscription_to_pop(
            self._on_stage_event, name="Layer link window"
        )

    def __del__(self):
        self.destroy()

    def destroy(self):
        # Dropping subscriptions detaches the callbacks.
        self._begin_filter_subscription = None
        self._end_filter_subscription = None
        self._stage_subscription = None
        if self._model:
            self._model.destroy()
            self._model = None
        self._delegate = None

    def build_ui(self):
        self._stack = ui.VStack()
        with self._stack:
            ui.Spacer(height=4)
            with ui.ZStack(height=0):
                # Search field
                self._search = ui.StringField(name="search").model
                # The label on the top of the search field
                self._search_label = ui.Label("Search", name="search")
            ui.Spacer(height=7)
            with ui.ScrollingFrame(
                horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
                style_type_name_override="TreeView.ScrollingFrame",
            ):
                with ui.ZStack():
                    self._tree_view = ui.TreeView(
                        self._model,
                        delegate=self._delegate,
                        column_widths=[20, 20, ui.Fraction(1)],
                        header_visible=False,
                        root_visible=False,
                        columns_resizable=False,
                    )
                    self._delegate.set_tree_view(self._tree_view)

    def _on_stage_event(self, event: carb.events.IEvent):
        if event.type == int(omni.usd.StageEventType.OPENED):
            stage = omni.usd.get_context().get_stage()
            self._open_stage(stage)
        elif event.type == int(omni.usd.StageEventType.CLOSING):
            self._open_stage(None)

    def _open_stage(self, stage):
        """Called when opening a new stage"""
        if self._model:
            self._model.destroy()
        self._model = PrimModel(stage)
        if self._tree_view:
            self._tree_view.model = self._model

    def _filter_by_text(self, filter_text: str):
        """Set the search filter string to the models and widgets"""
        self._tree_view.keep_alive = not not filter_text
        self._tree_view.keep_expanded = not not filter_text
        self._tree_view.model.filter_by_text(filter_text)

    def set_width(self, width):
        self._stack.width = ui.Pixel(width)

    @staticmethod
    def _set_widget_visible(widget: ui.Widget, visible):
        """Utility for using in lambdas"""
        widget.visible = visible

    def set_layerlink_widget(self, layerlink_widget):
        # Forwarded to the delegate so row actions can reach the layer panel.
        self._delegate.set_layerlink_widget(layerlink_widget)

    def select(self, spec_paths: List[str]):
        """Select the tree items matching the given spec paths (best effort)."""
        selection = []
        for path in spec_paths:
            # Split on both '/' and '.' so property specs resolve too.
            path_subs = re.split('[/.]', path.strip('/'))
            item = self._model.find(path_subs)
            if item:
                selection.append(item)
        self._tree_view.selection = selection

    def get_select_specs(self):
        """Return the selected items' prim paths as strings."""
        specs = []
        for item in self._tree_view.selection:
            specs.append(item.path.pathString)
        return specs
class LayerLinkSettings: # pragma: no cover
    """Settings bundle passed to LayerModel for the link window.

    All flags default to False except ``enable_spec_linking_mode``, which is
    read from the persistent carb setting below.
    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """
    SETTINGS_ENABLE_SPEC_LINKING_MODE = "/persistent/app/layerwindow/enableSpecLinkingMode"

    def __init__(self):
        self._settings = carb.settings.get_settings()
        self._show_missing_reference = False
        self._show_layer_contents = False
        self._show_session_layer = False
        self._show_metricsassembler_layer = False
        self._show_layer_file_extension = False
        self._file_dialog_show_root_layer_location = False
        self._show_info_notification = False
        self._show_warning_notification = False
        self._enable_spec_linking_mode = self._settings.get_as_bool(LayerLinkSettings.SETTINGS_ENABLE_SPEC_LINKING_MODE)
        self._show_merge_or_flatten_warning = False
        self._enable_auto_authoring_mode = False

    @property
    def show_missing_reference(self):
        return self._show_missing_reference

    @show_missing_reference.setter
    def show_missing_reference(self, show: bool):
        self._show_missing_reference = show

    @property
    def show_layer_contents(self):
        return self._show_layer_contents

    @show_layer_contents.setter
    def show_layer_contents(self, show: bool):
        self._show_layer_contents = show

    @property
    def show_session_layer(self):
        return self._show_session_layer

    @show_session_layer.setter
    def show_session_layer(self, show: bool):
        self._show_session_layer = show

    @property
    def show_metricsassembler_layer(self):
        return self._show_metricsassembler_layer

    @show_metricsassembler_layer.setter
    def show_metricsassembler_layer(self, show: bool):
        self._show_metricsassembler_layer = show

    @property
    def show_layer_file_extension(self):
        return self._show_layer_file_extension

    @show_layer_file_extension.setter
    def show_layer_file_extension(self, show: bool):
        self._show_layer_file_extension = show

    @property
    def file_dialog_show_root_layer_location(self):
        return self._file_dialog_show_root_layer_location

    @file_dialog_show_root_layer_location.setter
    def file_dialog_show_root_layer_location(self, root_layer: bool):
        self._file_dialog_show_root_layer_location = root_layer

    @property
    def show_info_notification(self):
        return self._show_info_notification

    @show_info_notification.setter
    def show_info_notification(self, enabled: bool):
        self._show_info_notification = enabled

    @property
    def show_warning_notification(self):
        return self._show_warning_notification

    @show_warning_notification.setter
    def show_warning_notification(self, enabled: bool):
        self._show_warning_notification = enabled

    @property
    def enable_auto_authoring_mode(self):
        return self._enable_auto_authoring_mode

    @enable_auto_authoring_mode.setter
    def enable_auto_authoring_mode(self, enabled: bool):
        self._enable_auto_authoring_mode = enabled

    @property
    def enable_spec_linking_mode(self):
        return self._enable_spec_linking_mode

    @enable_spec_linking_mode.setter
    def enable_spec_linking_mode(self, enabled: bool):
        self._enable_spec_linking_mode = enabled

    @property
    def show_merge_or_flatten_warning(self):
        return self._show_merge_or_flatten_warning

    @show_merge_or_flatten_warning.setter
    def show_merge_or_flatten_warning(self, enabled: bool):
        self._show_merge_or_flatten_warning = enabled
class LayerLinkWidget(): # pragma: no cover
    """TreeView-based widget showing layer items for spec linking, with a
    search field that filters the tree by text."""
    def __init__(self, usd_context, stage):
        self._delegate = LayerLinkDelegate(usd_context)
        self._model = LayerModel(usd_context, layer_settings=LayerLinkSettings())
        self._model.add_stage_attach_listener(self._on_stage_attached)
        self.build_ui()
        # The filtering logic: hide the "Search" placeholder label while the
        # field is being edited, and re-show it when the text is cleared.
        self._begin_filter_subscription = self._search.subscribe_begin_edit_fn(
            lambda _: LayerLinkWidget._set_widget_visible(self._search_label, False)
        )
        self._end_filter_subscription = self._search.subscribe_end_edit_fn(
            lambda m: self._filter_by_text(m.as_string)
            or LayerLinkWidget._set_widget_visible(self._search_label, not m.as_string)
        )
    def __del__(self):
        self.destroy()
    def destroy(self):
        """Release the model, delegate, and edit subscriptions."""
        if self._model:
            self._model.destroy()
            self._model = None
        if self._delegate:
            self._delegate.destroy()
            self._delegate = None
        self._begin_filter_subscription = None
        self._end_filter_subscription = None
    def build_ui(self):
        """Build the search field and the scrolling layer tree view."""
        with ui.VStack():
            ui.Spacer(height=4)
            with ui.ZStack(height=0):
                # Search field
                self._search = ui.StringField(name="search").model
                # The label on the top of the search field
                self._search_label = ui.Label("Search", name="search")
            ui.Spacer(height=7)
            with ui.ScrollingFrame(
                horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
                style_type_name_override="TreeView.ScrollingFrame",
            ):
                with ui.ZStack():
                    self._tree_view = ui.TreeView(
                        self._model,
                        delegate=self._delegate,
                        column_widths=[ui.Fraction(1), 0, 0, 0, 0, 0],
                        header_visible=False,
                        root_visible=False,
                        drop_between_items=True,
                    )
                    self._delegate.set_tree_view(self._tree_view)
    def _on_stage_attached(self, attached: bool):
        # Forward stage-attach so the delegate can reset its one-shot state.
        if attached:
            self._delegate.on_stage_attached()
    def set_prim_widget(self, prim_widegt: PrimsWidget):
        # NOTE(review): parameter name has a typo ("widegt"); renaming could
        # break keyword-argument callers, so it is kept as-is.
        self._delegate.set_prim_widget(prim_widegt)
    def select(self, layers):
        """Select the tree items matching the given layer identifiers."""
        selection = []
        for layer in layers:
            item = self._model.get_layer_item_by_identifier(layer)
            if item:
                selection.append(item)
        self._tree_view.selection = selection
    def get_select_layers(self):
        """Return identifiers of the currently selected layer items."""
        layers = [selected.identifier for selected in self._tree_view.selection]
        return layers
    @staticmethod
    def _set_widget_visible(widget: ui.Widget, visible):
        """Utility for using in lambdas"""
        widget.visible = visible
    def _filter_by_text(self, filter_text: str):
        """Set the search filter string to the models and widgets"""
        self._tree_view.visible = True
        self._tree_view.keep_alive = not not filter_text
        self._tree_view.keep_expanded = not not filter_text
        self._tree_view.model.filter_by_text(filter_text)
| 17,187 | Python | 35.492569 | 120 | 0.588119 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_color_scheme.py | from .singleton import Singleton
@Singleton
class LayerColorScheme:
    """Singleton table of 0xAABBGGRR color constants used by the layer widgets."""
    def __init__(self):
        import carb.settings
        ui_style = carb.settings.get_settings().get_as_string("/persistent/app/window/uiStyle")
        self._style = ui_style or "NvidiaDark"
        # Most colors are shared between the dark and light styles.
        self.EDIT_TARGET_BACKGROUND_COLOR = 0x668A8778
        self.NON_EDIT_TARGET_BACKGROUND_COLOR = 0x0
        self.LAYER_LABEL_DISABLED = 0xFFA0A0A0
        self.LAYER_LABEL_MISSING = 0xFF6F72FF
        self.LAYER_LABEL_MISSING_SELECTED = 0xFF2424AE
        # The normal label color is the only one that differs per style.
        self.LAYER_LABEL_NORMAL = 0xFF8A8777 if self._style == "NvidiaDark" else 0xFF535354
        self.LAYER_MUTE_BUTTON_DISABLED = 0xFF808080
        self.LAYER_MUTE_BUTTON_ENABLED = 0xFFFFFFFF
        self.LAYER_SAVE_BUTTON_NOT_DIRTY = 0xFF808080
        self.LAYER_SAVE_BUTTON_DIRTY = 0xFFFF901E
        self.LAYER_SAVE_BUTTON_READ_ONLY = 0xFF6F72FF
        self.LAYER_LIVE_MODE_BUTTON_DISABLED = 0xFF808080
        self.LAYER_LIVE_MODE_BUTTON_ENABLED = 0xFF00B976
        self.LAYER_LOCK_BUTTON_LOCKED_BY_ME = 0xFFFF901E
        self.OUTDATED = 0xFF0097FF
| 1,182 | Python | 31.86111 | 114 | 0.668359 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_delegate.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import weakref
from .prim_spec_item import PrimSpecItem
from .layer_item import LayerItem
from .layer_icons import LayerIcons
from .layer_widgets import build_layer_widget, build_prim_spec_widget
from .layer_model_utils import LayerModelUtils
from .context_menu import ContextMenu, ContextMenuEvent
from pathlib import Path
from omni import ui
# Resolve the extension's icon directory relative to this module's location.
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("icons")
def on_mouse_pressed(button, context_menu: weakref, item: weakref, expanded):
    """Open the context menu when an item is right-clicked.

    Both `context_menu` and `item` are weak references; the handler is a no-op
    when either has already been garbage-collected.
    """
    # Button 1 is the right mouse button; ignore every other button.
    if button != 1:
        return
    if not context_menu() or not item():
        return
    # Build the event describing the clicked item and hand it to the menu.
    menu_event = ContextMenuEvent(item, expanded)
    context_menu().on_mouse_event(menu_event)
class LayerDelegate(ui.AbstractItemDelegate):
    """TreeView delegate that builds the branch (+/-) widgets and the row widgets
    for layer and prim-spec items, and wires up the right-click context menu.
    """
    def __init__(self, usd_context):
        super().__init__()
        self._usd_context = usd_context
        self._context_menu = ContextMenu(self._usd_context)
        self._tree_view = None
        self._initialized = False
        # Weakrefs of the LayerItems highlighted last time, so their
        # `selected` flag can be cleared on the next selection change.
        self._last_selected_layer_items = []
    def on_stage_attached(self):
        # Force build_widget() to re-expand the root item once for the new stage.
        self._initialized = False
    def destroy(self):
        self._last_selected_layer_items.clear()
    def set_tree_view(self, tree_view: ui.TreeView):
        """Bind this delegate to `tree_view` and install the background
        right-click handler that opens the context menu."""
        self._tree_view = weakref.ref(tree_view)
        self._context_menu.tree_view = self._tree_view
        def _on_mouse_pressed(x, y, button, c):
            if button != 1:
                # It's for context menu only
                return
            if not self._context_menu:
                return
            treeview = self._tree_view()
            # BUGFIX: this check previously referenced the undefined name
            # `tree_view`, raising NameError whenever the tree background
            # was right-clicked after the bound TreeView was collected.
            if not treeview:
                return
            root_layer_item = treeview.model.root_layer_item
            if not LayerModelUtils.can_edit_sublayer(root_layer_item):
                return
            # Form the event
            event = ContextMenuEvent(None, False)
            self._context_menu.on_mouse_event(event)
        if self._tree_view():
            self.on_selection_changed(self._tree_view().selection)
        self._tree_view().set_mouse_pressed_fn(_on_mouse_pressed)
    def on_selection_changed(self, selection):
        """Sync the `selected` flag of layer items with the new tree selection."""
        for item in self._last_selected_layer_items:
            if item():
                item().selected = False
        self._last_selected_layer_items.clear()
        for item in selection:
            if isinstance(item, LayerItem):
                item.selected = True
                self._last_selected_layer_items.append(weakref.ref(item))
    def build_branch(self, model, item, column_id, level, expanded):
        """Create a branch widget that opens or closes subtree"""
        if not isinstance(item, PrimSpecItem) and not isinstance(item, LayerItem):
            return
        if column_id == 0:
            is_layer_item = isinstance(item, LayerItem)
            if model.can_item_have_children(item):
                if is_layer_item:
                    # Live-session rows are drawn slightly taller so the base
                    # layer and its session layer read as one visual block.
                    if not item.is_live_session_layer or not item.is_in_live_session:
                        background_height = 28
                    else:
                        background_height = 30
                    with ui.ZStack(height=32):
                        with ui.VStack():
                            if not item.is_in_live_session:
                                ui.Spacer()
                            with ui.HStack(height=background_height):
                                ui.Spacer(width=4)
                                if item.selected:
                                    ui.Rectangle(name="selected")
                                else:
                                    ui.Rectangle(name="normal")
                            if not item.is_live_session_layer:
                                ui.Spacer()
                        if item.is_live_session_layer:
                            # Separator line under a live session layer row.
                            with ui.VStack():
                                ui.Spacer()
                                with ui.HStack(height=1):
                                    ui.Spacer(width=4)
                                    ui.Line(style={"color": 0xFF606060})
                        with ui.VStack(height=32):
                            ui.Spacer()
                            with ui.HStack(width=16 * (level + 2), height=0):
                                ui.Spacer()
                                # Draw the +/- icon
                                image_name = "Minus" if expanded else "Plus"
                                ui.Image(
                                    LayerIcons().get(image_name), width=10, height=10, style_type_name_override="LayerView.Item"
                                )
                                ui.Spacer(width=4)
                            ui.Spacer()
                else:
                    with ui.HStack(width=16 * (level + 2), height=0):
                        ui.Spacer()
                        # Draw the +/- icon
                        image_name = "Minus" if expanded else "Plus"
                        ui.Image(
                            LayerIcons().get(image_name), width=10, height=10, style_type_name_override="LayerView.Item"
                        )
                        ui.Spacer(width=4)
            elif is_layer_item:
                # Childless layer item: draw only the row background, no icon.
                if not item.is_live_session_layer or not item.is_in_live_session:
                    background_height = 28
                else:
                    background_height = 30
                with ui.ZStack(height=32):
                    with ui.VStack():
                        if not item.is_in_live_session:
                            ui.Spacer()
                        with ui.HStack(height=background_height):
                            ui.Spacer(width=4)
                            if item.selected:
                                ui.Rectangle(name="selected")
                            else:
                                ui.Rectangle(name="normal")
                        if not item.is_live_session_layer:
                            ui.Spacer()
                    if item.is_live_session_layer:
                        with ui.VStack():
                            ui.Spacer()
                            with ui.HStack(height=1):
                                ui.Spacer(width=4)
                                ui.Line(style={"color": 0xFF606060})
                    with ui.HStack(width=16 * (level + 2), height=0):
                        ui.Spacer()
            else:
                # Childless prim spec item: reserve the indentation only.
                with ui.HStack(width=16 * (level + 2), height=0):
                    ui.Spacer()
    def build_widget(self, model, item, column_id, level, expanded):
        """Create a widget per item"""
        if isinstance(item, PrimSpecItem):
            widget = ui.ZStack(height=0)
            with widget:
                build_prim_spec_widget(self._context_menu, model, item, column_id, expanded)
        elif isinstance(item, LayerItem):
            # Expand the root layer item exactly once after a stage attach.
            if not self._initialized and self._tree_view() and item == model.root_layer_item:
                self._initialized = True
                self._tree_view().set_expanded(item, True, False)
            if not item.is_live_session_layer or not item.is_in_live_session:
                background_height = 28
            else:
                background_height = 30
            widget = ui.ZStack(height=32)
            with widget:
                with ui.VStack():
                    if not item.is_in_live_session:
                        ui.Spacer()
                    if item.selected:
                        ui.Rectangle(name="selected", height=background_height)
                    else:
                        ui.Rectangle(name="normal", height=background_height)
                    # TRICK to bound live session layer and base layer together visually
                    if not item.is_live_session_layer:
                        ui.Spacer()
                if item.is_live_session_layer:
                    with ui.VStack():
                        ui.Spacer()
                        ui.Line(height=1, style={"color": 0xFF606060})
                with ui.VStack(height=32):
                    ui.Spacer()
                    with ui.HStack(height=20, identifier="layer_widget"):
                        build_layer_widget(self._context_menu, model, item, column_id, expanded)
                    ui.Spacer()
        else:
            widget = None
        if widget:
            # Hold only weak references in the callback so the delegate does
            # not keep the menu or the item alive.
            weakref_menu = weakref.ref(self._context_menu)
            weakref_item = weakref.ref(item)
            widget.set_mouse_pressed_fn(
                lambda x, y, b, _: on_mouse_pressed(b, weakref_menu, weakref_item, expanded)
            )
    def build_header(self, column_id):
        pass
    def set_highlighting(self, enable: bool = None, text: str = None):
        pass
| 9,484 | Python | 38.686192 | 128 | 0.49167 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/globals.py |
class LayerGlobals:
    """Process-wide bookkeeping for sublayers that could not be loaded."""

    # Identifiers of layers known to be missing from disk/server.
    _missing_layer_map = set()

    @staticmethod
    def on_stage_attached(stage):
        """Hook invoked when a stage is attached; currently a no-op."""
        pass

    @staticmethod
    def on_stage_detached():
        """Hook invoked when the stage is detached; currently a no-op."""
        pass

    @staticmethod
    def is_layer_missing(layer_identifier: str) -> bool:
        """Return True when `layer_identifier` was recorded as missing."""
        known_missing = LayerGlobals._missing_layer_map
        return layer_identifier in known_missing

    @staticmethod
    def add_missing_layer(layer_identifier: str):
        """Record `layer_identifier` as a missing layer."""
        LayerGlobals._missing_layer_map.add(layer_identifier)
| 456 | Python | 20.761904 | 66 | 0.662281 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_item.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import omni
import omni.ui as ui
import omni.usd
import os
import omni.kit.usd.layers as layers
from .models.layer_live_update_model import LayerLiveUpdateModel
from .models.lock_model import LockModel
from .models.muteness_model import MutenessModel
from .models.layer_name_model import LayerNameModel
from .models.save_model import SaveModel
from .models.layer_latest_model import LayerLatestModel
from .models.live_session_user_model import LiveSessionUserModel
from .layer_settings import LayerSettings
from .path_utils import PathUtils
from .prim_spec_item import PrimSpecItem
from typing import List, Dict, Set, Union
from pxr import Sdf, Trace
from omni.kit.usd.layers import LayerUtils
class LayerItem(ui.AbstractItem):
"""A single AbstractItemModel item that represents a single sublayer"""
    def __init__(self, usd_context, identifier: str, layer: Sdf.Layer, model, parent_item):
        """Build a tree item for one sublayer.

        Args:
            usd_context: The omni.usd context that owns the stage.
            identifier: Identifier of the layer this item represents.
            layer: The Sdf.Layer handle, or None when the layer is missing.
            model: The owning LayerModel.
            parent_item: Parent LayerItem, or None for top-level items.
        """
        super().__init__()
        self._usd_context = usd_context
        self._model = model
        self._layer = layer
        self._parent = parent_item
        # Models (one per widget/column rendered for this row)
        self._name_model = LayerNameModel(self)
        self._save_model = SaveModel(self)
        self._local_mute_model = MutenessModel(self._usd_context, self, True)
        self._global_mute_model = MutenessModel(self._usd_context, self, False)
        self._live_update_model = LayerLiveUpdateModel(self._usd_context, self)
        self._latest_model = LayerLatestModel(self._usd_context, self)
        self._lock_model = LockModel(self)
        self._first_live_session_user_model = LiveSessionUserModel(None)
        self._second_live_session_user_model = LiveSessionUserModel(None)
        self._ellipsis_model = ui.SimpleStringModel("...")
        # Children
        self._sublayers: List[LayerItem] = []
        # Prim Spec of Sdf.Path.absoluteRootPath.
        # It's root of children specs.
        self._absolute_root_prim_spec = PrimSpecItem(self._usd_context, Sdf.Path.absoluteRootPath, self)
        # Cache for quick prim item access indexed by prim path
        self._prim_specs_cache: Dict[Sdf.Path, PrimSpecItem] = {}
        self._prim_specs_cache[Sdf.Path.absoluteRootPath] = self._absolute_root_prim_spec
        # Filtering for search.
        self._filtered = False
        # True if it has a child that is filtered
        self._child_filtered = False
        # If this item is root layer or session layer
        self._is_reserved_layer = False
        # Layer info
        self._identifier = identifier
        self._is_omni_layer = PathUtils.is_omni_path(self._identifier)
        self._is_omni_live_layer = PathUtils.is_omni_live(self._identifier)
        stage = self._usd_context.get_stage()
        # Gets layer name and the layers-extension sub-interfaces used below.
        self._layers = layers.get_layers(self._usd_context)
        self._layers_state = self._layers.get_layers_state()
        self._layers_specs_locking = self._layers.get_specs_locking()
        self._layers_specs_linking = self._layers.get_specs_linking()
        self._layers_live_syncing = self._layers.get_live_syncing()
        if stage.GetRootLayer().identifier == identifier:
            self._is_reserved_layer = True
        elif stage.GetSessionLayer().identifier == identifier:
            self._is_reserved_layer = True
        # If this layer is missing.
        self._is_missing_layer = False if self._layer else True
        # A live session layer is the .live root layer of a live session.
        live_session = self._layers_live_syncing.get_live_session_for_live_layer(self._identifier)
        self._is_live_session_layer = live_session is not None
        if self._is_live_session_layer:
            self._name = f"Session '{live_session.name}'"
            self.__update_live_session_user_models(live_session)
            self._session_url = live_session.url
        else:
            self._name = self._layers_state.get_layer_name(identifier)
            self._session_url = None
        # Keep the peer-user avatars in sync with session join/leave events.
        self._user_join_event_subscription = self._layers.get_event_stream().create_subscription_to_pop_by_type(
            layers.LayerEventType.LIVE_SESSION_USER_JOINED,
            self._on_layer_events, name="Layers Item User Join"
        )
        self._user_left_event_subscription = self._layers.get_event_stream().create_subscription_to_pop_by_type(
            layers.LayerEventType.LIVE_SESSION_USER_LEFT,
            self._on_layer_events, name="Layers Item User Left"
        )
        self._is_read_only = self._layers_state.is_layer_readonly_on_disk(identifier)
        # If this item is edit target
        self._is_edit_target = False
        # If this item includes children that are edit targets
        self._has_edit_target = False
        # Initializes edit layer status.
        # Edit layer is a concept that when stage is in auto-authoring mode,
        # it's where the new prims will be hosted in.
        self._is_edit_layer_in_auto_authoring_mode = False
        # If this item includes children that are edit layers
        self._has_edit_layer = False
        # If layer is dirty or not
        self._is_dirty = self._layer.dirty if self._layer else False
        # Current saved muteness, which is used to check if it's changed.
        self._old_mute_state = stage.IsLayerMuted(self.identifier)
        # Current saved lock status, which is used to check if it's changed.
        self._locked_status = self._layers_state.is_layer_locked(self._identifier)
        # If the layer item is selected in the treeview.
        # FIXME: WA for refreshing selection status.
        self._selected = False
        # If this layer item is in the session layer tree.
        self._is_from_session_layer = False
        self._outdated = self._layers_state.is_layer_outdated(self._identifier)
        self._auto_reload = self._layers_state.is_auto_reload_layer(self._identifier)
        self._has_content = len(self._layer.rootPrims) > 0 if self._layer else False
        self.update_flags()
def destroy(self):
if self._name_model:
self._name_model.destroy()
self._name_model = None
if self._local_mute_model:
self._local_mute_model.destroy()
self._local_mute_model = None
if self._global_mute_model:
self._global_mute_model.destroy()
self._global_mute_model = None
if self._save_model:
self._save_model.destroy()
self._save_model = None
if self._lock_model:
self._lock_model.destroy()
self._lock_model = None
if self._live_update_model:
self._live_update_model.destroy()
self._live_update_model = None
if self._latest_model:
self._latest_model.destroy()
self._latest_model = None
if self._first_live_session_user_model:
self._first_live_session_user_model.destroy()
self._first_live_session_user_model = None
if self._second_live_session_user_model:
self._second_live_session_user_model.destroy()
self._second_live_session_user_model = None
self._ellipsis_model = None
for sublayer in self.sublayers:
sublayer.destroy()
self.sublayers.clear()
if self._absolute_root_prim_spec:
self._absolute_root_prim_spec.destroy()
self._clear_cache()
self._layer = None
self._layers = None
self._layers_state = None
self._layers_specs_linking = None
self._layers_specs_locking = None
self._layers_live_syncing = None
self._model = None
self._parent = None
self._user_join_event_subscription = None
self._user_left_event_subscription = None
def _on_layer_events(self, event):
payload = layers.get_layer_event_payload(event)
if not payload:
return
if (
payload.event_type == layers.LayerEventType.LIVE_SESSION_USER_JOINED or
payload.event_type == layers.LayerEventType.LIVE_SESSION_USER_LEFT
):
# Updates peer user models for live session layer only.
if not self.is_live_session_layer:
return
# Gets base layer identifier for this live session layer
base_layer = self.base_layer
if not base_layer:
carb.log_warn(f"Base layer for live layer {self._identifier} cannot be found.")
return
base_layer_identifier = base_layer.identifier
if not payload.is_layer_influenced(base_layer_identifier):
return
live_session = self._layers_live_syncing.get_current_live_session(base_layer_identifier)
if live_session:
self.__update_live_session_user_models(live_session)
    @property
    def layer(self):
        """The backing Sdf.Layer handle, or None when the layer is missing."""
        return self._layer
    @property
    def is_omni_layer(self):
        """If this layer lives on an Omniverse (omni) path."""
        return self._is_omni_layer
    @property
    def is_omni_live_path(self):
        """If this layer's identifier is an Omniverse live path."""
        return self._is_omni_live_layer
    @property
    def model(self):
        """The owning LayerModel."""
        return self._model
    @property
    def usd_context(self):
        """The omni.usd context this item was created with."""
        return self._usd_context
    @property
    def from_session_layer(self):
        """If this layer is under the session layer."""
        return self._is_from_session_layer
    @property
    def sublayers(self):
        """Sublayer items under this layer."""
        return self._sublayers
    @sublayers.setter
    def sublayers(self, value):
        self._sublayers = value
    @property
    def absolute_root_spec(self):
        """The prim spec that represents the root of prim tree."""
        return self._absolute_root_prim_spec
    @property
    def prim_specs(self):
        """Prim specs under this layer."""
        return self._absolute_root_prim_spec.children
    @property
    def outdated(self):
        """If a newer version of this layer exists (see on_layer_outdate_state_changed)."""
        return self._outdated
    @property
    def has_children(self):
        """If this item has sublayer items or any prim spec children."""
        return len(self._sublayers) > 0 or self._absolute_root_prim_spec.has_children
    @property
    def parent(self):
        """Parent layer item."""
        return self._parent
    @property
    def filtered(self):
        """If this prim spec is filtered in the search list."""
        return self._filtered or self._child_filtered
    @property
    def identifier(self):
        """The layer identifier this item represents."""
        return self._identifier
    @property
    def name(self):
        """Display name (layer name, or "Session '<name>'" for live session layers)."""
        return self._name
    @property
    def reserved(self):
        """If this is the root or session layer."""
        return self._is_reserved_layer
    @property
    def anonymous(self):
        """If the layer (or its identifier, when the layer is missing) is anonymous."""
        if self._layer:
            return self._layer.anonymous
        else:
            return Sdf.Layer.IsAnonymousLayerIdentifier(self._identifier)
    @property
    def dirty(self):
        """If this layer has unsaved changes."""
        return self._is_dirty
    @dirty.setter
    def dirty(self, value):
        if self._is_dirty != value:
            self._is_dirty = value
            # Refresh the save-button widget.
            self._save_model._value_changed()
    @property
    def auto_reload(self):
        """Reload changes automatically."""
        return self._auto_reload
    @auto_reload.setter
    def auto_reload(self, value):
        if self._auto_reload != value:
            self._auto_reload = value
    @property
    def version(self):
        """The version of this layer. It only applies to omniverse layer."""
        return ""
    @property
    def latest(self):
        """If this layer is latest. It only applies to omniverse layer."""
        if self.is_omni_live_path:
            return True
        return not self._outdated
    @property
    def live(self):
        """If this live is in live sync. It only applies to omniverse layer."""
        if not self._layer or not self.can_live_update:
            return False
        # A layer is live when its file extension is ".live".
        _, ext = os.path.splitext(self.identifier)
        if ext == '.live':
            return True
        return False
    @property
    def is_live_session_layer(self):
        """
        A layer is a live session layer if it's from a live session and it's the root layer
        of that session with extension .live.
        """
        return self._is_live_session_layer
    @property
    def base_layer(self):
        """
        If this layer is a live session layer, this property can be used to access its
        base layer item.
        """
        if not self.is_live_session_layer:
            return None
        current_live_session = self._layers_live_syncing.get_live_session_for_live_layer(self.identifier)
        return self.model.get_layer_item_by_identifier(current_live_session.base_layer_identifier)
    @property
    def is_in_live_session(self):
        """
        A layer is in live session means it joins a live session. This is only true when it's
        the base layer of the live session. For live session layer, it's false.
        """
        return self._layers_live_syncing.is_layer_in_live_session(self.identifier)
    @property
    def live_session_layer(self):
        """
        If this layer is in live session, this property can be used to access its
        corresponding live session layer item.
        """
        if not self.is_in_live_session:
            return None
        current_live_session = self._layers_live_syncing.get_current_live_session(self.identifier)
        return self.model.get_layer_item_by_identifier(current_live_session.root)
    @property
    def current_live_session(self):
        """
        If this layer is in a live session or it's a live session layer, it's to return the live session.
        """
        if self._session_url:
            return self._layers_live_syncing.get_live_session_by_url(self._session_url)
        else:
            return self._layers_live_syncing.get_current_live_session(self.identifier)
    @property
    def muted_or_parent_muted(self):
        """If this layer is muted or its parent is muted."""
        # Walk up until an unmuted chain ends; `not not` coerces to bool.
        parent = self
        while parent and not parent.muted:
            parent = parent.parent
        return not not parent
@property
def muted(self):
stage = self._usd_context.get_stage()
if not stage:
return True
return stage.IsLayerMuted(self._identifier)
@muted.setter
def muted(self, value):
stage = self._usd_context.get_stage()
if not stage:
return True
omni.kit.commands.execute("SetLayerMuteness", layer_identifier=self._identifier, muted=value)
    @property
    def globally_muted(self):
        """Globally mute is the mute value saved in custom data. The muteness of USD
        layer is dependent on the muteness scope (local or global). When it's in global mode,
        the muteness of USD layer is the same as this value."""
        return self._layers_state.is_layer_globally_muted(self._identifier)
    @property
    def locally_muted(self):
        """Local mute is the muteness when it's in local scope."""
        return self._layers_state.is_layer_locally_muted(self._identifier)
    @property
    def selected(self):
        """If this layer is selected in layer window."""
        return self._selected
    @selected.setter
    def selected(self, value):
        if self._selected != value and self.model:
            self._selected = value
            # Redraw this row with the new selection highlight.
            self.model._item_changed(self)
    @property
    def missing(self):
        """If the backing layer could not be loaded."""
        return self._is_missing_layer
    @missing.setter
    def missing(self, value):
        self._is_missing_layer = value
        # Refresh the name label (missing layers use a different color).
        self._name_model._value_changed()
    @property
    def editable(self):
        """If the layer is writable according to the layers-state interface."""
        return self._layers_state.is_layer_writable(self._identifier)
    @property
    def read_only_on_disk(self):
        """If the layer file is read-only on disk, as captured at construction."""
        return self._is_read_only
    @property
    def can_live_update(self):
        """If this layer can participate in live updates (Omniverse layers only)."""
        return self._is_omni_layer
    @property
    def edit_layer_in_auto_authoring_mode(self):
        """If this layer is the edit layer while the stage is in auto-authoring
        (or spec-linking) mode, i.e. where newly created prims are hosted."""
        return self._is_edit_layer_in_auto_authoring_mode
    @edit_layer_in_auto_authoring_mode.setter
    def edit_layer_in_auto_authoring_mode(self, value):
        # Only meaningful in auto-authoring or spec-linking mode.
        if not self.model.auto_authoring_mode and not self.model.spec_linking_mode:
            return
        old_value = self._is_edit_layer_in_auto_authoring_mode
        if old_value != value:
            self._is_edit_layer_in_auto_authoring_mode = value
            self.model._item_changed(self)
            # Propagate this item's flag up the ancestor chain so parents can
            # render the edit-layer indicator.
            # NOTE(review): this overwrites every ancestor's `_has_edit_layer`
            # with THIS child's value, even if a sibling subtree still holds
            # the edit layer — confirm that is intended.
            parent = self.parent
            while parent:
                parent._has_edit_layer = self._is_edit_layer_in_auto_authoring_mode
                parent._name_model._value_changed()
                parent = parent.parent
    @property
    def is_edit_target(self):
        """If this layer is the stage's current edit target."""
        return self._is_edit_target
    @is_edit_target.setter
    def is_edit_target(self, value):
        old_value = self._is_edit_target
        if old_value != value:
            self._is_edit_target = value
            self.model._item_changed(self)
            # Propagate this item's flag up the ancestor chain so parents can
            # render the edit-target indicator.
            # NOTE(review): like the edit-layer setter, this overwrites every
            # ancestor's `_has_edit_target` with THIS child's value — confirm.
            parent = self.parent
            while parent:
                parent._has_edit_target = self._is_edit_target
                parent._name_model._value_changed()
                parent = parent.parent
    @property
    def has_child_edit_target(self):
        """If any descendant layer item is the edit target."""
        return self._has_edit_target
    @property
    def has_child_edit_layer(self):
        """If any descendant layer item is the auto-authoring edit layer."""
        return self._has_edit_layer
    @property
    def locked(self):
        """If the layer is locked (cached from the layers-state interface)."""
        return self._locked_status
    @locked.setter
    def locked(self, value):
        # Route through the command system so the change is undoable.
        omni.kit.commands.execute(
            "LockLayer", layer_identifier=self._identifier, locked=value
        )
    @property
    def has_content(self):
        """If the layer has any root prims."""
        return self._has_content
@property
def add_sublayer(self, sublayer_item):
if sublayer_item not in self._sublayers:
self._sublayers.append(sublayer_item)
def __update_live_session_user_models(self, live_session):
user_count = len(live_session.peer_users)
self._first_live_session_user_model.peer_user = None
self._second_live_session_user_model.peer_user = None
if user_count >= 1:
self._first_live_session_user_model.peer_user = live_session.peer_users[0]
if user_count >= 2:
self._second_live_session_user_model.peer_user = live_session.peer_users[1]
self.model._item_changed(self)
    def update_flags(self):
        """Recompute identifier, edit-layer/edit-target and session-tree flags
        from the current stage state."""
        stage = self._usd_context.get_stage()
        if stage:
            # The layer may have been re-identified (e.g. after a save-as).
            if self.layer and self.layer.identifier != self._identifier:
                self._identifier = self.layer.identifier
                self._name_model._value_changed()
            self.edit_layer_in_auto_authoring_mode = self.model.default_edit_layer == self.identifier
            edit_target_identifier = LayerUtils.get_edit_target(stage)
            if edit_target_identifier:
                is_edit_target = self._identifier == edit_target_identifier
            else:
                is_edit_target = False
            self.is_edit_target = is_edit_target
            # If layer is from session layer tree or not.
            session_layer_identifier = stage.GetSessionLayer().identifier
            parent = self
            while parent and parent._identifier != session_layer_identifier:
                parent = parent.parent
            self._is_from_session_layer = not not parent
def reload(self):
if self.layer:
self.layer.Reload()
    def save(self, on_save_done=None):
        """Save the backing layer to its identifier.

        Args:
            on_save_done: Optional callback invoked as
                ``on_save_done(success: bool, error: str, [identifier])``.

        Missing layers and live session layers are never saved.
        """
        if not self._layer:
            carb.log_warn(f"You cannot save a missing layer: {self._identifier}")
            return
        if self.is_live_session_layer:
            return
        success = self._layer.Save()
        if not success:
            error = f"Failed to save layer {self._identifier} because of permission issue."
        else:
            error = ""
        if on_save_done:
            on_save_done(success, error, [self._identifier])
        if success:
            # Record a checkpoint (empty comment) after a successful save.
            LayerUtils.create_checkpoint(self._identifier, "")
def _notify_muteness_changed(self):
self._name_model._value_changed()
self._local_mute_model._value_changed()
self._global_mute_model._value_changed()
self._save_model._value_changed()
self._absolute_root_prim_spec.on_layer_muteness_changed()
for sublayer in self.sublayers:
sublayer._notify_muteness_changed()
def on_muteness_changed(self):
stage = self._usd_context.get_stage()
if not stage:
return
muted = stage.IsLayerMuted(self._identifier)
if self._old_mute_state != muted:
self._old_mute_state = muted
self._notify_muteness_changed()
    def on_live_session_state_changed(self):
        """Re-evaluate whether this item is a live session layer and refresh
        the affected widget models (whole row, or just name/live/save)."""
        live_session = self._layers_live_syncing.get_live_session_for_live_layer(self._identifier)
        is_live_session_layer = live_session is not None
        if is_live_session_layer:
            name = f"Session '{live_session.name}'"
            self._session_url = live_session.url
        else:
            name = self._layers_state.get_layer_name(self.identifier)
        if self._is_live_session_layer != is_live_session_layer:
            # The live-session state flipped: rebuild the whole row (and the
            # base layer's row, which is drawn together with it).
            refresh_all = True
            self._is_live_session_layer = is_live_session_layer
            self.model._item_changed(self)
            # Refresh base layer
            if self.base_layer:
                self.model._item_changed(self.base_layer)
        else:
            refresh_all = False
        if self._name != name:
            self._name = name
            if not refresh_all:
                self._name_model._value_changed()
        if not refresh_all:
            # Only the per-widget models need updating.
            self._live_update_model._value_changed()
            self._save_model._value_changed()
    def on_muteness_scope_changed(self):
        """Handle a switch between local and global muteness scope.

        If the effective muteness changed, run the full muteness-changed path;
        otherwise just refresh the two mute-button models.
        """
        stage = self._usd_context.get_stage()
        if not stage:
            return
        muted = stage.IsLayerMuted(self._identifier)
        if self._old_mute_state != muted:
            self.on_muteness_changed()
        else:
            self._local_mute_model._value_changed()
            self._global_mute_model._value_changed()
    def on_layer_lock_changed(self):
        """Refresh cached lock and read-only state; notify the lock/name/save
        models when either changed."""
        stage = self._usd_context.get_stage()
        if not stage:
            return
        locked = LayerUtils.get_layer_lock_status(stage.GetRootLayer(), self._identifier)
        is_read_only = self._layers_state.is_layer_readonly_on_disk(self._identifier)
        if locked != self._locked_status or is_read_only != self._is_read_only:
            self._locked_status = locked
            # NOTE(review): `self._is_read_only` is not updated here even
            # though the read-only check can trigger the refresh — confirm.
            self._lock_model._value_changed()
            self._name_model._value_changed()
            self._save_model._value_changed()
def on_layer_edit_mode_changed(self):
for _, item in self._prim_specs_cache.items():
item.on_layer_edit_mode_changed()
    def on_layer_outdate_state_changed(self):
        """Refresh cached auto-reload and outdated flags from the layers-state
        interface and redraw when the outdated flag changed."""
        auto_reload = self._layers_state.is_auto_reload_layer(self._identifier)
        if self._auto_reload != auto_reload:
            self._auto_reload = auto_reload
        outdated = self._layers_state.is_layer_outdated(self._identifier)
        if self._outdated != outdated:
            self._outdated = outdated
            self._latest_model._value_changed()
            self._name_model._value_changed()
            # Redraw this row via the model's item lookup.
            this_layer = self.model.get_layer_item_by_identifier(self._identifier)
            if this_layer:
                self.model._item_changed(this_layer)
    @Trace.TraceFunction
    def on_content_changed(self, changed_prim_spec_paths: List[str]):
        """Handle changed prim spec paths in this layer.

        Reloads the subtree rooted at the common prefix of all changed paths,
        refreshes the has-content flag, and returns two sets of PrimSpecItem:
        (items whose flags changed, items whose children changed).
        """
        flags_update: Set[PrimSpecItem] = set([])
        children_update: Set[PrimSpecItem] = set([])
        if LayerSettings().show_layer_contents and changed_prim_spec_paths:
            carb.log_verbose(f"Handle changed {len(changed_prim_spec_paths)} prims for layer {self.identifier}.")
            # Finds its common path and loads its subtree
            all_parent_paths = [Sdf.Path(path) for path in changed_prim_spec_paths]
            common_prefix = all_parent_paths[0]
            for path in all_parent_paths[1:]:
                common_prefix = Sdf.Path.GetCommonPrefix(common_prefix, path)
            flag_updated_prims, children_updated_prims = self._load_prim_spec_subtree(common_prefix)
            flags_update.update(flag_updated_prims)
            children_update.update(children_updated_prims)
            carb.log_verbose(f"Handle changed prims for layer {self.identifier} done.")
        has_content = len(self._layer.rootPrims) > 0 if self._layer else False
        if self._has_content != has_content:
            self._has_content = has_content
            # The save widget reflects content for live session layers.
            if self.is_live_session_layer:
                self._save_model._value_changed()
        return flags_update, children_update
    @Trace.TraceFunction
    def find_all_specs(self, paths: List[Sdf.Path]):
        """
        Find the child node with given name and return the list of all the
        parent nodes and the found node. It populates the children during
        search.

        Paths without a prim spec in this layer are skipped.
        """
        result = []
        for path in paths:
            prim_spec = self._layer.GetPrimAtPath(path)
            if not prim_spec:
                continue
            item, created = self._get_item_from_cache(path, True)
            # Create all parents
            if created:
                self._create_all_parent_items(item)
            result.append(item)
        return result
    def _create_all_parent_items(self, item: PrimSpecItem, filtered=False):
        """Walk up from `item` to the absolute root, creating any missing
        parent items; with `filtered=True`, also mark them as filtered.

        The walk stops early once it reaches an already-existing parent (or an
        already-filtered one in filtered mode), since its ancestors must exist.
        """
        parent_path = item.path.GetParentPath()
        while parent_path != Sdf.Path.absoluteRootPath:
            parent_item, created = self._get_item_from_cache(parent_path, True)
            if filtered:
                if parent_item.filtered:
                    break
                else:
                    parent_item.filtered = True
            elif not created:
                break
            parent_path = parent_path.GetParentPath()
def prefilter(self, text: str):
if not self._layer:
return
text = text.lower()
# Clear all old states
for _, child in self._prim_specs_cache.items():
child.filtered = False
self._prefilter_internal(text)
    @Trace.TraceFunction
    def _prefilter_internal(self, text: str):
        """Recursively mark items that meet the filtering rule"""
        # Has the search string in the name
        self._filtered = not text or text in self._name.lower()
        self._child_filtered = False
        for child in self._sublayers:
            child.prefilter(text)
            if not self._child_filtered:
                self._child_filtered = child._child_filtered or child._filtered
        # An empty search text matches everything; no traversal needed.
        if not text:
            return
        # FIXME: Don't use Sdf.PrimSpec.nameChildren to traverse whole stage as it's pretty slow.
        def on_prim_spec_path(prim_spec_path):
            if prim_spec_path.IsPropertyPath() or prim_spec_path == Sdf.Path.absoluteRootPath:
                return
            # Skip items that are already marked as filtered.
            prim_spec_item, _ = self._get_item_from_cache(prim_spec_path)
            if prim_spec_item and prim_spec_item.filtered:
                return
            if text in prim_spec_path.name.lower():
                prim_spec_item, _ = self._get_item_from_cache(prim_spec_path, True)
                prim_spec_item.filtered = True
                self._child_filtered = True
                # Make ancestors visible so the match is reachable in the tree.
                self._create_all_parent_items(prim_spec_item, True)
        self._layer.Traverse(Sdf.Path.absoluteRootPath, on_prim_spec_path)
@Trace.TraceFunction
def _load_prim_spec_subtree(self, prim_spec_path):
    """Re-sync the cached items rooted at *prim_spec_path* with the layer.

    Returns a pair of sets (flags_updated, children_updated): items whose
    display flags need refreshing and items whose children lists changed.
    Cache entries whose prim specs no longer exist are removed.
    """
    carb.log_verbose(f"Load prim spec tree rooted from {prim_spec_path}")
    old_prim_specs = []
    flags_updated_prim_specs: Set[PrimSpecItem] = set([])
    children_updated_specs: Set[PrimSpecItem] = set([])
    if prim_spec_path == self._absolute_root_prim_spec.path:
        children_updated_specs.add(self._absolute_root_prim_spec)
    item, _ = self._get_item_from_cache(prim_spec_path)
    # This is a new item: return right away and refresh its parent (if the
    # parent is already cached) so the parent re-queries its children.
    if not item:
        parent, _ = self._get_item_from_cache(prim_spec_path.GetParentPath())
        if parent:
            children_updated_specs.add(parent)
        return flags_updated_prim_specs, children_updated_specs
    # Collect every cached item inside the subtree (excluding the pseudo-root).
    for path, item in self._prim_specs_cache.items():
        if item == self._absolute_root_prim_spec:
            continue
        if path.HasPrefix(prim_spec_path):
            old_prim_specs.append(item)
    if self._layer:
        for item in old_prim_specs:
            prim_spec = self._layer.GetPrimAtPath(item.path)
            if prim_spec:
                flags_updated_prim_specs.add(item)
                children_updated_specs.add(item)
            else:
                # Prim spec was removed from the layer — evict its cache entry
                # and mark its parent so the parent's children refresh.
                parent = item.parent
                if parent:
                    children_updated_specs.add(parent)
                self._remove_cache_item(item.path)
    else:
        # No backing layer anymore: every cached item in the subtree is stale.
        for item in old_prim_specs:
            parent_item = item.parent
            if parent_item:
                children_updated_specs.add(parent_item)
            self._remove_cache_item(item.path)
    # Left old prim specs are destroyed ones.
    return flags_updated_prim_specs, children_updated_specs
def update_spec_links_status(self, spec_paths: List[Union[str, Sdf.Path]]):
    """Refresh the ``linked`` flag on cached items for the given prim paths."""
    if not spec_paths:
        return
    for raw_path in spec_paths:
        path = Sdf.Path(raw_path)
        # Only prim paths carry link state.
        if not path.IsPrimPath():
            continue
        item, _ = self._get_item_from_cache(path)
        if item:
            item.linked = self._layers_specs_linking.is_spec_linked(path)
def update_spec_locks_status(self, spec_paths: List[Union[str, Sdf.Path]]):
    """Refresh the ``locked`` flag on cached items for the given prim paths."""
    if not spec_paths:
        return
    for raw_path in spec_paths:
        path = Sdf.Path(raw_path)
        # Only prim paths carry lock state.
        if not path.IsPrimPath():
            continue
        item, _ = self._get_item_from_cache(path)
        if item:
            item.locked = self._layers_specs_locking.is_spec_locked(path)
def get_item_value_model(self, column_id):
    """Return the value model backing the given tree-view column, or None."""
    if column_id == 0:
        return self._name_model
    if column_id == 1:
        return self._live_update_model
    if column_id == 2:
        return self._save_model
    if column_id == 3:
        return self._local_mute_model
    # Columns 4-6 mean different things for a live-session layer row.
    if self.is_live_session_layer:
        if column_id == 4:
            return self._first_live_session_user_model
        if column_id == 5:
            return self._second_live_session_user_model
        if column_id == 6:
            return self._ellipsis_model
    else:
        if column_id == 4:
            return self._global_mute_model
        if column_id == 5:
            return self._latest_model
        if column_id == 6:
            return self._lock_model
    return None
def _remove_cache_item(self, prim_spec_path):
    """Remove *prim_spec_path* from the prim-spec cache and destroy its item.

    Returns:
        bool: True if a cached item was found and destroyed; False if the
        path is the absolute root (never evicted) or had no cache entry.
        (The original returned None in the latter cases for non-root paths,
        which was inconsistent with the explicit ``return False`` for root.)
    """
    # The absolute root is a sentinel item and must never be evicted.
    if prim_spec_path == Sdf.Path.absoluteRootPath:
        return False
    item = self._prim_specs_cache.pop(prim_spec_path, None)
    if item:
        item.destroy()
        return True
    return False
def _clear_cache(self):
    """Destroy every cached prim-spec item and empty the cache."""
    for cached_item in self._prim_specs_cache.values():
        cached_item.destroy()
    self._prim_specs_cache.clear()
def _get_item_from_cache(self, prim_spec_path: Sdf.Path, create=False):
    """Look up the cached item for *prim_spec_path*.

    Returns a (item, created) pair; ``created`` is True only when *create*
    forced a brand-new PrimSpecItem into the cache.
    """
    cached = self._prim_specs_cache.get(prim_spec_path, None)
    if cached or not create:
        return cached, False
    new_item = PrimSpecItem(self._usd_context, prim_spec_path, self)
    self._prim_specs_cache[prim_spec_path] = new_item
    return new_item, True
@Trace.TraceFunction
def _get_item_children(self, prim_spec_path: Sdf.Path):
    """Return the child PrimSpecItems of *prim_spec_path*.

    Children are read from the prim spec's nameChildren; missing cache
    entries are created on demand.  Returns [] when the path is not cached
    or has no backing prim spec.
    """
    item = self._prim_specs_cache.get(prim_spec_path, None)
    if not item or not item.prim_spec:
        return []
    all_children = []
    for child in item.prim_spec.nameChildren:
        # Consistency fix: reuse the shared get-or-create helper instead of
        # duplicating the cache-insertion logic inline.
        child_item, _ = self._get_item_from_cache(child.path, True)
        all_children.append(child_item)
    return all_children
def __repr__(self):
    """Debug representation shown in logs and the UI inspector."""
    return "<Omni::UI Layer Item '{}'>".format(self.identifier)
def __str__(self):
    """The item's layer identifier."""
    return str(self.identifier)
| 33,290 | Python | 33.427094 | 113 | 0.603575 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/external_drag_drop_helper.py | import omni.usd
import omni.ui as ui
from typing import List
from pxr import Sdf
from omni.kit.window.drop_support import ExternalDragDrop
# Module-level singleton: the active ExternalDragDrop subscription for the
# layers window, or None when OS-level file drops are not hooked up.
external_drag_drop = None
def setup_external_drag_drop(window_name: str, model: ui.AbstractItemModel):
    """Install the OS file drag-and-drop hook for *window_name*, replacing any previous one."""
    global external_drag_drop
    # Tear down a stale hook first so only one subscription is ever live.
    destroy_external_drag_drop()

    def _forward(event, payload, bound_model=model):
        _on_ext_drag_drop(event, payload, bound_model)

    external_drag_drop = ExternalDragDrop(window_name=window_name, drag_drop_fn=_forward)
def destroy_external_drag_drop():
    """Release the active drag-and-drop hook, if any."""
    global external_drag_drop
    if not external_drag_drop:
        return
    external_drag_drop.destroy()
    external_drag_drop = None
def _on_ext_drag_drop(edd: ExternalDragDrop, payload: List[str], model: ui.AbstractItemModel):
    """Route every dropped file path into the model, targeting the root layer."""
    root_item = model._root_layer
    for dropped_path in edd.expand_payload(payload):
        # Normalize Windows separators before handing the path to USD.
        model.drop(root_item, dropped_path.replace("\\", "/"))
| 889 | Python | 30.785713 | 104 | 0.685039 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_model_utils.py | import omni
import omni.client
import omni.usd
import weakref
import os
import carb
import omni.kit.notification_manager as nm
from .file_picker import FilePicker
from .filebrowser import FileBrowserSelectionType, FileBrowserMode
from .layer_settings import LayerSettings
from typing import List
from pxr import Sdf
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.prompt import PromptButtonInfo, PromptManager
_file_picker = None
def _show_confirm_layer_insert_prompt(layer_identifier, confirm_fn):
    """Ask the user to confirm inserting *layer_identifier* as a sublayer."""
    layer_name = os.path.basename(layer_identifier)
    PromptManager.post_simple_prompt(
        "Insert Layer",
        f"Do you want to insert {layer_name} into stage?",
        PromptButtonInfo("Yes", confirm_fn),
        PromptButtonInfo("No")
    )
def _show_outdated_layer_prompt(layer_identifier, confirm_fn, middle_fn, middle_2_fn, cancel_fn):
    """Warn that *layer_identifier* changed on disk and let the user resolve it.

    Buttons: Save (confirm_fn), Save As (middle_fn), optional Fetch Latest
    (middle_2_fn, only shown when provided), Cancel (cancel_fn).
    """
    if middle_2_fn:
        middle_2_button = PromptButtonInfo("Fetch Latest", middle_2_fn)
    else:
        middle_2_button = None
    PromptManager.post_simple_prompt(
        f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Merge Conflict',
        f"({os.path.basename(layer_identifier)}) has been updated on disk.",
        ok_button_info=PromptButtonInfo("Save", confirm_fn),
        # BUG FIX: the Cancel button previously invoked confirm_fn, saving
        # the layer even when the user asked to cancel; cancel_fn was unused.
        cancel_button_info=PromptButtonInfo("Cancel", cancel_fn),
        middle_button_info=PromptButtonInfo("Save As", middle_fn),
        middle_2_button_info=middle_2_button,
        modal=True
    )
def _show_outdated_layers_prompt(outdated_layers, confirm_fn, middle_fn, cancel_fn):
    """Warn that several layers changed on disk.

    Buttons: Save (confirm_fn), optional Fetch Latest (middle_fn),
    Cancel (cancel_fn).  When *outdated_layers* is empty there is no
    conflict, so confirm_fn runs immediately without a prompt.
    """
    # Fix: dropped a stale `global _layers_are_outdated_prompt` statement
    # that referenced a name never defined in this module.
    if not outdated_layers:
        if confirm_fn:
            confirm_fn()
        return
    if middle_fn:
        middle_button = PromptButtonInfo("Fetch Latest", middle_fn)
    else:
        middle_button = None
    message = ", ".join(os.path.basename(identifier) for identifier in outdated_layers)
    PromptManager.post_simple_prompt(
        f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Merge Conflict',
        f"Layers ({message}) have been updated on disk.",
        ok_button_info=PromptButtonInfo("Save", confirm_fn),
        # Fix: honor the caller-supplied cancel handler (it was silently dropped).
        cancel_button_info=PromptButtonInfo("Cancel", cancel_fn),
        middle_button_info=middle_button,
        modal=True
    )
def _show_transfer_root_content_prompt(confirm_fn, cancel_fn):
    """Ask whether the root layer's content should move into the new sublayer."""
    yes_button = PromptButtonInfo("Yes", confirm_fn)
    no_button = PromptButtonInfo("No", cancel_fn)
    PromptManager.post_simple_prompt(
        "Transfer Content",
        "Root Layer is not empty. Transfer Root Layer contents to the new sublayer?",
        ok_button_info=yes_button,
        cancel_button_info=no_button,
    )
def _show_save_file_picker(title: str, file_handler, default_location=None, default_filename=None):
    """Show a SAVE-mode file picker and forward the chosen path(s) to *file_handler*."""
    global _file_picker
    # The picker is a module-level singleton; drop the previous instance first.
    if _file_picker:
        _file_picker.destroy()
    # Each regex matches files with the given extension while excluding
    # generated "*.sublayer.<ext>" files via the negative lookahead.
    filter_options = [
        (r"^(?=.*.usd$)((?!.*\.(sublayer)\.usd).)*$", "USD File (*.usd)"),
        (r"^(?=.*.usda$)((?!.*\.(sublayer)\.usda).)*$", "USDA File (*.usda)"),
        (r"^(?=.*.usdc$)((?!.*\.(sublayer)\.usdc).)*$", "USDC File (*.usdc)"),
        (r"^(?=.*.live$)((?!.*\.(sublayer)\.live).)*$", "Live File (*.live)"),
        ("(.*?)", "All Files (*.*)"),
    ]
    _file_picker = FilePicker(
        title,
        FileBrowserMode.SAVE,
        FileBrowserSelectionType.FILE_ONLY,
        filter_options,
        # One default extension per filter entry above; presumably the final
        # ".usd" is the fallback for "All Files" — TODO confirm against FilePicker.
        [".usd", ".usda", ".usdc", ".live", ".usd"],
    )
    _file_picker.set_file_selected_fn(file_handler)
    _file_picker.show(default_location, default_filename)
def _show_save_error_prompt(layer_identifier):
    """Tell the user that saving *layer_identifier* failed."""
    glyph = omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")
    PromptManager.post_simple_prompt(
        f"{glyph} Save Failed",
        f"Failed to save layer {os.path.basename(layer_identifier)}."
    )
def _show_remove_dirty_layer_prompt(layer_identifier, dirty, confirm_fn, multiple=False):
    """Confirm removal of sublayer(s) that are dirty or non-empty."""
    layer_name = os.path.basename(layer_identifier)
    if dirty and multiple:
        message = "Several layers have unsaved changes. Do you want to remove them from stage?"
    elif dirty:
        message = f"Layer {layer_name} has unsaved changes. Do you want to remove this layer from stage?"
    elif multiple:
        message = "Several layers are not empty. Do you want to remove them from stage?"
    else:
        message = f"Layer {layer_name} content is not empty. Do you want to remove this layer from stage?"
    glyph = omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")
    PromptManager.post_simple_prompt(
        f"{glyph} Removing Layer",
        message,
        ok_button_info=PromptButtonInfo("Yes", confirm_fn),
        cancel_button_info=PromptButtonInfo("No")
    )
def _show_reload_dirty_layer_prompt(layer_identifier, confirm_fn):
    """Confirm reloading a layer whose in-memory edits would be discarded."""
    glyph = omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")
    name = os.path.basename(layer_identifier)
    PromptManager.post_simple_prompt(
        f"{glyph} Reload Layer",
        f"Layer {name} has unsaved changes. Do you want to reload this layer?",
        ok_button_info=PromptButtonInfo("Yes", confirm_fn),
        cancel_button_info=PromptButtonInfo("No")
    )
def _show_file_insert_picker(title: str, file_handler, default_location=None, default_filename=None):
    """Show an OPEN-mode, multi-select file picker for inserting existing layers."""
    global _file_picker
    # The picker is a module-level singleton; drop the previous instance first.
    if _file_picker:
        _file_picker.destroy()
    # First entry matches every USD-readable file type; second is a catch-all.
    filter_options = [
        (omni.usd.readable_usd_re(), omni.usd.readable_usd_files_desc()),
        ("(.*?)", "All Files (*.*)"),
    ]
    _file_picker = FilePicker(
        title, FileBrowserMode.OPEN, FileBrowserSelectionType.FILE_ONLY, filter_options, allow_multi_selections=True,
        enable_checkpoints=True
    )
    _file_picker.set_file_selected_fn(file_handler)
    _file_picker.show(default_location, default_filename)
def _show_move_prim_spec_warning_prompt(layer_identifier, confirm_fn):
    """Confirm merging a prim spec into a layer that already defines that path."""
    glyph = omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")
    name = os.path.basename(layer_identifier)
    PromptManager.post_simple_prompt(
        f"{glyph} Merge Prim Spec",
        f"Do you want to merge this prim spec to layer {name}?",
        ok_button_info=PromptButtonInfo("Yes", confirm_fn),
        cancel_button_info=PromptButtonInfo("No")
    )
class LayerModelUtils:
@staticmethod
def on_shutdown():
    """Release the module-level file picker when the extension unloads."""
    global _file_picker
    if not _file_picker:
        return
    _file_picker.destroy()
    _file_picker = None
@staticmethod
def _prompt_content_transfer_and_create_layer(weakref_layer_item, file_path, position):
    """Create *file_path* as a sublayer of the (weakly referenced) layer item.

    If the stage's root layer has no sublayers yet, first ask whether the
    root layer's existing content should be transferred into the new one.
    """
    layer_item = weakref_layer_item()
    if not layer_item:
        return
    # Skips it if this file is already in sublayer list
    for sublayer in layer_item.sublayers:
        if sublayer.identifier == file_path:
            return
    def create_layer_callback(weakref_item, file_path, transfer_content):
        # Re-resolve the weakref: the prompt may fire after the item is gone.
        layer_item = weakref_item()
        if not layer_item:
            return
        omni.kit.commands.execute(
            "CreateSublayer",
            layer_identifier=layer_item.identifier,
            sublayer_position=position,
            new_layer_path=file_path,
            transfer_root_content=transfer_content,
            create_or_insert=True,
            layer_name="",
        )
    root_layer_item = layer_item.model.root_layer_item
    if len(root_layer_item.sublayers) == 0:
        _show_transfer_root_content_prompt(
            lambda: create_layer_callback(weakref_layer_item, file_path, True),
            lambda: create_layer_callback(weakref_layer_item, file_path, False),
        )
    else:
        create_layer_callback(weakref_layer_item, file_path, False)
@staticmethod
def can_set_as_edit_target(layer_item):
model = layer_item.model
if model.root_layer_item.is_in_live_session and not layer_item.is_live_session_layer:
return False
if not model.root_layer_item.is_in_live_session and layer_item.is_in_live_session:
return False
return True
@staticmethod
def can_edit_sublayer(layer_item):
if layer_item.is_live_session_layer or layer_item.is_in_live_session:
return False
return True
@staticmethod
def _found_existing_sublayer(layer_item, layer_identifier):
for sublayer in layer_item.sublayers:
if os.path.normpath(sublayer.identifier) == os.path.normpath(layer_identifier):
return True
return False
@staticmethod
def create_sublayer(layer_item, position: int, create_anonymous=False):
    """Create a new sublayer under *layer_item* at *position*.

    With create_anonymous=True the sublayer is created immediately as an
    anonymous layer; otherwise a save-file picker is shown and creation
    happens in its callback.  Returns False when editing is blocked by
    live-syncing (and None otherwise).
    """
    if not LayerModelUtils.can_edit_sublayer(layer_item):
        nm.post_notification("Cannot create sublayers in live-syncing mode.")
        return False
    if create_anonymous:
        # Create anonymous layer for session layer only
        omni.kit.commands.execute(
            "CreateSublayer",
            layer_identifier=layer_item.identifier,
            sublayer_position=position,
            new_layer_path="",
            transfer_root_content=False,
            create_or_insert=True,
            layer_name="",
        )
    else:
        model = layer_item.model
        # Weakref so the picker callback does not keep the item alive.
        weakref_item = weakref.ref(layer_item)
        def create_sublayer_fn(file_paths: List[str], overwrite_existing: bool):
            LayerModelUtils._prompt_content_transfer_and_create_layer(weakref_item, file_paths[-1], position)
        # Default the dialog to the root layer's folder when configured and
        # the root layer actually lives on disk.
        if (
            not Sdf.Layer.IsAnonymousLayerIdentifier(model.root_layer_item.identifier)
            and LayerSettings().file_dialog_show_root_layer_location
        ):
            save_location = os.path.dirname(model.root_layer_item.identifier)
        else:
            save_location = None
        _show_save_file_picker("Create Sublayer", create_sublayer_fn, save_location)
@staticmethod
def remove_prim_spec_items(prim_item_list):
    """Remove the given prim-spec items from their owning layers.

    Items are grouped per layer, ancestor paths are pruned so only subtree
    roots are removed, and everything runs inside one undo group.
    """
    # Group requested paths by the layer that owns them.
    layer_paths = {}
    for prim_item in prim_item_list:
        prim_path = prim_item.path
        layer_item = prim_item.layer_item
        if layer_item.identifier in layer_paths:
            layer_paths[layer_item.identifier].append(prim_path)
        else:
            layer_paths[layer_item.identifier] = [prim_path]
    with omni.kit.undo.group():
        for layer_identifier, paths in layer_paths.items():
            if not paths:
                continue
            # Prune paths: after sorting, descendants directly follow their
            # ancestor, so any path prefixed by the current base is redundant.
            paths = sorted(paths)
            base_path = paths[0]
            prune_paths = [base_path]
            for path in paths:
                if not path.HasPrefix(base_path):
                    base_path = path
                    prune_paths.append(path)
            omni.kit.commands.execute(
                "RemovePrimSpec", layer_identifier=layer_identifier,
                prim_spec_path=prune_paths
            )
@staticmethod
def insert_sublayer(layer_item, position: int):
    """Show a file picker and insert the chosen layer file(s) under *layer_item*.

    Returns False when editing is blocked by live-syncing (None otherwise).
    Duplicate sublayers and files that are ancestors of *layer_item* are
    skipped with a warning notification.
    """
    if not LayerModelUtils.can_edit_sublayer(layer_item):
        nm.post_notification("Cannot insert sublayers in live-syncing mode.")
        return False
    # Weakref so the picker callback does not keep the item alive.
    weakref_item = weakref.ref(layer_item)
    def insert_sublayer_fn(file_paths: List[str], overwrite_existing: bool):
        # overwrite_existing param does not apply in "Open" dialog
        layer_item = weakref_item()
        if not layer_item:
            return
        for file_path in file_paths:
            # Walk up the ancestors to detect a circular insertion.
            parent_item = layer_item
            while parent_item and parent_item.identifier != file_path:
                parent_item = parent_item.parent
            if not parent_item:
                found = LayerModelUtils._found_existing_sublayer(layer_item, file_path)
                if found:
                    nm.post_notification(
                        f"Duplicate sublayer {file_path} found in the parent.",
                        status=nm.NotificationStatus.WARNING,
                        duration=4
                    )
                else:
                    omni.kit.commands.execute(
                        "CreateSublayer",
                        layer_identifier=layer_item.identifier,
                        sublayer_position=position,
                        new_layer_path=file_path,
                        transfer_root_content=False,
                        create_or_insert=False,
                        layer_name="",
                    )
            else:
                nm.post_notification(
                    f"Skip the insert as duplicate parent {file_path} found.",
                    status=nm.NotificationStatus.WARNING,
                    duration=4
                )
    model = layer_item.model
    # Default the dialog to the root layer's folder when configured and the
    # root layer actually lives on disk.
    if (
        not Sdf.Layer.IsAnonymousLayerIdentifier(model.root_layer_item.identifier)
        and LayerSettings().file_dialog_show_root_layer_location
    ):
        insert_location = os.path.dirname(model.root_layer_item.identifier)
    else:
        insert_location = None
    _show_file_insert_picker("Insert Sublayer", insert_sublayer_fn, insert_location)
@staticmethod
def save_layer(layer_item):
    """Save *layer_item* if it is dirty.

    If the layer is not the latest version on disk, show the merge-conflict
    prompt first (Save / Save As / Cancel); failures surface an error prompt.
    """
    if not layer_item.dirty:
        return
    layer_identifier = layer_item.identifier
    def on_save_done(result, error, saved_layers):
        if not result:
            _show_save_error_prompt(layer_identifier)
    def on_save_as_done(result, error, saved_layers):
        if not result:
            _show_save_error_prompt(layer_identifier)
        else:
            # Pick up the on-disk state after saving under a new name.
            layer_item.reload()
    if not layer_item.latest:
        def confirm_fn():
            layer_item.save(on_save_done)
        def middle_button_fn():
            LayerModelUtils.save_layer_as(layer_item, False, True, on_save_as_done, True)
        _show_outdated_layer_prompt(layer_item.identifier, confirm_fn, middle_button_fn, None, None)
    else:
        layer_item.save(on_save_done)
@staticmethod
def _create_layer(layer_identifier: str):
    """Return a cleared existing layer or a brand-new layer at *layer_identifier*."""
    existing = Sdf.Layer.FindOrOpen(layer_identifier)
    if existing:
        # Reuse the already-open layer but wipe its content.
        existing.Clear()
        return existing
    return Sdf.Layer.CreateNew(layer_identifier)
@staticmethod
def save_layer_as(layer_item, replace=False, insert_before=False, on_save_done=None, confirm_before_insert=False):
    """Save layer as new layer.
    Args:
        layer_item (LayerItem): Layer item to be saved.
        replace (bool): After save, if it needs to replace the item to be saved.
        insert_before (bool): After save, if it needs to be inserted before this item.
        `replace` and `insert_before` cannot be true at the same time.
        confirm_before_insert (bool): Before insert, it needs to confirm or not.
    """
    if not layer_item.layer:
        return
    # Replacement needs a parent sublayer entry to rewrite.
    if not layer_item.parent or not layer_item.parent.layer:
        replace = False
    # Uses weakref to avoid filepicker hold it's strong reference
    weakref_item = weakref.ref(layer_item)
    def on_file_selected(file_paths: List[str], overwrite_existing: bool):
        layer_item = weakref_item()
        if not layer_item or not layer_item.layer:
            return
        layer = layer_item.layer
        file_path = file_paths[-1]
        new_layer = LayerModelUtils._create_layer(file_path)
        if not new_layer:
            carb.log_error(f"Save layer failed. Failed to create layer {file_path}")
            return
        new_layer.TransferContent(layer)
        if not new_layer.Save():
            if on_save_done:
                on_save_done(False, f"Save layer {layer.identifier} failed.", [])
        elif replace:
            # Swap the old sublayer entry in the parent for the new file.
            parent = layer_item.parent
            position = LayerUtils.get_sublayer_position_in_parent(parent.identifier, layer_item.identifier)
            omni.kit.commands.execute(
                "ReplaceSublayer",
                layer_identifier=parent.identifier,
                sublayer_position=position,
                new_layer_path=new_layer.identifier,
            )
            LayerUtils.resolve_paths(layer, new_layer)
            # If edit target changes to one that's not in layer stack or session layer.
            usd_context = layer_item.usd_context
            stage = usd_context.get_stage()
            edit_target = stage.GetEditTarget()
            edit_target_identifier = LayerUtils.get_edit_target(stage)
            if (
                edit_target_identifier == stage.GetSessionLayer().identifier
                or edit_target.GetLayer() not in stage.GetLayerStack()
            ):
                LayerUtils.set_edit_target(stage, stage.GetRootLayer().identifier)
        elif insert_before:
            def insert_layer():
                parent = layer_item.parent
                usd_context = layer_item.usd_context
                stage = usd_context.get_stage()
                # Without a parent, insert at the top of the root layer stack.
                if not parent:
                    parent_identifier = stage.GetRootLayer().identifier
                    position = 0
                else:
                    parent_identifier = parent.identifier
                    position = LayerUtils.get_sublayer_position_in_parent(parent_identifier, layer_item.identifier)
                omni.kit.commands.execute(
                    "CreateSublayer",
                    layer_identifier=parent_identifier,
                    sublayer_position=position,
                    new_layer_path=new_layer.identifier,
                    transfer_root_content=False,
                    create_or_insert=False,
                )
                LayerUtils.resolve_paths(layer, new_layer)
                edit_target_identifier = LayerUtils.get_edit_target(stage)
                if edit_target_identifier == layer.identifier:
                    LayerUtils.set_edit_target(stage, new_layer.identifier)
            if confirm_before_insert:
                _show_confirm_layer_insert_prompt(new_layer.identifier, insert_layer)
            else:
                insert_layer()
        # NOTE(review): on the Save() failure branch above, on_save_done has
        # already fired with False and then fires again here with True —
        # looks like a latent bug; confirm intended control flow.
        if on_save_done:
            on_save_done(True, "", [new_layer.identifier])
        comment = ""
        if overwrite_existing:
            if layer.anonymous:
                comment = "Replaced with new file"
            else:
                comment = f"Replaced with {layer.identifier}"
        LayerUtils.create_checkpoint(new_layer.identifier, comment)
    # Default the dialog to the current layer's folder when configured and
    # the layer actually lives on disk.
    if (
        not layer_item.anonymous
        and LayerSettings().file_dialog_show_root_layer_location
    ):
        save_location = os.path.dirname(layer_item.identifier)
    else:
        save_location = None
    save_name = os.path.splitext(layer_item.layer.GetDisplayName())[0]
    _show_save_file_picker("Save Layer As", on_file_selected, save_location, save_name)
@staticmethod
def remove_layer(layer_item):
    """Remove *layer_item* from its parent, confirming when data would be lost.

    Returns False when editing is blocked by live-syncing; does nothing if
    the item has no parent sublayer entry to remove.
    """
    if not LayerModelUtils.can_edit_sublayer(layer_item):
        nm.post_notification("Cannot remove sublayers in live-syncing mode.")
        return False
    if not layer_item.parent or not layer_item.parent.layer:
        return
    if not LayerModelUtils.can_edit_sublayer(layer_item.parent):
        nm.post_notification("Cannot remove sublayers in live-syncing mode.")
        return False
    layer_identifier = layer_item.identifier
    parent_layer = layer_item.parent.layer
    position = LayerUtils.get_sublayer_position_in_parent(parent_layer.identifier, layer_identifier)
    # Missing layers count as empty; otherwise check for root prims.
    is_empty_layer = not layer_item.layer or len(layer_item.layer.pseudoRoot.nameChildren) == 0
    def remove_layer_command(layer_identifier, position):
        omni.kit.commands.execute(
            "RemoveSublayer", layer_identifier=layer_identifier, sublayer_position=position
        )
    # Prompt when removal would discard content or unsaved edits.
    if not is_empty_layer or layer_item.dirty:
        _show_remove_dirty_layer_prompt(
            layer_item.identifier,
            layer_item.dirty,
            lambda: remove_layer_command(parent_layer.identifier, position),
        )
    else:
        remove_layer_command(parent_layer.identifier, position)
@staticmethod
def remove_layers(layer_items):
if not layer_items:
return False
can_edit_sublayers = True
for layer_item in layer_items:
if not LayerModelUtils.can_edit_sublayer(layer_item):
can_edit_sublayers = False
break
if not can_edit_sublayers:
nm.post_notification("Cannot remove sublayers in live-syncing mode.")
return False
has_dirty_layer = False
has_non_empty_layer = False
for layer_item in layer_items:
if not layer_item.layer:
continue
if layer_item.dirty:
has_dirty_layer = True
break
if len(layer_item.layer.pseudoRoot.nameChildren) != 0:
has_non_empty_layer = True
break
def remove_layers(layer_items):
with omni.kit.undo.group():
for layer_item in layer_items:
parent = layer_item.parent
if not parent or not parent.layer:
continue
position = LayerUtils.get_sublayer_position_in_parent(parent.identifier, layer_item.identifier)
omni.kit.commands.execute(
"RemoveSublayer", layer_identifier=parent.identifier, sublayer_position=position
)
if has_non_empty_layer or has_dirty_layer:
_show_remove_dirty_layer_prompt(
"",
has_dirty_layer,
lambda: remove_layers(layer_items),
multiple=True
)
else:
remove_layers(layer_items)
@staticmethod
def reload_layer(layer_item):
if layer_item.dirty:
_show_reload_dirty_layer_prompt(layer_item.identifier, lambda: layer_item.reload())
else:
layer_item.reload()
@staticmethod
def flatten_all_layers(layer_model):
if not layer_model:
return
if layer_model.is_in_live_session:
nm.post_notification("Cannot flatten sublayers in live-syncing mode.")
return False
if LayerSettings().show_merge_or_flatten_warning:
PromptManager.post_simple_prompt(
"Flatten All Layers",
"Flatten all layers will remove all sublayers except root layer. Do you want to flatten them?",
ok_button_info=PromptButtonInfo("Yes", lambda: layer_model.flatten_all_layers()),
cancel_button_info=PromptButtonInfo("No")
)
else:
layer_model.flatten_all_layers()
@staticmethod
def merge_layer_down(layer_item):
    """Merge *layer_item* into the next-weaker sibling below it.

    Shows a confirmation prompt when the settings ask for one.  Returns
    False when merging is blocked by live-syncing.  Nothing happens if the
    layer is locked, parentless, or already the weakest sibling.
    """
    if not LayerModelUtils.can_edit_sublayer(layer_item):
        nm.post_notification("Cannot merge sublayers in live-syncing mode.")
        return False
    def _merge_internal():
        if not layer_item.parent:
            return
        if layer_item.locked:
            return
        position_in_parent = LayerUtils.get_sublayer_position_in_parent(
            layer_item.parent.identifier, layer_item.identifier
        )
        # Only possible when a weaker sibling exists below this one.
        if position_in_parent < len(layer_item.parent.sublayers) - 1:
            layer_item_down = layer_item.parent.sublayers[position_in_parent + 1]
            omni.kit.commands.execute(
                "MergeLayers",
                dst_parent_layer_identifier=layer_item.parent.identifier,
                dst_layer_identifier=layer_item_down.identifier,
                src_parent_layer_identifier=layer_item.parent.identifier,
                src_layer_identifier=layer_item.identifier,
                dst_stronger_than_src=False,
            )
    if LayerSettings().show_merge_or_flatten_warning:
        layer_name = os.path.basename(layer_item.identifier)
        PromptManager.post_simple_prompt(
            "Merge Layer Down",
            f"Layer ({layer_name}) will be removed after merging down. Do you want to merge this layer?",
            ok_button_info=PromptButtonInfo("Yes", _merge_internal),
            cancel_button_info=PromptButtonInfo("No")
        )
    else:
        _merge_internal()
@staticmethod
def can_move_prim_spec_to_layer(target_layer, prim_spec):
if prim_spec.layer_item == target_layer:
return False
if (
not target_layer.editable
or target_layer.locked
or target_layer.missing
or target_layer.muted_or_parent_muted
):
return False
return True
@staticmethod
def can_create_layer_to_location(target_layer, drop_location):
    """Check whether a layer may be created/dropped at *drop_location*.

    drop_location == -1 means "as a sublayer of target_layer"; any other
    value is a row index inside the target's parent, where the first
    len(prim_specs) rows are prim specs and the rest are sublayers.  A
    drop between a live-session layer and its base layer is rejected.
    """
    target_parent = target_layer.parent
    as_sublayer = drop_location == -1
    if not LayerModelUtils.can_edit_sublayer(target_layer) and as_sublayer:
        return False
    if not as_sublayer and target_parent:
        # Translate the row index into an index into the sublayer list.
        total_prim_specs = len(target_parent.prim_specs)
        sublayer_location = drop_location - total_prim_specs
        if sublayer_location < 0:
            return False
        if sublayer_location > 0 and sublayer_location < len(target_parent.sublayers):
            # Reject dropping between two layers that both belong to a live session.
            sublayer = target_parent.sublayers[sublayer_location]
            up_sublayer = target_parent.sublayers[sublayer_location - 1]
            if (
                not LayerModelUtils.can_edit_sublayer(sublayer) and
                not LayerModelUtils.can_edit_sublayer(up_sublayer)
            ):
                return False
    return True
@staticmethod
def can_move_layer(target_layer, source_layer, drop_location):
    """Checks if it's possible to move source layer to position of target layer."""
    as_sublayer = drop_location == -1
    # Live-syncing blocks editing the source, and the target when dropping into it.
    if (
        not LayerModelUtils.can_edit_sublayer(source_layer) or
        (not LayerModelUtils.can_edit_sublayer(target_layer) and as_sublayer)
    ):
        return False
    # Never move a layer onto itself or onto its live/base counterpart.
    if (
        target_layer == source_layer or
        target_layer.base_layer == source_layer or
        target_layer.live_session_layer == source_layer or
        source_layer.base_layer == target_layer or
        source_layer.live_session_layer == target_layer
    ):
        return False
    # Checks to see if layer is moved to the location between live and base layer.
    if not LayerModelUtils.can_create_layer_to_location(target_layer, drop_location):
        return False
    if source_layer.reserved:
        return False
    # Dropping at an index targets the parent's sublayer list; dropping onto
    # a layer (or a reserved row) targets that layer itself.
    if not target_layer.reserved and not as_sublayer:
        target_parent = target_layer.parent
    else:
        target_parent = target_layer
    # The destination must be writable: not muted, editable, unlocked,
    # backed by a real layer, and not live-locked.
    if (
        source_layer.muted_or_parent_muted
        or target_parent.muted_or_parent_muted
        or not target_parent.editable
        or target_parent.locked
        or not target_parent.layer
        or not LayerModelUtils.can_edit_sublayer(target_parent)
    ):
        return False
    return True
@staticmethod
def move_prim_spec(model, target_item, prim_item):
    """Move *prim_item*'s prim spec into *target_item*'s layer.

    If the target layer already defines the same prim path, the user is
    asked to confirm the merge first.
    """
    target_layer_identifier = target_item.identifier
    source_layer_identifier = prim_item.layer_item.identifier
    # Relative layer strength decides which opinions win during the merge.
    dst_stronger_than_src = LayerModelUtils.is_stronger_than(model, target_item, prim_item.layer_item)
    prim_spec_path = prim_item.path
    def _move_prim_spec():
        omni.kit.commands.execute(
            "MovePrimSpecsToLayer",
            dst_layer_identifier=target_layer_identifier,
            src_layer_identifier=source_layer_identifier,
            prim_spec_path=str(prim_spec_path),
            dst_stronger_than_src=dst_stronger_than_src,
        )
    if LayerUtils.has_prim_spec(target_layer_identifier, prim_spec_path):
        _show_move_prim_spec_warning_prompt(target_layer_identifier, _move_prim_spec)
    else:
        _move_prim_spec()
@staticmethod
def move_layer(target_layer, source_layer, drop_location):
    """Moving from source layer to sublayer position of target layer.

    drop_location == -1 inserts as the first sublayer of *target_layer*;
    otherwise it is the sublayer index to insert at.  Returns False when
    the move is not allowed.
    """
    if not LayerModelUtils.can_move_layer(target_layer, source_layer, drop_location):
        return False
    source_parent_layer_identifier = source_layer.parent.identifier
    source_layer_position = LayerUtils.get_sublayer_position_in_parent(
        source_parent_layer_identifier, source_layer.identifier
    )
    target_parent_layer_identifier = target_layer.identifier
    if drop_location == -1:
        target_layer_position = 0
    else:
        target_layer_position = drop_location
    # Walk target's ancestors to see whether the source is one of them.
    parent = target_layer.parent
    while parent and parent != source_layer:
        parent = parent.parent
    # If it's to move from up to down.
    if (
        source_parent_layer_identifier == target_parent_layer_identifier and
        source_layer_position < target_layer_position
    ):
        # Removing the source first shifts later indices down by one.
        target_layer_position -= 1
    # If source layer is ancestor of target.
    if parent:
        remove_source = False
    else:
        remove_source = True
    omni.kit.commands.execute(
        "MoveSublayer",
        from_parent_layer_identifier=source_parent_layer_identifier,
        from_sublayer_position=source_layer_position,
        to_parent_layer_identifier=target_parent_layer_identifier,
        to_sublayer_position=target_layer_position,
        remove_source=remove_source,
    )
@staticmethod
def _traverse(layer_item, target_layer, source_layer):
parent = layer_item.parent
while parent and parent.identifier != layer_item.identifier:
parent = parent.parent
# Stop traverse since circular reference found
if parent:
return -1
if layer_item == target_layer:
return 0
elif layer_item == source_layer:
return 1
for sublayer in layer_item.sublayers:
ret = LayerModelUtils._traverse(sublayer, target_layer, source_layer)
if ret == 0 or ret == 1:
return ret
return -1
@staticmethod
def is_stronger_than(model, target_layer, source_layer):
"""Checkes if target_layer is stronger than source_layer"""
target_from_session_layer = target_layer.from_session_layer
source_from_session_layer = source_layer.from_session_layer
if target_from_session_layer and not source_from_session_layer:
return True
elif not target_from_session_layer and source_from_session_layer:
return False
else:
result = LayerModelUtils._traverse(model.root_layer_item, target_layer, source_layer)
return result == 0
@staticmethod
def save_model(model):
"""Saves all dirty layers of this model"""
dirty_layers = model.get_all_dirty_layer_identifiers()
if not dirty_layers:
return
def save_dirty_layers():
model.save_layers(dirty_layers)
if model.has_outdated_layers():
_show_outdated_layers_prompt(dirty_layers, save_dirty_layers, None, None)
else:
save_dirty_layers()
@staticmethod
def lock_layer(layer_item, locked):
    """Apply *locked* to *layer_item* and its whole sublayer subtree.

    Missing, read-only-on-disk, anonymous, and reserved layers keep their
    state (locking them is meaningless), but their children are still
    visited.  Everything runs inside a single undo group.
    """
    with omni.kit.undo.group():
        pending = [layer_item]
        while pending:
            current = pending.pop()
            skip = (
                current.missing
                or current.read_only_on_disk
                or current.anonymous
                or current.reserved
            )
            if not skip:
                current.locked = locked
            pending.extend(current.sublayers)
@staticmethod
def set_auto_authoring_mode(layer_model, enabled):
    # Thin setter kept for API symmetry with the other LayerModelUtils helpers.
    layer_model.auto_authoring_mode = enabled
| 32,974 | Python | 36.599772 | 119 | 0.5844 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_model.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .layer_settings import LayerSettings
from .layer_model_utils import LayerModelUtils
from .path_utils import PathUtils
from .layer_item import LayerItem
from .prim_spec_item import PrimSpecItem
from .globals import LayerGlobals
from pxr import Sdf
from pxr import Usd
from pxr import Tf
from pxr import Trace
from typing import Dict, Set, List, Callable
from omni.kit.usd.layers import LayerUtils
from omni.kit.async_engine import run_coroutine
import omni
import omni.ui as ui
import omni.kit.notification_manager as nm
import omni.kit.usd.layers as layers
import omni.usd
import os
import carb
class LayerModel(ui.AbstractItemModel):
def __init__(self, usd_context, layer_settings=LayerSettings()):
super().__init__()
self._usd_context = usd_context
self._layers = layers.get_layers(self._usd_context)
self._layers_state = self._layers.get_layers_state()
self._layers_auto_authoring = self._layers.get_auto_authoring()
self._layers_specs_linking = self._layers.get_specs_linking()
self._layers_live_syncing = self._layers.get_live_syncing()
self._app = omni.kit.app.get_app()
self._layer_settings = layer_settings
# Cache for fast sublayer accessing
self._sublayers_cache: Dict[str, List[LayerItem]] = {}
# Pending changed prim specs.
self._pending_changed_prim_spec_paths: Dict[str, Set[str]] = {}
self._dirtiness_listeners = []
self._stage_attach_listeners = []
self._muteness_scope_listeners = []
self._dirtiness_subscription = None
self._root_layer: LayerItem = None
self._session_layer: LayerItem = None
# It's an array as the same layer may appear multiple times
self._edit_target_identifier: str = None
# The string that the shown objects should have.
self._filter_name_text: str = None
# Notifications
self._base_layers_changed_notification = None
self._authoring_layer_changed_notification = None
self._events = self._usd_context.get_stage_event_stream()
self._stage_event_sub = self._events.create_subscription_to_pop(
self._on_stage_event, name="LayerModel Subscription"
)
self._merging_live_layers = False
self._on_attach()
def destroy(self):
self._clear()
self._dirtiness_listeners.clear()
self._stage_attach_listeners.clear()
self._muteness_scope_listeners.clear()
self._base_layers_changed_notification = None
self._authoring_layer_changed_notification = None
self._stage_event_sub = None
self._layers = None
self._layers_state = None
self._layers_auto_authoring = None
self._layers_specs_linking = None
self._layers_live_syncing = None
def _clear(self):
LayerGlobals.on_stage_detached()
if self._root_layer:
self._root_layer.destroy()
self._root_layer = None
if self._session_layer:
self._session_layer.destroy()
self._session_layer = None
# It's an array as the same layer may appear multiple times
self._edit_target_identifier = None
# The string that the shown objects should have.
self._filter_name_text = None
self._clear_sublayer_cache()
self._update_subscription = None
self._layers_event_subscription = None
def _initialize_subscriptions(self):
self._update_subscription = self._app.get_update_event_stream().create_subscription_to_pop(
self._on_update, name="Omni Layers"
)
self._layers_event_subscription = self._layers.get_event_stream().create_subscription_to_pop(
self._on_layer_events, name="Layers Model Extension"
)
@property
def usd_context(self):
return self._usd_context
@property
def root_layer_item(self):
return self._root_layer
@property
def session_layer_item(self):
return self._session_layer
@property
def is_in_live_session(self):
return self._layers_live_syncing.is_stage_in_live_session()
@property
def normal_mode(self):
return self._layers.get_edit_mode() == layers.LayerEditMode.NORMAL
@property
def auto_authoring_mode(self):
return self._layers.get_edit_mode() == layers.LayerEditMode.AUTO_AUTHORING
@auto_authoring_mode.setter
def auto_authoring_mode(self, value):
if value:
edit_mode = layers.LayerEditMode.AUTO_AUTHORING
else:
edit_mode = layers.LayerEditMode.NORMAL
self._layers.set_edit_mode(edit_mode)
@property
def spec_linking_mode(self):
return self._layers.get_edit_mode() == layers.LayerEditMode.SPECS_LINKING
@spec_linking_mode.setter
def spec_linking_mode(self, value):
if value:
edit_mode = layers.LayerEditMode.SPECS_LINKING
else:
edit_mode = layers.LayerEditMode.NORMAL
self._layers.set_edit_mode(edit_mode)
@property
def default_edit_layer(self):
"""It's only useful when edit mode == layers.LayerEditMode.AUTO_AUTHORING or SPECS_LINKING."""
return self._layers_auto_authoring.get_default_layer()
@default_edit_layer.setter
def default_edit_layer(self, value):
self._layers_auto_authoring.set_default_layer(value)
@property
def global_muteness_scope(self):
return self._layers_state.is_muteness_global()
@global_muteness_scope.setter
def global_muteness_scope(self, value: bool):
self._layers_state.set_muteness_scope(value)
def add_dirtiness_listener(self, fn: Callable[[], None]):
if fn and fn not in self._dirtiness_listeners:
self._dirtiness_listeners.append(fn)
def add_stage_attach_listener(self, fn: Callable[[bool], None]):
if fn and fn not in self._stage_attach_listeners:
self._stage_attach_listeners.append(fn)
def add_layer_muteness_scope_listener(self, fn: Callable[[], None]):
if fn and fn not in self._muteness_scope_listeners:
self._muteness_scope_listeners.append(fn)
def _on_layer_events(self, event: carb.events.IEvent):
payload = layers.get_layer_event_payload(event)
if not payload:
return
if payload.event_type == layers.LayerEventType.MUTENESS_STATE_CHANGED:
self._on_layer_muteness_changed()
elif payload.event_type == layers.LayerEventType.MUTENESS_SCOPE_CHANGED:
self._on_layer_muteness_scope_changed()
elif payload.event_type == layers.LayerEventType.LOCK_STATE_CHANGED:
self._on_layer_lock_update()
elif payload.event_type == layers.LayerEventType.EDIT_MODE_CHANGED:
self._on_layer_edit_mode_update()
elif payload.event_type == layers.LayerEventType.DEFAULT_LAYER_CHANGED:
self._on_default_edit_layer_update()
elif payload.event_type == layers.LayerEventType.EDIT_TARGET_CHANGED:
edit_target_identifier = LayerUtils.get_edit_target(self._usd_context.get_stage())
self._update_edit_target(edit_target_identifier)
elif payload.event_type == layers.LayerEventType.SUBLAYERS_CHANGED:
self._on_sublayer_changed()
elif payload.event_type == layers.LayerEventType.PRIM_SPECS_CHANGED:
for layer_identifier, spec_paths in payload.layer_spec_paths.items():
self._on_prim_spec_changed(layer_identifier, spec_paths)
elif payload.event_type == layers.LayerEventType.DIRTY_STATE_CHANGED:
self._update_dirtiness()
elif payload.event_type == layers.LayerEventType.SPECS_LINKING_CHANGED:
for layer_identifier, spec_paths in payload.layer_spec_paths.items():
self._on_spec_links_changed(spec_paths)
elif payload.event_type == layers.LayerEventType.SPECS_LOCKING_CHANGED:
self._on_spec_locks_changed(payload.identifiers_or_spec_paths)
elif payload.event_type == layers.LayerEventType.OUTDATE_STATE_CHANGED:
outdated_layer_identifiers = payload.identifiers_or_spec_paths
for layer_identifier in outdated_layer_identifiers:
sublayers = self._sublayers_cache.get(layer_identifier, [])
for sublayer in sublayers:
sublayer.on_layer_outdate_state_changed()
elif payload.event_type == layers.LayerEventType.LIVE_SESSION_STATE_CHANGED:
for _, layer_items in self._sublayers_cache.items():
for layer_item in layer_items:
layer_item.on_live_session_state_changed()
elif payload.event_type == layers.LayerEventType.LAYER_FILE_PERMISSION_CHANGED:
outdated_layer_identifiers = payload.identifiers_or_spec_paths
for layer_identifier in outdated_layer_identifiers:
sublayers = self._sublayers_cache.get(layer_identifier, [])
for sublayer in sublayers:
sublayer.on_layer_lock_changed()
def _on_spec_links_changed(self, spec_paths: List[str]):
for _, layer_items in self._sublayers_cache.items():
for layer_item in layer_items:
layer_item.update_spec_links_status(spec_paths)
def _on_spec_locks_changed(self, spec_paths: List[str]):
for _, layer_items in self._sublayers_cache.items():
for layer_item in layer_items:
layer_item.update_spec_locks_status(spec_paths)
def _on_prim_spec_changed(self, layer_identifier: str, prim_spec_paths: List[str]):
paths = self._pending_changed_prim_spec_paths.get(layer_identifier, None)
if not paths:
self._pending_changed_prim_spec_paths[layer_identifier] = set(prim_spec_paths)
else:
paths.update(prim_spec_paths)
def _on_sublayer_changed(self):
all_items = []
for _, sublayer_items in self._sublayers_cache.items():
all_items.extend(sublayer_items)
for item in all_items:
self._load_sublayers(item)
def _update_default_edit_layer(self, default_edit_layer):
if not self.normal_mode:
# Clears old status
for _, sublayer_items in self._sublayers_cache.items():
for sublayer_item in sublayer_items:
sublayer_item.edit_layer_in_auto_authoring_mode = False
sublayer_items = self._sublayers_cache.get(default_edit_layer, [])
for item in sublayer_items:
item.edit_layer_in_auto_authoring_mode = True
def _on_layer_edit_mode_update(self):
for _, sublayer_items in self._sublayers_cache.items():
for sublayer_item in sublayer_items:
sublayer_item.on_layer_edit_mode_changed()
edit_mode = self._layers.get_edit_mode()
if edit_mode == layers.LayerEditMode.AUTO_AUTHORING or edit_mode == layers.LayerEditMode.SPECS_LINKING:
for _, sublayer_items in self._sublayers_cache.items():
for sublayer_item in sublayer_items:
sublayer_item.is_edit_target = False
else:
for _, sublayer_items in self._sublayers_cache.items():
for sublayer_item in sublayer_items:
sublayer_item.edit_layer_in_auto_authoring_mode = False
self._edit_target_identifier = ""
def _on_default_edit_layer_update(self):
if not self.normal_mode and self._layer_settings.show_info_notification:
if not self._authoring_layer_changed_notification or self._authoring_layer_changed_notification.dismissed:
self._authoring_layer_changed_notification = nm.post_notification(
f"Default Edit Layer has been changed.",
duration=3,
status=nm.NotificationStatus.INFO)
self._update_default_edit_layer(self._layers_auto_authoring.get_default_layer())
def _on_layer_lock_update(self):
stage = self._usd_context.get_stage()
if not stage:
return
carb.log_info("Layer lock status changed.")
# If there is live update change of global muteness
for _, sublayers in self._sublayers_cache.items():
for sublayer in sublayers:
sublayer.on_layer_lock_changed()
if (
not self.normal_mode and
sublayer.identifier == self.default_edit_layer and
sublayer.locked
):
self.default_edit_layer = self.root_layer_item.identifier
# Switches to root layer as edit target if current one is locked.
edit_target_identifier = LayerUtils.get_edit_target(stage)
locked = LayerUtils.get_layer_lock_status(
stage.GetRootLayer(), self._edit_target_identifier
)
if not edit_target_identifier or locked:
self.set_edit_target(self._root_layer, True)
def _on_layer_muteness_changed(self):
carb.log_info("Muteness changed.")
# If there is live update change of global muteness
for _, sublayers in self._sublayers_cache.items():
for sublayer in sublayers:
sublayer.on_muteness_changed()
stage = self._usd_context.get_stage()
if stage:
edit_target_identifier = LayerUtils.get_edit_target(stage)
if (
not edit_target_identifier or
(self._edit_target_identifier and stage.IsLayerMuted(self._edit_target_identifier))
):
self.set_edit_target(self._root_layer, True)
def _on_layer_muteness_scope_changed(self):
carb.log_info("Muteness scope changed.")
# Notify all items to refresh ui
for _, sublayers in self._sublayers_cache.items():
for sublayer in sublayers:
sublayer.on_muteness_scope_changed()
# Notify listeners
for fn in self._muteness_scope_listeners:
fn()
def _update_dirtiness(self):
# FIXME: It has an issue that sometimes it cannot receive dirtiness
# change. It has to query this per frame currently.
notify = False
for _, items in self._sublayers_cache.items():
for item in items:
if item.layer and item.dirty != item.layer.dirty:
item.dirty = item.layer.dirty
if not notify:
notify = True
if notify:
for listener in self._dirtiness_listeners:
listener()
@Trace.TraceFunction
def _handle_pending_prim_specs(self):
# Handling pending changed prim spec paths
for changed_layer, paths in self._pending_changed_prim_spec_paths.items():
layer_items = self._sublayers_cache.get(changed_layer, [])
for layer_item in layer_items:
flags_updated, children_updated = layer_item.on_content_changed(paths)
for prim_spec_item in flags_updated:
prim_spec_item.update_flags()
for prim_spec_item in children_updated:
if prim_spec_item.path.IsAbsoluteRootPath():
self._item_changed(layer_item)
else:
self._item_changed(prim_spec_item)
self._pending_changed_prim_spec_paths.clear()
def _on_update(self, _):
stage = self._usd_context.get_stage()
if not stage:
return
if self._pending_changed_prim_spec_paths:
self._handle_pending_prim_specs()
if self._authoring_layer_changed_notification and self._authoring_layer_changed_notification.dismissed:
self._authoring_layer_changed_notification = None
def _reset_root(self):
stage = self._usd_context.get_stage()
if stage:
self._root_layer = LayerItem(
self._usd_context, stage.GetRootLayer().identifier, stage.GetRootLayer(), self, None
)
self._load_sublayers(self._root_layer)
self._session_layer = LayerItem(
self._usd_context, stage.GetSessionLayer().identifier, stage.GetSessionLayer(), self, None
)
self._load_sublayers(self._session_layer)
self._cache_sublayer(self._root_layer)
self._cache_sublayer(self._session_layer)
self._item_changed(None)
@Trace.TraceFunction
def _on_stage_event(self, stage_event):
if stage_event.type == int(omni.usd.StageEventType.OPENED):
self._on_attach()
elif stage_event.type == int(omni.usd.StageEventType.CLOSING):
self._on_detach()
elif stage_event.type == int(omni.usd.StageEventType.SAVED):
if self.root_layer_item and self.root_layer_item.layer:
# https://nvidia-omniverse.atlassian.net/browse/OM-34143
# It's on-the-fly save-as that only changes identifier
if self.root_layer_item.identifier != self.root_layer_item.layer.identifier:
# https://nvidia-omniverse.atlassian.net/browse/OM-34885
# Update current edit target identifier to avoid update it to custom data
# of root layer to make it dirty.
if self._edit_target_identifier == self.root_layer_item.identifier:
self._edit_target_identifier = self.root_layer_item.layer.identifier
self._sublayers_cache.pop(self.root_layer_item.identifier)
self._sublayers_cache[self.root_layer_item.layer.identifier] = [self.root_layer_item]
self.root_layer_item.update_flags()
elif stage_event.type == int(omni.usd.StageEventType.SETTINGS_SAVING):
stage = self._usd_context.get_stage()
LayerUtils.save_authoring_layer_to_custom_data(stage)
@Trace.TraceFunction
def _on_attach(self):
"""Called when opening a new stage"""
self._clear()
stage = self._usd_context.get_stage()
if stage:
LayerGlobals.on_stage_attached(stage)
# Restore authoring layer. Don't restore edit target when root is in
# a live session.
if not self._layers_live_syncing.get_current_live_session():
LayerUtils.restore_authoring_layer_from_custom_data(stage)
# Initialize edit target
edit_target_identifier = LayerUtils.get_edit_target(stage)
if edit_target_identifier:
self._edit_target_identifier = edit_target_identifier
else:
self._edit_target_identifier = stage.GetRootLayer().identifier
edit_target = Usd.EditTarget(stage.GetRootLayer())
stage.SetEditTarget(edit_target)
self._initialize_subscriptions()
# Initialize root items.
self._reset_root()
# Restores edit mode
if self._layer_settings.enable_auto_authoring_mode:
self.auto_authoring_mode = self._layer_settings.enable_auto_authoring_mode
# Restores edit mode
if self._layer_settings.enable_spec_linking_mode:
self.spec_linking_mode = self._layer_settings.enable_spec_linking_mode
# Notify listeners
for fn in self._stage_attach_listeners:
fn(True)
def _on_detach(self):
"""Called when close the stage"""
# Notify listeners
for fn in self._stage_attach_listeners:
fn(False)
self._clear()
self._item_changed(None)
def can_item_have_children(self, item):
stage = self._usd_context.get_stage()
if not stage:
return False
show_contents = self._layer_settings.show_layer_contents
show_metricsassembler = self._layer_settings.show_metricsassembler_layer
if item is None:
if show_contents:
return self._session_layer.filtered or self._root_layer.filtered
else:
return self._root_layer.filtered
prim_spec_item = None
layer_item = None
if show_contents:
if isinstance(item, PrimSpecItem):
prim_spec_item = item
elif isinstance(item, LayerItem):
prim_spec_item = item.absolute_root_spec
layer_item = item
elif isinstance(item, LayerItem):
layer_item = item
if not self._filter_name_text:
if show_metricsassembler:
if layer_item and len(layer_item.sublayers) > 0:
return True
else:
if layer_item:
filtered_sublayers = [sublayer for sublayer in layer_item.sublayers if not sublayer.identifier.startswith("metrics:")]
if len(filtered_sublayers) > 0:
return True
if prim_spec_item and prim_spec_item.prim_spec:
return len(prim_spec_item.prim_spec.nameChildren) > 0
else:
if layer_item:
if show_metricsassembler:
for sublayer in layer_item.sublayers:
if sublayer.filtered:
return True
else:
filtered_sublayers = [sublayer for sublayer in layer_item.sublayers if not sublayer.identifier.startswith("metrics:")]
for sublayer in filtered_sublayers:
if sublayer.filtered:
return True
if prim_spec_item and prim_spec_item.prim_spec:
for child in prim_spec_item.prim_spec.nameChildren:
layer_item = prim_spec_item.layer_item
child_item, _ = layer_item._get_item_from_cache(child.path)
if child_item and child_item.filtered:
return True
return False
def get_item_children(self, item):
"""Reimplemented from AbstractItemModel"""
if item is None:
if self._layer_settings.show_session_layer:
return [self._session_layer, self._root_layer]
else:
return [self._root_layer]
show_contents = self._layer_settings.show_layer_contents
show_metricsassembler = self._layer_settings.show_metricsassembler_layer
if not self._filter_name_text:
# If _filter_name_text is empty, then user didn't request filtered result and we can just return children.
if isinstance(item, PrimSpecItem):
if show_contents:
return item.children
else:
return None
elif isinstance(item, LayerItem):
if show_metricsassembler:
if show_contents:
return item.prim_specs + item.sublayers
else:
return item.sublayers
else:
filtered_sublayers = [sublayer for sublayer in item.sublayers if not sublayer.identifier.startswith("metrics:")]
if show_contents:
return item.prim_specs + filtered_sublayers
else:
return filtered_sublayers
else:
return None
else:
# Return
if isinstance(item, PrimSpecItem):
if show_contents:
return [child for child in item.children if child.filtered]
else:
return None
elif isinstance(item, LayerItem):
filtered_sublayers = [sublayer for sublayer in item.sublayers if sublayer.filtered]
if not show_metricsassembler:
filtered_sublayers = [sublayer for sublayer in filtered_sublayers if not sublayer.identifier.startswith("metrics:")]
if show_contents:
filtered_prims = [prim_spec for prim_spec in item.prim_specs if prim_spec.filtered]
else:
filtered_prims = []
return filtered_prims + filtered_sublayers
else:
return None
def _update_edit_target(self, layer_identifier: str):
if (
layer_identifier
and layer_identifier != self._edit_target_identifier
and layer_identifier in self._sublayers_cache
):
new_edit_target_identifier = layer_identifier
edit_target_items = self._sublayers_cache.get(layer_identifier)
if self.normal_mode and self._layer_settings.show_info_notification:
if not self._authoring_layer_changed_notification or self._authoring_layer_changed_notification.dismissed:
self._authoring_layer_changed_notification = nm.post_notification(
"Authoring Layer has been changed.",
duration=3,
status=nm.NotificationStatus.INFO)
carb.log_info(f"Switching authoring layer from {self._edit_target_identifier} to {layer_identifier}")
# Clear old authoring items
old_edit_target_items = self._sublayers_cache.get(self._edit_target_identifier, [])
for layer_item in old_edit_target_items:
layer_item.is_edit_target = False
self._edit_target_identifier = new_edit_target_identifier
for layer_item in edit_target_items:
layer_item.is_edit_target = True
def set_edit_target(self, layer_item: LayerItem, saved=False):
if not LayerModelUtils.can_set_as_edit_target(layer_item):
return
omni.kit.commands.execute("SetEditTarget", layer_identifier=layer_item.identifier)
def get_item_value_model_count(self, item):
"""Reimplemented from AbstractItemModel"""
return 7
def get_item_value_model(self, item, column_id):
"""Reimplemented from AbstractItemModel"""
if item is None:
return None
return item.get_item_value_model(column_id)
def drop_accepted(self, target_item, source, drop_location=-1):
"""Reimplemented from AbstractItemModel. Called to highlight target when drag and drop."""
if not source:
return False
if target_item and isinstance(target_item, LayerItem) and isinstance(source, PrimSpecItem):
return LayerModelUtils.can_move_prim_spec_to_layer(target_item, source)
elif target_item and isinstance(target_item, LayerItem) and isinstance(source, LayerItem):
return LayerModelUtils.can_move_layer(target_item, source, drop_location)
elif isinstance(source, str) or type(source).__name__ in ["NucleusItem", "FileSystemItem"]:
# Drag and drop from the content browser
if target_item and isinstance(target_item, LayerItem):
if not LayerModelUtils.can_create_layer_to_location(target_item, drop_location):
return False
return True
try:
from omni.kit.widget.versioning.checkpoints_model import CheckpointItem
if isinstance(source, CheckpointItem):
if target_item and isinstance(target_item, LayerItem):
if not LayerModelUtils.can_create_layer_to_location(target_item, drop_location):
return False
return True
except:
pass
return False
def drop(self, target_item, source, drop_location=-1):
"""Reimplemented from AbstractItemModel. Called when dropping something to the item."""
if not source:
return
if not target_item:
target_item = self.root_layer_item
sublayer_position = 0
elif isinstance(target_item, PrimSpecItem):
target_item = target_item.layer_item
sublayer_position = 0
elif not isinstance(target_item, LayerItem):
return
elif drop_location != -1 and target_item.parent:
# Finds insert position in its parent
target_item = target_item.parent
sublayer_position = drop_location - len(target_item.prim_specs)
else:
sublayer_position = 0
if not target_item:
return
if type(source).__name__ in ["NucleusItem", "FileSystemItem"]:
# Drag and drop from the TreeView of Content Browser
source = source._path
if type(source).__name__ == "CheckpointItem":
# Drag and drop from the TreeView of Content Browser
source = source.get_full_url()
if (
(
# Cannot modify target layer when it's in live session already
(target_item.is_in_live_session or self.root_layer_item.is_in_live_session) and
not target_item.is_live_session_layer
) or
(
# Cannot add sublayer for live session layer.
(isinstance(source, LayerItem) or isinstance(source, str)) and
target_item.is_live_session_layer
)
):
nm.post_notification("Cannot modify target layer in live-syncing mode.")
return
if target_item and isinstance(target_item, LayerItem) and isinstance(source, PrimSpecItem) and drop_location == -1:
LayerModelUtils.move_prim_spec(self, target_item, source)
elif target_item and isinstance(target_item, LayerItem) and isinstance(source, LayerItem):
LayerModelUtils.move_layer(target_item, source, sublayer_position)
elif isinstance(source, str):
# Drag and drop from the content browser
with omni.kit.undo.group():
for source_url in source.splitlines():
if omni.usd.is_usd_readable_filetype(source_url):
omni.kit.commands.execute(
"CreateSublayer",
layer_identifier=target_item.identifier,
sublayer_position=sublayer_position,
new_layer_path=source_url,
transfer_root_content=False,
create_or_insert=False,
)
def get_drag_mime_data(self, item):
"""Returns Multipurpose Internet Mail Extensions (MIME) data for be able to drop this item somewhere"""
# As we don't do Drag and Drop to the operating system, we return the string.
if isinstance(item, LayerItem):
return item.identifier if item else ""
elif isinstance(item, PrimSpecItem):
return str(item.path) if item else "/"
@Trace.TraceFunction
def find_all_specs(self, paths: List[Sdf.Path]):
"""Return the list of all the parent nodes and the node representing the given path"""
if self._edit_target_identifier:
edit_target_items = self._sublayers_cache.get(self._edit_target_identifier, [])
if edit_target_items:
edit_target = next(iter(edit_target_items))
return edit_target, edit_target.find_all_specs(paths)
return None, []
@Trace.TraceFunction
def filter_by_text(self, filter_name_text):
"""Specify the filter string that is used to reduce the model"""
if self._filter_name_text == filter_name_text:
return
self._filter_name_text = filter_name_text
if filter_name_text:
self._session_layer.prefilter(filter_name_text.lower())
self._root_layer.prefilter(filter_name_text.lower())
self._item_changed(None)
def refresh(self):
"""Force full re-update"""
self._item_changed(None)
for _, sublayers in self._sublayers_cache.items():
for sublayer in sublayers:
self._item_changed(sublayer)
def has_outdated_layers(self):
for _, sublayers in self._sublayers_cache.items():
if sublayers:
sublayer = next(iter(sublayers))
if not sublayer.latest:
return True
return False
def has_dirty_layers(self, include_omni_layers=True, include_local_layers=True):
stage = self._usd_context.get_stage()
sublayers = LayerUtils.get_all_sublayers(stage)
for sublayer_identifier in sublayers:
is_omni_layer = PathUtils.is_omni_path(sublayer_identifier)
if Sdf.Layer.IsAnonymousLayerIdentifier(sublayer_identifier):
continue
if not include_omni_layers and is_omni_layer:
continue
if not include_local_layers and not is_omni_layer:
continue
sublayer = Sdf.Find(sublayer_identifier)
if not sublayer:
continue
is_writable = LayerUtils.is_layer_writable(sublayer_identifier)
if is_writable and sublayer.dirty:
return True
return False
def get_all_dirty_layer_identifiers(self, include_omni_layers=True, include_local_layers=True):
dirty_layer_sublayers = []
stage = self._usd_context.get_stage()
sublayers = LayerUtils.get_all_sublayers(stage)
for sublayer_identifier in sublayers:
if Sdf.Layer.IsAnonymousLayerIdentifier(sublayer_identifier):
continue
if not include_omni_layers and PathUtils.is_omni_path(sublayer_identifier):
continue
if not include_local_layers and not PathUtils.is_omni_path(sublayer_identifier):
continue
sublayer = Sdf.Find(sublayer_identifier)
if not sublayer:
continue
is_writable = LayerUtils.is_layer_writable(sublayer_identifier)
if is_writable and sublayer.dirty:
dirty_layer_sublayers.append(sublayer_identifier)
return dirty_layer_sublayers
def get_layer_item_by_identifier(self, layer_identifier):
"""Find the first layer item that has the identifier"""
layer_items = self._sublayers_cache.get(layer_identifier, None)
return next(iter(layer_items)) if layer_items else None
def has_any_layers_locked(self):
all_sublayers = LayerUtils.get_all_sublayers(self._usd_context.get_stage())
for sublayer in all_sublayers:
locked = self._layers_state.is_layer_locked(sublayer)
if locked:
return True
return False
def save_layers(self, layer_identifiers, on_save_done: Callable[[bool, str, List[str]], None] = None):
async def save_layers():
result, error, saved_layers = await self._usd_context.save_layers_async(
"", layer_identifiers
)
if on_save_done:
on_save_done(result, error, saved_layers)
run_coroutine(save_layers())
def _is_sublayer_cached(self, layer_item: LayerItem):
cached_items = self._sublayers_cache.get(layer_item.identifier, None)
if not cached_items:
return False
return layer_item in cached_items
def _cache_sublayer(self, layer_item: LayerItem):
cached_items = self._sublayers_cache.get(layer_item.identifier, None)
if cached_items:
if layer_item not in cached_items:
cached_items.append(layer_item)
else:
self._sublayers_cache[layer_item.identifier] = [layer_item]
def _remove_cached_sublayer(self, layer_item: LayerItem):
cached_items = self._sublayers_cache.get(layer_item.identifier, None)
if layer_item in cached_items:
cached_items.remove(layer_item)
layer_item.destroy()
def _clear_sublayer_cache(self):
for _, sublayers in self._sublayers_cache.items():
for sublayer in sublayers:
sublayer.destroy()
self._sublayers_cache.clear()
def _gather_all_sublayer_descendants(self, item: LayerItem):
all_children = item.sublayers
for child in item.sublayers:
all_children.extend(self._gather_all_sublayer_descendants(child))
return all_children
@Trace.TraceFunction
def _load_sublayers(self, layer_item: LayerItem):
carb.log_info(f"Load sublayers of layer {layer_item.identifier}.")
if not layer_item.layer:
return
layer = layer_item.layer
all_sublayer_items = []
sublayer_paths = layer.subLayerPaths
changed = False
index = 0
old_sublayer_items = layer_item.sublayers[:]
for sublayer_path in sublayer_paths:
# If it existed item, use it, don't recreate it.
# If it's re-ordered, refresh list.
sublayer_identifier = layer.ComputeAbsolutePath(sublayer_path)
if self._layers_auto_authoring.is_auto_authoring_layer(sublayer_identifier):
continue
found_old = False
index_matched = False
for i in range(len(old_sublayer_items)):
if os.path.normpath(sublayer_identifier) == os.path.normpath(old_sublayer_items[i].identifier):
found_old = old_sublayer_items[i]
index_matched = i == index
if not index_matched:
changed = True
if found_old:
sublayer_item = found_old
old_sublayer_items.remove(found_old)
else:
# Create a new item
sublayer = LayerUtils.find_layer(sublayer_identifier)
sublayer_item = LayerItem(self._usd_context, sublayer_identifier, sublayer, self, layer_item)
# Checks if there are any circular references.
# If yes, skips to load its sublayer tree.
parent = sublayer_item.parent
while parent and parent.identifier != sublayer_item.identifier:
parent = parent.parent
if not parent:
self._load_sublayers(sublayer_item)
# Add cache item
self._cache_sublayer(sublayer_item)
# Initialize flags
sublayer_item.update_flags()
all_sublayer_items.append(sublayer_item)
index += 1
# This means some sublayers have been deleted
if len(old_sublayer_items) > 0:
changed = True
# Remove all cached items and all its descendants
all_destroyed_items = []
for item in old_sublayer_items:
all_destroyed_items.append(item)
all_destroyed_items.extend(self._gather_all_sublayer_descendants(item))
for descendant in all_destroyed_items:
self._remove_cached_sublayer(descendant)
layer_item.sublayers = all_sublayer_items
if changed:
self._item_changed(layer_item)
def flatten_all_layers(self):
if self.has_any_layers_locked():
return
omni.kit.commands.execute("FlattenLayers")
| 39,909 | Python | 40.017472 | 138 | 0.605477 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/selection_watch.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from pxr import Trace, Sdf
from .layer_item import LayerItem
from .prim_spec_item import PrimSpecItem
import weakref
import omni.usd
class SelectionWatch(object):
    """
    Keeps the layer TreeView selection and the Kit/USD stage selection in
    sync: stage selection changes are mirrored into the TreeView, and
    TreeView selection changes are pushed back to the stage.
    """
    def __init__(self, usd_context, tree_view, tree_view_delegate):
        """Subscribe to stage events of `usd_context` and attach `tree_view`."""
        self._usd_context = usd_context
        self._selection = None
        # Re-entrancy guard: set while this object itself is changing a selection.
        self._in_selection = False
        self._select_with_command = True
        # weakref.ref to the single currently focused LayerItem, or None.
        self._current_selected_layer_item = None
        self._last_selected_prim_paths = []
        self._on_layer_selection_changed_listeners = set([])
        if self._usd_context is not None:
            self._selection = self._usd_context.get_selection()
            self._events = self._usd_context.get_stage_event_stream()
            self._stage_event_sub = self._events.create_subscription_to_pop(
                self._on_stage_event, name="Layer 2 Selection Update"
            )
        self.set_tree_view(tree_view, tree_view_delegate)
    def destroy(self):
        """Drop subscriptions/references; notifies listeners with None first."""
        for listener in self._on_layer_selection_changed_listeners:
            listener(None)
        self._on_layer_selection_changed_listeners.clear()
        self._current_selected_layer_item = None
        self._tree_view = None
        self._selection = None
        self._events = None
        self._stage_event_sub = None
    @property
    def select_with_command(self):
        # When True, selection changes go through the undoable "SelectPrims"
        # command; when False, they are applied to the selection directly.
        return self._select_with_command
    @select_with_command.setter
    def select_with_command(self, value):
        self._select_with_command = value
    def set_tree_view(self, tree_view, tree_view_delegate):
        """Replace TreeView that should show the selection"""
        self._tree_view = tree_view
        self._tree_view_delegate = tree_view_delegate
        self._tree_view.set_selection_changed_fn(self._on_widget_selection_changed)
        # Bring the new view in line with the current stage selection.
        self._on_kit_selection_changed()
    def _on_stage_event(self, event):
        """Called by stage_event_stream"""
        if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
            self._on_kit_selection_changed()
    @Trace.TraceFunction
    def _on_kit_selection_changed(self):
        """Send the selection from Kit to TreeView"""
        if not self._tree_view or self._in_selection:
            return
        # Make sure it's a new selection. It happens that omni.usd sends the same selection twice. No sorting because
        # the order of selection is important.
        prim_paths = self._selection.get_selected_prim_paths()
        if prim_paths == self._last_selected_prim_paths:
            return
        self._last_selected_prim_paths = prim_paths
        sdf_paths = [Sdf.Path(path) for path in prim_paths]
        # Get the selected item and its parents. Expand all the parents of the new selection.
        edit_target, selection = self._tree_view.model.find_all_specs(sdf_paths)
        if selection:
            self._tree_view.set_expanded(edit_target, True, False)
            for item in selection:
                if not item:
                    continue
                parent_item = item.parent
                parent_chain = []
                while parent_item and parent_item != edit_target.absolute_root_spec:
                    parent_chain.append(parent_item)
                    parent_item = parent_item.parent
                # Expand top-down so each level exists before its child opens.
                for parent in reversed(parent_chain):
                    self._tree_view.set_expanded(parent, True, False)
        # Send all of this to TreeView
        self._in_selection = True
        self._tree_view.selection = selection
        self._in_selection = False
    @Trace.TraceFunction
    def _on_widget_selection_changed(self, selection):
        """Send the selection from TreeView to Kit"""
        if self._in_selection:
            return
        if self._tree_view_delegate:
            self._tree_view_delegate.on_selection_changed(selection)
        # Track whether the single focused LayerItem changed; listeners are only
        # notified on a change.
        changed = False
        selected_item = None
        if len(selection) == 1 and isinstance(selection[0], LayerItem):
            selected_item = selection[0]
        if (
            not self._current_selected_layer_item
            or not self._current_selected_layer_item()
            or self._current_selected_layer_item() != selected_item
        ):
            changed = True
        if selected_item:
            self._current_selected_layer_item = weakref.ref(selected_item)
        else:
            self._current_selected_layer_item = None
        if changed:
            for listener in self._on_layer_selection_changed_listeners:
                if self._current_selected_layer_item and self._current_selected_layer_item():
                    listener(self._current_selected_layer_item())
                else:
                    listener(None)
        # Send the selection to Kit
        prim_paths = [item.path.pathString for item in selection if isinstance(item, PrimSpecItem)]
        if prim_paths == self._last_selected_prim_paths:
            return
        self._in_selection = True
        if self._select_with_command:
            # NOTE(review): relies on omni.kit.commands being imported by the
            # hosting app; this module does not import it directly — confirm.
            omni.kit.commands.execute(
                "SelectPrims", old_selected_paths=self._last_selected_prim_paths,
                new_selected_paths=prim_paths, expand_in_stage=True
            )
        else:
            self._selection.set_selected_prim_paths(prim_paths, False)
        self._in_selection = False
        self._last_selected_prim_paths = prim_paths
    def add_layer_selection_changed_fn(self, fn):
        """Register `fn(layer_item_or_None)` to be called when focus changes."""
        self._on_layer_selection_changed_listeners.add(fn)
    def remove_layer_selection_changed_fn(self, fn):
        """Unregister a previously added listener; no-op if unknown."""
        self._on_layer_selection_changed_listeners.discard(fn)
    def get_current_focused_layer_item(self):
        """Return the focused LayerItem, or None when unset or expired."""
        return self._current_selected_layer_item() if self._current_selected_layer_item else None
    def set_current_focused_layer_item(self, layer_item):
        """Programmatically select `layer_item` in the TreeView."""
        if layer_item:
            self._tree_view.selection = [layer_item]
| 6,517 | Python | 38.26506 | 117 | 0.627129 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/extension.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Callable
import carb
import os
import asyncio
import omni.ext
import omni.usd
import omni.kit.ui
import omni.kit.app
import omni.ui as ui
import omni.kit.notification_manager as nm
import omni.kit.usd.layers as layers
from functools import partial
from omni.kit.usd.layers import LayerUtils
from typing import List
from pxr import Sdf
from .window import LayerWindow
from .layer_item import LayerItem
from .layer_model import LayerModel
from .layer_link_window import LayerLinkWindow
from .layer_model_utils import LayerModelUtils
from .actions import ActionManager
_extension_instance = None
class LayerExtension(omni.ext.IExt):
    """The entry point for Layer 2"""
    CONTEXT_MENU_ITEM_INSERT_SUBLAYER = "Insert As Sublayer"
    WINDOW_NAME = "Layer"
    MENU_PATH = f"Window/{WINDOW_NAME}"
    def on_startup(self, ext_id):
        """Extension entry: creates the Layer window, menu entry and subscriptions."""
        global _extension_instance
        _extension_instance = self
        # Register command related actions
        self._ext_name = omni.ext.get_extension_name(ext_id)
        self._action_manager = ActionManager()
        self._action_manager.on_startup(self)
        ui.Workspace.set_show_window_fn(LayerExtension.WINDOW_NAME, partial(self.show_window, None))
        self._usd_context = omni.usd.get_context()
        self._layers = layers.get_layers(self._usd_context)
        self._selection_listeners = set([])
        self._window = None
        try:
            self._menu = omni.kit.ui.get_editor_menu().add_item(
                LayerExtension.MENU_PATH, self.show_window, toggle=True, value=True
            )
        except Exception:
            # Editor menu may be unavailable (e.g. headless); window still works.
            self._menu = None
        ui.Workspace.show_window(LayerExtension.WINDOW_NAME)
        self._link_window = None
        app = omni.kit.app.get_app()
        manager = app.get_extension_manager()
        self._context_icon_menu_items = []
        self._extensions_subscription = manager.get_change_event_stream().create_subscription_to_pop(
            self._on_event, name="omni.kit.widget.layers"
        )
        self._menu_registered = False
        self._on_layer_edit_mode_update(self._layers.get_edit_mode())
        self._layer_edit_mode_subscription = self._layers.get_event_stream().create_subscription_to_pop_by_type(
            int(layers.LayerEventType.EDIT_MODE_CHANGED),
            self._on_layer_events, name="Layers Extension"
        )
    def _on_layer_events(self, event: carb.events.IEvent):
        """Handle layer events; reacts only to EDIT_MODE_CHANGED."""
        payload = layers.get_layer_event_payload(event)
        if payload and payload.event_type == layers.LayerEventType.EDIT_MODE_CHANGED:
            edit_mode = self._layers.get_edit_mode()
            self._on_layer_edit_mode_update(edit_mode)
    def _on_layer_edit_mode_update(self, edit_mode):
        """Show the link window in specs-linking mode; hide it otherwise."""
        if edit_mode == layers.LayerEditMode.SPECS_LINKING:
            if not self._link_window:
                self._link_window = LayerLinkWindow()
        elif self._link_window:
            self._link_window.destroy()
            self._link_window = None
    def _on_event(self, event):
        # Deferred one-shot: register context menus on the first extension
        # change event, once the extension system has settled.
        if not self._menu_registered:
            self._register_menus()
            self._menu_registered = True
    def on_shutdown(self):
        """Extension exit: tears down windows, menus, listeners, subscriptions."""
        self._action_manager.on_shutdown()
        # Clean up global resources
        LayerModelUtils.on_shutdown()
        global _extension_instance
        _extension_instance = None
        if self._link_window:
            self._link_window.destroy()
            self._link_window = None
        # remove menu
        self._menu = None
        if self._window:
            self._window.destroy()
            self._window = None
        for fn in self._selection_listeners:
            fn(None)
        self._selection_listeners.clear()
        self._menu_registered = False
        self._extensions_subscription = None
        self._unregister_menus()
        self._layers = None
        ui.Workspace.set_show_window_fn(LayerExtension.WINDOW_NAME, None)
    def _get_content_window(self):
        """Return the content-browser window, or None if that extension is absent."""
        try:
            import omni.kit.window.content_browser as content
            return content.get_content_window()
        except ImportError:
            pass
        return None
    def _unregister_menus(self):
        """Remove the 'Insert As Sublayer' entry from the content browser."""
        self._context_icon_menu_items.clear()
        extension_manager = omni.kit.app.get_app_interface().get_extension_manager()
        content_window = self._get_content_window()
        if content_window and extension_manager.is_extension_enabled("omni.kit.window.content_browser"):
            content_window.delete_context_menu(self.CONTEXT_MENU_ITEM_INSERT_SUBLAYER)
    def _register_menus(self):
        """Add the 'Insert As Sublayer' entry to the content browser, if present."""
        content_window = self._get_content_window()
        if content_window:
            content_window.add_context_menu(
                self.CONTEXT_MENU_ITEM_INSERT_SUBLAYER,
                "external_link.svg",
                lambda b, c: self._on_icon_menu_click(b, c),
                LayerExtension._is_show_insert_visible,
                separator_name=None,
                index=3 # position within the content browser context menu
            )
    @staticmethod
    def _is_show_insert_visible(content_url):
        # Only offer the menu entry for writable USD file types.
        return omni.usd.is_usd_writable_filetype(content_url)
    def _on_icon_menu_click(self, menu, value):
        """Insert the clicked file (`value`) as sublayer 0 of the stage's root layer."""
        file_path = value
        stage = self._usd_context.get_stage()
        if not stage:
            return
        root_layer = stage.GetRootLayer()
        if root_layer.identifier != file_path:
            # Reject duplicates: compare against existing sublayers by absolute path.
            found = False
            for sublayer_path in root_layer.subLayerPaths:
                absolute_path = root_layer.ComputeAbsolutePath(sublayer_path)
                if os.path.normpath(value) == os.path.normpath(absolute_path):
                    found = True
            if found:
                nm.post_notification(
                    f"Duplicate sublayer found in the Root Layer.",
                    status=nm.NotificationStatus.WARNING,
                    duration=4
                )
            else:
                LayerUtils.insert_sublayer(root_layer, 0, file_path)
        else:
            nm.post_notification(
                f"Cannot insert Root Layer as sublayer.",
                status=nm.NotificationStatus.WARNING,
                duration=4
            )
    async def _destroy_window_async(self):
        # wait one frame, this is due to the one frame defer
        # in Window::_moveToMainOSWindow()
        await omni.kit.app.get_app().next_update_async()
        if self._window:
            self._window.destroy()
            self._window = None
    def _visiblity_changed_fn(self, visible):
        # NOTE: method name keeps the historical "visiblity" typo.
        # Keeps the Window menu checkbox in sync; destroys the window on hide.
        if self._menu:
            omni.kit.ui.get_editor_menu().set_value(LayerExtension.MENU_PATH, visible)
        if not visible:
            # Destroy the window, since we are creating new window
            # in show_window
            asyncio.ensure_future(self._destroy_window_async())
    def show_window(self, menu, value):
        """Show (value=True) or hide the Layer window, re-wiring selection listeners."""
        if value and not self._window:
            self._window = LayerWindow(LayerExtension.WINDOW_NAME, self._usd_context)
            self._window.set_visibility_changed_listener(self._visiblity_changed_fn)
            # Register selection listeners again
            for fn in self._selection_listeners:
                self._window.add_layer_selection_changed_fn(fn)
        elif self._window:
            self._window.set_visible(value)
            for fn in self._selection_listeners:
                fn(None)
    def get_current_focused_layer_item(self) -> LayerItem:
        """Gets the current focused layer item in Layer Window."""
        if not self._window:
            return None
        return self._window.get_current_focused_layer_item()
    def set_current_focused_layer_item(self, layer_identifier: str):
        """Sets the focused layer item in Layer Window."""
        if not self._window:
            return
        return self._window.set_current_focused_layer_item(layer_identifier)
    def remove_layer_selection_changed_fn(self, fn: Callable[[LayerItem], None]):
        """Removes selection listener."""
        if not fn:
            return
        self._selection_listeners.discard(fn)
        if not self._window:
            return
        return self._window.remove_layer_selection_changed_fn(fn)
    def add_layer_selection_changed_fn(self, fn: Callable[[LayerItem], None]):
        """Adds a listener called with the focused LayerItem (or None)."""
        if not fn:
            return
        self._selection_listeners.add(fn)
        if not self._window:
            return
        return self._window.add_layer_selection_changed_fn(fn)
    def get_layer_model(self) -> LayerModel:
        """Return the window's LayerModel, or None when the window is closed."""
        if not self._window:
            return None
        return self._window.get_layer_model()
    def get_selected_items(self) -> List[ui.AbstractItem]:
        """Return a copy of the items selected in the layer TreeView."""
        if not self._window:
            return []
        return list(self._window.layer_view.selection)
    @staticmethod
    def get_instance():
        """Return the global LayerExtension instance (None when not started)."""
        return _extension_instance
| 9,331 | Python | 32.934545 | 112 | 0.621262 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/__init__.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .extension import LayerExtension
from .context_menu import ContextMenu
from .layer_item import LayerItem
from .prim_spec_item import PrimSpecItem
from .layer_model import LayerModel
from omni.kit.usd.layers import LayerUtils
def get_instance():
    """Return the global LayerExtension instance (None if the extension is not started)."""
    return LayerExtension.get_instance()
| 727 | Python | 35.399998 | 76 | 0.808803 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/file_picker.py | import omni
import os
import omni.client
from omni.kit.widget.prompt import PromptManager, PromptButtonInfo
from typing import List, Tuple, Callable
from omni.kit.widget.filebrowser import FileBrowserItem
from .filebrowser import FileBrowserMode, FileBrowserSelectionType
from .filebrowser.app_filebrowser import FileBrowserUI
class FilePicker:
    """A file open/save dialog built on top of :class:`FileBrowserUI`.

    In SAVE mode an overwrite-confirmation prompt is shown before the
    file-selected callback fires, and multi-selection is always disabled.
    """
    def __init__(
        self,
        title: str,
        mode: FileBrowserMode,
        file_type: FileBrowserSelectionType,
        filter_options: List[Tuple[str, str]],
        save_extensions: List[str] = None,
        allow_multi_selections=False,
        options_pane_build_fn: Callable[[List[str]], bool] = None,
        on_selection_changed: Callable[[List[str]], bool] = None,
        **kwargs
    ):
        """Create the picker.

        Args:
            title: Dialog title.
            mode: FileBrowserMode.OPEN or FileBrowserMode.SAVE.
            file_type: Selection type filter.
            filter_options: (pattern, description) pairs for the filter combo.
            save_extensions: Extensions offered when saving; None means [].
            allow_multi_selections: Ignored (forced False) in SAVE mode.
            options_pane_build_fn: Optional builder for the options pane.
            on_selection_changed: Optional selection-changed callback.
        """
        # Fix: default was a shared mutable list ([]); normalize None here.
        if save_extensions is None:
            save_extensions = []
        self._mode = mode
        self._app = omni.kit.app.get_app()
        self._open_handler = None
        self._cancel_handler = None
        if self._mode == FileBrowserMode.SAVE:
            self._allow_multi_selections = False
        else:
            self._allow_multi_selections = allow_multi_selections
        self._options_pane_build_fn = options_pane_build_fn
        self._on_selection_changed = on_selection_changed
        if self._options_pane_build_fn:
            build_fn = self._build_options_pane
        else:
            build_fn = None
        if self._on_selection_changed:
            selection_fn = self._selection_changed
        else:
            selection_fn = None
        self._ui_handler = FileBrowserUI(
            title, mode, file_type, filter_options, save_extensions, self._allow_multi_selections, build_fn, selection_fn, **kwargs
        )
        self._prompt = None
    @staticmethod
    def _to_file_paths(paths: List[FileBrowserItem]) -> List[str]:
        """Normalize a mixed list of FileBrowserItem/str to path strings, skipping folders."""
        file_paths = []
        for path_item in paths:
            if isinstance(path_item, FileBrowserItem):
                if path_item.is_folder:
                    continue
                file_paths.append(path_item.path)
            else:
                file_paths.append(path_item)
        return file_paths
    def _selection_changed(self, paths: List[FileBrowserItem]):
        """Forward browser selection changes to the user callback as plain paths."""
        if self._on_selection_changed:
            self._on_selection_changed(self._to_file_paths(paths))
    def _build_options_pane(self, paths: List[FileBrowserItem]):
        """Build the custom options pane; returns False when no builder is set."""
        if self._options_pane_build_fn:
            return self._options_pane_build_fn(self._to_file_paths(paths))
        else:
            return False
    def _save_and_prompt_if_exists(self, file_path: str, file_save_handler: Callable[[str, bool], None] = None):
        """Invoke `file_save_handler(path, overwrite)`, asking first if `file_path` exists."""
        result, _ = omni.client.stat(file_path)
        existed = result == omni.client.Result.OK
        if existed:
            PromptManager.post_simple_prompt(
                f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Overwrite',
                f"File {os.path.basename(file_path)} already exists, do you want to overwrite it?",
                ok_button_info=PromptButtonInfo("YES", lambda: file_save_handler(file_path, True)),
                cancel_button_info=PromptButtonInfo("No")
            )
        elif file_save_handler:
            file_save_handler(file_path, False)
    def _on_file_open(self, paths: List[str]):
        """Dispatch the confirmed selection (with an overwrite prompt in SAVE mode)."""
        if not paths:
            return
        def file_open(paths: List[str], overwrite: bool):
            if not self._open_handler:
                return
            self._open_handler(paths, overwrite)
        if self._mode == FileBrowserMode.SAVE:
            # SAVE is single-selection; the last entry is the chosen target.
            self._save_and_prompt_if_exists(paths[-1], lambda path, overwrite: file_open([path], overwrite))
        else:
            file_open(paths, False)
    def _on_cancel_open(self):
        if self._cancel_handler:
            self._cancel_handler()
    def set_file_selected_fn(self, file_open_handler: Callable[[List[str], bool], None]):
        """Set the callback invoked as fn(paths, overwrite) when files are chosen."""
        self._open_handler = file_open_handler
    def set_cancel_fn(self, cancel_handler: Callable[[], None]):
        """Set the callback invoked when the dialog is cancelled."""
        self._cancel_handler = cancel_handler
    def show(self, dir: str = None, filename: str = None):
        """Open the dialog, optionally pre-setting directory and filename."""
        if self._ui_handler:
            if dir:
                self._ui_handler.set_current_directory(dir)
            if filename:
                self._ui_handler.set_current_filename(filename)
            self._ui_handler.open(self._on_file_open, self._on_cancel_open)
    def hide(self):
        """Close the dialog if it is open."""
        if self._ui_handler:
            self._ui_handler.hide()
    def set_current_directory(self, dir: str):
        if self._ui_handler:
            self._ui_handler.set_current_directory(dir)
    def set_current_filename(self, filename: str):
        if self._ui_handler:
            self._ui_handler.set_current_filename(filename)
    def get_current_filename(self):
        """Return the filename currently typed/selected, or None."""
        if self._ui_handler:
            return self._ui_handler.get_current_filename()
        return None
    def destroy(self):
        """Release callbacks, any prompt, and the underlying browser UI."""
        self._options_pane_build_fn = None
        self._on_selection_changed = None
        if self._prompt:
            self._prompt.destroy()
            self._prompt = None
        self._open_handler = None
        self._cancel_handler = None
        if self._ui_handler:
            self._ui_handler.destroy()
            self._ui_handler = None
| 5,440 | Python | 33.656051 | 131 | 0.59136 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/context_menu.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import weakref
import carb
import omni.kit.context_menu
import omni.kit.ui
import omni.kit.undo
import omni.kit.notification_manager as nm
from omni import ui
from functools import partial
from pxr import Sdf
from .prim_spec_item import PrimSpecItem, PrimSpecSpecifier
from .layer_item import LayerItem
from .layer_model_utils import LayerModelUtils
from omni.kit.usd.layers import LayerUtils
from .singleton import Singleton
from omni.kit.context_menu import ContextMenuExtension
@Singleton
class CustomMenuList:
    """
    The singleton object that holds custom context menu entries. Other
    extensions can add items to this object using `ContextMenu.get_instance`.
    """
    def __init__(self):
        # menu_id -> list of menu-description dicts, in registration order.
        self.__custom_menu_list = {}
        # Monotonically increasing id handed out by add_menu.
        self.__counter = 0
    def add_menu(self, menu):
        """Register a menu list and return an id usable with `remove_menu`."""
        menu_id = self.__counter
        self.__custom_menu_list[menu_id] = menu
        self.__counter += 1
        return menu_id
    def remove_menu(self, menu_id):
        """Remove a previously registered menu list (KeyError if unknown)."""
        del self.__custom_menu_list[menu_id]
    def get_menu_list(self):
        """Return all registered menu entries concatenated in insertion order."""
        result = []
        # Keys are not needed here; iterate values directly.
        for menu in self.__custom_menu_list.values():
            result += menu
        return result
class ContextMenuEvent:
    """A minimal event object compatible with the ContextMenu API."""
    def __init__(self, item: weakref, expanded=None):
        """Build an ACTIVATE-style event carrying *item* and its expanded state."""
        self.payload = dict(item=item, node_open=expanded)
        self.type = 0
class ContextMenu:
    def __init__(self, usd_context):
        """Create the handler; `tree_view` is assigned later (used as a weakref-callable)."""
        self.tree_view = None
        self._usd_context = usd_context
def on_mouse_event(self, event):
# check its expected event
if event.type != int(omni.kit.ui.MenuEventType.ACTIVATE):
return
# get context menu core functionality & check its enabled
context_menu = omni.kit.context_menu.get_instance()
if context_menu is None:
carb.log_error("context_menu is disabled!")
return None
# get stage
stage = self._usd_context.get_stage()
if stage is None:
carb.log_error("stage not avaliable")
return None
# get parameters passed by event
item = event.payload["item"]
node_expanded = event.payload["node_open"]
prim_item_list = []
layer_item_list = []
selections = self.tree_view().selection
if item and item() and self.tree_view and self.tree_view():
for selected_item in selections:
if isinstance(selected_item, PrimSpecItem):
prim_item_list.append(weakref.ref(selected_item))
elif isinstance(selected_item, LayerItem):
layer_item_list.append(weakref.ref(selected_item))
# setup objects, this is passed to all functions
objects = {}
objects["item"] = item
objects["prim_item_list"] = prim_item_list
objects["layer_item_list"] = layer_item_list
objects["node_open"] = node_expanded
objects["stage"] = stage
objects["tree_view"] = self.tree_view
if self.is_over_specifier(objects):
delete_prim_title = "Delete Delta"
else:
delete_prim_title = "Delete"
# setup menu
menu_list = [
{
"name": "Set Default Edit Layer",
"glyph": "menu_rename.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_auto_authoring_or_spec_linking_mode,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.is_not_edit_layer,
],
"onclick_fn": ContextMenu.set_edit_layer,
},
{
"name": "Set Authoring Layer",
"glyph": "menu_rename.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.is_not_auto_authoring_and_spec_linking_mode,
ContextMenu.can_set_as_edit_target
],
"onclick_fn": ContextMenu.set_authoring_layer,
},
{
"name": "Create Sublayer",
"glyph": "menu_create_sublayer.svg",
"show_fn": [
ContextMenu.no_items_selected,
ContextMenu.can_edit_root_layer,
],
"onclick_fn": ContextMenu.create_sublayer,
},
{
"name": "Insert Sublayer",
"glyph": "menu_insert_sublayer.svg",
"show_fn": [
ContextMenu.no_items_selected,
ContextMenu.can_edit_root_layer,
],
"onclick_fn": ContextMenu.insert_sublayer,
},
{"name": ""},
{
"name": "Create Sublayer",
"glyph": "menu_create_sublayer.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.can_edit_sublayer
],
"onclick_fn": ContextMenu.create_sublayer,
},
{
"name": "Insert Sublayer",
"glyph": "menu_insert_sublayer.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.can_edit_sublayer
],
"onclick_fn": ContextMenu.insert_sublayer,
},
{
"name": "New Anonymous Sublayer",
"glyph": "menu_create_sublayer.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.is_from_session_layer_tree,
ContextMenu.can_edit_sublayer
],
"onclick_fn": ContextMenu.create_anonymous_sublayer,
},
{
"name": "Merge Down One",
"glyph": "menu_merge_down.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.is_not_reserved_layer,
ContextMenu.can_merge_layer_down,
ContextMenu.is_not_from_session_layer_tree,
ContextMenu.can_edit_sublayer
],
"onclick_fn": ContextMenu.merge_down_one,
},
{
"name": "Flatten Sublayers",
"glyph": "menu_flatten_layers.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.has_sublayers,
ContextMenu.is_not_missing_layer,
ContextMenu.is_not_from_session_layer_tree,
ContextMenu.has_no_layers_locked,
ContextMenu.can_flatten_sublayers
],
"onclick_fn": ContextMenu.flatten_sublayers,
},
{"name": ""},
{
"name": "Save",
"glyph": "menu_save.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.is_not_anonymous_layer,
ContextMenu.is_layer_dirty,
ContextMenu.is_not_live_layer
],
"onclick_fn": ContextMenu.save_layer,
},
{
"name": "Save a Copy",
"glyph": "menu_save_as.svg",
"show_fn": [ContextMenu.is_layer_item, ContextMenu.is_not_missing_layer],
"onclick_fn": ContextMenu.save_layer_as,
},
{
"name": "Save As",
"glyph": "menu_save_as.svg",
"show_fn":
[
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.is_not_reserved_layer,
ContextMenu.is_not_live_layer,
ContextMenu.can_edit_sublayer
],
"onclick_fn": ContextMenu.save_layer_as_and_replace,
},
{"name": ""},
{
"name": "Reload Layer",
"glyph": "menu_refresh.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_not_anonymous_layer,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.is_not_live_layer
],
"onclick_fn": ContextMenu.reload_layer,
},
{
"name": "Remove Layer",
"glyph": "menu_remove_layer.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_layer_not_locked,
ContextMenu.is_not_reserved_layer,
ContextMenu.is_not_live_layer,
ContextMenu.can_edit_sublayer,
ContextMenu.can_edit_sublayer_parent,
ContextMenu.is_not_authoring_layer,
],
"onclick_fn": ContextMenu.remove_layer,
},
{
"name": delete_prim_title,
"glyph": "menu_delete.svg",
"show_fn": [
ContextMenu.is_prim_spec_item,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.can_delete_prim,
],
"onclick_fn": ContextMenu.prim_delete,
},
{
"name": "Move Selections To This Layer",
"glyph": "menu_duplicate.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_layer_writable,
ContextMenu.is_layer_not_locked,
ContextMenu.is_layer_and_parent_unmuted,
ContextMenu.can_edit_sublayer,
self.has_selections
],
"onclick_fn": self.move_prims,
},
{"name": ""},
{
"name": "Refresh Reference",
"glyph": "sync.svg",
"name_fn": ContextMenu.refresh_reference_payload_name,
"show_fn": [ContextMenu.is_prim_spec_item, ContextMenu.has_payload_or_reference],
"onclick_fn": lambda o: context_menu.refresh_payload_or_reference(ContextMenu._stage_window_object(o))
},
{
"name": "Select Bound Objects",
"glyph": "menu_search.svg",
"show_fn": ContextMenu.is_material,
"onclick_fn": lambda o: context_menu.select_prims_using_material(ContextMenu._stage_window_object(o))
},
{"populate_fn": ContextMenu.show_open_close_tree},
{"name": ""},
{
"name": "Copy URL Link",
"glyph": "menu_link.svg",
"show_fn": [
ContextMenu.has_any_items_selected
],
"onclick_fn": ContextMenu.copy_url},
{
"name": "Find in Content Browser",
"glyph": "menu_search.svg",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
ContextMenu.is_not_anonymous_layer,
],
"onclick_fn": ContextMenu.find_in_browser,
}
]
"""
menu_list = [
]
"""
layer_item = ContextMenu._get_layer_item(objects)
if layer_item and layer_item.model.spec_linking_mode:
menu_list.append({
"name":
{
"Prim Linking":
[
{
"name": "Link Selected",
"show_fn": [
self.has_selections,
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
],
"onclick_fn": self.link_selected_prims
},
{
"name": "Link Selected Hierarchy",
"show_fn": [
self.has_selections,
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
],
"onclick_fn": self.link_selected_prims_with_hierarchy,
},
{
"name": "Unlink Selected",
"show_fn": [
self.has_selections,
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
],
"onclick_fn": self.unlink_selected_prims,
},
{
"name": "Unlink Selected Hierarchy",
"show_fn": [
self.has_selections,
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
],
"onclick_fn": self.unlink_selected_prims_with_hierarchy,
},
{
"name": "Select Linked Prims",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
],
"onclick_fn": self.select_linked_prims,
},
{
"name": "Clear All Linked Prims",
"show_fn": [
ContextMenu.is_layer_item,
ContextMenu.is_not_missing_layer,
],
"onclick_fn": self.clear_all_linked_prims,
}
]
},
"glyph": "menu_link.svg",
})
menu_list += CustomMenuList().get_menu_list()
# show menu
context_menu.show_context_menu("layers_widget", objects, menu_list)
def is_over_specifier(self, objects):
prim_item = ContextMenu._get_prim_spec_item(objects)
if not prim_item:
return False
return prim_item.specifier == PrimSpecSpecifier.OVER_ONLY
# ---------------------------------------------- menu show test functions ----------------------------------------------
def _get_layer_item(objects) -> LayerItem:
item = objects["item"]
if not item or not item():
return None
if isinstance(item(), PrimSpecItem):
return item().layer_item
elif isinstance(item(), LayerItem):
return item()
return None
def _get_prim_spec_item(objects):
item = objects["item"]
if not item or not item():
return None
if isinstance(item(), PrimSpecItem):
return item()
return None
    def link_selected_prims(self, objects): # pragma: no cover
        """Link the currently selected prim paths to the clicked layer (no hierarchy)."""
        layer_item = ContextMenu._get_layer_item(objects)
        if not layer_item:
            return
        selections = self._usd_context.get_selection().get_selected_prim_paths()
        omni.kit.commands.execute(
            "LinkSpecs",
            spec_paths=selections,
            layer_identifiers=layer_item.identifier,
            additive=True, hierarchy=False,
            usd_context=self._usd_context
        )
    def link_selected_prims_with_hierarchy(self, objects): # pragma: no cover
        """Link the selected prim paths and their hierarchies to the clicked layer."""
        layer_item = ContextMenu._get_layer_item(objects)
        if not layer_item:
            return
        selections = self._usd_context.get_selection().get_selected_prim_paths()
        omni.kit.commands.execute(
            "LinkSpecs",
            spec_paths=selections,
            layer_identifiers=layer_item.identifier,
            additive=True, hierarchy=True,
            usd_context=self._usd_context
        )
    def unlink_selected_prims(self, objects): # pragma: no cover
        """Unlink the currently selected prim paths from the clicked layer (no hierarchy)."""
        layer_item = ContextMenu._get_layer_item(objects)
        if not layer_item:
            return
        selections = self._usd_context.get_selection().get_selected_prim_paths()
        omni.kit.commands.execute(
            "UnlinkSpecs",
            spec_paths=selections,
            layer_identifiers=layer_item.identifier,
            hierarchy=False,
            usd_context=self._usd_context
        )
    def unlink_selected_prims_with_hierarchy(self, objects): # pragma: no cover
        """Unlink the selected prim paths and their hierarchies from the clicked layer."""
        layer_item = ContextMenu._get_layer_item(objects)
        if not layer_item:
            return
        selections = self._usd_context.get_selection().get_selected_prim_paths()
        omni.kit.commands.execute(
            "UnlinkSpecs",
            spec_paths=selections,
            layer_identifiers=layer_item.identifier,
            hierarchy=True,
            usd_context=self._usd_context
        )
    def select_linked_prims(self, objects): # pragma: no cover
        """Select on the stage all prims linked to the clicked layer."""
        layer_item = ContextMenu._get_layer_item(objects)
        if not layer_item:
            return
        all_links = omni.kit.usd.layers.get_spec_links_for_layers(self._usd_context, layer_item.identifier)
        spec_paths = all_links.get(layer_item.identifier, None)
        if spec_paths:
            # Only prim paths are selectable; property specs are skipped.
            prim_paths = [spec_path for spec_path in spec_paths if Sdf.Path(spec_path).IsPrimPath()]
            if prim_paths:
                old_prim_paths = self._usd_context.get_selection().get_selected_prim_paths()
                omni.kit.commands.execute(
                    "SelectPrims", old_selected_paths=old_prim_paths,
                    new_selected_paths=prim_paths, expand_in_stage=True
                )
    def clear_all_linked_prims(self, objects): # pragma: no cover
        """Remove every spec link (root "/" recursively) from the clicked layer."""
        layer_item = ContextMenu._get_layer_item(objects)
        if not layer_item:
            return
        omni.kit.commands.execute(
            "UnlinkSpecs",
            spec_paths="/",
            layer_identifiers=layer_item.identifier,
            hierarchy=True,
            usd_context=self._usd_context
        )
    def show_open_close_tree(objects):
        """Populate-fn: appends an Expand/Collapse Tree entry for expandable items."""
        tree_view = objects["tree_view"]
        item = objects["item"]
        if not item or not tree_view() or not item():
            return False
        if not item().has_children:
            return
        def expand_item(objects, expanded):
            # Toggle only if the expansion state is still the one captured
            # when the menu was built.
            tree_view = objects["tree_view"]
            item = objects["item"]
            if not tree_view() or not item():
                return False
            if tree_view().is_expanded(item()) != expanded:
                return
            tree_view().set_expanded(item(), not expanded, False)
        omni.kit.context_menu.get_instance().separator("layer")
        expanded = tree_view().is_expanded(item())
        if expanded:
            objects["tree_item"] = ContextMenuExtension.uiMenuItem(
                f'Collapse Tree',
                triggered_fn=partial(expand_item, objects, expanded),
                glyph="menu_minus.svg"
            )
        else:
            objects["tree_item"] = ContextMenuExtension.uiMenuItem(
                f'Expand Tree',
                triggered_fn=partial(expand_item, objects, expanded),
                glyph="menu_plus.svg"
            )
def is_item_expaned(objects): # pragma: no cover
"""Unused."""
tree_view = objects["tree_view"]
item = objects["item"]
if not tree_view() or not item():
return False
return tree_view.is_expanded(item)
    def has_any_items_selected(objects):
        """True when the event payload carries an item (inverse of no_items_selected)."""
        return not ContextMenu.no_items_selected(objects)
def no_items_selected(objects):
item = objects["item"]
return item is None
    def is_layer_item(objects):
        """Truthy when the event item is a live weakref to a LayerItem."""
        item = objects["item"]
        return item and item() and isinstance(item(), LayerItem)
def has_selections(self, objects):
selection = self._usd_context.get_selection()
paths = selection.get_selected_prim_paths()
return len(paths) > 0
    def is_prim_spec_item(objects):
        """Truthy when the event item is a live weakref to a PrimSpecItem."""
        item = objects["item"]
        return item and item() and isinstance(item(), PrimSpecItem)
    def is_auto_authoring_or_spec_linking_mode(objects):
        """True in either auto-authoring or spec-linking edit mode."""
        return ContextMenu.is_spec_linking_mode(objects) or ContextMenu.is_auto_authoring_mode(objects)
    def is_not_auto_authoring_and_spec_linking_mode(objects):
        """True only in normal edit mode (neither auto-authoring nor spec-linking)."""
        return not ContextMenu.is_auto_authoring_or_spec_linking_mode(objects)
def can_be_set_as_authoring_target(objects):  # pragma: no cover
    """Whether the clicked layer may become the authoring (edit) target.

    True only in normal mode (neither auto-authoring nor spec-linking) and
    while the root layer is not part of a live session.
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item or not layer_item.model:
        return False
    # The edit target cannot move while the root layer is live-syncing.
    value = not layer_item.model.root_layer_item.is_in_live_session
    return ContextMenu.is_not_auto_authoring_and_spec_linking_mode(objects) and value
def is_spec_linking_mode(objects):
    """True when the layer model is in spec-linking mode."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        return layer_item.model.spec_linking_mode
    return False
def is_auto_authoring_mode(objects):
    """True when the layer model is in auto-authoring mode."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        return layer_item.model.auto_authoring_mode
    return False
def is_edit_layer(objects):
    """True when the item is the default edit layer in auto-authoring mode."""
    item = ContextMenu._get_layer_item(objects)
    if not item:
        return item
    return item.edit_layer_in_auto_authoring_mode
def is_not_edit_layer(objects):
    """Inverse of is_edit_layer."""
    result = ContextMenu.is_edit_layer(objects)
    return not result
def is_authoring_layer(objects):
    """True when the clicked layer is the current edit target."""
    item = ContextMenu._get_layer_item(objects)
    if not item:
        return item
    return item.is_edit_target
def is_not_authoring_layer(objects):
    """Inverse of is_authoring_layer."""
    result = ContextMenu.is_authoring_layer(objects)
    return not result
def is_reserved_layer(objects):
    """True when the clicked layer is a reserved (built-in) layer."""
    item = ContextMenu._get_layer_item(objects)
    if not item:
        return item
    return item.reserved
def is_not_reserved_layer(objects):
    """Inverse of is_reserved_layer."""
    result = ContextMenu.is_reserved_layer(objects)
    return not result
def is_omni_layer(objects):  # pragma: no cover
    """True when the clicked layer resides on an Omniverse server.

    Fix: guard against a missing layer item, consistent with the sibling
    predicates (the original dereferenced None and raised AttributeError).
    """
    item = ContextMenu._get_layer_item(objects)
    return item and item.is_omni_layer
def is_not_omni_layer(objects):  # pragma: no cover
    """Inverse of is_omni_layer."""
    result = ContextMenu.is_omni_layer(objects)
    return not result
def is_missing_layer(objects):
    """True when the clicked layer failed to load (file missing)."""
    item = ContextMenu._get_layer_item(objects)
    if not item:
        return item
    return item.missing
def is_not_missing_layer(objects):
    """Inverse of is_missing_layer."""
    result = ContextMenu.is_missing_layer(objects)
    return not result
def is_from_session_layer_tree(objects):
    """True when the clicked layer belongs to the session layer subtree."""
    item = ContextMenu._get_layer_item(objects)
    if not item:
        return item
    return item.from_session_layer
def is_not_from_session_layer_tree(objects):
    """Inverse of is_from_session_layer_tree."""
    result = ContextMenu.is_from_session_layer_tree(objects)
    return not result
def can_delete_prim(objects):
    """Whether the clicked prim spec may be deleted from its layer.

    Deletion is refused for read-only prims and while a live session is
    active on the root or current layer — except inside a live session
    layer itself, where deletions are always allowed.
    """
    prim_item = ContextMenu._get_prim_spec_item(objects)
    if not prim_item:
        return False
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item or not layer_item.model:
        return False
    stage = objects["stage"]
    prim = stage.GetPrimAtPath(prim_item.path)
    read_only = LayerUtils.is_read_only_prim(prim)
    if read_only:
        return False
    # You can remove prims always for live session layer.
    is_live_sync_layer = ContextMenu.is_live_syncing_layer(objects)
    if is_live_sync_layer:
        return True
    # Otherwise, you cannot remove anything if root layer is in live.
    root_in_live = layer_item.model.root_layer_item.is_in_live_session
    if root_in_live:
        return False
    # Or if the current layer is in live, you cannot remove base prims.
    if layer_item.is_in_live_session:
        return False
    return True
def is_material(objects):
    """True when the clicked prim spec is of type Material."""
    prim_item = ContextMenu._get_prim_spec_item(objects)
    return bool(prim_item) and prim_item.type_name == "Material"
def is_anonymous_layer(objects):
    """True when the clicked layer has an anonymous (in-memory) identifier."""
    item = ContextMenu._get_layer_item(objects)
    if not item:
        return item
    return Sdf.Layer.IsAnonymousLayerIdentifier(item.identifier)
def is_not_anonymous_layer(objects):
    """Inverse of is_anonymous_layer."""
    result = ContextMenu.is_anonymous_layer(objects)
    return not result
def is_layer_or_parent_muted(objects):
    """True when the clicked layer (or any of its parents) is muted.

    Fix: guard against a missing layer item, consistent with the sibling
    predicates such as is_missing_layer (the original raised
    AttributeError when _get_layer_item returned None).
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        return False
    return layer_item.muted_or_parent_muted
def is_layer_and_parent_unmuted(objects):
    """Inverse of is_layer_or_parent_muted."""
    muted = ContextMenu.is_layer_or_parent_muted(objects)
    return not muted
def is_layer_locked_by_other(objects):  # pragma: no cover
    """Deprecated. Per-user layer locks no longer exist; always False."""
    return False
def is_layer_not_locked_by_other(objects):  # pragma: no cover
    """Deprecated inverse of is_layer_locked_by_other."""
    result = ContextMenu.is_layer_locked_by_other(objects)
    return not result
def is_layer_locked(objects):
    """True when the clicked layer is locked.

    Fix: return False when no layer item is in the context instead of
    dereferencing None (consistent with the other layer predicates).
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        return False
    return layer_item.locked
def is_layer_not_locked(objects):
    """Inverse of is_layer_locked."""
    locked = ContextMenu.is_layer_locked(objects)
    return not locked
def is_layer_read_only(objects):
    """True when the clicked layer cannot be edited.

    Fix: treat a missing layer item as read-only (conservative) instead of
    raising AttributeError like the original did.
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        return True
    return not layer_item.editable
def is_layer_writable(objects):
    """Inverse of is_layer_read_only."""
    read_only = ContextMenu.is_layer_read_only(objects)
    return not read_only
def is_layer_dirty(objects):
    """True when the clicked layer has unsaved changes.

    Fix: return False when no layer item is in the context instead of
    dereferencing None (consistent with the other layer predicates).
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        return False
    return layer_item.dirty
def has_no_layers_locked(objects):
    """True when no layer in the model is locked; None when no layer item."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        return not layer_item.model.has_any_layers_locked()
    return None
def has_sublayers(objects):
    """True when the stage's root layer has at least one sublayer."""
    root_layer = objects["stage"].GetRootLayer()
    return len(root_layer.subLayerPaths) > 0
def can_merge_layer_down(objects):
    """Whether the clicked sublayer has a sibling below it to merge into.

    Requires a parent layer and the clicked layer not being the last entry
    in the parent's sublayer list.

    NOTE(review): assumes _get_layer_item never returns None here (menu is
    only shown for layer items) — verify against the menu definitions.
    """
    layer_item = ContextMenu._get_layer_item(objects)
    parent_layer = layer_item.parent
    if not parent_layer:
        return False
    position = LayerUtils.get_sublayer_position_in_parent(parent_layer.identifier, layer_item.identifier)
    if position == -1:
        return False
    # Only a layer that is not the last sublayer has something below it.
    if position < len(parent_layer.sublayers) - 1:
        return True
    return False
def has_payload_or_reference(objects: dict):
    """True when the clicked prim spec authors a references or payload op."""
    prim_item = ContextMenu._get_prim_spec_item(objects)
    if not prim_item or not prim_item.prim_spec:
        return False
    spec = prim_item.prim_spec
    if spec.HasInfo(Sdf.PrimSpec.ReferencesKey):
        return True
    return spec.HasInfo(Sdf.PrimSpec.PayloadKey)
def refresh_reference_payload_name(objects: dict):
    """Return the "Refresh ..." menu label matching the prim's composition.

    None when the prim has neither authored references nor payloads.
    """
    prim_item = ContextMenu._get_prim_spec_item(objects)
    if not prim_item or not prim_item.prim_spec:
        return None
    prim = objects["stage"].GetPrimAtPath(prim_item.prim_spec.path)
    has_refs = prim.HasAuthoredReferences()
    has_payloads = prim.HasAuthoredPayloads()
    if has_refs and has_payloads:
        return "Refresh Payload & Reference"
    if has_refs:
        return "Refresh Reference"
    if has_payloads:
        return "Refresh Payload"
    return None
# ---------------------------------------------- menu onClick functions ----------------------------------------------
@staticmethod
def _get_content_window():
try:
import omni.kit.window.content_browser as content
return content.get_content_window()
except Exception as e:
pass
return None
def _stage_window_object(objects: dict) -> dict:
    """Build the {prim_list, stage} payload expected by stage-window menus."""
    stage = objects["stage"]
    prim_spec = ContextMenu._get_prim_spec_item(objects).prim_spec
    prim = stage.GetPrimAtPath(prim_spec.path)
    return {"prim_list": [prim], "stage": stage}
def find_in_browser(objects):
    """Navigate the Content Browser to the clicked layer, if both exist."""
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        return
    window = ContextMenu._get_content_window()
    if not window:
        return
    window.navigate_to(layer_item.identifier)
def copy_url(objects):
    """Copy the clicked item's prim path or layer identifier to the clipboard.

    Fixes two latent crashes in the original: a TypeError when the "item"
    weakref holder was None, and an UnboundLocalError when the referent was
    neither a PrimSpecItem nor a LayerItem.
    """
    item = objects["item"]
    if not item or not item():
        return
    if isinstance(item(), PrimSpecItem):
        url = item().path.pathString
    elif isinstance(item(), LayerItem):
        url = item().identifier
    else:
        return
    omni.kit.clipboard.copy(url)
def set_edit_layer(objects):
    """Make the clicked layer the default edit layer (auto-authoring)."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        layer_item.model.default_edit_layer = layer_item.identifier
def set_authoring_layer(objects):
    """Set the clicked layer as the stage's edit target."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        layer_item.model.set_edit_target(layer_item, True)
def can_edit_root_layer(objects):
    """Whether the root layer of the tree's model can be edited."""
    tree_view = objects["tree_view"]
    if not tree_view():
        return False
    root_item = tree_view().model.root_layer_item
    return LayerModelUtils.can_edit_sublayer(root_item)
def can_edit_sublayer(objects):
    """Whether the clicked sublayer can be edited; None when no layer item."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        return LayerModelUtils.can_edit_sublayer(layer_item)
    return None
def can_edit_sublayer_parent(objects):
    """Whether the clicked layer's parent can be edited; None when unknown."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model and layer_item.parent:
        return LayerModelUtils.can_edit_sublayer(layer_item.parent)
    return None
def can_set_as_edit_target(objects):
    """Whether the clicked layer may be set as the edit target."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        return LayerModelUtils.can_set_as_edit_target(layer_item)
    return None
def can_not_edit_sublayer(objects):
    """Inverse of can_edit_sublayer."""
    editable = ContextMenu.can_edit_sublayer(objects)
    return not editable
def can_flatten_sublayers(objects):
    """Flattening is only allowed while the model is not live-syncing."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item and layer_item.model:
        return not layer_item.model.is_in_live_session
    return False
def copy_layer_url(objects):  # pragma: no cover
    """Deprecated. Copy the context's layer identifier to the clipboard.

    Fix: check the same key that is read below ("layer_id"); the original
    tested "layer" but indexed "layer_id", which could raise KeyError.
    """
    if "layer_id" not in objects:
        return
    omni.kit.clipboard.copy(objects["layer_id"])
def create_sublayer(objects, anonymous=False):
    """Create a new sublayer at position 0 under the clicked (or root) layer.

    Args:
        anonymous: When True the sublayer is created as an anonymous layer.
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        tree_view = objects["tree_view"]
        if not tree_view():
            return False
        layer_item = tree_view().model.root_layer_item
    LayerModelUtils.create_sublayer(layer_item, 0, anonymous)
def create_anonymous_sublayer(objects):
    """Shortcut for create_sublayer with anonymous=True."""
    ContextMenu.create_sublayer(objects, True)
def is_live_syncing_layer(objects):
    """True when the clicked item is a live-session layer."""
    layer_item = ContextMenu._get_layer_item(objects)
    return layer_item.is_live_session_layer if layer_item else False
def is_not_live_layer(objects):
    """Inverse of is_live_syncing_layer."""
    result = ContextMenu.is_live_syncing_layer(objects)
    return not result
def is_live_session_layer(objects):  # pragma: no cover
    """True when the clicked item is a live-session layer.

    NOTE(review): duplicates is_live_syncing_layer(); kept for
    compatibility with existing menu definitions that reference this name.
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item:
        return layer_item.is_live_session_layer
    return False
def is_not_live_session_layer(objects):  # pragma: no cover
    """Inverse of is_live_session_layer."""
    result = ContextMenu.is_live_session_layer(objects)
    return not result
def insert_sublayer(objects):
    """Insert an existing layer file at position 0 under the clicked (or root) layer."""
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        tree_view = objects["tree_view"]
        if not tree_view():
            return False
        layer_item = tree_view().model.root_layer_item
    LayerModelUtils.insert_sublayer(layer_item, 0)
def save_layer(objects):
    """Save the clicked layer to disk."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item:
        LayerModelUtils.save_layer(layer_item)
def save_layer_as(objects):
    """Save the clicked layer under a new path."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item:
        LayerModelUtils.save_layer_as(layer_item)
def save_layer_as_and_replace(objects):
    """Save the clicked layer under a new path and replace it in the stage."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item:
        LayerModelUtils.save_layer_as(layer_item, True)
def merge_down_one(objects):
    """Merge the clicked layer into the sublayer below it."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item:
        LayerModelUtils.merge_layer_down(layer_item)
def flatten_sublayers(objects):
    """Flatten all sublayers of the clicked layer's model."""
    layer_item = ContextMenu._get_layer_item(objects)
    if layer_item:
        LayerModelUtils.flatten_all_layers(layer_item.model)
def remove_layer(objects):
    """Remove the clicked layer, plus any other selected unlocked layers.

    With a multi-selection, locked layers are skipped and reported through
    a warning notification instead of being removed.
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        return
    layer_item_list = objects["layer_item_list"]
    if len(layer_item_list) <= 1:
        LayerModelUtils.remove_layer(layer_item)
    else:
        all_items = []
        locked_layers = []
        # Entries are weakrefs; skip dead ones and the clicked item itself
        # (it is appended last so it is removed exactly once).
        for weak_item in layer_item_list:
            if not weak_item or not weak_item():
                continue
            if layer_item == weak_item():
                continue
            if weak_item().locked:
                locked_layers.append(weak_item().identifier)
                continue
            all_items.append(weak_item())
        all_items.append(layer_item)
        LayerModelUtils.remove_layers(all_items)
        if locked_layers:
            identifiers = ""
            for identifier in locked_layers:
                identifiers += f"\n{identifier}"
            nm.post_notification(
                "Locked layers cannot be removed:\n" + identifiers,
                status=nm.NotificationStatus.WARNING
            )
def reload_layer(objects):
    """Reload the clicked layer from disk, discarding unsaved edits.

    Fix: guard against a missing layer item, consistent with save_layer()
    and the other layer actions (the original passed None through).
    """
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item:
        return
    LayerModelUtils.reload_layer(layer_item)
def prim_delete(objects):
    """Delete every still-alive prim spec item in the current selection."""
    if not ContextMenu.is_prim_spec_item(objects):
        return
    refs = objects["prim_item_list"]
    # Entries are weakrefs; keep only holders whose referent is alive.
    valid_prim_items = [ref() for ref in refs if ref and ref()]
    LayerModelUtils.remove_prim_spec_items(valid_prim_items)
def move_prims(self, objects):
    """Stitch the currently selected prims' specs into the clicked layer."""
    layer_item = ContextMenu._get_layer_item(objects)
    if not layer_item or not layer_item.layer:
        return
    stage = self._usd_context.get_stage()
    paths = self._usd_context.get_selection().get_selected_prim_paths()
    if stage and paths:
        omni.kit.commands.execute(
            "StitchPrimSpecsToLayer",
            prim_paths=paths,
            target_layer_identifier=layer_item.identifier
        )
@staticmethod
def add_menu(menu_list):
    """
    Add the menu to the end of the context menu. Return the object that
    should be alive all the time. Once the returned object is destroyed,
    the added menu is destroyed as well.
    """
    class MenuSubscription:
        # Subscription handle: dropping the last reference removes the menu.
        def __init__(self, menu_id):
            self.__id = menu_id

        def __del__(self):
            CustomMenuList().remove_menu(self.__id)

    menu_id = CustomMenuList().add_menu(menu_list)
    return MenuSubscription(menu_id)
| 38,349 | Python | 34.542169 | 126 | 0.544004 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_widgets.py | import carb
import weakref
from functools import partial
from omni.kit.widget.live_session_management import stop_or_show_live_session_widget, build_live_session_user_layout
from omni.kit.widget.live_session_management.reload_widget import build_reload_widget
from omni import ui
from .prim_spec_item import PrimSpecSpecifier, PrimSpecItem
from .layer_item import LayerItem
from .layer_model import LayerModel
from .layer_icons import LayerIcons
from .context_menu import ContextMenu, ContextMenuEvent
from .path_utils import PathUtils
from .layer_settings import LayerSettings
# Shared style for tooltip popups built on ui.InvisibleButton (see
# build_layer_widget): grey text on a dark tooltip background.
TOOLTIP_STYLE = {
    "color": ui.color("#979797"),
    "Tooltip": {"background_color": 0xEE222222}
}
def get_type_icon(node_type):
    """Resolve the icon path for a USD prim type.

    Light prim types fall back to the generic "Light" icon; every other
    unknown type falls back to "Prim".
    """
    light_types = {"DistantLight", "SphereLight", "RectLight", "DiskLight", "CylinderLight", "DomeLight"}
    fallback = "Light" if node_type in light_types else "Prim"
    return LayerIcons().get(node_type, fallback)
def create_icon_images(layout, item: PrimSpecItem):
    """(Re)build the stacked icon images for a prim spec item inside *layout*.

    The base type icon is overlaid with badges for instancing,
    reference/payload arcs, delta (over) specs, and lock/link state.
    """
    specifier = item.specifier
    node_type = item.type_name
    instanceable = item.instanceable
    # Gray out the icon if the filter string is not in the text
    iconname = "object_icon"
    icon_filenames = []
    icon_filenames.append(get_type_icon(node_type))
    icons = LayerIcons()
    if instanceable:
        icon_filenames.append(icons.get("Instance"))
    # Badge selection depends on how the spec was authored (def/over and
    # whether it carries a reference or payload arc).
    if specifier == PrimSpecSpecifier.OVER_ONLY:
        icon_filenames.append(icons.get("layer_delta"))
    elif specifier == PrimSpecSpecifier.OVER_WITH_REFERENCE:
        icon_filenames.append(icons.get("Reference"))
        icon_filenames.append(icons.get("layer_delta"))
    elif specifier == PrimSpecSpecifier.DEF_WITH_REFERENCE:
        icon_filenames.append(icons.get("Reference"))
    elif specifier == PrimSpecSpecifier.OVER_WITH_PAYLOAD:
        icon_filenames.append(icons.get("Payload"))
        icon_filenames.append(icons.get("layer_delta"))
    elif specifier == PrimSpecSpecifier.DEF_WITH_PAYLOAD:
        icon_filenames.append(icons.get("Payload"))
    elif specifier == PrimSpecSpecifier.DEF_ONLY and not node_type:
        prim_icon = icons.get("Prim")
        if prim_icon not in icon_filenames:
            icon_filenames.append(icons.get("Prim"))
    if item.locked:
        icon_filenames.append(icons.get("menu_lock"))
    elif item.linked:
        icon_filenames.append(icons.get("link"))
    layout.clear()
    with layout:
        for icon_filename in icon_filenames:
            # Small status badges are drawn bottom-right on top of the base icon.
            if (
                icon_filename == icons.get("layer_delta") or
                icon_filename == icons.get("menu_lock") or
                icon_filename == icons.get("link")
            ):
                # TODO: There is a bug of alignment of svg in stack
                # The following is to use spacer to implement RIGHT_BOTTOM align
                with ui.VStack(width=20, height=20):
                    ui.Spacer(width=20, height=8)
                    with ui.HStack(width=20, height=12):
                        ui.Spacer(width=8)
                        ui.Image(
                            icon_filename,
                            width=12,
                            height=12,
                            alignment=ui.Alignment.RIGHT_BOTTOM,
                            name=iconname,
                            style_type_name_override="LayerView.Image",
                        )
            else:
                ui.Image(icon_filename, name=iconname, style_type_name_override="LayerView.Image")
def build_prim_spec_widget(
    context_menu: ContextMenu, model: LayerModel, item: PrimSpecItem, column_id: int, expanded: bool
):
    """Build the tree-view cell for a prim spec item (name column only).

    Subscribes to the item's value model so the label styling and icon
    stack refresh when the prim's flags change.
    """
    value_model = model.get_item_value_model(item, column_id)
    if not value_model:
        return
    if column_id == 0:
        with ui.HStack(spacing=4, height=20):
            with ui.VStack(width=0):
                ui.Spacer()
                # Draw all icons on top of each other
                image_layout = ui.ZStack(width=20, height=20)
                create_icon_images(image_layout, item)
                ui.Spacer()
            text = value_model.get_value_as_string()
            # NOTE(review): weakref_item/weakref_menu are unused below —
            # possibly leftovers; confirm before removing.
            weakref_item = weakref.ref(item)
            weakref_menu = weakref.ref(context_menu)
            with ui.HStack():
                name_label = ui.Label(
                    text,
                    width=ui.Fraction(1),
                    name="object_name",
                    style_type_name_override="LayerView.Item",
                )

                def prim_flags_changed(value_model):
                    # Restyle the label for muted/missing-reference states
                    # and rebuild the icon badges.
                    is_muted = item.layer_item.muted_or_parent_muted
                    if is_muted:
                        name_label.name = "object_name_grey"
                    elif item.has_missing_reference:
                        name_label.name = "object_name_missing"
                        name_label.set_tooltip("Missing references found.")
                    else:
                        name_label.name = "object_name"
                    create_icon_images(image_layout, item)

                prim_flags_changed(value_model)
                value_model.callback_id = value_model.subscribe_value_changed_fn(prim_flags_changed)
    else:
        return
def create_layer_name_widget(model: LayerModel, value_model, item: LayerItem, context_menu: ContextMenu, expanded: bool):
    """Build the name cell for a layer row.

    Returns the layout plus the sub-widgets the caller mutates later:
    (layout, layers_icon, label, lock_image, lightning_image).
    """
    text = value_model.get_value_as_string()
    layout = ui.HStack(width=ui.Fraction(1))
    with layout:
        # Icon stack: base layers icon, live "lightning" badge, lock badge.
        with ui.ZStack(width=0, height=0):
            with ui.ZStack(width=0, height=0):
                with ui.VStack(width=0):
                    ui.Spacer()
                    layers_icon = ui.Image(width=20, height=20, name="layers")
                    ui.Spacer()
                with ui.VStack():
                    ui.Spacer(height=4)
                    with ui.HStack():
                        ui.Spacer()
                        lightning_image = ui.Image(width=14, height=14, name="layers_lightning")
                        ui.Spacer()
                    ui.Spacer()
            with ui.VStack(width=0):
                ui.Spacer(height=6)
                with ui.HStack(width=0, height=0):
                    ui.Spacer(width=6)
                    lock_image = ui.Image(width=14, height=14, name="layer_read_only_lock")
        ui.Spacer(width=3)
        with ui.ZStack():
            # Background highlight depends on edit-target/selection state.
            with ui.VStack():
                ui.Spacer()
                if not model.normal_mode and item.edit_layer_in_auto_authoring_mode:
                    ui.Rectangle(name="edit_layer_with_corner", height=20)
                elif item.is_edit_target:
                    ui.Rectangle(name="edit_target_with_corner", height=20)
                elif item.selected:
                    ui.Rectangle(name="selected", height=20)
                else:
                    ui.Rectangle(name="normal", height=20)
                ui.Spacer()
            with ui.HStack():
                if (
                    (not model.normal_mode and item.edit_layer_in_auto_authoring_mode) or
                    item.is_edit_target
                ):
                    ui.Spacer(width=3)
                label = ui.Label(
                    text,
                    name="object_name",
                    style_type_name_override="LayerView.Item",
                )
                label.set_tooltip(item.identifier)
    return layout, layers_icon, label, lock_image, lightning_image
def _build_live_users_tooltip(live_session, icon_size):
    """Tooltip body listing every peer user connected to *live_session*.

    The session owner is annotated with an " - owner" suffix.
    """
    all_users = live_session.peer_users
    total_users = len(all_users)
    with ui.VStack():
        with ui.HStack(style={"color": ui.color("#757575")}):
            ui.Spacer(width=20)
            ui.Label(f"{total_users} Users Connected", style={"font_size": 12}, width=0)
            ui.Spacer(width=20)
        ui.Spacer(height=0)
        ui.Separator(style={"color": ui.color("#4f4f4f")})
        ui.Spacer(height=4)
        for user in all_users:
            item_title = f"{user.user_name} ({user.from_app})"
            if live_session.owner == user.user_name:
                item_title += " - owner"
            with ui.HStack(identifier=user.user_id):
                build_live_session_user_layout(user, icon_size, "")
                ui.Spacer(width=4)
                ui.Label(item_title, style={"font_size": 14})
            ui.Spacer(height=2)
def build_layer_widget(context_menu: ContextMenu, model: LayerModel, item: LayerItem, column_id: int, expanded: bool):
    """Build one tree-view cell for a layer row.

    Column map (derived from the branches below):
      0: name + icons, 1: live-session button, 2: save/dirty button,
      3: local mute, 4: global mute (or live user), 5: reload (or live
      user), 6: lock (or live user count).
    Each interactive cell subscribes to its value model to restyle itself
    on state changes.
    """
    value_model = model.get_item_value_model(item, column_id)
    if not value_model:
        return
    selected = item.edit_layer_in_auto_authoring_mode or item.is_edit_target
    is_live_path = PathUtils.is_omni_live(item.identifier)
    is_layer_in_live_session = item.is_in_live_session
    is_root_layer_in_live_session = model.root_layer_item.is_in_live_session
    is_in_live_session = is_root_layer_in_live_session or is_layer_in_live_session
    is_omni_path = PathUtils.is_omni_path(item.identifier)
    if column_id == 0:
        label_layout, layers_icon, name_label, lock_image, lightning_image = create_layer_name_widget(
            model, value_model, item, context_menu, expanded
        )

        def name_model_changed(model):
            # Pick the icon by priority: live path > missing > outdated >
            # edit target (mode-dependent) > has child edit target > normal.
            normal_mode = item.model.normal_mode
            if item.is_omni_live_path:
                layers_icon.name = "layers_edit_target"
            elif item.missing:
                layers_icon.name = "layers_missing"
            elif not item.latest:
                layers_icon.name = "layers_outdate"
            elif not normal_mode and item.edit_layer_in_auto_authoring_mode:
                layers_icon.name = "layers_edit_target"
            elif normal_mode and item.is_edit_target:
                layers_icon.name = "layers_edit_target"
            elif not normal_mode and item.has_child_edit_layer:
                layers_icon.name = "layers_has_child_edit_target"
            elif normal_mode and item.has_child_edit_target:
                layers_icon.name = "layers_has_child_edit_target"
            else:
                layers_icon.name = "layers"
            lock_image.visible = not item.editable
            is_muted = item.muted_or_parent_muted
            # Same priority ordering for the label style.
            if item.missing:
                name_label.name = "object_name_missing"
            elif not item.latest:
                name_label.name = "object_name_outdated"
            elif is_muted:
                name_label.name = "object_name_grey"
            elif (item.model.auto_authoring_mode or item.model.spec_linking_mode) and item.edit_layer_in_auto_authoring_mode:
                name_label.name = "edit_target"
            elif item.is_edit_target:
                name_label.name = "edit_target"
            else:
                name_label.name = "object_name"
            text = model.get_value_as_string()
            name_label.text = text
            name_label.set_tooltip(item.identifier)
            if item.is_omni_live_path:
                lightning_image.visible = True
            else:
                lightning_image.visible = False

        name_model_changed(value_model)
        value_model.callback_id = value_model.subscribe_value_changed_fn(name_model_changed)

        def double_clicked(model: LayerModel, item: LayerItem):
            # Double click on base layer will forward it to live session layer.
            if item.is_in_live_session:
                model.set_edit_target(item.live_session_layer, True)
            else:
                muted = item.muted_or_parent_muted
                writable = item.editable
                is_missing_layer = item.missing
                is_edit_target = item.is_edit_target
                if not muted and writable and not is_missing_layer:
                    if model.auto_authoring_mode or model.spec_linking_mode:
                        if not item.edit_layer_in_auto_authoring_mode:
                            model.default_edit_layer = item.identifier
                    elif not is_edit_target:
                        model.set_edit_target(item, True)

        label_layout.set_mouse_double_clicked_fn(lambda *_: double_clicked(model, item))
    elif (
        column_id == 1 and not item.missing and not is_live_path and is_omni_path
        and not item.read_only_on_disk
    ):
        # Live-session toggle button with a drop-down affordance overlay.
        with ui.ZStack(width=0, height=0):
            with ui.VStack(width=0, height=0):
                ui.Spacer(width=20, height=18)
                with ui.HStack(width=0):
                    ui.Spacer(width=14)
                    ui.Image(width=6, height=6, alignment=ui.Alignment.RIGHT_BOTTOM, name="drop_down")
            live_button = ui.ToolButton(value_model, name="live_update", image_width=18, image_height=18)

        def on_button_clicked(x, y, b, m):
            # Left click (b == 0): quick-join/stop the default session.
            # Any other button: open the full join-options popup.
            quick = (b == 0)
            if quick:
                menu_widget = stop_or_show_live_session_widget(
                    item.model.usd_context,
                    stop_session_forcely=True,
                    layer_identifier=item.identifier,
                    quick_join="Default"
                )
                item.auto_reload = False
                item.model.refresh()
                return
            menu_widget = stop_or_show_live_session_widget(
                item.model.usd_context, show_join_options=True, layer_identifier=item.identifier
            )
            if not menu_widget:
                return
            # Try to align it with the button.
            button = live_button
            drop_down_x = button.screen_position_x
            drop_down_y = button.screen_position_y
            drop_down_height = button.computed_height
            # FIXME: The width of context menu cannot be got. Using fixed width here.
            menu_widget.show_at(
                drop_down_x - 104,
                drop_down_y + drop_down_height / 2 + 2
            )

        live_button.set_mouse_pressed_fn(on_button_clicked)
    elif (
        column_id == 2 and not item.anonymous and not item.missing and
        not is_live_path and not item.is_live_session_layer
    ):
        dirty_button = ui.ToolButton(value_model, name="dirty", image_width=14, image_height=14)

        def save_model_changed(_):
            # Save button doubles as a merge-down button for live session
            # layers; it is disabled for read-only and clean layers.
            if not item.editable:
                dirty_button.enabled = False
                dirty_button.set_tooltip("Read only")
                dirty_button.name = "dirty_readonly"
            elif item.is_live_session_layer or item.dirty:
                if item.is_live_session_layer:
                    if item.selected:
                        dirty_button.name = "merge_down_selected"
                    else:
                        dirty_button.name = "merge_down"
                    dirty_button.checked = item.has_content
                    dirty_button.enabled = item.has_content
                else:
                    if is_in_live_session:
                        dirty_button.set_tooltip("Cannot save Layer in Live Session.")
                    else:
                        dirty_button.set_tooltip("Save Layer")
                    if item.selected:
                        dirty_button.name = "dirty_selected"
                    else:
                        dirty_button.name = "dirty"
                    dirty_button.checked = not is_in_live_session
                    dirty_button.enabled = not is_in_live_session
            else:
                if item.selected:
                    dirty_button.name = "dirty_selected"
                else:
                    dirty_button.name = "dirty"
                dirty_button.checked = False
                dirty_button.enabled = False

        save_model_changed(value_model)
        value_model.callback_id = value_model.subscribe_value_changed_fn(save_model_changed)
    elif column_id == 3 and not item.reserved and not item.missing:
        local_muteness_button = ui.ToolButton(value_model, identifier="local_mute", image_width=14, image_height=14)

        def local_muteness_model_changed(value_model):
            muted = value_model.get_value_as_bool()
            # Local mute is unavailable in global scope and for the
            # authoring layer.
            if model.global_muteness_scope or selected:
                local_muteness_button.name = "muteness_disable"
                local_muteness_button.enabled = False
                local_muteness_button.set_tooltip(
                    "Cannot mute authoring layer."
                )
            else:
                local_muteness_button.enabled = True
                local_muteness_button.name = "muteness_enable"
                local_muteness_button.set_tooltip(
                    "Mute layer"
                )
            local_muteness_button.checked = muted

        local_muteness_model_changed(value_model)
        value_model.callback_id = value_model.subscribe_value_changed_fn(local_muteness_model_changed)
    else:
        if item.is_live_session_layer:
            # Live session rows reuse columns 4-6 to show peer users.
            if column_id == 4 or column_id == 5:
                peer_user = value_model.peer_user
                if not peer_user:
                    return
                tooltip = f"{peer_user.user_name} ({peer_user.from_app})"
                with ui.ZStack(identifier=peer_user.user_id, width=0, height=0):
                    build_live_session_user_layout(peer_user, size=18, tooltip=tooltip)
            elif column_id == 6:
                current_live_session = item.current_live_session
                if current_live_session:
                    peer_users = current_live_session.peer_users
                    user_count = len(peer_users)
                    if user_count > 2:
                        with ui.ZStack():
                            # NOTE(review): "aligment" below is a misspelling of
                            # "alignment" — likely ignored by ui.Label; verify.
                            ui.Label(value_model.get_value_as_string(), style={"font_size": 16}, aligment=ui.Alignment.V_CENTER)
                            button = ui.InvisibleButton(style=TOOLTIP_STYLE)
                            button.set_tooltip_fn(partial(_build_live_users_tooltip, current_live_session, 18))
        elif column_id == 4 and not item.reserved and not item.missing and not item.from_session_layer:
            global_muteness_button = ui.ToolButton(value_model, identifier="global_mute", image_width=14, image_height=14)

            def global_muteness_model_changed(value_model):
                muted = value_model.get_value_as_bool()
                # Global mute is only active in global scope and never for
                # the authoring layer.
                if not model.global_muteness_scope or selected:
                    global_muteness_button.name = "muteness_disable"
                    global_muteness_button.enabled = False
                    if selected and model.global_muteness_scope:
                        global_muteness_button.set_tooltip(
                            "Cannot mute authoring layer."
                        )
                else:
                    global_muteness_button.enabled = True
                    global_muteness_button.name = "muteness_enable"
                global_muteness_button.checked = muted
                if not selected or not model.global_muteness_scope:
                    global_muteness_button.set_tooltip(
                        "Mute layer and persist the change into root layer"
                    )

            global_muteness_model_changed(value_model)
            value_model.callback_id = value_model.subscribe_value_changed_fn(global_muteness_model_changed)
        elif column_id == 5 and not item.is_omni_live_path and item.is_omni_layer and not item.missing and not item.read_only_on_disk:
            g_auto = LayerSettings().auto_reload_sublayers
            button = build_reload_widget(item.identifier, item.usd_context, item.outdated, item.auto_reload, g_auto)
        elif (
            column_id == 6 and not item.read_only_on_disk and not item.reserved and
            not item.anonymous and not item.missing and not item.from_session_layer
        ):
            button = ui.ToolButton(value_model, identifier="lock", image_width=14, image_height=14)

            def lock_model_changed(value_model):
                button.name = "lock"
                if selected:
                    button.enabled = False
                    button.set_tooltip("Cannot lock authoring target.")
                else:
                    button.set_tooltip(
                        "Layer lock is an extended concept in Kit. It does not change\n"
                        "real file permission but adds a flag inside layer's custom data.\n"
                        "When a layer is locked, you cannot set it as edit target nor edit it.\n"
                    )
                locked = value_model.get_value_as_bool()
                button.checked = locked

            lock_model_changed(value_model)
            value_model.callback_id = value_model.subscribe_value_changed_fn(lock_model_changed)
        else:
            return
| 20,619 | Python | 42.965885 | 134 | 0.561084 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/prompt.py | # Keep it here for back compatibility as some extensions needs this package
from omni.kit.widget.prompt import Prompt # pragma: no cover
| 138 | Python | 45.333318 | 75 | 0.797101 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/actions.py | import carb
import omni.kit.actions.core
import weakref
from typing import Callable
from .prim_spec_item import PrimSpecItem
from .layer_model_utils import LayerModelUtils
# Tag grouping all actions this extension registers in the action registry.
ACTIONS_TAG = "Layers Actions"
# Action id for deleting the selected prim spec items (bound to Del below).
ACTION_DELETE_PRIM_SPEC_ITEMS = "delete_deltas"
class Action:
    """A registered Kit action plus an optional hotkey bound to it.

    The action is registered immediately on construction. The hotkey is
    registered only when omni.kit.hotkeys.core is available — it is an
    optional extension, so hotkey registration is best-effort.

    Fix: removed the dead ``pass`` statement after the assignment in the
    ImportError handler of _register_hotkey().
    """

    def __init__(
        self,
        extension_name: str,
        action_name: str,
        action_display_name: str,
        action_description: str,
        on_action_fn: Callable[[], None],
        hotkey: carb.input.KeyboardInput,
        modifiers: int = 0,
    ):
        """Register *action_name* for *extension_name* and bind *hotkey* to it.

        Args:
            extension_name: Owning extension id used for (de)registration.
            action_name: Unique action id within the extension.
            action_display_name: Human-readable name shown in UIs.
            action_description: Description shown in UIs.
            on_action_fn: Zero-argument callback invoked by the action.
            hotkey: Keyboard key triggering the action.
            modifiers: Modifier-key bitmask for the hotkey (default: none).
        """
        self._extension_name = extension_name
        self._action_registry = omni.kit.actions.core.get_action_registry()
        self._input = carb.input.acquire_input_interface()
        self._action_name = action_name
        self._action_display_name = action_display_name
        self._action_description = action_description
        self._hotkey = hotkey
        self._modifiers = modifiers
        self._registered_hotkey = None
        self._hotkey_registry = None
        self._on_action_fn = on_action_fn
        self._action = None
        # Register actions and hotkeys
        self._register()

    def _register(self):
        """Register the action in the registry, then try to bind the hotkey."""
        self._action = self._action_registry.register_action(
            self._extension_name,
            self._action_name,
            self._on_action_fn,
            display_name=self._action_display_name,
            description=self._action_description,
            tag=ACTIONS_TAG
        )
        self._register_hotkey()

    def destroy(self):
        """Drop all references held by this object.

        NOTE(review): this does not deregister the action itself;
        ActionManager.on_shutdown() deregisters everything for the
        extension in bulk.
        """
        self._action = None
        self._hotkey_registry = None
        self._registered_hotkey = None
        self._on_action_fn = None

    def _register_hotkey(self):
        """Bind the hotkey to the action, scoped to the "Layer" window.

        No-op when already registered. Silently skips when the optional
        omni.kit.hotkeys.core extension is not installed.
        """
        if self._registered_hotkey:
            return
        try:
            from omni.kit.hotkeys.core import KeyCombination, get_hotkey_registry, filter

            self._hotkey_registry = get_hotkey_registry()
            hotkey_combo = KeyCombination(self._hotkey, self._modifiers)
            hotkey_filter = filter.HotkeyFilter(windows=["Layer"])
            self._registered_hotkey = self._hotkey_registry.register_hotkey(
                self._extension_name, hotkey_combo, self._extension_name, self._action_name,
                filter=hotkey_filter
            )
        except ImportError:
            # Hotkeys extension is optional; fall back to action-only.
            self._registered_hotkey = None

    def _unregister_hotkey(self):
        """Forget the registered hotkey.

        Registry-wide deregistration happens in
        ActionManager.__unregister_hotkeys().
        """
        self._registered_hotkey = None
class ActionManager:
    """Owns the layer-widget actions/hotkeys for the extension lifetime."""

    def __init__(self):
        # All Action instances created in on_startup().
        self.__all_hotkeys = []

    def on_startup(self, layer_extension):
        """Register actions and hook hotkey (de)registration to the optional
        omni.kit.hotkeys.core extension's enable/disable events.

        Args:
            layer_extension: The layers extension instance; held weakly so
                this manager does not keep it alive.
        """
        self._manager = omni.kit.app.get_app().get_extension_manager()
        self._extension_name = omni.ext.get_extension_name(self._manager.get_extension_id_by_module(__name__))
        weakref_layer_extension = weakref.ref(layer_extension)

        def on_delete_clicked():
            # Action callback: delete the prim spec items currently
            # selected in the layers widget (if the extension still exists).
            layers = weakref_layer_extension()
            if not layers:
                return
            items = layers.get_selected_items()
            prim_spec_items = [item for item in items if isinstance(item, PrimSpecItem)]
            LayerModelUtils.remove_prim_spec_items(prim_spec_items)

        remove_prim_spec_items_hotkey = Action(
            extension_name=self._extension_name,
            action_name=ACTION_DELETE_PRIM_SPEC_ITEMS,
            action_display_name="Layer->Delete Prim Specs",
            action_description="Delete selected prim specs.",
            on_action_fn=on_delete_clicked,
            hotkey=carb.input.KeyboardInput.DEL
        )
        self.__all_hotkeys.append(remove_prim_spec_items_hotkey)
        # Re-register hotkeys whenever the hotkeys extension toggles.
        self._hotkey_extension_subscription = self._manager.subscribe_to_extension_enable(
            lambda _: self.__register_hotkeys(),
            lambda _: self.__unregister_hotkeys(),
            ext_name="omni.kit.hotkeys.core",
            hook_name="omni.kit.widget.layers hotkey listener",
        )

    def __register_hotkeys(self):
        """(Re)register the hotkey of every owned action."""
        for action in self.__all_hotkeys:
            action._register_hotkey()

    def __unregister_hotkeys(self):
        """Deregister all hotkeys for this extension, best-effort."""
        try:
            from omni.kit.hotkeys.core import get_hotkey_registry

            hotkey_registry = get_hotkey_registry()
            hotkey_registry.deregister_all_hotkeys_for_extension(self._extension_name)
        except Exception:
            # Hotkeys extension may already be disabled; nothing to undo.
            pass
        for action in self.__all_hotkeys:
            action._unregister_hotkey()

    def on_shutdown(self):
        """Tear down all actions, hotkeys, and subscriptions."""
        for hotkey in self.__all_hotkeys:
            hotkey.destroy()
        self.__unregister_hotkeys()
        self._hotkey_extension_subscription = None
        action_registry = omni.kit.actions.core.get_action_registry()
        action_registry.deregister_all_actions_for_extension(self._extension_name)
        self.__all_hotkeys = []
| 4,796 | Python | 31.632653 | 110 | 0.611134 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/link_delegate.py | import omni.usd
import omni.kit.usd.layers
import weakref
from .layer_item import LayerItem
from .layer_icons import LayerIcons
from omni import ui
class LayerLinkDelegate(ui.AbstractItemDelegate): # pragma: no cover
def __init__(self, usd_context):
    """Delegate that draws layer items in the spec-linking tree view.

    Args:
        usd_context: The omni.usd context whose layers are shown.
    """
    super().__init__()
    self._usd_context = usd_context
    self._tree_view = None  # weakref.ref to the owning ui.TreeView (set via set_tree_view)
    self._initialized = False  # True once the root item has been auto-expanded
    self._prim_widget = None  # weakref.ref to the companion prim-link widget
    self._popup_menu = None
def on_stage_attached(self):
self._initialized = False
def destroy(self):
self._tree_view = None
self._prim_widget = None
self._popup_menu = None
def set_tree_view(self, tree_view: ui.TreeView):
self._tree_view = weakref.ref(tree_view)
def set_prim_widget(self, layerlink_widget):
self._prim_widget = weakref.ref(layerlink_widget)
def build_branch(self, model, item, column_id, level, expanded):
"""Create a branch widget that opens or closes subtree"""
pass
def build_widget(self, model, item, column_id, level, expanded):
"""Create a widget per item"""
if isinstance(item, LayerItem):
if not self._initialized and self._tree_view() and item == model.root_layer_item:
self._initialized = True
self._tree_view().set_expanded(item, True, False)
if column_id == 0:
value_model = model.get_item_value_model(item, column_id)
text = value_model.get_value_as_string()
with ui.HStack(
height=32,
mouse_double_clicked_fn=lambda x, y, b, _: self._on_mouse_double_clicked(b, item, expanded),
mouse_pressed_fn=lambda x, y, b, _: self._on_mouse_pressed(b, item, expanded),
accept_drop_fn=lambda url: self._on_accept_drop(item, url),
drop_fn=lambda e: self._on_drop(item, e),
):
ui.Label(
text,
name="object_name",
style_type_name_override="TreeView.Item",
tooltip=item.identifier,
)
def build_header(self, column_id):
pass
def _on_accept_drop(self, item, url):
return True
def _on_drop(self, item, e: ui.WidgetMouseDropEvent):
if self._prim_widget is None:
return
specs = self._prim_widget().get_select_specs()
if e.mime_data in specs:
omni.kit.commands.execute(
"LinkSpecs",
spec_paths=specs,
layer_identifiers=item.identifier,
hierarchy=True,
usd_context=self._usd_context
)
def _on_mouse_double_clicked(self, button, item, expanded):
links = omni.kit.usd.layers.get_spec_links_for_layers(self._usd_context, item.identifier)
prim_links = links.get(item.identifier, [])
if self._prim_widget:
self._prim_widget().select(prim_links)
def _on_mouse_pressed(self, button, item, expanded):
if button == 1 and self._tree_view:
# If the selection doesn't contain the node we drag, we should clear the selection and select the node.
if item not in self._tree_view().selection:
self._tree_view().selection = [item]
self._build_popup_menu()
def _get_target_layers(self):
layers = [selected.identifier for selected in self._tree_view().selection]
return layers
def _link_specs(self):
if self._prim_widget is None:
return
specs = self._prim_widget().get_select_specs()
if len(specs) == 0:
return
layers = self._get_target_layers()
omni.kit.commands.execute(
"LinkSpecs",
spec_paths=specs,
layer_identifiers=layers,
hierarchy=True,
usd_context=self._usd_context
)
def _unlink_specs(self):
if self._prim_widget is None:
return
specs = self._prim_widget().get_select_specs()
if len(specs) == 0:
return
layers = self._get_target_layers()
omni.kit.commands.execute(
"UnlinkSpecs",
spec_paths=specs,
layer_identifiers=layers,
hierarchy=True,
usd_context=self._usd_context
)
def clear_layer_links(self):
layers = self._get_target_layers()
omni.kit.commands.execute(
"UnlinkSpecs",
spec_paths="/",
layer_identifiers=layers,
hierarchy=True,
usd_context=self._usd_context
)
def _build_popup_menu(self):
self._popup_menu = ui.Menu("Layerlink popup menu", name="this")
with self._popup_menu:
ui.MenuItem("link selection specs", triggered_fn=self._link_specs)
ui.MenuItem("unlink selection specs", triggered_fn=self._unlink_specs)
ui.MenuItem("clear layer links", triggered_fn=self.clear_layer_links)
self._popup_menu.show()
# elif isinstance(target_item, LayerItem):
# omni.kit.commands.execute(
# "LinkSpecsCommand",
# usd_context=self._usd_context,
# spec_paths=source,
# layer_identifiers=target_item.identifier,
# )
class PrimLinkDelegate(ui.AbstractItemDelegate):  # pragma: no cover
    """TreeView delegate for prim-spec rows in the (experimental) spec-linking UI.

    Column layout: 0 = link badge, 1 = lock toggle, 2 = expand arrow + prim
    name. The right-click menu links/unlinks the selected specs to the layers
    selected in the companion layer widget (attached via
    :meth:`set_layerlink_widget`).
    """

    def __init__(self, usd_context):
        super().__init__()
        self._usd_context = usd_context
        # Weakrefs once the setters run; None until then.
        self._tree_view = None
        self._layerlink_widget = None
        self._popup_menu = None

    def destroy(self):
        self._usd_context = None
        self._tree_view = None
        self._layerlink_widget = None
        self._popup_menu = None

    def set_tree_view(self, treeview: ui.TreeView):
        self._tree_view = weakref.ref(treeview)

    def set_layerlink_widget(self, layerlink_widget):
        self._layerlink_widget = weakref.ref(layerlink_widget)

    def build_branch(self, model, item, column_id, level, expanded):
        """Create a branch widget that opens or closes subtree"""
        # The expand/collapse affordance lives in the name column (2),
        # indented by tree depth.
        if column_id == 2:
            with ui.HStack(width=20 * (level + 1), height=0):
                ui.Spacer()
                if model.can_item_have_children(item):
                    # Draw the +/- icon
                    image_name = "Minus" if expanded else "Plus"
                    ui.Image(
                        LayerIcons().get(image_name), width=10, height=10, style_type_name_override="TreeView.Item"
                    )
                ui.Spacer(width=5)

    def build_widget(self, model, item, column_id, level, expanded):
        """Create a widget per item"""
        if column_id == 0:
            # Link badge: visible only while the item has any layer links.
            link_filename = LayerIcons().get("link")
            with ui.VStack(width=16, height=20):
                ui.Spacer()
                link_image = ui.Image(
                    link_filename,
                    width=12,
                    height=12,
                    alignment=ui.Alignment.CENTER,
                    name="object_icon",
                    style_type_name_override="LayerView.Image",
                )
                ui.Spacer()
            link_image.visible = item.linked
            # The item updates this image when its link state changes.
            item.set_linked_image(link_image)
        if column_id == 1:
            filename = LayerIcons().get("lock") if item.locked else LayerIcons().get("lock_open")
            # NOTE(review): `filename` is computed but never used below; the
            # style references the literal '(unknown)' which looks like a lost
            # f-string interpolation of `filename`. Confirm against upstream.
            image_style = {"": {"image_url": f'(unknown)'}}
            with ui.VStack(width=16, height=20):
                ui.Spacer()
                lock_image = ui.Image(
                    width=12,
                    height=12,
                    alignment=ui.Alignment.CENTER,
                    style=image_style,
                    mouse_pressed_fn=lambda x, y, b, _: self._on_lock_pressed(b, item, expanded),
                )
                ui.Spacer()
            lock_image.visible = True
            item.set_locked_image(lock_image)
        if column_id == 2:
            value_model = model.get_item_value_model(item, column_id)
            text = value_model.get_value_as_string()
            with ui.HStack(
                height=20,
                width=100,
                mouse_double_clicked_fn=lambda x, y, b, _: self._on_mouse_double_clicked(b, item, expanded),
                mouse_pressed_fn=lambda x, y, b, _: self._on_mouse_pressed(b, item, expanded),
            ):
                ui.Label(
                    text,
                    name="object_name",
                    style_type_name_override="TreeView.Item",
                )

    def _on_mouse_double_clicked(self, button, item, expanded):
        # Left double-click: highlight the layers this spec links to in the
        # companion layer widget.
        if button == 0:
            links = omni.kit.usd.layers.get_spec_layer_links(self._usd_context, item.path, True)
            layers = links.get(item.path.pathString, [])
            if self._layerlink_widget:
                self._layerlink_widget().select(layers)

    def _on_lock_pressed(self, button, item, expanded):
        # Toggle the spec's lock state via the matching commands.
        if item.locked:
            omni.kit.commands.execute(
                "UnlockSpecsCommand",
                usd_context=self._usd_context,
                spec_paths=item.path,
            )
        else:
            omni.kit.commands.execute(
                "LockSpecsCommand",
                usd_context=self._usd_context,
                spec_paths=item.path,
            )

    def _get_select_specs(self):
        # String paths of all specs currently selected in the tree view.
        specs = [item.path.pathString for item in self._tree_view().selection]
        return specs

    def _link_layers(self):
        """Link the selected specs to the layer widget's selected layers."""
        layers = self._layerlink_widget().get_select_layers()
        if len(layers) == 0:
            return
        specs = self._get_select_specs()
        omni.kit.commands.execute(
            "LinkSpecs",
            spec_paths=specs,
            layer_identifiers=layers,
            hierarchy=True,
            usd_context=self._usd_context
        )

    def _unlink_layers(self):
        """Unlink the selected specs from the layer widget's selected layers."""
        layers = self._layerlink_widget().get_select_layers()
        if len(layers) == 0:
            return
        specs = self._get_select_specs()
        omni.kit.commands.execute(
            "UnlinkSpecs",
            spec_paths=specs,
            layer_identifiers=layers,
            hierarchy=True,
            usd_context=self._usd_context
        )

    def clear_specs_links(self):
        """Replace the selected specs' links with an empty set.

        Uses LinkSpecs with additive=False and no layers, which clears all
        existing links on those specs.
        """
        specs = self._get_select_specs()
        omni.kit.commands.execute(
            "LinkSpecs",
            spec_paths=specs,
            layer_identifiers=[],
            additive=False,
            hierarchy=True,
            usd_context=self._usd_context
        )

    def _on_mouse_pressed(self, button, item, expanded):
        if button == 1 and self._tree_view:
            # If the selection doesn't contain the node we drag, we should clear the selection and select the node.
            if item not in self._tree_view().selection:
                self._tree_view().selection = [item]
            self._build_popup_menu()

    def _build_popup_menu(self):
        # Rebuilt on every right-click; the reference keeps the menu alive
        # while it is shown.
        self._popup_menu = ui.Menu("Layerlink popup menu", name="this")
        with self._popup_menu:
            ui.MenuItem("link to selected layers", triggered_fn=self._link_layers)
            ui.MenuItem("unlink from selected layers", triggered_fn=self._unlink_layers)
            ui.MenuItem("clear all", triggered_fn=self.clear_specs_links)
        self._popup_menu.show()
| 11,537 | Python | 33.648649 | 115 | 0.539308 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/path_utils.py | import os
import omni.client
class PathUtils:
    """Static helpers for classifying Omniverse URLs."""

    @staticmethod
    def is_omni_path(path: str):
        """Return a truthy value when *path* parses as an omniverse:// URL."""
        broken = omni.client.break_url(path)
        if not broken:
            # Preserve the falsy parse result itself.
            return broken
        return broken.scheme == "omniverse"

    @staticmethod
    def is_omni_live(path: str):
        """Return True when the URL's path component has a .live extension."""
        broken = omni.client.break_url(path)
        if not broken:
            return False
        extension = os.path.splitext(broken.path)[1]
        return extension == ".live"
| 404 | Python | 20.315788 | 48 | 0.589109 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/prim_spec_item.py | import omni
import omni.ui as ui
import omni.usd
from omni.kit.usd.layers import LayerUtils
from typing import List, Set
from .models.prim_name_model import PrimNameModel
from .layer_settings import LayerSettings
from .globals import LayerGlobals
from pxr import Sdf, Trace
class PrimSpecSpecifier:
    """Constant namespace classifying a prim spec by specifier and composition arcs.

    Values are assigned by :func:`get_prim_specifier`, which combines the
    spec's Sdf specifier (def/over) with whether it carries reference or
    payload list-op items.
    """

    # `def` specifier with no reference/payload arcs.
    DEF_ONLY = 1
    # `def` specifier carrying at least one reference.
    DEF_WITH_REFERENCE = 2
    # `def` specifier carrying at least one payload.
    DEF_WITH_PAYLOAD = 3
    # `over` specifier with no reference/payload arcs.
    OVER_ONLY = 4
    # `over` specifier carrying at least one reference.
    OVER_WITH_REFERENCE = 5
    # `over` specifier carrying at least one payload.
    OVER_WITH_PAYLOAD = 6
    # Missing spec or any other specifier (e.g. class).
    UNKNOWN = 7
def get_prim_specifier(spec):
    """Classify *spec* into one of the :class:`PrimSpecSpecifier` constants.

    The classification combines the spec's Sdf specifier (def/over) with
    whether its reference and payload list-ops contribute any items.
    Returns UNKNOWN for a missing spec or any other specifier.
    """
    if not spec:
        return PrimSpecSpecifier.UNKNOWN

    def _list_op_has_items(info_key):
        # Resolve the list-op stored under `info_key` and report whether it
        # contributes at least one item after applying its operations.
        if not spec.HasInfo(info_key):
            return False
        list_op = spec.GetInfo(info_key)
        return len(list_op.ApplyOperations([])) > 0

    has_payload = _list_op_has_items(Sdf.PrimSpec.PayloadKey)
    has_reference = _list_op_has_items(Sdf.PrimSpec.ReferencesKey)

    # References take precedence over payloads in the classification.
    if spec.specifier == Sdf.SpecifierOver:
        if has_reference:
            return PrimSpecSpecifier.OVER_WITH_REFERENCE
        if has_payload:
            return PrimSpecSpecifier.OVER_WITH_PAYLOAD
        return PrimSpecSpecifier.OVER_ONLY
    if spec.specifier == Sdf.SpecifierDef:
        if has_reference:
            return PrimSpecSpecifier.DEF_WITH_REFERENCE
        if has_payload:
            return PrimSpecSpecifier.DEF_WITH_PAYLOAD
        return PrimSpecSpecifier.DEF_ONLY
    return PrimSpecSpecifier.UNKNOWN
class PrimSpecItem(ui.AbstractItem):
    """A single AbstractItemModel item that represents a single prim.

    Flag data (specifier, type name, instanceable, missing references) is
    computed lazily on first access via __update_flags_internal so that large
    stages only pay for the items the tree actually displays. Link/lock state
    is likewise initialized on first read.
    """

    def __init__(self, usd_context, path: Sdf.Path, layer_item):
        super().__init__()
        self._usd_context = usd_context
        self._path: Sdf.Path = path
        # Owning LayerItem; also the central cache for child items (OM-45514).
        self._layer_item = layer_item
        self._type_name: str = None
        self._name_model = None
        self._specifier: PrimSpecSpecifier = PrimSpecSpecifier.UNKNOWN
        self._has_missing_reference = False
        self._instanceable = False
        # Lazy-flag machinery: flags are recomputed on next access when
        # _pending_to_update_flags is True.
        self._pending_to_update_flags = True
        self._flags_initialized = False
        # Link if it's existed
        self._links_initialized = False
        self._linked = False
        self._locked = False
        # Filtering
        self._filtered = False

    def _get_name_model(self):
        # Lazily create the ui value model for the name column.
        if not self._name_model:
            self._name_model = PrimNameModel(self)
        return self._name_model

    def destroy(self):
        if self._name_model:
            self._name_model.destroy()
            self._name_model = None
        self._layer_item = None
        self._flags_initialized = False

    @property
    def layer_item(self):
        return self._layer_item

    def _initialize_link_and_lock_states(self):
        # One-time query of link/lock state; invalidated by
        # on_layer_edit_mode_changed.
        if not self._links_initialized:
            links = omni.kit.usd.layers.get_spec_layer_links(self._usd_context, self._path, False)
            if links:
                self._linked = True
            else:
                self._linked = False
            self._locked = omni.kit.usd.layers.is_spec_locked(self._usd_context, self._path)
            self._links_initialized = True

    @property
    def locked(self):
        """Whether this spec is locked (lazily initialized)."""
        self._initialize_link_and_lock_states()
        return self._locked

    @locked.setter
    def locked(self, value):
        self._initialize_link_and_lock_states()
        if value != self._locked:
            self._locked = value
            # Refresh the name column so the lock state is re-rendered.
            if self._name_model:
                self._name_model._value_changed()

    @property
    def linked(self):
        """Whether this spec has any layer links (lazily initialized)."""
        self._initialize_link_and_lock_states()
        return self._linked

    @linked.setter
    def linked(self, value):
        self._initialize_link_and_lock_states()
        if value != self._linked:
            self._linked = value
            if self._name_model:
                self._name_model._value_changed()

    @property
    def name(self):
        self.__update_flags_internal()
        return self._path.name

    @property
    def path(self):
        return self._path

    @property
    def prim_spec(self):
        """Handle of Sdf.PrimSpec."""
        self.__update_flags_internal()
        if self.layer:
            return self.layer.GetPrimAtPath(self._path)
        return None

    @property
    def layer(self):
        """Handle of Sdf.Layer this prim spec resides in."""
        return self._layer_item._layer

    @property
    def type_name(self):
        """Type name of this prim spec in stage."""
        self.__update_flags_internal()
        return self._type_name

    @property
    def children(self):
        """List of children."""
        # OM-45514: All prim_spec_items are maintained centralized inside layer item
        # to remove memory cost.
        return self._layer_item._get_item_children(self._path)

    @property
    def parent(self):
        """Parent spec."""
        if self._path == Sdf.Path.absoluteRootPath or not self._layer_item:
            return None
        parent_path = self._path.GetParentPath()
        found, _ = self._layer_item._get_item_from_cache(parent_path)
        return found

    @property
    def specifier(self):
        """Specifier of prim spec."""
        self.__update_flags_internal()
        return self._specifier

    @property
    def has_missing_reference(self):
        """If this prim spec includes missing references."""
        self.__update_flags_internal()
        return self._has_missing_reference

    @property
    def instanceable(self):
        self.__update_flags_internal()
        return self._instanceable

    @property
    def filtered(self):
        """If this prim spec is filtered in the search list."""
        return self._filtered

    @filtered.setter
    def filtered(self, value):
        self._filtered = value

    @property
    def has_children(self):
        return len(self.children) > 0

    def on_layer_muteness_changed(self):
        # Muteness affects rendering of this item and its whole subtree.
        if self._name_model:
            self._name_model._value_changed()
        for child in self.children:
            child.on_layer_muteness_changed()

    def on_layer_edit_mode_changed(self):
        # Link/lock state depends on the edit mode; force re-query on next read.
        self._links_initialized = False
        if self._name_model:
            self._name_model._value_changed()

    def update_flags(self):
        # Do lazy load only if it's necessary.
        # Since tree widget only populates visible items,
        # this is helpful for loading large stage.
        self._pending_to_update_flags = True
        if self._flags_initialized:
            # If it's to refresh flags, loading it immediately.
            self.__update_flags_internal()

    def __update_flags_internal(self):
        """Recompute specifier/type/instanceable/missing-reference flags.

        No-op unless an update is pending. Notifies the name model when any
        flag changed so the row is redrawn.
        """
        if not self._pending_to_update_flags:
            return True
        self._flags_initialized = True
        self._pending_to_update_flags = False
        layer = self._layer_item.layer
        stage = self._usd_context.get_stage()
        if not stage or not layer:
            return False
        prim_spec = self.prim_spec
        if not prim_spec:
            return False
        specifier = get_prim_specifier(prim_spec)
        changed = False
        if self._specifier != specifier:
            changed = True
            self._specifier = specifier
        # Type name and instanceable come from the composed stage prim, which
        # may not exist (e.g. muted layers).
        prim = stage.GetPrimAtPath(self._path)
        if prim:
            type_name = prim.GetTypeName()
            instanceable = prim.GetMetadata("instanceable")
            if not instanceable:
                instanceable = False
        else:
            type_name = None
            instanceable = False
        if self._instanceable != instanceable:
            changed = True
            self._instanceable = instanceable
        if self._type_name != type_name:
            changed = True
            self._type_name = type_name
        # Missing-reference scan is optional since it walks external layers.
        if LayerSettings().show_missing_reference:
            has_missing_reference = self._has_missing_references(layer, prim_spec)
            if self._has_missing_reference != has_missing_reference:
                changed = True
                self._has_missing_reference = has_missing_reference
        if changed and self._name_model:
            self._name_model._value_changed()
            return True
        return False

    def _has_missing_reference_in_layer(self, layer_identifier):
        """BFS over the external-reference graph; True when any layer is missing.

        Missing layers are memoized in LayerGlobals so repeated scans are cheap.
        """
        # NOTE(review): `accessed_layers` is a list (O(n) membership) and
        # `queue.pop(0)` is O(n); a set + collections.deque would scale better
        # on stages with many external references.
        queue = [layer_identifier]
        accessed_layers = []
        while len(queue) > 0:
            identifier = queue.pop(0)
            if identifier in accessed_layers:
                continue
            accessed_layers.append(identifier)
            if LayerGlobals.is_layer_missing(identifier):
                return True
            layer = LayerUtils.find_layer(identifier)
            if not layer:
                LayerGlobals.add_missing_layer(identifier)
            if layer:
                for reference in layer.externalReferences:
                    if len(reference) > 0:
                        absolute_path = layer.ComputeAbsolutePath(reference)
                        queue.append(absolute_path)
            else:
                return True
        return False

    def _has_missing_references(self, layer, prim_spec):
        """True when any USD-readable reference of *prim_spec* is unresolvable."""

        def has_missing_item(items):
            for item in items:
                if omni.usd.is_usd_readable_filetype(item.assetPath):
                    filename = layer.ComputeAbsolutePath(item.assetPath)
                    if self._has_missing_reference_in_layer(filename):
                        return True
            return False

        reference_list = prim_spec.referenceList
        # DON'T USE referencelist.prependedItems since it's VERY SLOW to
        # access.
        return has_missing_item(reference_list.GetAddedOrExplicitItems())

    def get_item_value_model(self, column_id):
        # Only column 0 (the name) has a value model.
        if column_id == 0:
            return self._get_name_model()
        return None

    def __repr__(self):
        return f"<Omni::UI Prim Spec Item '{self._path}'>"

    def __str__(self):
        return f"{self._path}"
| 10,085 | Python | 28.234783 | 98 | 0.588795 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/window.py | # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .layer_delegate import LayerDelegate
from .layer_icons import LayerIcons
from .layer_model import LayerModel, LayerItem, PrimSpecItem
from .layer_model_utils import LayerModelUtils
from .layer_settings import LayerSettings
from .layer_color_scheme import LayerColorScheme
from .selection_watch import SelectionWatch
from .models.save_all_model import SaveAllModel
from .models.layer_scope_model import LayerScopeModel
from .models.layer_auto_authoring import LayerAutoAuthoringModel
from .external_drag_drop_helper import setup_external_drag_drop, destroy_external_drag_drop
from omni.kit.usd.layers import LayerUtils
from enum import Enum
from functools import partial
from pxr import Sdf
import asyncio
import carb
import carb.settings
import omni
import omni.ui as ui
import weakref
import omni.kit.usd.layers as layers
import omni.kit.notification_manager as nm
class LiveSessionButtonOptions(Enum):
    """Button layouts for a live-session action.

    NOTE(review): consumers are not visible in this chunk -- presumed to
    choose between offering only a quit action versus merge-and-quit.
    """

    QUIT_ONLY = 0
    MERGE_AND_QUIT = 1
class LayerWindow:
"""The Layer 2 window"""
def __init__(self, window_name, usd_context):
self._handling_mode_change = False
self._settings = carb.settings.get_settings()
self._visiblity_changed_listener = None
window_flags = ui.WINDOW_FLAGS_NO_SCROLLBAR
self._window = ui.Window(
window_name, width=600, height=800, flags=window_flags, dockPreference=ui.DockPreference.RIGHT_TOP
)
self._window.set_visibility_changed_fn(self._visibility_changed_fn)
self._window.deferred_dock_in("Stage", ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
self._window.dock_order = 1
self._usd_context = usd_context
self._layers = layers.get_layers(self._usd_context)
self._layers_state = layers.get_layers_state(self._usd_context)
self._model = LayerModel(self._usd_context)
weakref_model = weakref.ref(self._model)
self._delegate = LayerDelegate(self._usd_context)
self._save_all_model = SaveAllModel(self._model)
self._layer_scope_model = LayerScopeModel(self._model)
self._auto_authoring_model = LayerAutoAuthoringModel(self._model)
self._model.add_dirtiness_listener(self._on_dirtiness_changed)
self._model.add_layer_muteness_scope_listener(self._on_muteness_scope_changed)
self._model.add_stage_attach_listener(self._on_stage_attached)
self._layers_event_subs = []
for event in [
layers.LayerEventType.LIVE_SESSION_STATE_CHANGED,
layers.LayerEventType.EDIT_MODE_CHANGED,
]:
layers_event_sub = self._layers.get_event_stream().create_subscription_to_pop_by_type(
event, self._on_layer_events, name=f"Layers Window {str(event)}"
)
self._layers_event_subs.append(layers_event_sub)
# Options menu
self._options_menu = ui.Menu("Options")
with self._options_menu:
ui.MenuItem("Options", enabled=False)
ui.Separator()
ui.MenuItem(
"Auto Reload Layers",
checkable=True,
checked=LayerSettings().auto_reload_sublayers,
checked_changed_fn=self._on_auto_reload_sublayers,
)
ui.MenuItem("Reload Outdated Layers", triggered_fn=self._on_reload_layers)
ui.Separator()
ui.MenuItem(
"Show Layer Contents",
checkable=True,
checked=LayerSettings().show_layer_contents,
checked_changed_fn=self._on_show_layer_contents,
)
ui.MenuItem(
"Show Session Layer",
checkable=True,
checked=LayerSettings().show_session_layer,
checked_changed_fn=self._on_show_session_layer,
)
ui.MenuItem(
"Show MetricsAssembler Layer",
checkable=True,
checked=LayerSettings().show_metricsassembler_layer,
checked_changed_fn=self._on_show_metricsassembler_layer,
)
ui.MenuItem(
"Choose Root Layer As Default Location",
checkable=True,
checked=LayerSettings().file_dialog_show_root_layer_location,
checked_changed_fn=self._on_file_dialog_default_location_changed,
)
ui.MenuItem(
"Show Info Notification",
checkable=True,
checked=LayerSettings().show_info_notification,
checked_changed_fn=self._on_show_info_notification,
)
ui.MenuItem(
"Show Warning Notification",
checkable=True,
checked=LayerSettings().show_warning_notification,
checked_changed_fn=self._on_show_warning_notification,
)
ui.MenuItem(
"Show Missing Reference",
checkable=True,
checked=LayerSettings().show_missing_reference,
checked_changed_fn=self._on_show_missing_reference,
)
ui.MenuItem(
"Show Warning for Layer Merge or Flatten Operations",
checkable=True,
checked=LayerSettings().show_merge_or_flatten_warning,
checked_changed_fn=self._on_show_merge_or_flatten_warning,
)
ui.MenuItem(
"Ignore Notifications for Outdated Layers",
checkable=True,
checked=LayerSettings().ignore_outdate_notification,
checked_changed_fn=self._on_ignore_outdate_notification,
)
ui.Separator()
self._auto_authoring_menu_item = ui.MenuItem(
"Auto Authoring Layers (Experimental)",
checkable=True,
checked=LayerSettings().enable_auto_authoring_mode,
checked_changed_fn=self._on_enable_auto_authoring_mode,
)
self._continuous_update_menu_item = ui.MenuItem(
"Continuous Updates in Auto-Authoring Mode",
checkable=True,
checked=LayerSettings().continuous_update_in_auto_authoring,
checked_changed_fn=self._on_continuous_update_in_auto_authoring,
)
self._spec_linking_menu_item = ui.MenuItem(
"Spec Linking Mode (Experimental)",
checkable=True,
checked=LayerSettings().enable_spec_linking_mode,
checked_changed_fn=self._on_enable_spec_linking_mode,
)
style = {
"Image::avartar": {"image_url": LayerIcons().get("avartar")},
"Image::drop_down": {"image_url": LayerIcons().get("drop_down")},
"Image::layers_lightning": {"image_url": LayerIcons().get("lightning"), "color": 0xFF00FFFF},
"Image::layer_auto_authoring": {"image_url": LayerIcons().get("layers")},
"Image::layers": {"image_url": LayerIcons().get("layers")},
"Image::layers_outdate": {"image_url": LayerIcons().get("layers"), "color": 0xFF0097FF},
"Image::layers_edit_target": {"image_url": LayerIcons().get("layers"), "color": 0xFF57B44D},
"Image::layers_edit_target:selected": {"image_url": LayerIcons().get("layers"), "color": 0xFF10781A},
"Image::layers_live_sync": {"image_url": LayerIcons().get("live_syncing")},
"Image::layers_live_sync:selected": {"image_url": LayerIcons().get("live_syncing"), "color": 0xFF10781A},
"Image::layers_missing": {"image_url": LayerIcons().get("layers"), "color": LayerColorScheme().LAYER_LABEL_MISSING},
"Image::layers_missing:selected": {"image_url": LayerIcons().get("layers"), "color": LayerColorScheme().LAYER_LABEL_MISSING},
"Image::layers_has_child_edit_target": {"image_url": LayerIcons().get("layers_half_green")},
"Image::layer_read_only_lock": {"image_url": LayerIcons().get("layer_read_only_lock")},
"Button.Image::filter": {"image_url": LayerIcons().get("filter"), "color": 0xFF8A8777},
"Button.Image::options": {"image_url": LayerIcons().get("options"), "color": 0xFF8A8777},
"Button.Image::layer_save": {"image_url": LayerIcons().get("layer_save")},
"Button.Image::layer_save:checked": {"image_url": LayerIcons().get("layer_save")},
"Button.Image::layer_scope": {"image_url": LayerIcons().get("layers_switch_local")},
"Button.Image::layer_scope:checked": {"image_url": LayerIcons().get("layers_switch_global")},
"Button.Image::layerdelete": {"image_url": LayerIcons().get("trash"), "color": 0xFFFFFFFF},
"Button.Image::layerinsert": {"image_url": LayerIcons().get("insert_layer"), "color": 0xFFFFFFFF},
"Button.Image::layeradd": {"image_url": LayerIcons().get("new_layer"), "color": 0xFFFFFFFF},
"Button.Image::dirty": {"image_url": LayerIcons().get("layer_save"), "color": 0xFFB0B0B0},
"Button.Image::dirty_selected": {"image_url": LayerIcons().get("layer_save"), "color": 0xFF575757},
"Button.Image::dirty_readonly": {"image_url": LayerIcons().get("layer_read_only")},
"Button.Image::dirty:checked": {"image_url": LayerIcons().get("layer_save"), "color": 0xFFFFBF00},
"Button.Image::dirty_selected:checked": {"image_url": LayerIcons().get("layer_save"), "color": 0xFF8B622D},
"Button.Image::live_update": {"image_url": LayerIcons().get("lightning"), "color": 0xFFA0A0A0},
"Button.Image::live_update:checked": {"image_url": LayerIcons().get("lightning"), "color": 0xFF00B86B},
"Button.Image::lock": {"image_url": LayerIcons().get("lock_open")},
"Button.Image::lock:checked": {"image_url": LayerIcons().get("lock"), "color": 0xFFFF901E},
"Button.Image::muteness_enable": {"image_url": LayerIcons().get("eye_on")},
"Button.Image::muteness_enable:checked": {"image_url": LayerIcons().get("eye_off")},
"Button.Image::muteness_enable:selected": {"color": 0xFFFFFFFF},
"Button.Image::muteness_disable": {"image_url": LayerIcons().get("eye_on"), "color": 0xFFA0A0A0},
"Button.Image::muteness_disable:checked": {
"image_url": LayerIcons().get("eye_off"),
"color": 0xFFB0B0B0,
},
"Button.Image::muteness_disable:selected": {"color": 0xFF23211F},
"Button::filter": {"background_color": 0x0, "margin": 0},
"Button::options": {"background_color": 0x0, "margin": 0},
"Button::options:hovered": {"background_color": 0x0, "margin": 0},
"Button::layer_save": {"background_color": 0x0, "margin": 0},
"Button::layer_save:checked": {"background_color": 0x0, "margin": 0},
"Button::layer_save:pressed": {"background_color": 0x0, "margin": 0},
"Button::layer_save:hovered": {"background_color": 0x0, "margin": 0},
"Button::layer_scope": {"background_color": 0x0, "margin": 0},
"Button::layer_scope:checked": {"background_color": 0x0, "margin": 0},
"Button::layer_scope:pressed": {"background_color": 0x0, "margin": 0},
"Button::layer_scope:hovered": {"background_color": 0x0, "margin": 0},
"Button::layerdelete": {"background_color": 0x0, "margin": 0},
"Button::layerinsert": {"background_color": 0x0, "margin": 0},
"Button::layeradd": {"background_color": 0x0, "margin": 0},
"Button::dirty": {"background_color": 0x0, "margin": 0},
"Button::dirty_selected": {"background_color": 0x0, "margin": 0},
"Button::dirty:checked": {"background_color": 0x0},
"Button::dirty_selected:checked": {"background_color": 0x0},
"Button::dirty_readonly": {"background_color": 0x0, "margin": 0},
"Button::dirty_readonly:checked": {"background_color": 0x0},
"Button::dirty_readonly:pressed": {"background_color": 0x0},
"Button::lock": {"background_color": 0x0, "margin": 0},
"Button::lock:checked": {"background_color": 0x0},
"Button::lock:hovered": {"background_color": 0x0},
"Button::lock:pressed": {"background_color": 0x0},
"Button::muteness_enable": {"background_color": 0x0, "margin": 0},
"Button::muteness_enable:checked": {"background_color": 0x0},
"Button::muteness_enable:hovered": {"background_color": 0x0},
"Button::muteness_enable:pressed": {"background_color": 0x0},
"Button::muteness_disable": {"background_color": 0x0, "margin": 0},
"Button::muteness_disable:checked": {"background_color": 0x0},
"Button::muteness_disable:hovered": {"background_color": 0x0},
"Button::muteness_disable:pressed": {"background_color": 0x0},
"Button::live_update": {"background_color": 0x0, "margin": 0},
"Button::live_update:checked": {"background_color": 0x0},
"Button::live_update:hovered": {"background_color": 0x0},
"Button::live_update:pressed": {"background_color": 0x0},
"Label::search": {"color": 0xFF808080, "margin_width": 4},
"Label::auto_authoring_off": {"color": 0xFF4B4BB0, "font_size": 14},
"Label::auto_authoring_on": {"color": 0xFF00B775, "font_size": 14},
"Rectangle::edit_target": {"background_color": 0xFF3E652F},
"Rectangle::edit_target_with_corner": {"background_color": 0xFF3E652F, "border_radius": 4},
"Rectangle::edit_layer_with_corner": {"background_color": 0xFF12697B, "border_radius": 4},
"Rectangle::normal": {"background_color": 0xFF444444},
"Rectangle::selected": {"background_color": 0x0},
"Rectangle::hovering": {"background_color": 0x0, "border_radius": 2, "margin": 0, "padding": 0},
"Rectangle::hovering:hovered": {"background_color": 0xFF9E9E9E},
"TreeView": {
"background_color": 0xFF23211F,
"background_selected_color": 0x664F4D43,
"secondary_color": 0x0,
"secondary_selected_color": 0x0,
"border_width": 1.5,
},
"LayerView.ScrollingFrame": {"background_color": 0xFF23211F},
"LayerView.Header": {"background_color": 0xFF343432, "color": 0xFFCCCCCC, "font_size": 13.0},
"LayerView.Image::object_icon_grey": {"color": 0x80FFFFFF},
"LayerView.Item": {"color": LayerColorScheme().LAYER_LABEL_NORMAL},
"LayerView.Item::object_name_grey": {"color": LayerColorScheme().LAYER_LABEL_DISABLED},
"LayerView.Item::object_name_missing": {"color": LayerColorScheme().LAYER_LABEL_MISSING},
"LayerView.Item::object_name_missing:selected": {"color": LayerColorScheme().LAYER_LABEL_MISSING_SELECTED},
"LayerView.Item:selected": {"color": 0xFFFFFFFF},
"LayerView.Item::object_name_outdated": {"color": LayerColorScheme().OUTDATED},
"LayerView.Item::object_name_outdated:selected": {"color": LayerColorScheme().OUTDATED},
"LayerView.Item::edit_target": {"color": 0xFFFFFFFF},
"LayerView:selected": {"background_color": 0xFF8A8777},
"TreeView:drop": {
"background_color": ui.color.shade(ui.color("#34C7FF3B")),
"background_selected_color": ui.color.shade(ui.color("#34C7FF3B")),
"border_color": ui.color.shade(ui.color("#2B87AA")),
},
"Button.Image::merge_down": {"image_url": LayerIcons().get("merge_down"), "color": 0xFFB0B0B0},
"Button.Image::merge_down_selected": {"image_url": LayerIcons().get("merge_down"), "color": 0xFF575757},
"Button.Image::merge_down:checked": {"image_url": LayerIcons().get("merge_down"), "color": 0xFFFFC118},
"Button.Image::merge_down_selected:checked": {"image_url": LayerIcons().get("merge_down"), "color": 0xFFFFC118},
"Button::merge_down": {"background_color": 0x0, "margin": 0},
"Button::merge_down:checked": {"background_color": 0x0},
"Button::merge_down:hovered": {"background_color": 0x0},
"Button::merge_down:pressed": {"background_color": 0x0},
"Button::merge_down_selected": {"background_color": 0x0, "margin": 0},
"Button::merge_down_selected:checked": {"background_color": 0x0},
"Button::merge_down_selected:hovered": {"background_color": 0x0},
"Button::merge_down_selected:pressed": {"background_color": 0x0},
"Button.Image::auto_authoring": {"image_url": LayerIcons().get("layers")},
"Button::auto_authoring": {"background_color": 0x0, "margin": 0},
"Button::auto_authoring:checked": {"background_color": 0x0},
"Button::auto_authoring:hovered": {"background_color": 0x0},
"Button::auto_authoring:pressed": {"background_color": 0x0},
"Button.Image::latest": {"image_url": LayerIcons().get("reload_dark"), "color": 0xFFB0B0B0},
"Button.Image::latest:checked": {"image_url": LayerIcons().get("reload_dark"), "color": LayerColorScheme().OUTDATED},
"Button::latest": {"background_color": 0x0, "margin": 0},
"Button::latest:checked": {"background_color": 0x0},
"Button::latest:hovered": {"background_color": 0x0},
"Button::latest:pressed": {"background_color": 0x0},
}
use_default_style = carb.settings.get_settings().get_as_string("/persistent/app/window/useDefaultStyle") or False
if use_default_style:
style = {}
with self._window.frame:
with ui.VStack(spacing=3, style=style):
with ui.ZStack(height=0):
with ui.HStack():
# Search field
with ui.VStack(height=0):
ui.Spacer(height=4)
self._search = ui.StringField(name="search", width=ui.Fraction(1), height=20).model
ui.Spacer(height=4)
# Delta button
with ui.HStack(width=0):
with ui.ZStack(width=0, height=0):
with ui.VStack(width=0, height=0):
ui.Spacer()
with ui.HStack(width=24, height=0):
ui.Spacer()
with ui.VStack(width=0, height=0):
self._auto_authoring_image = ui.ToolButton(
self._auto_authoring_model, name="auto_authoring", width=24, height=24
)
ui.Spacer()
self._auto_authoring_image.set_tooltip(
"Auto Authoring Mode (Experimental)\n\n"
"Under auto authoring mode, all edits will be automatically forwarded to\n"
"the layer that has the strongest opinion to ease the burden of working with\n"
"multiple sublayers to manage delta changes."
)
with ui.VStack(height=0):
ui.Spacer(height=20)
self._auto_authoring_label = ui.Label(
"AA", style={"font_size": 12}, height=0, alignment=omni.ui.Alignment.CENTER
)
# Options button
with ui.HStack(width=0, height=0):
ui.Spacer(width=2)
with ui.VStack(height=0):
ui.Spacer(height=4)
with ui.ZStack(width=20, height=20):
ui.Rectangle(name="hovering")
self._save_all_button = ui.ToolButton(self._save_all_model, name="dirty")
ui.Spacer(height=4)
ui.Spacer(width=2)
with ui.HStack(width=48, spacing=0):
ui.Spacer()
scope_button = ui.ToolButton(
self._layer_scope_model, name="layer_scope", width=48, height=28
)
scope_button.set_tooltip("Switch L/G to persist muteness or not. (Mode G for save)")
ui.Spacer()
with ui.HStack(width=24, height=0):
ui.Spacer()
with ui.VStack(width=0, height=0):
ui.Spacer(height=4)
with ui.ZStack(width=20, height=20):
ui.Rectangle(name="hovering")
ui.Button(name="options", clicked_fn=lambda: self._options_menu.show())
ui.Spacer(height=4)
ui.Spacer()
# Place holder to align buttons
ui.Spacer(width=24)
ui.Spacer(width=16, height=0)
# The label on the top of the search field
self._search_label = ui.Label("Search", name="search")
with ui.ScrollingFrame(
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
style_type_name_override="LayerView.ScrollingFrame",
):
self._layer_view = ui.TreeView(
self._model,
delegate=self._delegate,
column_widths=[ui.Fraction(1), 24, 24, 24, 24, 24, 26],
header_visible=False,
root_visible=False,
drop_between_items=True,
)
self._delegate.set_tree_view(self._layer_view)
weakref_treeview = weakref.ref(self._layer_view)
with ui.ScrollingFrame(
height=32,
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_OFF,
style_type_name_override="LayerView.ScrollingFrame",
):
with ui.HStack(height=0):
ui.Spacer()
button = ui.Button(
width=32,
height=32,
name="layerinsert",
clicked_fn=lambda: self._toolbar_action(weakref_treeview, weakref_model, 0),
)
button.set_tooltip("Insert Sublayer")
button = ui.Button(
width=32,
height=32,
name="layeradd",
clicked_fn=lambda: self._toolbar_action(weakref_treeview, weakref_model, 1),
)
button.set_tooltip("Create Sublayer")
with ui.VStack(width=0):
button = ui.Button(
width=32,
height=32,
name="layerdelete",
clicked_fn=lambda: self._toolbar_action(weakref_treeview, weakref_model, 2),
)
button.set_tooltip("Remove Sublayer")
ui.Spacer()
self._selection = SelectionWatch(self._usd_context, self._layer_view, self._delegate)
# The filtering logic
self._begin_filter_subscription = self._search.subscribe_begin_edit_fn(
lambda _: LayerWindow._set_widget_visible(self._search_label, False)
)
self._end_filter_subscription = self._search.subscribe_end_edit_fn(
lambda m: self._filter_by_text(m.as_string)
or LayerWindow._set_widget_visible(self._search_label, not m.as_string)
)
self._update_save_all_button()
self._update_delta_button()
def _update_delta_button(self):
    """Tint the auto-authoring/delta icon when a non-NORMAL edit mode is active."""
    in_normal_mode = self._layers.get_edit_mode() == layers.LayerEditMode.NORMAL
    if in_normal_mode:
        style = {}
    else:
        style = {"color": LayerColorScheme().LAYER_LIVE_MODE_BUTTON_ENABLED}
    self._auto_authoring_image.set_style(style)
def _visibility_changed_fn(self, visible):
    """Window visibility callback: notify the external listener and manage SelectionWatch."""
    # Forward to the externally registered listener first.
    if self._visiblity_changed_listener:
        self._visiblity_changed_listener(visible)
    if not visible:
        # Hidden: clearing the selection notifies listeners, then the
        # SelectionWatch is destroyed so it stops tracking while hidden.
        self._layer_view.selection = []
        self._selection.destroy()
        self._selection = None
    else:
        # Shown again: recreate the watch that syncs tree selection with USD.
        self._selection = SelectionWatch(self._usd_context, self._layer_view, self._delegate)
def set_visibility_changed_listener(self, listener):
    """Register a callback invoked with the window's visibility (bool).

    NOTE(review): the backing attribute name ("_visiblity_...") preserves a
    pre-existing typo used consistently across this class.
    """
    self._visiblity_changed_listener = listener

@property
def layer_view(self):
    """The ui.TreeView that displays the layer hierarchy."""
    return self._layer_view
def destroy(self):
    """
    Called by extension before destroying this object. It doesn't happen automatically.
    Without this hot reloading doesn't work.
    """
    # Drop simple references first.
    self._auto_authoring_menu_item = None
    self._continuous_update_menu_item = None
    self._spec_linking_menu_item = None
    self._handling_mode_change = False
    self._auto_authoring_image = None
    self._visiblity_changed_listener = None
    # Releasing the subscription objects unsubscribes them.
    self._layers_event_subs = []
    self._begin_filter_subscription = None
    self._edit_filter_subscription = None
    self._end_filter_subscription = None
    if self._layer_view:
        # Detach the mouse handler before releasing the view.
        self._layer_view.set_mouse_pressed_fn(None)
        self._layer_view = None
    self._search_label = None
    self._search = None
    self._save_all_button = None
    self._auto_authoring_label = None
    # Models own USD listeners; destroy them explicitly.
    if self._model:
        self._model.destroy()
        self._model = None
    if self._save_all_model:
        self._save_all_model.destroy()
        self._save_all_model = None
    if self._layer_scope_model:
        self._layer_scope_model.destroy()
        self._layer_scope_model = None
    if self._auto_authoring_model:
        self._auto_authoring_model.destroy()
        self._auto_authoring_model = None
    self._window = None
    if self._selection:
        self._selection.destroy()
        self._selection = None
    if self._delegate:
        self._delegate.destroy()
        self._delegate = None
    # Unregister this window as an external drag-and-drop target.
    destroy_external_drag_drop()
def _clear_filter_types(self):
self.animations_menu.checked = False
self.audio_menu.checked = False
self.cameras_menu.checked = False
self.lights_menu.checked = False
self.materials_menu.checked = False
def _on_continuous_update_in_auto_authoring(self, enable):
    # Persist the "continuous update" preference for auto-authoring mode.
    LayerSettings().continuous_update_in_auto_authoring = enable

def _on_auto_reload_sublayers(self, value):
    # Persist the preference; when turning it on, immediately reload any
    # outdated sublayers and refresh the tree model to reflect the result.
    LayerSettings().auto_reload_sublayers = value
    if value:
        self._on_reload_layers()
        self._model.refresh()

def _on_reload_layers(self):
    # Reload every sublayer whose on-disk content is newer than the loaded copy.
    self._layers_state.reload_outdated_sublayers()
def _on_show_layer_contents(self, show):
    # Toggle showing prim specs under each layer; the tree must be rebuilt.
    LayerSettings().show_layer_contents = show
    self._model.refresh()

def _on_show_session_layer(self, show):
    # Toggle session-layer visibility; refresh only on an actual change.
    if LayerSettings().show_session_layer != show:
        LayerSettings().show_session_layer = show
        self._model.refresh()

def _on_show_metricsassembler_layer(self, show):
    # Toggle the metrics-assembler layer; refresh only on an actual change.
    if LayerSettings().show_metricsassembler_layer != show:
        LayerSettings().show_metricsassembler_layer = show
        self._model.refresh()

def _on_file_dialog_default_location_changed(self, root_layer):
    # True: file dialogs open at the root layer's location; False: last used.
    LayerSettings().file_dialog_show_root_layer_location = root_layer

def _on_show_info_notification(self, enabled):
    LayerSettings().show_info_notification = enabled

def _on_show_warning_notification(self, enabled):
    LayerSettings().show_warning_notification = enabled

def _on_show_missing_reference(self, enabled):
    LayerSettings().show_missing_reference = enabled

def _on_show_merge_or_flatten_warning(self, enabled):
    LayerSettings().show_merge_or_flatten_warning = enabled

def _on_ignore_outdate_notification(self, enabled):
    LayerSettings().ignore_outdate_notification = enabled
def _on_enable_auto_authoring_mode(self, enabled):
    """Menu handler: toggle auto-authoring mode (mutually exclusive with spec linking)."""
    # Re-entrancy guard: switching the mode fires layer events that would
    # otherwise call back into these handlers and flip the menus again.
    if self._handling_mode_change:
        return
    self._handling_mode_change = True
    self._spec_linking_menu_item.checked = False
    LayerSettings().enable_auto_authoring_mode = enabled
    LayerModelUtils.set_auto_authoring_mode(self._model, enabled)
    self._handling_mode_change = False
def _on_enable_spec_linking_mode(self, enabled):
    """Menu handler: toggle spec-linking mode (mutually exclusive with auto-authoring)."""
    # Re-entrancy guard, same pattern as _on_enable_auto_authoring_mode.
    if self._handling_mode_change:
        return
    self._handling_mode_change = True
    if enabled:
        self._auto_authoring_menu_item.checked = False
        edit_mode = layers.LayerEditMode.SPECS_LINKING
    else:
        edit_mode = layers.LayerEditMode.NORMAL
    self._layers.set_edit_mode(edit_mode)
    self._handling_mode_change = False
@staticmethod
def _set_widget_visible(widget: ui.Widget, visible):
    """Utility for use in lambdas: assigns ``widget.visible``."""
    widget.visible = visible
def _filter_by_text(self, filter_text: str):
"""Set the search filter string to the models and widgets"""
layer_view = self._layer_view
layer_view.visible = True
layer_view.keep_alive = not not filter_text
layer_view.keep_expanded = not not filter_text
layer_view.model.filter_by_text(filter_text)
self._delegate.set_highlighting(text=filter_text)
def _update_save_all_button(self):
if not self._model.root_layer_item:
return
is_in_live_session = self._model.root_layer_item.is_in_live_session
if is_in_live_session:
self._save_all_button.set_tooltip("Cannot save Layer edits in Live Session.")
else:
self._save_all_button.set_tooltip("Save all Layer edits")
if not is_in_live_session and self._save_all_model.get_value_as_bool():
self._save_all_button.enabled = True
self._save_all_button.checked = True
else:
self._save_all_button.enabled = False
self._save_all_button.checked = False
def _on_dirtiness_changed(self):
    # Some layer's dirty state changed; refresh the "save all" button state.
    self._update_save_all_button()
def _is_selection_from_same_layer(self, selection):
    """Return the common LayerItem of all selected items, or None.

    A PrimSpecItem contributes its owning layer. Returns None for an empty
    selection or when the items span more than one layer.
    """
    if not selection:
        return None
    item = selection[0]
    if isinstance(item, LayerItem):
        layer_item = item
    elif isinstance(item, PrimSpecItem):
        layer_item = item.layer_item
    else:
        layer_item = None
    for i in range(1, len(selection)):
        # NOTE(review): if an entry is neither a LayerItem nor a PrimSpecItem,
        # `item` keeps its value from the previous iteration — presumably such
        # entries never occur in this tree view; verify against the item types.
        if isinstance(selection[i], LayerItem):
            item = selection[i]
        elif isinstance(selection[i], PrimSpecItem):
            item = selection[i].layer_item
        if layer_item != item:
            return None
    return layer_item
def _toolbar_action(self, weakref_treeview: weakref.ref, weakref_model: weakref.ref, action: int):
    """Handle the toolbar buttons.

    Args:
        weakref_treeview: Weak reference to the layer TreeView (the button
            lambdas may outlive this window, hence weakrefs).
        weakref_model: Weak reference to the layer model.
        action: 0 = insert existing sublayer, 1 = create new sublayer,
            2 = delete selected sublayer(s).
    """
    tree_view = weakref_treeview()
    model = weakref_model()
    if not tree_view or not model:
        return
    selection = tree_view.selection
    if action == 2:
        # Delete: collect only the layer items from the selection.
        to_remove_layer_items = []
        for item in selection:
            if isinstance(item, LayerItem):
                to_remove_layer_items.append(item)
        if len(to_remove_layer_items) > 1:
            LayerModelUtils.remove_layers(to_remove_layer_items)
        if len(to_remove_layer_items) == 1:
            LayerModelUtils.remove_layer(to_remove_layer_items[0])
    else:
        # Insert/create: pick the parent layer and insertion position.
        layer_item = self._is_selection_from_same_layer(selection)
        # By default, it will insert layer for root layer
        if not layer_item or layer_item.reserved:
            if layer_item and layer_item == model.session_layer_item:
                parent_item = model.session_layer_item
            else:
                parent_item = model.root_layer_item
            sublayer_position = 0
        elif layer_item and layer_item.parent:
            # In a live session, position relative to the live session layer.
            if layer_item.is_in_live_session:
                layer_item = layer_item.live_session_layer
            parent_item = layer_item.parent
            sublayer_position = LayerUtils.get_sublayer_position_in_parent(
                parent_item.identifier, layer_item.identifier
            )
        else:
            parent_item = model.root_layer_item
            sublayer_position = 0
        if action == 0:
            LayerModelUtils.insert_sublayer(parent_item, sublayer_position)
        elif action == 1:
            LayerModelUtils.create_sublayer(parent_item, sublayer_position)
def _on_muteness_scope_changed(self):
    # Muteness scope flipped (local <-> global); poke the scope model so
    # widgets bound to it re-query the value.
    self._layer_scope_model._value_changed()
def _on_stage_attached(self, attached: bool):
    """Called when a USD stage is attached to (or detached from) the context."""
    if attached:
        self._update_save_all_button()
        self._delegate.on_stage_attached()
        self._layer_scope_model._value_changed()
    else:
        # https://nvidia-omniverse.atlassian.net/browse/OM-29539
        # It must clear selection to notify listeners.
        self._layer_view.selection = []
    # (Re)register this model as the external drag-and-drop target.
    setup_external_drag_drop("Layer", self._model)
def _on_layer_events(self, events):
    """Dispatch omni.kit.usd.layers events relevant to this window's UI state."""
    payload = layers.get_layer_event_payload(events)
    if not payload:
        return
    if payload.event_type == layers.LayerEventType.EDIT_MODE_CHANGED:
        edit_mode = self._layers.get_edit_mode()
        # Guard so setting the menu check marks doesn't re-trigger the
        # mode-change handlers.
        self._handling_mode_change = True
        self._auto_authoring_menu_item.checked = edit_mode == layers.LayerEditMode.AUTO_AUTHORING
        self._spec_linking_menu_item.checked = edit_mode == layers.LayerEditMode.SPECS_LINKING
        self._handling_mode_change = False
        self._update_delta_button()
    elif payload.event_type == layers.LayerEventType.LIVE_SESSION_STATE_CHANGED:
        if not self._model.root_layer_item:
            return
        is_live_mode = self._model.root_layer_item.is_in_live_session
        if is_live_mode:
            # Live sessions author into the session layer; make it visible.
            self._on_show_session_layer(True)
        # Expand the session layer in live mode, the root layer otherwise.
        self._layer_view.set_expanded(self._model.session_layer_item, is_live_mode, False)
        self._layer_view.set_expanded(self._model.root_layer_item, not is_live_mode, False)
        self._update_save_all_button()
def get_layer_model(self):
    """Return the LayerModel backing the tree view."""
    return self._model

def remove_layer_selection_changed_fn(self, fn):
    # No-op when the selection watch is torn down (window hidden).
    if self._selection:
        self._selection.remove_layer_selection_changed_fn(fn)

def add_layer_selection_changed_fn(self, fn):
    if self._selection:
        self._selection.add_layer_selection_changed_fn(fn)

def get_current_focused_layer_item(self):
    """Return the currently focused LayerItem, or None if nothing is focused."""
    if self._selection:
        return self._selection.get_current_focused_layer_item()
    else:
        return None

def set_current_focused_layer_item(self, layer_identifier):
    """Focus the layer with the given identifier, if it exists in the model."""
    if not self._model or not self._selection:
        return
    layer_item = self._model.get_layer_item_by_identifier(layer_identifier)
    self._selection.set_current_focused_layer_item(layer_item)

def set_visible(self, value):
    self._window.visible = value

def is_visible(self):
    """Return True if the window exists and is visible."""
    if self._window:
        return self._window.visible
    return False
| 37,113 | Python | 47.770039 | 137 | 0.568453 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/layer_settings.py | import carb.settings
import omni.kit.usd.layers as layers
from .singleton import Singleton
SETTINGS_SHOW_SESSION_LAYER = "/persistent/app/layerwindow/showSessionLayer"
SETTINGS_SHOW_METRICSASSEMBLER_LAYER = "/persistent/app/layerwindow/showMetricsAssemblerLayer"
SETTINGS_SHOW_LAYER_CONTENTS = "/persistent/app/layerwindow/showLayerContents"
SETTINGS_SHOW_LAYER_FILE_EXTENSION = "/persistent/app/layerwindow/showLayerFileExtension"
SETTINGS_FILE_DIALOG_SHOW_ROOT_LAYER_LOCATION = "/persistent/app/layerwindow/filedialog/showRootLayerLocation"
SETTINGS_SHOW_INFO_NOTIFICATION = "/persistent/app/layerwindow/showInfoNotification"
SETTINGS_SHOW_WARNING_NOTIFICATION = "/persistent/app/layerwindow/showWarningNotification"
SETTINGS_ENABLE_AUTO_AUTHORING_MODE = "/persistent/app/layerwindow/enableAutoAuthoringMode"
SETTINGS_ENABLE_SPEC_LINKING_MODE = "/persistent/app/layerwindow/enableSpecLinkingMode"
SETTINGS_SHOW_MISSING_REFERENCE = "/persistent/app/layerwindow/showMissingReference"
SETTINGS_SHOW_MERGE_OR_FLATTEN_WARNING = "/persistent/app/layerwindow/showMergeOrFlattenWarning"
SETTINGS_CONTINOUS_UPDATE_IN_AUTO_AUTHORING = "/persistent/app/layersinterface/continousUpdate"
@Singleton
class LayerSettings:
    """Singleton façade over carb.settings for the layer window's persistent options.

    __init__ registers a default for every key so reads are always well-defined.
    Each property reads/writes its carb settings key directly.

    NOTE(review): no default is registered here for
    ``layers.SETTINGS_IGNORE_OUTDATE_NOTIFICATION`` — presumably the
    omni.kit.usd.layers extension registers it; verify.
    """

    def __init__(self):
        super().__init__()
        self._settings = carb.settings.get_settings()
        self._settings.set_default_bool(layers.SETTINGS_AUTO_RELOAD_SUBLAYERS, False)
        self._settings.set_default_bool(SETTINGS_SHOW_LAYER_CONTENTS, True)
        self._settings.set_default_bool(SETTINGS_SHOW_SESSION_LAYER, False)
        self._settings.set_default_bool(SETTINGS_SHOW_METRICSASSEMBLER_LAYER, False)
        self._settings.set_default_bool(SETTINGS_SHOW_LAYER_FILE_EXTENSION, True)
        self._settings.set_default_bool(SETTINGS_FILE_DIALOG_SHOW_ROOT_LAYER_LOCATION, True)
        self._settings.set_default_bool(SETTINGS_SHOW_INFO_NOTIFICATION, True)
        self._settings.set_default_bool(SETTINGS_SHOW_WARNING_NOTIFICATION, True)
        self._settings.set_default_bool(SETTINGS_ENABLE_AUTO_AUTHORING_MODE, False)
        self._settings.set_default_bool(SETTINGS_ENABLE_SPEC_LINKING_MODE, False)
        self._settings.set_default_bool(SETTINGS_SHOW_MISSING_REFERENCE, False)
        self._settings.set_default_bool(SETTINGS_SHOW_MERGE_OR_FLATTEN_WARNING, True)
        self._settings.set_default_bool(SETTINGS_CONTINOUS_UPDATE_IN_AUTO_AUTHORING, True)

    @property
    def auto_reload_sublayers(self):
        return self._settings.get_as_bool(layers.SETTINGS_AUTO_RELOAD_SUBLAYERS)

    @auto_reload_sublayers.setter
    def auto_reload_sublayers(self, value):
        self._settings.set(layers.SETTINGS_AUTO_RELOAD_SUBLAYERS, value)

    @property
    def continuous_update_in_auto_authoring(self):
        return self._settings.get_as_bool(SETTINGS_CONTINOUS_UPDATE_IN_AUTO_AUTHORING)

    @continuous_update_in_auto_authoring.setter
    def continuous_update_in_auto_authoring(self, value):
        self._settings.set(SETTINGS_CONTINOUS_UPDATE_IN_AUTO_AUTHORING, value)

    @property
    def show_missing_reference(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_MISSING_REFERENCE)

    @show_missing_reference.setter
    def show_missing_reference(self, show: bool):
        self._settings.set(SETTINGS_SHOW_MISSING_REFERENCE, show)

    @property
    def show_layer_contents(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_LAYER_CONTENTS)

    @show_layer_contents.setter
    def show_layer_contents(self, show: bool):
        self._settings.set(SETTINGS_SHOW_LAYER_CONTENTS, show)

    @property
    def show_session_layer(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_SESSION_LAYER)

    @show_session_layer.setter
    def show_session_layer(self, show: bool):
        self._settings.set(SETTINGS_SHOW_SESSION_LAYER, show)

    @property
    def show_metricsassembler_layer(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_METRICSASSEMBLER_LAYER)

    @show_metricsassembler_layer.setter
    def show_metricsassembler_layer(self, show: bool):
        self._settings.set(SETTINGS_SHOW_METRICSASSEMBLER_LAYER, show)

    @property
    def show_layer_file_extension(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_LAYER_FILE_EXTENSION)

    @show_layer_file_extension.setter
    def show_layer_file_extension(self, show: bool):
        self._settings.set(SETTINGS_SHOW_LAYER_FILE_EXTENSION, show)

    # The default location of file dialog. By default, it will be the root layer's location if it's not
    # anonymous. Otherwise, it will be last access location.
    @property
    def file_dialog_show_root_layer_location(self):
        return self._settings.get_as_bool(SETTINGS_FILE_DIALOG_SHOW_ROOT_LAYER_LOCATION)

    @file_dialog_show_root_layer_location.setter
    def file_dialog_show_root_layer_location(self, root_layer: bool):
        self._settings.set(SETTINGS_FILE_DIALOG_SHOW_ROOT_LAYER_LOCATION, root_layer)

    @property
    def show_info_notification(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_INFO_NOTIFICATION)

    @show_info_notification.setter
    def show_info_notification(self, enabled: bool):
        self._settings.set(SETTINGS_SHOW_INFO_NOTIFICATION, enabled)

    @property
    def show_warning_notification(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_WARNING_NOTIFICATION)

    @show_warning_notification.setter
    def show_warning_notification(self, enabled: bool):
        self._settings.set(SETTINGS_SHOW_WARNING_NOTIFICATION, enabled)

    @property
    def enable_auto_authoring_mode(self):
        return self._settings.get_as_bool(SETTINGS_ENABLE_AUTO_AUTHORING_MODE)

    @enable_auto_authoring_mode.setter
    def enable_auto_authoring_mode(self, enabled: bool):
        self._settings.set(SETTINGS_ENABLE_AUTO_AUTHORING_MODE, enabled)
        # The two editing modes are mutually exclusive.
        if enabled:
            self._settings.set(SETTINGS_ENABLE_SPEC_LINKING_MODE, False)

    @property
    def enable_spec_linking_mode(self):
        return self._settings.get_as_bool(SETTINGS_ENABLE_SPEC_LINKING_MODE)

    @enable_spec_linking_mode.setter
    def enable_spec_linking_mode(self, enabled: bool):
        self._settings.set(SETTINGS_ENABLE_SPEC_LINKING_MODE, enabled)
        # The two editing modes are mutually exclusive.
        if enabled:
            self._settings.set(SETTINGS_ENABLE_AUTO_AUTHORING_MODE, False)

    @property
    def show_merge_or_flatten_warning(self):
        return self._settings.get_as_bool(SETTINGS_SHOW_MERGE_OR_FLATTEN_WARNING)

    @show_merge_or_flatten_warning.setter
    def show_merge_or_flatten_warning(self, enabled: bool):
        self._settings.set(SETTINGS_SHOW_MERGE_OR_FLATTEN_WARNING, enabled)

    @property
    def ignore_outdate_notification(self):
        return self._settings.get_as_bool(layers.SETTINGS_IGNORE_OUTDATE_NOTIFICATION)

    @ignore_outdate_notification.setter
    def ignore_outdate_notification(self, value: bool):
        self._settings.set(layers.SETTINGS_IGNORE_OUTDATE_NOTIFICATION, value)
| 6,974 | Python | 44 | 110 | 0.736306 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/filebrowser/app_filebrowser.py | import asyncio
import os
import re
import carb
import omni.ui
import omni.client
from typing import Callable, List, Tuple
from omni.kit.window.filepicker import FilePickerDialog
from omni.kit.widget.filebrowser import FileBrowserItem
from . import FileBrowserSelectionType, FileBrowserMode
LAST_LOCATION = "/persistent/app/omni.kit.widget.layers/last_file_picker_location"
class FileBrowserUI:
    """Thin wrapper around FilePickerApp that picks the confirm-button label
    from the browse mode and forwards the public operations.

    Fixes in this revision:
    - The class previously defined ``destroy`` TWICE; the second definition
      shadowed the first and skipped ``hide_dialog``. Merged into one method.
    - ``save_extensions`` no longer uses a mutable default argument.
    """

    def __init__(
        self,
        title: str,
        mode: "FileBrowserMode",
        selection_type: "FileBrowserSelectionType",
        filter_options: "List[Tuple[str, str]]",
        save_extensions: "List[str]" = None,
        allow_multi_selection=False,
        build_options_pane_fn: "Callable[[List[FileBrowserItem]], bool]" = None,
        on_selection_changed: "Callable[[List[FileBrowserItem]], bool]" = None,
        **kwargs
    ):
        # OPEN dialogs confirm with "Open"; everything else saves.
        if mode == FileBrowserMode.OPEN:
            confirm_text = "Open"
        else:
            confirm_text = "Save"
        self._file_picker = FilePickerApp(
            title,
            confirm_text,
            mode,
            selection_type,
            filter_options,
            save_extensions if save_extensions is not None else [],
            allow_multi_selection,
            build_options_pane_fn,
            on_selection_changed,
            **kwargs
        )

    def destroy(self):
        """Hide and tear down the underlying file picker."""
        self._file_picker.hide_dialog()
        self._file_picker.destroy()
        self._file_picker = None

    def set_current_directory(self, dir: str):
        """Set the directory the dialog opens at."""
        self._file_picker.set_current_directory(dir)

    def set_current_filename(self, filename: str):
        """Pre-fill the filename field of the dialog."""
        self._file_picker.set_current_filename(filename)

    def get_current_filename(self):
        """Return the filename currently entered in the dialog."""
        return self._file_picker.get_current_filename()

    def open(self, select_fn: "Callable[[List[str]], None]", cancel_fn: "Callable[[], None]"):
        """Show the dialog; ``select_fn`` receives the chosen paths, ``cancel_fn`` runs on cancel."""
        self._file_picker.set_custom_fn(select_fn, cancel_fn)
        self._file_picker.show_dialog()

    def hide(self):
        """Hide the dialog without destroying it."""
        self._file_picker.hide_dialog()
class FilePickerApp:
    """
    Standalone app to demonstrate the use of the FilePicker dialog.
    Args:
        title (str): Title of the window.
        apply_button_name (str): Name of the confirm button.
        mode (FileBrowserMode): The file picker mode that whether it's to open or save.
        selection_type (FileBrowserSelectionType): The file type that confirm event will respond to.
        item_filter_options (list): Array of filter options. Element of array
            is a tuple that first element of this tuple is the regex string for filtering,
            and second element of this tuple is the descriptions, like ("*.*", "All Files").
            By default, it will list all files.
        save_extensions: The real extenion name that will be saved.
        allow_multi_selections: Allow to select multiple files.
        build_options_pane_fn (Callable[[List[FileBrowserItem]], bool]): Function to
            build options panel.
        on_selection_changed (Callable[[List[FileBrowserItem]], bool]): Function to
            monitor selection changed.
    """

    def __init__(
        self,
        title: str,
        apply_button_name: str,
        mode: FileBrowserMode,
        selection_type: FileBrowserSelectionType = FileBrowserSelectionType.ALL,
        item_filter_options: list = [("*.*", "All Files (*.*)")],
        save_extensions: list = [".usd"],
        allow_multi_selections: bool = False,
        build_options_pane_fn: Callable[[List[FileBrowserItem]], bool] = None,
        on_selection_changed: Callable[[List[FileBrowserItem]], bool] = None,
        **kwargs
    ):
        # NOTE: the list defaults above are never mutated, so sharing them
        # across calls is safe; kept for signature compatibility.
        self._settings = carb.settings.get_settings()
        self._settings.set_default(LAST_LOCATION, "")
        self._title = title
        self._filepicker = None
        self._mode = mode
        self._selection_type = selection_type
        self._custom_select_fn = None
        self._custom_cancel_fn = None
        self._apply_button_name = apply_button_name
        self._filter_regexes = []
        self._filter_descriptions = []
        # Restore the directory the user last confirmed from.
        self._current_directory = self._settings.get_as_string(LAST_LOCATION)
        self._allow_multi_selections = allow_multi_selections
        self._build_options_pane_fn = build_options_pane_fn
        self._selection_changed_fn = on_selection_changed
        # Compile string patterns once, case-insensitively; pre-compiled
        # patterns are accepted as-is.
        for regex, desc in item_filter_options:
            if not isinstance(regex, re.Pattern):
                regex = re.compile(regex, re.IGNORECASE)
            self._filter_regexes.append(regex)
            self._filter_descriptions.append(desc)
        self._save_extensions = save_extensions
        self._build_ui(**kwargs)

    def destroy(self):
        """Release callbacks and destroy the underlying dialog."""
        self._settings = None
        self._custom_select_fn = None
        self._custom_cancel_fn = None
        # Fix: this assignment was duplicated in the original.
        self._build_options_pane_fn = None
        self._selection_changed_fn = None
        # NOTE(review): this shadows the bound method of the same name so the
        # dialog callback becomes a no-op during teardown; preserved as-is.
        self._on_selection_changed = None
        self.hide_dialog()
        self._filepicker.destroy()
        self._filepicker = None

    def set_custom_fn(self, select_fn: Callable[[List[str]], None], cancel_fn: Callable[[], None]):
        """Set the callbacks invoked on confirm / cancel."""
        self._custom_select_fn = select_fn
        self._custom_cancel_fn = cancel_fn

    def show_dialog(self):
        self._filepicker.show(self._current_directory)

    def hide_dialog(self):
        self._filepicker.hide()

    def set_current_directory(self, dir: str):
        """Set (and persist) the directory the dialog opens at; '' is ignored."""
        if not dir:
            return
        self._current_directory = dir
        if not self._current_directory.endswith("/"):
            self._current_directory += "/"
        self._settings.set_string(LAST_LOCATION, self._current_directory)

    def set_current_filename(self, filename: str):
        self._filepicker.set_filename(filename)

    def get_current_filename(self):
        return self._filepicker.get_filename()

    def _build_ui(self, **kwargs):
        # Create the dialog
        self._filepicker = FilePickerDialog(
            self._title,
            allow_multi_selection=self._allow_multi_selections,
            apply_button_label=self._apply_button_name,
            click_apply_handler=self._on_click_open,
            click_cancel_handler=self._on_click_cancel,
            item_filter_options=self._filter_descriptions,
            item_filter_fn=lambda item: self._on_filter_item(item),
            error_handler=lambda m: self._on_error(m),
            options_pane_build_fn=self._build_options_pane_fn,
            selection_changed_fn=self._on_selection_changed,
            **kwargs
        )
        # Label the filename bar according to what may be selected.
        if self._selection_type == FileBrowserSelectionType.DIRECTORY_ONLY:
            self._filepicker.set_filebar_label_name("Folder name")
        elif self._selection_type == FileBrowserSelectionType.FILE_ONLY:
            self._filepicker.set_filebar_label_name("File name")
        else:
            self._filepicker.set_filebar_label_name("File or Folder name")
        # Start off hidden
        self.hide_dialog()

    def _on_selection_changed(self, items: List[FileBrowserItem]):
        """Mirror a single valid selection into the filename field."""
        if self._selection_changed_fn:
            self._selection_changed_fn(items)
        if len(items) != 1:
            return False
        item = items[0]
        # Ignore selections that don't match the allowed selection type.
        if item.is_folder and self._selection_type == FileBrowserSelectionType.FILE_ONLY:
            return False
        if not item.is_folder and self._selection_type == FileBrowserSelectionType.DIRECTORY_ONLY:
            return False
        self.set_current_filename(item.name)

    def _on_filter_item(self, item: FileBrowserItem) -> bool:
        """Return True if the item should be listed under the active filter."""
        # Folders (and the None root) are always listed so users can navigate.
        if not item or item.is_folder:
            return True
        if self._selection_type == FileBrowserSelectionType.DIRECTORY_ONLY:
            return False
        if self._filepicker.current_filter_option >= len(self._filter_regexes):
            return False
        regex = self._filter_regexes[self._filepicker.current_filter_option]
        if regex.match(item.path):
            return True
        else:
            return False

    def _on_error(self, msg: str):
        # Errors from the dialog are intentionally ignored.
        pass

    def _on_click_open(self, filename: str, dirname: str):
        """
        The meat of the App is done in this callback when the user clicks 'Accept'. This is
        a potentially costly operation so we implement it as an async operation. The inputs
        are the filename and directory name. Together they form the fullpath to the selected
        file.
        """
        if not dirname:
            return
        if self._mode == FileBrowserMode.SAVE and not filename:
            return
        selection_paths = self._filepicker.get_current_selections()
        if dirname:
            if filename:
                current_directory_path = self._filepicker.get_current_directory()
                # Stores current directory
                self._settings.set_string(LAST_LOCATION, current_directory_path)
                if current_directory_path.endswith("/"):
                    current_directory_path = current_directory_path[:-1]
                current_directory_name = os.path.basename(current_directory_path)
                # FIXME: It's possible that the file name in the input dialog
                # is the same as the current folder name, which is selected.
                # But FileDialog class will not include folders in selection_paths,
                # so here it's exclude that it's folder selection.
                if not selection_paths and current_directory_name == filename:
                    fullpath = f"{dirname}"
                else:
                    if not dirname.endswith("/"):
                        dirname = dirname + "/"
                    fullpath = omni.client.combine_urls(dirname, filename)
                fullpath = fullpath.replace("\\", "/")
            else:
                fullpath = dirname
        else:
            fullpath = filename
        if self._selection_type == FileBrowserSelectionType.FILE_ONLY and not filename:
            return
        if not selection_paths:
            selection_paths = [fullpath]

        async def timeout_stat(path, timeout):
            # Best-effort stat: any failure (including timeout) is treated
            # as "not found".
            try:
                result, entry = await asyncio.wait_for(omni.client.stat_async(path), timeout=timeout)
            except (Exception, asyncio.TimeoutError):
                result = omni.client.Result.ERROR_NOT_FOUND
                entry = None
            return result, entry

        if not self._allow_multi_selections:
            result, entry = asyncio.get_event_loop().run_until_complete(timeout_stat(fullpath, timeout=1))
            if result == omni.client.Result.OK and entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
                is_folder = True
            else:
                is_folder = False
            # For open dialogs, reject a selection whose kind doesn't match
            # the requested selection type.
            if self._mode != FileBrowserMode.SAVE:
                if (is_folder and self._selection_type == FileBrowserSelectionType.FILE_ONLY) or (
                    not is_folder and self._selection_type == FileBrowserSelectionType.DIRECTORY_ONLY
                ):
                    return
        self.hide_dialog()
        if self._mode == FileBrowserMode.SAVE:
            # Append the extension of the active filter when the typed name
            # doesn't already carry an allowed one.
            _, ext = os.path.splitext(fullpath)
            if self._save_extensions and ext not in self._save_extensions:
                if self._filepicker.current_filter_option < len(self._save_extensions):
                    fullpath += self._save_extensions[self._filepicker.current_filter_option]
        if self._custom_select_fn:
            if self._allow_multi_selections and len(selection_paths) > 1:
                self._custom_select_fn(selection_paths)
            else:
                self._custom_select_fn([fullpath])

    def _on_click_cancel(self, filename: str, dirname: str):
        """
        This function is called when the user clicks 'Cancel'.
        """
        self.hide_dialog()
        if self._custom_cancel_fn:
            self._custom_cancel_fn()
| 11,974 | Python | 36.776025 | 106 | 0.606815 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_name_model.py | import omni.ui as ui
class LayerNameModel(ui.AbstractValueModel):
    """Read-only value model exposing a layer item's display name.

    Appends a role suffix ("Default Layer" / "Authoring Layer") depending on
    the owning model's edit mode and the item's edit-target status.
    """

    def __init__(self, layer_item):
        super().__init__()
        self._layer_item = layer_item

    def destroy(self):
        # Drop the item reference so this model doesn't keep it alive.
        self._layer_item = None

    def get_value_as_string(self):
        item = self._layer_item
        normal_mode = item.model.normal_mode
        if not normal_mode and item.edit_layer_in_auto_authoring_mode:
            suffix = " (Default Layer)"
        elif normal_mode and item.is_edit_target:
            suffix = " (Authoring Layer)"
        else:
            suffix = ""
        return item.name + suffix

    def set_value(self, value):
        # Cannot change layer name
        pass
| 718 | Python | 30.260868 | 105 | 0.614206 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_scope_model.py | import omni.ui as ui
class LayerScopeModel(ui.AbstractValueModel):
    """Boolean model bridging the muteness-scope toggle to the layer model.

    False means local (per-user) muteness scope; True means global scope.
    """

    def __init__(self, layer_model):
        super().__init__()
        self._layer_model = layer_model

    def destroy(self):
        self._layer_model = None

    def get_value_as_bool(self):
        # False means local mode
        return self._layer_model.global_muteness_scope

    def set_value(self, value):
        model = self._layer_model
        model.global_muteness_scope = value
| 445 | Python | 23.777776 | 55 | 0.626966 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_live_update_model.py | import omni.ui as ui
class LayerLiveUpdateModel(ui.AbstractValueModel):
    """Read-only model reporting whether a layer item is in a live session."""

    def __init__(self, usd_context, layer_item):
        super().__init__()
        self._usd_context = usd_context
        self._layer_item = layer_item

    def destroy(self):
        self._layer_item = None
        self._usd_context = None

    def get_value_as_bool(self):
        return self._layer_item.is_in_live_session

    def set_value(self, value):
        # Live-session state is not editable through this model.
        pass
| 461 | Python | 22.099999 | 50 | 0.59436 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_auto_authoring.py | import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils
class LayerAutoAuthoringModel(ui.AbstractValueModel):
    """Boolean model toggling auto-authoring mode on the layer model."""

    def __init__(self, layer_model):
        super().__init__()
        self._layer_model = layer_model

    def destroy(self):
        self._layer_model = None

    def get_value_as_bool(self):
        # False means local mode
        return self._layer_model.auto_authoring_mode

    def set_value(self, value):
        # Route the change through the shared utility so side effects
        # (events, settings) happen in one place.
        LayerModelUtils.set_auto_authoring_mode(self._layer_model, value)
| 538 | Python | 27.36842 | 73 | 0.669145 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/save_model.py | import weakref
import omni
import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils
class SaveModel(ui.AbstractValueModel):
    """Dirty-state model backing a layer item's save button.

    get_value_as_bool is True while the layer needs saving; writing a falsy
    value triggers the actual save when allowed.
    """

    def __init__(self, layer_item):
        super().__init__()
        self._layer_item = layer_item

    def destroy(self):
        self._layer_item = None

    def get_value_as_bool(self):
        item = self._layer_item
        return item.dirty or (item.is_live_session_layer and item.has_content)

    def set_value(self, value):
        item = self._layer_item
        # Saving only makes sense for a dirty, present, writable layer that
        # is not a live-session layer, and only on a falsy write.
        can_save = (
            not value
            and item.dirty
            and not item.missing
            and item.editable
            and not item.is_live_session_layer
        )
        if can_save:
            LayerModelUtils.save_layer(item)
| 798 | Python | 26.551723 | 114 | 0.616541 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/live_session_user_model.py | import omni.ui as ui
import omni.kit.usd.layers as layers
class LiveSessionUserModel(ui.AbstractValueModel):
    """Value model exposing a live-session peer's short display name."""

    def __init__(self, peer_user: layers.LiveSessionUser):
        super().__init__()
        self._peer_user = peer_user

    @property
    def peer_user(self):
        return self._peer_user

    @peer_user.setter
    def peer_user(self, value):
        self._peer_user = value

    def destroy(self):
        self._peer_user = None

    def get_value_as_string(self):
        user = self._peer_user
        if not user:
            return ""
        return layers.get_short_user_name(user.user_name)

    def set_value(self, value):
        # Cannot change layer name
        pass
| 701 | Python | 21.645161 | 72 | 0.601997 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/save_all_model.py | import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils
class SaveAllModel(ui.AbstractValueModel):
    """Model backing the "save all" button; True while any layer is dirty."""

    def __init__(self, layer_model):
        super().__init__()
        self._layer_model = layer_model

    def destroy(self):
        self._layer_model = None

    def get_value_as_bool(self):
        return self._layer_model.has_dirty_layers()

    def set_value(self, value):
        # A truthy write is a no-op; a falsy write saves every dirty layer.
        if not value:
            LayerModelUtils.save_model(self._layer_model)
| 511 | Python | 23.380951 | 68 | 0.634051 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/prim_model.py | import re
from pxr import Sdf, Usd
from typing import List
from typing import Optional
import omni.usd
import omni.ui as ui
import omni.kit.usd.layers as layers
from omni.kit.widget.stage.stage_model import StageModel, StageItem
from ..layer_icons import LayerIcons
class PrimItem(StageItem):  # pragma: no cover
    """Stage item specialized for the layer window's prim view.

    Tracks per-spec link and lock state and refreshes the associated status
    icons when that state changes.
    """

    def __init__(
        self,
        path: Sdf.Path,
        stage: Usd.Stage,
        stage_model: StageModel,
        flat=False,
        root_identifier=None,
        load_payloads=False,
        check_missing_references=False,
    ):
        super().__init__(
            path,
            stage,
            stage_model,
            flat=flat,
            root_identifier=root_identifier,
            load_payloads=load_payloads,
            check_missing_references=check_missing_references,
        )
        usd_context = omni.usd.get_context()
        # Linked when the spec has any layer links (non-global lookup).
        links = omni.kit.usd.layers.get_spec_layer_links(usd_context, path, False)
        self._linked = bool(links)
        self._linked_image = None
        self._locked = omni.kit.usd.layers.is_spec_locked(usd_context, path)
        self._locked_image = None

    @property
    def locked(self):
        return self._locked

    @locked.setter
    def locked(self, value: bool):
        if value != self._locked:
            self._locked = value
            if self._locked_image:
                # Swap between the closed and open padlock icon.
                # Fix: the original computed `filename` but then styled the
                # image with a constant, leaving the icon never updated.
                filename = LayerIcons().get("lock") if value else LayerIcons().get("lock_open")
                image_style = {"": {"image_url": filename}}
                self._locked_image.set_style(image_style)

    @property
    def linked(self):
        return self._linked

    @linked.setter
    def linked(self, value: bool):
        if value != self._linked:
            self._linked = value
            if self._linked_image:
                # The link icon is simply shown/hidden.
                self._linked_image.visible = value

    def set_linked_image(self, image: ui.Image):
        # Widget that displays the "linked" badge for this item.
        self._linked_image = image

    def set_locked_image(self, image: ui.Image):
        # Widget that displays the padlock badge for this item.
        self._locked_image = image
class PrimModel(StageModel): # pragma: no cover
    """StageModel specialization whose tree items are :class:`PrimItem` objects.

    Adds spec linking/locking awareness on top of ``StageModel``: it listens to
    layer events and pushes link/lock state changes into the cached items so
    the prim tree UI can show the corresponding icons.
    """
    def __init__(self, stage: Usd.Stage):
        super().__init__(stage, flat=None, load_payloads=False, check_missing_references=False)
        # replace the root item with PrimItem
        if self._root:
            self._root = PrimItem(
                Sdf.Path.absoluteRootPath,
                self.stage,
                self,
                False,
                self.stage.GetRootLayer().identifier,
                load_payloads=self.load_payloads,
                check_missing_references=self.check_missing_references,
            )
        # Subscribe to layer events so spec link/lock changes refresh the items.
        self._layers = layers.get_layers()
        self._specs_linking = self._layers.get_specs_linking()
        self._specs_locking = self._layers.get_specs_locking()
        self._layers_event_subscription = self._layers.get_event_stream().create_subscription_to_pop(self._on_layer_events, name="Layers Prim Model")
    def destroy(self):
        """Release the event subscription and layer interfaces, then tear down the base model."""
        self._layers_event_subscription = None
        self._layers = None
        self._specs_linking = None
        self._specs_locking = None
        super().destroy()
    # NOTE: this overrides StageModel._get_stage_item_from_cache() and largely
    # mirrors its logic, except that newly created items are replaced with
    # PrimItem instances. If the base-class implementation changes, this
    # override must be updated to match.
    def _get_stage_item_from_cache(self, path: Sdf.Path, create_if_not_existed=False):
        """Return the cached item for ``path``, optionally creating a PrimItem for it."""
        stage_item = super()._get_stage_item_from_cache(path, False)
        if stage_item:
            return stage_item
        elif not create_if_not_existed:
            return None
        # Creates new
        stage_item = super()._get_stage_item_from_cache(path, True)
        if not stage_item:
            return None
        # Replaces it with customized PrimItem
        stage_item = PrimItem(
            path,
            self.stage,
            self,
            self.flat,
            load_payloads=self.load_payloads,
            check_missing_references=self.check_missing_references,
        )
        super()._remove_stage_item_from_cache(path)
        super()._cache_stage_item(stage_item)
        return stage_item
    def find(self, path: Sdf.Path):
        """Return item with the given path"""
        path = Sdf.Path(path)
        if path == Sdf.Path.absoluteRootPath:
            return self.root
        return super()._get_stage_item_from_cache(path)
    def get_item_value_model_count(self, item):
        """Reimplemented from AbstractItemModel"""
        return 3
    def get_item_value_model(self, item, column_id):
        """Reimplemented from AbstractItemModel"""
        if item is None:
            item = self.root
        if not item:
            return None
        # Only the name column (2) has a value model; other columns fall
        # through and implicitly return None.
        if column_id == 2:
            return item.name_model
    def drop_accepted(self, target_item, source):
        # Drag-and-drop is disabled for the prim tree.
        return False
    def drop(self, target_item, source):
        # Drag-and-drop is disabled for the prim tree.
        return
    def _on_layer_events(self, event):
        """Dispatch spec linking/locking layer events to the per-item handlers."""
        payload = layers.get_layer_event_payload(event)
        if not payload:
            return
        if payload.event_type == layers.LayerEventType.SPECS_LINKING_CHANGED:
            for _, spec_paths in payload.layer_spec_paths.items():
                self._on_spec_links_changed(spec_paths)
        elif payload.event_type == layers.LayerEventType.SPECS_LOCKING_CHANGED:
            self._on_spec_locks_changed(payload.identifiers_or_spec_paths)
    def _on_spec_links_changed(self, spec_paths: List[str]):
        """Refresh the ``linked`` flag on every cached item in ``spec_paths``."""
        for spec_path in spec_paths:
            item = self.find(spec_path)
            if item:
                item.linked = self._specs_linking.is_spec_linked(spec_path)
    def _on_spec_locks_changed(self, spec_paths: List[str]):
        """Refresh the ``locked`` flag on every cached item in ``spec_paths``."""
        for spec_path in spec_paths:
            item = self.find(spec_path)
            if item:
                item.locked = self._specs_locking.is_spec_locked(spec_path)
| 5,985 | Python | 31.710382 | 153 | 0.589808 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/muteness_model.py | import weakref
import omni.ui as ui
import omni.kit.usd.layers as layers
class MutenessModel(ui.AbstractValueModel):
    """Boolean value model exposing a layer item's muteness to omni.ui widgets.

    ``local`` selects whether the local or the global muteness flag is shown.
    """

    def __init__(self, usd_context, layer_item, local: bool):
        super().__init__()
        self.local = local
        self._layer_item = layer_item
        self._usd_context = usd_context

    def destroy(self):
        """Drop held references so the layer item can be released."""
        self._layer_item = None
        self._usd_context = None

    def get_value_as_bool(self):
        """Return the muteness flag selected by ``self.local``."""
        item = self._layer_item
        return item.locally_muted if self.local else item.globally_muted

    def set_value(self, value):
        """Forward the requested muted state to the layer item."""
        self._layer_item.muted = value
| 650 | Python | 25.039999 | 61 | 0.609231 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/prim_name_model.py | import omni
import weakref
import omni.ui as ui
class PrimNameModel(ui.AbstractValueModel):
    """Read-only string value model exposing a prim item's display name."""

    def __init__(self, prim_item):
        super().__init__()
        self._prim_item = prim_item

    def destroy(self):
        """Release the prim item reference."""
        self._prim_item = None

    def get_value_as_string(self):
        """Return the name of the bound prim item."""
        return self._prim_item.name

    def set_value(self, value):
        """Ignored: names cannot be edited through this model."""
        return
| 407 | Python | 19.399999 | 43 | 0.601966 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/lock_model.py | import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils
class LockModel(ui.AbstractValueModel):
    """Boolean value model bridging a layer item's lock state to the UI."""

    def __init__(self, layer_item):
        super().__init__()
        self._layer_item = layer_item

    def destroy(self):
        """Release the layer item reference."""
        self._layer_item = None

    def get_value_as_bool(self):
        """Return True when the bound layer item is locked."""
        return self._layer_item.locked

    def set_value(self, value):
        """Apply the requested lock state through LayerModelUtils."""
        item = self._layer_item
        LayerModelUtils.lock_layer(item, value)
| 459 | Python | 24.555554 | 68 | 0.651416 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_latest_model.py | import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils
class LayerLatestModel(ui.AbstractValueModel):
    """Boolean model that reads True while the layer is out of date.

    Setting any value reloads the layer, but only when it is not already
    at the latest version.
    """

    def __init__(self, usd_context, layer_item):
        super().__init__()
        self._layer_item = layer_item
        self._usd_context = usd_context

    def destroy(self):
        """Drop held references."""
        self._layer_item = None
        self._usd_context = None

    def get_value_as_bool(self):
        """Return True when newer layer content exists (item is not latest)."""
        return not self._layer_item.latest

    def set_value(self, _):
        """Reload the layer if it is outdated; otherwise do nothing."""
        if self._layer_item.latest:
            return
        LayerModelUtils.reload_layer(self._layer_item)
| 587 | Python | 26.999999 | 68 | 0.642249 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_selection.py | import omni.kit.test
import os
import uuid
import omni.client
import omni.kit.commands
from .base import TestLayerUIBase
from omni.kit.widget.layers import LayerUtils
from pxr import Sdf, Usd
class TestLayerSelection(TestLayerUIBase):
    """UI tests covering layer and prim selection syncing in the Layer window."""
    async def setUp(self):
        # Each test starts from a fresh, empty stage.
        await super().setUp()
        self.stage = await self.prepare_empty_stage()
    async def tearDown(self):
        # Re-show the window in case a test hid it, then close the stage.
        self.layers_instance.show_window(None, True)
        await self.usd_context.close_stage_async()
        await super().tearDown()
    async def _wait(self, frames=4):
        """Pump the Kit update loop for ``frames`` frames so UI/USD events settle."""
        for i in range(frames):
            await self.app.next_update_async()
    async def test_layer_selection(self):
        """Selection in the layer tree view drives the focused-item API and callbacks."""
        layer1 = Sdf.Layer.CreateAnonymous()
        layer2 = Sdf.Layer.CreateAnonymous()
        self.stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
        self.stage.GetRootLayer().subLayerPaths.append(layer2.identifier)
        await self._wait()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        self.assertEqual(len(root_layer_item.sublayers), 2)
        selected_item = None
        def on_selected(item):
            # Records the most recent selection callback payload.
            nonlocal selected_item
            selected_item = item
        self.layers_instance.add_layer_selection_changed_fn(on_selected)
        # Select two layers by simulating UI clicks.
        layer_window = self.layers_instance._window
        layer_tree_view = layer_window._layer_view
        layer_tree_view.selection = root_layer_item.sublayers
        await self._wait()
        # get_current_focused_layer_item returns none for multiple selection.
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), None)
        self.assertEqual(self.layers_instance.get_selected_items(), root_layer_item.sublayers)
        self.assertEqual(selected_item, None)
        layer_tree_view.selection = [root_layer_item.sublayers[0]]
        await self._wait()
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), root_layer_item.sublayers[0])
        self.assertEqual(self.layers_instance.get_selected_items(), [root_layer_item.sublayers[0]])
        self.assertEqual(selected_item, root_layer_item.sublayers[0])
        layer_tree_view.selection = []
        await self._wait()
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), None)
        self.assertEqual(self.layers_instance.get_selected_items(), [])
        self.assertEqual(selected_item, None)
        # Manually set focused layer item.
        self.layers_instance.set_current_focused_layer_item(root_layer_item.sublayers[0].identifier)
        await self._wait()
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), root_layer_item.sublayers[0])
        self.assertEqual(layer_tree_view.selection, [root_layer_item.sublayers[0]])
        self.assertEqual(self.layers_instance.get_selected_items(), [root_layer_item.sublayers[0]])
        self.assertEqual(selected_item, root_layer_item.sublayers[0])
        # After listener is removed, it should not receive changed event anymore.
        selected_item = None
        self.layers_instance.remove_layer_selection_changed_fn(on_selected)
        self.layers_instance.set_current_focused_layer_item(root_layer_item.sublayers[0].identifier)
        await self._wait()
        self.assertEqual(selected_item, None)
        self.layers_instance.add_layer_selection_changed_fn(on_selected)
        self.layers_instance.show_window(None, False)
        await self._wait()
        # When window is hidden, it cannot focus any layer item.
        self.assertEqual(root_layer_item.sublayers, [])
        self.layers_instance.set_current_focused_layer_item(root_layer_item)
        await self._wait()
        self.assertEqual(selected_item, None)
        self.layers_instance.show_window(None, True)
        await self._wait()
        layer_model = self.layers_instance.get_layer_model()
        # Old items are released after window is hidden, re-fetching it.
        root_layer_item = layer_model.root_layer_item
        self.layers_instance.set_current_focused_layer_item(root_layer_item.sublayers[0].identifier)
        await self._wait()
        self.assertEqual(selected_item, root_layer_item.sublayers[0])
    async def test_prim_selection(self):
        """Stage prim selection (SelectAll / selection API) is mirrored in the tree view."""
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
        await self._wait()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)
        omni.kit.commands.execute("SelectAll")
        await self._wait()
        all_prim_paths = []
        for prim in self.stage.TraverseAll():
            all_prim_paths.append(prim.GetPath())
        all_root_specs = root_layer_item.absolute_root_spec.children
        layer_window = self.layers_instance._window
        layer_tree_view = layer_window._layer_view
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set(all_root_specs), set(selections))
        # Undoing SelectAll clears the tree-view selection too.
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertEqual(len(selections), 0)
        selection_api = self.usd_context.get_selection()
        selection_api.set_selected_prim_paths([str(path) for path in all_prim_paths[10:20]], False)
        await self._wait()
        selections = layer_tree_view.selection
        all_selected_paths = []
        for selection in selections:
            all_selected_paths.append(selection.path)
        self.assertEqual(set(all_prim_paths[10:20]), set(all_selected_paths))
    async def test_prim_selection_with_ui(self):
        """Tree-view selections are undoable commands; removal does not re-record."""
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10])
        await self._wait()
        all_root_specs = root_layer_item.absolute_root_spec.children
        layer_window = self.layers_instance._window
        layer_tree_view = layer_window._layer_view
        # Select item one by one
        layer_tree_view.selection = [all_root_specs[0]]
        layer_tree_view.selection = [all_root_specs[1]]
        layer_tree_view.selection = [all_root_specs[2]]
        # Return back to item 1
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set([all_root_specs[1]]), set(selections))
        # Return back to item 0
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set([all_root_specs[0]]), set(selections))
        # Empty selection as startup
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) == 0)
        # Select all and undo
        layer_tree_view.selection = all_root_specs
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) == 0)
        # OM-45941: Select one item and remove it will not trigger new command.
        layer_tree_view.selection = [all_root_specs[0]]
        layer_tree_view.selection = [all_root_specs[1]]
        stage = omni.usd.get_context().get_stage()
        LayerUtils.remove_prim_spec(stage.GetRootLayer(), all_root_specs[1].path)
        await self._wait()
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set([all_root_specs[0]]), set(selections))
| 7,948 | Python | 40.401041 | 109 | 0.659285 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_performance.py | import omni.kit.test
import time
from .base import TestLayerUIBase
from pxr import Sdf, Usd
class TestLayerPerformance(TestLayerUIBase):
    """Timing smoke tests for bulk layer/prim-spec operations in the layer UI."""

    async def setUp(self):
        await super().setUp()
        self.stage = await self.prepare_empty_stage()

    async def tearDown(self):
        await self.usd_context.close_stage_async()
        await super().tearDown()

    async def test_create_10_sublayers(self):
        """Report how long creating 10 sublayers under the root layer takes."""
        t0 = time.time()
        self.create_sublayers(self.stage.GetRootLayer(), [10])
        print(f"Time costed to create 10 sublayers: {time.time() - t0}")

    async def test_search_1000_prim_specs(self):
        """Report how long a text filter takes over a stage with 1000 prim specs."""
        scratch_layer = Sdf.Layer.CreateAnonymous()
        scratch_stage = Usd.Stage.Open(scratch_layer)
        self.create_prim_specs(scratch_stage, Sdf.Path.absoluteRootPath, [1000])
        await self.usd_context.attach_stage_async(scratch_stage)
        # Time only the filtering step, not stage setup.
        t0 = time.time()
        model = self.layers_instance.get_layer_model()
        model.filter_by_text("xform")
        print(f"Time costed to search 1000 prim specs: {time.time() - t0}")
| 1,130 | Python | 35.48387 | 83 | 0.664602 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_live_session.py | import carb
import omni.kit.test
import omni.usd
import omni.client
import unittest
import omni.kit.app
import carb
import omni.kit.test
import omni.usd
import omni.client
import unittest
import omni.kit.app
import omni.kit.widget.live_session_management as lsm
from .base import TestLayerUIBase
from omni.kit.usd.layers.tests.mock_utils import (
MockLiveSyncingApi, join_new_simulated_user, quit_simulated_user,
quit_all_simulated_users
)
from omni.kit.usd.layers import get_layers, get_layer_event_payload, LayerEventType, LayerUtils
from pxr import Usd, Sdf
class TestLiveSession(TestLayerUIBase):
    """UI tests for live-session management driven through the Layer window.

    Uses the mocked live-syncing API so no real Omniverse server is contacted.
    """
    # Before running each test
    async def setUp(self):
        await super().setUp()
        layers = get_layers()
        self.live_syncing = layers.get_live_syncing()
    async def tearDown(self):
        await super().tearDown()
    async def wait(self, frames=10):
        """Pump the Kit update loop for ``frames`` frames so UI/USD events settle."""
        for i in range(frames):
            await self.app.next_update_async()
    async def test_non_omniverse_stage(self):
        """The live-session dialog never appears for a non-omniverse stage."""
        import omni.kit.ui_test as ui_test
        # For non-omniverse stage, it cannot start live session.
        await self.usd_context.new_stage_async()
        await ui_test.find("Layer").focus()
        await ui_test.human_delay(100)
        window = ui_test.find("Live Session")
        self.assertFalse(window)
    async def __create_fake_stage(self):
        """Attach a fake omniverse-scheme sublayer so live-session UI activates."""
        format = Sdf.FileFormat.FindByExtension(".usd")
        # Sdf.Layer.New will not save layer so it won't fail.
        # This can be used to test layer identifier with omniverse scheme without
        # touching real server.
        layer = Sdf.Layer.New(format, "omniverse://__fake_omniverse_server__/test/test.usd")
        stage = self.usd_context.get_stage()
        stage.GetRootLayer().subLayerPaths.append(layer.identifier)
        return stage, layer
    @MockLiveSyncingApi
    async def test_session_management(self):
        """End-to-end walkthrough: create/join/leave/merge sessions and user icons."""
        _, layer = await self.__create_fake_stage()
        import omni.kit.ui_test as ui_test
        await ui_test.find("Layer").focus()
        await ui_test.human_delay(100)
        # Open the "Create Session" dialog from the live-update button's context menu.
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.select_context_menu("Create Session")
        await ui_test.human_delay(100)
        window = ui_test.find("Live Session")
        self.assertTrue(window)
        create_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='create_session_radio_button'")
        join_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='join_session_radio_button'")
        self.assertTrue(create_session_button)
        self.assertTrue(join_session_button)
        session_name_field = ui_test.find("Live Session//Frame/**/StringField[*].name=='new_session_name_field'")
        self.assertTrue(session_name_field)
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await self.wait()
        session_name_field.model.set_value("")
        # Empty session name is not valid
        await confirm_button.click()
        await ui_test.human_delay(100)
        # Invalid session name will fail to join
        await ui_test.human_delay(100)
        # Session names may not start with a digit.
        session_name_field.model.set_value("11111test_session")
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())
        session_name_field.model.set_value("")
        await ui_test.human_delay(100)
        # Session names may not contain punctuation.
        await session_name_field.input("test_session.,m,mn,m")
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())
        # Valid session name
        session_name_field.model.set_value("")
        await ui_test.human_delay(100)
        await session_name_field.input("test_session")
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertTrue(self.live_syncing.is_stage_in_live_session())
        current_live_session = self.live_syncing.get_current_live_session(layer.identifier)
        self.assertEqual(current_live_session.name, "test_session")
        layer_model = self.layers_instance.get_layer_model()
        self.assertTrue(layer_model.get_layer_item_by_identifier(current_live_session.root))
        # Simulated peers joining the session should show user icons in the window.
        join_new_simulated_user("user0", "user0", layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/ZStack[*].identifier=='user0'")
        self.assertTrue(user_layout)
        # Creates another user
        join_new_simulated_user("user1", "user1", layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/ZStack[*].identifier=='user1'")
        self.assertTrue(user_layout)
        # Quits user should remove its icon
        quit_simulated_user("user1", layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/ZStack[*].identifier=='user1'")
        self.assertFalse(user_layout)
        # Joins another 10 users will show ellipsis since maximum count is 3.
        all_user_ids = []
        for i in range(10):
            index = i + 10
            user_id = f"user{index}"
            all_user_ids.append(user_id)
            join_new_simulated_user(user_id, user_id, layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/Label[*].text=='...'")
        self.assertTrue(user_layout)
        # initialize mouse outside of list, so it doesn't accidentally hover on the wrong thing at the start
        await ui_test.emulate_mouse_move(ui_test.Vec2(0,0))
        await ui_test.emulate_mouse_move(user_layout.center)
        await self.wait(100)
        # Disable the check at this moment but only shows the dialog for coverage
        # and ensure there is are no scripting errors since ui_test cannot find
        # tooltip frame.
        # for user_id in all_user_ids:
        #     user_layout = ui_test.find(f"Layer//Frame/**/HStack[*].identifier=='{user_id}'")
        #     self.assertTrue(user_layout)
        quit_all_simulated_users(layer_identifier=layer.identifier)
        await self.wait()
        user_layout = ui_test.find("Layer//Frame/**/Label[*].text=='...'")
        self.assertFalse(user_layout)
        # Leaving the session returns the stage to non-live state.
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Leave Session")
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())
        # Open session dialog again
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        await ui_test.human_delay(100)
        window = ui_test.find("Live Session")
        self.assertTrue(window)
        create_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='create_session_radio_button'")
        join_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='join_session_radio_button'")
        self.assertTrue(create_session_button)
        self.assertTrue(join_session_button)
        # Click on join button will immediately join into the session.
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertTrue(self.live_syncing.is_stage_in_live_session())
        self.assertEqual(current_live_session.name, "test_session")
        # Quit session
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.select_context_menu("Leave Session")
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())
        # Cancel button test
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        cancel_button = ui_test.find("Live Session//Frame/**/Button[*].name=='cancel_button'")
        self.assertTrue(cancel_button)
        await cancel_button.click()
        self.assertFalse(self.live_syncing.is_stage_in_live_session())
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await confirm_button.click()
        self.assertTrue(self.live_syncing.is_stage_in_live_session())
        # Test leave session menu
        await ui_test.human_delay(100)
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        # Select leave session will notify you if you want to leave
        await ui_test.select_context_menu("Leave Session")
        await ui_test.human_delay(100)
        await confirm_button.click()
        self.assertFalse(self.live_syncing.is_stage_in_live_session())
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await confirm_button.click()
        self.assertTrue(self.live_syncing.is_stage_in_live_session())
        # In viewer-only mode "End and Merge" must be absent from the menu.
        carb.settings.get_settings().set(lsm.VIEWER_ONLY_MODE_SETTING, True)
        self.assertTrue(lsm.is_viewer_only_mode())
        await ui_test.human_delay(100)
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        # Select leave session will notify you if you want to leave
        with self.assertRaises(Exception):
            await ui_test.select_context_menu("End and Merge")
        carb.settings.get_settings().set(lsm.VIEWER_ONLY_MODE_SETTING, False)
        self.assertFalse(lsm.is_viewer_only_mode())
        # Join session and make some changes to test end session dialog
        for confirm_or_cancel in [False, True]:
            await self.wait()
            live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
            self.assertTrue(live_update_button)
            await live_update_button.right_click()
            # Select leave session will notify you if you want to leave
            await ui_test.select_context_menu("End and Merge")
            await self.wait()
            confirm_button = ui_test.find("Merge Options//Frame/**/Button[*].name=='confirm_button'")
            cancel_button = ui_test.find("Merge Options//Frame/**/Button[*].name=='cancel_button'")
            self.assertTrue(confirm_button)
            self.assertTrue(cancel_button)
            if confirm_or_cancel:
                await confirm_button.click()
                await self.wait()
                self.assertFalse(self.live_syncing.is_stage_in_live_session())
            else:
                await cancel_button.click()
                self.assertTrue(self.live_syncing.is_stage_in_live_session())
        self.live_syncing.stop_all_live_sessions()
        await self.wait()
| 12,563 | Python | 42.777003 | 121 | 0.648253 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/drag_drop_single.py | ## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.test
import omni.usd
import omni.kit.ui_test as ui_test
from omni.kit.test.async_unittest import AsyncTestCase
from omni.kit.test_suite.helpers import open_stage, wait_stage_loading, get_test_data_path, get_prims, arrange_windows
from omni.kit.window.content_browser.test_helper import ContentBrowserTestHelper
class DragDropFileStageSingle(AsyncTestCase):
    """Drag-and-drop tests: dropping USD files from Content onto the Layer window."""
    # Before running each test
    async def setUp(self):
        # Arrange the Layer window and open the reference scene.
        await arrange_windows("Layer", 800, 600)
        await open_stage(get_test_data_path(__name__, "bound_shapes.usda"))
        await wait_stage_loading()
    # After running each test
    async def tearDown(self):
        await wait_stage_loading()
    async def test_l1_drag_drop_single_usd_stage(self):
        """Dropping USD files onto the Layer window inserts their prims into the stage."""
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        await ui_test.find("Content").focus()
        layer_window = ui_test.find("Layer")
        await layer_window.focus()
        # verify prims
        paths = [prim.GetPath().pathString for prim in get_prims(stage) if not omni.usd.is_hidden_type(prim)]
        self.assertEqual(paths, ['/World', '/World/defaultLight', '/World/Cone', '/World/Cube', '/World/Sphere', '/World/Cylinder', '/World/Looks', '/World/Looks/OmniPBR', '/World/Looks/OmniPBR/Shader', '/World/Looks/OmniGlass', '/World/Looks/OmniGlass/Shader', '/World/Looks/OmniSurface_Plastic', '/World/Looks/OmniSurface_Plastic/Shader'])
        # drag/drop files to stage window
        drag_target = layer_window.position + ui_test.Vec2(layer_window.size.x / 2, layer_window.size.y / 2)
        async with ContentBrowserTestHelper() as content_browser_helper:
            await content_browser_helper.toggle_grid_view_async(show_grid_view=False)
            await ui_test.human_delay(50)
            for file_path in ["4Lights.usda", "quatCube.usda"]:
                usd_path = get_test_data_path(__name__, file_path)
                await content_browser_helper.drag_and_drop_tree_view(usd_path, drag_target=drag_target)
        # verify prims
        paths = [prim.GetPath().pathString for prim in get_prims(stage) if not omni.usd.is_hidden_type(prim)]
        self.assertEqual(paths, ['/Stage', '/Stage/SphereLight_01', '/Stage/SphereLight_02', '/Stage/SphereLight_03', '/Stage/SphereLight_00', '/Stage/Cube', '/World', '/World/defaultLight', '/World/Cone', '/World/Cube', '/World/Sphere', '/World/Cylinder', '/World/Looks', '/World/Looks/OmniPBR', '/World/Looks/OmniPBR/Shader', '/World/Looks/OmniGlass', '/World/Looks/OmniGlass/Shader', '/World/Looks/OmniSurface_Plastic', '/World/Looks/OmniSurface_Plastic/Shader'])
| 3,061 | Python | 55.703703 | 466 | 0.698465 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_material_watcher.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.kit.test
import omni.usd
import omni.kit.app
from pxr import Usd, UsdShade, Sdf
from omni.kit.test_suite.helpers import get_test_data_path
from omni.kit.usd.layers import LayerUtils
class TestMaterialWatcher(omni.kit.test.AsyncTestCase):
    """Tests that material parameter defaults are mirrored into the session layer.

    Covers selection-driven parameter population, variant switches, and the
    interaction between root-layer overrides and session-layer defaults.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._test_scene = get_test_data_path(__name__, "ColorsSaltBox/SaltBox.usda")
    async def setUp(self):
        # Open a fresh stage with the test scene as a sublayer and cache
        # the prim/property paths exercised by the tests.
        await omni.usd.get_context().new_stage_async()
        self.stage = omni.usd.get_context().get_stage()
        self.stage.GetRootLayer().subLayerPaths.append(self._test_scene)
        self.context = omni.usd.get_context()
        self.root_prim_path = Sdf.Path("/SaltBox")
        self.shader_prim_path = Sdf.Path("/SaltBox/Looks/SaltBox_Paper1/Shader")
        self.app = omni.kit.app.get_app()
        self.selection = self.context.get_selection()
        self.diffuse_constant_color_path = self.shader_prim_path.AppendProperty("inputs:diffuse_color_constant")
        self.albedo_brightness_path = self.shader_prim_path.AppendProperty("inputs:albedo_brightness")
        self.session_layer = self.stage.GetSessionLayer()
        self.stage.SetEditTarget(Usd.EditTarget(self.stage.GetRootLayer()))
        # The material watcher needs a hydra engine attached before MDL
        # parameters can be populated.
        renderer = "rtx"
        if renderer not in self.context.get_attached_hydra_engine_names():
            omni.usd.add_hydra_engine(renderer, self.context)
        await self._wait()
        self.context.add_to_pending_creating_mdl_paths(str(self.shader_prim_path), True, True)
        await self._wait(2)
    async def tearDown(self):
        pass
    async def _wait(self, frames=5):
        """Pump the Kit update loop for ``frames`` frames so USD/UI events settle."""
        for _ in range(frames):
            await self.app.next_update_async()
    async def test_referenced_material(self):
        """Selecting a referenced shader must not write params back to the root layer."""
        await self.context.new_stage_async()
        self.stage = self.context.get_stage()
        self.session_layer = self.stage.GetSessionLayer()
        layer = Sdf.Layer.CreateAnonymous()
        self.stage.GetRootLayer().subLayerPaths.append(layer.identifier)
        # Writes materials into the sublayer to keep edit target empty.
        with Usd.EditContext(self.stage, layer):
            root_prim = self.stage.DefinePrim("/SaltBox")
            root_prim.GetReferences().AddReference(self._test_scene)
        await self._wait()
        # OM-84443: Populating material params for referenced prim will not write all back to root layer
        self.selection.set_selected_prim_paths([str(self.shader_prim_path)], False)
        await self._wait()
        shader_prim = UsdShade.Shader.Get(self.stage, self.shader_prim_path)
        self.assertTrue(root_prim)
        self.assertTrue(shader_prim)
        await self.__check_material(root_prim, shader_prim)
    async def test_material_watcher(self):
        """Selecting a shader populates its params into the session layer only."""
        root_prim = self.stage.GetPrimAtPath(self.root_prim_path)
        shader_prim = UsdShade.Shader.Get(self.stage, self.shader_prim_path)
        self.assertTrue(root_prim)
        self.assertTrue(shader_prim)
        # Populating material params
        self.selection.set_selected_prim_paths([str(self.shader_prim_path)], False)
        await self._wait()
        await self.__check_material(root_prim, shader_prim)
    async def __check_material(self, root_prim, shader_prim):
        """Shared assertions for session-layer default population behavior."""
        # Checking params in the session layer
        shader_prim_spec = self.session_layer.GetPrimAtPath(self.shader_prim_path)
        self.assertTrue(shader_prim_spec)
        # Checkes to make sure params are not written to the current edit target after selection.
        root_layer = self.stage.GetRootLayer()
        self.assertFalse(root_layer.GetPrimAtPath(self.shader_prim_path))
        # It's overrided already in USD, so it's empty in session layer.
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, None)
        constant_color_composed = self.stage.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_composed)
        self.assertEqual(constant_color_composed.Get(), (0, 0, 1))
        albedo_brightness_spec = self.session_layer.GetPropertyAtPath(self.albedo_brightness_path)
        self.assertFalse(albedo_brightness_spec.default)
        variant_set = root_prim.GetVariantSet("materials")
        self.assertTrue(variant_set)
        variant_set.SetVariantSelection("renderLow")
        await self._wait()
        # It's not overrided in USD, then the default value will be populated into session layer.
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, (0.2, 0.2, 0.2))
        self.assertFalse(albedo_brightness_spec.default)
        # Sets value to root layer will remove the overrides in session layer
        constant_color_composed = self.stage.GetPropertyAtPath(self.diffuse_constant_color_path)
        constant_color_composed.Set((0.5, 0.5, 0.5))
        await self._wait()
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(not constant_color_spec or not constant_color_spec.default)
        # Removes the overrides will cause session layer to populate the default value.
        LayerUtils.remove_prim_spec(self.stage.GetRootLayer(), self.shader_prim_path)
        await self._wait()
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertEqual(constant_color_spec.default, (0.2, 0.2, 0.2))
        # Switches it back will remove the default value in session layer.
        variant_set.SetVariantSelection("renderHigh")
        await self._wait()
        # It's not overrided in USD, then the default value will be populated into session layer.
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, None)
        self.assertFalse(albedo_brightness_spec.default)
        # OM-110849: Sets default value in root layer and then edits it with EditContext in session layer will not
        # been removed.
        constant_color_composed = self.stage.GetPropertyAtPath(self.diffuse_constant_color_path)
        constant_color_composed.Set((0.5, 0.5, 0.5))
        await self._wait()
        with Usd.EditContext(self.stage, self.session_layer):
            constant_color_composed.Set((0.2, 0.2, 0.2))
        await self._wait()
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, (0.2, 0.2, 0.2))
        constant_color_spec = root_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, (0.5, 0.5, 0.5))
| 7,529 | Python | 46.35849 | 114 | 0.693585 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_usd_events.py | import omni.kit.test
import os
import uuid
import omni.client
import omni.kit.commands
import omni.kit.usd.layers as layers
from stat import S_IREAD, S_IWRITE
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.layers.prim_spec_item import PrimSpecSpecifier
from .base import TestLayerUIBase
from pxr import Sdf, UsdGeom, Usd
class TestLayerUsdEvents(TestLayerUIBase):
    """Tests for layer model refresh reacted to usd stage changes."""
    async def setUp(self):
        # Each test gets a unique scratch folder plus a fresh anonymous stage.
        await super().setUp()
        self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
        self.test_folder += "/"
        await omni.client.create_folder_async(self.test_folder)
        self.stage = await self.prepare_empty_stage()
    async def tearDown(self):
        # Close the stage first so no layer still references the folder being deleted.
        await self.usd_context.close_stage_async()
        await omni.client.delete_async(self.test_folder)
        await super().tearDown()
    async def test_empty_stage(self):
        """An empty stage exposes root/session layer items with the expected flags."""
        root_layer = self.stage.GetRootLayer()
        session_layer = self.stage.GetSessionLayer()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        session_layer_item = layer_model.session_layer_item
        self.assertTrue(root_layer_item)
        self.assertTrue(session_layer_item)
        self.check_layer_regular_fields(
            root_layer_item, "Root Layer", root_layer.identifier,
            is_edit_target=True, reserved=True,
            from_session_layer=False, anonymous=True,
        )
        self.check_layer_regular_fields(
            session_layer_item, "Session Layer", session_layer.identifier,
            is_edit_target=False, reserved=True,
            from_session_layer=True, anonymous=True,
        )
    async def test_create_sublayers(self):
        """Sublayer trees created under root and session layers show up in the model."""
        root_layer = self.stage.GetRootLayer()
        session_layer = self.stage.GetSessionLayer()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        session_layer_item = layer_model.session_layer_item
        # [2, 5, 4, 2] describes the fan-out of each nesting level.
        _, identifiers_map = self.create_sublayers(root_layer, [2, 5, 4, 2])
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(root_layer_item, identifiers_map)
        _, identifiers_map = self.create_sublayers(session_layer, [2, 5, 4, 2])
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(session_layer_item, identifiers_map)
    async def test_edit_target_change(self):
        """Switching the stage edit target toggles is_edit_target on the items."""
        root_layer = self.stage.GetRootLayer()
        session_layer = self.stage.GetSessionLayer()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        session_layer_item = layer_model.session_layer_item
        LayerUtils.set_edit_target(self.stage, session_layer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertTrue(session_layer_item.is_edit_target)
        self.assertFalse(root_layer_item.is_edit_target)
        LayerUtils.set_edit_target(self.stage, root_layer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertTrue(root_layer_item.is_edit_target)
        self.assertFalse(session_layer_item.is_edit_target)
    async def test_layer_misc_properties(self):
        """Missing, read-only and dirty states are reflected on sublayer items."""
        root_layer = self.stage.GetRootLayer()
        # A non-existent sublayer path should produce a "missing" item.
        root_layer.subLayerPaths.insert(0, "../invalid_path.usd")
        await self.app.next_update_async()
        await self.app.next_update_async()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        self.assertEqual(len(root_layer_item.sublayers), 1)
        missing_layer = root_layer_item.sublayers[0]
        self.check_layer_regular_fields(
            missing_layer, "invalid_path.usd", missing_layer.identifier,
            missing=True, anonymous=False, parent=root_layer_item
        )
        read_only_usd = omni.client.combine_urls(self.test_folder, "read_only.usd")
        read_only_layer = Sdf.Layer.CreateNew(read_only_usd)
        read_only_layer.Save()
        # Drop the handle before chmod — presumably so the reopened layer
        # picks up the read-only permission; TODO confirm.
        read_only_layer = None
        self.assertTrue(os.path.exists(read_only_usd))
        os.chmod(read_only_usd, S_IREAD)
        read_only_layer = Sdf.Layer.FindOrOpen(read_only_usd)
        root_layer.subLayerPaths.append(read_only_usd)
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        read_only_item = root_layer_item.sublayers[1]
        self.check_layer_regular_fields(
            read_only_item, "read_only.usd", read_only_layer.identifier,
            read_only=True, parent=root_layer_item, anonymous=False
        )
        # Change the write permission back so it could be removed.
        os.chmod(read_only_usd, S_IWRITE)
        dirty_layer_usd = omni.client.combine_urls(self.test_folder, "dirty_layer.usd")
        dirty_layer = Sdf.Layer.CreateNew(dirty_layer_usd)
        dirty_layer.Save()
        root_layer.subLayerPaths.append(dirty_layer_usd)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertTrue(len(root_layer_item.sublayers) == 3)
        dirty_layer_item = root_layer_item.sublayers[2]
        self.check_layer_regular_fields(
            dirty_layer_item, "dirty_layer.usd", dirty_layer.identifier,
            dirty=False, parent=root_layer_item, anonymous=False
        )
        # Change something
        customLayerData = dirty_layer.customLayerData
        customLayerData["test"] = 1
        dirty_layer.customLayerData = customLayerData
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            dirty_layer_item, "dirty_layer.usd", dirty_layer.identifier,
            dirty=True, parent=root_layer_item, anonymous=False
        )
    async def test_layer_local_mute_events(self):
        """Local (per-stage) muting propagates muted/muted_or_parent_muted flags."""
        root_layer = self.stage.GetRootLayer()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        # Three levels, one sublayer each: root -> level0 -> level1 -> level2.
        sublayers_map, _ = self.create_sublayers(root_layer, [1, 1, 1])
        await self.app.next_update_async()
        await self.app.next_update_async()
        level_0_sublayer = sublayers_map[root_layer.identifier][0]
        level_1_sublayer = sublayers_map[level_0_sublayer.identifier][0]
        level_2_sublayer = sublayers_map[level_1_sublayer.identifier][0]
        level_0_item = root_layer_item.sublayers[0]
        level_1_item = level_0_item.sublayers[0]
        level_2_item = level_1_item.sublayers[0]
        self.stage.MuteLayer(level_2_sublayer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
            parent=level_1_item, anonymous=True, muted=True, muted_or_parent_muted=True,
        )
        self.assertTrue(level_2_item.locally_muted)
        self.stage.UnmuteLayer(level_2_sublayer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
            parent=level_1_item, anonymous=True, muted=False, muted_or_parent_muted=False
        )
        self.assertFalse(level_2_item.locally_muted)
        # Muting an ancestor: descendants stay unmuted themselves but report
        # muted_or_parent_muted=True.
        self.stage.MuteLayer(level_0_sublayer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
            parent=root_layer_item, anonymous=True, muted=True, muted_or_parent_muted=True,
            sublayer_list=[level_1_sublayer.identifier]
        )
        self.check_layer_regular_fields(
            level_1_item, level_1_sublayer.identifier, level_1_sublayer.identifier,
            parent=level_0_item, anonymous=True, muted=False, muted_or_parent_muted=True,
            sublayer_list=[level_2_sublayer.identifier]
        )
        self.check_layer_regular_fields(
            level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
            parent=level_1_item, anonymous=True, muted=False, muted_or_parent_muted=True
        )
        self.stage.UnmuteLayer(level_0_sublayer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_1_item, level_1_sublayer.identifier, level_1_sublayer.identifier,
            parent=level_0_item, anonymous=True, muted=False, muted_or_parent_muted=False,
            sublayer_list=[level_2_sublayer.identifier]
        )
        self.check_layer_regular_fields(
            level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
            parent=level_1_item, anonymous=True, muted=False, muted_or_parent_muted=False
        )
    async def test_layer_global_mute_events(self):
        """Global-scope muting works both via stage API and LayerUtils helper."""
        root_layer = self.stage.GetRootLayer()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        layers_state = layers.get_layers().get_layers_state()
        # Switch muteness scope to global for the rest of this test.
        layers_state.set_muteness_scope(True)
        sublayers_map, _ = self.create_sublayers(root_layer, [1])
        await self.app.next_update_async()
        await self.app.next_update_async()
        level_0_sublayer = sublayers_map[root_layer.identifier][0]
        level_0_item = root_layer_item.sublayers[0]
        self.stage.MuteLayer(level_0_sublayer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
            parent=root_layer_item, anonymous=True, muted=True, muted_or_parent_muted=True,
        )
        self.assertTrue(level_0_item.globally_muted)
        self.stage.UnmuteLayer(level_0_sublayer.identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
            parent=root_layer_item, anonymous=True, muted=False, muted_or_parent_muted=False,
        )
        self.assertFalse(level_0_item.globally_muted)
        # Same round-trip through the LayerUtils global-muteness helper.
        LayerUtils.set_layer_global_muteness(root_layer, level_0_sublayer.identifier, True)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
            parent=root_layer_item, anonymous=True, muted=True, muted_or_parent_muted=True,
        )
        self.assertTrue(level_0_item.globally_muted)
        LayerUtils.set_layer_global_muteness(root_layer, level_0_sublayer.identifier, False)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_layer_regular_fields(
            level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
            parent=root_layer_item, anonymous=True, muted=False, muted_or_parent_muted=False,
        )
        self.assertFalse(level_0_item.globally_muted)
    async def test_sublayer_edits(self):
        """Remove/create/move/replace sublayer commands (and undo) refresh the tree."""
        root_layer = self.stage.GetRootLayer()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        _, identifiers_map = self.create_sublayers(root_layer, [3, 3, 3])
        await self.app.next_update_async()
        await self.app.next_update_async()
        level_0_sublayer0_identifier = identifiers_map[root_layer.identifier][0]
        level_0_sublayer1_identifier = identifiers_map[root_layer.identifier][1]
        level_0_sublayer2_identifier = identifiers_map[root_layer.identifier][2]
        # Layer refresh after remove.
        omni.kit.commands.execute("RemoveSublayer", layer_identifier=root_layer.identifier, sublayer_position=1)
        complete_sublayers = identifiers_map[root_layer.identifier][:]
        identifiers_map[root_layer.identifier] = [level_0_sublayer0_identifier, level_0_sublayer2_identifier]
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(root_layer_item, identifiers_map)
        omni.kit.undo.undo()
        await self.app.next_update_async()
        await self.app.next_update_async()
        identifiers_map[root_layer.identifier] = complete_sublayers
        self.check_sublayer_tree(root_layer_item, identifiers_map)
        # Layer refresh after create.
        # Create layer before second sublayer of root layer.
        omni.kit.commands.execute(
            "CreateSublayer",
            layer_identifier=root_layer.identifier,
            sublayer_position=1,
            new_layer_path="",
            transfer_root_content=False,
            create_or_insert=True,
            layer_name="",
        )
        new_layer_identifier = self.stage.GetRootLayer().subLayerPaths[1]
        # Handle kept on purpose — presumably pins the anonymous layer so it
        # survives the undo/redo below; TODO confirm.
        new_layer = Sdf.Layer.FindOrOpen(new_layer_identifier)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertTrue(root_layer_item.sublayers[1].identifier == new_layer_identifier)
        omni.kit.undo.undo()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(root_layer_item, identifiers_map)
        # Layer refresh after move.
        omni.kit.commands.execute(
            "MoveSublayer",
            from_parent_layer_identifier=root_layer.identifier,
            from_sublayer_position=2,
            to_parent_layer_identifier=root_layer.identifier,
            to_sublayer_position=0,
            remove_source=True,
        )
        complete_sublayers = identifiers_map[root_layer.identifier][:]
        identifiers_map[root_layer.identifier] = [
            level_0_sublayer2_identifier,
            level_0_sublayer0_identifier,
            level_0_sublayer1_identifier
        ]
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(root_layer_item, identifiers_map)
        omni.kit.undo.undo()
        identifiers_map[root_layer.identifier] = complete_sublayers
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(root_layer_item, identifiers_map)
        # Layer refresh after replace.
        omni.kit.commands.execute(
            "ReplaceSublayer",
            layer_identifier=root_layer.identifier,
            sublayer_position=1,
            new_layer_path=new_layer_identifier,
        )
        complete_sublayers = identifiers_map[root_layer.identifier][:]
        identifiers_map[root_layer.identifier] = [
            level_0_sublayer0_identifier,
            new_layer_identifier,
            level_0_sublayer2_identifier
        ]
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(root_layer_item, identifiers_map)
        omni.kit.undo.undo()
        identifiers_map[root_layer.identifier] = complete_sublayers
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_sublayer_tree(root_layer_item, identifiers_map)
    async def test_prim_specs_create(self):
        """Prim specs defined on the edit target appear under the matching layer item."""
        session_layer = self.stage.GetSessionLayer()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        session_layer_item = layer_model.session_layer_item
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)
        # Repeat with the session layer as edit target.
        LayerUtils.set_edit_target(self.stage, session_layer.identifier)
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_prim_spec_tree(session_layer_item.absolute_root_spec, prim_spec_paths)
    async def test_prim_specs_edits(self):
        """RemovePrimSpec (and its undo) prunes/restores the prim-spec subtree."""
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        prim1_of_root = root_layer_item.prim_specs[1].path
        omni.kit.commands.execute(
            "RemovePrimSpec",
            layer_identifier=root_layer_item.identifier,
            prim_spec_path=prim1_of_root
        )
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        # Removing a spec removes its whole subtree from the expectation.
        changed_prim_specs = prim_spec_paths.copy()
        for path in prim_spec_paths:
            if path.HasPrefix(prim1_of_root):
                changed_prim_specs.discard(path)
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, changed_prim_specs)
        omni.kit.undo.undo()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)
    async def test_layer_flush(self):
        """TransferContent replaces a layer's specs and the model follows."""
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        session_layer_item = layer_model.session_layer_item
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)
        session_layer_prim_spec_paths = self.get_all_prim_spec_paths(session_layer_item.absolute_root_spec)
        root_layer_item.layer.TransferContent(session_layer_item.layer)
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, session_layer_prim_spec_paths)
    async def test_prim_spec_type_name_change(self):
        """Changing a prim's type name is reflected on its prim-spec item."""
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        self.stage.DefinePrim("/test")
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertEqual(root_layer_item.prim_specs[0].path, Sdf.Path("/test"))
        self.assertEqual(root_layer_item.prim_specs[0].type_name, "")
        UsdGeom.Cube.Define(self.stage, root_layer_item.prim_specs[0].path)
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertEqual(root_layer_item.prim_specs[0].type_name, "Cube")
    async def test_parenting_prim_refresh(self):
        # Test for https://nvidia-omniverse.atlassian.net/browse/OM-34957
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        # Creates 3 prims
        prim_spec_paths = list(self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [3]))
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, set(prim_spec_paths))
        # Moves first two prims as the children of the 3rd one.
        new_path0 = prim_spec_paths[2].AppendElementString(prim_spec_paths[0].name)
        new_path1 = prim_spec_paths[2].AppendElementString(prim_spec_paths[1].name)
        omni.kit.commands.execute("MovePrim", path_from=prim_spec_paths[0], path_to=new_path0)
        omni.kit.commands.execute("MovePrim", path_from=prim_spec_paths[1], path_to=new_path1)
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertEqual(len(root_layer_item.absolute_root_spec.children), 1)
        self.assertEqual(root_layer_item.absolute_root_spec.children[0].path, prim_spec_paths[2])
        self.assertEqual(len(root_layer_item.absolute_root_spec.children[0].children), 2)
        self.check_prim_spec_children(root_layer_item.absolute_root_spec.children[0], set([new_path0, new_path1]))
        # Each undo reverts one MovePrim at a time.
        omni.kit.undo.undo()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertEqual(len(root_layer_item.absolute_root_spec.children), 2)
        self.check_prim_spec_children(root_layer_item.absolute_root_spec, set(prim_spec_paths[1:3]))
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, set([new_path0, prim_spec_paths[1], prim_spec_paths[2]]))
        omni.kit.undo.undo()
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, set(prim_spec_paths))
    async def test_specifier_reference(self):
        # Test for https://nvidia-omniverse.atlassian.net/browse/OM-34957
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        layer_content = '''\
            #sdf 1.0
            def "test_prim" (
                prepend references = @../invalid/reference2.usd@
            )
            {
            }
        '''
        root_layer_item.layer.ImportFromString(layer_content)
        await self.app.next_update_async()
        await self.app.next_update_async()
        test_prim_spec = root_layer_item.absolute_root_spec.children[0]
        self.assertEqual(test_prim_spec.specifier, PrimSpecSpecifier.DEF_WITH_REFERENCE)
        stage = self.stage
        test_prim = stage.GetPrimAtPath("/test_prim")
        # Convert every composed reference into an equivalent payload; the
        # specifier should flip from DEF_WITH_REFERENCE to DEF_WITH_PAYLOAD.
        ref_and_layers = omni.usd.get_composed_references_from_prim(test_prim)
        for reference, layer in ref_and_layers:
            with Usd.EditContext(stage, layer):
                payload = Sdf.Payload(assetPath=reference.assetPath.replace("\\", "/"), primPath=reference.primPath, layerOffset=reference.layerOffset)
                omni.kit.commands.execute("RemoveReference", stage=stage, prim_path=test_prim.GetPath(), reference=reference)
                omni.kit.commands.execute("AddPayload", stage=stage, prim_path=test_prim.GetPath(), payload=payload)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertEqual(test_prim_spec.specifier, PrimSpecSpecifier.DEF_WITH_PAYLOAD)
        # And back again: payloads -> references.
        ref_and_layers = omni.usd.get_composed_payloads_from_prim(test_prim)
        for payload, layer in ref_and_layers:
            with Usd.EditContext(stage, layer):
                reference = Sdf.Reference(assetPath=payload.assetPath.replace("\\", "/"), primPath=payload.primPath, layerOffset=payload.layerOffset)
                omni.kit.commands.execute("RemovePayload", stage=stage, prim_path=test_prim.GetPath(), payload=payload)
                omni.kit.commands.execute("AddReference", stage=stage, prim_path=test_prim.GetPath(), reference=reference)
        await self.app.next_update_async()
        await self.app.next_update_async()
        self.assertEqual(test_prim_spec.specifier, PrimSpecSpecifier.DEF_WITH_REFERENCE)
| 24,707 | Python | 45.097015 | 151 | 0.657506 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_path_utils.py | import omni.kit.test
import omni.usd
from omni.kit.widget.layers.path_utils import PathUtils
class TestPathUtils(omni.kit.test.AsyncTestCase):
    """Unit tests for the PathUtils URL-classification helpers."""

    def test_utils(self):
        """is_omni_path accepts omniverse:// URLs and rejects local file paths."""
        omni_url = "omniverse://test-server/invalid_path"
        local_path = "c:/file.usd"
        self.assertTrue(PathUtils.is_omni_path(omni_url))
        self.assertFalse(PathUtils.is_omni_path(local_path))

    def test_is_live_layer(self):
        """is_omni_live accepts .live omniverse URLs and rejects local file paths."""
        live_url = "omniverse://test-server/test.live"
        local_path = "c:/file.usd"
        self.assertTrue(PathUtils.is_omni_live(live_url))
        self.assertFalse(PathUtils.is_omni_live(local_path))
| 587 | Python | 31.666665 | 55 | 0.674617 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/base.py | import carb
import omni
import omni.kit.test
import omni.usd
import omni.client
import omni.kit.widget.layers
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.layers.prim_spec_item import PrimSpecItem, PrimSpecSpecifier
from pxr import Sdf, Usd, UsdGeom, Gf
class TestLayerNonUIBase(omni.kit.test.AsyncTestCase):
    """Minimal async test base: opens a fresh stage per test, no UI wiring."""

    async def setUp(self):
        """Cache the app/context handles, then start from a new stage."""
        self.app = omni.kit.app.get_app()
        self.usd_context = omni.usd.get_context()
        await self.usd_context.new_stage_async()

    async def tearDown(self):
        """Close the stage opened by setUp."""
        await self.usd_context.close_stage_async()
class TestLayerUIBase(omni.kit.test.AsyncTestCase):
    """Base class for layer-window UI tests.

    Provides helpers to build sublayer / prim-spec trees on a stage and to
    assert the fields exposed by layer and prim-spec items of the layer model.

    Fix: the field-checking helpers previously used mutable default
    arguments (``[]``); defaults are now immutable tuples, which is
    backward-compatible since the values are only iterated, never mutated.
    """

    # Before running each test
    async def setUp(self):
        """Open a fresh stage and resolve the temp directory used by tests."""
        await omni.usd.get_context().new_stage_async()
        self.usd_context = omni.usd.get_context()
        self.layers_instance = omni.kit.widget.layers.get_instance()
        self.app = omni.kit.app.get_app()
        token = carb.tokens.get_tokens_interface()
        self.temp_dir = token.resolve("${temp}")
        if not self.temp_dir.endswith("/"):
            self.temp_dir += "/"

    async def tearDown(self):
        """Reset spec-linking mode and close the stage if one is still open."""
        if self.layers_instance.get_layer_model():
            self.layers_instance.get_layer_model().spec_linking_mode = False
        if self.usd_context.get_stage():
            await omni.usd.get_context().close_stage_async()

    def check_prim_spec_regular_fields(
        self, prim_spec_item, name, path, type_name="",
        specifier=PrimSpecSpecifier.DEF_ONLY, children=(),
        has_missing_reference=False, instanceable=False,
        filtered=False, has_children=False
    ):
        """Assert all regular fields of a prim-spec item.

        ``children`` is an iterable of path-convertible values naming the
        expected *descendant* spec paths (the item itself is excluded).
        """
        self.assertEqual(prim_spec_item.name, name)
        self.assertEqual(prim_spec_item.path, path)
        self.assertEqual(prim_spec_item.type_name, type_name)
        self.assertEqual(prim_spec_item.specifier, specifier)
        # Compare the full descendant set (minus the item itself) against
        # the expected children, normalized to Sdf.Path.
        prim_spec_paths = self.get_all_prim_spec_paths(prim_spec_item)
        expected_paths = {Sdf.Path(child) for child in children}
        prim_spec_paths.discard(prim_spec_item.path)
        self.assertEqual(prim_spec_paths, expected_paths)
        self.assertEqual(prim_spec_item.has_missing_reference, has_missing_reference)
        self.assertEqual(prim_spec_item.instanceable, instanceable)
        self.assertEqual(prim_spec_item.filtered, filtered)
        self.assertEqual(prim_spec_item.has_children, has_children)

    def check_layer_regular_fields(
        self, layer_item, name, identifier, missing=False, is_edit_target=False,
        reserved=False, read_only=False, sublayer_list=(), muted=False,
        muted_or_parent_muted=False, from_session_layer=False,
        dirty=False, anonymous=False, filtered=False,
        prim_spec_list=(), parent=None
    ):
        """Assert all regular fields of a layer item.

        ``sublayer_list`` is the expected ordered list of sublayer
        identifiers; ``prim_spec_list`` the expected set of prim spec paths.
        """
        self.assertEqual(layer_item.name, name)
        self.assertEqual(layer_item.identifier, identifier)
        self.assertEqual(layer_item.missing, missing)
        self.assertEqual(layer_item.is_edit_target, is_edit_target)
        self.assertEqual(layer_item.reserved, reserved)
        self.assertEqual(layer_item.read_only_on_disk, read_only)
        self.assertEqual(layer_item.from_session_layer, from_session_layer)
        self.assertEqual(layer_item.muted, muted)
        # Editable only when neither muted, read-only on disk, nor locked.
        self.assertEqual(layer_item.editable, not muted and not layer_item.read_only_on_disk and not layer_item.locked)
        if not anonymous:
            # Dirty state is only checked for layers backed by real files.
            self.assertEqual(layer_item.dirty, dirty)
        self.assertEqual(layer_item.anonymous, anonymous)
        self.assertEqual(layer_item.filtered, filtered)
        self.assertEqual(layer_item.muted_or_parent_muted, muted_or_parent_muted)
        self.assertEqual(layer_item.parent, parent)
        paths = self.get_all_sublayer_identifiers(layer_item)
        # list(...) keeps assertEqual semantics with the tuple default.
        self.assertEqual(paths, list(sublayer_list))
        prim_spec_paths = self.get_all_prim_spec_paths(layer_item.absolute_root_spec)
        expected_paths = set(prim_spec_list)
        self.assertEqual(prim_spec_paths, expected_paths)

    def create_flat_sublayers(self, root_layer, num):
        """Create ``num`` anonymous sublayers under *root_layer*.

        Returns ``(layers, identifiers)`` in creation order.
        """
        sublayers = []
        identifiers = []
        for i in range(num):
            layer = LayerUtils.create_sublayer(root_layer, i, "")
            sublayers.append(layer)
            identifiers.append(layer.identifier)
        return sublayers, identifiers

    def create_flat_prim_specs(self, stage, parent_path, num):
        """Define ``num`` translated Xform prims under *parent_path*.

        Returns the set of created prim paths.
        """
        prim_spec_paths = set()
        for i in range(num):
            prim = stage.DefinePrim(parent_path.AppendElementString(f"xform{i}"), "Xform")
            # Author a transform so each prim carries a non-trivial opinion.
            translation = Gf.Vec3d(-200, 0.0, 0.0)
            common_api = UsdGeom.XformCommonAPI(prim)
            common_api.SetTranslate(translation)
            prim_spec_paths.add(prim.GetPath())
        return prim_spec_paths

    def get_all_prim_spec_items(self, prim_spec_item):
        """Return the set of *prim_spec_item* and all of its descendants.

        The absolute root item itself is excluded; traversal is an
        iterative depth-first walk.
        """
        prim_specs = set()
        q = [prim_spec_item]
        if prim_spec_item.path != Sdf.Path.absoluteRootPath:
            prim_specs.add(prim_spec_item)
        while len(q) > 0:
            item = q.pop()
            specs = item.children
            for spec in specs:
                prim_specs.add(spec)
                q.append(spec)
        return prim_specs

    def get_all_prim_spec_paths(self, prim_spec_item):
        """Return the set of paths of *prim_spec_item* and its descendants."""
        return {spec.path for spec in self.get_all_prim_spec_items(prim_spec_item)}

    def get_all_sublayer_identifiers(self, layer_item):
        """Return the ordered identifiers of *layer_item*'s direct sublayers."""
        return [sublayer.identifier for sublayer in layer_item.sublayers]

    def create_sublayers(self, root_layer, level=[]):
        """Recursively create a sublayer tree with fan-out ``level[i]`` per depth.

        Returns ``(sublayers_map, identifiers_map)`` keyed by parent layer
        identifier.
        """
        if not level:
            return {}, {}
        sublayers, identifiers = self.create_flat_sublayers(root_layer, level[0])
        sublayers_map = {}
        identifiers_map = {}
        sublayers_map[root_layer.identifier] = sublayers
        identifiers_map[root_layer.identifier] = identifiers
        for sublayer in sublayers:
            lm, im = self.create_sublayers(sublayer, level[1:])
            sublayers_map.update(lm)
            identifiers_map.update(im)
        return sublayers_map, identifiers_map

    def create_prim_specs(self, stage, parent_prim_path, level=[]):
        """Recursively define a prim tree with fan-out ``level[i]`` per depth.

        Returns the set of all created prim paths.
        """
        if not level:
            return set()
        prim_spec_paths = self.create_flat_prim_specs(stage, parent_prim_path, level[0])
        all_child_spec_paths = set()
        for prim_spec_path in prim_spec_paths:
            all_child_spec_paths.update(self.create_prim_specs(stage, prim_spec_path, level[1:]))
        prim_spec_paths.update(all_child_spec_paths)
        return prim_spec_paths

    def check_sublayer_tree(self, layer_item, identifiers_map):
        """Assert *layer_item*'s sublayer tree matches *identifiers_map*."""
        layer_identifiers = identifiers_map.get(layer_item.identifier, [])
        sublayer_paths = self.get_all_sublayer_identifiers(layer_item)
        self.assertEqual(
            sublayer_paths, layer_identifiers,
            f"{layer_item.identifier}'s sublayers does not match"
        )
        for sublayer_item in layer_item.sublayers:
            self.check_sublayer_tree(sublayer_item, identifiers_map)

    def check_prim_spec_children(self, prim_spec_item: PrimSpecItem, expected_children_prim_paths):
        """Assert the *direct* children of *prim_spec_item* match exactly."""
        paths = {child.path for child in prim_spec_item.children}
        self.assertEqual(paths, set(expected_children_prim_paths))

    def check_prim_spec_tree(self, prim_spec_item, expected_prim_paths):
        """Assert the full descendant path set of *prim_spec_item*."""
        paths = self.get_all_prim_spec_paths(prim_spec_item)
        self.assertEqual(paths, expected_prim_paths)

    async def prepare_empty_stage(self):
        """Attach a fresh anonymous root layer as the current stage."""
        root_layer = Sdf.Layer.CreateAnonymous("__root__")
        stage = Usd.Stage.Open(root_layer)
        await self.usd_context.attach_stage_async(stage)
        # Pump a few frames so the layer model observes the new stage.
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        return stage
| 8,020 | Python | 37.936893 | 119 | 0.647132 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_prim_spec_item.py | import omni.kit.test
import os
import uuid
import omni.client
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.layers.layer_settings import LayerSettings
from omni.kit.widget.layers.prim_spec_item import PrimSpecSpecifier
from .base import TestLayerUIBase
from pxr import Sdf, UsdGeom
class TestLayerPrimSpecItemAPI(TestLayerUIBase):
    """Tests for layer model refresh reacted to usd stage changes."""
    async def setUp(self):
        # Unique scratch folder per test; force missing-reference display on
        # (restored in tearDown) so the has_missing_reference field is populated.
        await super().setUp()
        self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
        self.test_folder += "/"
        await omni.client.create_folder_async(self.test_folder)
        self.enable_missing_reference = LayerSettings().show_missing_reference
        LayerSettings().show_missing_reference = True
        self.stage = await self.prepare_empty_stage()
    async def tearDown(self):
        # Restore the global setting before closing/cleaning up.
        LayerSettings().show_missing_reference = self.enable_missing_reference
        await self.usd_context.close_stage_async()
        await omni.client.delete_async(self.test_folder)
        await super().tearDown()
    async def test_prim_spec_item_properties(self):
        """Type name, specifier, missing-reference and instanceable flags on items."""
        temp_layer = Sdf.Layer.CreateAnonymous()
        typeless_prim = self.stage.DefinePrim("/test")
        cube_prim = self.stage.DefinePrim("/test/cube", "Cube")
        prim_with_reference = self.stage.DefinePrim("/test/reference", "Xform")
        prim_with_reference.GetReferences().AddReference(temp_layer.identifier)
        # Add invalid reference
        prim_with_reference.GetReferences().AddReference("../invalid_reference.usd")
        instanced_prim = self.stage.DefinePrim("/test/instanced", "Xform")
        instanced_prim.SetInstanceable(True)
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        self.assertEqual(len(root_layer_item.prim_specs), 1)
        typeless_prim_item = root_layer_item.prim_specs[0]
        self.assertEqual(len(typeless_prim_item.children), 3)
        cube_prim_item = typeless_prim_item.children[0]
        prim_with_reference_item = typeless_prim_item.children[1]
        instanced_prim_item = typeless_prim_item.children[2]
        self.check_prim_spec_regular_fields(
            typeless_prim_item, "test", "/test",
            children=["/test/cube", "/test/reference", "/test/instanced"],
            has_children=True
        )
        self.check_prim_spec_regular_fields(
            cube_prim_item, "cube", "/test/cube", type_name="Cube"
        )
        self.check_prim_spec_regular_fields(
            prim_with_reference_item, "reference", "/test/reference", type_name="Xform",
            specifier=PrimSpecSpecifier.DEF_WITH_REFERENCE,
            has_missing_reference=True
        )
        self.check_prim_spec_regular_fields(
            instanced_prim_item, "instanced", "/test/instanced", type_name="Xform",
            instanceable=True
        )
    async def test_prim_spec_item_filter(self):
        """Prefiltering marks the matched item and all of its ancestors as filtered."""
        self.stage.DefinePrim("/test/filter/keyword1/keyword2")
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        await self.app.next_update_async()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        self.assertEqual(len(root_layer_item.prim_specs), 1)
        test_prim = root_layer_item.prim_specs[0]
        filter_prim = test_prim.children[0]
        keyword1_prim = filter_prim.children[0]
        keyword2_prim = keyword1_prim.children[0]
        root_layer_item.prefilter("keyword1")
        self.assertTrue(layer_model.can_item_have_children(root_layer_item))
        # Ancestors of the match and the match itself are filtered-in;
        # its child "keyword2" is not.
        self.assertTrue(test_prim.filtered)
        self.assertTrue(filter_prim.filtered)
        self.assertTrue(keyword1_prim.filtered)
        self.assertFalse(keyword2_prim.filtered)
| 4,079 | Python | 42.870967 | 88 | 0.667075 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_layer_model.py | import omni.kit.test
import os
import uuid
import omni.client
import tempfile
from .base import TestLayerUIBase
from pxr import Sdf, Usd
from omni.kit.usd.layers import LayerUtils
class TestLayerModelAPI(TestLayerUIBase):
    """Tests for the layer model's public API: edit-mode switching, layer
    saving, sublayer order after save/reload, and drag-and-drop of sublayers.
    """
    async def setUp(self):
        # Unique scratch folder (uuid-named) so concurrent runs do not collide.
        await super().setUp()
        self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
        self.test_folder += "/"
        await omni.client.create_folder_async(self.test_folder)
        self.stage = await self.prepare_empty_stage()
    async def tearDown(self):
        # Close any stage still open before deleting the scratch folder.
        if self.usd_context.get_stage():
            await self.usd_context.close_stage_async()
        await omni.client.delete_async(self.test_folder)
        await super().tearDown()
    async def test_authoring_mode_switch(self):
        """The three edit modes (normal / auto-authoring / spec-linking) are
        mutually exclusive flags on the layer model."""
        layer_model = self.layers_instance.get_layer_model()
        layer_model.auto_authoring_mode = True
        self.assertTrue(layer_model.auto_authoring_mode)
        self.assertFalse(layer_model.normal_mode)
        self.assertFalse(layer_model.spec_linking_mode)
        layer_model.auto_authoring_mode = False
        self.assertFalse(layer_model.auto_authoring_mode)
        self.assertFalse(layer_model.spec_linking_mode)
        self.assertTrue(layer_model.normal_mode)
        layer_model.spec_linking_mode = True
        # Spec-linking switch takes effect asynchronously; pump two updates.
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        self.assertFalse(layer_model.auto_authoring_mode)
        self.assertTrue(layer_model.spec_linking_mode)
        self.assertFalse(layer_model.normal_mode)
        layer_model.spec_linking_mode = False
        self.assertFalse(layer_model.auto_authoring_mode)
        self.assertFalse(layer_model.spec_linking_mode)
        self.assertTrue(layer_model.normal_mode)
    async def test_api(self):
        """Smoke-test ``flatten_all_layers`` / ``save_layers`` and verify the
        edit target persists across a stage close/re-open."""
        layer_model = self.layers_instance.get_layer_model()
        # Test API call to make sure it does not throw errors.
        # It's simply called here without any checking since
        # test wrapper will catch console errors if it's failed.
        # For functionality tests, it's covered in test.command.py already.
        layer_model.flatten_all_layers()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # save the file
            tmp_file_path = os.path.join(tmpdirname, "tmp.usda")
            tmp_file_path2 = os.path.join(tmpdirname, "tmp2.usda")
            result = await omni.usd.get_context().save_as_stage_async(tmp_file_path)
            self.assertTrue(result)
            new_sublayer = Sdf.Layer.CreateNew(tmp_file_path2)
            new_sublayer.Save()
            stage = omni.usd.get_context().get_stage()
            stage.GetRootLayer().subLayerPaths.append(new_sublayer.identifier)
            stage.SetEditTarget(Usd.EditTarget(new_sublayer))
            # NOTE(review): this callback is defined but never passed to
            # save_layers, so its assertions never run — confirm intent.
            def on_save_done(success, error_str, saved_layers):
                self.assertTrue(success)
                self.assertEqual(saved_layers, [tmp_file_path])
            layer_model.save_layers([tmp_file_path])
            await omni.kit.app.get_app().next_update_async()
            await omni.kit.app.get_app().next_update_async()
            # Close stage and re-open it to see if edit target is saved correctly
            await omni.usd.get_context().close_stage_async()
            await omni.usd.get_context().open_stage_async(tmp_file_path)
            await omni.kit.app.get_app().next_update_async()
            await omni.kit.app.get_app().next_update_async()
            stage = omni.usd.get_context().get_stage()
            self.assertEqual(stage.GetEditTarget().GetLayer(), new_sublayer)
    async def _wait(self, frames=2):
        # Pump the Kit update loop so deferred layer-model updates settle.
        for i in range(frames):
            await omni.kit.app.get_app().next_update_async()
    async def test_layer_move_and_reload(self):
        """Moving a sublayer, saving, then force-reloading the root layer keeps
        the new sublayer order in the layer model."""
        usd_context = omni.usd.get_context()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # save the file
            tmp_file_path = os.path.join(tmpdirname, "tmp.usd")
            result = await usd_context.save_as_stage_async(tmp_file_path)
            self.assertTrue(result)
            layer_model = self.layers_instance.get_layer_model()
            stage = layer_model.usd_context.get_stage()
            sublayer1 = Sdf.Layer.CreateAnonymous()
            sublayer2 = Sdf.Layer.CreateAnonymous()
            sublayer3 = Sdf.Layer.CreateAnonymous()
            stage.GetRootLayer().subLayerPaths.append(sublayer1.identifier)
            stage.GetRootLayer().subLayerPaths.append(sublayer2.identifier)
            stage.GetRootLayer().subLayerPaths.append(sublayer3.identifier)
            await self._wait()
            root_layer_item = layer_model.root_layer_item
            sublayer1_item = root_layer_item.sublayers[0]
            sublayer2_item = root_layer_item.sublayers[1]
            sublayer3_item = root_layer_item.sublayers[2]
            self.assertEqual(sublayer1_item.identifier, sublayer1.identifier)
            self.assertEqual(sublayer2_item.identifier, sublayer2.identifier)
            self.assertEqual(sublayer3_item.identifier, sublayer3.identifier)
            root_layer = stage.GetRootLayer()
            # Swap sublayer 0 and 1 in the root layer, then save and reload.
            LayerUtils.move_layer(root_layer.identifier, 0, root_layer.identifier, 1, True)
            root_layer.Save()
            await self._wait()
            root_layer.Reload(True)
            # Drop strong references before re-querying the model.
            root_layer = None
            stage = None
            root_layer_item = layer_model.root_layer_item
            sublayer1_item = root_layer_item.sublayers[0]
            sublayer2_item = root_layer_item.sublayers[1]
            sublayer3_item = root_layer_item.sublayers[2]
            # Order after reload reflects the swap: 2, 1, 3.
            self.assertEqual(sublayer1_item.identifier, sublayer2.identifier)
            self.assertEqual(sublayer2_item.identifier, sublayer1.identifier)
            self.assertEqual(sublayer3_item.identifier, sublayer3.identifier)
            await usd_context.close_stage_async()
    async def test_drag_and_drop_sublayer(self):
        """Drop-acceptance rules (including locked layers) and the resulting
        sublayer order after drops at various indices."""
        layer_model = self.layers_instance.get_layer_model()
        stage = layer_model.usd_context.get_stage()
        sublayer1 = Sdf.Layer.CreateAnonymous()
        sublayer2 = Sdf.Layer.CreateAnonymous()
        sublayer3 = Sdf.Layer.CreateAnonymous()
        stage.GetRootLayer().subLayerPaths.append(sublayer1.identifier)
        stage.GetRootLayer().subLayerPaths.append(sublayer2.identifier)
        stage.GetRootLayer().subLayerPaths.append(sublayer3.identifier)
        await self._wait()
        root_layer_item = layer_model.root_layer_item
        sublayer1_item = root_layer_item.sublayers[0]
        sublayer2_item = root_layer_item.sublayers[1]
        sublayer3_item = root_layer_item.sublayers[2]
        # A layer cannot be dropped onto itself; any other pairing is allowed.
        self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, -1))
        self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, 0))
        self.assertTrue(layer_model.drop_accepted(sublayer1_item, sublayer2_item, -1))
        self.assertTrue(layer_model.drop_accepted(sublayer1_item, sublayer2_item, 0))
        self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, -1))
        self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, 0))
        # Locking sublayer1 forbids dropping INTO it (drop_location -1),
        # but reordering around it is still accepted.
        LayerUtils.set_layer_lock_status(stage.GetRootLayer(), sublayer1_item.identifier, True)
        self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, -1))
        self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, 0))
        self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer2_item, -1))
        self.assertTrue(layer_model.drop_accepted(sublayer1_item, sublayer2_item, 0))
        self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, -1))
        self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, 0))
        # Drop sublayer3 before index 1 -> order 1, 3, 2.
        layer_model.drop(sublayer2_item, sublayer3_item, 1)
        await self._wait()
        sublayer1_item = root_layer_item.sublayers[0]
        sublayer2_item = root_layer_item.sublayers[1]
        sublayer3_item = root_layer_item.sublayers[2]
        self.assertEqual(sublayer1_item.identifier, sublayer1.identifier)
        self.assertEqual(sublayer2_item.identifier, sublayer3.identifier)
        self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
        # Drop sublayer1 at index 2 -> order 3, 1, 2.
        layer_model.drop(sublayer2_item, sublayer1_item, 2)
        await self._wait()
        sublayer1_item = root_layer_item.sublayers[0]
        sublayer2_item = root_layer_item.sublayers[1]
        sublayer3_item = root_layer_item.sublayers[2]
        self.assertEqual(sublayer1_item.identifier, sublayer3.identifier)
        self.assertEqual(sublayer2_item.identifier, sublayer1.identifier)
        self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
        # Drop location -1 reparents: sublayer2 becomes a child of sublayer3.
        layer_model.drop(sublayer3_item, sublayer2_item, -1)
        await self._wait()
        self.assertEqual(len(root_layer_item.sublayers), 2)
        self.assertEqual(len(sublayer3_item.sublayers), 1)
        self.assertEqual(sublayer3_item.sublayers[0].identifier, sublayer2_item.identifier)
| 9,099 | Python | 46.643979 | 95 | 0.668315 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_legacy_layer_cpp_bindings.py | import carb
import omni.kit.test
import omni.kit.undo
import omni.kit.commands
import omni.timeline
import omni.usd
import tempfile
import omni.client
from omni.kit.widget.layers import LayerUtils
from pxr import Sdf, UsdGeom
from .base import TestLayerNonUIBase
class TestCppBindings(TestLayerNonUIBase):
    """Tests for the legacy C++ layer bindings exposed via
    ``usd_context.get_layers()``: muteness scope/state, edit modes, locking,
    layer-name resolution, and used/dirty sublayer enumeration.
    """
    async def setUp(self):
        await super().setUp()
        # Disable client retries so requests to fake servers fail fast.
        self.previous_retry_values = omni.client.set_retries(0, 0, 0)
    async def tearDown(self):
        omni.client.set_retries(*self.previous_retry_values)
        await super().tearDown()
    def check_sublayers(self, sublayer_paths, expected_layer_identifiers):
        # Order-insensitive comparison: sort both sides before asserting.
        sublayer_paths = sorted(sublayer_paths)
        expected_layer_identifiers = sorted(expected_layer_identifiers)
        self.assertTrue(
            sublayer_paths == expected_layer_identifiers,
            f"Sublayers array does not match, got: {sublayer_paths}, expected: {expected_layer_identifiers}",
        )
    async def test_layers_state_apis(self):
        """Exercise muteness scope, local/global mute queries, edit-mode
        switching, lock state, and auto-authoring layer detection."""
        layers = self.usd_context.get_layers()
        stage = self.usd_context.get_stage()
        # Muteness scope toggles between local (default) and global.
        self.assertFalse(layers.is_layer_muteness_global())
        layers.set_layer_muteness_scope(True)
        self.assertTrue(layers.is_layer_muteness_global())
        layers.set_layer_muteness_scope(False)
        self.assertFalse(layers.is_layer_muteness_global())
        layer2 = Sdf.Layer.CreateAnonymous()
        LayerUtils.insert_sublayer(stage.GetRootLayer(), 0, layer2.identifier)
        self.assertFalse(layers.is_layer_locally_muted(layer2.identifier))
        self.assertFalse(layers.is_layer_globally_muted(layer2.identifier))
        stage = self.usd_context.get_stage()
        # Muting through the stage records a LOCAL mute only.
        stage.MuteLayer(layer2.identifier)
        await omni.kit.app.get_app().next_update_async()
        layers.set_layer_muteness_scope(False)
        self.assertTrue(layers.is_layer_locally_muted(layer2.identifier))
        self.assertFalse(layers.is_layer_globally_muted(layer2.identifier))
        self.assertFalse(omni.usd.is_layer_globally_muted(self.usd_context, layer2.identifier))
        # Switching scope to global does not promote the local mute.
        layers.set_layer_muteness_scope(True)
        self.assertTrue(layers.is_layer_muteness_global())
        self.assertTrue(layers.is_layer_locally_muted(layer2.identifier))
        self.assertFalse(layers.is_layer_globally_muted(layer2.identifier))
        # Edit mode round-trips between NORMAL and AUTO_AUTHORING.
        self.assertEqual(layers.get_layer_edit_mode(), omni.usd.LayerEditMode.NORMAL)
        layers.set_layer_edit_mode(omni.usd.LayerEditMode.AUTO_AUTHORING)
        self.assertEqual(layers.get_layer_edit_mode(), omni.usd.LayerEditMode.AUTO_AUTHORING)
        layers.set_layer_edit_mode(omni.usd.LayerEditMode.NORMAL)
        self.assertEqual(layers.get_layer_edit_mode(), omni.usd.LayerEditMode.NORMAL)
        # Lock state round-trips; an anonymous layer is writable but not savable.
        self.assertFalse(layers.is_layer_locked(layer2.identifier))
        layers.set_layer_lock_state(layer2.identifier, True)
        self.assertTrue(layers.is_layer_locked(layer2.identifier))
        layers.set_layer_lock_state(layer2.identifier, False)
        self.assertFalse(layers.is_layer_locked(layer2.identifier))
        self.assertFalse(layers.is_layer_locked_by_other(layer2.identifier))
        self.assertEqual(layers.get_layer_lock_user_name(layer2.identifier), "")
        self.assertTrue(layers.is_layer_writable(layer2.identifier))
        self.assertFalse(layers.is_layer_savable(layer2.identifier))
        # In auto-authoring mode the edit target is an internal auto-authoring
        # layer; active_authoring_layer_context exposes the real default layer.
        layers.set_layer_edit_mode(omni.usd.LayerEditMode.AUTO_AUTHORING)
        layers.set_default_edit_layer_identifier(layer2.identifier)
        self.assertEqual(layers.get_default_edit_layer_identifier(), layer2.identifier)
        self.assertFalse(layers.is_auto_authoring_layer(layer2.identifier))
        edit_target = stage.GetEditTarget()
        self.assertTrue(layers.is_auto_authoring_layer(edit_target.GetLayer().identifier))
        with omni.usd.active_authoring_layer_context(self.usd_context):
            edit_target = stage.GetEditTarget()
            self.assertFalse(layers.is_auto_authoring_layer(edit_target.GetLayer().identifier))
            self.assertEqual(edit_target.GetLayer().identifier, layer2.identifier)
    async def test_get_layer_name(self):
        """get_layer_name returns the basename (URL-decoded) for file and
        omniverse URLs, and the identifier itself for anonymous layers."""
        layers = self.usd_context.get_layers()
        self.assertEqual("abc.usd", layers.get_layer_name("c:/a/b/abc.usd"))
        self.assertEqual("abc.usda", layers.get_layer_name("c:/a/b/abc.usda"))
        self.assertEqual("abc.usda", layers.get_layer_name("omniverse://ov-invalid-fake-server/a/b/abc.usda"))
        layer = Sdf.Layer.CreateAnonymous()
        self.assertEqual(layer.identifier, layers.get_layer_name(layer.identifier))
        self.assertEqual("", layers.get_layer_name(""))
        # Percent-encoded characters are decoded ("%20" -> space).
        self.assertEqual("a b c.usda", layers.get_layer_name("omniverse://ov-invalid-fake-server/a/b/a%20b%20c.usda"))
    async def test_get_used_sublayers(self):
        """get_used_sublayers tracks the transitive sublayer set as layers
        (including nested ones) are inserted and removed."""
        layers = self.usd_context.get_layers()
        stage = self.usd_context.get_stage()
        root_layer = stage.GetRootLayer()
        sublayers = layers.get_used_sublayers()
        self.check_sublayers(sublayers, [root_layer.identifier])
        layer0 = Sdf.Layer.CreateAnonymous()
        LayerUtils.insert_sublayer(root_layer, 0, layer0.identifier)
        sublayers = layers.get_used_sublayers()
        self.check_sublayers(sublayers, [root_layer.identifier, layer0.identifier])
        layer1 = Sdf.Layer.CreateAnonymous()
        LayerUtils.insert_sublayer(root_layer, 1, layer1.identifier)
        sublayers = layers.get_used_sublayers()
        self.check_sublayers(sublayers, [root_layer.identifier, layer0.identifier, layer1.identifier])
        # Build a nested tree: root -> layer2 -> (layer3, layer4 -> layer5).
        layer2 = Sdf.Layer.CreateAnonymous()
        layer3 = Sdf.Layer.CreateAnonymous()
        layer4 = Sdf.Layer.CreateAnonymous()
        layer5 = Sdf.Layer.CreateAnonymous()
        LayerUtils.insert_sublayer(layer2, 0, layer3.identifier)
        LayerUtils.insert_sublayer(layer2, 1, layer4.identifier)
        LayerUtils.insert_sublayer(layer4, 0, layer5.identifier)
        LayerUtils.insert_sublayer(root_layer, 2, layer2.identifier)
        sublayers = layers.get_used_sublayers()
        self.check_sublayers(
            sublayers,
            [
                root_layer.identifier,
                layer0.identifier,
                layer1.identifier,
                layer2.identifier,
                layer3.identifier,
                layer4.identifier,
                layer5.identifier,
            ],
        )
        # Removes layer0
        LayerUtils.remove_sublayer(root_layer, 0)
        sublayers = layers.get_used_sublayers()
        self.check_sublayers(
            sublayers,
            [
                root_layer.identifier,
                layer1.identifier,
                layer2.identifier,
                layer3.identifier,
                layer4.identifier,
                layer5.identifier,
            ],
        )
        # Removes layer2 will remove layer2, layer3, layer4, layer5
        LayerUtils.remove_sublayer(root_layer, 1)
        sublayers = layers.get_used_sublayers()
        self.check_sublayers(sublayers, [root_layer.identifier, layer1.identifier])
    async def test_get_dirty_sublayers(self):
        """get_dirty_sublayers reports exactly the layers with unsaved edits
        (layers touched by inserting children, or written via an edit target)."""
        usd_context = omni.usd.get_context()
        layers = usd_context.get_layers()
        stage = usd_context.get_stage()
        root_layer = stage.GetRootLayer()
        with tempfile.TemporaryDirectory() as tempdir:
            # Sdf.Layer.New creates in-memory layers with file identifiers
            # (never saved), so dirtiness can be observed without disk I/O.
            format = Sdf.FileFormat.FindByExtension(".usd")
            layer0 = Sdf.Layer.New(format, f"{tempdir}/1.usd")
            layer1 = Sdf.Layer.New(format, f"{tempdir}/2.usd")
            layer2 = Sdf.Layer.New(format, f"{tempdir}/3.usd")
            layer3 = Sdf.Layer.New(format, f"{tempdir}/4.usd")
            layer4 = Sdf.Layer.New(format, f"{tempdir}/5.usd")
            layer5 = Sdf.Layer.New(format, f"{tempdir}/6.usd")
            LayerUtils.insert_sublayer(root_layer, 0, layer0.identifier, False)
            LayerUtils.insert_sublayer(root_layer, 0, layer1.identifier, False)
            LayerUtils.insert_sublayer(layer2, 0, layer3.identifier, False)
            LayerUtils.insert_sublayer(layer2, 0, layer4.identifier, False)
            LayerUtils.insert_sublayer(layer4, 0, layer5.identifier, False)
            LayerUtils.insert_sublayer(root_layer, 0, layer2.identifier, False)
            sublayers = layers.get_used_sublayers()
            self.check_sublayers(
                sublayers,
                [
                    root_layer.identifier,
                    layer0.identifier,
                    layer1.identifier,
                    layer2.identifier,
                    layer3.identifier,
                    layer4.identifier,
                    layer5.identifier,
                ],
            )
            # Checkes dirtiness of layers since layer2 and layer4 have been touched.
            # They should be dirty at this moment
            dirty_sublayers = layers.get_dirty_sublayers()
            self.check_sublayers(dirty_sublayers, [layer2.identifier, layer4.identifier])
            # Touches layer1
            LayerUtils.set_edit_target(stage, layer1.identifier)
            UsdGeom.Mesh.Define(stage, "/root/test")
            dirty_sublayers = layers.get_dirty_sublayers()
            self.check_sublayers(dirty_sublayers, [layer1.identifier, layer2.identifier, layer4.identifier])
| 9,314 | Python | 45.113861 | 118 | 0.654069 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/__init__.py | from .test_misc import *
from .test_usd_events import *
from .test_prim_spec_item import *
from .test_performance import *
from .test_layer_model import *
from .test_path_utils import *
from .test_extension import *
from .test_layer_mode_utils import *
from .test_selection import *
from .drag_drop_single import *
from .drag_drop_multi import *
from .test_live_session import *
from .test_context_menu import *
from .test_window_ui_states import *
from .test_material_watcher import *
from .test_hotkey import *
# Legacy tests
from .test_legacy_layer_cpp_bindings import *
from .test_legacy_edit_mode import *
| 612 | Python | 28.190475 | 45 | 0.754902 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_layer_mode_utils.py | import omni.kit.test
import os
import uuid
import omni.client
import omni.kit.ui
from omni.kit.widget.layers.layer_settings import LayerSettings
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils
from omni.kit.widget.prompt import PromptManager
from omni.kit.usd.layers import LayerUtils
from .base import TestLayerUIBase
from pxr import Sdf, Usd, UsdGeom, Gf
class TestLayerModelUtils(TestLayerUIBase):
    async def setUp(self):
        """Create an empty stage, a unique scratch folder, and fail-fast client
        settings; remember values that must be restored in tearDown."""
        await super().setUp()
        # Disable retries so calls against fake/invalid servers fail fast.
        self.previous_retry_values = omni.client.set_retries(0, 0, 0)
        self.stage = await self.prepare_empty_stage()
        # Saved so tearDown can restore the user's warning preference.
        self.old_warning_enabled = LayerSettings().show_merge_or_flatten_warning
        self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
        self.test_folder += "/"
        await omni.client.create_folder_async(self.test_folder)
    async def tearDown(self):
        """Restore settings saved in setUp, close the stage, and delete the
        scratch folder (order matters: close before delete)."""
        LayerSettings().show_merge_or_flatten_warning = self.old_warning_enabled
        await self.usd_context.close_stage_async()
        await omni.client.delete_async(self.test_folder)
        omni.client.set_retries(*self.previous_retry_values)
        await super().tearDown()
async def _wait(self, frames=2):
for i in range(frames):
await omni.kit.app.get_app().next_update_async()
    async def test_merge_layers(self):
        """merge_layer_down shows a confirmation prompt when the warning
        setting is on (OK merges, Cancel aborts) and merges silently when off;
        merges are undoable and no prompt objects leak."""
        layer_model = self.layers_instance.get_layer_model()
        stage = layer_model.usd_context.get_stage()
        layer0 = Sdf.Layer.CreateAnonymous()
        layer1 = Sdf.Layer.CreateAnonymous()
        stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
        stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
        await self._wait()
        # Enable prompt and try to merge with ok button.
        LayerSettings().show_merge_or_flatten_warning = True
        LayerModelUtils.merge_layer_down(layer_model.root_layer_item.sublayers[0])
        await omni.kit.app.get_app().next_update_async()
        await omni.kit.app.get_app().next_update_async()
        prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
        self.assertTrue(prompt)
        prompt._on_ok_button_fn()
        await self._wait()
        # layer0 merged into layer1; only layer1 remains.
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
        self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer1.identifier)
        omni.kit.undo.undo()
        await self._wait()
        # Enable prompt and cancel merge
        LayerModelUtils.merge_layer_down(layer_model.root_layer_item.sublayers[0])
        await self._wait()
        prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
        self.assertTrue(prompt)
        prompt._on_cancel_button_fn()
        await self._wait()
        # Cancel leaves both sublayers untouched.
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 2)
        self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer0.identifier)
        self.assertEqual(layer_model.root_layer_item.sublayers[1].identifier, layer1.identifier)
        omni.kit.undo.undo()
        await self._wait()
        # Disable prompt and try to merge
        LayerSettings().show_merge_or_flatten_warning = False
        LayerModelUtils.merge_layer_down(layer_model.root_layer_item.sublayers[0])
        await self._wait()
        prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
        self.assertFalse(prompt)
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
        self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer1.identifier)
        # Make sure that all prompts are released
        self.assertEqual(len(PromptManager._prompts), 0)
    async def test_flatten_layers(self):
        """flatten_all_layers shows a confirmation prompt when the warning
        setting is on (OK flattens, Cancel aborts) and flattens silently when
        off; flattening removes all sublayers and is undoable."""
        layer_model = self.layers_instance.get_layer_model()
        stage = layer_model.usd_context.get_stage()
        layer0 = Sdf.Layer.CreateAnonymous()
        layer1 = Sdf.Layer.CreateAnonymous()
        stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
        stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
        await self._wait()
        # Enable prompt and try to flatten with ok button.
        LayerSettings().show_merge_or_flatten_warning = True
        LayerModelUtils.flatten_all_layers(layer_model)
        await self._wait()
        prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
        self.assertTrue(prompt)
        prompt._on_ok_button_fn()
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
        omni.kit.undo.undo()
        await self._wait()
        # Enable prompt and cancel flatten
        LayerModelUtils.flatten_all_layers(layer_model)
        await self._wait()
        prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
        self.assertTrue(prompt)
        prompt._on_cancel_button_fn()
        await self._wait()
        # Cancel leaves both sublayers untouched.
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 2)
        self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer0.identifier)
        self.assertEqual(layer_model.root_layer_item.sublayers[1].identifier, layer1.identifier)
        omni.kit.undo.undo()
        await self._wait()
        # Disable prompt and try to merge
        LayerSettings().show_merge_or_flatten_warning = False
        LayerModelUtils.flatten_all_layers(layer_model)
        await self._wait()
        prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
        self.assertFalse(prompt)
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
    async def test_layer_lock(self):
        """Locking a layer item recursively locks its file-backed sublayers;
        anonymous sublayers stay unlocked."""
        layer_model = self.layers_instance.get_layer_model()
        stage = layer_model.usd_context.get_stage()
        format = Sdf.FileFormat.FindByExtension(".usd")
        # In-memory layers with server-style identifiers (never actually saved).
        layer0 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test.usd")
        layer1 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test2.usd")
        layer2 = Sdf.Layer.CreateAnonymous()
        stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
        layer0.subLayerPaths.append(layer1.identifier)
        layer0.subLayerPaths.append(layer2.identifier)
        # Extra update frames: the model needs longer to resolve remote-looking URLs.
        await self._wait(10)
        LayerModelUtils.lock_layer(layer_model.root_layer_item.sublayers[0], True)
        await self._wait()
        self.assertTrue(layer_model.root_layer_item.sublayers[0].locked)
        self.assertTrue(layer_model.root_layer_item.sublayers[0].sublayers[0].locked)
        # Anonymous layer cannot be locked.
        self.assertFalse(layer_model.root_layer_item.sublayers[0].sublayers[1].locked)
    async def test_move_sublayer(self):
        """can_move_layer rules (self-moves forbidden, locked layers cannot be
        reparented into) and the resulting order/parenting after move_layer."""
        layer_model = self.layers_instance.get_layer_model()
        stage = layer_model.usd_context.get_stage()
        sublayer1 = Sdf.Layer.CreateAnonymous()
        sublayer2 = Sdf.Layer.CreateAnonymous()
        sublayer3 = Sdf.Layer.CreateAnonymous()
        stage.GetRootLayer().subLayerPaths.append(sublayer1.identifier)
        stage.GetRootLayer().subLayerPaths.append(sublayer2.identifier)
        stage.GetRootLayer().subLayerPaths.append(sublayer3.identifier)
        await self._wait()
        root_layer_item = layer_model.root_layer_item
        sublayer1_item = root_layer_item.sublayers[0]
        sublayer2_item = root_layer_item.sublayers[1]
        sublayer3_item = root_layer_item.sublayers[2]
        # A layer cannot be moved onto itself; any other pairing is allowed.
        self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, -1))
        self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, 0))
        self.assertTrue(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, -1))
        self.assertTrue(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, 0))
        self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, -1))
        self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, 0))
        # Locking sublayer1 forbids moving INTO it (location -1), but
        # reordering around it is still allowed.
        LayerUtils.set_layer_lock_status(stage.GetRootLayer(), sublayer1_item.identifier, True)
        self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, -1))
        self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, 0))
        self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, -1))
        self.assertTrue(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, 0))
        self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, -1))
        self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, 0))
        # Cannot move it as sublayer1 is locked
        LayerModelUtils.move_layer(sublayer1_item, sublayer2_item, -1)
        await self._wait()
        self.assertEqual(len(sublayer1_item.sublayers), 0)
        # Move sublayer3 to index 1 -> order 1, 3, 2.
        LayerModelUtils.move_layer(root_layer_item, sublayer3_item, 1)
        await self._wait()
        sublayer1_item = root_layer_item.sublayers[0]
        sublayer2_item = root_layer_item.sublayers[1]
        sublayer3_item = root_layer_item.sublayers[2]
        self.assertEqual(sublayer1_item.identifier, sublayer1.identifier)
        self.assertEqual(sublayer2_item.identifier, sublayer3.identifier)
        self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
        # Move sublayer1 to index 2 -> order 3, 1, 2.
        LayerModelUtils.move_layer(root_layer_item, sublayer1_item, 2)
        await self._wait()
        sublayer1_item = root_layer_item.sublayers[0]
        sublayer2_item = root_layer_item.sublayers[1]
        sublayer3_item = root_layer_item.sublayers[2]
        self.assertEqual(sublayer1_item.identifier, sublayer3.identifier)
        self.assertEqual(sublayer2_item.identifier, sublayer1.identifier)
        self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
        # Location -1 reparents: sublayer2 becomes a child of sublayer3.
        LayerModelUtils.move_layer(sublayer3_item, sublayer2_item, -1)
        await self._wait()
        self.assertEqual(len(root_layer_item.sublayers), 2)
        self.assertEqual(len(sublayer3_item.sublayers), 1)
        self.assertEqual(sublayer3_item.sublayers[0].identifier, sublayer2_item.identifier)
async def test_remove_sublayers(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
format = Sdf.FileFormat.FindByExtension(".usd")
layer0 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test.usd")
stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
layer0.customLayerData["abc"] = "test"
with Usd.EditContext(stage, layer0):
UsdGeom.Cube.Define(stage, "/prim/test")
self.assertTrue(layer0.dirty)
await self._wait()
LayerModelUtils.remove_layer(layer_model.root_layer_item.sublayers[0])
await self._wait()
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
self.assertTrue(prompt)
prompt._on_ok_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
omni.kit.undo.undo()
await self._wait()
LayerModelUtils.remove_layer(layer_model.root_layer_item.sublayers[0])
await self._wait()
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
self.assertTrue(prompt)
prompt._on_cancel_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
omni.kit.undo.undo()
await self._wait()
    async def test_remove_sublayers(self):
        """Removing multiple sublayers at once (one of them dirty) prompts for
        confirmation; OK removes both (undoable), Cancel keeps both."""
        layer_model = self.layers_instance.get_layer_model()
        stage = layer_model.usd_context.get_stage()
        format = Sdf.FileFormat.FindByExtension(".usd")
        layer0 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test.usd")
        layer1 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test1.usd")
        stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
        stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
        # Dirty layer0 so the removal prompt is triggered.
        layer0.customLayerData["abc"] = "test"
        with Usd.EditContext(stage, layer0):
            UsdGeom.Cube.Define(stage, "/prim/test")
        self.assertTrue(layer0.dirty)
        await self._wait()
        # OK on the prompt removes both sublayers.
        LayerModelUtils.remove_layers(layer_model.root_layer_item.sublayers)
        await self._wait()
        prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
        self.assertTrue(prompt)
        prompt._on_ok_button_fn()
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
        omni.kit.undo.undo()
        await self._wait()
        # Cancel on the prompt keeps both sublayers.
        LayerModelUtils.remove_layers(layer_model.root_layer_item.sublayers)
        await self._wait()
        prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
        self.assertTrue(prompt)
        prompt._on_cancel_button_fn()
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 2)
        omni.kit.undo.undo()
        await self._wait()
def _skip_existing_file_prompt(self, click_yes=False):
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Overwrite')
if prompt:
if click_yes:
prompt._on_ok_button_fn()
else:
prompt._on_cancel_button_fn()
    async def test_create_sublayer(self):
        """create_sublayer drives the file picker: creating a new file inserts
        it as a sublayer (undoable), cancelling the picker changes nothing, and
        re-creating over an existing file overwrites its content."""
        layer_model = self.layers_instance.get_layer_model()
        stage = layer_model.usd_context.get_stage()
        def _skip_transfer_content_prompt():
            # Dismiss the optional "Transfer Content" dialog if shown.
            prompt = PromptManager.query_prompt_by_title("Transfer Content")
            if prompt:
                prompt._on_cancel_button_fn()
        # First create
        LayerModelUtils.create_sublayer(layer_model.root_layer_item, 0)
        await self._wait()
        # The module-level file picker is (re)created by create_sublayer.
        from omni.kit.widget.layers.layer_model_utils import _file_picker
        self.assertTrue(_file_picker)
        path = os.path.join(self.test_folder, "test.usd")
        _file_picker._on_file_open([path])
        _file_picker.hide()
        self._skip_existing_file_prompt(True)
        _skip_transfer_content_prompt()
        status, _ = omni.client.stat(path)
        self.assertEqual(status, omni.client.Result.OK)
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
        # Change the content of it for further comparison
        with Usd.EditContext(stage, layer_model.root_layer_item.sublayers[0].layer):
            UsdGeom.Cube.Define(stage, "/world/test")
        stage.Save()
        omni.kit.undo.undo()
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
        def _check_content(old_content):
            # Make sure stage is saved and content is there for further comparison
            layer = Sdf.Layer.FindOrOpen(path)
            self.assertTrue(layer)
            prim = layer.GetPrimAtPath("/world/test")
            if old_content:
                self.assertTrue(prim)
            else:
                self.assertFalse(prim)
            layer = None
        _check_content(True)
        # Open create file dialog and cancel it
        LayerModelUtils.create_sublayer(layer_model.root_layer_item, 0)
        from omni.kit.widget.layers.layer_model_utils import _file_picker
        self.assertTrue(_file_picker)
        _file_picker._on_cancel_open()
        _file_picker.hide()
        _check_content(True)
        # Second create with override
        LayerModelUtils.create_sublayer(layer_model.root_layer_item, 0)
        from omni.kit.widget.layers.layer_model_utils import _file_picker
        self.assertTrue(_file_picker)
        _file_picker._on_file_open([path])
        _file_picker.hide()
        self._skip_existing_file_prompt(True)
        _skip_transfer_content_prompt()
        # Overwriting replaces the file: the old /world/test prim is gone.
        _check_content(False)
def _create_layer(self, path):
layer = Sdf.Layer.FindOrOpen(path)
if not layer:
layer = Sdf.Layer.CreateNew(path)
return layer
    async def test_insert_sublayer(self):
        """insert_sublayer via the file picker: inserting the same file three
        times yields one sublayer (duplicates skipped), and a multi-selection
        inserts all selected files."""
        layer_model = self.layers_instance.get_layer_model()
        # Create layer to be inserted
        path = os.path.join(self.test_folder, "test.usd")
        self._create_layer(path)
        path2 = os.path.join(self.test_folder, "test2.usd")
        self._create_layer(path2)
        path3 = os.path.join(self.test_folder, "test3.usd")
        self._create_layer(path3)
        LayerModelUtils.insert_sublayer(layer_model.root_layer_item, 0)
        await self._wait()
        # Only the first one will be successfully.
        from omni.kit.widget.layers.layer_model_utils import _file_picker
        for i in range(3):
            self.assertTrue(_file_picker)
            _file_picker._on_file_open([path])
            _file_picker.hide()
            await self._wait()
            self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
        # Insert multiple layers at the same time
        _file_picker._on_file_open([path2, path3])
        _file_picker.hide()
        await self._wait()
        self.assertEqual(len(layer_model.root_layer_item.sublayers), 3)
        # Compare normalized paths order-insensitively.
        all_sublayers = []
        for sublayer_item in layer_model.root_layer_item.sublayers:
            all_sublayers.append(os.path.normpath(sublayer_item.identifier))
        expected_sublayers = [os.path.normpath(path), os.path.normpath(path2), os.path.normpath(path3)]
        self.assertEqual(set(all_sublayers), set(expected_sublayers))
async def test_move_prim_spec(self):
    """Move a prim spec between layers, covering undo and the merge-conflict prompt."""
    layer_model = self.layers_instance.get_layer_model()
    stage = layer_model.usd_context.get_stage()
    layer0 = Sdf.Layer.CreateAnonymous()
    stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
    await self._wait()
    cube_prim_path = "/World/test"
    root_item = layer_model.root_layer_item
    sublayer_item0 = layer_model.root_layer_item.sublayers[0]
    # Author the cube in the root layer.
    with Usd.EditContext(stage, root_item.layer):
        cube = UsdGeom.Cube.Define(stage, cube_prim_path)
        cube_prim = cube.GetPrim()
    await self._wait()
    # Move prim without conflict.
    world_prim = root_item.absolute_root_spec.children[0]
    self.assertTrue(root_item.layer.GetPrimAtPath(cube_prim_path))
    LayerModelUtils.move_prim_spec(layer_model, sublayer_item0, world_prim)
    world_prim = None  # presumably released so the handle does not dangle after the move -- confirm
    await self._wait()
    self.assertFalse(root_item.layer.GetPrimAtPath(cube_prim_path))
    omni.kit.undo.undo()
    await self._wait()
    self.assertTrue(root_item.layer.GetPrimAtPath(cube_prim_path))
    # Author an opinion on the cube in the sublayer to create a conflict.
    with Usd.EditContext(stage, sublayer_item0.layer):
        UsdGeom.XformCommonAPI(cube_prim).SetTranslate(Gf.Vec3d(0, 0, 0))
    await self._wait()
    self.assertTrue(sublayer_item0.layer.GetPrimAtPath(cube_prim_path))
    # Move prim with conflict.
    world_prim = root_item.absolute_root_spec.children[0]
    LayerModelUtils.move_prim_spec(layer_model, sublayer_item0, world_prim)
    # A prompt must appear to warn the user about merging the conflicting specs.
    prompt = PromptManager.query_prompt_by_title(
        f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Merge Prim Spec',
    )
    self.assertTrue(prompt)
    # Cancel it and make sure nothing moved.
    prompt._on_cancel_button_fn()
    self.assertTrue(root_item.layer.GetPrimAtPath(cube_prim_path))
    LayerModelUtils.move_prim_spec(layer_model, sublayer_item0, world_prim)
    prompt = PromptManager.query_prompt_by_title(
        f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Merge Prim Spec',
    )
    self.assertTrue(prompt)
    # Confirm it and make sure the spec left the root layer.
    prompt._on_ok_button_fn()
    self.assertFalse(root_item.layer.GetPrimAtPath(cube_prim_path))
async def test_layer_save_as(self):
    """'Save Layer As' pre-fills the picker with the layer's display name."""
    # Test for https://nvidia-omniverse.atlassian.net/browse/OM-35016
    layer_model = self.layers_instance.get_layer_model()
    LayerModelUtils.save_layer_as(layer_model.root_layer_item)
    await self._wait(10)
    from omni.kit.widget.layers.layer_model_utils import _file_picker
    self.assertTrue(_file_picker)
    # The picker must default to the current layer's display name.
    self.assertEqual(_file_picker.get_current_filename(), layer_model.root_layer_item.layer.GetDisplayName())
    saved_file = os.path.join(self.test_folder, "test_layer_save_as.usd")
    _file_picker._on_file_open([saved_file])
    _file_picker.hide()
    # Confirming the picker must actually write the file to disk.
    self.assertTrue(os.path.exists(saved_file))
| 20,942 | Python | 41.915984 | 136 | 0.663165 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_extension.py | import omni.kit.test
from pxr import Sdf
from .base import TestLayerUIBase
class TestLayerExtension(TestLayerUIBase):
    """UI-level checks for inserting sublayers via the layer-window icon menu."""

    async def setUp(self):
        await super().setUp()
        self.stage = await self.prepare_empty_stage()

    async def tearDown(self):
        await self.usd_context.close_stage_async()

    def _assert_single_sublayer(self, root_layer, expected_identifier):
        # The root layer must hold exactly one sublayer: the expected one.
        self.assertEqual(len(root_layer.subLayerPaths), 1)
        self.assertEqual(root_layer.subLayerPaths[0], expected_identifier)

    async def test_layer_insert(self):
        """Insertion succeeds once and rejects both the root layer and duplicates."""
        anonymous = Sdf.Layer.CreateAnonymous()
        root_layer = self.stage.GetRootLayer()
        # A fresh anonymous layer can be inserted.
        self.layers_instance._on_icon_menu_click(None, anonymous.identifier)
        self._assert_single_sublayer(root_layer, anonymous.identifier)
        # The root layer itself must be rejected.
        self.layers_instance._on_icon_menu_click(None, root_layer.identifier)
        self._assert_single_sublayer(root_layer, anonymous.identifier)
        # Re-inserting the same layer must be rejected as a duplicate.
        self.layers_instance._on_icon_menu_click(None, anonymous.identifier)
        self._assert_single_sublayer(root_layer, anonymous.identifier)
| 1,155 | Python | 35.124999 | 77 | 0.69697 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_context_menu.py | import omni.kit.test
import os
import tempfile
import shutil
import omni.client
import omni.kit.app
from .base import TestLayerUIBase
from pxr import Usd, Sdf
from stat import S_IREAD, S_IWRITE
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.prompt import PromptManager
class TestContextMenu(TestLayerUIBase):
    """Exercises the Layer window context menu against writable, read-only,
    muted and locked layers.

    The fixture builds a stage with two on-disk sublayers: ``writable.usd``
    and ``readonly.usd`` (the latter made read-only at filesystem level).
    """

    # Before running each test
    async def setUp(self):
        await super().setUp()
        self.stage = self.usd_context.get_stage()
        self._temp_dir = tempfile.TemporaryDirectory().name
        self._writable_layer_path = os.path.join(self._temp_dir, "writable.usd")
        self._writable_layer = Sdf.Layer.CreateNew(self._writable_layer_path)
        self._writable_layer.Save()
        self._readonly_layer_path = os.path.join(self._temp_dir, "readonly.usd")
        layer = Sdf.Layer.CreateNew(self._readonly_layer_path)
        layer.Save()
        layer = None  # Release the handle before flipping the file permission.
        os.chmod(self._readonly_layer_path, S_IREAD)
        self._readonly_layer = Sdf.Layer.FindOrOpen(self._readonly_layer_path)
        # Prepare stage
        root_layer = self.stage.GetRootLayer()
        root_layer.subLayerPaths.append(self._readonly_layer_path)
        root_layer.subLayerPaths.append(self._writable_layer_path)
        await self.wait()
        await self._hide_prompt()
        import omni.kit.ui_test as ui_test
        await ui_test.find("Layer").focus()

    async def tearDown(self):
        await super().tearDown()
        self._writable_layer = None
        self._readonly_layer = None
        self.stage = None
        # Restore write permission so the temp directory can be removed.
        os.chmod(self._readonly_layer_path, S_IWRITE)
        shutil.rmtree(self._temp_dir)

    async def wait(self, frames=10):
        """Pump the Kit update loop for *frames* frames."""
        for i in range(frames):
            await self.app.next_update_async()

    def _find_all_layer_items(self):
        """Return the (root, writable, readonly) layer item widgets of the Layer window."""
        import omni.kit.ui_test as ui_test
        writable_item = ui_test.find("Layer//Frame/**/Label[*].text=='writable.usd'")
        self.assertTrue(writable_item)
        readonly_item = ui_test.find("Layer//Frame/**/Label[*].text=='readonly.usd'")
        self.assertTrue(readonly_item)
        root_item = ui_test.find("Layer//Frame/**/Label[*].text=='Root Layer (Authoring Layer)'")
        self.assertTrue(root_item)
        return root_item, writable_item, readonly_item

    async def test_set_authoring_layer(self):
        """'Set Authoring Layer' works for writable layers only (not read-only,
        muted or locked ones)."""
        import omni.kit.ui_test as ui_test
        root_item, writable_item, readonly_item = self._find_all_layer_items()
        await writable_item.right_click()
        await ui_test.select_context_menu("Set Authoring Layer")
        self.assertEqual(self._writable_layer.identifier, self.stage.GetEditTarget().GetLayer().identifier)
        await readonly_item.right_click()
        # The menu item must not exist for a read-only layer.
        with self.assertRaises(Exception) as context:
            await ui_test.select_context_menu("Set Authoring Layer")
        self.assertEqual(self._writable_layer.identifier, self.stage.GetEditTarget().GetLayer().identifier)
        # Double click to change authoring layer will fail also.
        await readonly_item.double_click()
        self.assertEqual(self._writable_layer.identifier, self.stage.GetEditTarget().GetLayer().identifier)
        # Switch back to root layer
        await root_item.double_click()
        # NOTE(review): this compares the edit target with itself and is
        # always true; likely intended to compare against the root layer's
        # identifier -- confirm and fix.
        self.assertEqual(self.stage.GetEditTarget().GetLayer().identifier, self.stage.GetEditTarget().GetLayer().identifier)
        # Mute layer and try to set it as authoring layer will fail also
        self.stage.MuteLayer(self._writable_layer.identifier)
        await self.wait()
        await writable_item.right_click()
        with self.assertRaises(Exception) as context:
            await ui_test.select_context_menu("Set Authoring Layer")
        # NOTE(review): tautological comparison again -- see above.
        self.assertEqual(self.stage.GetEditTarget().GetLayer().identifier, self.stage.GetEditTarget().GetLayer().identifier)
        self.stage.UnmuteLayer(self._writable_layer.identifier)
        await self.wait()
        # Lock layer and try to set it as authoring layer will fail also
        LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, True)
        await self.wait()
        await writable_item.right_click()
        with self.assertRaises(Exception) as context:
            await ui_test.select_context_menu("Set Authoring Layer")
        # NOTE(review): tautological comparison again -- see above.
        self.assertEqual(self.stage.GetEditTarget().GetLayer().identifier, self.stage.GetEditTarget().GetLayer().identifier)
        LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, False)
        await self.wait()

    async def _hide_prompt(self):
        """Hide confirmation prompts that would otherwise block further UI interaction."""
        prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
        if prompt:
            prompt.visible = False
        prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
        if prompt:
            prompt.visible = False

    async def _test_menu_item(
        self,
        item_name,
        file_picker_name=None,
        allow_read_only=False,
        allow_mute=False,
        allow_lock=False,
        select_multiple=False
    ):
        """Drive the context-menu entry *item_name* across layer states.

        Args:
            item_name: Context-menu entry to trigger.
            file_picker_name: Window title of the file picker the entry opens, if any.
            allow_read_only: Whether the entry must exist on a read-only layer.
            allow_mute: Whether the entry must exist on a muted layer.
            allow_lock: Whether the entry must exist on a locked layer.
            select_multiple: Select both sublayers before triggering the entry.
        """
        import omni.kit.ui_test as ui_test
        root_item, writable_item, readonly_item = self._find_all_layer_items()
        if select_multiple:
            layer_model = self.layers_instance.get_layer_model()
            root_layer_item = layer_model.root_layer_item
            layer_window = self.layers_instance._window
            layer_tree_view = layer_window._layer_view
            # Select two sublayers: writable and readonly sublayer of root.
            layer_tree_view.selection = root_layer_item.sublayers
            await self.wait()
            # Ensure they are selected.
            self.assertEqual(len(self.layers_instance.get_selected_items()), 2)
        await writable_item.right_click()
        await ui_test.select_context_menu(item_name)
        await ui_test.human_delay()
        if file_picker_name:
            await self.wait()
            file_picker = ui_test.find(file_picker_name)
            await file_picker.focus()
            self.assertTrue(file_picker)
            file_picker.window.visible = False
        # Special treatment for flatten sublayers
        await self._hide_prompt()
        await readonly_item.right_click()
        # The entry must be absent for read-only layers unless allowed.
        if not allow_read_only:
            with self.assertRaises(Exception) as context:
                await ui_test.select_context_menu(item_name)
        else:
            await ui_test.select_context_menu(item_name)
        # Special treatment for flatten sublayers
        await self._hide_prompt()
        # Mute the writable layer; the entry must be absent unless allowed.
        self.stage.MuteLayer(self._writable_layer.identifier)
        await self.wait()
        await writable_item.right_click()
        if not allow_mute:
            with self.assertRaises(Exception) as context:
                await ui_test.select_context_menu(item_name)
        else:
            await ui_test.select_context_menu(item_name)
        # Special treatment for flatten sublayers
        await self._hide_prompt()
        self.stage.UnmuteLayer(self._writable_layer.identifier)
        await self.wait()
        # Lock the writable layer; the entry must be absent unless allowed.
        LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, True)
        await self.wait()
        await writable_item.right_click()
        if not allow_lock:
            with self.assertRaises(Exception) as context:
                await ui_test.select_context_menu(item_name)
        else:
            await ui_test.select_context_menu(item_name)
        # Special treatment for flatten sublayers
        await self._hide_prompt()
        LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, False)
        await self.wait()

    async def test_copy_url_link(self):
        """'Copy URL Link' places the layer identifier on the clipboard."""
        import omni.kit.ui_test as ui_test
        root_item, _, _ = self._find_all_layer_items()
        await root_item.right_click()
        await ui_test.select_context_menu("Copy URL Link")
        import omni.kit.clipboard
        url = omni.kit.clipboard.paste()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        self.assertEqual(url, root_layer_item.identifier)

    async def test_collapse_expand_tree(self):
        """Collapse/expand menu entries can be triggered without raising."""
        import omni.kit.ui_test as ui_test
        root_item, _, _ = self._find_all_layer_items()
        await root_item.right_click()
        await ui_test.select_context_menu("Collapse Tree")
        await root_item.right_click()
        await ui_test.select_context_menu("Expand Tree")

    async def test_set_edit_layer(self):
        """'Set Default Edit Layer' (auto-authoring mode) rejects read-only layers."""
        import omni.kit.ui_test as ui_test
        menu_name = "Set Default Edit Layer"
        _, writable_item, readonly_item = self._find_all_layer_items()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        layer_model.auto_authoring_mode = True
        await self.wait()
        await writable_item.right_click()
        await ui_test.select_context_menu(menu_name)
        layer = Sdf.Find(layer_model.default_edit_layer)
        self.assertEqual(layer, self._writable_layer)
        await readonly_item.right_click()
        with self.assertRaises(Exception) as context:
            await ui_test.select_context_menu(menu_name)

    async def test_refresh_references_or_payloads(self):
        """Each refresh menu entry exists on reference/payload prim items."""
        import omni.kit.ui_test as ui_test
        prim = self.stage.DefinePrim("/reference0", "Xform")
        prim.GetReferences().AddReference(self._writable_layer.identifier)
        prim = self.stage.DefinePrim("/payload0", "Xform")
        prim.GetPayloads().AddPayload(self._writable_layer.identifier)
        prim = self.stage.DefinePrim("/reference_and_payload0", "Xform")
        prim.GetReferences().AddReference(self._writable_layer.identifier)
        prim.GetPayloads().AddPayload(self._writable_layer.identifier)
        await self.wait()
        reference_widget = ui_test.find("Layer//Frame/**/Label[*].text=='reference0'")
        payload_widget = ui_test.find("Layer//Frame/**/Label[*].text=='payload0'")
        reference_and_payload_widget = ui_test.find("Layer//Frame/**/Label[*].text=='reference_and_payload0'")
        all_widgets = [reference_widget, payload_widget, reference_and_payload_widget]
        all_menu_names = ["Refresh Reference", "Refresh Payload", "Refresh Payload & Reference"]
        for prim_item, menu_name in zip(all_widgets, all_menu_names):
            await prim_item.right_click()
            import asyncio
            # NOTE(review): fixed 3s sleep per item slows the suite; presumably
            # waiting for the menu to build -- consider an event-based wait.
            await asyncio.sleep(3.0)
            await ui_test.select_context_menu(menu_name)

    async def test_save_sublayer(self):
        """'Save' is shown only for dirty, writable, non-anonymous layers."""
        import omni.kit.ui_test as ui_test
        menu_name = "Save"
        # Dirty all three layers so the writable one exposes "Save".
        for layer in [self.stage.GetRootLayer(), self._writable_layer, self._readonly_layer]:
            Sdf.CreatePrimInLayer(layer, "/test")
        await self.wait()
        root_item, writable_item, readonly_item = self._find_all_layer_items()
        await writable_item.right_click()
        await ui_test.select_context_menu(menu_name)
        # When it's not dirty, the menu item is not shown.
        # Cannot save readonly layer
        # Cannot save anonymous layer
        for layer_item in [root_item, writable_item, readonly_item]:
            await layer_item.right_click()
            with self.assertRaises(Exception) as context:
                await ui_test.select_context_menu(menu_name)

    async def test_find_in_browser(self):
        """'Find in Content Browser' exists for on-disk layers but not anonymous ones."""
        import omni.kit.ui_test as ui_test
        menu_name = "Find in Content Browser"
        root_item, writable_item, readonly_item = self._find_all_layer_items()
        for layer_item in [writable_item, readonly_item]:
            await layer_item.right_click()
            await ui_test.select_context_menu(menu_name)
        # Cannot browse anonymous layer
        await root_item.right_click()
        with self.assertRaises(Exception) as context:
            await ui_test.select_context_menu(menu_name)

    async def test_move_selection(self):
        """'Move Selections To This Layer' moves selected prim specs to the target layer."""
        import omni.kit.ui_test as ui_test
        menu_name = "Move Selections To This Layer"
        _, writable_item, readonly_item = self._find_all_layer_items()
        prim0 = self.stage.DefinePrim("/reference0", "Xform")
        prim1 = self.stage.DefinePrim("/payload0", "Xform")
        await self.wait()
        self.usd_context.get_selection().set_selected_prim_paths([str(prim0.GetPath()), str(prim1.GetPath())], True)
        # Cannot modify readonly layer
        await readonly_item.right_click()
        with self.assertRaises(Exception) as context:
            await ui_test.select_context_menu(menu_name)
        await writable_item.right_click()
        await ui_test.select_context_menu(menu_name)
        await self.wait()
        # The specs must have left the root layer and landed in the writable layer.
        root_layer = self.stage.GetRootLayer()
        self.assertFalse(root_layer.GetPrimAtPath(prim0.GetPath()))
        self.assertFalse(root_layer.GetPrimAtPath(prim1.GetPath()))
        self.assertTrue(self._writable_layer.GetPrimAtPath(prim0.GetPath()))
        self.assertTrue(self._writable_layer.GetPrimAtPath(prim1.GetPath()))

    async def _test_menu_without_selection(self, menu_name):
        """Right click on the empty area of layer window will pop up context menu also."""
        import omni.kit.ui_test as ui_test
        window = ui_test.find("Layer")
        await window.bring_to_front()
        # Move the mouse off-window first so the subsequent move is registered.
        await ui_test.emulate_mouse_move(ui_test.Vec2(-100, -100), human_delay_speed=10)
        await ui_test.emulate_mouse_move(window.center)
        await ui_test.emulate_mouse_click(right_click=True)
        await ui_test.select_context_menu(menu_name)

    async def test_create_sublayer_without_selection(self):
        await self._test_menu_without_selection("Create Sublayer")

    async def test_insert_sublayer_without_selection(self):
        await self._test_menu_without_selection("Insert Sublayer")

    async def test_create_sublayer(self):
        await self._test_menu_item("Create Sublayer", "Create Sublayer")

    async def test_insert_sublayer(self):
        await self._test_menu_item("Insert Sublayer", "Insert Sublayer")

    async def test_save_a_copy(self):
        await self._test_menu_item("Save a Copy", "Save Layer As", True, True, True)

    async def test_save_as(self):
        await self._test_menu_item("Save As", "Save Layer As", True, False, True)

    async def test_remove_sublayer(self):
        await self._test_menu_item("Remove Layer", None, True, False, False)

    async def test_remove_multiple_sublayers(self):
        await self._test_menu_item("Remove Layer", None, False, False, False, True)

    async def test_flatten_sublayers(self):
        await self._test_menu_item("Flatten Sublayers", None, True, True, False)

    async def test_reload_sublayer(self):
        await self._test_menu_item("Reload Layer", None, True, False, False)

    async def test_merge_layer_down(self):
        """'Merge Down One' requires a writable layer below the merged one."""
        import omni.kit.ui_test as ui_test
        root_item, writable_item, readonly_item = self._find_all_layer_items()
        # Add one more sublayer so the writable layer has a merge target below it.
        layer = Sdf.Layer.CreateAnonymous()
        self.stage.GetRootLayer().subLayerPaths.append(layer.identifier)
        await self._test_menu_item("Merge Down One", None, False, False, False)
        await writable_item.right_click()
        await ui_test.select_context_menu("Merge Down One")

    async def test_remove_prim(self):
        """'Delete' removes selected prim specs from the edit layer but not from
        a read-only layer."""
        index = 0
        # Author /prim0-1 in the writable layer and /prim2-3 in the read-only one.
        for layer in [self._writable_layer, self._readonly_layer]:
            with Usd.EditContext(self.stage, layer):
                self.stage.DefinePrim(f"/prim{index}")
                index += 1
                self.stage.DefinePrim(f"/prim{index}")
                index += 1
        await self.wait()
        self.assertTrue(self._writable_layer.GetPrimAtPath("/prim0"))
        self.assertTrue(self._writable_layer.GetPrimAtPath("/prim1"))
        self.assertTrue(self._readonly_layer.GetPrimAtPath("/prim2"))
        self.assertTrue(self._readonly_layer.GetPrimAtPath("/prim3"))
        import omni.kit.ui_test as ui_test
        # Select and delete single prim.
        LayerUtils.set_edit_target(self.stage, self._writable_layer.identifier)
        await self.wait()
        omni.kit.commands.execute(
            "SelectPrims",
            old_selected_paths=[],
            new_selected_paths=["/prim0"],
            expand_in_stage=True
        )
        await self.wait()
        prim0 = ui_test.find("Layer//Frame/**/Label[*].text=='prim0'")
        await prim0.right_click()
        await ui_test.select_context_menu("Delete")
        self.assertFalse(self._writable_layer.GetPrimAtPath("/prim0"))
        self.assertTrue(self._writable_layer.GetPrimAtPath("/prim1"))
        omni.kit.undo.undo()
        await self.wait()
        # Select and delete multiple prims.
        omni.kit.commands.execute(
            "SelectPrims",
            old_selected_paths=[],
            new_selected_paths=["/prim0", "/prim1"],
            expand_in_stage=True
        )
        await self.wait()
        prim0 = ui_test.find("Layer//Frame/**/Label[*].text=='prim0'")
        await prim0.right_click()
        await ui_test.select_context_menu("Delete")
        self.assertFalse(self._writable_layer.GetPrimAtPath("/prim0"))
        self.assertFalse(self._writable_layer.GetPrimAtPath("/prim1"))
        # Cannot remove prims in read-only layer.
        LayerUtils.set_edit_target(self.stage, self._readonly_layer.identifier)
        await self.wait()
        omni.kit.commands.execute(
            "SelectPrims",
            old_selected_paths=[],
            new_selected_paths=["/prim2"],
            expand_in_stage=True
        )
        await self.wait()
        prim2 = ui_test.find("Layer//Frame/**/Label[*].text=='prim2'")
        await prim2.right_click()
        with self.assertRaises(Exception) as context:
            await ui_test.select_context_menu("Delete")
| 18,118 | Python | 38.561135 | 124 | 0.642952 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_misc.py | import os
import string
import random
import unittest
import carb
import omni
import omni.kit.test
import omni.usd
import omni.client
import omni.kit.widget.layers
from pathlib import Path
from omni.kit.usd.layers import LayerUtils
from pxr import Sdf, Usd, UsdGeom
from .base import TestLayerNonUIBase
class TestLayerMisc(TestLayerNonUIBase):
    """Miscellaneous layer regression tests: layer lifetime, dirtiness after
    save, and stage up-axis propagation to new sublayers."""

    def get_random_string(self):
        """Return a random 6-character string of uppercase ASCII letters and digits."""
        return "".join(random.choices(string.ascii_uppercase + string.digits, k=6))

    async def test_layer_release(self):
        """A removed sublayer must be fully released once unreferenced.

        Test fix for https://nvidia-omniverse.atlassian.net/browse/OM-18672
        """
        current_path = Path(__file__).parent
        test_data_path = current_path.parent.parent.parent.parent.parent.joinpath("data")
        sublayer_path = str(test_data_path.joinpath("sublayer.usd"))
        usd_context = omni.usd.get_context()
        await usd_context.new_stage_async()
        stage = usd_context.get_stage()
        root_layer = stage.GetRootLayer()
        sublayer = LayerUtils.insert_sublayer(root_layer, 0, sublayer_path)
        identifier = sublayer.identifier
        # NOTE: `!= None` / `== None` (not `is`) are kept deliberately: expired
        # pxr weak-layer handles may compare equal to None without being the
        # None singleton -- confirm before switching to assertIsNone/assertIsNotNone.
        self.assertTrue(sublayer != None)  # noqa: E711
        sublayer = None  # Release our reference; the Layer window may still hold one.
        # Removing the sublayer drops it from the layer stack and also releases
        # the Layer window's reference to it.
        LayerUtils.remove_sublayer(root_layer, 0)
        sublayer = Sdf.Find(identifier)
        self.assertTrue(sublayer == None)  # noqa: E711
        sublayer = LayerUtils.insert_sublayer(root_layer, 0, sublayer_path)
        identifier = sublayer.identifier
        self.assertTrue(sublayer != None)  # noqa: E711
        sublayer = None  # Release the ref count
        # Reopening the stage must also release the sublayer.
        await usd_context.new_stage_async()
        sublayer = Sdf.Find(identifier)
        self.assertTrue(sublayer == None)  # noqa: E711

    async def test_layer_dirtiness_after_save(self):
        """The root layer must not remain dirty after saving a new stage to disk."""
        usd_context = omni.usd.get_context()
        await usd_context.new_stage_async()
        # Manually set current edit target identifier.
        self.layers_instance = omni.kit.widget.layers.get_instance()
        layer_model = self.layers_instance.get_layer_model()
        layer_model._edit_target_identifier = usd_context.get_stage_url()
        token = carb.tokens.get_tokens_interface()
        temp_dir = token.resolve("${temp}")
        temp_usd = os.path.join(temp_dir, f"{self.get_random_string()}.usd")
        temp_usd = omni.client.normalize_url(temp_usd)
        success, _, saved_layers = await usd_context.save_layers_async(temp_usd, [])
        self.assertTrue(success)
        self.assertEqual(len(saved_layers), 1)
        # Pump several frames so the layer model's update event can pick up the
        # authoring edit target.
        for _ in range(5):
            await omni.kit.app.get_app().next_update_async()
        # Manually trigger the update to move the edit target onto the root layer.
        layer_model._pending_changed_edit_target = temp_usd
        layer_model._on_update(0.0)
        # Saving plus retargeting must leave the root layer clean.
        stage = usd_context.get_stage()
        root_layer = stage.GetRootLayer()
        self.assertFalse(root_layer.dirty)

    async def test_create_sublayer_with_stage_axis(self):
        """A newly created sublayer inherits the stage's up axis (Y and Z)."""
        usd_context = omni.usd.get_context()
        for axis in [UsdGeom.Tokens.y, UsdGeom.Tokens.z]:
            await usd_context.new_stage_async()
            stage = usd_context.get_stage()
            UsdGeom.SetStageUpAxis(stage, axis)
            sublayer = Sdf.Layer.CreateAnonymous()
            omni.kit.commands.execute(
                "CreateSublayer",
                layer_identifier=stage.GetRootLayer().identifier,
                sublayer_position=0,
                new_layer_path=sublayer.identifier,
                transfer_root_content=False,
                create_or_insert=True,
            )
            sublayer_stage = Usd.Stage.Open(sublayer)
            self.assertEqual(UsdGeom.GetStageUpAxis(sublayer_stage), axis)
| 4,150 | Python | 38.533333 | 96 | 0.655181 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_window_ui_states.py | import omni.kit.test
import os
import tempfile
import shutil
import omni.client
import omni.kit.app
from .base import TestLayerNonUIBase
from pxr import Usd, Sdf
from stat import S_IREAD, S_IWRITE
from omni.kit.usd.layers import LayerUtils, get_layers
from omni.kit.widget.prompt import PromptManager
class TestWindowUiStates(TestLayerNonUIBase):
    """Checks the per-layer mute and lock toggle buttons in the Layer window.

    Fixture: a stage with one writable and one read-only on-disk sublayer.
    """

    # Before running each test
    async def setUp(self):
        await super().setUp()
        self.stage = self.usd_context.get_stage()
        self._temp_dir = tempfile.TemporaryDirectory().name
        self._writable_layer_path = os.path.join(self._temp_dir, "writable.usd")
        self._writable_layer = Sdf.Layer.CreateNew(self._writable_layer_path)
        self._writable_layer.Save()
        self._readonly_layer_path = os.path.join(self._temp_dir, "readonly.usd")
        layer = Sdf.Layer.CreateNew(self._readonly_layer_path)
        layer.Save()
        layer = None  # Release the handle before flipping the file permission.
        os.chmod(self._readonly_layer_path, S_IREAD)
        self._readonly_layer = Sdf.Layer.FindOrOpen(self._readonly_layer_path)
        # Prepare stage
        root_layer = self.stage.GetRootLayer()
        root_layer.subLayerPaths.append(self._readonly_layer_path)
        root_layer.subLayerPaths.append(self._writable_layer_path)
        import omni.kit.ui_test as ui_test
        await ui_test.find("Layer").focus()

    async def tearDown(self):
        await super().tearDown()
        self._writable_layer = None
        self._readonly_layer = None
        self.stage = None
        # Restore write permission so the temp directory can be removed.
        os.chmod(self._readonly_layer_path, S_IWRITE)
        shutil.rmtree(self._temp_dir)

    async def test_mute(self):
        """Local/global mute buttons only take effect in the matching muteness scope."""
        import omni.kit.ui_test as ui_test
        local_mute_items = ui_test.find_all("Layer//Frame/**/ToolButton[*].identifier=='local_mute'")
        global_mute_items = ui_test.find_all("Layer//Frame/**/ToolButton[*].identifier=='global_mute'")
        # Root layer has no mute button.
        self.assertEqual(len(local_mute_items), 2)
        self.assertEqual(len(global_mute_items), 2)
        for global_scope in [False, True]:
            layers = get_layers()
            layers_state = layers.get_layers_state()
            layers_state.set_muteness_scope(global_scope)
            # Local mute
            # Mute readonly layer; a local mute only sticks in local scope.
            await local_mute_items[0].click()
            self.assertEqual(self.stage.IsLayerMuted(self._readonly_layer.identifier), not global_scope)
            self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))
            # Unmute
            await local_mute_items[0].click()
            self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
            # Mute writable layer
            await local_mute_items[1].click()
            self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
            self.assertEqual(self.stage.IsLayerMuted(self._writable_layer.identifier), not global_scope)
            # Unmute
            await local_mute_items[1].click()
            self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))
            # global mute
            # Mute readonly layer; a global mute only sticks in global scope.
            await global_mute_items[0].click()
            self.assertEqual(self.stage.IsLayerMuted(self._readonly_layer.identifier), global_scope)
            self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))
            # Unmute
            await global_mute_items[0].click()
            self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
            # Mute writable layer
            await global_mute_items[1].click()
            self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
            self.assertEqual(self.stage.IsLayerMuted(self._writable_layer.identifier), global_scope)
            # Unmute
            await global_mute_items[1].click()
            self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))

    async def test_lock(self):
        """The lock button toggles the locked state of the writable layer."""
        import omni.kit.ui_test as ui_test
        lock_items = ui_test.find_all("Layer//Frame/**/ToolButton[*].identifier=='lock'")
        # Root or readonly layer has no lock button.
        self.assertEqual(len(lock_items), 1)
        layers = get_layers()
        layers_state = layers.get_layers_state()
        await lock_items[0].click()
        self.assertTrue(layers_state.is_layer_locked(self._writable_layer.identifier))
        await lock_items[0].click()
        self.assertFalse(layers_state.is_layer_locked(self._writable_layer.identifier))
| 4,583 | Python | 36.884297 | 104 | 0.64892 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_hotkey.py | import carb
import omni.kit.app
from .base import TestLayerUIBase
from omni.kit.test_suite.helpers import arrange_windows
class TestHotkey(TestLayerUIBase):
    """Hotkey handling in the Layer window."""

    # Before running each test
    async def setUp(self):
        await super().setUp()
        await arrange_windows("Layer", 800, 600)
        self.stage = self.usd_context.get_stage()

    async def tearDown(self):
        await super().tearDown()

    async def _wait(self, frames=4):
        # Pump the Kit update loop for a few frames.
        for i in range(frames):
            await self.app.next_update_async()

    async def test_remove_prim_with_hot_key(self):
        """Pressing DEL with prim specs selected deletes them from the stage."""
        self.stage.DefinePrim("/cube", "Cube")
        self.stage.DefinePrim("/cube2", "Cube")
        await self._wait()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        layer_window = self.layers_instance._window
        layer_tree_view = layer_window._layer_view
        all_root_specs = root_layer_item.absolute_root_spec.children
        self.assertTrue(len(all_root_specs) != 0)
        # Select both prim specs in the layer tree view.
        layer_tree_view.selection = all_root_specs
        await self._wait()
        self.assertEqual(len(self.layers_instance.get_selected_items()), 2)
        # FIXME: Not sure why there are two dangling windows that are visible underlying.
        import omni.kit.ui_test as ui_test
        window = ui_test.find("Create Sublayer")
        if window:
            window.window.visible = False
        window = ui_test.find("Insert Sublayer")
        if window:
            window.window.visible = False
        window = ui_test.find("Save Layer As")
        if window:
            window.window.visible = False
        # Move the mouse over the Layer window so it receives the key press.
        window = ui_test.find("Layer")
        await window.bring_to_front()
        await ui_test.emulate_mouse_move(ui_test.Vec2(-100, -100), human_delay_speed=10)
        await ui_test.emulate_mouse_move(window.center)
        await omni.kit.ui_test.emulate_keyboard_press(carb.input.KeyboardInput.DEL)
        await self._wait()
        self.assertFalse(self.stage.GetPrimAtPath("/cube"))
        self.assertFalse(self.stage.GetPrimAtPath("/cube2"))
| 2,117 | Python | 32.619047 | 89 | 0.640529 |
omniverse-code/kit/exts/omni.kit.widget.layers/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.6.11] - 2023-01-13
### Changes
- Save edit target into root layer before in SETTINGS_SAVING only to avoid actively change root layer.
## [1.6.10] - 2022-12-14
### Changes
- Improve drag and drop style.
## [1.6.9] - 2022-11-22
### Changes
- Swap out pyperclip for linux-friendly copy & paste.
## [1.6.8] - 2022-11-14
### Changes
- Fix issue to search in layer window.
## [1.6.7] - 2022-11-07
### Changes
- Further optimize loading time by not populating a layer item when only its children count is requested.
## [1.6.6] - 2022-11-03
### Changes
- Fix empty menu issue, and improve context menu display.
## [1.6.5] - 2022-10-18
### Changes
- Fix hotkey registration warnings.
## [1.6.4] - 2022-10-12
### Changes
- More guardrails for live session.
## [1.6.3] - 2022-10-06
### Changes
- Add hotkey support for removing prim specs.
## [1.6.2] - 2022-10-03
### Fixed
- Rearrange layer order.
## [1.6.1] - 2022-09-30
### Fixed
- Set the menu checkbox when the window is appearing
## [1.6.0] - 2022-09-29
### Changes
- Add sublayer live session support.
## [1.5.36] - 2022-09-08
### Added
- Explicit hover style/color for drag drop.
## [1.5.35] - 2022-09-02
### Changes
- Supports to remove multiple layers.
## [1.5.34] - 2022-08-15
### Changes
- Add more tests for layer window to cover code of UI and context menu.
## [1.5.33] - 2022-08-09
### Changes
- Show correct outdate status for layer icon.
## [1.5.32] - 2022-08-09
### Changes
- Fix layer link window.
## [1.5.31] - 2022-07-25
### Changes
- Move outdate notifiction into omni.kit.usd.layers to not be dependent on layer view.
## [1.5.30] - 2022-07-25
### Changes
- Fix layer mute in global scope.
## [1.5.29] - 2022-07-25
### Changes
- Refactored unittests to make use of content_browser test helpers
## [1.5.28] - 2022-07-19
### Changes
- Disable save button when it's in a live session.
## [1.5.27] - 2022-07-14
### Changes
- Optimize performance to remove large bunch of prims.
## [1.5.26] - 2022-07-11
### Changes
- Fix stage treeview in layer linking window
## [1.5.25] - 2022-07-07
### Changes
- Supports to insert multiple sublayers.
- Fix regression for lock UI.
## [1.5.24] - 2022-07-04
### Changes
- Validate session name to support only alphanumeric letters, hyphens or underscores only.
- Use radio buttons instead of checkboxes for join/create session buttons.
## [1.5.23] - 2022-06-22
### Changes
- Multiple drag and drop support.
## [1.5.22] - 2022-06-15
### Changes
- Move session related widgets into omni.kit.widget.layers to decouple session management stuff with layers view.
## [1.5.21] - 2022-06-10
### Changes
- Use lazy loading to optimize performance of loading for layer window.
## [1.5.20] - 2022-06-09
### Changes
- Use notice to drive refresh of outdate states.
## [1.5.19] - 2022-06-02
### Changes
- Add new live workflow with Omni-Objects.
## [1.5.18] - 2022-04-22
### Changes
- Handle multi-file drag & drops
## [1.5.17] - 2022-04-19
### Changed
- Integrate omni.kit.usd.layers to replace old layers interfaces from omni.usd.
## [1.5.16] - 2022-04-15
### Fixed
- Avoid selection reset after a selected item is deleted and another is selected in the same update
## [1.5.15] - 2021-12-28
### Changed
- Fix save all button refresh.
## [1.5.14] - 2021-11-23
### Changed
- Add layer linking
## [1.5.13] - 2021-10-20
### Changed
- External drag/drop doesn't use windows slashes
## [1.5.12] - 2021-09-27
### Changed
- Improve prims remove in large stage.
## [1.5.11] - 2021-08-13
### Changed
- Added payload icon.
## [1.5.10] - 2021-08-25
### Fixes
- Add defensive check for ReplaceSublayerCommand.
## [1.5.9] - 2021-08-04
### Changed
- More unittests.
## [1.5.8] - 2021-08-03
### Changed
- Prompt to user before layers merge/flatten.
## [1.5.7] - 2021-07-29
- Added "Refesh Payload" to context menu
## [1.5.6] - 2021-07-21
### Changed
- Use un-escaped filename for layer name.
- Replace "save as" menu as "save a copy" to avoid confuse, and "Save as and Replace" as "Save As"
## [1.5.5] - 2021-07-21
### Changed
- Added "Refesh Reference" to context menu
### [1.5.4] - 2021-07-22
### Fixes
- Fix layer insert when layer window is hidden.
### [1.5.3] - 2021-07-15
### Fixes
- Fixed `Select Bound Objects` in context menu
### [1.5.2] - 2021-07-02
### Fixes
- Fix sublayer refresh when it's switched from offine to live mode.
### [1.5.1] - 2021-04-19
### Changes
- Added drag/drop support from outside kit
## [1.5.0] - 2021-05-05
### Changes
- Add support for `Enter` and `Esc` buttons on prompts.
### [1.4.5] - 2021-05-05
### Changes
- Removed omni.kit.versioning
- Replaced omni.kit.versioning with omni.kit.widget.versioning for versioning support detection
### [1.4.4] - 2021-04-18
### Changes and Fixes
- Improve perf for multiple prim specs delete.
- Fix flickering of the save icon in live mode.
- Fix layer move.
- Fix issue of disabling layer contents.
### [1.4.3] - 2021-04-09
### Changes
- Add support to find layer in content window.
- Don't allow to insert the sublayer if its parent already has the same layer.
### [1.4.2] - 2021-04-08
### Changes
- UI improvement.
- Fix possible exception caused by content refresh.
### [1.4.1] - 2021-04-06
### Changes
- Improve layer reload.
- Batch prim specs refresh to improve perf.
### [1.4.0] - 2021-04-05
### Changes
- Refactoring
- More tests and bug fixes.
### [1.2.0] - 2021-03-29
### Added
- Supported accepting drag and drop to insert versioned sublayer.
## [1.1.2] - 2021-03-25
### Changes
- Fixed `Save All` button to create checkpoint when applicable.
## [1.1.1] - 2021-03-17
### Changes
- Fixed leaks from content_browser
## [1.1.0] - 2021-02-10
### Changes
- Changed checkpoint comment message:
- When using Save As to save an anonymous layer file and overwriting an existing file: "Replaced with new file"
- When using Save As to save an existing layer file and overwriting an existing file: "Replaced with [the path of the file]"
## [1.0.2] - 2021-02-10
### Changes
- Updated StyleUI handling
## [1.0.1] - 2021-02-06
### Changed
- Remove old editor and content window dependencies.
## [1.0.0] - 2021-01-21
### Changed
- Initialize change log.
- Add context menu to insert USD as sublayer from content window.
| 6,307 | Markdown | 22.803773 | 126 | 0.672427 |
omniverse-code/kit/exts/omni.kit.pip_archive/omni/kit/pip_archive/tests/__init__.py | from .test_pip_archive import *
| 32 | Python | 15.499992 | 31 | 0.75 |
omniverse-code/kit/exts/omni.kit.pip_archive/omni/kit/pip_archive/tests/test_pip_archive.py | import omni.kit.test
import omni.kit.pipapi
class TestPipArchive(omni.kit.test.AsyncTestCase):
    """Tests for the pip package archive prebundled with this extension."""

    async def test_pip_archive(self):
        """Install a prebundled package with the online index disabled."""
        # Take one of packages from deps/pip.toml, it should be prebundled and available without need for going into online index
        omni.kit.pipapi.install("numpy", version="1.19.0", use_online_index=False)
        import numpy
        # If the offline install succeeded, the module must be importable.
        self.assertIsNotNone(numpy)
| 406 | Python | 32.916664 | 129 | 0.721675 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/_pyrsistent_version.py | __version__ = '0.19.3'
| 23 | Python | 10.999995 | 22 | 0.478261 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/png.py | #!/usr/bin/env python
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <[email protected]>
# Portions Copyright (C) 2009 David Jones <[email protected]>
# And probably portions Copyright (C) 2006 Nicko van Someren <[email protected]>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The ``png`` module can read and write PNG files.
Installation and Overview
-------------------------
``pip install pypng``
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer` classes.
Coverage of PNG formats is fairly complete;
all allowable bit depths (1/2/4/8/16/24/32/48/64 bits per pixel) and
colour combinations are supported:
- greyscale (1/2/4/8/16 bit);
- RGB, RGBA, LA (greyscale with alpha) with 8/16 bits per channel;
- colour mapped images (1/2/4/8 bit).
Interlaced images,
which support a progressive display when downloading,
are supported for both reading and writing.
A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
The ``sBIT`` chunk can be used to specify precision for
non-native bit depths.
Requires Python 3.5 or higher.
Installation is trivial,
but see the ``README.txt`` file (with the source distribution) for details.
Full use of all features will need some reading of the PNG specification
http://www.w3.org/TR/2003/REC-PNG-20031110/.
The package also comes with command line utilities.
- ``pripamtopng`` converts
`Netpbm <http://netpbm.sourceforge.net/>`_ PAM/PNM files to PNG;
- ``pripngtopam`` converts PNG to file PAM/PNM.
There are a few more for simple PNG manipulations.
Spelling and Terminology
------------------------
Generally British English spelling is used in the documentation.
So that's "greyscale" and "colour".
This not only matches the author's native language,
it's also used by the PNG specification.
Colour Models
-------------
The major colour models supported by PNG (and hence by PyPNG) are:
- greyscale;
- greyscale--alpha;
- RGB;
- RGB--alpha.
Also referred to using the abbreviations: L, LA, RGB, RGBA.
Each letter codes a single channel:
*L* is for Luminance or Luma or Lightness (greyscale images);
*A* stands for Alpha, the opacity channel
(used for transparency effects, but higher values are more opaque,
so it makes sense to call it opacity);
*R*, *G*, *B* stand for Red, Green, Blue (colour image).
Lists, arrays, sequences, and so on
-----------------------------------
When getting pixel data out of this module (reading) and
presenting data to this module (writing) there are
a number of ways the data could be represented as a Python value.
The preferred format is a sequence of *rows*,
which each row being a sequence of *values*.
In this format, the values are in pixel order,
with all the values from all the pixels in a row
being concatenated into a single sequence for that row.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Sequence of rows::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list,
but the pixels are flattened so that three values for one pixel
simply follow the three values for the previous pixel.
This is the preferred format because
it provides a good compromise between space and convenience.
PyPNG regards itself as at liberty to replace any sequence type with
any sufficiently compatible other sequence type;
in practice each row is an array (``bytearray`` or ``array.array``).
To allow streaming the outer list is sometimes
an iterator rather than an explicit list.
An alternative format is a single array holding all the values.
Array of values::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
The top row comes first,
and within each row the pixels are ordered from left-to-right.
Within a pixel the values appear in the order R-G-B-A
(or L-A for greyscale--alpha).
There is another format, which should only be used with caution.
It is mentioned because it is used internally,
is close to what lies inside a PNG file itself,
and has some support from the public API.
This format is called *packed*.
When packed, each row is a sequence of bytes (integers from 0 to 255),
just as it is before PNG scanline filtering is applied.
When the bit depth is 8 this is the same as a sequence of rows;
when the bit depth is less than 8 (1, 2 and 4),
several pixels are packed into each byte;
when the bit depth is 16 each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer).
This format is used by the :meth:`Writer.write_packed` method.
It isn't usually a convenient format,
but may be just right if the source data for
the PNG image comes from something that uses a similar format
(for example, 1-bit BMPs, or another PNG file).
"""
__version__ = "0.20220715.0"
import collections
import io # For io.BytesIO
import itertools
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import re
import struct
import sys
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
import zlib
from array import array
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
# Eight fixed bytes (b'\x89PNG\r\n\x1a\n') that begin every PNG file.
signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
# The xstart, ystart, xstep, ystep for the Adam7 interlace passes.
adam7 = ((0, 0, 8, 8),
         (4, 0, 8, 8),
         (0, 4, 4, 8),
         (2, 0, 4, 4),
         (0, 2, 2, 4),
         (1, 0, 2, 2),
         (0, 1, 1, 2))


def adam7_generate(width, height):
    """
    Generate the coordinates of the reduced scanlines for an
    Adam7 interlaced image of `width` by `height` pixels.

    One generator is yielded per (non-empty) pass; each of those
    yields (x, y, xstep) triples, one per reduced scanline: the
    scanline starts at pixel (x, y) and takes every xstep-th pixel
    going right.
    """
    for col0, row0, col_step, row_step in adam7:
        if col0 >= width:
            # Image too narrow: this pass selects no pixels at all.
            continue
        yield (
            (col0, row, col_step)
            for row in range(row0, height, row_step)
        )
# Models the 'pHYs' chunk (used by the Reader):
# pixels per unit along the x and y axes, plus a flag telling
# whether the unit is the metre.
Resolution = collections.namedtuple('_Resolution', 'x y unit_is_meter')
def group(s, n):
    """Return consecutive n-tuples drawn from the iterable *s*
    (any incomplete tail is discarded).
    """
    chunk_source = iter(s)
    # zip-ping n references to one iterator consumes n items per tuple.
    return list(zip(*([chunk_source] * n)))
def isarray(x):
    """Return True when `x` is an ``array.array`` instance."""
    return isinstance(x, array)
def check_palette(palette):
    """
    Validate a palette argument (to the :class:`Writer` class).

    Return the palette as a list when it is acceptable;
    raise :class:`ProtocolError` otherwise.
    ``None`` (the default, meaning no palette) is passed through.
    """
    if palette is None:
        return None

    entries = list(palette)
    if not 0 < len(entries) <= 256:
        raise ProtocolError(
            "a palette must have between 1 and 256 entries,"
            " see https://www.w3.org/TR/PNG/#11PLTE")

    # PNG requires every RGBA (4-tuple) entry to precede every
    # RGB (3-tuple) entry, so remember once a 3-tuple is seen.
    rgb_seen = False
    for index, entry in enumerate(entries):
        if len(entry) not in (3, 4):
            raise ProtocolError(
                "palette entry %d: entries must be 3- or 4-tuples." % index)
        if len(entry) == 3:
            rgb_seen = True
        elif rgb_seen:
            # A 4-tuple after a 3-tuple violates the ordering rule.
            raise ProtocolError(
                "palette entry %d: all 4-tuples must precede all 3-tuples"
                % index)
        for channel in entry:
            if int(channel) != channel or not 0 <= channel <= 255:
                raise ProtocolError(
                    "palette entry %d: "
                    "values must be integer: 0 <= x <= 255" % index)
    return entries
def check_sizes(size, width, height):
    """
    Check that the `size` pair is consistent with any separately
    supplied `width` and `height`; return a (width, height) pair.
    """
    if not size:
        return width, height

    if len(size) != 2:
        raise ProtocolError(
            "size argument should be a pair (width, height) instead is %r"
            % (size,))
    size_w, size_h = size
    if width is not None and width != size_w:
        raise ProtocolError(
            "size[0] (%r) and width (%r) should match when both are used."
            % (size_w, width))
    if height is not None and height != size_h:
        raise ProtocolError(
            "size[1] (%r) and height (%r) should match when both are used."
            % (size_h, height))
    # Return the original object (callers may pass list or tuple).
    return size
def check_color(c, greyscale, which):
    """
    Validate a colour argument used for the
    `transparent` or `background` options.

    A bare integer supplied for a greyscale image is normalised
    to a 1-tuple; ``None`` is passed through unchanged.
    `which` names the option, for error messages.
    """
    if c is None:
        return c

    if greyscale:
        try:
            len(c)
        except TypeError:
            # A bare number: wrap it into the canonical 1-tuple.
            c = (c,)
        if len(c) != 1:
            raise ProtocolError("%s for greyscale must be 1-tuple" % which)
        if not is_natural(c[0]):
            raise ProtocolError(
                "%s colour for greyscale must be integer" % which)
        return c

    if len(c) != 3 or not all(is_natural(v) for v in c):
        raise ProtocolError(
            "%s colour must be a triple of integers" % which)
    return c
class Error(Exception):
    """Base class for all PyPNG exceptions."""

    def __str__(self):
        # Prefix the message with the concrete exception class name.
        return '{}: {}'.format(type(self).__name__, ' '.join(self.args))
class FormatError(Error):
    """
    Problem with input file format.
    In other words, PNG file does not conform to
    the specification in some way and is invalid.
    Raised while reading/decoding a PNG stream.
    """
class ProtocolError(Error):
    """
    Problem with the way the programming interface has been used,
    or the data presented to it.
    Raised for caller errors (bad arguments), not for bad PNG files.
    """
class ChunkError(FormatError):
    """Problem with a particular PNG chunk (a kind of format error)."""
    pass
class Default:
    """The default for the greyscale parameter.

    Used as a sentinel so :class:`Writer` can tell "not supplied"
    apart from an explicit ``False``.
    """
class Writer:
    """
    PNG encoder in pure Python.

    Configure the image (size, colour model, bit depth, optional
    chunks) via the constructor, then emit pixel data with one of
    the ``write*`` methods.
    """
    def __init__(self, width=None, height=None,
                 size=None,
                 greyscale=Default,
                 alpha=False,
                 bitdepth=8,
                 palette=None,
                 transparent=None,
                 background=None,
                 gamma=None,
                 compression=None,
                 interlace=False,
                 planes=None,
                 colormap=None,
                 maxval=None,
                 chunk_limit=2**20,
                 x_pixels_per_unit=None,
                 y_pixels_per_unit=None,
                 unit_is_meter=False):
        """
        Create a PNG encoder object.

        Arguments:

        width, height
          Image size in pixels, as two separate arguments.
        size
          Image size (w,h) in pixels, as single argument
          (alternative to `width`/`height`).
        greyscale
          Pixels are greyscale, not RGB.
          Defaults to true unless `palette=` is used.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16, either one value for all channels
          or one value per channel.
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level: 0 (none) to 9 (more compressed);
          default: -1 or None (zlib default).
        interlace
          Create an interlaced (Adam7) image; requires the whole
          image in working memory when writing.
        chunk_limit
          Write multiple ``IDAT`` chunks (each roughly this many
          bytes) to save memory while compressing.
        x_pixels_per_unit, y_pixels_per_unit
          Number of pixels a unit along the x / y axis; together they
          give the pixel size ratio (write a ``pHYs`` chunk).
        unit_is_meter
          `True` to indicate that the unit (for the ``pHYs``
          chunk) is metre.

        `planes`, `colormap` and `maxval` are accepted (and ignored)
        only so that ``Writer(x, y, **info)`` works, where ``info`` is
        a dictionary returned by :meth:`Reader.read` and friends.

        Notes:

        - `transparent` and `background` must be a tuple with one
          element per channel: a 3-tuple of integers (RGB) for a
          colour image, a 1-tuple for greyscale.
        - Each channel may have its own bit depth.  When a depth is
          not a valid PNG depth (1, 2, 4, 8, 16), the next highest
          valid depth is selected, pixel values are rescaled, and an
          ``sBIT`` chunk records the original precision.
        - Bit depths below 8 are only allowed for greyscale and
          colour mapped images; colour mapped images cannot use
          bit depth 16.
        - With `palette` (a sequence of 3-/4-tuples, all 4-tuples
          first) the options `greyscale`, `alpha` and `transparent`
          must not be used and `bitdepth` must be 1, 2, 4, or 8.
          A ``PLTE`` chunk is written, plus a ``tRNS`` chunk when
          4-tuples (alpha) are present.  Palette entries are 8-bit.
        - `gamma` is a positive number; pixel values are assumed to
          have already been gamma-corrected.
        """
        # At the moment the `planes` argument is ignored;
        # its purpose is to act as a dummy so that
        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
        # returned by Reader.read and friends.
        # Ditto for `colormap`.
        width, height = check_sizes(size, width, height)
        del size

        if not is_natural(width) or not is_natural(height):
            raise ProtocolError("width and height must be integers")
        if width <= 0 or height <= 0:
            raise ProtocolError("width and height must be greater than zero")
        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
        if width > 2 ** 31 - 1 or height > 2 ** 31 - 1:
            raise ProtocolError("width and height cannot exceed 2**31-1")

        if alpha and transparent is not None:
            raise ProtocolError(
                "transparent colour not allowed with alpha channel")

        # bitdepth is either single integer, or tuple of integers.
        # Convert to tuple.
        try:
            len(bitdepth)
        except TypeError:
            bitdepth = (bitdepth, )
        for b in bitdepth:
            valid = is_natural(b) and 1 <= b <= 16
            if not valid:
                raise ProtocolError(
                    "each bitdepth %r must be a positive integer <= 16" %
                    (bitdepth,))

        # Calculate channels, and
        # expand bitdepth to be one element per channel.
        palette = check_palette(palette)
        alpha = bool(alpha)
        colormap = bool(palette)
        if greyscale is Default and palette:
            greyscale = False
        greyscale = bool(greyscale)
        if colormap:
            color_planes = 1
            planes = 1
        else:
            color_planes = (3, 1)[greyscale]
            planes = color_planes + alpha
        if len(bitdepth) == 1:
            bitdepth *= planes

        bitdepth, self.rescale = check_bitdepth_rescale(
            palette,
            bitdepth,
            transparent, alpha, greyscale)

        # These are assertions, because above logic should have
        # corrected or raised all problematic cases.
        if bitdepth < 8:
            assert greyscale or palette
            assert not alpha
        if bitdepth > 8:
            assert not palette

        transparent = check_color(transparent, greyscale, 'transparent')
        background = check_color(background, greyscale, 'background')

        # It's important that the true boolean values
        # (greyscale, alpha, colormap, interlace) are converted
        # to bool because Iverson's convention is relied upon later on.
        self.width = width
        self.height = height
        self.transparent = transparent
        self.background = background
        self.gamma = gamma
        self.greyscale = greyscale
        self.alpha = alpha
        self.colormap = colormap
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        self.palette = palette
        self.x_pixels_per_unit = x_pixels_per_unit
        self.y_pixels_per_unit = y_pixels_per_unit
        self.unit_is_meter = bool(unit_is_meter)

        # PNG colour type byte, per http://www.w3.org/TR/PNG/#11IHDR
        self.color_type = (4 * self.alpha +
                           2 * (not greyscale) +
                           1 * self.colormap)
        assert self.color_type in (0, 2, 3, 4, 6)

        self.color_planes = color_planes
        self.planes = planes
        # :todo: fix for bitdepth < 8
        self.psize = (self.bitdepth / 8) * self.planes
    def write(self, outfile, rows):
        """
        Write a PNG image to the output file.

        `rows` should be an iterable that yields each row
        (each row is a sequence of values);
        there should be ``self.height`` rows of
        ``self.width * self.planes`` values each.
        The rows should be the rows of the original image,
        top row first.

        If `interlace` is specified (when creating the instance),
        then an interlaced PNG file will be written.
        Supply the rows in the normal image order;
        the interlacing is carried out internally.

        Returns the number of rows written.

        .. note ::

          Interlacing requires the entire image to be in working memory.
        """
        # Values per row
        vpr = self.width * self.planes

        def check_rows(rows):
            """
            Yield each row in rows,
            but check each row first (for correct width).
            """
            for i, row in enumerate(rows):
                try:
                    wrong_length = len(row) != vpr
                except TypeError:
                    # When using an itertools.ichain object or
                    # other generator not supporting __len__,
                    # we set this to False to skip the check.
                    wrong_length = False
                if wrong_length:
                    # Note: row numbers start at 0.
                    raise ProtocolError(
                        "Expected %d values but got %d values, in row %d" %
                        (vpr, len(row), i))
                yield row

        if self.interlace:
            # Interlacing needs random access to the whole image,
            # so materialise all rows into one flat array first.
            fmt = 'BH'[self.bitdepth > 8]
            a = array(fmt, itertools.chain(*check_rows(rows)))
            return self.write_array(outfile, a)

        nrows = self.write_passes(outfile, check_rows(rows))
        if nrows != self.height:
            raise ProtocolError(
                "rows supplied (%d) does not match height (%d)" %
                (nrows, self.height))
        return nrows
    def write_passes(self, outfile, rows):
        """
        Write a PNG image to the output file.

        Most users are expected to find the :meth:`write` or
        :meth:`write_array` method more convenient.

        The rows should be given to this method in the order that
        they appear in the output file.
        For straightlaced images, this is the usual top to bottom ordering.
        For interlaced images the rows should have been interlaced before
        passing them to this function.

        `rows` should be an iterable that yields each row
        (each row being a sequence of values).

        Returns the number of rows written (as reported by
        :meth:`write_packed`).
        """
        # Ensure rows are scaled (to 4-/8-/16-bit),
        # and packed into bytes.
        if self.rescale:
            rows = rescale_rows(rows, self.rescale)

        if self.bitdepth < 8:
            rows = pack_rows(rows, self.bitdepth)
        elif self.bitdepth == 16:
            rows = unpack_rows(rows)

        return self.write_packed(outfile, rows)
    def write_packed(self, outfile, rows):
        """
        Write PNG file to `outfile`.
        `rows` should be an iterator that yields each packed row;
        a packed row being a sequence of packed bytes.

        The rows have a filter byte prefixed and
        are then compressed into one or more IDAT chunks.
        They are not processed any further,
        so if bitdepth is other than 1, 2, 4, 8, 16,
        the pixel values should have been scaled
        before passing them to this method.

        This method does work for interlaced images but it is best avoided.
        For interlaced images, the rows should be
        presented in the order that they appear in the file.

        Returns the number of rows written.
        """
        self.write_preamble(outfile)

        # http://www.w3.org/TR/PNG/#11IDAT
        if self.compression is not None:
            compressor = zlib.compressobj(self.compression)
        else:
            compressor = zlib.compressobj()

        # data accumulates bytes to be compressed for the IDAT chunk;
        # it's compressed when sufficiently large.
        data = bytearray()

        # raise i scope out of the for loop. set to -1, because the for loop
        # sets i to 0 on the first pass
        i = -1
        for i, row in enumerate(rows):
            # Add "None" filter type.
            # Currently, it's essential that this filter type be used
            # for every scanline as
            # we do not mark the first row of a reduced pass image;
            # that means we could accidentally compute
            # the wrong filtered scanline if we used
            # "up", "average", or "paeth" on such a line.
            data.append(0)
            data.extend(row)
            if len(data) > self.chunk_limit:
                compressed = compressor.compress(data)
                if len(compressed):
                    write_chunk(outfile, b'IDAT', compressed)
                data = bytearray()

        compressed = compressor.compress(bytes(data))
        flushed = compressor.flush()
        if len(compressed) or len(flushed):
            write_chunk(outfile, b'IDAT', compressed + flushed)
        # http://www.w3.org/TR/PNG/#11IEND
        write_chunk(outfile, b'IEND')
        # i is the index of the last row; -1 when rows was empty.
        return i + 1
    def write_preamble(self, outfile):
        """
        Write the PNG signature and all chunks that precede the
        image data: ``IHDR`` and, when configured, ``gAMA``,
        ``sBIT``, ``PLTE``/``tRNS``, ``bKGD`` and ``pHYs``.
        """
        # http://www.w3.org/TR/PNG/#5PNG-file-signature

        # This is the first write that is made when
        # writing a PNG file.
        # This one, and only this one, is checked for TypeError,
        # which generally indicates that we are writing bytes
        # into a text stream.
        try:
            outfile.write(signature)
        except TypeError as e:
            raise ProtocolError("PNG must be written to a binary stream") from e

        # http://www.w3.org/TR/PNG/#11IHDR
        write_chunk(outfile, b'IHDR',
                    struct.pack("!2I5B", self.width, self.height,
                                self.bitdepth, self.color_type,
                                0, 0, self.interlace))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11gAMA
        if self.gamma is not None:
            write_chunk(outfile, b'gAMA',
                        struct.pack("!L", int(round(self.gamma * 1e5))))

        # See :chunk:order
        # http://www.w3.org/TR/PNG/#11sBIT
        if self.rescale:
            write_chunk(
                outfile, b'sBIT',
                struct.pack('%dB' % self.planes,
                            * [s[0] for s in self.rescale]))

        # :chunk:order: Without a palette (PLTE chunk),
        # ordering is relatively relaxed.
        # With one, gAMA chunk must precede PLTE chunk
        # which must precede tRNS and bKGD.
        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
        if self.palette:
            p, t = make_palette_chunks(self.palette)
            write_chunk(outfile, b'PLTE', p)
            if t:
                # tRNS chunk is optional;
                # Only needed if palette entries have alpha.
                write_chunk(outfile, b'tRNS', t)

        # http://www.w3.org/TR/PNG/#11tRNS
        if self.transparent is not None:
            if self.greyscale:
                fmt = "!1H"
            else:
                fmt = "!3H"
            write_chunk(outfile, b'tRNS',
                        struct.pack(fmt, *self.transparent))

        # http://www.w3.org/TR/PNG/#11bKGD
        if self.background is not None:
            if self.greyscale:
                fmt = "!1H"
            else:
                fmt = "!3H"
            write_chunk(outfile, b'bKGD',
                        struct.pack(fmt, *self.background))

        # http://www.w3.org/TR/PNG/#11pHYs
        if (self.x_pixels_per_unit is not None and
                self.y_pixels_per_unit is not None):
            tup = (self.x_pixels_per_unit,
                   self.y_pixels_per_unit,
                   int(self.unit_is_meter))
            write_chunk(outfile, b'pHYs', struct.pack("!LLB", *tup))
def write_array(self, outfile, pixels):
"""
Write an array that holds all the image values
as a PNG file on the output file.
See also :meth:`write` method.
"""
if self.interlace:
if type(pixels) != array:
# Coerce to array type
fmt = 'BH'[self.bitdepth > 8]
pixels = array(fmt, pixels)
return self.write_passes(
outfile,
self.array_scanlines_interlace(pixels)
)
else:
return self.write_passes(
outfile,
self.array_scanlines(pixels)
)
def array_scanlines(self, pixels):
"""
Generates rows (each a sequence of values) from
a single array of values.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
    def array_scanlines_interlace(self, pixels):
        """
        Generator for interlaced scanlines from an array.
        `pixels` is the full source image as a single array of values.
        The generator yields each scanline of the reduced passes in turn,
        each scanline being a sequence of values.
        """
        # http://www.w3.org/TR/PNG/#8InterlaceMethods
        # Array type.
        fmt = 'BH'[self.bitdepth > 8]
        # Value per row
        vpr = self.width * self.planes

        # Each iteration generates a scanline starting at (x, y)
        # and consisting of every xstep pixels.
        for lines in adam7_generate(self.width, self.height):
            for x, y, xstep in lines:
                # Pixels per row (of reduced image)
                ppr = int(math.ceil((self.width - x) / float(xstep)))
                # Values per row (of reduced image)
                reduced_row_len = ppr * self.planes
                if xstep == 1:
                    # Easy case: line is a simple slice.
                    offset = y * vpr
                    yield pixels[offset: offset + vpr]
                    continue
                # We have to step by xstep,
                # which we can do one plane at a time
                # using the step in Python slices.
                row = array(fmt)
                # There's no easier way to set the length of an array
                row.extend(pixels[0:reduced_row_len])
                # Gather each plane with an extended slice; the source
                # stride skips xstep pixels (self.planes values each).
                offset = y * vpr + x * self.planes
                end_offset = (y + 1) * vpr
                skip = self.planes * xstep
                for i in range(self.planes):
                    row[i::self.planes] = \
                        pixels[offset + i: end_offset: skip]
                yield row
def write_chunk(outfile, tag, data=b''):
    """
    Write a single PNG chunk to the output file:
    4-byte big-endian length, the 4-byte `tag`, the chunk `data`,
    then a 4-byte CRC computed over tag and data.
    """
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    payload = bytes(data)
    outfile.write(struct.pack("!I", len(payload)))
    outfile.write(tag)
    outfile.write(payload)
    # CRC covers the tag bytes followed by the data bytes.
    crc = zlib.crc32(payload, zlib.crc32(tag)) & 0xffffffff
    outfile.write(struct.pack("!I", crc))
def write_chunks(out, chunks):
    """Create a complete PNG file: write the signature, then every
    chunk in `chunks` (an iterable of (tag, data) pairs) in order.
    """
    out.write(signature)
    for tag_and_data in chunks:
        write_chunk(out, *tag_and_data)
def rescale_rows(rows, rescale):
    """
    Take each row from `rows` (an iterator) and yield a fresh row
    with every value rescaled to the target bit depth.

    `rescale` has one (source_bitdepth, target_bitdepth) pair per
    channel; all pairs must share the same target bit depth.
    """
    # Multiplicative factor per channel, mapping the source range
    # [0, 2**source - 1] onto the target range.
    factors = [float(2 ** target - 1) / float(2 ** source - 1)
               for source, target in rescale]

    # The single shared target depth decides the array typecode.
    targets = set(target for _, target in rescale)
    assert len(targets) == 1
    (target_bitdepth,) = targets
    typecode = 'BH'[target_bitdepth > 8]

    n_channels = len(rescale)
    for row in rows:
        # Value at position j belongs to channel j % n_channels.
        yield array(
            typecode,
            (int(round(factors[j % n_channels] * value))
             for j, value in enumerate(row)))
def pack_rows(rows, bitdepth):
    """Yield each row packed into bytes, several samples per byte.

    Each value must fit in `bitdepth` bits;
    `bitdepth` must be 1, 2, or 4.
    """
    assert bitdepth < 8
    assert 8 % bitdepth == 0

    # How many samples are stored in a single byte.
    samples_per_byte = 8 // bitdepth

    for row in rows:
        padded = bytearray(row)
        # Pad with zero samples up to a whole number of bytes.
        padded.extend(bytes(-len(padded) % samples_per_byte))
        packed = bytearray()
        for start in range(0, len(padded), samples_per_byte):
            # Fold one byte's worth of samples, MSB first.
            byte = 0
            for sample in padded[start:start + samples_per_byte]:
                byte = (byte << bitdepth) | sample
            packed.append(byte)
        yield packed
def unpack_rows(rows):
    """Unpack each row of 16-bit values into a row of bytes.

    Each value becomes two bytes, big-endian (network order).
    """
    for row in rows:
        yield bytearray(b''.join(struct.pack('!H', value) for value in row))
def make_palette_chunks(palette):
    """
    Create the byte sequences for a ``PLTE`` chunk and,
    when any entry carries alpha, a ``tRNS`` chunk.

    Returned as a pair (*plte*, *trns*);
    *trns* is ``None`` when no ``tRNS`` chunk is necessary.
    """
    plte = bytearray()
    trns = bytearray()
    for entry in palette:
        plte.extend(entry[:3])
        # A fourth component, when present, is the entry's alpha.
        trns.extend(entry[3:4])
    return plte, (trns or None)
def check_bitdepth_rescale(
        palette, bitdepth, transparent, alpha, greyscale):
    """
    Returns (bitdepth, rescale) pair.

    `bitdepth` is a tuple with one requested depth per channel.
    The returned bitdepth is a single valid PNG depth; `rescale` is
    either ``None`` (no rescaling needed) or a list of
    (source_bitdepth, target_bitdepth) pairs, one per channel, for
    which an ``sBIT`` chunk should be written.
    """
    if palette:
        # Colour mapped: the depth is the palette-index size and
        # must be a PNG-native depth; no rescaling ever happens.
        if len(bitdepth) != 1:
            raise ProtocolError(
                "with palette, only a single bitdepth may be used")
        (bitdepth, ) = bitdepth
        if bitdepth not in (1, 2, 4, 8):
            raise ProtocolError(
                "with palette, bitdepth must be 1, 2, 4, or 8")
        if transparent is not None:
            raise ProtocolError("transparent and palette not compatible")
        if alpha:
            raise ProtocolError("alpha and palette not compatible")
        if greyscale:
            raise ProtocolError("greyscale and palette not compatible")
        return bitdepth, None

    # No palette, check for sBIT chunk generation.

    if greyscale and not alpha:
        # Single channel, L.
        (bitdepth,) = bitdepth
        if bitdepth in (1, 2, 4, 8, 16):
            return bitdepth, None
        # Round up to the next PNG-native greyscale depth.
        if bitdepth > 8:
            targetbitdepth = 16
        elif bitdepth == 3:
            targetbitdepth = 4
        else:
            assert bitdepth in (5, 6, 7)
            targetbitdepth = 8
        return targetbitdepth, [(bitdepth, targetbitdepth)]

    assert alpha or not greyscale

    depth_set = tuple(set(bitdepth))
    if depth_set in [(8,), (16,)]:
        # No sBIT required.
        (bitdepth, ) = depth_set
        return bitdepth, None
    # Mixed or non-native depths: rescale every channel to 8 or 16.
    targetbitdepth = (8, 16)[max(bitdepth) > 8]
    return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
# Regex for decoding mode string:
# group 1 is the colour model (L, LA, RGB, RGBA; case-insensitive),
# group 2 an optional bit depth after a ';' separator.
RegexModeDecode = re.compile("(LA?|RGBA?);?([0-9]*)", flags=re.IGNORECASE)
def from_array(a, mode=None, info=None):
    """
    Create a PNG :class:`Image` object from a 2-dimensional array.
    One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.
    Unless they are specified using the *info* parameter,
    the PNG's height and width are taken from the array size.
    The first axis is the height; the second axis is the
    ravelled width and channel index.
    The array is treated is a sequence of rows,
    each row being a sequence of values (``width*channels`` in number).
    So an RGB image that is 16 pixels high and 8 wide will
    occupy a 2-dimensional array that is 16x24
    (each row will be 8*3 = 24 sample values).

    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:

    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)

    The mode string can also specify the bit depth
    (overriding how this function normally derives the bit depth,
    see below).
    Appending ``';16'`` to the mode will cause the PNG to be
    16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.

    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.

    The array is expected to be a ``numpy`` array,
    but it can be any suitable Python sequence.
    For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.
    The exact rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension.
    It's slightly more complicated than that because
    an iterator of rows can be used, and it all still works.
    Using an iterator allows data to be streamed efficiently.

    The bit depth of the PNG is normally taken from
    the array element's datatype
    (but if *mode* specifies a bitdepth then that is used instead).
    The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.
    A 1 byte datatype will give a bit depth of 8,
    a 2 byte datatype will give a bit depth of 16.
    If the datatype does not have an implicit size,
    like the above example where it is a plain Python list of lists,
    then a default of 8 is used.

    The *info* parameter is a dictionary that can
    be used to specify metadata (in the same style as
    the arguments to the :class:`png.Writer` class).
    For this function the keys that are useful are:

    height
      overrides the height derived from the array dimensions and
      allows *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype
      (but must match *mode* if that also specifies a bit depth).

    Generally anything specified in the *info* dictionary will
    override any implicit choices that this function would otherwise make,
    but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and
    false when mode is ``'RGB'`` or ``'RGBA'``.
    """
    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    # *info* defaults to None rather than a shared mutable ``{}``
    # default (flake8-bugbear B006); behaviour for callers is unchanged.
    info = dict(info) if info is not None else {}
    # Syntax check mode string.
    match = RegexModeDecode.match(mode)
    if not match:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode, bitdepth = match.groups()
    if bitdepth:
        bitdepth = int(bitdepth)
    # Colour format: 'L' in the mode marks greyscale, 'A' marks alpha.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise ProtocolError("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode
    alpha = 'A' in mode
    if 'alpha' in info:
        if bool(info['alpha']) != alpha:
            raise ProtocolError("info['alpha'] should match mode.")
    info['alpha'] = alpha
    # Get bitdepth from *mode* if possible; it must agree with any
    # bitdepth already present in *info*.
    if bitdepth:
        if info.get("bitdepth") and bitdepth != info['bitdepth']:
            raise ProtocolError(
                "bitdepth (%d) should match bitdepth of info (%d)." %
                (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth
    # Fill in and/or check entries in *info*.
    # Dimensions.
    width, height = check_sizes(
        info.get("size"),
        info.get("width"),
        info.get("height"))
    if width:
        info["width"] = width
    if height:
        info["height"] = height
    if "height" not in info:
        try:
            info['height'] = len(a)
        except TypeError:
            raise ProtocolError(
                "len(a) does not work, supply info['height'] instead.")
    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")
    # In order to work out whether the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a, t = itertools.tee(a)
    row = next(t)
    del t
    testelement = row
    if 'width' not in info:
        width = len(row) // planes
        info['width'] = width
    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception,
            # we now assume that the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth
    for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
        assert thing in info
    return Image(a, info)
# Alias so that refugees from PIL feel more at home.  Not documented.
fromarray = from_array
class Image:
    """A PNG image.

    Instances are created from an array of pixels via
    :meth:`png.from_array`; use :meth:`save` (with a filename) or
    :meth:`write` (with an open file object) to emit the PNG.
    """

    def __init__(self, rows, info):
        """Not public; do not call directly (use :meth:`png.from_array`)."""
        self.rows = rows
        self.info = info

    def save(self, file):
        """Save the image to the named *file*.

        See `.write()` if you already have an open file object.
        The source rows are streamed, so in general this method can
        only be called once per image.
        """
        writer = Writer(**self.info)
        with open(file, 'wb') as out:
            writer.write(out, self.rows)

    def stream(self):
        """Materialise the rows into a list so that they can be
        accessed multiple times, or randomly.
        """
        self.rows = list(self.rows)

    def write(self, file):
        """Write the image to the open file object *file*.

        See `.save()` if you have a filename.  The source rows are
        streamed, so in general this method can only be called once.
        """
        Writer(**self.info).write(file, self.rows)
class Reader:
"""
    PNG decoder in pure Python.
"""
def __init__(self, _guess=None, filename=None, file=None, bytes=None):
"""
The constructor expects exactly one keyword argument.
If you supply a positional argument instead,
it will guess the input type.
Choose from the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``bytes`` or ``bytearray`` with PNG data.
"""
keywords_supplied = (
(_guess is not None) +
(filename is not None) +
(file is not None) +
(bytes is not None))
if keywords_supplied != 1:
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type).
# See preamble method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
bytes = _guess
elif isinstance(_guess, str):
filename = _guess
elif hasattr(_guess, 'read'):
file = _guess
if bytes is not None:
self.file = io.BytesIO(bytes)
elif filename is not None:
self.file = open(filename, "rb")
elif file is not None:
self.file = file
else:
raise ProtocolError("expecting filename, file or bytes array")
    def chunk(self, lenient=False):
        """
        Read the next PNG chunk from the input file;
        returns a (*type*, *data*) tuple.
        *type* is the chunk's type as a byte string
        (all PNG chunk types are 4 bytes long).
        *data* is the chunk's data content, as a byte string.
        If the optional `lenient` argument evaluates to `True`,
        checksum failures will raise warnings rather than exceptions.

        Raises ChunkError when the stream ends or a chunk is truncated.
        """
        self.validate_signature()
        # http://www.w3.org/TR/PNG/#5Chunk-layout
        # Use the chunk header already cached by preamble()/
        # _chunk_len_type(), or read a fresh one.
        if not self.atchunk:
            self.atchunk = self._chunk_len_type()
            if not self.atchunk:
                raise ChunkError("No more chunks.")
        length, type = self.atchunk
        self.atchunk = None
        data = self.file.read(length)
        if len(data) != length:
            raise ChunkError(
                'Chunk %s too short for required %i octets.'
                % (type, length))
        checksum = self.file.read(4)
        if len(checksum) != 4:
            raise ChunkError('Chunk %s too short for checksum.' % type)
        # The CRC covers the chunk type bytes followed by the data.
        verify = zlib.crc32(type)
        verify = zlib.crc32(data, verify)
        verify = struct.pack('!I', verify)
        if checksum != verify:
            # Unpack both CRCs purely to format them for the message.
            (a, ) = struct.unpack('!I', checksum)
            (b, ) = struct.unpack('!I', verify)
            message = ("Checksum error in %s chunk: 0x%08X != 0x%08X."
                       % (type.decode('ascii'), a, b))
            if lenient:
                warnings.warn(message, RuntimeWarning)
            else:
                raise ChunkError(message)
        return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t, v = self.chunk()
yield t, v
if t == b'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
"""
Undo the filter for a scanline.
`scanline` is a sequence of bytes that
does not include the initial filter type byte.
`previous` is decoded previous scanline
(for straightlaced images this is the previous pixel row,
but for interlaced images, it is
the previous scanline in the reduced image,
which in general is not the previous pixel row in the final image).
When there is no previous scanline
(the first row of a straightlaced image,
or the first row in one of the passes in an interlaced image),
then this argument should be ``None``.
The scanline will have the effects of filtering removed;
the result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
result = scanline
if filter_type == 0:
return result
if filter_type not in (1, 2, 3, 4):
raise FormatError(
'Invalid PNG Filter Type. '
'See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = bytearray([0] * len(scanline))
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
fn = (None,
undo_filter_sub,
undo_filter_up,
undo_filter_average,
undo_filter_paeth)[filter_type]
fn(fu, scanline, previous, result)
return result
    def _deinterlace(self, raw):
        """
        Read raw pixel data, undo filters, deinterlace (Adam7), and
        flatten.  Return a single array of values covering the whole
        target image.
        """
        # Values per row (of the target image)
        vpr = self.width * self.planes
        # Values per image
        vpi = vpr * self.height
        # Interleaving writes to the output array randomly
        # (well, not quite), so the entire output array must be in memory.
        # Make a result array, and make it big enough.
        if self.bitdepth > 8:
            a = array('H', [0] * vpi)
        else:
            a = bytearray([0] * vpi)
        source_offset = 0
        # Each element of adam7_generate describes one interlace pass.
        for lines in adam7_generate(self.width, self.height):
            # The previous (reconstructed) scanline.
            # `None` at the beginning of a pass
            # to indicate that there is no previous line.
            recon = None
            for x, y, xstep in lines:
                # Pixels per row (reduced pass image)
                ppr = int(math.ceil((self.width - x) / float(xstep)))
                # Row size in bytes for this pass.
                row_size = int(math.ceil(self.psize * ppr))
                # First byte of the stored row is its filter type.
                filter_type = raw[source_offset]
                source_offset += 1
                scanline = raw[source_offset: source_offset + row_size]
                source_offset += row_size
                recon = self.undo_filter(filter_type, scanline, recon)
                # Convert so that there is one element per pixel value
                flat = self._bytes_to_values(recon, width=ppr)
                if xstep == 1:
                    # Full-width pass: copy the row in one slice.
                    assert x == 0
                    offset = y * vpr
                    a[offset: offset + vpr] = flat
                else:
                    # Reduced pass: scatter each plane with a strided
                    # slice assignment into the target row.
                    offset = y * vpr + x * self.planes
                    end_offset = (y + 1) * vpr
                    skip = self.planes * xstep
                    for i in range(self.planes):
                        a[offset + i: end_offset: skip] = \
                            flat[i:: self.planes]
        return a
def _iter_bytes_to_values(self, byte_rows):
"""
Iterator that yields each scanline;
each scanline being a sequence of values.
`byte_rows` should be an iterator that yields
the bytes of each row in turn.
"""
for row in byte_rows:
yield self._bytes_to_values(row)
def _bytes_to_values(self, bs, width=None):
"""Convert a packed row of bytes into a row of values.
Result will be a freshly allocated object,
not shared with the argument.
"""
if self.bitdepth == 8:
return bytearray(bs)
if self.bitdepth == 16:
return array('H',
struct.unpack('!%dH' % (len(bs) // 2), bs))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8 // self.bitdepth
out = bytearray()
mask = 2**self.bitdepth - 1
shifts = [self.bitdepth * i
for i in reversed(list(range(spb)))]
for o in bs:
out.extend([mask & (o >> i) for i in shifts])
return out[:width]
def _iter_straight_packed(self, byte_blocks):
"""Iterator that undoes the effect of filtering;
yields each row as a sequence of packed bytes.
Assumes input is straightlaced.
`byte_blocks` should be an iterable that yields the raw bytes
in blocks of arbitrary size.
"""
# length of row, in bytes
rb = self.row_bytes
a = bytearray()
# The previous (reconstructed) scanline.
# None indicates first line of image.
recon = None
for some_bytes in byte_blocks:
a.extend(some_bytes)
while len(a) >= rb + 1:
filter_type = a[0]
scanline = a[1: rb + 1]
del a[: rb + 1]
recon = self.undo_filter(filter_type, scanline, recon)
yield recon
if len(a) != 0:
# :file:format We get here with a file format error:
# when the available bytes (after decompressing) do not
# pack into exact rows.
raise FormatError('Wrong size for decompressed IDAT chunk.')
assert len(a) == 0
def validate_signature(self):
"""
If signature (header) has not been read then read and
validate it; otherwise do nothing.
No signature (empty read()) will raise EOFError;
An invalid signature will raise FormatError.
EOFError is raised to make possible the case where
a program can read multiple PNG files from the same stream.
The end of the stream can be distinguished from non-PNG files
or corrupted PNG files.
"""
if self.signature:
return
self.signature = self.file.read(8)
if len(self.signature) == 0:
raise EOFError("End of PNG stream.")
if self.signature != signature:
raise FormatError("PNG file has invalid signature.")
    def preamble(self, lenient=False):
        """
        Extract the image metadata by reading
        the initial part of the PNG file up to
        the start of the ``IDAT`` chunk.
        All the chunks that precede the ``IDAT`` chunk are
        read and either processed for metadata or discarded.
        If the optional `lenient` argument evaluates to `True`,
        checksum failures will raise warnings rather than exceptions.
        """
        self.validate_signature()
        while True:
            if not self.atchunk:
                self.atchunk = self._chunk_len_type()
                if self.atchunk is None:
                    # Reached EOF without seeing any IDAT chunk.
                    raise FormatError('This PNG file has no IDAT chunks.')
            if self.atchunk[1] == b'IDAT':
                # Stop here, leaving the IDAT header cached in
                # self.atchunk so that chunk() picks it up next.
                return
            self.process_chunk(lenient=lenient)
def _chunk_len_type(self):
"""
Reads just enough of the input to
determine the next chunk's length and type;
return a (*length*, *type*) pair where *type* is a byte sequence.
If there are no more chunks, ``None`` is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length, type = struct.unpack('!I4s', x)
if length > 2 ** 31 - 1:
raise FormatError('Chunk %s is too large: %d.' % (type, length))
# Check that all bytes are in valid ASCII range.
# https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-layout
type_bytes = set(bytearray(type))
if not(type_bytes <= set(range(65, 91)) | set(range(97, 123))):
raise FormatError(
'Chunk %r has invalid Chunk Type.'
% list(type))
return length, type
def process_chunk(self, lenient=False):
"""
Process the next chunk and its data.
This only processes the following chunk types:
``IHDR``, ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``.
All other chunk types are ignored.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = '_process_' + type.decode('ascii')
m = getattr(self, method, None)
if m:
m(data)
    def _process_IHDR(self, data):
        # Decode the image header (``IHDR``) chunk and derive the
        # layout attributes used by the rest of the decoder.
        # http://www.w3.org/TR/PNG/#11IHDR
        if len(data) != 13:
            raise FormatError('IHDR chunk has incorrect length.')
        (self.width, self.height, self.bitdepth, self.color_type,
         self.compression, self.filter,
         self.interlace) = struct.unpack("!2I5B", data)
        check_bitdepth_colortype(self.bitdepth, self.color_type)
        if self.compression != 0:
            raise FormatError(
                "Unknown compression method %d" % self.compression)
        if self.filter != 0:
            raise FormatError(
                "Unknown filter method %d,"
                " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
                % self.filter)
        if self.interlace not in (0, 1):
            raise FormatError(
                "Unknown interlace method %d, see "
                "http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods"
                " ."
                % self.interlace)
        # Derived values
        # http://www.w3.org/TR/PNG/#6Colour-values
        colormap = bool(self.color_type & 1)
        greyscale = not(self.color_type & 2)
        alpha = bool(self.color_type & 4)
        # 3 colour planes for truecolour, 1 for greyscale or indexed.
        color_planes = (3, 1)[greyscale or colormap]
        planes = color_planes + alpha
        self.colormap = colormap
        self.greyscale = greyscale
        self.alpha = alpha
        self.color_planes = color_planes
        self.planes = planes
        # Bytes per pixel; may be fractional when bitdepth < 8.
        self.psize = float(self.bitdepth) / float(8) * planes
        if int(self.psize) == self.psize:
            self.psize = int(self.psize)
        self.row_bytes = int(math.ceil(self.width * self.psize))
        # Stores PLTE chunk if present, and is used to check
        # chunk ordering constraints.
        self.plte = None
        # Stores tRNS chunk if present, and is used to check chunk
        # ordering constraints.
        self.trns = None
        # Stores sBIT chunk if present.
        self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2 ** self.bitdepth) * 3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
# http://www.w3.org/TR/PNG/#11tRNS
self.trns = data
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before tRNS chunk.")
else:
if len(data) > len(self.plte) / 3:
# Was warning, but promoted to Error as it
# would otherwise cause pain later on.
raise FormatError("tRNS chunk is too long.")
else:
if self.alpha:
raise FormatError(
"tRNS chunk is not valid with colour type %d." %
self.color_type)
try:
self.transparent = \
struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = \
struct.unpack(fmt, data)
self.unit_is_meter = bool(unit)
    def read(self, lenient=False):
        """
        Read the PNG file and decode it.
        Returns (`width`, `height`, `rows`, `info`).
        May use excessive memory.
        `rows` is a sequence of rows;
        each row is a sequence of values.
        If the optional `lenient` argument evaluates to True,
        checksum failures will raise warnings rather than exceptions.
        """
        def iteridat():
            """Iterator that yields all the ``IDAT`` chunks as strings."""
            while True:
                type, data = self.chunk(lenient=lenient)
                if type == b'IEND':
                    # http://www.w3.org/TR/PNG/#11IEND
                    break
                if type != b'IDAT':
                    # Skip ancillary chunks interleaved with the image
                    # data.
                    continue
                # type == b'IDAT'
                # http://www.w3.org/TR/PNG/#11IDAT
                if self.colormap and not self.plte:
                    warnings.warn("PLTE chunk is required before IDAT chunk")
                yield data
        self.preamble(lenient=lenient)
        raw = decompress(iteridat())
        if self.interlace:
            def rows_from_interlace():
                """Yield each row from an interlaced PNG."""
                # It's important that this iterator doesn't read
                # IDAT chunks until it yields the first row.
                bs = bytearray(itertools.chain(*raw))
                arraycode = 'BH'[self.bitdepth > 8]
                # Like :meth:`group` but
                # producing an array.array object for each row.
                values = self._deinterlace(bs)
                vpr = self.width * self.planes
                for i in range(0, len(values), vpr):
                    row = array(arraycode, values[i:i+vpr])
                    yield row
            rows = rows_from_interlace()
        else:
            rows = self._iter_bytes_to_values(self._iter_straight_packed(raw))
        info = dict()
        # Copy the per-image attributes accumulated while reading
        # the header chunks.
        for attr in 'greyscale alpha planes bitdepth interlace'.split():
            info[attr] = getattr(self, attr)
        info['size'] = (self.width, self.height)
        # Optional attributes are only present when the corresponding
        # chunk was seen.
        for attr in 'gamma transparent background'.split():
            a = getattr(self, attr, None)
            if a is not None:
                info[attr] = a
        if getattr(self, 'x_pixels_per_unit', None):
            info['physical'] = Resolution(self.x_pixels_per_unit,
                                          self.y_pixels_per_unit,
                                          self.unit_is_meter)
        if self.plte:
            info['palette'] = self.palette()
        return self.width, self.height, rows, info
def read_flat(self):
"""
Read a PNG file and decode it into a single array of values.
Returns (*width*, *height*, *values*, *info*).
May use excessive memory.
`values` is a single array.
The :meth:`read` method is more stream-friendly than this,
because it returns a sequence of rows.
"""
x, y, pixel, info = self.read()
arraycode = 'BH'[info['bitdepth'] > 8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, info
    def palette(self, alpha='natural'):
        """
        Returns a palette that is a sequence of 3-tuples or 4-tuples,
        synthesizing it from the ``PLTE`` and ``tRNS`` chunks.
        These chunks should have already been processed (for example,
        by calling the :meth:`preamble` method).
        All the tuples are the same size:
        3-tuples if there is no ``tRNS`` chunk,
        4-tuples when there is a ``tRNS`` chunk.
        Assumes that the image is colour type
        3 and therefore a ``PLTE`` chunk is required.
        If the `alpha` argument is ``'force'`` then an alpha channel is
        always added, forcing the result to be a sequence of 4-tuples.
        """
        if not self.plte:
            raise FormatError(
                "Required PLTE chunk is missing in colour type 3 image.")
        # Group the flat PLTE bytes into (R, G, B) triples.
        plte = group(array('B', self.plte), 3)
        if self.trns or alpha == 'force':
            # tRNS may be shorter than the palette; missing entries
            # are fully opaque (255).
            trns = array('B', self.trns or [])
            trns.extend([255] * (len(plte) - len(trns)))
            # Append the alpha byte to each RGB triple.
            plte = list(map(operator.add, plte, group(trns, 1)))
        return plte
    def asDirect(self):
        """
        Returns the image data as a direct representation of
        an ``x * y * planes`` array.
        This removes the need for callers to deal with
        palettes and transparency themselves.
        Images with a palette (colour type 3) are converted to RGB or RGBA;
        images with transparency (a ``tRNS`` chunk) are converted to
        LA or RGBA as appropriate.
        When returned in this format the pixel values represent
        the colour value directly without needing to refer
        to palettes or transparency information.
        Like the :meth:`read` method this method returns a 4-tuple:
        (*width*, *height*, *rows*, *info*)
        This method normally returns pixel values with
        the bit depth they have in the source image, but
        when the source PNG has an ``sBIT`` chunk it is inspected and
        can reduce the bit depth of the result pixels;
        pixel values will be reduced according to the bit depth
        specified in the ``sBIT`` chunk.
        PNG nerds should note a single result bit depth is
        used for all channels:
        the maximum of the ones specified in the ``sBIT`` chunk.
        An RGB565 image will be rescaled to 6-bit RGB666.
        The *info* dictionary that is returned reflects
        the `direct` format and not the original source image.
        For example, an RGB source image with a ``tRNS`` chunk
        to represent a transparent colour,
        will start with ``planes=3`` and ``alpha=False`` for the
        source image,
        but the *info* dictionary returned by this method
        will have ``planes=4`` and ``alpha=True`` because
        an alpha channel is synthesized and added.
        *rows* is a sequence of rows;
        each row being a sequence of values
        (like the :meth:`read` method).
        All the other aspects of the image data are not changed.
        """
        self.preamble()
        # Simple case, no conversion necessary.
        if not self.colormap and not self.trns and not self.sbit:
            return self.read()
        x, y, pixels, info = self.read()
        if self.colormap:
            # Palette image: expand each index through the palette
            # into RGB (or RGBA, when a tRNS chunk is present).
            info['colormap'] = False
            info['alpha'] = bool(self.trns)
            info['bitdepth'] = 8
            info['planes'] = 3 + bool(self.trns)
            plte = self.palette()
            def iterpal(pixels):
                for row in pixels:
                    row = [plte[x] for x in row]
                    yield array('B', itertools.chain(*row))
            pixels = iterpal(pixels)
        elif self.trns:
            # It would be nice if there was some reasonable way
            # of doing this without generating a whole load of
            # intermediate tuples.  But tuples does seem like the
            # easiest way, with no other way clearly much simpler or
            # much faster.  (Actually, the L to LA conversion could
            # perhaps go faster (all those 1-tuples!), but I still
            # wonder whether the code proliferation is worth it)
            it = self.transparent
            maxval = 2 ** info['bitdepth'] - 1
            planes = info['planes']
            info['alpha'] = True
            info['planes'] += 1
            typecode = 'BH'[info['bitdepth'] > 8]
            def itertrns(pixels):
                for row in pixels:
                    # For each row we group it into pixels, then form a
                    # characterisation vector that says whether each
                    # pixel is opaque or not.  Then we convert
                    # True/False to 0/maxval (by multiplication),
                    # and add it as the extra channel.
                    row = group(row, planes)
                    opa = map(it.__ne__, row)
                    opa = map(maxval.__mul__, opa)
                    opa = list(zip(opa))  # convert to 1-tuples
                    yield array(
                        typecode,
                        itertools.chain(*map(operator.add, row, opa)))
            pixels = itertrns(pixels)
        targetbitdepth = None
        if self.sbit:
            # Honour the sBIT chunk: use one target depth for all
            # channels — the maximum of the per-channel depths.
            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
            targetbitdepth = max(sbit)
            if targetbitdepth > info['bitdepth']:
                raise Error('sBIT chunk %r exceeds bitdepth %d' %
                            (sbit, self.bitdepth))
            if min(sbit) <= 0:
                raise Error('sBIT chunk %r has a 0-entry' % sbit)
        if targetbitdepth:
            # Right-shift every sample down to the sBIT depth.
            shift = info['bitdepth'] - targetbitdepth
            info['bitdepth'] = targetbitdepth
            def itershift(pixels):
                for row in pixels:
                    yield [p >> shift for p in row]
            pixels = itershift(pixels)
        return x, y, pixels, info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width, height, pixels, info = get()
maxval = 2**info['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
info['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield [int(round(x * factor)) for x in row]
if maxval == targetmaxval:
return width, height, pixels, info
else:
return width, height, iterscale(), info
def asRGB8(self):
"""
Return the image data as an RGB pixels with 8-bits per sample.
This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that
they are all between 0 and 255 (8-bit).
In the case where the source image has a bit depth < 8
the transformation preserves all the information;
where the source image has bit depth > 8, then
rescaling to 8-bit values loses precision.
No dithering is performed.
Like :meth:`asRGB`,
an alpha channel in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *rows*, *info*).
*width*, *height*, *info* are as per the :meth:`read` method.
*rows* is the pixel data as a sequence of rows.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""
Return the image data as RGBA pixels with 8-bits per sample.
This method is similar to :meth:`asRGB8` and :meth:`asRGBA`:
The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255.
The alpha channel is synthesized if necessary
(with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""
Return image as RGB pixels.
RGB colour images are passed through unchanged;
greyscales are expanded into RGB triplets
(there is a small speed overhead for doing this).
An alpha channel in the source image will raise an exception.
The return values are as for the :meth:`read` method except that
the *info* reflect the returned pixels, not the source image.
In particular,
for this method ``info['greyscale']`` will be ``False``.
"""
width, height, pixels, info = self.asDirect()
if info['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not info['greyscale']:
return width, height, pixels, info
info['greyscale'] = False
info['planes'] = 3
if info['bitdepth'] > 8:
def newarray():
return array('H', [0])
else:
def newarray():
return bytearray([0])
def iterrgb():
for row in pixels:
a = newarray() * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width, height, iterrgb(), info
def asRGBA(self):
"""
Return image as RGBA pixels.
Greyscales are expanded into RGB triplets;
an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method except that
the *info* reflect the returned pixels, not the source image.
In particular, for this method
``info['greyscale']`` will be ``False``, and
``info['alpha']`` will be ``True``.
"""
width, height, pixels, info = self.asDirect()
if info['alpha'] and not info['greyscale']:
return width, height, pixels, info
typecode = 'BH'[info['bitdepth'] > 8]
maxval = 2**info['bitdepth'] - 1
maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
if info['bitdepth'] > 8:
def newarray():
return array('H', maxbuffer)
else:
def newarray():
return bytearray(maxbuffer)
if info['alpha'] and info['greyscale']:
# LA to RGBA
def convert():
for row in pixels:
# Create a fresh target row, then copy L channel
# into first three target channels, and A channel
# into fourth channel.
a = newarray()
convert_la_to_rgba(row, a)
yield a
elif info['greyscale']:
# L to RGBA
def convert():
for row in pixels:
a = newarray()
convert_l_to_rgba(row, a)
yield a
else:
assert not info['alpha'] and not info['greyscale']
# RGB to RGBA
def convert():
for row in pixels:
a = newarray()
convert_rgb_to_rgba(row, a)
yield a
info['alpha'] = True
info['greyscale'] = False
info['planes'] = 4
return width, height, convert(), info
def decompress(data_blocks):
    """
    Decompress an iterable of compressed data blocks (the payloads of
    the ``IDAT`` chunks), yielding blocks of decompressed bytes.
    """
    # Note: without a max_length argument this does one yield per
    # input block, which is not very incremental.
    engine = zlib.decompressobj()
    for compressed in data_blocks:
        # :todo: add a max_length argument here to limit output size.
        yield bytearray(engine.decompress(compressed))
    # Flush out any state remaining in the decompressor.
    yield bytearray(engine.flush())
def check_bitdepth_colortype(bitdepth, colortype):
    """
    Check that `bitdepth` and `colortype` are both valid,
    and specified in a valid combination.

    Returns ``None`` if valid; raises ``FormatError`` if not.
    """
    if bitdepth not in (1, 2, 4, 8, 16):
        raise FormatError("invalid bit depth %d" % bitdepth)
    if colortype not in (0, 2, 3, 4, 6):
        raise FormatError("invalid colour type %d" % colortype)
    # Check indexed (palettized) images have 8 or fewer bits
    # per pixel; check only indexed or greyscale images have
    # fewer than 8 bits per pixel.
    if colortype & 1 and bitdepth > 8:
        # BUG FIX: the two %d arguments were previously passed as
        # (bitdepth, colortype), swapped relative to the message text.
        raise FormatError(
            "Indexed images (colour type %d) cannot"
            " have bitdepth > 8 (bit depth %d)."
            " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (colortype, bitdepth))
    if bitdepth < 8 and colortype not in (0, 3):
        raise FormatError(
            "Illegal combination of bit depth (%d)"
            " and colour type (%d)."
            " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (bitdepth, colortype))
def is_natural(x):
    """Return True when *x* is a non-negative integer (0, 1, 2, ...)."""
    try:
        if int(x) != x:
            # Has a fractional part (or compares unequal to its int form).
            return False
    except (TypeError, ValueError):
        # Not convertible to an integer at all.
        return False
    return x >= 0
def undo_filter_sub(filter_unit, scanline, previous, result):
    """Undo the PNG 'sub' filter, in place on *result*.

    Each byte from index *filter_unit* onwards is reconstructed by
    adding the reconstructed byte one filter unit to its left, modulo
    256.  The first *filter_unit* bytes have no left neighbour and are
    assumed to be already present in *result*.
    """
    left = 0
    for cur in range(filter_unit, len(result)):
        result[cur] = (scanline[cur] + result[left]) & 0xff
        left += 1
def undo_filter_up(filter_unit, scanline, previous, result):
    """Undo the PNG 'up' filter, in place on *result*.

    Each byte is the filtered byte plus the reconstructed byte directly
    above it (from *previous*), modulo 256.
    """
    for idx, _ in enumerate(result):
        result[idx] = (scanline[idx] + previous[idx]) & 0xff
def undo_filter_average(filter_unit, scanline, previous, result):
    """Undo the PNG 'average' filter, in place on *result*.

    Each reconstructed byte is the filtered byte plus the floor of the
    average of the byte one filter unit to the left (``a``) and the byte
    directly above (``b``), modulo 256.  Bytes off the left edge count
    as 0.  (Docstring fix: this previously said "Undo up filter.")
    """
    ai = -filter_unit
    for i in range(len(result)):
        x = scanline[i]
        if ai < 0:
            # No reconstructed pixel to the left yet; contributes zero.
            a = 0
        else:
            a = result[ai]
        b = previous[i]
        # >> 1 implements the floor division by 2 required by the spec.
        result[i] = (x + ((a + b) >> 1)) & 0xff
        ai += 1
def undo_filter_paeth(filter_unit, scanline, previous, result):
    """Undo the PNG Paeth filter, in place on *result*.

    Each byte adds the Paeth predictor: whichever of the left, above, or
    upper-left neighbour is closest to ``left + above - upper_left``,
    with ties broken in that order.  Off-edge neighbours count as 0.
    """
    # Index of the byte one filter unit to the left; also indexes the
    # upper-left neighbour in *previous*.  Negative means off the edge.
    left_i = -filter_unit
    for i in range(len(result)):
        filt = scanline[i]
        up = previous[i]
        if left_i < 0:
            left = upleft = 0
        else:
            left = result[left_i]
            upleft = previous[left_i]
        estimate = left + up - upleft
        d_left = abs(estimate - left)
        d_up = abs(estimate - up)
        d_upleft = abs(estimate - upleft)
        if d_left <= d_up and d_left <= d_upleft:
            predictor = left
        elif d_up <= d_upleft:
            predictor = up
        else:
            predictor = upleft
        result[i] = (filt + predictor) & 0xff
        left_i += 1
def convert_la_to_rgba(row, result):
    """Expand a greyscale+alpha row into the RGBA row *result*."""
    # Replicate the single luminance channel into R, G and B.
    for channel in (0, 1, 2):
        result[channel::4] = row[0::2]
    # Alpha copies straight across.
    result[3::4] = row[1::2]
def convert_l_to_rgba(row, result):
    """
    Expand a greyscale row into the RGBA row *result*.

    The alpha channel in *result* is assumed to be already
    correctly initialized; only R, G and B are written.
    """
    for channel in (0, 1, 2):
        result[channel::4] = row
def convert_rgb_to_rgba(row, result):
    """
    Copy an RGB row into the RGBA row *result*.

    The alpha channel in *result* is assumed to be already
    correctly initialized; only R, G and B are written.
    """
    for channel in (0, 1, 2):
        result[channel::4] = row[channel::3]
# Only reason to include this in this module is that
# several utilities need it, and it is small.
def binary_stdin():
    """
    A sys.stdin that returns bytes.
    """
    # .buffer is the binary stream underlying the text-mode sys.stdin.
    return sys.stdin.buffer
def binary_stdout():
    """
    A sys.stdout that accepts bytes.
    """
    # .buffer is the binary stream underlying the text-mode sys.stdout.
    stdout = sys.stdout.buffer
    # On Windows the C runtime file orientation needs changing:
    # switch stdout to binary mode so b'\n' is not translated to b'\r\n'.
    if sys.platform == "win32":
        import msvcrt
        import os
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    return stdout
def cli_open(path):
    """Open *path* for binary reading; "-" means standard input."""
    if path == "-":
        return binary_stdin()
    return open(path, "rb")
def main(argv):
    """
    Run command line PNG.
    Which reports version.
    """
    # NOTE(review): argv is accepted but unused; kept for the
    # conventional main(argv) entry-point signature.
    print(__version__, __file__)
if __name__ == '__main__':
    try:
        main(sys.argv)
    except Error as e:
        # Report library errors to stderr without a traceback.
        print(e, file=sys.stderr)
| 82,781 | Python | 33.884956 | 85 | 0.576727 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/six.py | # Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.16.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
if PY34:
from importlib.util import spec_from_loader
else:
spec_from_loader = None
def _add_doc(func, doc):
    """Add documentation to a function."""
    # Used to attach a shared docstring to whichever version-specific
    # implementation of a helper was selected at import time.
    func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__('a.b') returns the top package 'a'; the leaf module
    # 'a.b' is fetched from sys.modules, where the import registered it.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves a moved object on first access.

    Subclasses implement ``_resolve()``; the resolved value is cached on
    the owning instance and this descriptor removes itself from the
    class so resolution happens at most once.
    """
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def find_spec(self, fullname, path, target=None):
if fullname in self.known_modules:
return spec_from_loader(fullname, self)
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Submodules are the lazy urllib_* modules registered with _importer above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")
    def __dir__(self):
        # Advertise the urllib submodules for dir()/tab-completion.
        return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # *move* is a MovedAttribute or MovedModule; installing it on
    # _MovedItems makes it available as six.moves.<name>.
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The lazy descriptor may already have replaced itself with a
        # plain attribute on the moves module instance; try that next.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
_assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    # Dispatch to the version-appropriate unittest method name
    # (assertItemsEqual on Python 2, assertCountEqual on Python 3).
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    # Py2: assertRaisesRegexp; Py3: assertRaisesRegex.
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    # Py2: assertRegexpMatches; Py3: assertRegex.
    return getattr(self, _assertRegex)(*args, **kwargs)
def assertNotRegex(self, *args, **kwargs):
    # Py2: assertNotRegexpMatches; Py3: assertNotRegex.
    return getattr(self, _assertNotRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
# This does exactly the same what the :func:`py3:functools.update_wrapper`
# function does on Python versions after 3.2. It sets the ``__wrapped__``
# attribute on ``wrapper`` object and it doesn't raise an error if any of
# the attributes mentioned in ``assigned`` and ``updated`` are missing on
# ``wrapped`` object.
def _update_wrapper(wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
continue
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
wrapper.__wrapped__ = wrapped
return wrapper
_update_wrapper.__doc__ = functools.update_wrapper.__doc__
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
return functools.partial(_update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
wraps.__doc__ = functools.wraps.__doc__
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            if sys.version_info[:2] >= (3, 7):
                # This version introduced PEP 560 that requires a bit
                # of extra care (we mimic what is done by __build_class__).
                resolved_bases = types.resolve_bases(bases)
                if resolved_bases is not bases:
                    d['__orig_bases__'] = bases
            else:
                resolved_bases = bases
            return meta(name, resolved_bases, d)
        @classmethod
        def __prepare__(cls, name, this_bases):
            # Delegate namespace preparation to the real metaclass.
            return meta.__prepare__(name, bases)
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Recreate the class through *metaclass*, carrying over its
        # namespace but dropping slots-backed and auto-created entries.
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                # Slot descriptors will be recreated by the new class.
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        if hasattr(cls, '__qualname__'):
            orig_vars['__qualname__'] = cls.__qualname__
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
    """Coerce **s** to six.binary_type.
    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`
    For Python 3:
      - `str` -> encoded to `bytes`
      - `bytes` -> `bytes`
    """
    # text_type and binary_type are disjoint on both Python versions,
    # so the order of these checks does not affect behaviour.
    if isinstance(s, text_type):
        return s.encode(encoding, errors)
    if isinstance(s, binary_type):
        return s
    raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to `str`.
    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`
    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`
    """
    # Optimization: Fast return for the common case.
    if type(s) is str:
        return s
    if PY2 and isinstance(s, text_type):
        return s.encode(encoding, errors)
    elif PY3 and isinstance(s, binary_type):
        return s.decode(encoding, errors)
    elif not isinstance(s, (text_type, binary_type)):
        raise TypeError("not expecting type '%s'" % type(s))
    # Reaching here, s is already the native str type (or a subclass).
    return s
def ensure_text(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to six.text_type.
    For Python 2:
      - `unicode` -> `unicode`
      - `str` -> `unicode`
    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`
    """
    # text_type and binary_type are disjoint on both Python versions,
    # so the order of these checks does not affect behaviour.
    if isinstance(s, text_type):
        return s
    if isinstance(s, binary_type):
        return s.decode(encoding, errors)
    raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
    """
    A class decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    # Guard clause: on Python 3 the class is returned untouched.
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Shift the text-returning __str__ to __unicode__ and install a
    # byte-returning __str__, per the Python 2 protocol.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    # PEP 451: an empty submodule_search_locations marks this as a package
    # with no on-disk subpackages.
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer  # don't leak loop variables into the module namespace
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| 34,549 | Python | 33.584585 | 118 | 0.624649 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna_ssl.py | import ssl
import sys
import idna
__version__ = '1.1.0'
real_match_hostname = ssl.match_hostname
PY_370 = sys.version_info >= (3, 7, 0)
def patched_match_hostname(cert, hostname):
    """IDNA-aware wrapper around the original ``ssl.match_hostname``.

    Encodes *hostname* to its ASCII (IDNA) form before delegating the
    certificate check to ``real_match_hostname``.
    """
    try:
        ascii_hostname = idna.encode(hostname, uts46=True).decode('ascii')
    except UnicodeError:
        # Fall back to the builtin 'idna' codec for names the idna
        # package refuses to encode.
        ascii_hostname = hostname.encode('idna').decode('ascii')
    return real_match_hostname(cert, ascii_hostname)
def patch_match_hostname():
    """Install the IDNA-aware ``match_hostname`` into the ``ssl`` module.

    No-op on Python >= 3.7 or when the patch is already in place.
    """
    if PY_370 or hasattr(ssl.match_hostname, 'patched'):
        return
    ssl.match_hostname = patched_match_hostname
    # Marker attribute so repeated calls (and reset_match_hostname) can
    # detect that the patch is active.
    ssl.match_hostname.patched = True
def reset_match_hostname():
    """Restore the original ``ssl.match_hostname`` if it was patched.

    No-op on Python >= 3.7 or when no patch is installed.
    """
    if PY_370 or not hasattr(ssl.match_hostname, 'patched'):
        return
    ssl.match_hostname = real_match_hostname
| 779 | Python | 18.02439 | 68 | 0.654685 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/typing_extensions.py | import abc
import collections
import contextlib
import sys
import typing
import collections.abc as collections_abc
import operator
# These are used by Protocol implementation
# We use internal typing helpers here, but this significantly reduces
# code duplication. (Also this is only until Protocol is in typing.)
from typing import Generic, Callable, TypeVar, Tuple
# After PEP 560, internal typing API was substantially reworked.
# This is especially important for Protocol class which uses internal APIs
# quite extensivelly.
PEP_560 = sys.version_info[:3] >= (3, 7, 0)
if PEP_560:
GenericMeta = TypingMeta = type
from typing import _GenericAlias
else:
from typing import GenericMeta, TypingMeta
# Probe for private typing helpers that changed across CPython releases;
# each missing name gets a local no-op/placeholder fallback.
OLD_GENERICS = False
try:
    from typing import _type_vars, _next_in_mro, _type_check
except ImportError:
    # Pre-3.5.3 typing lacks these helpers; compatibility versions are
    # defined later under the OLD_GENERICS flag.
    OLD_GENERICS = True
try:
    from typing import _subs_tree  # noqa
    SUBS_TREE = True
except ImportError:
    SUBS_TREE = False
try:
    from typing import _tp_cache
except ImportError:
    # Fallback: caching decorator degrades to the identity function.
    def _tp_cache(x):
        return x
try:
    from typing import _TypingEllipsis, _TypingEmpty
except ImportError:
    # Local stand-ins for typing's internal sentinel classes.
    class _TypingEllipsis:
        pass
    class _TypingEmpty:
        pass
# The two functions below are copies of typing internal helpers.
# They are needed by _ProtocolMeta
def _no_slots_copy(dct):
dict_copy = dict(dct)
if '__slots__' in dict_copy:
for slot in dict_copy['__slots__']:
dict_copy.pop(slot, None)
return dict_copy
def _check_generic(cls, parameters):
if not cls.__parameters__:
raise TypeError("%s is not a generic class" % repr(cls))
alen = len(parameters)
elen = len(cls.__parameters__)
if alen != elen:
raise TypeError("Too %s parameters for %s; actual %s, expected %s" %
("many" if alen > elen else "few", repr(cls), alen, elen))
if hasattr(typing, '_generic_new'):
_generic_new = typing._generic_new
else:
# Note: The '_generic_new(...)' function is used as a part of the
# process of creating a generic type and was added to the typing module
# as of Python 3.5.3.
#
# We've defined '_generic_new(...)' below to exactly match the behavior
# implemented in older versions of 'typing' bundled with Python 3.5.0 to
# 3.5.2. This helps eliminate redundancy when defining collection types
# like 'Deque' later.
#
# See https://github.com/python/typing/pull/308 for more details -- in
# particular, compare and contrast the definition of types like
# 'typing.List' before and after the merge.
def _generic_new(base_cls, cls, *args, **kwargs):
return base_cls.__new__(cls, *args, **kwargs)
# See https://github.com/python/typing/pull/439
if hasattr(typing, '_geqv'):
from typing import _geqv
_geqv_defined = True
else:
_geqv = None
_geqv_defined = False
# On 3.6+ reuse the C-accelerated helper from _collections_abc; otherwise
# provide an equivalent pure-Python implementation.
if sys.version_info[:2] >= (3, 6):
    import _collections_abc
    _check_methods_in_mro = _collections_abc._check_methods
else:
    def _check_methods_in_mro(C, *methods):
        """Return True if every name in *methods* is defined somewhere in
        C's MRO, NotImplemented otherwise (the ABC __subclasshook__
        convention)."""
        mro = C.__mro__
        for method in methods:
            for B in mro:
                if method in B.__dict__:
                    # A None entry explicitly disavows the method
                    # (https://bugs.python.org/issue25958 convention).
                    if B.__dict__[method] is None:
                        return NotImplemented
                    break
            else:
                # Name not found anywhere in the MRO.
                return NotImplemented
        return True
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'ClassVar',
'Concatenate',
'Final',
'ParamSpec',
'Type',
# ABCs (from collections.abc).
# The following are added depending on presence
# of their non-generic counterparts in stdlib:
# 'Awaitable',
# 'AsyncIterator',
# 'AsyncIterable',
# 'Coroutine',
# 'AsyncGenerator',
# 'AsyncContextManager',
# 'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsIndex',
# One-off things.
'final',
'IntVar',
'Literal',
'NewType',
'overload',
'Text',
'TypeAlias',
'TypeGuard',
'TYPE_CHECKING',
]
# Annotated relies on substitution trees of pep 560. It will not work for
# versions of typing older than 3.5.3
HAVE_ANNOTATED = PEP_560 or SUBS_TREE
if PEP_560:
__all__.extend(["get_args", "get_origin", "get_type_hints"])
if HAVE_ANNOTATED:
__all__.append("Annotated")
# Protocols are hard to backport to the original version of typing 3.5.0
HAVE_PROTOCOLS = sys.version_info[:3] != (3, 5, 0)
if HAVE_PROTOCOLS:
__all__.extend(['Protocol', 'runtime', 'runtime_checkable'])
# TODO
if hasattr(typing, 'NoReturn'):
NoReturn = typing.NoReturn
elif hasattr(typing, '_FinalTypingBase'):
class _NoReturn(typing._FinalTypingBase, _root=True):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("NoReturn cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("NoReturn cannot be used with issubclass().")
NoReturn = _NoReturn(_root=True)
else:
class _NoReturnMeta(typing.TypingMeta):
"""Metaclass for NoReturn"""
def __new__(cls, name, bases, namespace, _root=False):
return super().__new__(cls, name, bases, namespace, _root=_root)
def __instancecheck__(self, obj):
raise TypeError("NoReturn cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("NoReturn cannot be used with issubclass().")
class NoReturn(typing.Final, metaclass=_NoReturnMeta, _root=True):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
__slots__ = ()
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = typing.TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = typing.TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
if hasattr(typing, 'ClassVar'):
ClassVar = typing.ClassVar
elif hasattr(typing, '_FinalTypingBase'):
class _ClassVar(typing._FinalTypingBase, _root=True):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
'{} accepts only single type.'.format(cls.__name__[1:])),
_root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _ClassVar):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
ClassVar = _ClassVar(_root=True)
else:
class _ClassVarMeta(typing.TypingMeta):
"""Metaclass for ClassVar"""
def __new__(cls, name, bases, namespace, tp=None, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
if tp is not None:
self.__type__ = tp
return self
def __instancecheck__(self, obj):
raise TypeError("ClassVar cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("ClassVar cannot be used with issubclass().")
def __getitem__(self, item):
cls = type(self)
if self.__type__ is not None:
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
param = typing._type_check(
item,
'{} accepts only single type.'.format(cls.__name__[1:]))
return cls(self.__name__, self.__bases__,
dict(self.__dict__), tp=param, _root=True)
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(self.__name__, self.__bases__,
dict(self.__dict__), tp=self.__type__,
_root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, ClassVar):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
class ClassVar(typing.Final, metaclass=_ClassVarMeta, _root=True):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
__type__ = None
# On older versions of typing there is an internal class named "Final".
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
elif sys.version_info[:2] >= (3, 7):
class _FinalForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
'{} accepts only single type'.format(self._name))
return _GenericAlias(self, (item,))
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
elif hasattr(typing, '_FinalTypingBase'):
class _Final(typing._FinalTypingBase, _root=True):
"""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
'{} accepts only single type.'.format(cls.__name__[1:])),
_root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _Final):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
Final = _Final(_root=True)
else:
class _FinalMeta(typing.TypingMeta):
"""Metaclass for Final"""
def __new__(cls, name, bases, namespace, tp=None, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
if tp is not None:
self.__type__ = tp
return self
def __instancecheck__(self, obj):
raise TypeError("Final cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Final cannot be used with issubclass().")
def __getitem__(self, item):
cls = type(self)
if self.__type__ is not None:
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
param = typing._type_check(
item,
'{} accepts only single type.'.format(cls.__name__[1:]))
return cls(self.__name__, self.__bases__,
dict(self.__dict__), tp=param, _root=True)
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(self.__name__, self.__bases__,
dict(self.__dict__), tp=self.__type__,
_root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, Final):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
class Final(typing.Final, metaclass=_FinalMeta, _root=True):
"""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
__type__ = None
if hasattr(typing, 'final'):
final = typing.final
else:
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties.
"""
return f
def IntVar(name):
    """Return an unconstrained TypeVar called *name* (legacy helper)."""
    type_var = TypeVar(name)
    return type_var
if hasattr(typing, 'Literal'):
Literal = typing.Literal
elif sys.version_info[:2] >= (3, 7):
class _LiteralForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return _GenericAlias(self, parameters)
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
elif hasattr(typing, '_FinalTypingBase'):
class _Literal(typing._FinalTypingBase, _root=True):
"""A type that can be used to indicate to type checkers that the
corresponding value has a value literally equivalent to the
provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to the
value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime checking
verifying that the parameter is actually a value instead of a type.
"""
__slots__ = ('__values__',)
def __init__(self, values=None, **kwds):
self.__values__ = values
def __getitem__(self, values):
cls = type(self)
if self.__values__ is None:
if not isinstance(values, tuple):
values = (values,)
return cls(values, _root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
return self
def __repr__(self):
r = super().__repr__()
if self.__values__ is not None:
r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__)))
return r
def __hash__(self):
return hash((type(self).__name__, self.__values__))
def __eq__(self, other):
if not isinstance(other, _Literal):
return NotImplemented
if self.__values__ is not None:
return self.__values__ == other.__values__
return self is other
Literal = _Literal(_root=True)
else:
class _LiteralMeta(typing.TypingMeta):
"""Metaclass for Literal"""
def __new__(cls, name, bases, namespace, values=None, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
if values is not None:
self.__values__ = values
return self
def __instancecheck__(self, obj):
raise TypeError("Literal cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Literal cannot be used with issubclass().")
def __getitem__(self, item):
cls = type(self)
if self.__values__ is not None:
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
if not isinstance(item, tuple):
item = (item,)
return cls(self.__name__, self.__bases__,
dict(self.__dict__), values=item, _root=True)
def _eval_type(self, globalns, localns):
return self
def __repr__(self):
r = super().__repr__()
if self.__values__ is not None:
r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__)))
return r
def __hash__(self):
return hash((type(self).__name__, self.__values__))
def __eq__(self, other):
if not isinstance(other, Literal):
return NotImplemented
if self.__values__ is not None:
return self.__values__ == other.__values__
return self is other
class Literal(typing.Final, metaclass=_LiteralMeta, _root=True):
"""A type that can be used to indicate to type checkers that the
corresponding value has a value literally equivalent to the
provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to the
value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime checking
verifying that the parameter is actually a value instead of a type.
"""
__values__ = None
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
    """Decorator for overloaded functions/methods.

    In a stub file, place two or more stub definitions for the same
    function in a row, each decorated with @overload:

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...

    In a regular .py file, do the same but follow the stubs with exactly
    one implementation that is *not* decorated with @overload:

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...
      def utf8(value):
          # implementation goes here
    """
    # The decorated stub body is discarded; every overload maps to one
    # shared placeholder that raises if invoked directly.
    return _overload_dummy
# This is not a real generic class. Don't use outside annotations.
if hasattr(typing, 'Type'):
Type = typing.Type
else:
# Internal type variable used for Type[].
CT_co = typing.TypeVar('CT_co', covariant=True, bound=type)
class Type(typing.Generic[CT_co], extra=type):
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
__slots__ = ()
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
def _define_guard(type_name):
    """
    Returns True if the given type isn't defined in typing but
    is defined in collections_abc.
    Adds the type to __all__ if the collection is found in either
    typing or collection_abc.
    """
    found_in_typing = hasattr(typing, type_name)
    found_in_abc = hasattr(collections_abc, type_name)
    if found_in_typing:
        # typing already provides it: re-export and skip the local shim.
        __all__.append(type_name)
        globals()[type_name] = getattr(typing, type_name)
        return False
    if found_in_abc:
        # Only collections.abc has it: the caller should define a shim.
        __all__.append(type_name)
        return True
    return False
class _ExtensionsGenericMeta(GenericMeta):
def __subclasscheck__(self, subclass):
"""This mimics a more modern GenericMeta.__subclasscheck__() logic
(that does not have problems with recursion) to work around interactions
between collections, typing, and typing_extensions on older
versions of Python, see https://github.com/python/typing/issues/501.
"""
if sys.version_info[:3] >= (3, 5, 3) or sys.version_info[:3] < (3, 5, 0):
if self.__origin__ is not None:
if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
raise TypeError("Parameterized generics cannot be used with class "
"or instance checks")
return False
if not self.__extra__:
return super().__subclasscheck__(subclass)
res = self.__extra__.__subclasshook__(subclass)
if res is not NotImplemented:
return res
if self.__extra__ in subclass.__mro__:
return True
for scls in self.__extra__.__subclasses__():
if isinstance(scls, GenericMeta):
continue
if issubclass(subclass, scls):
return True
return False
if _define_guard('Awaitable'):
class Awaitable(typing.Generic[T_co], metaclass=_ExtensionsGenericMeta,
extra=collections_abc.Awaitable):
__slots__ = ()
if _define_guard('Coroutine'):
class Coroutine(Awaitable[V_co], typing.Generic[T_co, T_contra, V_co],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.Coroutine):
__slots__ = ()
if _define_guard('AsyncIterable'):
class AsyncIterable(typing.Generic[T_co],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.AsyncIterable):
__slots__ = ()
if _define_guard('AsyncIterator'):
class AsyncIterator(AsyncIterable[T_co],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.AsyncIterator):
__slots__ = ()
if hasattr(typing, 'Deque'):
Deque = typing.Deque
elif _geqv_defined:
class Deque(collections.deque, typing.MutableSequence[T],
metaclass=_ExtensionsGenericMeta,
extra=collections.deque):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Deque):
return collections.deque(*args, **kwds)
return _generic_new(collections.deque, cls, *args, **kwds)
else:
class Deque(collections.deque, typing.MutableSequence[T],
metaclass=_ExtensionsGenericMeta,
extra=collections.deque):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is Deque:
return collections.deque(*args, **kwds)
return _generic_new(collections.deque, cls, *args, **kwds)
if hasattr(typing, 'ContextManager'):
ContextManager = typing.ContextManager
elif hasattr(contextlib, 'AbstractContextManager'):
class ContextManager(typing.Generic[T_co],
metaclass=_ExtensionsGenericMeta,
extra=contextlib.AbstractContextManager):
__slots__ = ()
else:
class ContextManager(typing.Generic[T_co]):
__slots__ = ()
def __enter__(self):
return self
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
return None
@classmethod
def __subclasshook__(cls, C):
if cls is ContextManager:
# In Python 3.6+, it is possible to set a method to None to
# explicitly indicate that the class does not implement an ABC
# (https://bugs.python.org/issue25958), but we do not support
# that pattern here because this fallback class is only used
# in Python 3.5 and earlier.
if (any("__enter__" in B.__dict__ for B in C.__mro__) and
any("__exit__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
if hasattr(typing, 'AsyncContextManager'):
AsyncContextManager = typing.AsyncContextManager
__all__.append('AsyncContextManager')
elif hasattr(contextlib, 'AbstractAsyncContextManager'):
class AsyncContextManager(typing.Generic[T_co],
metaclass=_ExtensionsGenericMeta,
extra=contextlib.AbstractAsyncContextManager):
__slots__ = ()
__all__.append('AsyncContextManager')
elif sys.version_info[:2] >= (3, 5):
exec("""
class AsyncContextManager(typing.Generic[T_co]):
__slots__ = ()
async def __aenter__(self):
return self
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_value, traceback):
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncContextManager:
return _check_methods_in_mro(C, "__aenter__", "__aexit__")
return NotImplemented
__all__.append('AsyncContextManager')
""")
if hasattr(typing, 'DefaultDict'):
DefaultDict = typing.DefaultDict
elif _geqv_defined:
class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.defaultdict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, DefaultDict):
return collections.defaultdict(*args, **kwds)
return _generic_new(collections.defaultdict, cls, *args, **kwds)
else:
class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.defaultdict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is DefaultDict:
return collections.defaultdict(*args, **kwds)
return _generic_new(collections.defaultdict, cls, *args, **kwds)
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
elif _geqv_defined:
class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.OrderedDict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, OrderedDict):
return collections.OrderedDict(*args, **kwds)
return _generic_new(collections.OrderedDict, cls, *args, **kwds)
else:
class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.OrderedDict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is OrderedDict:
return collections.OrderedDict(*args, **kwds)
return _generic_new(collections.OrderedDict, cls, *args, **kwds)
if hasattr(typing, 'Counter'):
Counter = typing.Counter
elif (3, 5, 0) <= sys.version_info[:3] <= (3, 5, 1):
assert _geqv_defined
_TInt = typing.TypeVar('_TInt')
class _CounterMeta(typing.GenericMeta):
"""Metaclass for Counter"""
def __getitem__(self, item):
return super().__getitem__((item, int))
class Counter(collections.Counter,
typing.Dict[T, int],
metaclass=_CounterMeta,
extra=collections.Counter):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Counter):
return collections.Counter(*args, **kwds)
return _generic_new(collections.Counter, cls, *args, **kwds)
elif _geqv_defined:
class Counter(collections.Counter,
typing.Dict[T, int],
metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Counter):
return collections.Counter(*args, **kwds)
return _generic_new(collections.Counter, cls, *args, **kwds)
else:
class Counter(collections.Counter,
typing.Dict[T, int],
metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is Counter:
return collections.Counter(*args, **kwds)
return _generic_new(collections.Counter, cls, *args, **kwds)
if hasattr(typing, 'ChainMap'):
ChainMap = typing.ChainMap
__all__.append('ChainMap')
elif hasattr(collections, 'ChainMap'):
# ChainMap only exists in 3.3+
if _geqv_defined:
class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.ChainMap):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, ChainMap):
return collections.ChainMap(*args, **kwds)
return _generic_new(collections.ChainMap, cls, *args, **kwds)
else:
class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.ChainMap):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is ChainMap:
return collections.ChainMap(*args, **kwds)
return _generic_new(collections.ChainMap, cls, *args, **kwds)
__all__.append('ChainMap')
if _define_guard('AsyncGenerator'):
class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.AsyncGenerator):
__slots__ = ()
if hasattr(typing, 'NewType'):
NewType = typing.NewType
else:
def NewType(name, tp):
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy function that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
def new_type(x):
return x
new_type.__name__ = name
new_type.__supertype__ = tp
return new_type
# Re-export typing.Text where available; otherwise it is simply str.
if hasattr(typing, 'Text'):
    Text = typing.Text
else:
    Text = str
if hasattr(typing, 'TYPE_CHECKING'):
    TYPE_CHECKING = typing.TYPE_CHECKING
else:
    # Constant that's True when type checking, but False here.
    TYPE_CHECKING = False
def _gorg(cls):
    """Return the originating (unsubscripted) generic class of *cls*.

    This function exists for compatibility with old typing versions,
    which expose the origin either as a `_gorg` attribute or via the
    `__origin__` chain.
    """
    assert isinstance(cls, GenericMeta)
    if hasattr(cls, '_gorg'):
        return cls._gorg
    origin = cls
    # Walk the __origin__ chain back to the unsubscripted class.
    while origin.__origin__ is not None:
        origin = origin.__origin__
    return origin
if OLD_GENERICS:
    def _next_in_mro(cls):  # noqa
        """This function exists for compatibility with old typing versions."""
        # Return the class that immediately follows the *last* occurrence of
        # Generic in the MRO; default to object when Generic is not a base.
        mro = cls.__mro__
        next_in_mro = object
        for position, candidate in enumerate(mro[:-1]):
            if isinstance(candidate, GenericMeta) and _gorg(candidate) is Generic:
                next_in_mro = mro[position + 1]
        return next_in_mro
# The only non-protocol bases a Protocol subclass may inherit from directly:
# these collections.abc names are themselves structural "one-off" protocols.
_PROTO_WHITELIST = ['Callable', 'Awaitable',
                    'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
                    'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
                    'ContextManager', 'AsyncContextManager']
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if (not attr.startswith('_abc_') and attr not in (
'__abstractmethods__', '__annotations__', '__weakref__',
'_is_protocol', '_is_runtime_protocol', '__dict__',
'__args__', '__slots__',
'__next_in_mro__', '__parameters__', '__origin__',
'__orig_bases__', '__extra__', '__tree_hash__',
'__doc__', '__subclasshook__', '__init__', '__new__',
'__module__', '_MutableMapping__marker', '_gorg')):
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
    # PEP 544 prohibits issubclass() on protocols that have non-method
    # members; this predicate backs that restriction.
    for attr in _get_protocol_attrs(cls):
        if not callable(getattr(cls, attr, None)):
            return False
    return True
if hasattr(typing, 'Protocol'):
    # Python 3.8+: the stdlib implementation is used as-is.
    Protocol = typing.Protocol
elif HAVE_PROTOCOLS and not PEP_560:
    # Pre-3.7 backport built on the old GenericMeta machinery.
    def _no_init(self, *args, **kwargs):
        # Installed as __init__ on protocol classes so they cannot be
        # instantiated directly.
        if type(self)._is_protocol:
            raise TypeError('Protocols cannot be instantiated')
    class _ProtocolMeta(GenericMeta):
        """Internal metaclass for Protocol.
        This exists so Protocol classes can be generic without deriving
        from Generic.
        """
        if not OLD_GENERICS:
            def __new__(cls, name, bases, namespace,
                        tvars=None, args=None, origin=None, extra=None, orig_bases=None):
                # This is just a version copied from GenericMeta.__new__ that
                # includes "Protocol" special treatment. (Comments removed for brevity.)
                assert extra is None  # Protocols should not have extra
                if tvars is not None:
                    assert origin is not None
                    assert all(isinstance(t, TypeVar) for t in tvars), tvars
                else:
                    # Collect type variables from the bases and validate that
                    # Generic[...]/Protocol[...] is subscripted at most once.
                    tvars = _type_vars(bases)
                    gvars = None
                    for base in bases:
                        if base is Generic:
                            raise TypeError("Cannot inherit from plain Generic")
                        if (isinstance(base, GenericMeta) and
                                base.__origin__ in (Generic, Protocol)):
                            if gvars is not None:
                                raise TypeError(
                                    "Cannot inherit from Generic[...] or"
                                    " Protocol[...] multiple times.")
                            gvars = base.__parameters__
                    if gvars is None:
                        gvars = tvars
                    else:
                        tvarset = set(tvars)
                        gvarset = set(gvars)
                        if not tvarset <= gvarset:
                            raise TypeError(
                                "Some type variables (%s) "
                                "are not listed in %s[%s]" %
                                (", ".join(str(t) for t in tvars if t not in gvarset),
                                 "Generic" if any(b.__origin__ is Generic
                                                  for b in bases) else "Protocol",
                                 ", ".join(str(g) for g in gvars)))
                        tvars = gvars
                initial_bases = bases
                if (extra is not None and type(extra) is abc.ABCMeta and
                        extra not in bases):
                    bases = (extra,) + bases
                bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
                              for b in bases)
                if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
                    bases = tuple(b for b in bases if b is not Generic)
                namespace.update({'__origin__': origin, '__extra__': extra})
                self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
                                                       _root=True)
                super(GenericMeta, self).__setattr__('_gorg',
                                                     self if not origin else
                                                     _gorg(origin))
                self.__parameters__ = tvars
                self.__args__ = tuple(... if a is _TypingEllipsis else
                                      () if a is _TypingEmpty else
                                      a for a in args) if args else None
                self.__next_in_mro__ = _next_in_mro(self)
                if orig_bases is None:
                    self.__orig_bases__ = initial_bases
                elif origin is not None:
                    # Subscripted aliases share the ABC caches of their origin.
                    self._abc_registry = origin._abc_registry
                    self._abc_cache = origin._abc_cache
                if hasattr(self, '_subs_tree'):
                    self.__tree_hash__ = (hash(self._subs_tree()) if origin else
                                          super(GenericMeta, self).__hash__())
                return self
        def __init__(cls, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # A class is a protocol iff one of its immediate bases is.
            if not cls.__dict__.get('_is_protocol', None):
                cls._is_protocol = any(b is Protocol or
                                       isinstance(b, _ProtocolMeta) and
                                       b.__origin__ is Protocol
                                       for b in cls.__bases__)
            if cls._is_protocol:
                # Protocols may only inherit from other protocols or the
                # whitelisted structural ABCs.
                for base in cls.__mro__[1:]:
                    if not (base in (object, Generic) or
                            base.__module__ == 'collections.abc' and
                            base.__name__ in _PROTO_WHITELIST or
                            isinstance(base, TypingMeta) and base._is_protocol or
                            isinstance(base, GenericMeta) and
                            base.__origin__ is Generic):
                        raise TypeError('Protocols can only inherit from other'
                                        ' protocols, got %r' % base)
                cls.__init__ = _no_init
            def _proto_hook(other):
                # __subclasshook__: structural issubclass() check.
                if not cls.__dict__.get('_is_protocol', None):
                    return NotImplemented
                if not isinstance(other, type):
                    # Same error as for issubclass(1, int)
                    raise TypeError('issubclass() arg 1 must be a class')
                for attr in _get_protocol_attrs(cls):
                    for base in other.__mro__:
                        if attr in base.__dict__:
                            if base.__dict__[attr] is None:
                                return NotImplemented
                            break
                        annotations = getattr(base, '__annotations__', {})
                        if (isinstance(annotations, typing.Mapping) and
                                attr in annotations and
                                isinstance(other, _ProtocolMeta) and
                                other._is_protocol):
                            break
                    else:
                        return NotImplemented
                return True
            if '__subclasshook__' not in cls.__dict__:
                cls.__subclasshook__ = _proto_hook
        def __instancecheck__(self, instance):
            # We need this method for situations where attributes are
            # assigned in __init__.
            if ((not getattr(self, '_is_protocol', False) or
                    _is_callable_members_only(self)) and
                    issubclass(instance.__class__, self)):
                return True
            if self._is_protocol:
                if all(hasattr(instance, attr) and
                       (not callable(getattr(self, attr, None)) or
                        getattr(instance, attr) is not None)
                       for attr in _get_protocol_attrs(self)):
                    return True
            return super(GenericMeta, self).__instancecheck__(instance)
        def __subclasscheck__(self, cls):
            if self.__origin__ is not None:
                # Parameterized protocols only make sense to static checkers;
                # abc/functools probe internally and just get False.
                if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
                    raise TypeError("Parameterized generics cannot be used with class "
                                    "or instance checks")
                return False
            if (self.__dict__.get('_is_protocol', None) and
                    not self.__dict__.get('_is_runtime_protocol', None)):
                if sys._getframe(1).f_globals['__name__'] in ['abc',
                                                              'functools',
                                                              'typing']:
                    return False
                raise TypeError("Instance and class checks can only be used with"
                                " @runtime protocols")
            if (self.__dict__.get('_is_runtime_protocol', None) and
                    not _is_callable_members_only(self)):
                if sys._getframe(1).f_globals['__name__'] in ['abc',
                                                              'functools',
                                                              'typing']:
                    return super(GenericMeta, self).__subclasscheck__(cls)
                raise TypeError("Protocols with non-method members"
                                " don't support issubclass()")
            return super(GenericMeta, self).__subclasscheck__(cls)
        if not OLD_GENERICS:
            @_tp_cache
            def __getitem__(self, params):
                # We also need to copy this from GenericMeta.__getitem__ to get
                # special treatment of "Protocol". (Comments removed for brevity.)
                if not isinstance(params, tuple):
                    params = (params,)
                if not params and _gorg(self) is not Tuple:
                    raise TypeError(
                        "Parameter list to %s[...] cannot be empty" % self.__qualname__)
                msg = "Parameters to generic types must be types."
                params = tuple(_type_check(p, msg) for p in params)
                if self in (Generic, Protocol):
                    if not all(isinstance(p, TypeVar) for p in params):
                        raise TypeError(
                            "Parameters to %r[...] must all be type variables" % self)
                    if len(set(params)) != len(params):
                        raise TypeError(
                            "Parameters to %r[...] must all be unique" % self)
                    tvars = params
                    args = params
                elif self in (Tuple, Callable):
                    tvars = _type_vars(params)
                    args = params
                elif self.__origin__ in (Generic, Protocol):
                    raise TypeError("Cannot subscript already-subscripted %s" %
                                    repr(self))
                else:
                    _check_generic(self, params)
                    tvars = _type_vars(params)
                    args = params
                prepend = (self,) if self.__origin__ is None else ()
                return self.__class__(self.__name__,
                                      prepend + self.__bases__,
                                      _no_slots_copy(self.__dict__),
                                      tvars=tvars,
                                      args=args,
                                      origin=self,
                                      extra=self.__extra__,
                                      orig_bases=self.__orig_bases__)
    class Protocol(metaclass=_ProtocolMeta):
        """Base class for protocol classes. Protocol classes are defined as::
        class Proto(Protocol):
            def meth(self) -> int:
                ...
        Such classes are primarily used with static type checkers that recognize
        structural subtyping (static duck-typing), for example::
        class C:
            def meth(self) -> int:
                return 0
        def func(x: Proto) -> int:
            return x.meth()
        func(C())  # Passes static type check
        See PEP 544 for details. Protocol classes decorated with
        @typing_extensions.runtime act as simple-minded runtime protocol that checks
        only the presence of given attributes, ignoring their type signatures.
        Protocol classes can be generic, they are defined as::
        class GenProto({bases}):
            def meth(self) -> T:
                ...
        """
        __slots__ = ()
        _is_protocol = True
        def __new__(cls, *args, **kwds):
            # The bare Protocol class is abstract; only subclasses may be
            # instantiated (and _no_init will still block real protocols).
            if _gorg(cls) is Protocol:
                raise TypeError("Type Protocol cannot be instantiated; "
                                "it can be used only as a base class")
            if OLD_GENERICS:
                return _generic_new(_next_in_mro(cls), cls, *args, **kwds)
            return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
    if Protocol.__doc__ is not None:
        # Fill the {bases} placeholder in the docstring for this typing version.
        Protocol.__doc__ = Protocol.__doc__.format(bases="Protocol, Generic[T]" if
                                                   OLD_GENERICS else "Protocol[T]")
elif PEP_560:
    # Python 3.7: generics use __class_getitem__, so Protocol can be a plain
    # class on an ABCMeta-derived metaclass.
    from typing import _type_check, _collect_type_vars  # noqa
    def _no_init(self, *args, **kwargs):
        # Installed as __init__ on protocol classes to forbid instantiation.
        if type(self)._is_protocol:
            raise TypeError('Protocols cannot be instantiated')
    class _ProtocolMeta(abc.ABCMeta):
        # This metaclass is a bit unfortunate and exists only because of the lack
        # of __instancehook__.
        def __instancecheck__(cls, instance):
            # We need this method for situations where attributes are
            # assigned in __init__.
            if ((not getattr(cls, '_is_protocol', False) or
                    _is_callable_members_only(cls)) and
                    issubclass(instance.__class__, cls)):
                return True
            if cls._is_protocol:
                # Structural check: every protocol member must be present
                # (and non-None where the protocol declares a method).
                if all(hasattr(instance, attr) and
                       (not callable(getattr(cls, attr, None)) or
                        getattr(instance, attr) is not None)
                       for attr in _get_protocol_attrs(cls)):
                    return True
            return super().__instancecheck__(instance)
class Protocol(metaclass=_ProtocolMeta):
# There is quite a lot of overlapping code with typing.Generic.
# Unfortunately it is hard to avoid this while these live in two different
# modules. The duplicated code will be removed when Protocol is moved to typing.
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@typing_extensions.runtime act as simple-minded runtime protocol that checks
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if cls is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can only be used as a base class")
return super().__new__(cls)
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not Tuple:
raise TypeError(
"Parameter list to {}[...] cannot be empty".format(cls.__qualname__))
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
if cls is Protocol:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, TypeVar) for p in params):
i = 0
while isinstance(params[i], TypeVar):
i += 1
raise TypeError(
"Parameters to Protocol[...] must all be type variables."
" Parameter {} is {}".format(i + 1, params[i]))
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Protocol[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
_check_generic(cls, params)
return _GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
tvars = []
if '__orig_bases__' in cls.__dict__:
error = Generic in cls.__orig_bases__
else:
error = Generic in cls.__bases__
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...] and/or Protocol[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, _GenericAlias) and
base.__origin__ in (Generic, Protocol)):
# for error messages
the_base = 'Generic' if base.__origin__ is Generic else 'Protocol'
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...]"
" and/or Protocol[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError("Some type variables ({}) are"
" not listed in {}[{}]".format(s_vars,
the_base, s_args))
tvars = gvars
cls.__parameters__ = tuple(tvars)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not getattr(cls, '_is_runtime_protocol', False):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if not _is_callable_members_only(cls):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols.
if not cls._is_protocol:
return
# Check consistency of bases.
for base in cls.__bases__:
if not (base in (object, Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, _ProtocolMeta) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
' protocols, got %r' % base)
cls.__init__ = _no_init
if hasattr(typing, 'runtime_checkable'):
    # Python 3.8+: reuse the stdlib decorator.
    runtime_checkable = typing.runtime_checkable
elif HAVE_PROTOCOLS:
    def runtime_checkable(cls):
        """Mark a protocol class as usable with isinstance()/issubclass().

        Raises TypeError when applied to anything that is not a protocol
        class.  The resulting runtime check is structural and simple-minded,
        very similar to the one-offs in collections.abc such as Hashable.
        """
        if not (isinstance(cls, _ProtocolMeta) and cls._is_protocol):
            raise TypeError('@runtime_checkable can be only applied to protocol classes,'
                            ' got %r' % cls)
        cls._is_runtime_protocol = True
        return cls
if HAVE_PROTOCOLS:
    # Exists for backwards compatibility: older releases exported the
    # decorator under the name "runtime".
    runtime = runtime_checkable
if hasattr(typing, 'SupportsIndex'):
    SupportsIndex = typing.SupportsIndex
elif HAVE_PROTOCOLS:
    # Backport of the 3.8 protocol for objects usable as sequence indices.
    @runtime_checkable
    class SupportsIndex(Protocol):
        __slots__ = ()
        @abc.abstractmethod
        def __index__(self) -> int:
            pass
if sys.version_info >= (3, 9, 2):
    # The standard library TypedDict in Python 3.8 does not store runtime information
    # about which (if any) keys are optional.  See https://bugs.python.org/issue38834
    # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
    # keyword with old-style TypedDict().  See https://bugs.python.org/issue42059
    TypedDict = typing.TypedDict
else:
    def _check_fails(cls, other):
        # Installed as both __instancecheck__ and __subclasscheck__ on the
        # TypedDict metaclass: user code gets a TypeError, while internal
        # probes from abc/functools/typing silently get False.
        try:
            if sys._getframe(1).f_globals['__name__'] not in ['abc',
                                                              'functools',
                                                              'typing']:
                # Typed dicts are only for static structural subtyping.
                raise TypeError('TypedDict does not support instance and class checks')
        except (AttributeError, ValueError):
            pass
        return False
def _dict_new(*args, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
_, args = args[0], args[1:] # allow the "cls" keyword be passed
return dict(*args, **kwargs)
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
    def _typeddict_new(*args, total=True, **kwargs):
        # __new__ for the TypedDict base itself: implements the functional
        # syntaxes TypedDict('Name', {...}) and TypedDict('Name', x=int, ...),
        # including deprecated keyword spellings of _typename/_fields.
        if not args:
            raise TypeError('TypedDict.__new__(): not enough arguments')
        _, args = args[0], args[1:]  # allow the "cls" keyword be passed
        if args:
            typename, args = args[0], args[1:]  # allow the "_typename" keyword be passed
        elif '_typename' in kwargs:
            typename = kwargs.pop('_typename')
            import warnings
            warnings.warn("Passing '_typename' as keyword argument is deprecated",
                          DeprecationWarning, stacklevel=2)
        else:
            raise TypeError("TypedDict.__new__() missing 1 required positional "
                            "argument: '_typename'")
        if args:
            try:
                fields, = args  # allow the "_fields" keyword be passed
            except ValueError:
                raise TypeError('TypedDict.__new__() takes from 2 to 3 '
                                'positional arguments but {} '
                                'were given'.format(len(args) + 2))
        elif '_fields' in kwargs and len(kwargs) == 1:
            fields = kwargs.pop('_fields')
            import warnings
            warnings.warn("Passing '_fields' as keyword argument is deprecated",
                          DeprecationWarning, stacklevel=2)
        else:
            fields = None
        if fields is None:
            fields = kwargs
        elif kwargs:
            raise TypeError("TypedDict takes either a dict or keyword arguments,"
                            " but not both")
        ns = {'__annotations__': dict(fields)}
        try:
            # Setting correct module is necessary to make typed dict classes pickleable.
            ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
        return _TypedDictMeta(typename, (), ns, total=total)
    _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
                                         ' /, *, total=True, **kwargs)')
    class _TypedDictMeta(type):
        def __init__(cls, name, bases, ns, total=True):
            # In Python 3.4 and 3.5 the __init__ method also needs to support the
            # keyword arguments.
            # See https://www.python.org/dev/peps/pep-0487/#implementation-details
            super(_TypedDictMeta, cls).__init__(name, bases, ns)
        def __new__(cls, name, bases, ns, total=True):
            # Create new typed dict class object.
            # This method is called directly when TypedDict is subclassed,
            # or via _typeddict_new when TypedDict is instantiated. This way
            # TypedDict supports all three syntaxes described in its docstring.
            # Subclasses and instances of TypedDict return actual dictionaries
            # via _dict_new.
            ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
            tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns)
            annotations = {}
            own_annotations = ns.get('__annotations__', {})
            own_annotation_keys = set(own_annotations.keys())
            msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
            own_annotations = {
                n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
            }
            # Merge annotations and required/optional key sets from the bases,
            # then layer this class's own keys on top per its "total" flag.
            required_keys = set()
            optional_keys = set()
            for base in bases:
                annotations.update(base.__dict__.get('__annotations__', {}))
                required_keys.update(base.__dict__.get('__required_keys__', ()))
                optional_keys.update(base.__dict__.get('__optional_keys__', ()))
            annotations.update(own_annotations)
            if total:
                required_keys.update(own_annotation_keys)
            else:
                optional_keys.update(own_annotation_keys)
            tp_dict.__annotations__ = annotations
            tp_dict.__required_keys__ = frozenset(required_keys)
            tp_dict.__optional_keys__ = frozenset(optional_keys)
            if not hasattr(tp_dict, '__total__'):
                tp_dict.__total__ = total
            return tp_dict
        # TypedDict types reject isinstance()/issubclass() (see _check_fails).
        __instancecheck__ = __subclasscheck__ = _check_fails
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
if hasattr(typing, 'Annotated'):
    Annotated = typing.Annotated
    get_type_hints = typing.get_type_hints
    # Not exported and not a public API, but needed for get_origin() and get_args()
    # to work.
    _AnnotatedAlias = typing._AnnotatedAlias
elif PEP_560:
    class _AnnotatedAlias(typing._GenericAlias, _root=True):
        """Runtime representation of an annotated type.
        At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
        with extra annotations. The alias behaves like a normal typing alias,
        instantiating is the same as instantiating the underlying type, binding
        it to types is also the same.
        """
        def __init__(self, origin, metadata):
            # Nested Annotated flattens: metadata of the inner alias comes first.
            if isinstance(origin, _AnnotatedAlias):
                metadata = origin.__metadata__ + metadata
                origin = origin.__origin__
            super().__init__(origin, origin)
            self.__metadata__ = metadata
        def copy_with(self, params):
            # Substituting type variables replaces only the underlying type;
            # the metadata tuple is carried over unchanged.
            assert len(params) == 1
            new_type = params[0]
            return _AnnotatedAlias(new_type, self.__metadata__)
        def __repr__(self):
            return "typing_extensions.Annotated[{}, {}]".format(
                typing._type_repr(self.__origin__),
                ", ".join(repr(a) for a in self.__metadata__)
            )
        def __reduce__(self):
            # Pickle via re-subscription: Annotated[origin, *metadata].
            return operator.getitem, (
                Annotated, (self.__origin__,) + self.__metadata__
            )
        def __eq__(self, other):
            if not isinstance(other, _AnnotatedAlias):
                return NotImplemented
            if self.__origin__ != other.__origin__:
                return False
            return self.__metadata__ == other.__metadata__
        def __hash__(self):
            return hash((self.__origin__, self.__metadata__))
    class Annotated:
        """Add context specific metadata to a type.
        Example: Annotated[int, runtime_check.Unsigned] indicates to the
        hypothetical runtime_check module that this type is an unsigned int.
        Every other consumer of this type can ignore this metadata and treat
        this type as int.
        The first argument to Annotated must be a valid type (and will be in
        the __origin__ field), the remaining arguments are kept as a tuple in
        the __extra__ field.
        Details:
        - It's an error to call `Annotated` with less than two arguments.
        - Nested Annotated are flattened::
            Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
        - Instantiating an annotated type is equivalent to instantiating the
        underlying type::
            Annotated[C, Ann1](5) == C(5)
        - Annotated can be used as a generic type alias::
            Optimized = Annotated[T, runtime.Optimize()]
            Optimized[int] == Annotated[int, runtime.Optimize()]
            OptimizedList = Annotated[List[T], runtime.Optimize()]
            OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
        """
        __slots__ = ()
        def __new__(cls, *args, **kwargs):
            # Annotated is subscripted, never instantiated.
            raise TypeError("Type Annotated cannot be instantiated.")
        @_tp_cache
        def __class_getitem__(cls, params):
            if not isinstance(params, tuple) or len(params) < 2:
                raise TypeError("Annotated[...] should be used "
                                "with at least two arguments (a type and an "
                                "annotation).")
            msg = "Annotated[t, ...]: t must be a type."
            origin = typing._type_check(params[0], msg)
            metadata = tuple(params[1:])
            return _AnnotatedAlias(origin, metadata)
        def __init_subclass__(cls, *args, **kwargs):
            raise TypeError(
                "Cannot subclass {}.Annotated".format(cls.__module__)
            )
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
res = t.copy_with(stripped_args)
res._special = t._special
return res
return t
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if include_extras:
return hint
return {k: _strip_annotations(t) for k, t in hint.items()}
elif HAVE_ANNOTATED:
def _is_dunder(name):
"""Returns True if name is a __dunder_variable_name__."""
return len(name) > 4 and name.startswith('__') and name.endswith('__')
    # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
    # checks, argument expansion etc. are done on the _subs_tre. As a result we
    # can't provide a get_type_hints function that strips out annotations.
    class AnnotatedMeta(typing.GenericMeta):
        """Metaclass for Annotated"""
        def __new__(cls, name, bases, namespace, **kwargs):
            # Only the Annotated class itself may use this metaclass.
            if any(b is not object for b in bases):
                raise TypeError("Cannot subclass " + str(Annotated))
            return super().__new__(cls, name, bases, namespace, **kwargs)
        @property
        def __metadata__(self):
            # The metadata tuple is the third slot of the substitution tree.
            return self._subs_tree()[2]
        def _tree_repr(self, tree):
            cls, origin, metadata = tree
            if not isinstance(origin, tuple):
                tp_repr = typing._type_repr(origin)
            else:
                tp_repr = origin[0]._tree_repr(origin)
            metadata_reprs = ", ".join(repr(arg) for arg in metadata)
            return '%s[%s, %s]' % (cls, tp_repr, metadata_reprs)
        def _subs_tree(self, tvars=None, args=None):  # noqa
            if self is Annotated:
                return Annotated
            res = super()._subs_tree(tvars=tvars, args=args)
            # Flatten nested Annotated
            if isinstance(res[1], tuple) and res[1][0] is Annotated:
                sub_tp = res[1][1]
                sub_annot = res[1][2]
                return (Annotated, sub_tp, sub_annot + res[2])
            return res
        def _get_cons(self):
            """Return the class used to create instance of this type."""
            if self.__origin__ is None:
                raise TypeError("Cannot get the underlying type of a "
                                "non-specialized Annotated type.")
            tree = self._subs_tree()
            while isinstance(tree, tuple) and tree[0] is Annotated:
                tree = tree[1]
            if isinstance(tree, tuple):
                return tree[0]
            else:
                return tree
        @_tp_cache
        def __getitem__(self, params):
            if not isinstance(params, tuple):
                params = (params,)
            if self.__origin__ is not None:  # specializing an instantiated type
                return super().__getitem__(params)
            elif not isinstance(params, tuple) or len(params) < 2:
                raise TypeError("Annotated[...] should be instantiated "
                                "with at least two arguments (a type and an "
                                "annotation).")
            else:
                msg = "Annotated[t, ...]: t must be a type."
                tp = typing._type_check(params[0], msg)
                metadata = tuple(params[1:])
            return self.__class__(
                self.__name__,
                self.__bases__,
                _no_slots_copy(self.__dict__),
                tvars=_type_vars((tp,)),
                # Metadata is a tuple so it won't be touched by _replace_args et al.
                args=(tp, metadata),
                origin=self,
            )
        def __call__(self, *args, **kwargs):
            # Instantiating Annotated[C, ...] instantiates C itself.
            cons = self._get_cons()
            result = cons(*args, **kwargs)
            try:
                result.__orig_class__ = self
            except AttributeError:
                pass
            return result
        def __getattr__(self, attr):
            # For simplicity we just don't relay all dunder names
            if self.__origin__ is not None and not _is_dunder(attr):
                return getattr(self._get_cons(), attr)
            raise AttributeError(attr)
        def __setattr__(self, attr, value):
            if _is_dunder(attr) or attr.startswith('_abc_'):
                super().__setattr__(attr, value)
            elif self.__origin__ is None:
                raise AttributeError(attr)
            else:
                setattr(self._get_cons(), attr, value)
        def __instancecheck__(self, obj):
            raise TypeError("Annotated cannot be used with isinstance().")
        def __subclasscheck__(self, cls):
            raise TypeError("Annotated cannot be used with issubclass().")
    class Annotated(metaclass=AnnotatedMeta):
        """Add context specific metadata to a type.
        Example: Annotated[int, runtime_check.Unsigned] indicates to the
        hypothetical runtime_check module that this type is an unsigned int.
        Every other consumer of this type can ignore this metadata and treat
        this type as int.
        The first argument to Annotated must be a valid type, the remaining
        arguments are kept as a tuple in the __metadata__ field.
        Details:
        - It's an error to call `Annotated` with less than two arguments.
        - Nested Annotated are flattened::
            Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
        - Instantiating an annotated type is equivalent to instantiating the
        underlying type::
            Annotated[C, Ann1](5) == C(5)
        - Annotated can be used as a generic type alias::
            Optimized = Annotated[T, runtime.Optimize()]
            Optimized[int] == Annotated[int, runtime.Optimize()]
            OptimizedList = Annotated[List[T], runtime.Optimize()]
            OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
        """
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those, only Python 3.9 versions will do.
# Similarly, Python 3.9's implementation doesn't support ParamSpecArgs and
# ParamSpecKwargs.
if sys.version_info[:2] >= (3, 10):
    get_origin = typing.get_origin
    get_args = typing.get_args
elif PEP_560:
    try:
        # 3.9+
        from typing import _BaseGenericAlias
    except ImportError:
        # On 3.7/3.8 every alias is a plain _GenericAlias.
        _BaseGenericAlias = _GenericAlias
    try:
        # 3.9+
        from typing import GenericAlias
    except ImportError:
        GenericAlias = _GenericAlias
    def get_origin(tp):
        """Get the unsubscripted version of a type.
        This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
        and Annotated. Return None for unsupported types. Examples::
        get_origin(Literal[42]) is Literal
        get_origin(int) is None
        get_origin(ClassVar[int]) is ClassVar
        get_origin(Generic) is Generic
        get_origin(Generic[T]) is Generic
        get_origin(Union[T, int]) is Union
        get_origin(List[Tuple[T, T]][int]) == list
        get_origin(P.args) is P
        """
        # The _AnnotatedAlias test must come first: it subclasses _GenericAlias.
        if isinstance(tp, _AnnotatedAlias):
            return Annotated
        if isinstance(tp, (_GenericAlias, GenericAlias, _BaseGenericAlias,
                           ParamSpecArgs, ParamSpecKwargs)):
            return tp.__origin__
        if tp is Generic:
            return Generic
        return None
    def get_args(tp):
        """Get type arguments with all substitutions performed.
        For unions, basic simplifications used by Union constructor are performed.
        Examples::
        get_args(Dict[str, int]) == (str, int)
        get_args(int) == ()
        get_args(Union[int, Union[T, int], str][int]) == (int, str)
        get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
        get_args(Callable[[], T][int]) == ([], int)
        """
        if isinstance(tp, _AnnotatedAlias):
            # Annotated exposes (underlying type, *metadata).
            return (tp.__origin__,) + tp.__metadata__
        if isinstance(tp, (_GenericAlias, GenericAlias)):
            if getattr(tp, "_special", False):
                return ()
            res = tp.__args__
            # Callable[[a, b], r] reports its arguments as ([a, b], r).
            if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
                res = (list(res[:-1]), res[-1])
            return res
        return ()
if hasattr(typing, 'TypeAlias'):
    # Python 3.10+: use the stdlib special form.
    TypeAlias = typing.TypeAlias
elif sys.version_info[:2] >= (3, 9):
    # On 3.9 _SpecialForm is used as a decorator.
    class _TypeAliasForm(typing._SpecialForm, _root=True):
        def __repr__(self):
            return 'typing_extensions.' + self._name
    @_TypeAliasForm
    def TypeAlias(self, parameters):
        """Special marker indicating that an assignment should
        be recognized as a proper type alias definition by type
        checkers.
        For example::
            Predicate: TypeAlias = Callable[..., bool]
        It's invalid when used anywhere except as in the example above.
        """
        raise TypeError("{} is not subscriptable".format(self))
elif sys.version_info[:2] >= (3, 7):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
elif hasattr(typing, '_FinalTypingBase'):
class _TypeAliasMeta(typing.TypingMeta):
"""Metaclass for TypeAlias"""
def __repr__(self):
return 'typing_extensions.TypeAlias'
class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("TypeAlias cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("TypeAlias cannot be used with issubclass().")
def __repr__(self):
return 'typing_extensions.TypeAlias'
TypeAlias = _TypeAliasBase(_root=True)
else:
class _TypeAliasMeta(typing.TypingMeta):
"""Metaclass for TypeAlias"""
def __instancecheck__(self, obj):
raise TypeError("TypeAlias cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("TypeAlias cannot be used with issubclass().")
def __call__(self, *args, **kwargs):
raise TypeError("Cannot instantiate TypeAlias")
class TypeAlias(metaclass=_TypeAliasMeta, _root=True):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
__slots__ = ()
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
    # 3.10+: re-export the stdlib implementations.
    ParamSpecArgs = typing.ParamSpecArgs
    ParamSpecKwargs = typing.ParamSpecKwargs
else:
    class _Immutable:
        """Mixin to indicate that object should not be copied."""
        __slots__ = ()
        def __copy__(self):
            return self
        def __deepcopy__(self, memo):
            return self
    class ParamSpecArgs(_Immutable):
        """The args for a ParamSpec object.
        Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
        ParamSpecArgs objects have a reference back to their ParamSpec:
            P.args.__origin__ is P
        This type is meant for runtime introspection and has no special meaning to
        static type checkers.
        """
        def __init__(self, origin):
            self.__origin__ = origin
        def __repr__(self):
            return "{}.args".format(self.__origin__.__name__)
    class ParamSpecKwargs(_Immutable):
        """The kwargs for a ParamSpec object.
        Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
        ParamSpecKwargs objects have a reference back to their ParamSpec:
            P.kwargs.__origin__ is P
        This type is meant for runtime introspection and has no special meaning to
        static type checkers.
        """
        def __init__(self, origin):
            self.__origin__ = origin
        def __repr__(self):
            return "{}.kwargs".format(self.__origin__.__name__)
if hasattr(typing, 'ParamSpec'):
    # 3.10+: re-export the stdlib implementation.
    ParamSpec = typing.ParamSpec
else:
    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
    class ParamSpec(list):
        """Parameter specification variable.
        Usage::
            P = ParamSpec('P')
        Parameter specification variables exist primarily for the benefit of static
        type checkers. They are used to forward the parameter types of one
        callable to another callable, a pattern commonly found in higher order
        functions and decorators. They are only valid when used in ``Concatenate``,
        or as the first argument to ``Callable``. In Python 3.10 and higher,
        they are also supported in user-defined Generics at runtime.
        See class Generic for more information on generic types. An
        example for annotating a decorator::
            T = TypeVar('T')
            P = ParamSpec('P')
            def add_logging(f: Callable[P, T]) -> Callable[P, T]:
                '''A type-safe decorator to add logging to a function.'''
                def inner(*args: P.args, **kwargs: P.kwargs) -> T:
                    logging.info(f'{f.__name__} was called')
                    return f(*args, **kwargs)
                return inner
            @add_logging
            def add_two(x: float, y: float) -> float:
                '''Add two numbers together.'''
                return x + y
        Parameter specification variables defined with covariant=True or
        contravariant=True can be used to declare covariant or contravariant
        generic types. These keyword arguments are valid, but their actual semantics
        are yet to be decided. See PEP 612 for details.
        Parameter specification variables can be introspected. e.g.:
            P.__name__ == 'P'
            P.__bound__ == None
            P.__covariant__ == False
            P.__contravariant__ == False
        Note that only parameter specification variables defined in global scope can
        be pickled.
        """
        # Trick Generic __parameters__.
        __class__ = TypeVar
        @property
        def args(self):
            return ParamSpecArgs(self)
        @property
        def kwargs(self):
            return ParamSpecKwargs(self)
        def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
            # Containing [self] makes Callable parameter checks accept this object.
            super().__init__([self])
            self.__name__ = name
            self.__covariant__ = bool(covariant)
            self.__contravariant__ = bool(contravariant)
            if bound:
                self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
            else:
                self.__bound__ = None
            # for pickling:
            try:
                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
            except (AttributeError, ValueError):
                def_mod = None
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod
        def __repr__(self):
            # Same +/-/~ variance prefix convention as TypeVar.
            if self.__covariant__:
                prefix = '+'
            elif self.__contravariant__:
                prefix = '-'
            else:
                prefix = '~'
            return prefix + self.__name__
        def __hash__(self):
            # Identity-based hash/eq: each ParamSpec is unique, like TypeVar.
            return object.__hash__(self)
        def __eq__(self, other):
            return self is other
        def __reduce__(self):
            return self.__name__
        # Hack to get typing._type_check to pass.
        def __call__(self, *args, **kwargs):
            pass
        if not PEP_560:
            # Only needed in 3.6 and lower.
            def _get_type_vars(self, tvars):
                if self not in tvars:
                    tvars.append(self)
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class _ConcatenateGenericAlias(list):
    """Internal alias object produced by subscripting ``Concatenate``."""
    # Trick Generic into looking into this for __parameters__.
    if PEP_560:
        __class__ = typing._GenericAlias
    elif sys.version_info[:3] == (3, 5, 2):
        __class__ = typing.TypingMeta
    else:
        __class__ = typing._TypingBase
    # Flag in 3.8.
    _special = False
    # Attribute in 3.6 and earlier.
    if sys.version_info[:3] == (3, 5, 2):
        _gorg = typing.GenericMeta
    else:
        _gorg = typing.Generic
    def __init__(self, origin, args):
        # The list contents mirror ``args`` (see the workaround note above).
        super().__init__(args)
        self.__origin__ = origin
        self.__args__ = args
    def __repr__(self):
        # Renders as e.g. ``Concatenate[int, ~P]``.
        _type_repr = typing._type_repr
        return '{origin}[{args}]' \
            .format(origin=_type_repr(self.__origin__),
                    args=', '.join(_type_repr(arg) for arg in self.__args__))
    def __hash__(self):
        return hash((self.__origin__, self.__args__))
    # Hack to get typing._type_check to pass in Generic.
    def __call__(self, *args, **kwargs):
        pass
    @property
    def __parameters__(self):
        # Free variables of the alias: any TypeVar or ParamSpec among the args.
        return tuple(tp for tp in self.__args__ if isinstance(tp, (TypeVar, ParamSpec)))
    if not PEP_560:
        # Only required in 3.6 and lower.
        def _get_type_vars(self, tvars):
            if self.__origin__ and self.__parameters__:
                typing._get_type_vars(self.__parameters__, tvars)
@_tp_cache
def _concatenate_getitem(self, parameters):
    """Validate a ``Concatenate[...]`` subscription and build its alias.

    ``self`` is the ``Concatenate`` special form; ``parameters`` is the raw
    subscript (a single object or a tuple).  The subscript must be non-empty
    and end with a ``ParamSpec``, and each element must pass
    ``typing._type_check``.  Results are memoized by ``_tp_cache``.
    """
    if parameters == ():
        raise TypeError("Cannot take a Concatenate of no types.")
    if not isinstance(parameters, tuple):
        parameters = (parameters,)
    last = parameters[-1]
    if not isinstance(last, ParamSpec):
        raise TypeError("The last parameter to Concatenate should be a "
                        "ParamSpec variable.")
    checked = tuple(
        typing._type_check(p, "Concatenate[arg, ...]: each arg must be a type.")
        for p in parameters
    )
    return _ConcatenateGenericAlias(self, checked)
if hasattr(typing, 'Concatenate'):
    # 3.10+: re-export stdlib form (and its private alias type).
    Concatenate = typing.Concatenate
    _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
elif sys.version_info[:2] >= (3, 9):
    # 3.9: _SpecialForm usable as a decorator.
    @_TypeAliasForm
    def Concatenate(self, parameters):
        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
        higher order function which adds, removes or transforms parameters of a
        callable.
        For example::
            Callable[Concatenate[int, P], int]
        See PEP 612 for detailed information.
        """
        return _concatenate_getitem(self, parameters)
elif sys.version_info[:2] >= (3, 7):
    # 3.7-3.8: subscription goes through an explicit __getitem__.
    class _ConcatenateForm(typing._SpecialForm, _root=True):
        def __repr__(self):
            return 'typing_extensions.' + self._name
        def __getitem__(self, parameters):
            return _concatenate_getitem(self, parameters)
    Concatenate = _ConcatenateForm(
        'Concatenate',
        doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
        higher order function which adds, removes or transforms parameters of a
        callable.
        For example::
            Callable[Concatenate[int, P], int]
        See PEP 612 for detailed information.
        """)
elif hasattr(typing, '_FinalTypingBase'):
    # 3.5.3-3.6: emulate the special form with _FinalTypingBase machinery.
    class _ConcatenateAliasMeta(typing.TypingMeta):
        """Metaclass for Concatenate."""
        def __repr__(self):
            return 'typing_extensions.Concatenate'
    class _ConcatenateAliasBase(typing._FinalTypingBase,
                                metaclass=_ConcatenateAliasMeta,
                                _root=True):
        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
        higher order function which adds, removes or transforms parameters of a
        callable.
        For example::
            Callable[Concatenate[int, P], int]
        See PEP 612 for detailed information.
        """
        __slots__ = ()
        def __instancecheck__(self, obj):
            raise TypeError("Concatenate cannot be used with isinstance().")
        def __subclasscheck__(self, cls):
            raise TypeError("Concatenate cannot be used with issubclass().")
        def __repr__(self):
            return 'typing_extensions.Concatenate'
        def __getitem__(self, parameters):
            return _concatenate_getitem(self, parameters)
    Concatenate = _ConcatenateAliasBase(_root=True)
# For 3.5.0 - 3.5.2
else:
    class _ConcatenateAliasMeta(typing.TypingMeta):
        """Metaclass for Concatenate."""
        def __instancecheck__(self, obj):
            raise TypeError("TypeAlias cannot be used with isinstance().")
        def __subclasscheck__(self, cls):
            raise TypeError("TypeAlias cannot be used with issubclass().")
        def __call__(self, *args, **kwargs):
            raise TypeError("Cannot instantiate TypeAlias")
        def __getitem__(self, parameters):
            return _concatenate_getitem(self, parameters)
    class Concatenate(metaclass=_ConcatenateAliasMeta, _root=True):
        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
        higher order function which adds, removes or transforms parameters of a
        callable.
        For example::
            Callable[Concatenate[int, P], int]
        See PEP 612 for detailed information.
        """
        __slots__ = ()
if hasattr(typing, 'TypeGuard'):
    # 3.10+: re-export the stdlib form.
    TypeGuard = typing.TypeGuard
elif sys.version_info[:2] >= (3, 9):
    # 3.9: _SpecialForm usable as a decorator.
    class _TypeGuardForm(typing._SpecialForm, _root=True):
        def __repr__(self):
            return 'typing_extensions.' + self._name
    @_TypeGuardForm
    def TypeGuard(self, parameters):
        """Special typing form used to annotate the return type of a user-defined
        type guard function. ``TypeGuard`` only accepts a single type argument.
        At runtime, functions marked this way should return a boolean.
        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
        type checkers to determine a more precise type of an expression within a
        program's code flow. Usually type narrowing is done by analyzing
        conditional code flow and applying the narrowing to a block of code. The
        conditional expression here is sometimes referred to as a "type guard".
        Sometimes it would be convenient to use a user-defined boolean function
        as a type guard. Such a function should use ``TypeGuard[...]`` as its
        return type to alert static type checkers to this intention.
        Using ``-> TypeGuard`` tells the static type checker that for a given
        function:
        1. The return value is a boolean.
        2. If the return value is ``True``, the type of its argument
        is the type inside ``TypeGuard``.
        For example::
            def is_str(val: Union[str, float]):
                # "isinstance" type guard
                if isinstance(val, str):
                    # Type of ``val`` is narrowed to ``str``
                    ...
                else:
                    # Else, type of ``val`` is narrowed to ``float``.
                    ...
        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
        form of ``TypeA`` (it can even be a wider form) and this may lead to
        type-unsafe results. The main reason is to allow for things like
        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
        a subtype of the former, since ``List`` is invariant. The responsibility of
        writing type-safe type guards is left to the user.
        ``TypeGuard`` also works with type variables. For more information, see
        PEP 647 (User-Defined Type Guards).
        """
        item = typing._type_check(parameters, '{} accepts only single type.'.format(self))
        return _GenericAlias(self, (item,))
elif sys.version_info[:2] >= (3, 7):
    # 3.7-3.8: subscription via an explicit __getitem__; the ``doc=...``
    # argument below is a runtime string, not a docstring.
    class _TypeGuardForm(typing._SpecialForm, _root=True):
        def __repr__(self):
            return 'typing_extensions.' + self._name
        def __getitem__(self, parameters):
            item = typing._type_check(parameters,
                                      '{} accepts only a single type'.format(self._name))
            return _GenericAlias(self, (item,))
    TypeGuard = _TypeGuardForm(
        'TypeGuard',
        doc="""Special typing form used to annotate the return type of a user-defined
        type guard function. ``TypeGuard`` only accepts a single type argument.
        At runtime, functions marked this way should return a boolean.
        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
        type checkers to determine a more precise type of an expression within a
        program's code flow. Usually type narrowing is done by analyzing
        conditional code flow and applying the narrowing to a block of code. The
        conditional expression here is sometimes referred to as a "type guard".
        Sometimes it would be convenient to use a user-defined boolean function
        as a type guard. Such a function should use ``TypeGuard[...]`` as its
        return type to alert static type checkers to this intention.
        Using ``-> TypeGuard`` tells the static type checker that for a given
        function:
        1. The return value is a boolean.
        2. If the return value is ``True``, the type of its argument
        is the type inside ``TypeGuard``.
        For example::
            def is_str(val: Union[str, float]):
                # "isinstance" type guard
                if isinstance(val, str):
                    # Type of ``val`` is narrowed to ``str``
                    ...
                else:
                    # Else, type of ``val`` is narrowed to ``float``.
                    ...
        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
        form of ``TypeA`` (it can even be a wider form) and this may lead to
        type-unsafe results. The main reason is to allow for things like
        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
        a subtype of the former, since ``List`` is invariant. The responsibility of
        writing type-safe type guards is left to the user.
        ``TypeGuard`` also works with type variables. For more information, see
        PEP 647 (User-Defined Type Guards).
        """)
elif hasattr(typing, '_FinalTypingBase'):
    # 3.5.3-3.6: emulate the special form; subscription returns a new
    # _TypeGuard instance carrying the checked type in ``__type__``.
    class _TypeGuard(typing._FinalTypingBase, _root=True):
        """Special typing form used to annotate the return type of a user-defined
        type guard function. ``TypeGuard`` only accepts a single type argument.
        At runtime, functions marked this way should return a boolean.
        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
        type checkers to determine a more precise type of an expression within a
        program's code flow. Usually type narrowing is done by analyzing
        conditional code flow and applying the narrowing to a block of code. The
        conditional expression here is sometimes referred to as a "type guard".
        Sometimes it would be convenient to use a user-defined boolean function
        as a type guard. Such a function should use ``TypeGuard[...]`` as its
        return type to alert static type checkers to this intention.
        Using ``-> TypeGuard`` tells the static type checker that for a given
        function:
        1. The return value is a boolean.
        2. If the return value is ``True``, the type of its argument
        is the type inside ``TypeGuard``.
        For example::
            def is_str(val: Union[str, float]):
                # "isinstance" type guard
                if isinstance(val, str):
                    # Type of ``val`` is narrowed to ``str``
                    ...
                else:
                    # Else, type of ``val`` is narrowed to ``float``.
                    ...
        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
        form of ``TypeA`` (it can even be a wider form) and this may lead to
        type-unsafe results. The main reason is to allow for things like
        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
        a subtype of the former, since ``List`` is invariant. The responsibility of
        writing type-safe type guards is left to the user.
        ``TypeGuard`` also works with type variables. For more information, see
        PEP 647 (User-Defined Type Guards).
        """
        __slots__ = ('__type__',)
        def __init__(self, tp=None, **kwds):
            self.__type__ = tp
        def __getitem__(self, item):
            # Only the bare form may be subscripted, and only once.
            cls = type(self)
            if self.__type__ is None:
                return cls(typing._type_check(item,
                           '{} accepts only a single type.'.format(cls.__name__[1:])),
                           _root=True)
            raise TypeError('{} cannot be further subscripted'
                            .format(cls.__name__[1:]))
        def _eval_type(self, globalns, localns):
            # Resolve forward references inside the guarded type.
            new_tp = typing._eval_type(self.__type__, globalns, localns)
            if new_tp == self.__type__:
                return self
            return type(self)(new_tp, _root=True)
        def __repr__(self):
            r = super().__repr__()
            if self.__type__ is not None:
                r += '[{}]'.format(typing._type_repr(self.__type__))
            return r
        def __hash__(self):
            return hash((type(self).__name__, self.__type__))
        def __eq__(self, other):
            if not isinstance(other, _TypeGuard):
                return NotImplemented
            if self.__type__ is not None:
                return self.__type__ == other.__type__
            return self is other
    TypeGuard = _TypeGuard(_root=True)
else:
    # 3.5.0-3.5.2: oldest fallback; the metaclass carries the subscription
    # machinery and the guarded type lives on the class as ``__type__``.
    class _TypeGuardMeta(typing.TypingMeta):
        """Metaclass for TypeGuard"""
        def __new__(cls, name, bases, namespace, tp=None, _root=False):
            self = super().__new__(cls, name, bases, namespace, _root=_root)
            if tp is not None:
                self.__type__ = tp
            return self
        def __instancecheck__(self, obj):
            raise TypeError("TypeGuard cannot be used with isinstance().")
        def __subclasscheck__(self, cls):
            raise TypeError("TypeGuard cannot be used with issubclass().")
        def __getitem__(self, item):
            # Only the bare form may be subscripted, and only once.
            cls = type(self)
            if self.__type__ is not None:
                raise TypeError('{} cannot be further subscripted'
                                .format(cls.__name__[1:]))
            param = typing._type_check(
                item,
                '{} accepts only single type.'.format(cls.__name__[1:]))
            return cls(self.__name__, self.__bases__,
                       dict(self.__dict__), tp=param, _root=True)
        def _eval_type(self, globalns, localns):
            new_tp = typing._eval_type(self.__type__, globalns, localns)
            if new_tp == self.__type__:
                return self
            # NOTE(review): rebuilds from the old ``self.__type__`` rather than
            # the freshly evaluated ``new_tp`` — confirm against upstream
            # typing_extensions before relying on forward-ref resolution here.
            return type(self)(self.__name__, self.__bases__,
                              dict(self.__dict__), tp=self.__type__,
                              _root=True)
        def __repr__(self):
            r = super().__repr__()
            if self.__type__ is not None:
                r += '[{}]'.format(typing._type_repr(self.__type__))
            return r
        def __hash__(self):
            return hash((type(self).__name__, self.__type__))
        def __eq__(self, other):
            if not hasattr(other, "__type__"):
                return NotImplemented
            if self.__type__ is not None:
                return self.__type__ == other.__type__
            return self is other
    class TypeGuard(typing.Final, metaclass=_TypeGuardMeta, _root=True):
        """Special typing form used to annotate the return type of a user-defined
        type guard function. ``TypeGuard`` only accepts a single type argument.
        At runtime, functions marked this way should return a boolean.
        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
        type checkers to determine a more precise type of an expression within a
        program's code flow. Usually type narrowing is done by analyzing
        conditional code flow and applying the narrowing to a block of code. The
        conditional expression here is sometimes referred to as a "type guard".
        Sometimes it would be convenient to use a user-defined boolean function
        as a type guard. Such a function should use ``TypeGuard[...]`` as its
        return type to alert static type checkers to this intention.
        Using ``-> TypeGuard`` tells the static type checker that for a given
        function:
        1. The return value is a boolean.
        2. If the return value is ``True``, the type of its argument
        is the type inside ``TypeGuard``.
        For example::
            def is_str(val: Union[str, float]):
                # "isinstance" type guard
                if isinstance(val, str):
                    # Type of ``val`` is narrowed to ``str``
                    ...
                else:
                    # Else, type of ``val`` is narrowed to ``float``.
                    ...
        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
        form of ``TypeA`` (it can even be a wider form) and this may lead to
        type-unsafe results. The main reason is to allow for things like
        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
        a subtype of the former, since ``List`` is invariant. The responsibility of
        writing type-safe type guards is left to the user.
        ``TypeGuard`` also works with type variables. For more information, see
        PEP 647 (User-Defined Type Guards).
        """
        __type__ = None
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_pytesttester.pyi | from collections.abc import Iterable
from typing import Literal as L
__all__: list[str]
class PytestTester:
    """Stub for NumPy's pytest-based test runner (exposed as ``numpy.test``
    and per-submodule ``test`` attributes); ``module_name`` names the module
    whose tests are collected, and calling the instance runs them, returning
    True on success."""
    module_name: str
    def __init__(self, module_name: str) -> None: ...
    def __call__(
        self,
        label: L["fast", "full"] = ...,
        verbose: int = ...,
        extra_argv: None | Iterable[str] = ...,
        doctests: L[False] = ...,
        coverage: bool = ...,
        durations: int = ...,
        tests: None | Iterable[str] = ...,
    ) -> bool: ...
| 489 | unknown | 24.789472 | 53 | 0.505112 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ctypeslib.pyi | # NOTE: Numpy's mypy plugin is used for importing the correct
# platform-specific `ctypes._SimpleCData[int]` sub-type
from ctypes import c_int64 as _c_intp
import os
import sys
import ctypes
from collections.abc import Iterable, Sequence
from typing import (
Literal as L,
Any,
Union,
TypeVar,
Generic,
overload,
ClassVar,
)
from numpy import (
ndarray,
dtype,
generic,
bool_,
byte,
short,
intc,
int_,
longlong,
ubyte,
ushort,
uintc,
uint,
ulonglong,
single,
double,
longdouble,
void,
)
from numpy.core._internal import _ctypes
from numpy.core.multiarray import flagsobj
from numpy._typing import (
# Arrays
NDArray,
_ArrayLike,
# Shapes
_ShapeLike,
# DTypes
DTypeLike,
_DTypeLike,
_VoidDTypeLike,
_BoolCodes,
_UByteCodes,
_UShortCodes,
_UIntCCodes,
_UIntCodes,
_ULongLongCodes,
_ByteCodes,
_ShortCodes,
_IntCCodes,
_IntCodes,
_LongLongCodes,
_SingleCodes,
_DoubleCodes,
_LongDoubleCodes,
)
# TODO: Add a proper `_Shape` bound once we've got variadic typevars
# Module-private typevars: `_DType` is any concrete dtype, `_DTypeOptional`
# additionally admits None, and `_SCT` is any numpy scalar type.
_DType = TypeVar("_DType", bound=dtype[Any])
_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any])
_SCT = TypeVar("_SCT", bound=generic)
# All string spellings accepted for the `flags` argument of `ndpointer`.
_FlagsKind = L[
    'C_CONTIGUOUS', 'CONTIGUOUS', 'C',
    'F_CONTIGUOUS', 'FORTRAN', 'F',
    'ALIGNED', 'A',
    'WRITEABLE', 'W',
    'OWNDATA', 'O',
    'WRITEBACKIFCOPY', 'X',
]
# TODO: Add a shape typevar once we have variadic typevars (PEP 646)
# Base class of the dynamic pointer types produced by `ndpointer`; usable
# directly as a ctypes ``argtypes`` entry.
class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]):
    # In practice these 4 classvars are defined in the dynamic class
    # returned by `ndpointer`
    _dtype_: ClassVar[_DTypeOptional]
    _shape_: ClassVar[None]
    _ndim_: ClassVar[None | int]
    _flags_: ClassVar[None | list[_FlagsKind]]
    # ctypes conversion hook: a dtype-less pointer accepts any ndarray,
    # a dtype-parametrized one only arrays of that dtype.
    @overload
    @classmethod
    def from_param(cls: type[_ndptr[None]], obj: ndarray[Any, Any]) -> _ctypes: ...
    @overload
    @classmethod
    def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes: ...
# Variant returned when `ndpointer` receives both a dtype and a shape;
# the pointee can then be recovered as an ndarray via `.contents`.
class _concrete_ndptr(_ndptr[_DType]):
    _dtype_: ClassVar[_DType]
    _shape_: ClassVar[tuple[int, ...]]
    @property
    def contents(self) -> ndarray[Any, _DType]: ...
# Load the shared library `libname` from the directory of `loader_path`,
# trying platform-specific extensions when `libname` has none.
def load_library(
    libname: str | bytes | os.PathLike[str] | os.PathLike[bytes],
    loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes],
) -> ctypes.CDLL: ...
__all__: list[str]
# Platform-dependent ctypes integer matching `np.intp` (see header NOTE).
c_intp = _c_intp
# `ndpointer` builds an ndarray-validating ctypes argument type.  Overload
# resolution: when a concrete `shape` is given the result is a
# `_concrete_ndptr` (dereferenceable via `.contents`), otherwise a plain
# `_ndptr`; the dtype parameter is propagated when statically known.
@overload
def ndpointer(
    dtype: None = ...,
    ndim: int = ...,
    shape: None | _ShapeLike = ...,
    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
) -> type[_ndptr[None]]: ...
@overload
def ndpointer(
    dtype: _DTypeLike[_SCT],
    ndim: int = ...,
    *,
    shape: _ShapeLike,
    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
) -> type[_concrete_ndptr[dtype[_SCT]]]: ...
@overload
def ndpointer(
    dtype: DTypeLike,
    ndim: int = ...,
    *,
    shape: _ShapeLike,
    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
) -> type[_concrete_ndptr[dtype[Any]]]: ...
@overload
def ndpointer(
    dtype: _DTypeLike[_SCT],
    ndim: int = ...,
    shape: None = ...,
    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
) -> type[_ndptr[dtype[_SCT]]]: ...
@overload
def ndpointer(
    dtype: DTypeLike,
    ndim: int = ...,
    shape: None = ...,
    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
) -> type[_ndptr[dtype[Any]]]: ...
# Map a numpy dtype-like onto the equivalent ctypes type: one overload per
# supported scalar kind, plus structured (void) dtypes and dtype strings,
# which resolve to `ctypes.Structure`/`ctypes.Union` subclasses at runtime.
@overload
def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[bool_] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ...
@overload
def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ...
@overload
def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ...
@overload
def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ...
@overload
def as_ctypes_type(dtype: _IntCodes | _DTypeLike[int_] | type[int | ctypes.c_long]) -> type[ctypes.c_long]: ...
@overload
def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ...
@overload
def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ...
@overload
def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ...
@overload
def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ...
@overload
def as_ctypes_type(dtype: _UIntCodes | _DTypeLike[uint] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ...
@overload
def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ...
@overload
def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ...
@overload
def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ...
@overload
def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ...
@overload
def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure`
@overload
def as_ctypes_type(dtype: str) -> type[Any]: ...
# Convert a ctypes pointer (an explicit shape is then mandatory) or any
# array-like into an ndarray.
@overload
def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ...
@overload
def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
@overload
def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]: ...
# Convert a numpy scalar or ndarray into the corresponding ctypes object:
# one overload per scalar type; arrays map to `ctypes.Array` of the matching
# element type, and void (structured) data to Structure/Union instances.
@overload
def as_ctypes(obj: bool_) -> ctypes.c_bool: ...
@overload
def as_ctypes(obj: byte) -> ctypes.c_byte: ...
@overload
def as_ctypes(obj: short) -> ctypes.c_short: ...
@overload
def as_ctypes(obj: intc) -> ctypes.c_int: ...
@overload
def as_ctypes(obj: int_) -> ctypes.c_long: ...
@overload
def as_ctypes(obj: longlong) -> ctypes.c_longlong: ...
@overload
def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ...
@overload
def as_ctypes(obj: ushort) -> ctypes.c_ushort: ...
@overload
def as_ctypes(obj: uintc) -> ctypes.c_uint: ...
@overload
def as_ctypes(obj: uint) -> ctypes.c_ulong: ...
@overload
def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ...
@overload
def as_ctypes(obj: single) -> ctypes.c_float: ...
@overload
def as_ctypes(obj: double) -> ctypes.c_double: ...
@overload
def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ...
@overload
def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure`
@overload
def as_ctypes(obj: NDArray[bool_]) -> ctypes.Array[ctypes.c_bool]: ...
@overload
def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ...
@overload
def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ...
@overload
def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ...
@overload
def as_ctypes(obj: NDArray[int_]) -> ctypes.Array[ctypes.c_long]: ...
@overload
def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ...
@overload
def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ...
@overload
def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ...
@overload
def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ...
@overload
def as_ctypes(obj: NDArray[uint]) -> ctypes.Array[ctypes.c_ulong]: ...
@overload
def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ...
@overload
def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ...
@overload
def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ...
@overload
def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ...
@overload
def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure`
| 7,962 | unknown | 30.599206 | 130 | 0.651595 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/matlib.py | import warnings
# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
# Importing this module at all is deprecated: warn immediately, pointing the
# user at plain ndarrays instead of the matrix subclass.
warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
              "The matrix subclass is not the recommended way to represent "
              "matrices or deal with linear algebra (see "
              "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
              "Please adjust your code to use regular ndarray. ",
              PendingDeprecationWarning, stacklevel=2)
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# Matlib.py contains all functions in the numpy namespace with a few
# replacements. See doc/source/reference/routines.matlib.rst for details.
# Need * as we're copying the numpy namespace.
from numpy import * # noqa: F403
__version__ = np.__version__
# Public API: everything numpy exports, plus the matlib-only helpers.
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
    """Return a new matrix of given shape and type, without initializing entries.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty matrix.
    dtype : data-type, optional
        Desired output data-type.
    order : {'C', 'F'}, optional
        Whether to store multi-dimensional data in row-major
        (C-style) or column-major (Fortran-style) order in memory.

    See Also
    --------
    empty_like, zeros

    Notes
    -----
    Unlike `zeros`, the entries are whatever happened to be in memory, which
    is marginally faster but means every element must be assigned before use.

    Examples
    --------
    >>> import numpy.matlib
    >>> np.matlib.empty((2, 2))    # filled with random data
    matrix([[ 6.76425276e-320, 9.79033856e-307], # random
            [ 7.39337286e-309, 3.22135945e-309]])
    >>> np.matlib.empty((2, 2), dtype=int)
    matrix([[ 6600475, 0], # random
            [ 6586976, 22740995]])
    """
    # Allocate through ndarray.__new__ so the result is a `matrix` instance
    # without going through matrix(...)'s own constructor.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    return out
def ones(shape, dtype=None, order='C'):
    """
    Matrix of ones.

    Return a matrix of given shape and type, filled with ones.

    Parameters
    ----------
    shape : {sequence of ints, int}
        Shape of the matrix
    dtype : data-type, optional
        The desired data-type for the matrix, default is np.float64.
    order : {'C', 'F'}, optional
        Whether to store matrix in C- or Fortran-contiguous order,
        default is 'C'.

    Returns
    -------
    out : matrix
        Matrix of ones of given shape, dtype, and order.

    See Also
    --------
    ones : Array of ones.
    matlib.zeros : Zero matrix.

    Notes
    -----
    A one-dimensional `shape` -- ``(N,)`` or a plain scalar ``N`` -- yields
    a single-row matrix of shape ``(1, N)``.

    Examples
    --------
    >>> np.matlib.ones((2,3))
    matrix([[1., 1., 1.],
            [1., 1., 1.]])
    >>> np.matlib.ones(2)
    matrix([[1., 1.]])
    """
    # Allocate an uninitialized matrix, then overwrite every entry with 1.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(1)
    return out
def zeros(shape, dtype=None, order='C'):
    """
    Return a matrix of the given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix.
    dtype : data-type, optional
        Desired data-type for the matrix; defaults to float.
    order : {'C', 'F'}, optional
        Whether to store the result in C- or Fortran-contiguous order,
        default is 'C'.

    Returns
    -------
    out : matrix
        Zero matrix of the given shape, dtype, and order.

    See Also
    --------
    numpy.zeros : Equivalent array function.
    matlib.ones : Return a matrix of ones.

    Notes
    -----
    A scalar or length-one ``shape`` produces a single-row matrix of shape
    ``(1, N)``.
    """
    # Allocate uninitialized storage, then zero it in place.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
def identity(n,dtype=None):
    """
    Return the square ``n`` x ``n`` identity matrix.

    Parameters
    ----------
    n : int
        Size of the returned identity matrix.
    dtype : data-type, optional
        Data-type of the output. Defaults to ``float``.

    Returns
    -------
    out : matrix
        ``n`` x ``n`` matrix with ones on the main diagonal and zeros
        everywhere else.

    See Also
    --------
    numpy.identity : Equivalent array function.
    matlib.eye : More general matrix identity function.
    """
    # A pattern of one 1 followed by n zeros has length n+1; cycling it
    # through the flat iterator of an (n, n) matrix lands exactly one 1 on
    # each row's diagonal position.
    pattern = array([1] + n * [0], dtype=dtype)
    out = empty((n, n), dtype=dtype)
    out.flat = pattern
    return out
def eye(n,M=None, k=0, dtype=float, order='C'):
    """
    Return a matrix with ones on the ``k``-th diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output; defaults to `n`.
    k : int, optional
        Index of the diagonal: 0 is the main diagonal, positive values
        select an upper diagonal, negative values a lower diagonal.
    dtype : dtype, optional
        Data-type of the returned matrix.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) storage order.

    Returns
    -------
    I : matrix
        ``n`` x ``M`` matrix that is zero everywhere except on the `k`-th
        diagonal, whose values are one.

    See Also
    --------
    numpy.eye : Equivalent array function.
    identity : Square identity matrix.
    """
    # Build the diagonal array with the ndarray implementation, then view
    # the result as a matrix.
    diagonal = np.eye(n, M=M, k=k, dtype=dtype, order=order)
    return asmatrix(diagonal)
def rand(*args):
    """
    Return a matrix of random values with the given shape.

    The matrix is filled with samples drawn from a uniform distribution
    over ``[0, 1)``.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output. Given as N integers, each specifies the size
        of one dimension; given as a single tuple, the tuple is the
        complete shape.

    Returns
    -------
    out : ndarray
        The matrix of random values with shape given by `\\*args`.

    See Also
    --------
    randn, numpy.random.RandomState.rand

    Notes
    -----
    If the first argument is a tuple, any further positional arguments
    are ignored.
    """
    # A leading tuple carries the whole shape; otherwise the positional
    # integers themselves form the shape.
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.rand(*shape))
def randn(*args):
    """
    Return a random matrix with data from the "standard normal"
    distribution.

    `randn` fills a matrix with floats sampled from a univariate Gaussian
    distribution of mean 0 and variance 1.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output. Given as N integers, each specifies the size
        of one dimension; given as a single tuple, the tuple is the
        complete shape.

    Returns
    -------
    Z : matrix of floats
        A matrix of floating-point samples drawn from the standard normal
        distribution.

    See Also
    --------
    rand, numpy.random.RandomState.randn

    Notes
    -----
    For samples from :math:`N(\\mu, \\sigma^2)`, use
    ``sigma * np.matlib.randn(...) + mu``.
    """
    # A leading tuple carries the whole shape; otherwise the positional
    # integers themselves form the shape.
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.randn(*shape))
def repmat(a, m, n):
    """
    Tile a 0-D to 2-D array or matrix ``m`` x ``n`` times.

    Parameters
    ----------
    a : array_like
        The array or matrix to be repeated.
    m, n : int
        The number of times `a` is repeated along the first and second
        axes.

    Returns
    -------
    out : ndarray
        The result of repeating `a`; always 2-D, with ``m`` copies
        stacked vertically and ``n`` copies side by side.
    """
    a = asanyarray(a)
    # Treat scalars and 1-D inputs as single-row blocks.
    if a.ndim == 0:
        base_rows, base_cols = 1, 1
    elif a.ndim == 1:
        base_rows, base_cols = 1, a.shape[0]
    else:
        base_rows, base_cols = a.shape
    rows = base_rows * m
    cols = base_cols * n
    # Repeat the flattened block m times row-wise, restore the column
    # structure, repeat each row n times, then fold into the tiled shape.
    tiled = a.reshape(1, a.size).repeat(m, 0).reshape(rows, base_cols).repeat(n, 0)
    return tiled.reshape(rows, cols)
| 10,365 | Python | 26.496021 | 84 | 0.566136 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_distributor_init.py |
'''
Helper to preload windows dlls to prevent dll not found errors.
Once a DLL is preloaded, its namespace is made available to any
subsequent DLL. This file originated in the numpy-wheels repo,
and is created as part of the scripts that build the wheel.
'''
import os
import glob
# Windows-only: preload bundled OpenBLAS DLLs so later extension modules can
# resolve them without the directory being on the DLL search path.
if os.name == 'nt':
    # convention for storing / loading the DLL from
    # numpy/.libs/, if present
    try:
        from ctypes import WinDLL
        basedir = os.path.dirname(__file__)
    # Deliberately broad: preloading is best-effort and must never prevent
    # numpy itself from importing.
    except:
        pass
    else:
        libs_dir = os.path.abspath(os.path.join(basedir, '.libs'))
        DLL_filenames = []
        if os.path.isdir(libs_dir):
            for filename in glob.glob(os.path.join(libs_dir,
                                                   '*openblas*dll')):
                # NOTE: would it change behavior to load ALL
                # DLLs at this path vs. the name restriction?
                # The WinDLL handle is discarded; only the filename is
                # recorded, for the warning below.
                WinDLL(os.path.abspath(filename))
                DLL_filenames.append(filename)
        if len(DLL_filenames) > 1:
            import warnings
            warnings.warn("loaded more than 1 DLL from .libs:"
                          "\n%s" % "\n".join(DLL_filenames),
                          stacklevel=1)
| 1,215 | Python | 35.848484 | 69 | 0.567901 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_pytesttester.py | """
Pytest test running.
This module implements the ``test()`` function for NumPy modules. The usual
boiler plate for doing that is to put the following in the module
``__init__.py`` file::
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
Warnings filtering and other runtime settings should be dealt with in the
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
whether or not that file is found as follows:
* ``pytest.ini`` is present (develop mode)
All warnings except those explicitly filtered out are raised as error.
* ``pytest.ini`` is absent (release mode)
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
warnings are passed through.
In practice, tests run from the numpy repo are run in develop mode. That
includes the standard ``python runtests.py`` invocation.
This module is imported by every numpy subpackage, so lies at the top level to
simplify circular import issues. For the same reason, it contains no numpy
imports at module scope, instead importing numpy within function calls.
"""
import sys
import os
__all__ = ['PytestTester']
def _show_numpy_info():
    """Print the NumPy version, relaxed-strides status, and CPU features."""
    import numpy as np

    print("NumPy version %s" % np.__version__)
    # A (10, 1) C-ordered array only reports F-contiguity when relaxed
    # strides checking is enabled.
    relaxed = np.ones((10, 1), order="C").flags.f_contiguous
    print("NumPy relaxed strides checking option:", relaxed)
    features = np.lib.utils._opt_info()
    print("NumPy CPU features: ", (features if features else 'nothing enabled'))
class PytestTester:
    """
    Pytest test runner.
    A test function is typically added to a package's __init__.py like so::
        from numpy._pytesttester import PytestTester
        test = PytestTester(__name__).test
        del PytestTester
    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.
    Attributes
    ----------
    module_name : str
        Full path to the package to test.
    Parameters
    ----------
    module_name : module name
        The name of the module to test.
    Notes
    -----
    Unlike the previous ``nose``-based implementation, this class is not
    publicly exposed as it performs some ``numpy``-specific warning
    suppression.
    """
    def __init__(self, module_name):
        # Only the dotted module name is stored; the module object itself is
        # looked up lazily in sys.modules when the tester is called.
        self.module_name = module_name
    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for module using pytest.
        Parameters
        ----------
        label : {'fast', 'full'}, optional
            Identifies the tests to run. When set to 'fast', tests decorated
            with `pytest.mark.slow` are skipped, when 'full', the slow marker
            is ignored.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'
        Returns
        -------
        result : bool
            Return True on success, false otherwise.
        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests for
        it. For example, to run all tests for numpy.lib:
        >>> np.lib.test() #doctest: +SKIP
        Examples
        --------
        >>> result = np.lib.test() #doctest: +SKIP
        ...
        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
        >>> result
        True
        """
        import pytest
        import warnings
        module = sys.modules[self.module_name]
        # Directory of the installed package; pytest collects from here via
        # the --pyargs mechanism set up below.
        module_path = os.path.abspath(module.__path__[0])
        # setup the pytest arguments
        pytest_args = ["-l"]
        # offset verbosity. The "-q" cancels a "-v".
        pytest_args += ["-q"]
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            # Filter out distutils cpu warnings (could be localized to
            # distutils tests). ASV has problems with top level import,
            # so fetch module for suppression here.
            from numpy.distutils import cpuinfo
        with warnings.catch_warnings(record=True):
            # Ignore the warning from importing the array_api submodule. This
            # warning is done on import, so it would break pytest collection,
            # but importing it early here prevents the warning from being
            # issued when it imported again.
            import numpy.array_api
        # Filter out annoying import messages. Want these in both develop and
        # release mode.
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
            ]
        # When testing matrices, ignore their PendingDeprecationWarnings
        pytest_args += [
            "-W ignore:the matrix subclass is not",
            "-W ignore:Importing from numpy.matlib is",
            ]
        if doctests:
            raise ValueError("Doctests not supported")
        if extra_argv:
            pytest_args += list(extra_argv)
        if verbose > 1:
            pytest_args += ["-" + "v"*(verbose - 1)]
        if coverage:
            pytest_args += ["--cov=" + module_path]
        if label == "fast":
            # not importing at the top level to avoid circular import of module
            from numpy.testing import IS_PYPY
            if IS_PYPY:
                pytest_args += ["-m", "not slow and not slow_pypy"]
            else:
                pytest_args += ["-m", "not slow"]
        elif label != "full":
            pytest_args += ["-m", label]
        if durations >= 0:
            pytest_args += ["--durations=%s" % durations]
        if tests is None:
            tests = [self.module_name]
        pytest_args += ["--pyargs"] + list(tests)
        # run tests.
        _show_numpy_info()
        try:
            code = pytest.main(pytest_args)
        # pytest.main can raise SystemExit when invoked programmatically;
        # treat its exit code as the test status.
        except SystemExit as exc:
            code = exc.code
        return code == 0
| 6,676 | Python | 30.947368 | 79 | 0.599011 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ctypeslib.py | """
============================
``ctypes`` Utility Functions
============================
See Also
--------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
Our result type, an ndarray that must be of type double, be 1-dimensional
and is C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #doctest: +SKIP
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
'as_ctypes_type']
import os
from numpy import (
integer, ndarray, dtype as _dtype, asarray, frombuffer
)
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
    # ctypes could not be imported: expose stubs that fail loudly on first
    # use, so that ``import numpy.ctypeslib`` itself still succeeds.
    def _dummy(*args, **kwds):
        """
        Dummy object that raises an ImportError if ctypes is not available.
        Raises
        ------
        ImportError
            If ctypes is not available.
        """
        raise ImportError("ctypes is not available.")
    load_library = _dummy
    as_ctypes = _dummy
    as_array = _dummy
    from numpy import intp as c_intp
    _ndptr_base = object
else:
    import numpy.core._internal as nic
    # ctypes integer type with the same width as numpy's intp.
    c_intp = nic._getintp_ctype()
    del nic
    # ndpointer classes derive from c_void_p so validated arrays can be
    # passed anywhere a void pointer is accepted.
    _ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
    """
    Load a shared library found next to `loader_path` and return it as a
    ctypes library object.

    It is possible to load a library using
    >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
    but there are cross-platform considerations, such as library file
    extensions, plus the fact Windows will just load the first library it
    finds with that name. NumPy supplies this function as a convenience.

    .. versionchanged:: 1.20.0
        Allow libname and loader_path to take any
        :term:`python:path-like object`.

    Parameters
    ----------
    libname : path-like
        Name of the library, which can have 'lib' as a prefix,
        but without an extension.
    loader_path : path-like
        Where the library can be found.

    Returns
    -------
    ctypes.cdll[libpath] : library object
        A ctypes library object.

    Raises
    ------
    OSError
        If there is no library with the expected extension, or the
        library is defective and cannot be loaded.
    """
    if ctypes.__version__ < '1.0.1':
        import warnings
        warnings.warn("All features of ctypes interface may not work "
                      "with ctypes < 1.0.1", stacklevel=2)

    # Accept arbitrary path-like objects for both arguments.
    libname = os.fsdecode(libname)
    loader_path = os.fsdecode(loader_path)

    if os.path.splitext(libname)[1]:
        # An explicit extension was given: try exactly that file.
        candidates = [libname]
    else:
        # Try the platform-specific extension(s). Shared libraries and
        # loadable Python modules can differ (mac/windows/linux >= py3.2),
        # so try the Python-extension variant first when it differs.
        from numpy.distutils.misc_util import get_shared_lib_extension
        so_ext = get_shared_lib_extension()
        candidates = [libname + so_ext]
        py_ext = get_shared_lib_extension(is_python_ext=True)
        if py_ext != so_ext:
            candidates.insert(0, libname + py_ext)

    loader_path = os.path.abspath(loader_path)
    if os.path.isdir(loader_path):
        libdir = loader_path
    else:
        libdir = os.path.dirname(loader_path)

    for candidate in candidates:
        libpath = os.path.join(libdir, candidate)
        if os.path.exists(libpath):
            try:
                return ctypes.cdll[libpath]
            except OSError:
                # The file exists but is defective and cannot be loaded.
                raise
    # No candidate file was found at all.
    raise OSError("no file with expected extension")
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'WRITEBACKIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(_ndptr_base):

    @classmethod
    def from_param(cls, obj):
        """
        Validate `obj` against the constraints recorded on this class and
        return its ctypes view; called by ctypes when the class is used in
        an ``argtypes`` specification.
        """
        # Each constraint attribute is None when unconstrained; the first
        # violated constraint raises TypeError.
        if not isinstance(obj, ndarray):
            raise TypeError("argument must be an ndarray")
        if cls._dtype_ is not None and obj.dtype != cls._dtype_:
            raise TypeError("array must have data type %s" % cls._dtype_)
        if cls._ndim_ is not None and obj.ndim != cls._ndim_:
            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
        if cls._shape_ is not None and obj.shape != cls._shape_:
            raise TypeError("array must have shape %s" % str(cls._shape_))
        if cls._flags_ is not None and (obj.flags.num & cls._flags_) != cls._flags_:
            raise TypeError("array must have flags %s" %
                            _flags_fromnum(cls._flags_))
        return obj.ctypes
class _concrete_ndptr(_ndptr):
    """
    Like _ndptr, but with `_shape_` and `_dtype_` specified.

    Notably, this means the pointer has enough information to reconstruct
    the array, which is not generally true.
    """

    def _check_retval_(self):
        """
        Called by ctypes when this class is used as a shared-library
        function's ``restype``; automatically wraps the returned pointer
        into an array.
        """
        return self.contents

    @property
    def contents(self):
        """
        Get an ndarray viewing the data pointed to by this pointer.

        This mirrors the `contents` attribute of a normal ctypes pointer.
        """
        # View the pointee as a single opaque blob of exactly itemsize
        # bytes, then reinterpret that buffer with the full (dtype, shape)
        # layout; squeeze removes the leading length-1 record axis.
        item_dtype = _dtype((self._dtype_, self._shape_))
        blob_type = ctypes.c_char * item_dtype.itemsize
        raw = ctypes.cast(self, ctypes.POINTER(blob_type)).contents
        return frombuffer(raw, dtype=item_dtype).squeeze(axis=0)
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
# Maps (dtype, ndim, shape, flags-num) -> previously built ndpointer class.
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
    """
    Array-checking restype/argtypes.
    An ndpointer instance is used to describe an ndarray in restypes
    and argtypes specifications. This approach is more flexible than
    using, for example, ``POINTER(c_double)``, since several restrictions
    can be specified, which are verified upon calling the ctypes function.
    These include data type, number of dimensions, shape and flags. If a
    given array does not satisfy the specified restrictions,
    a ``TypeError`` is raised.
    Parameters
    ----------
    dtype : data-type, optional
        Array data-type.
    ndim : int, optional
        Number of array dimensions.
    shape : tuple of ints, optional
        Array shape.
    flags : str or tuple of str
        Array flags; may be one or more of:
        - C_CONTIGUOUS / C / CONTIGUOUS
        - F_CONTIGUOUS / F / FORTRAN
        - OWNDATA / O
        - WRITEABLE / W
        - ALIGNED / A
        - WRITEBACKIFCOPY / X
    Returns
    -------
    klass : ndpointer type object
        A type object, which is an ``_ndtpr`` instance containing
        dtype, ndim, shape and flags information.
    Raises
    ------
    TypeError
        If a given array does not satisfy the specified restrictions.
    Examples
    --------
    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
    ...                                                  ndim=1,
    ...                                                  flags='C_CONTIGUOUS')]
    ... #doctest: +SKIP
    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
    ... #doctest: +SKIP
    """
    # normalize dtype to an Optional[dtype]
    if dtype is not None:
        dtype = _dtype(dtype)
    # normalize flags to an Optional[int]
    num = None
    if flags is not None:
        if isinstance(flags, str):
            flags = flags.split(',')
        elif isinstance(flags, (int, integer)):
            num = flags
            flags = _flags_fromnum(num)
        elif isinstance(flags, flagsobj):
            num = flags.num
            flags = _flags_fromnum(num)
        if num is None:
            # flags arrived as a sequence of names; normalize then encode.
            try:
                flags = [x.strip().upper() for x in flags]
            except Exception as e:
                raise TypeError("invalid flags specification") from e
            num = _num_fromflags(flags)
    # normalize shape to an Optional[tuple]
    if shape is not None:
        try:
            shape = tuple(shape)
        except TypeError:
            # single integer -> 1-tuple
            shape = (shape,)
    # Reuse the previously-built class for an identical parameter set.
    cache_key = (dtype, ndim, shape, num)
    try:
        return _pointer_type_cache[cache_key]
    except KeyError:
        pass
    # produce a name for the new type
    if dtype is None:
        name = 'any'
    elif dtype.names is not None:
        # structured dtype: derive a unique name from the object id
        name = str(id(dtype))
    else:
        name = dtype.str
    if ndim is not None:
        name += "_%dd" % ndim
    if shape is not None:
        name += "_"+"x".join(str(x) for x in shape)
    if flags is not None:
        name += "_"+"_".join(flags)
    # A fully-specified (dtype, shape) pointer can reconstruct its array,
    # so it gets the concrete base class.
    if dtype is not None and shape is not None:
        base = _concrete_ndptr
    else:
        base = _ndptr
    klass = type("ndpointer_%s"%name, (base,),
                 {"_dtype_": dtype,
                  "_shape_" : shape,
                  "_ndim_" : ndim,
                  "_flags_" : num})
    _pointer_type_cache[cache_key] = klass
    return klass
if ctypes is not None:
    def _ctype_ndarray(element_type, shape):
        """ Create an ndarray of the given element type and shape """
        # Nest array types innermost dimension first, so indexing order
        # matches the shape tuple.
        for dim in shape[::-1]:
            element_type = dim * element_type
            # prevent the type name include np.ctypeslib
            element_type.__module__ = None
        return element_type
    def _get_scalar_type_map():
        """
        Return a dictionary mapping native endian scalar dtype to ctypes types
        """
        ct = ctypes
        simple_types = [
            ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
            ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
            ct.c_float, ct.c_double,
            ct.c_bool,
        ]
        return {_dtype(ctype): ctype for ctype in simple_types}
    # Built once at import time.
    _scalar_type_map = _get_scalar_type_map()
    def _ctype_from_dtype_scalar(dtype):
        # swapping twice ensure that `=` is promoted to <, >, or |
        dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
        dtype_native = dtype.newbyteorder('=')
        try:
            ctype = _scalar_type_map[dtype_native]
        except KeyError as e:
            raise NotImplementedError(
                "Converting {!r} to a ctypes type".format(dtype)
            ) from None
        # Pick the explicit-endian variant of the ctype when needed.
        if dtype_with_endian.byteorder == '>':
            ctype = ctype.__ctype_be__
        elif dtype_with_endian.byteorder == '<':
            ctype = ctype.__ctype_le__
        return ctype
    def _ctype_from_dtype_subarray(dtype):
        # Sub-array dtype: convert the element, then wrap in an array type.
        element_dtype, shape = dtype.subdtype
        ctype = _ctype_from_dtype(element_dtype)
        return _ctype_ndarray(ctype, shape)
    def _ctype_from_dtype_structured(dtype):
        # extract offsets of each field
        field_data = []
        for name in dtype.names:
            field_dtype, offset = dtype.fields[name][:2]
            field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
        # ctypes doesn't care about field order
        field_data = sorted(field_data, key=lambda f: f[0])
        if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
            # union, if multiple fields all at address 0
            size = 0
            _fields_ = []
            for offset, name, ctype in field_data:
                _fields_.append((name, ctype))
                size = max(size, ctypes.sizeof(ctype))
            # pad to the right size
            if dtype.itemsize != size:
                _fields_.append(('', ctypes.c_char * dtype.itemsize))
            # we inserted manual padding, so always `_pack_`
            return type('union', (ctypes.Union,), dict(
                _fields_=_fields_,
                _pack_=1,
                __module__=None,
            ))
        else:
            # Struct layout: emit anonymous char padding between fields so
            # every field lands at its recorded offset.
            last_offset = 0
            _fields_ = []
            for offset, name, ctype in field_data:
                padding = offset - last_offset
                if padding < 0:
                    raise NotImplementedError("Overlapping fields")
                if padding > 0:
                    _fields_.append(('', ctypes.c_char * padding))
                _fields_.append((name, ctype))
                last_offset = offset + ctypes.sizeof(ctype)
            padding = dtype.itemsize - last_offset
            if padding > 0:
                _fields_.append(('', ctypes.c_char * padding))
            # we inserted manual padding, so always `_pack_`
            return type('struct', (ctypes.Structure,), dict(
                _fields_=_fields_,
                _pack_=1,
                __module__=None,
            ))
    def _ctype_from_dtype(dtype):
        # Dispatch on dtype kind: structured, sub-array, or plain scalar.
        if dtype.fields is not None:
            return _ctype_from_dtype_structured(dtype)
        elif dtype.subdtype is not None:
            return _ctype_from_dtype_subarray(dtype)
        else:
            return _ctype_from_dtype_scalar(dtype)
    def as_ctypes_type(dtype):
        r"""
        Convert a dtype into a ctypes type.
        Parameters
        ----------
        dtype : dtype
            The dtype to convert
        Returns
        -------
        ctype
            A ctype scalar, union, array, or struct
        Raises
        ------
        NotImplementedError
            If the conversion is not possible
        Notes
        -----
        This function does not losslessly round-trip in either direction.
        ``np.dtype(as_ctypes_type(dt))`` will:
        - insert padding fields
        - reorder fields to be sorted by offset
        - discard field titles
        ``as_ctypes_type(np.dtype(ctype))`` will:
        - discard the class names of `ctypes.Structure`\ s and
          `ctypes.Union`\ s
        - convert single-element `ctypes.Union`\ s into single-element
          `ctypes.Structure`\ s
        - insert padding fields
        """
        return _ctype_from_dtype(_dtype(dtype))
    def as_array(obj, shape=None):
        """
        Create a numpy array from a ctypes array or POINTER.
        The numpy array shares the memory with the ctypes object.
        The shape parameter must be given if converting from a ctypes POINTER.
        The shape parameter is ignored if converting from a ctypes array
        """
        if isinstance(obj, ctypes._Pointer):
            # convert pointers to an array of the desired shape
            if shape is None:
                raise TypeError(
                    'as_array() requires a shape argument when called on a '
                    'pointer')
            p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
            obj = ctypes.cast(obj, p_arr_type).contents
        return asarray(obj)
    def as_ctypes(obj):
        """Create and return a ctypes object from a numpy array. Actually
        anything that exposes the __array_interface__ is accepted."""
        ai = obj.__array_interface__
        if ai["strides"]:
            raise TypeError("strided arrays not supported")
        if ai["version"] != 3:
            raise TypeError("only __array_interface__ version 3 supported")
        addr, readonly = ai["data"]
        if readonly:
            raise TypeError("readonly arrays unsupported")
        # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
        # dtype.itemsize (gh-14214)
        ctype_scalar = as_ctypes_type(ai["typestr"])
        result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
        result = result_type.from_address(addr)
        # Keep the source object alive for as long as the ctypes view exists.
        result.__keep = obj
        return result
| 17,460 | Python | 30.863139 | 90 | 0.560538 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/__init__.py | """
NumPy
=====
Provides
1. An array object of arbitrary homogeneous items
2. Fast mathematical operations over arrays
3. Linear Algebra, Fourier Transforms, Random Number Generation
How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
`the NumPy homepage <https://numpy.org>`_.
We recommend exploring the docstrings using
`IPython <https://ipython.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
The docstring examples assume that `numpy` has been imported as `np`::
>>> import numpy as np
Code snippets are indicated by three greater-than signs::
>>> x = 42
>>> x = x + 1
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
of the help() page. Ufuncs are implemented in C, not Python, for speed.
The native Python help() does not know how to view their help, but our
np.info() function does.
To search for documents containing a keyword, do::
>>> np.lookfor('keyword')
... # doctest: +SKIP
General-purpose documents like a glossary and help on the basic concepts
of numpy are available under the ``doc`` sub-module::
>>> from numpy import doc
>>> help(doc)
... # doctest: +SKIP
Available subpackages
---------------------
lib
Basic functions used by several sub-packages.
random
Core Random Tools
linalg
Core Linear Algebra Tools
fft
Core FFT routines
polynomial
Polynomial tools
testing
NumPy testing tools
distutils
Enhancements to distutils with support for
Fortran compilers support and more.
Utilities
---------
test
Run numpy unittests
show_config
Show numpy build configuration
dual
Overwrite certain functions with high-performance SciPy tools.
Note: `numpy.dual` is deprecated. Use the functions from NumPy or Scipy
directly instead of importing them from `numpy.dual`.
matlib
Make everything matrices.
__version__
NumPy version string
Viewing documentation using IPython
-----------------------------------
Start IPython with the NumPy profile (``ipython -p numpy``), which will
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
paste examples into the shell. To see which functions are available in
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
down the list. To view the docstring for a function, use
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
the source code).
Copies vs. in-place operation
-----------------------------
Most of the functions in `numpy` return a copy of the array argument
(e.g., `np.sort`). In-place versions of these functions are often
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
import sys
import warnings
from ._globals import (
ModuleDeprecationWarning, VisibleDeprecationWarning,
_NoValue, _CopyMode
)
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
    # `__NUMPY_SETUP__` is defined externally by numpy's own build/setup
    # machinery; when it is undefined, this is a normal installed import.
    __NUMPY_SETUP__
except NameError:
    __NUMPY_SETUP__ = False
if __NUMPY_SETUP__:
sys.stderr.write('Running from numpy source directory.\n')
else:
try:
from numpy.__config__ import show as show_config
except ImportError as e:
msg = """Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there."""
raise ImportError(msg) from e
__all__ = ['ModuleDeprecationWarning',
'VisibleDeprecationWarning']
# mapping of {name: (value, deprecation_msg)}
__deprecated_attrs__ = {}
# Allow distributors to run custom init code
from . import _distributor_init
from . import core
from .core import *
from . import compat
from . import lib
# NOTE: to be revisited following future namespace cleanup.
# See gh-14454 and gh-15672 for discussion.
from .lib import *
from . import linalg
from . import fft
from . import polynomial
from . import random
from . import ctypeslib
from . import ma
from . import matrixlib as _mat
from .matrixlib import *
# Deprecations introduced in NumPy 1.20.0, 2020-06-06
import builtins as _builtins
_msg = (
"`np.{n}` is a deprecated alias for the builtin `{n}`. "
"To silence this warning, use `{n}` by itself. Doing this will not "
"modify any behavior and is safe. {extended_msg}\n"
"Deprecated in NumPy 1.20; for more details and guidance: "
"https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
_specific_msg = (
"If you specifically wanted the numpy scalar type, use `np.{}` here.")
_int_extended_msg = (
"When replacing `np.{}`, you may wish to use e.g. `np.int64` "
"or `np.int32` to specify the precision. If you wish to review "
"your current use, check the release note link for "
"additional information.")
_type_info = [
("object", ""), # The NumPy scalar only exists by name.
("bool", _specific_msg.format("bool_")),
("float", _specific_msg.format("float64")),
("complex", _specific_msg.format("complex128")),
("str", _specific_msg.format("str_")),
("int", _int_extended_msg.format("int"))]
__deprecated_attrs__.update({
n: (getattr(_builtins, n), _msg.format(n=n, extended_msg=extended_msg))
for n, extended_msg in _type_info
})
# Numpy 1.20.0, 2020-10-19
__deprecated_attrs__["typeDict"] = (
core.numerictypes.typeDict,
"`np.typeDict` is a deprecated alias for `np.sctypeDict`."
)
# NumPy 1.22, 2021-10-20
__deprecated_attrs__["MachAr"] = (
core._machar.MachAr,
"`np.MachAr` is deprecated (NumPy 1.22)."
)
_msg = (
"`np.{n}` is a deprecated alias for `np.compat.{n}`. "
"To silence this warning, use `np.compat.{n}` by itself. "
"In the likely event your code does not need to work on Python 2 "
"you can use the builtin `{n2}` for which `np.compat.{n}` is itself "
"an alias. Doing this will not modify any behaviour and is safe. "
"{extended_msg}\n"
"Deprecated in NumPy 1.20; for more details and guidance: "
"https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
__deprecated_attrs__["long"] = (
getattr(compat, "long"),
_msg.format(n="long", n2="int",
extended_msg=_int_extended_msg.format("long")))
__deprecated_attrs__["unicode"] = (
getattr(compat, "unicode"),
_msg.format(n="unicode", n2="str",
extended_msg=_specific_msg.format("str_")))
del _msg, _specific_msg, _int_extended_msg, _type_info, _builtins
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
# Remove one of the two occurrences of `issubdtype`, which is exposed as
# both `numpy.core.issubdtype` and `numpy.lib.issubdtype`.
__all__.remove('issubdtype')
# These are exported by np.core, but are replaced by the builtins below
# remove them to ensure that we don't end up with `np.long == np.int_`,
# which would be a breaking change.
del long, unicode
__all__.remove('long')
__all__.remove('unicode')
# Remove things that are in the numpy.lib but not in the numpy namespace
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
# that prevents adding more things to the main namespace by accident.
# The list below will grow until the `from .lib import *` fixme above is
# taken care of
__all__.remove('Arrayterator')
del Arrayterator
# These names were removed in NumPy 1.20. For at least one release,
# attempts to access these names in the numpy namespace will trigger
# a warning, and calling the function will raise an exception.
_financial_names = ['fv', 'ipmt', 'irr', 'mirr', 'nper', 'npv', 'pmt',
'ppmt', 'pv', 'rate']
__expired_functions__ = {
name: (f'In accordance with NEP 32, the function {name} was removed '
'from NumPy version 1.20. A replacement for this function '
'is available in the numpy_financial library: '
'https://pypi.org/project/numpy-financial')
for name in _financial_names}
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
# oldnumeric and numarray were removed in 1.9. In case some packages import
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
numarray = 'removed'
def __getattr__(attr):
    """Module-level attribute hook (PEP 562).

    Handles three cases: names expired under NEP 32 (warn on access, raise
    on call), deprecated aliases (warn and return the old value), and lazy
    loading of the testing machinery.
    """
    # Expired functions: accessing the name warns, calling the returned
    # stub raises.
    expired_msg = __expired_functions__.get(attr)
    if expired_msg is not None:
        warnings.warn(expired_msg, DeprecationWarning, stacklevel=2)

        def _expired(*args, **kwds):
            raise RuntimeError(expired_msg)

        return _expired

    # Deprecated aliases: still usable, but emit a DeprecationWarning.
    deprecated = __deprecated_attrs__.get(attr)
    if deprecated is not None:
        val, dep_msg = deprecated
        warnings.warn(dep_msg, DeprecationWarning, stacklevel=2)
        return val

    # Importing Tester requires importing all of UnitTest which is not a
    # cheap import Since it is mainly used in test suits, we lazy import it
    # here to save on the order of 10 ms of import time for most users
    #
    # The previous way Tester was imported also had a side effect of adding
    # the full `numpy.testing` namespace
    if attr == 'testing':
        import numpy.testing as testing
        return testing
    if attr == 'Tester':
        from .testing import Tester
        return Tester

    raise AttributeError("module {!r} has no attribute "
                         "{!r}".format(__name__, attr))
def __dir__():
return list(globals().keys() | {'Tester', 'testing'})
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
There are some cases e.g. with wrong BLAS ABI that cause wrong
results under specific runtime conditions that are not necessarily
achieved during test suite runs, and it is useful to catch those early.
See https://github.com/numpy/numpy/issues/8577 and other
similar bug reports.
"""
try:
x = ones(2, dtype=float32)
if not abs(x.dot(x) - 2.0) < 1e-5:
raise AssertionError()
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
"pass simple sanity checks. This can be caused for example "
"by incorrect BLAS library being linked in, or by mixing "
"package managers (pip, conda, apt, ...). Search closed "
"numpy issues for similar problems.")
raise RuntimeError(msg.format(__file__)) from None
_sanity_check()
del _sanity_check
def _mac_os_check():
"""
Quick Sanity check for Mac OS look for accelerate build bugs.
Testing numpy polyfit calls init_dgelsd(LAPACK)
"""
try:
c = array([3., 2., 1.])
x = linspace(0, 2, 5)
y = polyval(c, x)
_ = polyfit(x, y, 2, cov=True)
except ValueError:
pass
import sys
if sys.platform == "darwin":
with warnings.catch_warnings(record=True) as w:
_mac_os_check()
# Throw runtime error, if the test failed Check for warning and error_message
error_message = ""
if len(w) > 0:
error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
msg = (
"Polyfit sanity test emitted a warning, most likely due "
"to using a buggy Accelerate backend."
"\nIf you compiled yourself, more information is available at:"
"\nhttps://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries"
"\nOtherwise report this to the vendor "
"that provided NumPy.\n{}\n".format(error_message))
raise RuntimeError(msg)
del _mac_os_check
# We usually use madvise hugepages support, but on some old kernels it
# is slow and thus better avoided.
# Specifically kernel version 4.6 had a bug fix which probably fixed this:
# https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
import os
use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
if sys.platform == "linux" and use_hugepage is None:
    # If there is an issue with parsing the kernel version,
    # set use_hugepage to 0. Usage of LooseVersion will handle
    # the kernel version parsing better, but avoided since it
    # will increase the import time. See: #16679 for related discussion.
    try:
        use_hugepage = 1
        kernel_version = os.uname().release.split(".")[:2]
        kernel_version = tuple(int(v) for v in kernel_version)
        if kernel_version < (4, 6):
            use_hugepage = 0
    except ValueError:
        # BUG FIX: this previously read ``use_hugepages = 0`` (trailing
        # "s"), which bound a brand-new variable and silently left
        # ``use_hugepage`` at 1 whenever kernel-version parsing failed —
        # exactly the old-kernel case this fallback is meant to handle.
        use_hugepage = 0
elif use_hugepage is None:
    # This is not Linux, so it should not matter, just enable anyway
    use_hugepage = 1
else:
    use_hugepage = int(use_hugepage)
# Note that this will currently only make a difference on Linux
core.multiarray._set_madvise_hugepage(use_hugepage)
# Give a warning if NumPy is reloaded or imported on a sub-interpreter
# We do this from python, since the C-module may not be reloaded and
# it is tidier organized.
core.multiarray._multiarray_umath._reload_guard()
# Tell PyInstaller where to find hook-numpy.py
def _pyinstaller_hooks_dir():
from pathlib import Path
return [str(Path(__file__).with_name("_pyinstaller").resolve())]
# get the version using versioneer
from .version import __version__, git_revision as __git_version__
| 15,398 | Python | 35.664286 | 105 | 0.628004 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/version.py | from __future__ import annotations
from ._version import get_versions

# NOTE(review): ``__ALL__`` is miscased — Python only honors ``__all__`` for
# star-import control, so this list currently has no effect.  Left unchanged
# because renaming it would alter which names ``from numpy.version import *``
# exposes.
__ALL__ = ['version', '__version__', 'full_version', 'git_revision', 'release']

# Version metadata dict; keys used below are "version", "full-revisionid"
# and (optionally) "closest-tag" — presumably produced by versioneer
# (TODO confirm against _version.py).
vinfo: dict[str, str] = get_versions()
version = vinfo["version"]
# Prefer the nearest release tag when available, else the raw version string.
__version__ = vinfo.get("closest-tag", vinfo["version"])
full_version = vinfo['version']
git_revision = vinfo['full-revisionid']
# A build counts as a release when it carries neither a dev suffix nor a
# local version segment ("+...").
release = 'dev0' not in version and '+' not in version
short_version = vinfo['version'].split("+")[0]

# Keep the module namespace clean of the helper and raw dict.
del get_versions, vinfo
| 475 | Python | 28.749998 | 79 | 0.68 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/setup.py | #!/usr/bin/env python3
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the top-level package.

    Registers every numpy subpackage plus the package data files, and
    generates ``__config__.py``.  Call order is preserved from the
    original setup script.
    """
    from numpy.distutils.misc_util import Configuration

    config = Configuration('numpy', parent_package, top_path)
    subpackages = (
        'array_api', 'compat', 'core', 'distutils', 'doc', 'f2py', 'fft',
        'lib', 'linalg', 'ma', 'matrixlib', 'polynomial', 'random',
        'testing', 'typing', '_typing',
    )
    for name in subpackages:
        config.add_subpackage(name)
    config.add_data_dir('doc')
    config.add_data_files('py.typed')
    config.add_data_files('*.pyi')
    config.add_subpackage('tests')
    config.make_config_py()  # installs __config__.py
    return config
if __name__ == '__main__':
print('This is the wrong setup.py file to run')
| 1,101 | Python | 32.393938 | 61 | 0.682107 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/dual.py | """
.. deprecated:: 1.20
*This module is deprecated. Instead of importing functions from*
``numpy.dual``, *the functions should be imported directly from NumPy
or SciPy*.
Aliases for functions which may be accelerated by SciPy.
SciPy_ can be built to use accelerated or otherwise improved libraries
for FFTs, linear algebra, and special functions. This module allows
developers to transparently support these accelerated functions when
SciPy is available but still support users who have only installed
NumPy.
.. _SciPy : https://www.scipy.org
"""
import warnings
warnings.warn('The module numpy.dual is deprecated. Instead of using dual, '
'use the functions directly from numpy or scipy.',
category=DeprecationWarning,
stacklevel=2)
# This module should be used for functions both in numpy and scipy if
# you want to use the numpy version if available but the scipy version
# otherwise.
# Usage --- from numpy.dual import fft, inv
__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2',
'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals',
'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0']
import numpy.linalg as linpkg
import numpy.fft as fftpkg
from numpy.lib import i0
import sys
fft = fftpkg.fft
ifft = fftpkg.ifft
fftn = fftpkg.fftn
ifftn = fftpkg.ifftn
fft2 = fftpkg.fft2
ifft2 = fftpkg.ifft2
norm = linpkg.norm
inv = linpkg.inv
svd = linpkg.svd
solve = linpkg.solve
det = linpkg.det
eig = linpkg.eig
eigvals = linpkg.eigvals
eigh = linpkg.eigh
eigvalsh = linpkg.eigvalsh
lstsq = linpkg.lstsq
pinv = linpkg.pinv
cholesky = linpkg.cholesky
_restore_dict = {}
def register_func(name, func):
    """Swap in *func* for the dual function *name*, saving the original.

    Raises ValueError if *name* is not one of the dual functions listed
    in ``__all__``.
    """
    if name not in __all__:
        raise ValueError("{} not a dual function.".format(name))
    # _getframe(0) is this frame; its globals are this module's namespace.
    module_globals = sys._getframe(0).f_globals
    _restore_dict[name] = module_globals[name]
    module_globals[name] = func
def restore_func(name):
    """Restore the original implementation of the dual function *name*.

    Does nothing if *name* was never replaced; raises ValueError if it is
    not a dual function at all.
    """
    if name not in __all__:
        raise ValueError("{} not a dual function.".format(name))
    try:
        original = _restore_dict[name]
    except KeyError:
        # Never registered — nothing to restore.
        return
    sys._getframe(0).f_globals[name] = original
def restore_all():
    """Restore every dual function replaced via ``register_func``."""
    # Snapshot the keys; restore_func does not mutate _restore_dict, but a
    # copy keeps the iteration independent of the live dict either way.
    for name in list(_restore_dict):
        restore_func(name)
| 2,214 | Python | 25.369047 | 77 | 0.67841 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_globals.py | """
Module defining global singleton classes.
This module raises a RuntimeError if an attempt to reload it is made. In that
way the identities of the classes defined here are fixed and will remain so
even if numpy itself is reloaded. In particular, a function like the following
will still work correctly after numpy is reloaded::
def foo(arg=np._NoValue):
if arg is np._NoValue:
...
That was not the case when the singleton classes were defined in the numpy
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
motivated this module.
"""
import enum
__ALL__ = [
'ModuleDeprecationWarning', 'VisibleDeprecationWarning',
'_NoValue', '_CopyMode'
]
# Disallow reloading this module so as to preserve the identities of the
# classes defined here.
if '_is_loaded' in globals():
raise RuntimeError('Reloading numpy._globals is not allowed')
_is_loaded = True
class ModuleDeprecationWarning(DeprecationWarning):
    """Module deprecation warning.

    Ordinary DeprecationWarnings become test failures under the nose
    tester, which makes whole-module deprecation painful because modules
    are imported by default.  This dedicated subclass is one the nose
    tester lets pass without failing tests.
    """
ModuleDeprecationWarning.__module__ = 'numpy'
class VisibleDeprecationWarning(UserWarning):
    """Visible deprecation warning.

    Python hides DeprecationWarning by default; deriving from UserWarning
    instead makes the message visible, which is useful when the deprecated
    usage is most likely a user bug.
    """
VisibleDeprecationWarning.__module__ = 'numpy'
class _NoValueType:
"""Special keyword value.
The instance of this class may be used as the default value assigned to a
keyword if no other obvious default (e.g., `None`) is suitable,
Common reasons for using this keyword are:
- A new keyword is added to a function, and that function forwards its
inputs to another function or method which can be defined outside of
NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
keyword was added that could only be forwarded if the user explicitly
specified ``keepdims``; downstream array libraries may not have added
the same keyword, so adding ``x.std(..., keepdims=keepdims)``
unconditionally could have broken previously working code.
- A keyword is being deprecated, and a deprecation warning must only be
emitted when the keyword is used.
"""
__instance = None
def __new__(cls):
# ensure that only one instance exists
if not cls.__instance:
cls.__instance = super().__new__(cls)
return cls.__instance
# needed for python 2 to preserve identity through a pickle
def __reduce__(self):
return (self.__class__, ())
def __repr__(self):
return "<no value>"
_NoValue = _NoValueType()
class _CopyMode(enum.Enum):
"""
An enumeration for the copy modes supported
by numpy.copy() and numpy.array(). The following three modes are supported,
- ALWAYS: This means that a deep copy of the input
array will always be taken.
- IF_NEEDED: This means that a deep copy of the input
array will be taken only if necessary.
- NEVER: This means that the deep copy will never be taken.
If a copy cannot be avoided then a `ValueError` will be
raised.
Note that the buffer-protocol could in theory do copies. NumPy currently
assumes an object exporting the buffer protocol will never do this.
"""
ALWAYS = True
IF_NEEDED = False
NEVER = 2
def __bool__(self):
# For backwards compatibility
if self == _CopyMode.ALWAYS:
return True
if self == _CopyMode.IF_NEEDED:
return False
raise ValueError(f"{self} is neither True nor False.")
_CopyMode.__module__ = 'numpy'
| 4,012 | Python | 29.869231 | 79 | 0.679711 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/conftest.py | """
Pytest configuration and fixtures for the Numpy test suite.
"""
import os
import tempfile
import hypothesis
import pytest
import numpy
from numpy.core._multiarray_tests import get_fpu_mode
_old_fpu_mode = None
_collect_results = {}
# Use a known and persistent tmpdir for hypothesis' caches, which
# can be automatically cleared by the OS or user.
hypothesis.configuration.set_hypothesis_home_dir(
os.path.join(tempfile.gettempdir(), ".hypothesis")
)
# We register two custom profiles for Numpy - for details see
# https://hypothesis.readthedocs.io/en/latest/settings.html
# The first is designed for our own CI runs; the latter also
# forces determinism and is designed for use via np.test()
hypothesis.settings.register_profile(
name="numpy-profile", deadline=None, print_blob=True,
)
hypothesis.settings.register_profile(
name="np.test() profile",
deadline=None, print_blob=True, database=None, derandomize=True,
suppress_health_check=hypothesis.HealthCheck.all(),
)
# Note that the default profile is chosen based on the presence
# of pytest.ini, but can be overridden by passing the
# --hypothesis-profile=NAME argument to pytest.
_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
hypothesis.settings.load_profile(
"numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
)
def pytest_configure(config):
    """Register the custom pytest markers used by the NumPy test suite."""
    marker_lines = (
        "valgrind_error: Tests that are known to error under valgrind.",
        "leaks_references: Tests that are known to leak references.",
        "slow: Tests that are very slow.",
        "slow_pypy: Tests that are very slow on pypy.",
    )
    for line in marker_lines:
        config.addinivalue_line("markers", line)
def pytest_addoption(parser):
    """Add the ``--available-memory`` command-line option."""
    help_text = ("Set amount of memory available for running the "
                 "test suite. This can result to tests requiring "
                 "especially large amounts of memory to be skipped. "
                 "Equivalent to setting environment variable "
                 "NPY_AVAILABLE_MEM. Default: determined"
                 "automatically.")
    parser.addoption("--available-memory", action="store", default=None,
                     help=help_text)
def pytest_sessionstart(session):
    """Propagate ``--available-memory`` into the NPY_AVAILABLE_MEM env var."""
    requested = session.config.getoption('available_memory')
    if requested is not None:
        os.environ['NPY_AVAILABLE_MEM'] = requested
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
    """
    Check FPU precision mode was not changed during test collection.

    The clumsy way we do it here is mainly necessary because numpy
    still uses yield tests, which can execute code at test collection
    time.
    """
    global _old_fpu_mode
    mode = get_fpu_mode()
    if _old_fpu_mode is None:
        # First collected item: remember the baseline FPU mode.
        _old_fpu_mode = mode
    elif mode != _old_fpu_mode:
        # The mode changed while collecting this item: record the
        # transition keyed by the item so check_fpu_mode can report it when
        # the item runs, then track the new mode as the baseline.
        _collect_results[item] = (_old_fpu_mode, mode)
        _old_fpu_mode = mode
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
    """
    Check FPU precision mode was not changed during the test.

    Runs around every test (autouse): snapshots the mode before, yields to
    the test, and fails if the mode differs afterwards.  Also reports any
    mode change recorded for this item during collection.
    """
    old_mode = get_fpu_mode()
    yield
    new_mode = get_fpu_mode()
    if old_mode != new_mode:
        raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
                             " during the test".format(old_mode, new_mode))
    # pytest_itemcollected may have recorded a mode change keyed by this
    # test item while it was being collected; surface that here.
    collect_result = _collect_results.get(request.node)
    if collect_result is not None:
        old_mode, new_mode = collect_result
        raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
                             " when collecting the test".format(old_mode,
                                                                new_mode))
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    # Make ``np`` available in every doctest without an explicit import.
    doctest_namespace['np'] = numpy
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
    # Pin hash randomization so hash-order-dependent behavior is
    # reproducible across test runs.
    monkeypatch.setenv('PYTHONHASHSEED', '0')
| 4,032 | Python | 32.608333 | 79 | 0.659474 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/__init__.pyi | import builtins
import os
import sys
import mmap
import ctypes as ct
import array as _array
import datetime as dt
import enum
from abc import abstractmethod
from types import TracebackType, MappingProxyType
from contextlib import ContextDecorator
if sys.version_info >= (3, 9):
from types import GenericAlias
from numpy._pytesttester import PytestTester
from numpy.core._internal import _ctypes
from numpy._typing import (
# Arrays
ArrayLike,
NDArray,
_SupportsArray,
_NestedSequence,
_FiniteNestedSequence,
_SupportsArray,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeNumber_co,
_ArrayLikeTD64_co,
_ArrayLikeDT64_co,
_ArrayLikeObject_co,
_ArrayLikeStr_co,
_ArrayLikeBytes_co,
_ArrayLikeUnknown,
_UnknownType,
# DTypes
DTypeLike,
_DTypeLike,
_SupportsDType,
_VoidDTypeLike,
# Shapes
_Shape,
_ShapeLike,
# Scalars
_CharLike_co,
_BoolLike_co,
_IntLike_co,
_FloatLike_co,
_ComplexLike_co,
_TD64Like_co,
_NumberLike_co,
_ScalarLike_co,
# `number` precision
NBitBase,
_256Bit,
_128Bit,
_96Bit,
_80Bit,
_64Bit,
_32Bit,
_16Bit,
_8Bit,
_NBitByte,
_NBitShort,
_NBitIntC,
_NBitIntP,
_NBitInt,
_NBitLongLong,
_NBitHalf,
_NBitSingle,
_NBitDouble,
_NBitLongDouble,
# Character codes
_BoolCodes,
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_Float16Codes,
_Float32Codes,
_Float64Codes,
_Complex64Codes,
_Complex128Codes,
_ByteCodes,
_ShortCodes,
_IntCCodes,
_IntPCodes,
_IntCodes,
_LongLongCodes,
_UByteCodes,
_UShortCodes,
_UIntCCodes,
_UIntPCodes,
_UIntCodes,
_ULongLongCodes,
_HalfCodes,
_SingleCodes,
_DoubleCodes,
_LongDoubleCodes,
_CSingleCodes,
_CDoubleCodes,
_CLongDoubleCodes,
_DT64Codes,
_TD64Codes,
_StrCodes,
_BytesCodes,
_VoidCodes,
_ObjectCodes,
# Ufuncs
_UFunc_Nin1_Nout1,
_UFunc_Nin2_Nout1,
_UFunc_Nin1_Nout2,
_UFunc_Nin2_Nout2,
_GUFunc_Nin2_Nout1,
)
from numpy._typing._callable import (
_BoolOp,
_BoolBitOp,
_BoolSub,
_BoolTrueDiv,
_BoolMod,
_BoolDivMod,
_TD64Div,
_IntTrueDiv,
_UnsignedIntOp,
_UnsignedIntBitOp,
_UnsignedIntMod,
_UnsignedIntDivMod,
_SignedIntOp,
_SignedIntBitOp,
_SignedIntMod,
_SignedIntDivMod,
_FloatOp,
_FloatMod,
_FloatDivMod,
_ComplexOp,
_NumberOp,
_ComparisonOp,
)
# NOTE: Numpy's mypy plugin is used for removing the types unavailable
# to the specific platform
from numpy._typing._extended_precision import (
uint128 as uint128,
uint256 as uint256,
int128 as int128,
int256 as int256,
float80 as float80,
float96 as float96,
float128 as float128,
float256 as float256,
complex160 as complex160,
complex192 as complex192,
complex256 as complex256,
complex512 as complex512,
)
from collections.abc import (
Callable,
Container,
Iterable,
Iterator,
Mapping,
Sequence,
Sized,
)
from typing import (
Literal as L,
Any,
Generic,
IO,
NoReturn,
overload,
SupportsComplex,
SupportsFloat,
SupportsInt,
TypeVar,
Union,
Protocol,
SupportsIndex,
Final,
final,
ClassVar,
)
# Ensures that the stubs are picked up
from numpy import (
ctypeslib as ctypeslib,
fft as fft,
lib as lib,
linalg as linalg,
ma as ma,
matrixlib as matrixlib,
polynomial as polynomial,
random as random,
testing as testing,
version as version,
)
from numpy.core import defchararray, records
char = defchararray
rec = records
from numpy.core.function_base import (
linspace as linspace,
logspace as logspace,
geomspace as geomspace,
)
from numpy.core.fromnumeric import (
take as take,
reshape as reshape,
choose as choose,
repeat as repeat,
put as put,
swapaxes as swapaxes,
transpose as transpose,
partition as partition,
argpartition as argpartition,
sort as sort,
argsort as argsort,
argmax as argmax,
argmin as argmin,
searchsorted as searchsorted,
resize as resize,
squeeze as squeeze,
diagonal as diagonal,
trace as trace,
ravel as ravel,
nonzero as nonzero,
shape as shape,
compress as compress,
clip as clip,
sum as sum,
all as all,
any as any,
cumsum as cumsum,
ptp as ptp,
amax as amax,
amin as amin,
prod as prod,
cumprod as cumprod,
ndim as ndim,
size as size,
around as around,
mean as mean,
std as std,
var as var,
)
from numpy.core._asarray import (
require as require,
)
from numpy.core._type_aliases import (
sctypes as sctypes,
sctypeDict as sctypeDict,
)
from numpy.core._ufunc_config import (
seterr as seterr,
geterr as geterr,
setbufsize as setbufsize,
getbufsize as getbufsize,
seterrcall as seterrcall,
geterrcall as geterrcall,
_ErrKind,
_ErrFunc,
_ErrDictOptional,
)
from numpy.core.arrayprint import (
set_printoptions as set_printoptions,
get_printoptions as get_printoptions,
array2string as array2string,
format_float_scientific as format_float_scientific,
format_float_positional as format_float_positional,
array_repr as array_repr,
array_str as array_str,
set_string_function as set_string_function,
printoptions as printoptions,
)
from numpy.core.einsumfunc import (
einsum as einsum,
einsum_path as einsum_path,
)
from numpy.core.multiarray import (
ALLOW_THREADS as ALLOW_THREADS,
BUFSIZE as BUFSIZE,
CLIP as CLIP,
MAXDIMS as MAXDIMS,
MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS,
MAY_SHARE_EXACT as MAY_SHARE_EXACT,
RAISE as RAISE,
WRAP as WRAP,
tracemalloc_domain as tracemalloc_domain,
array as array,
empty_like as empty_like,
empty as empty,
zeros as zeros,
concatenate as concatenate,
inner as inner,
where as where,
lexsort as lexsort,
can_cast as can_cast,
min_scalar_type as min_scalar_type,
result_type as result_type,
dot as dot,
vdot as vdot,
bincount as bincount,
copyto as copyto,
putmask as putmask,
packbits as packbits,
unpackbits as unpackbits,
shares_memory as shares_memory,
may_share_memory as may_share_memory,
asarray as asarray,
asanyarray as asanyarray,
ascontiguousarray as ascontiguousarray,
asfortranarray as asfortranarray,
arange as arange,
busday_count as busday_count,
busday_offset as busday_offset,
compare_chararrays as compare_chararrays,
datetime_as_string as datetime_as_string,
datetime_data as datetime_data,
frombuffer as frombuffer,
fromfile as fromfile,
fromiter as fromiter,
is_busday as is_busday,
promote_types as promote_types,
seterrobj as seterrobj,
geterrobj as geterrobj,
fromstring as fromstring,
frompyfunc as frompyfunc,
nested_iters as nested_iters,
flagsobj,
)
from numpy.core.numeric import (
zeros_like as zeros_like,
ones as ones,
ones_like as ones_like,
full as full,
full_like as full_like,
count_nonzero as count_nonzero,
isfortran as isfortran,
argwhere as argwhere,
flatnonzero as flatnonzero,
correlate as correlate,
convolve as convolve,
outer as outer,
tensordot as tensordot,
roll as roll,
rollaxis as rollaxis,
moveaxis as moveaxis,
cross as cross,
indices as indices,
fromfunction as fromfunction,
isscalar as isscalar,
binary_repr as binary_repr,
base_repr as base_repr,
identity as identity,
allclose as allclose,
isclose as isclose,
array_equal as array_equal,
array_equiv as array_equiv,
)
from numpy.core.numerictypes import (
maximum_sctype as maximum_sctype,
issctype as issctype,
obj2sctype as obj2sctype,
issubclass_ as issubclass_,
issubsctype as issubsctype,
issubdtype as issubdtype,
sctype2char as sctype2char,
find_common_type as find_common_type,
nbytes as nbytes,
cast as cast,
ScalarType as ScalarType,
typecodes as typecodes,
)
from numpy.core.shape_base import (
atleast_1d as atleast_1d,
atleast_2d as atleast_2d,
atleast_3d as atleast_3d,
block as block,
hstack as hstack,
stack as stack,
vstack as vstack,
)
from numpy.lib import (
emath as emath,
)
from numpy.lib.arraypad import (
pad as pad,
)
from numpy.lib.arraysetops import (
ediff1d as ediff1d,
intersect1d as intersect1d,
setxor1d as setxor1d,
union1d as union1d,
setdiff1d as setdiff1d,
unique as unique,
in1d as in1d,
isin as isin,
)
from numpy.lib.arrayterator import (
Arrayterator as Arrayterator,
)
from numpy.lib.function_base import (
select as select,
piecewise as piecewise,
trim_zeros as trim_zeros,
copy as copy,
iterable as iterable,
percentile as percentile,
diff as diff,
gradient as gradient,
angle as angle,
unwrap as unwrap,
sort_complex as sort_complex,
disp as disp,
flip as flip,
rot90 as rot90,
extract as extract,
place as place,
asarray_chkfinite as asarray_chkfinite,
average as average,
bincount as bincount,
digitize as digitize,
cov as cov,
corrcoef as corrcoef,
msort as msort,
median as median,
sinc as sinc,
hamming as hamming,
hanning as hanning,
bartlett as bartlett,
blackman as blackman,
kaiser as kaiser,
trapz as trapz,
i0 as i0,
add_newdoc as add_newdoc,
add_docstring as add_docstring,
meshgrid as meshgrid,
delete as delete,
insert as insert,
append as append,
interp as interp,
add_newdoc_ufunc as add_newdoc_ufunc,
quantile as quantile,
)
from numpy.lib.histograms import (
histogram_bin_edges as histogram_bin_edges,
histogram as histogram,
histogramdd as histogramdd,
)
from numpy.lib.index_tricks import (
ravel_multi_index as ravel_multi_index,
unravel_index as unravel_index,
mgrid as mgrid,
ogrid as ogrid,
r_ as r_,
c_ as c_,
s_ as s_,
index_exp as index_exp,
ix_ as ix_,
fill_diagonal as fill_diagonal,
diag_indices as diag_indices,
diag_indices_from as diag_indices_from,
)
from numpy.lib.nanfunctions import (
nansum as nansum,
nanmax as nanmax,
nanmin as nanmin,
nanargmax as nanargmax,
nanargmin as nanargmin,
nanmean as nanmean,
nanmedian as nanmedian,
nanpercentile as nanpercentile,
nanvar as nanvar,
nanstd as nanstd,
nanprod as nanprod,
nancumsum as nancumsum,
nancumprod as nancumprod,
nanquantile as nanquantile,
)
from numpy.lib.npyio import (
savetxt as savetxt,
loadtxt as loadtxt,
genfromtxt as genfromtxt,
recfromtxt as recfromtxt,
recfromcsv as recfromcsv,
load as load,
save as save,
savez as savez,
savez_compressed as savez_compressed,
packbits as packbits,
unpackbits as unpackbits,
fromregex as fromregex,
)
from numpy.lib.polynomial import (
poly as poly,
roots as roots,
polyint as polyint,
polyder as polyder,
polyadd as polyadd,
polysub as polysub,
polymul as polymul,
polydiv as polydiv,
polyval as polyval,
polyfit as polyfit,
)
from numpy.lib.shape_base import (
column_stack as column_stack,
row_stack as row_stack,
dstack as dstack,
array_split as array_split,
split as split,
hsplit as hsplit,
vsplit as vsplit,
dsplit as dsplit,
apply_over_axes as apply_over_axes,
expand_dims as expand_dims,
apply_along_axis as apply_along_axis,
kron as kron,
tile as tile,
get_array_wrap as get_array_wrap,
take_along_axis as take_along_axis,
put_along_axis as put_along_axis,
)
from numpy.lib.stride_tricks import (
broadcast_to as broadcast_to,
broadcast_arrays as broadcast_arrays,
broadcast_shapes as broadcast_shapes,
)
from numpy.lib.twodim_base import (
diag as diag,
diagflat as diagflat,
eye as eye,
fliplr as fliplr,
flipud as flipud,
tri as tri,
triu as triu,
tril as tril,
vander as vander,
histogram2d as histogram2d,
mask_indices as mask_indices,
tril_indices as tril_indices,
tril_indices_from as tril_indices_from,
triu_indices as triu_indices,
triu_indices_from as triu_indices_from,
)
from numpy.lib.type_check import (
mintypecode as mintypecode,
asfarray as asfarray,
real as real,
imag as imag,
iscomplex as iscomplex,
isreal as isreal,
iscomplexobj as iscomplexobj,
isrealobj as isrealobj,
nan_to_num as nan_to_num,
real_if_close as real_if_close,
typename as typename,
common_type as common_type,
)
from numpy.lib.ufunclike import (
fix as fix,
isposinf as isposinf,
isneginf as isneginf,
)
from numpy.lib.utils import (
issubclass_ as issubclass_,
issubsctype as issubsctype,
issubdtype as issubdtype,
deprecate as deprecate,
deprecate_with_doc as deprecate_with_doc,
get_include as get_include,
info as info,
source as source,
who as who,
lookfor as lookfor,
byte_bounds as byte_bounds,
safe_eval as safe_eval,
)
from numpy.matrixlib import (
asmatrix as asmatrix,
mat as mat,
bmat as bmat,
)
# Contravariant TypeVar constrained to `str`/`bytes`; used for write-only
# text/binary sinks such as `_SupportsWrite` below.
_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True)
# Protocol for representing file-like-objects accepted
# by `ndarray.tofile` and `fromfile`
class _IOProtocol(Protocol):
    """Structural type for file-like objects accepted by `ndarray.tofile`/`fromfile`."""
    def flush(self) -> object: ...
    def fileno(self) -> int: ...
    def tell(self) -> SupportsIndex: ...
    def seek(self, offset: int, whence: int, /) -> object: ...
# NOTE: `seek`, `write` and `flush` are technically only required
# for `readwrite`/`write` modes
class _MemMapIOProtocol(Protocol):
    """Structural type for file-like objects usable as a `memmap` backing file."""
    def flush(self) -> object: ...
    def fileno(self) -> SupportsIndex: ...
    def tell(self) -> int: ...
    def seek(self, offset: int, whence: int, /) -> object: ...
    def write(self, s: bytes, /) -> object: ...
    # Only the presence of a `read` attribute is required, not its signature.
    @property
    def read(self) -> object: ...
class _SupportsWrite(Protocol[_AnyStr_contra]):
    """Structural type for a write-only `str` or `bytes` sink."""
    def write(self, s: _AnyStr_contra, /) -> object: ...
# Module-level metadata attributes of the `numpy` package.
__all__: list[str]
__path__: list[str]
__version__: str
__git_version__: str
test: PytestTester
# TODO: Move placeholders to their respective module once
# their annotations are properly implemented
#
# Placeholders for classes
# Some of these are aliases; others are wrappers with an identical signature
round = around
round_ = around
max = amax
min = amin
product = prod
cumproduct = cumprod
sometrue = any
alltrue = all
def show_config() -> None: ...
# TypeVars shared by the stub classes below.
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic)
# Literal values accepted by the `newbyteorder` methods below.
_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"]
@final
class dtype(Generic[_DTypeScalar_co]):
    """Stub for `numpy.dtype`, generic over the scalar type it describes.

    `__new__` is heavily overloaded so that the concrete scalar type
    parameter is preserved (for `np.generic` subclasses and existing
    dtypes) or inferred (for builtin types, character codes and ctypes
    types); the overload order below is significant.
    """
    # Field names of a structured dtype; `None` for unstructured dtypes.
    names: None | tuple[builtins.str, ...]
    # Overload for subclass of generic
    @overload
    def __new__(
        cls,
        dtype: type[_DTypeScalar_co],
        align: bool = ...,
        copy: bool = ...,
    ) -> dtype[_DTypeScalar_co]: ...
    # Overloads for string aliases, Python types, and some assorted
    # other special cases. Order is sometimes important because of the
    # subtype relationships
    #
    # bool < int < float < complex < object
    #
    # so we have to make sure the overloads for the narrowest type is
    # first.
    # Builtin types
    @overload
    def __new__(cls, dtype: type[bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ...
    @overload
    def __new__(cls, dtype: type[int], align: bool = ..., copy: bool = ...) -> dtype[int_]: ...
    @overload
    def __new__(cls, dtype: None | type[float], align: bool = ..., copy: bool = ...) -> dtype[float_]: ...
    @overload
    def __new__(cls, dtype: type[complex], align: bool = ..., copy: bool = ...) -> dtype[complex_]: ...
    @overload
    def __new__(cls, dtype: type[builtins.str], align: bool = ..., copy: bool = ...) -> dtype[str_]: ...
    @overload
    def __new__(cls, dtype: type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ...
    # `unsignedinteger` string-based representations and ctypes
    @overload
    def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: bool = ..., copy: bool = ...) -> dtype[uint8]: ...
    @overload
    def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: bool = ..., copy: bool = ...) -> dtype[uint16]: ...
    @overload
    def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: bool = ..., copy: bool = ...) -> dtype[uint32]: ...
    @overload
    def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: bool = ..., copy: bool = ...) -> dtype[uint64]: ...
    @overload
    def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ...
    @overload
    def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ...
    @overload
    def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ...
    # NOTE: We're assuming here that `uint_ptr_t == size_t`,
    # an assumption that does not hold in rare cases (same for `ssize_t`)
    @overload
    def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ...
    @overload
    def __new__(cls, dtype: _UIntCodes | type[ct.c_ulong], align: bool = ..., copy: bool = ...) -> dtype[uint]: ...
    @overload
    def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ...
    # `signedinteger` string-based representations and ctypes
    @overload
    def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: bool = ..., copy: bool = ...) -> dtype[int8]: ...
    @overload
    def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: bool = ..., copy: bool = ...) -> dtype[int16]: ...
    @overload
    def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: bool = ..., copy: bool = ...) -> dtype[int32]: ...
    @overload
    def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: bool = ..., copy: bool = ...) -> dtype[int64]: ...
    @overload
    def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: bool = ..., copy: bool = ...) -> dtype[byte]: ...
    @overload
    def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: bool = ..., copy: bool = ...) -> dtype[short]: ...
    @overload
    def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: bool = ..., copy: bool = ...) -> dtype[intc]: ...
    @overload
    def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: bool = ..., copy: bool = ...) -> dtype[intp]: ...
    @overload
    def __new__(cls, dtype: _IntCodes | type[ct.c_long], align: bool = ..., copy: bool = ...) -> dtype[int_]: ...
    @overload
    def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: bool = ..., copy: bool = ...) -> dtype[longlong]: ...
    # `floating` string-based representations and ctypes
    @overload
    def __new__(cls, dtype: _Float16Codes, align: bool = ..., copy: bool = ...) -> dtype[float16]: ...
    @overload
    def __new__(cls, dtype: _Float32Codes, align: bool = ..., copy: bool = ...) -> dtype[float32]: ...
    @overload
    def __new__(cls, dtype: _Float64Codes, align: bool = ..., copy: bool = ...) -> dtype[float64]: ...
    @overload
    def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ...) -> dtype[half]: ...
    @overload
    def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: bool = ..., copy: bool = ...) -> dtype[single]: ...
    @overload
    def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: bool = ..., copy: bool = ...) -> dtype[double]: ...
    @overload
    def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ...
    # `complexfloating` string-based representations
    @overload
    def __new__(cls, dtype: _Complex64Codes, align: bool = ..., copy: bool = ...) -> dtype[complex64]: ...
    @overload
    def __new__(cls, dtype: _Complex128Codes, align: bool = ..., copy: bool = ...) -> dtype[complex128]: ...
    @overload
    def __new__(cls, dtype: _CSingleCodes, align: bool = ..., copy: bool = ...) -> dtype[csingle]: ...
    @overload
    def __new__(cls, dtype: _CDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[cdouble]: ...
    @overload
    def __new__(cls, dtype: _CLongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[clongdouble]: ...
    # Miscellaneous string-based representations and ctypes
    @overload
    def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ...
    @overload
    def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ...) -> dtype[timedelta64]: ...
    @overload
    def __new__(cls, dtype: _DT64Codes, align: bool = ..., copy: bool = ...) -> dtype[datetime64]: ...
    @overload
    def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ...) -> dtype[str_]: ...
    @overload
    def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ...
    @overload
    def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ...) -> dtype[void]: ...
    @overload
    def __new__(cls, dtype: _ObjectCodes | type[ct.py_object], align: bool = ..., copy: bool = ...) -> dtype[object_]: ...
    # dtype of a dtype is the same dtype
    @overload
    def __new__(
        cls,
        dtype: dtype[_DTypeScalar_co],
        align: bool = ...,
        copy: bool = ...,
    ) -> dtype[_DTypeScalar_co]: ...
    # Anything exposing a `dtype` attribute (the `_SupportsDType` protocol).
    @overload
    def __new__(
        cls,
        dtype: _SupportsDType[dtype[_DTypeScalar_co]],
        align: bool = ...,
        copy: bool = ...,
    ) -> dtype[_DTypeScalar_co]: ...
    # Handle strings that can't be expressed as literals; i.e. s1, s2, ...
    @overload
    def __new__(
        cls,
        dtype: builtins.str,
        align: bool = ...,
        copy: bool = ...,
    ) -> dtype[Any]: ...
    # Catchall overload for void-likes
    @overload
    def __new__(
        cls,
        dtype: _VoidDTypeLike,
        align: bool = ...,
        copy: bool = ...,
    ) -> dtype[void]: ...
    # Catchall overload for object-likes
    @overload
    def __new__(
        cls,
        dtype: type[object],
        align: bool = ...,
        copy: bool = ...,
    ) -> dtype[object_]: ...
    if sys.version_info >= (3, 9):
        def __class_getitem__(self, item: Any) -> GenericAlias: ...
    # Indexing a structured (void) dtype by field name(s) yields sub-dtypes.
    @overload
    def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ...
    @overload
    def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ...
    # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes
    @overload
    def __mul__(self: _DType, value: L[1]) -> _DType: ...
    @overload
    def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
    @overload
    def __mul__(self, value: SupportsIndex) -> dtype[void]: ...
    # NOTE: `__rmul__` seems to be broken when used in combination with
    # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for
    # now for non-flexible dtypes.
    @overload
    def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
    @overload
    def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ...
    def __gt__(self, other: DTypeLike) -> bool: ...
    def __ge__(self, other: DTypeLike) -> bool: ...
    def __lt__(self, other: DTypeLike) -> bool: ...
    def __le__(self, other: DTypeLike) -> bool: ...
    # Explicitly defined `__eq__` and `__ne__` to get around mypy's
    # `strict_equality` option; even though their signatures are
    # identical to their `object`-based counterpart
    def __eq__(self, other: Any) -> bool: ...
    def __ne__(self, other: Any) -> bool: ...
    # Read-only descriptor attributes.
    @property
    def alignment(self) -> int: ...
    @property
    def base(self) -> dtype[Any]: ...
    @property
    def byteorder(self) -> builtins.str: ...
    @property
    def char(self) -> builtins.str: ...
    @property
    def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ...
    @property
    def fields(
        self,
    ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ...
    @property
    def flags(self) -> int: ...
    @property
    def hasobject(self) -> bool: ...
    @property
    def isbuiltin(self) -> int: ...
    @property
    def isnative(self) -> bool: ...
    @property
    def isalignedstruct(self) -> bool: ...
    @property
    def itemsize(self) -> int: ...
    @property
    def kind(self) -> builtins.str: ...
    @property
    def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ...
    @property
    def name(self) -> builtins.str: ...
    @property
    def num(self) -> int: ...
    @property
    def shape(self) -> _Shape: ...
    @property
    def ndim(self) -> int: ...
    @property
    def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ...
    def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ...
    @property
    def str(self) -> builtins.str: ...
    @property
    def type(self) -> type[_DTypeScalar_co]: ...
# Integer-array-like objects accepted as `flatiter` indices.
_ArrayLikeInt = Union[
    int,
    integer,
    Sequence[Union[int, integer]],
    Sequence[Sequence[Any]],  # TODO: wait for support for recursive types
    ndarray
]
# Self-type for `flatiter.__iter__`.
_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter)
@final
class flatiter(Generic[_NdArraySubClass]):
    """Stub for the flat iterator type returned by `ndarray.flat`."""
    @property
    def base(self) -> _NdArraySubClass: ...
    @property
    def coords(self) -> _Shape: ...
    @property
    def index(self) -> int: ...
    def copy(self) -> _NdArraySubClass: ...
    def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ...
    def __next__(self: flatiter[ndarray[Any, dtype[_ScalarType]]]) -> _ScalarType: ...
    def __len__(self) -> int: ...
    # Scalar (integer) keys yield scalars; slice/array-like keys yield arrays.
    @overload
    def __getitem__(
        self: flatiter[ndarray[Any, dtype[_ScalarType]]],
        key: int | integer | tuple[int | integer],
    ) -> _ScalarType: ...
    @overload
    def __getitem__(
        self,
        key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis],
    ) -> _NdArraySubClass: ...
    # TODO: `__setitem__` operates via `unsafe` casting rules, and can
    # thus accept any type accepted by the relevant underlying `np.generic`
    # constructor.
    # This means that `value` must in reality be a supertype of `npt.ArrayLike`.
    def __setitem__(
        self,
        key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis],
        value: Any,
    ) -> None: ...
    @overload
    def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ...
    @overload
    def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
# Literal aliases for the string "flag" arguments shared by many methods below.
_OrderKACF = L[None, "K", "A", "C", "F"]
_OrderACF = L[None, "A", "C", "F"]
_OrderCF = L[None, "C", "F"]
_ModeKind = L["raise", "wrap", "clip"]
_PartitionKind = L["introselect"]
_SortKind = L["quicksort", "mergesort", "heapsort", "stable"]
_SortSide = L["left", "right"]
# Self-type for methods returning an instance of the defining (sub)class.
_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon)
class _ArrayOrScalarCommon:
    """Base stub with the attributes and methods shared by `ndarray` and scalars.

    The reduction methods below follow a common overload pattern:
    a `None`-everything call returns a scalar, a generic call returns
    `Any`, and passing an `out` array returns that array's type.
    """
    @property
    def T(self: _ArraySelf) -> _ArraySelf: ...
    @property
    def data(self) -> memoryview: ...
    @property
    def flags(self) -> flagsobj: ...
    @property
    def itemsize(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    def __bool__(self) -> bool: ...
    def __bytes__(self) -> bytes: ...
    def __str__(self) -> str: ...
    def __repr__(self) -> str: ...
    def __copy__(self: _ArraySelf) -> _ArraySelf: ...
    def __deepcopy__(self: _ArraySelf, memo: None | dict[int, Any], /) -> _ArraySelf: ...
    # TODO: How to deal with the non-commutative nature of `==` and `!=`?
    # xref numpy/numpy#17368
    def __eq__(self, other: Any) -> Any: ...
    def __ne__(self, other: Any) -> Any: ...
    def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
    def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ...
    def dumps(self) -> bytes: ...
    def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
    # NOTE: `tostring()` is deprecated and therefore excluded
    # def tostring(self, order=...): ...
    def tofile(
        self,
        fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol,
        sep: str = ...,
        format: str = ...,
    ) -> None: ...
    # generics and 0d arrays return builtin scalars
    def tolist(self) -> Any: ...
    @property
    def __array_interface__(self) -> dict[str, Any]: ...
    @property
    def __array_priority__(self) -> float: ...
    @property
    def __array_struct__(self) -> Any: ... # builtins.PyCapsule
    def __setstate__(self, state: tuple[
        SupportsIndex, # version
        _ShapeLike, # Shape
        _DType_co, # DType
        bool, # F-continuous
        bytes | list[Any], # Data
    ], /) -> None: ...
    # a `bool_` is returned when `keepdims=True` and `self` is a 0d array
    @overload
    def all(
        self,
        axis: None = ...,
        out: None = ...,
        keepdims: L[False] = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> bool_: ...
    @overload
    def all(
        self,
        axis: None | _ShapeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def all(
        self,
        axis: None | _ShapeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def any(
        self,
        axis: None = ...,
        out: None = ...,
        keepdims: L[False] = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> bool_: ...
    @overload
    def any(
        self,
        axis: None | _ShapeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def any(
        self,
        axis: None | _ShapeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def argmax(
        self,
        axis: None = ...,
        out: None = ...,
        *,
        keepdims: L[False] = ...,
    ) -> intp: ...
    @overload
    def argmax(
        self,
        axis: SupportsIndex = ...,
        out: None = ...,
        *,
        keepdims: bool = ...,
    ) -> Any: ...
    @overload
    def argmax(
        self,
        axis: None | SupportsIndex = ...,
        out: _NdArraySubClass = ...,
        *,
        keepdims: bool = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def argmin(
        self,
        axis: None = ...,
        out: None = ...,
        *,
        keepdims: L[False] = ...,
    ) -> intp: ...
    @overload
    def argmin(
        self,
        axis: SupportsIndex = ...,
        out: None = ...,
        *,
        keepdims: bool = ...,
    ) -> Any: ...
    @overload
    def argmin(
        self,
        axis: None | SupportsIndex = ...,
        out: _NdArraySubClass = ...,
        *,
        keepdims: bool = ...,
    ) -> _NdArraySubClass: ...
    def argsort(
        self,
        axis: None | SupportsIndex = ...,
        kind: None | _SortKind = ...,
        order: None | str | Sequence[str] = ...,
    ) -> ndarray: ...
    @overload
    def choose(
        self,
        choices: ArrayLike,
        out: None = ...,
        mode: _ModeKind = ...,
    ) -> ndarray: ...
    @overload
    def choose(
        self,
        choices: ArrayLike,
        out: _NdArraySubClass = ...,
        mode: _ModeKind = ...,
    ) -> _NdArraySubClass: ...
    # `clip` requires at least one of `min`/`max`; hence the two
    # (`min`-given / `max`-given) overloads per `out` variant.
    @overload
    def clip(
        self,
        min: ArrayLike = ...,
        max: None | ArrayLike = ...,
        out: None = ...,
        **kwargs: Any,
    ) -> ndarray: ...
    @overload
    def clip(
        self,
        min: None = ...,
        max: ArrayLike = ...,
        out: None = ...,
        **kwargs: Any,
    ) -> ndarray: ...
    @overload
    def clip(
        self,
        min: ArrayLike = ...,
        max: None | ArrayLike = ...,
        out: _NdArraySubClass = ...,
        **kwargs: Any,
    ) -> _NdArraySubClass: ...
    @overload
    def clip(
        self,
        min: None = ...,
        max: ArrayLike = ...,
        out: _NdArraySubClass = ...,
        **kwargs: Any,
    ) -> _NdArraySubClass: ...
    @overload
    def compress(
        self,
        a: ArrayLike,
        axis: None | SupportsIndex = ...,
        out: None = ...,
    ) -> ndarray: ...
    @overload
    def compress(
        self,
        a: ArrayLike,
        axis: None | SupportsIndex = ...,
        out: _NdArraySubClass = ...,
    ) -> _NdArraySubClass: ...
    def conj(self: _ArraySelf) -> _ArraySelf: ...
    def conjugate(self: _ArraySelf) -> _ArraySelf: ...
    @overload
    def cumprod(
        self,
        axis: None | SupportsIndex = ...,
        dtype: DTypeLike = ...,
        out: None = ...,
    ) -> ndarray: ...
    @overload
    def cumprod(
        self,
        axis: None | SupportsIndex = ...,
        dtype: DTypeLike = ...,
        out: _NdArraySubClass = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def cumsum(
        self,
        axis: None | SupportsIndex = ...,
        dtype: DTypeLike = ...,
        out: None = ...,
    ) -> ndarray: ...
    @overload
    def cumsum(
        self,
        axis: None | SupportsIndex = ...,
        dtype: DTypeLike = ...,
        out: _NdArraySubClass = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def max(
        self,
        axis: None | _ShapeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def max(
        self,
        axis: None | _ShapeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def mean(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def mean(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def min(
        self,
        axis: None | _ShapeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def min(
        self,
        axis: None | _ShapeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    def newbyteorder(
        self: _ArraySelf,
        __new_order: _ByteOrder = ...,
    ) -> _ArraySelf: ...
    @overload
    def prod(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def prod(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def ptp(
        self,
        axis: None | _ShapeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
    ) -> Any: ...
    @overload
    def ptp(
        self,
        axis: None | _ShapeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def round(
        self: _ArraySelf,
        decimals: SupportsIndex = ...,
        out: None = ...,
    ) -> _ArraySelf: ...
    @overload
    def round(
        self,
        decimals: SupportsIndex = ...,
        out: _NdArraySubClass = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def std(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: None = ...,
        ddof: float = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def std(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: _NdArraySubClass = ...,
        ddof: float = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def sum(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: None = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def sum(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: _NdArraySubClass = ...,
        keepdims: bool = ...,
        initial: _NumberLike_co = ...,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
    @overload
    def var(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: None = ...,
        ddof: float = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> Any: ...
    @overload
    def var(
        self,
        axis: None | _ShapeLike = ...,
        dtype: DTypeLike = ...,
        out: _NdArraySubClass = ...,
        ddof: float = ...,
        keepdims: bool = ...,
        *,
        where: _ArrayLikeBool_co = ...,
    ) -> _NdArraySubClass: ...
# TypeVars for dtypes and array shapes used by `ndarray` and friends.
_DType = TypeVar("_DType", bound=dtype[Any])
_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any])
_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible])
# TODO: Set the `bound` to something more suitable once we
# have proper shape support
_ShapeType = TypeVar("_ShapeType", bound=Any)
_ShapeType2 = TypeVar("_ShapeType2", bound=Any)
_NumberType = TypeVar("_NumberType", bound=number[Any])
# There is currently no exhaustive way to type the buffer protocol,
# as it is implemented exclusively in the C API (python/typing#593)
_SupportsBuffer = Union[
    bytes,
    bytearray,
    memoryview,
    _array.array[Any],
    mmap.mmap,
    NDArray[Any],
    generic,
]
# General-purpose TypeVars and helper aliases.
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_T_contra = TypeVar("_T_contra", contravariant=True)
_2Tuple = tuple[_T, _T]
_CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"]
# Arrays whose elements can be safely coerced to the named scalar kind.
_ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]]
_ArrayInt_co = NDArray[Union[bool_, integer[Any]]]
_ArrayFloat_co = NDArray[Union[bool_, integer[Any], floating[Any]]]
_ArrayComplex_co = NDArray[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]
_ArrayNumber_co = NDArray[Union[bool_, number[Any]]]
_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]]
# Introduce an alias for `dtype` to avoid naming conflicts.
_dtype = dtype
# `builtins.PyCapsule` unfortunately lacks annotations as of the moment;
# use `Any` as a stopgap measure
_PyCapsule = Any
class _SupportsItem(Protocol[_T_co]):
    """Structural type for objects with an `item` method (e.g. `np.generic`)."""
    def item(self, args: Any, /) -> _T_co: ...
class _SupportsReal(Protocol[_T_co]):
    """Structural type for objects exposing a `real` property."""
    @property
    def real(self) -> _T_co: ...
class _SupportsImag(Protocol[_T_co]):
    """Structural type for objects exposing an `imag` property."""
    @property
    def imag(self) -> _T_co: ...
class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@property
def base(self) -> None | ndarray: ...
@property
def ndim(self) -> int: ...
@property
def size(self) -> int: ...
@property
def real(
self: ndarray[_ShapeType, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var]
) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ...
@real.setter
def real(self, value: ArrayLike) -> None: ...
@property
def imag(
self: ndarray[_ShapeType, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var]
) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ...
@imag.setter
def imag(self, value: ArrayLike) -> None: ...
def __new__(
cls: type[_ArraySelf],
shape: _ShapeLike,
dtype: DTypeLike = ...,
buffer: None | _SupportsBuffer = ...,
offset: SupportsIndex = ...,
strides: None | _ShapeLike = ...,
order: _OrderKACF = ...,
) -> _ArraySelf: ...
if sys.version_info >= (3, 9):
def __class_getitem__(self, item: Any) -> GenericAlias: ...
@overload
def __array__(self, dtype: None = ..., /) -> ndarray[Any, _DType_co]: ...
@overload
def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
def __array_ufunc__(
self,
ufunc: ufunc,
method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
*inputs: Any,
**kwargs: Any,
) -> Any: ...
def __array_function__(
self,
func: Callable[..., Any],
types: Iterable[type],
args: Iterable[Any],
kwargs: Mapping[str, Any],
) -> Any: ...
# NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__`
# is a pseudo-abstract method the type has been narrowed down in order to
# grant subclasses a bit more flexiblity
def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ...
def __array_wrap__(
self,
array: ndarray[_ShapeType2, _DType],
context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
/,
) -> ndarray[_ShapeType2, _DType]: ...
def __array_prepare__(
self,
array: ndarray[_ShapeType2, _DType],
context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
/,
) -> ndarray[_ShapeType2, _DType]: ...
@overload
def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...]) -> Any: ...
@overload
def __getitem__(self, key: (
None
| slice
| ellipsis
| SupportsIndex
| _ArrayLikeInt_co
| tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
)) -> ndarray[Any, _DType_co]: ...
@overload
def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ...
@overload
def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, _dtype[void]]: ...
@property
def ctypes(self) -> _ctypes[int]: ...
@property
def shape(self) -> _Shape: ...
@shape.setter
def shape(self, value: _ShapeLike) -> None: ...
@property
def strides(self) -> _Shape: ...
@strides.setter
def strides(self, value: _ShapeLike) -> None: ...
def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ...
def fill(self, value: Any) -> None: ...
@property
def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ...
# Use the same output type as that of the underlying `generic`
@overload
def item(
self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var]
*args: SupportsIndex,
) -> _T: ...
@overload
def item(
self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var]
args: tuple[SupportsIndex, ...],
/,
) -> _T: ...
@overload
def itemset(self, value: Any, /) -> None: ...
@overload
def itemset(self, item: _ShapeLike, value: Any, /) -> None: ...
@overload
def resize(self, new_shape: _ShapeLike, /, *, refcheck: bool = ...) -> None: ...
@overload
def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ...
def setflags(
self, write: bool = ..., align: bool = ..., uic: bool = ...
) -> None: ...
def squeeze(
self,
axis: None | SupportsIndex | tuple[SupportsIndex, ...] = ...,
) -> ndarray[Any, _DType_co]: ...
def swapaxes(
self,
axis1: SupportsIndex,
axis2: SupportsIndex,
) -> ndarray[Any, _DType_co]: ...
@overload
def transpose(self: _ArraySelf, axes: None | _ShapeLike, /) -> _ArraySelf: ...
@overload
def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ...
def argpartition(
self,
kth: _ArrayLikeInt_co,
axis: None | SupportsIndex = ...,
kind: _PartitionKind = ...,
order: None | str | Sequence[str] = ...,
) -> ndarray[Any, _dtype[intp]]: ...
def diagonal(
self,
offset: SupportsIndex = ...,
axis1: SupportsIndex = ...,
axis2: SupportsIndex = ...,
) -> ndarray[Any, _DType_co]: ...
# 1D + 1D returns a scalar;
# all other with at least 1 non-0D array return an ndarray.
@overload
def dot(self, b: _ScalarLike_co, out: None = ...) -> ndarray: ...
@overload
def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc]
@overload
def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ...
# `nonzero()` is deprecated for 0d arrays/generics
def nonzero(self) -> tuple[ndarray[Any, _dtype[intp]], ...]: ...
def partition(
self,
kth: _ArrayLikeInt_co,
axis: SupportsIndex = ...,
kind: _PartitionKind = ...,
order: None | str | Sequence[str] = ...,
) -> None: ...
# `put` is technically available to `generic`,
# but is pointless as `generic`s are immutable
def put(
self,
ind: _ArrayLikeInt_co,
v: ArrayLike,
mode: _ModeKind = ...,
) -> None: ...
@overload
def searchsorted( # type: ignore[misc]
self, # >= 1D array
v: _ScalarLike_co, # 0D array-like
side: _SortSide = ...,
sorter: None | _ArrayLikeInt_co = ...,
) -> intp: ...
@overload
def searchsorted(
self, # >= 1D array
v: ArrayLike,
side: _SortSide = ...,
sorter: None | _ArrayLikeInt_co = ...,
) -> ndarray[Any, _dtype[intp]]: ...
def setfield(
self,
val: ArrayLike,
dtype: DTypeLike,
offset: SupportsIndex = ...,
) -> None: ...
def sort(
self,
axis: SupportsIndex = ...,
kind: None | _SortKind = ...,
order: None | str | Sequence[str] = ...,
) -> None: ...
@overload
def trace(
self, # >= 2D array
offset: SupportsIndex = ...,
axis1: SupportsIndex = ...,
axis2: SupportsIndex = ...,
dtype: DTypeLike = ...,
out: None = ...,
) -> Any: ...
@overload
def trace(
self, # >= 2D array
offset: SupportsIndex = ...,
axis1: SupportsIndex = ...,
axis2: SupportsIndex = ...,
dtype: DTypeLike = ...,
out: _NdArraySubClass = ...,
) -> _NdArraySubClass: ...
@overload
def take( # type: ignore[misc]
self: ndarray[Any, _dtype[_ScalarType]],
indices: _IntLike_co,
axis: None | SupportsIndex = ...,
out: None = ...,
mode: _ModeKind = ...,
) -> _ScalarType: ...
@overload
def take( # type: ignore[misc]
self,
indices: _ArrayLikeInt_co,
axis: None | SupportsIndex = ...,
out: None = ...,
mode: _ModeKind = ...,
) -> ndarray[Any, _DType_co]: ...
@overload
def take(
self,
indices: _ArrayLikeInt_co,
axis: None | SupportsIndex = ...,
out: _NdArraySubClass = ...,
mode: _ModeKind = ...,
) -> _NdArraySubClass: ...
def repeat(
self,
repeats: _ArrayLikeInt_co,
axis: None | SupportsIndex = ...,
) -> ndarray[Any, _DType_co]: ...
def flatten(
self,
order: _OrderKACF = ...,
) -> ndarray[Any, _DType_co]: ...
def ravel(
self,
order: _OrderKACF = ...,
) -> ndarray[Any, _DType_co]: ...
@overload
def reshape(
self, shape: _ShapeLike, /, *, order: _OrderACF = ...
) -> ndarray[Any, _DType_co]: ...
@overload
def reshape(
self, *shape: SupportsIndex, order: _OrderACF = ...
) -> ndarray[Any, _DType_co]: ...
@overload
def astype(
self,
dtype: _DTypeLike[_ScalarType],
order: _OrderKACF = ...,
casting: _CastingKind = ...,
subok: bool = ...,
copy: bool | _CopyMode = ...,
) -> NDArray[_ScalarType]: ...
@overload
def astype(
self,
dtype: DTypeLike,
order: _OrderKACF = ...,
casting: _CastingKind = ...,
subok: bool = ...,
copy: bool | _CopyMode = ...,
) -> NDArray[Any]: ...
@overload
def view(self: _ArraySelf) -> _ArraySelf: ...
@overload
def view(self, type: type[_NdArraySubClass]) -> _NdArraySubClass: ...
@overload
def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ...
@overload
def view(self, dtype: DTypeLike) -> NDArray[Any]: ...
@overload
def view(
self,
dtype: DTypeLike,
type: type[_NdArraySubClass],
) -> _NdArraySubClass: ...
@overload
def getfield(
self,
dtype: _DTypeLike[_ScalarType],
offset: SupportsIndex = ...
) -> NDArray[_ScalarType]: ...
@overload
def getfield(
self,
dtype: DTypeLike,
offset: SupportsIndex = ...
) -> NDArray[Any]: ...
# Dispatch to the underlying `generic` via protocols
def __int__(
self: ndarray[Any, _dtype[SupportsInt]], # type: ignore[type-var]
) -> int: ...
def __float__(
self: ndarray[Any, _dtype[SupportsFloat]], # type: ignore[type-var]
) -> float: ...
def __complex__(
self: ndarray[Any, _dtype[SupportsComplex]], # type: ignore[type-var]
) -> complex: ...
def __index__(
self: ndarray[Any, _dtype[SupportsIndex]], # type: ignore[type-var]
) -> int: ...
def __len__(self) -> int: ...
def __setitem__(self, key, value): ...
def __iter__(self) -> Any: ...
def __contains__(self, key) -> bool: ...
# The last overload is for catching recursive objects whose
# nesting is too deep.
# The first overload is for catching `bytes` (as they are a subtype of
# `Sequence[int]`) and `str`. As `str` is a recursive sequence of
# strings, it will pass through the final overload otherwise
@overload
def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
@overload
def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
@overload
def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
@overload
def __lt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
@overload
def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
@overload
def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
@overload
def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
@overload
def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
@overload
def __le__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
@overload
def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
@overload
def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
@overload
def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
@overload
def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
@overload
def __gt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
@overload
def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
@overload
def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
@overload
def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
@overload
def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
@overload
def __ge__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
@overload
def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
# Unary ops
@overload
def __abs__(self: NDArray[bool_]) -> NDArray[bool_]: ...
@overload
def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ...
@overload
def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
@overload
def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
@overload
def __abs__(self: NDArray[object_]) -> Any: ...
@overload
def __invert__(self: NDArray[bool_]) -> NDArray[bool_]: ...
@overload
def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ...
@overload
def __invert__(self: NDArray[object_]) -> Any: ...
@overload
def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
@overload
def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
@overload
def __pos__(self: NDArray[object_]) -> Any: ...
@overload
def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
@overload
def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
@overload
def __neg__(self: NDArray[object_]) -> Any: ...
# Binary ops
# NOTE: `ndarray` does not implement `__imatmul__`
@overload
def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __matmul__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
@overload
def __mod__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
@overload
def __rmod__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __divmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc]
@overload
def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc]
@overload
def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc]
@overload
def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc]
@overload
def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
@overload
def __rdivmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc]
@overload
def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc]
@overload
def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc]
@overload
def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc]
@overload
def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
@overload
def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
@overload
def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
@overload
def __add__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
@overload
def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
@overload
def __radd__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
@overload
def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ...
@overload
def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
@overload
def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
@overload
def __sub__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
@overload
def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ...
@overload
def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc]
@overload
def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
@overload
def __rsub__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
@overload
def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@overload
def __mul__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
@overload
def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@overload
def __rmul__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
@overload
def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
@overload
def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
@overload
def __floordiv__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
@overload
def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ...
@overload
def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@overload
def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __pow__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __rpow__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc]
@overload
def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
@overload
def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
@overload
def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
@overload
def __truediv__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc]
@overload
def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
@overload
def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
@overload
def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
@overload
def __rtruediv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ...
@overload
def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@overload
def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __lshift__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __rlshift__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __rshift__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
@overload
def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __rrshift__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __and__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __rand__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __xor__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __rxor__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __or__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@overload
def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
@overload
def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
@overload
def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
@overload
def __ror__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
# `np.generic` does not support inplace operations
# NOTE: Inplace ops generally use "same_kind" casting w.r.t. to the left
# operand. An exception to this rule are unsigned integers though, which
# also accepts a signed integer for the right operand as long it is a 0D
# object and its value is >= 0
@overload
def __iadd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
@overload
def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
@overload
def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
@overload
def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@overload
def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
@overload
def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
@overload
def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
@overload
def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@overload
def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
@overload
def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __imul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
@overload
def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
@overload
def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
@overload
def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
@overload
def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
@overload
def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
@overload
def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
@overload
def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ...
@overload
def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
@overload
def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
@overload
def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
@overload
def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ...
@overload
def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
@overload
def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
@overload
def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
# NOTE(review): the overloads below continue `ndarray`'s in-place operator
# stubs. Each overload constrains `other` per the array's own scalar type,
# so in-place ops cannot widen the array dtype (e.g. int ``%=`` float is
# rejected at type-check time).
@overload
def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
@overload
def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
@overload
def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
# Shift operators are only defined for integer (and object) arrays.
@overload
def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
# Bitwise operators additionally accept boolean arrays.
@overload
def __iand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
@overload
def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __ixor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
@overload
def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
@overload
def __ior__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
@overload
def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
@overload
def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
# DLPack protocol hooks: only defined for numeric arrays; `__dlpack_device__`
# returns a (device_type, device_id) pair with device_id fixed at 0 here.
def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ...
def __dlpack_device__(self) -> tuple[int, L[0]]: ...
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
@property
def dtype(self) -> _DType_co: ...
# NOTE: while `np.generic` is not technically an instance of `ABCMeta`,
# the `@abstractmethod` decorator is herein used to (forcefully) deny
# the creation of `np.generic` instances.
# The `# type: ignore` comments are necessary to silence mypy errors regarding
# the missing `ABCMeta` metaclass.
# See https://github.com/numpy/numpy-stubs/pull/80 for more details.
# TypeVars shared by the scalar-type hierarchy below:
# `_ScalarType` for "same scalar type in and out" signatures, and
# `_NBit1`/`_NBit2` for precision (bit-width) parametrization.
_ScalarType = TypeVar("_ScalarType", bound=generic)
_NBit1 = TypeVar("_NBit1", bound=NBitBase)
_NBit2 = TypeVar("_NBit2", bound=NBitBase)
# Abstract base of all NumPy scalar types. Instances present themselves as
# immutable, zero-dimensional array-likes (empty shape/strides, size 1).
class generic(_ArrayOrScalarCommon):
    @abstractmethod
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    @overload
    def __array__(self: _ScalarType, dtype: None = ..., /) -> ndarray[Any, _dtype[_ScalarType]]: ...
    @overload
    def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
    # Scalars own their data (no `base`) and are 0-D, hence the literal
    # `ndim`/`size` values and empty `shape`/`strides` tuples.
    @property
    def base(self) -> None: ...
    @property
    def ndim(self) -> L[0]: ...
    @property
    def size(self) -> L[1]: ...
    @property
    def shape(self) -> tuple[()]: ...
    @property
    def strides(self) -> tuple[()]: ...
    def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ...
    @property
    def flat(self: _ScalarType) -> flatiter[ndarray[Any, _dtype[_ScalarType]]]: ...
    # `astype` to a known scalar type preserves that type; anything else is Any.
    @overload
    def astype(
        self,
        dtype: _DTypeLike[_ScalarType],
        order: _OrderKACF = ...,
        casting: _CastingKind = ...,
        subok: bool = ...,
        copy: bool | _CopyMode = ...,
    ) -> _ScalarType: ...
    @overload
    def astype(
        self,
        dtype: DTypeLike,
        order: _OrderKACF = ...,
        casting: _CastingKind = ...,
        subok: bool = ...,
        copy: bool | _CopyMode = ...,
    ) -> Any: ...
    # NOTE: `view` will perform a 0D->scalar cast,
    # thus the array `type` is irrelevant to the output type
    @overload
    def view(
        self: _ScalarType,
        type: type[ndarray[Any, Any]] = ...,
    ) -> _ScalarType: ...
    @overload
    def view(
        self,
        dtype: _DTypeLike[_ScalarType],
        type: type[ndarray[Any, Any]] = ...,
    ) -> _ScalarType: ...
    @overload
    def view(
        self,
        dtype: DTypeLike,
        type: type[ndarray[Any, Any]] = ...,
    ) -> Any: ...
    @overload
    def getfield(
        self,
        dtype: _DTypeLike[_ScalarType],
        offset: SupportsIndex = ...
    ) -> _ScalarType: ...
    @overload
    def getfield(
        self,
        dtype: DTypeLike,
        offset: SupportsIndex = ...
    ) -> Any: ...
    # Only 0-D style index arguments are accepted, mirroring `ndarray.item`.
    def item(
        self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
    ) -> Any: ...
    # A scalar index yields a scalar; array-like indices yield an array.
    @overload
    def take(  # type: ignore[misc]
        self: _ScalarType,
        indices: _IntLike_co,
        axis: None | SupportsIndex = ...,
        out: None = ...,
        mode: _ModeKind = ...,
    ) -> _ScalarType: ...
    @overload
    def take(  # type: ignore[misc]
        self: _ScalarType,
        indices: _ArrayLikeInt_co,
        axis: None | SupportsIndex = ...,
        out: None = ...,
        mode: _ModeKind = ...,
    ) -> ndarray[Any, _dtype[_ScalarType]]: ...
    @overload
    def take(
        self,
        indices: _ArrayLikeInt_co,
        axis: None | SupportsIndex = ...,
        out: _NdArraySubClass = ...,
        mode: _ModeKind = ...,
    ) -> _NdArraySubClass: ...
    # Shape-changing operations on a 0-D scalar produce 1-D/0-D arrays
    # of the same scalar type.
    def repeat(
        self: _ScalarType,
        repeats: _ArrayLikeInt_co,
        axis: None | SupportsIndex = ...,
    ) -> ndarray[Any, _dtype[_ScalarType]]: ...
    def flatten(
        self: _ScalarType,
        order: _OrderKACF = ...,
    ) -> ndarray[Any, _dtype[_ScalarType]]: ...
    def ravel(
        self: _ScalarType,
        order: _OrderKACF = ...,
    ) -> ndarray[Any, _dtype[_ScalarType]]: ...
    @overload
    def reshape(
        self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ...
    ) -> ndarray[Any, _dtype[_ScalarType]]: ...
    @overload
    def reshape(
        self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ...
    ) -> ndarray[Any, _dtype[_ScalarType]]: ...
    # `squeeze`/`transpose` on a 0-D scalar are identity-like: only the
    # trivial axis arguments are accepted and the scalar type is preserved.
    def squeeze(
        self: _ScalarType, axis: None | L[0] | tuple[()] = ...
    ) -> _ScalarType: ...
    def transpose(self: _ScalarType, axes: None | tuple[()] = ..., /) -> _ScalarType: ...
    # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
    @property
    def dtype(self: _ScalarType) -> _dtype[_ScalarType]: ...
# Base class for all numeric scalars, parametrized by precision (`_NBit1`).
class number(generic, Generic[_NBit1]): # type: ignore
    @property
    def real(self: _ArraySelf) -> _ArraySelf: ...
    @property
    def imag(self: _ArraySelf) -> _ArraySelf: ...
    if sys.version_info >= (3, 9):
        def __class_getitem__(self, item: Any) -> GenericAlias: ...
    def __int__(self) -> int: ...
    def __float__(self) -> float: ...
    def __complex__(self) -> complex: ...
    def __neg__(self: _ArraySelf) -> _ArraySelf: ...
    def __pos__(self: _ArraySelf) -> _ArraySelf: ...
    def __abs__(self: _ArraySelf) -> _ArraySelf: ...
    # Ensure that objects annotated as `number` support arithmetic operations
    __add__: _NumberOp
    __radd__: _NumberOp
    __sub__: _NumberOp
    __rsub__: _NumberOp
    __mul__: _NumberOp
    __rmul__: _NumberOp
    __floordiv__: _NumberOp
    __rfloordiv__: _NumberOp
    __pow__: _NumberOp
    __rpow__: _NumberOp
    __truediv__: _NumberOp
    __rtruediv__: _NumberOp
    __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
    __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
    __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
    __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
# NumPy boolean scalar. Arithmetic on booleans promotes: `+`/`*` stay
# boolean, `-` is disallowed (`_BoolSub`), `//`, `**` and shifts yield int8,
# `/` yields a float (see the `_Bool*` operator protocols).
class bool_(generic):
    def __init__(self, value: object = ..., /) -> None: ...
    def item(
        self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
    ) -> bool: ...
    def tolist(self) -> bool: ...
    @property
    def real(self: _ArraySelf) -> _ArraySelf: ...
    @property
    def imag(self: _ArraySelf) -> _ArraySelf: ...
    def __int__(self) -> int: ...
    def __float__(self) -> float: ...
    def __complex__(self) -> complex: ...
    def __abs__(self: _ArraySelf) -> _ArraySelf: ...
    __add__: _BoolOp[bool_]
    __radd__: _BoolOp[bool_]
    __sub__: _BoolSub
    __rsub__: _BoolSub
    __mul__: _BoolOp[bool_]
    __rmul__: _BoolOp[bool_]
    __floordiv__: _BoolOp[int8]
    __rfloordiv__: _BoolOp[int8]
    __pow__: _BoolOp[int8]
    __rpow__: _BoolOp[int8]
    __truediv__: _BoolTrueDiv
    __rtruediv__: _BoolTrueDiv
    def __invert__(self) -> bool_: ...
    __lshift__: _BoolBitOp[int8]
    __rlshift__: _BoolBitOp[int8]
    __rshift__: _BoolBitOp[int8]
    __rrshift__: _BoolBitOp[int8]
    __and__: _BoolBitOp[bool_]
    __rand__: _BoolBitOp[bool_]
    __xor__: _BoolBitOp[bool_]
    __rxor__: _BoolBitOp[bool_]
    __or__: _BoolBitOp[bool_]
    __ror__: _BoolBitOp[bool_]
    __mod__: _BoolMod
    __rmod__: _BoolMod
    __divmod__: _BoolDivMod
    __rdivmod__: _BoolDivMod
    __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
    __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
    __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
    __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
# Legacy alias for `bool_`.
bool8 = bool_
# Scalar type for object-dtype elements (arbitrary Python objects).
class object_(generic):
    def __init__(self, value: object = ..., /) -> None: ...
    @property
    def real(self: _ArraySelf) -> _ArraySelf: ...
    @property
    def imag(self: _ArraySelf) -> _ArraySelf: ...
    # The 3 protocols below may or may not raise,
    # depending on the underlying object
    def __int__(self) -> int: ...
    def __float__(self) -> float: ...
    def __complex__(self) -> complex: ...
# Legacy alias for `object_`.
object0 = object_
# The `datetime64` constructors requires an object with the three attributes below,
# and thus supports datetime duck typing
class _DatetimeScalar(Protocol):
    # Structural (duck-typed) protocol: any object exposing integer
    # `day`, `month` and `year` properties qualifies (e.g. `datetime.date`).
    @property
    def day(self) -> int: ...
    @property
    def month(self) -> int: ...
    @property
    def year(self) -> int: ...
# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int`
# depending on the unit
class datetime64(generic):
    # Constructed either from a datetime-like/string/None value, or from an
    # integer offset paired with a mandatory unit/format specifier.
    @overload
    def __init__(
        self,
        value: None | datetime64 | _CharLike_co | _DatetimeScalar = ...,
        format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ...,
        /,
    ) -> None: ...
    @overload
    def __init__(
        self,
        value: int,
        format: _CharLike_co | tuple[_CharLike_co, _IntLike_co],
        /,
    ) -> None: ...
    # datetime +/- timedelta -> datetime; datetime - datetime -> timedelta.
    def __add__(self, other: _TD64Like_co) -> datetime64: ...
    def __radd__(self, other: _TD64Like_co) -> datetime64: ...
    @overload
    def __sub__(self, other: datetime64) -> timedelta64: ...
    @overload
    def __sub__(self, other: _TD64Like_co) -> datetime64: ...
    def __rsub__(self, other: datetime64) -> timedelta64: ...
    __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
    __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
    __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
    __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
# Unions describing the Python-level values accepted by the scalar
# constructors below (`signedinteger`, `floating`, `complexfloating`).
_IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex]
_FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex]
_ComplexValue = Union[
    None,
    _CharLike_co,
    SupportsFloat,
    SupportsComplex,
    SupportsIndex,
    complex,  # `complex` is not a subtype of `SupportsComplex`
]
# Base class for all integer scalars; adds the `Integral`-style interface
# (`numerator`/`denominator`, `__index__`) and bit-wise operators.
class integer(number[_NBit1]): # type: ignore
    @property
    def numerator(self: _ScalarType) -> _ScalarType: ...
    @property
    def denominator(self) -> L[1]: ...
    @overload
    def __round__(self, ndigits: None = ...) -> int: ...
    @overload
    def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ...
    # NOTE: `__index__` is technically defined in the bottom-most
    # sub-classes (`int64`, `uint32`, etc)
    def item(
        self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
    ) -> int: ...
    def tolist(self) -> int: ...
    def is_integer(self) -> L[True]: ...
    def bit_count(self: _ScalarType) -> int: ...
    def __index__(self) -> int: ...
    __truediv__: _IntTrueDiv[_NBit1]
    __rtruediv__: _IntTrueDiv[_NBit1]
    def __mod__(self, value: _IntLike_co) -> integer: ...
    def __rmod__(self, value: _IntLike_co) -> integer: ...
    def __invert__(self: _IntType) -> _IntType: ...
    # Ensure that objects annotated as `integer` support bit-wise operations
    def __lshift__(self, other: _IntLike_co) -> integer: ...
    def __rlshift__(self, other: _IntLike_co) -> integer: ...
    def __rshift__(self, other: _IntLike_co) -> integer: ...
    def __rrshift__(self, other: _IntLike_co) -> integer: ...
    def __and__(self, other: _IntLike_co) -> integer: ...
    def __rand__(self, other: _IntLike_co) -> integer: ...
    def __or__(self, other: _IntLike_co) -> integer: ...
    def __ror__(self, other: _IntLike_co) -> integer: ...
    def __xor__(self, other: _IntLike_co) -> integer: ...
    def __rxor__(self, other: _IntLike_co) -> integer: ...
# Signed integer scalar of precision `_NBit1`; operators are expressed via
# the `_SignedInt*` callback protocols so precision is propagated.
class signedinteger(integer[_NBit1]):
    def __init__(self, value: _IntValue = ..., /) -> None: ...
    __add__: _SignedIntOp[_NBit1]
    __radd__: _SignedIntOp[_NBit1]
    __sub__: _SignedIntOp[_NBit1]
    __rsub__: _SignedIntOp[_NBit1]
    __mul__: _SignedIntOp[_NBit1]
    __rmul__: _SignedIntOp[_NBit1]
    __floordiv__: _SignedIntOp[_NBit1]
    __rfloordiv__: _SignedIntOp[_NBit1]
    __pow__: _SignedIntOp[_NBit1]
    __rpow__: _SignedIntOp[_NBit1]
    __lshift__: _SignedIntBitOp[_NBit1]
    __rlshift__: _SignedIntBitOp[_NBit1]
    __rshift__: _SignedIntBitOp[_NBit1]
    __rrshift__: _SignedIntBitOp[_NBit1]
    __and__: _SignedIntBitOp[_NBit1]
    __rand__: _SignedIntBitOp[_NBit1]
    __xor__: _SignedIntBitOp[_NBit1]
    __rxor__: _SignedIntBitOp[_NBit1]
    __or__: _SignedIntBitOp[_NBit1]
    __ror__: _SignedIntBitOp[_NBit1]
    __mod__: _SignedIntMod[_NBit1]
    __rmod__: _SignedIntMod[_NBit1]
    __divmod__: _SignedIntDivMod[_NBit1]
    __rdivmod__: _SignedIntDivMod[_NBit1]
# Fixed-width aliases (int8..int64) plus aliases parametrized by the
# `_NBit*` precisions of the corresponding C types.
int8 = signedinteger[_8Bit]
int16 = signedinteger[_16Bit]
int32 = signedinteger[_32Bit]
int64 = signedinteger[_64Bit]
byte = signedinteger[_NBitByte]
short = signedinteger[_NBitShort]
intc = signedinteger[_NBitIntC]
intp = signedinteger[_NBitIntP]
int0 = signedinteger[_NBitIntP]
int_ = signedinteger[_NBitInt]
longlong = signedinteger[_NBitLongLong]
# TODO: `item`/`tolist` returns either `dt.timedelta` or `int`
# depending on the unit
class timedelta64(generic):
    def __init__(
        self,
        value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ...,
        format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ...,
        /,
    ) -> None: ...
    @property
    def numerator(self: _ScalarType) -> _ScalarType: ...
    @property
    def denominator(self) -> L[1]: ...
    # NOTE: Only a limited number of units support conversion
    # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as`
    def __int__(self) -> int: ...
    def __float__(self) -> float: ...
    def __complex__(self) -> complex: ...
    def __neg__(self: _ArraySelf) -> _ArraySelf: ...
    def __pos__(self: _ArraySelf) -> _ArraySelf: ...
    def __abs__(self: _ArraySelf) -> _ArraySelf: ...
    def __add__(self, other: _TD64Like_co) -> timedelta64: ...
    def __radd__(self, other: _TD64Like_co) -> timedelta64: ...
    def __sub__(self, other: _TD64Like_co) -> timedelta64: ...
    def __rsub__(self, other: _TD64Like_co) -> timedelta64: ...
    # Scaling by a float keeps the timedelta type.
    def __mul__(self, other: _FloatLike_co) -> timedelta64: ...
    def __rmul__(self, other: _FloatLike_co) -> timedelta64: ...
    # timedelta / timedelta -> float64; timedelta // timedelta -> int64.
    __truediv__: _TD64Div[float64]
    __floordiv__: _TD64Div[int64]
    def __rtruediv__(self, other: timedelta64) -> float64: ...
    def __rfloordiv__(self, other: timedelta64) -> int64: ...
    def __mod__(self, other: timedelta64) -> timedelta64: ...
    def __rmod__(self, other: timedelta64) -> timedelta64: ...
    def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ...
    def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ...
    __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
    __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
    __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
    __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
# Unsigned integer scalar of precision `_NBit1`.
class unsignedinteger(integer[_NBit1]):
    # NOTE: `uint64 + signedinteger -> float64`
    def __init__(self, value: _IntValue = ..., /) -> None: ...
    __add__: _UnsignedIntOp[_NBit1]
    __radd__: _UnsignedIntOp[_NBit1]
    __sub__: _UnsignedIntOp[_NBit1]
    __rsub__: _UnsignedIntOp[_NBit1]
    __mul__: _UnsignedIntOp[_NBit1]
    __rmul__: _UnsignedIntOp[_NBit1]
    __floordiv__: _UnsignedIntOp[_NBit1]
    __rfloordiv__: _UnsignedIntOp[_NBit1]
    __pow__: _UnsignedIntOp[_NBit1]
    __rpow__: _UnsignedIntOp[_NBit1]
    __lshift__: _UnsignedIntBitOp[_NBit1]
    __rlshift__: _UnsignedIntBitOp[_NBit1]
    __rshift__: _UnsignedIntBitOp[_NBit1]
    __rrshift__: _UnsignedIntBitOp[_NBit1]
    __and__: _UnsignedIntBitOp[_NBit1]
    __rand__: _UnsignedIntBitOp[_NBit1]
    __xor__: _UnsignedIntBitOp[_NBit1]
    __rxor__: _UnsignedIntBitOp[_NBit1]
    __or__: _UnsignedIntBitOp[_NBit1]
    __ror__: _UnsignedIntBitOp[_NBit1]
    __mod__: _UnsignedIntMod[_NBit1]
    __rmod__: _UnsignedIntMod[_NBit1]
    __divmod__: _UnsignedIntDivMod[_NBit1]
    __rdivmod__: _UnsignedIntDivMod[_NBit1]
# Fixed-width and C-type-precision aliases, mirroring the signed ones above.
uint8 = unsignedinteger[_8Bit]
uint16 = unsignedinteger[_16Bit]
uint32 = unsignedinteger[_32Bit]
uint64 = unsignedinteger[_64Bit]
ubyte = unsignedinteger[_NBitByte]
ushort = unsignedinteger[_NBitShort]
uintc = unsignedinteger[_NBitIntC]
uintp = unsignedinteger[_NBitIntP]
uint0 = unsignedinteger[_NBitIntP]
uint = unsignedinteger[_NBitInt]
ulonglong = unsignedinteger[_NBitLongLong]
# Common base of floating-point and complex scalars.
class inexact(number[_NBit1]): # type: ignore
    def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ...
# TypeVars constrained to the integer/floating branches of the hierarchy.
_IntType = TypeVar("_IntType", bound=integer)
_FloatType = TypeVar('_FloatType', bound=floating)
# Real floating-point scalar of precision `_NBit1`. Several float-specific
# methods (`hex`, `fromhex`, `__ceil__`, ...) are declared for `float64` only.
class floating(inexact[_NBit1]):
    def __init__(self, value: _FloatValue = ..., /) -> None: ...
    def item(
        self, args: L[0] | tuple[()] | tuple[L[0]] = ...,
        /,
    ) -> float: ...
    def tolist(self) -> float: ...
    def is_integer(self) -> bool: ...
    def hex(self: float64) -> str: ...
    @classmethod
    def fromhex(cls: type[float64], string: str, /) -> float64: ...
    def as_integer_ratio(self) -> tuple[int, int]: ...
    if sys.version_info >= (3, 9):
        def __ceil__(self: float64) -> int: ...
        def __floor__(self: float64) -> int: ...
        def __trunc__(self: float64) -> int: ...
    def __getnewargs__(self: float64) -> tuple[float]: ...
    def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ...
    @overload
    def __round__(self, ndigits: None = ...) -> int: ...
    @overload
    def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ...
    __add__: _FloatOp[_NBit1]
    __radd__: _FloatOp[_NBit1]
    __sub__: _FloatOp[_NBit1]
    __rsub__: _FloatOp[_NBit1]
    __mul__: _FloatOp[_NBit1]
    __rmul__: _FloatOp[_NBit1]
    __truediv__: _FloatOp[_NBit1]
    __rtruediv__: _FloatOp[_NBit1]
    __floordiv__: _FloatOp[_NBit1]
    __rfloordiv__: _FloatOp[_NBit1]
    __pow__: _FloatOp[_NBit1]
    __rpow__: _FloatOp[_NBit1]
    __mod__: _FloatMod[_NBit1]
    __rmod__: _FloatMod[_NBit1]
    __divmod__: _FloatDivMod[_NBit1]
    __rdivmod__: _FloatDivMod[_NBit1]
# Fixed-width and C-type-precision floating aliases.
float16 = floating[_16Bit]
float32 = floating[_32Bit]
float64 = floating[_64Bit]
half = floating[_NBitHalf]
single = floating[_NBitSingle]
double = floating[_NBitDouble]
float_ = floating[_NBitDouble]
longdouble = floating[_NBitLongDouble]
longfloat = floating[_NBitLongDouble]
# The main reason for `complexfloating` having two typevars is cosmetic.
# It is used to clarify why `complex128`s precision is `_64Bit`, the latter
# describing the two 64 bit floats representing its real and imaginary component
class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]):
    def __init__(self, value: _ComplexValue = ..., /) -> None: ...
    def item(
        self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
    ) -> complex: ...
    def tolist(self) -> complex: ...
    # `real`/`imag`/`abs` drop to the component floating type.
    @property
    def real(self) -> floating[_NBit1]: ...  # type: ignore[override]
    @property
    def imag(self) -> floating[_NBit2]: ...  # type: ignore[override]
    def __abs__(self) -> floating[_NBit1]: ...  # type: ignore[override]
    def __getnewargs__(self: complex128) -> tuple[float, float]: ...
    # NOTE: Deprecated
    # def __round__(self, ndigits=...): ...
    __add__: _ComplexOp[_NBit1]
    __radd__: _ComplexOp[_NBit1]
    __sub__: _ComplexOp[_NBit1]
    __rsub__: _ComplexOp[_NBit1]
    __mul__: _ComplexOp[_NBit1]
    __rmul__: _ComplexOp[_NBit1]
    __truediv__: _ComplexOp[_NBit1]
    __rtruediv__: _ComplexOp[_NBit1]
    __pow__: _ComplexOp[_NBit1]
    __rpow__: _ComplexOp[_NBit1]
# Fixed-width and C-type-precision complex aliases.
complex64 = complexfloating[_32Bit, _32Bit]
complex128 = complexfloating[_64Bit, _64Bit]
csingle = complexfloating[_NBitSingle, _NBitSingle]
singlecomplex = complexfloating[_NBitSingle, _NBitSingle]
cdouble = complexfloating[_NBitDouble, _NBitDouble]
complex_ = complexfloating[_NBitDouble, _NBitDouble]
cfloat = complexfloating[_NBitDouble, _NBitDouble]
clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble]
clongfloat = complexfloating[_NBitLongDouble, _NBitLongDouble]
longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble]
# Base class for scalars with a data-dependent item size (void/str/bytes).
class flexible(generic): ... # type: ignore
# TODO: `item`/`tolist` returns either `bytes` or `tuple`
# depending on whether or not it's used as an opaque bytes sequence
# or a structure
class void(flexible):
    def __init__(self, value: _IntLike_co | bytes, /) -> None: ...
    @property
    def real(self: _ArraySelf) -> _ArraySelf: ...
    @property
    def imag(self: _ArraySelf) -> _ArraySelf: ...
    def setfield(
        self, val: ArrayLike, dtype: DTypeLike, offset: int = ...
    ) -> None: ...
    # Indexing by field name/index yields the field value; a list of names
    # yields a sub-void restricted to those fields.
    @overload
    def __getitem__(self, key: str | SupportsIndex) -> Any: ...
    @overload
    def __getitem__(self, key: list[str]) -> void: ...
    def __setitem__(
        self,
        key: str | list[str] | SupportsIndex,
        value: ArrayLike,
    ) -> None: ...
# Legacy alias for `void`.
void0 = void
# Base class for the string scalar types (`bytes_`, `str_`).
class character(flexible): # type: ignore
    def __int__(self) -> int: ...
    def __float__(self) -> float: ...
# NOTE: Most `np.bytes_` / `np.str_` methods return their
# builtin `bytes` / `str` counterpart
class bytes_(character, bytes):
    @overload
    def __init__(self, value: object = ..., /) -> None: ...
    @overload
    def __init__(
        self, value: str, /, encoding: str = ..., errors: str = ...
    ) -> None: ...
    def item(
        self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
    ) -> bytes: ...
    def tolist(self) -> bytes: ...
# Legacy aliases for `bytes_`.
string_ = bytes_
bytes0 = bytes_
class str_(character, str):
    @overload
    def __init__(self, value: object = ..., /) -> None: ...
    @overload
    def __init__(
        self, value: bytes, /, encoding: str = ..., errors: str = ...
    ) -> None: ...
    def item(
        self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
    ) -> str: ...
    def tolist(self) -> str: ...
# Legacy aliases for `str_`.
unicode_ = str_
str0 = str_
#
# Constants
#
# IEEE-754 special values and common mathematical constants (several are
# aliases of the same float value).
Inf: Final[float]
Infinity: Final[float]
NAN: Final[float]
NINF: Final[float]
NZERO: Final[float]
NaN: Final[float]
PINF: Final[float]
PZERO: Final[float]
e: Final[float]
euler_gamma: Final[float]
inf: Final[float]
infty: Final[float]
nan: Final[float]
pi: Final[float]
# NOTE(review): CLIP/WRAP/RAISE match the `mode: _ModeKind` argument of
# `take`-style APIs (out-of-bounds index handling) — confirm against docs.
CLIP: L[0]
WRAP: L[1]
RAISE: L[2]
# Floating-point error-handling constants (ERR_*: policies, FPE_*: error
# category bit flags, SHIFT_*: per-category bit offsets in the packed value).
ERR_IGNORE: L[0]
ERR_WARN: L[1]
ERR_RAISE: L[2]
ERR_CALL: L[3]
ERR_PRINT: L[4]
ERR_LOG: L[5]
ERR_DEFAULT: L[521]
SHIFT_DIVIDEBYZERO: L[0]
SHIFT_OVERFLOW: L[3]
SHIFT_UNDERFLOW: L[6]
SHIFT_INVALID: L[9]
FPE_DIVIDEBYZERO: L[1]
FPE_OVERFLOW: L[2]
FPE_UNDERFLOW: L[4]
FPE_INVALID: L[8]
FLOATING_POINT_SUPPORT: L[1]
UFUNC_BUFSIZE_DEFAULT = BUFSIZE
little_endian: Final[bool]
# Singleton boolean scalars.
True_: Final[bool_]
False_: Final[bool_]
UFUNC_PYVALS_NAME: L["UFUNC_PYVALS"]
# `np.newaxis` is literally `None`.
newaxis: None
# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
@final
class ufunc:
    # Stub for `numpy.ufunc`: the universal-function object type.
    # Instances (np.add, np.sin, ...) are declared further below.
    @property
    def __name__(self) -> str: ...
    @property
    def __doc__(self) -> str: ...
    __call__: Callable[..., Any]
    # Number of inputs, outputs, and total arguments.
    @property
    def nin(self) -> int: ...
    @property
    def nout(self) -> int: ...
    @property
    def nargs(self) -> int: ...
    # Number of supported input->output type signatures and their
    # string representations.
    @property
    def ntypes(self) -> int: ...
    @property
    def types(self) -> list[str]: ...
    # Broad return type because it has to encompass things like
    #
    # >>> np.logical_and.identity is True
    # True
    # >>> np.add.identity is 0
    # True
    # >>> np.sin.identity is None
    # True
    #
    # and any user-defined ufuncs.
    @property
    def identity(self) -> Any: ...
    # This is None for ufuncs and a string for gufuncs.
    @property
    def signature(self) -> None | str: ...
    # The next four methods will always exist, but they will just
    # raise a ValueError for ufuncs that don't accept two input
    # arguments and return one output argument. Because of that we
    # can't type them very precisely.
    reduce: Any
    accumulate: Any
    # BUG FIX: this was a duplicate `reduce: Any` annotation; the fourth
    # reduction-style method on a ufunc is `reduceat`.
    reduceat: Any
    outer: Any
    # Similarly at won't be defined for ufuncs that return multiple
    # outputs, so we can't type it very precisely.
    at: Any
# Parameters: `__name__`, `ntypes` and `identity`
# Each declared ufunc instance is annotated with the literal name it reports,
# its number of type signatures, and its identity element (None if it has none).
absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None]
add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]]
arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None]
arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None]
arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None]
arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None]
arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None]
arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None]
arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None]
bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]]
# `bitwise_not` is an alias of `invert`, hence the `'invert'` name literal.
bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]]
bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]]
cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None]
ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None]
conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None]
cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None]
cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None]
deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None]
degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None]
# `divide` is an alias of `true_divide`.
divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None]
equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None]
exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None]
exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None]
expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None]
fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None]
float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None]
floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None]
floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None]
fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None]
fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None]
fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None]
frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None]
gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]]
greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None]
greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None]
heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None]
hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]]
invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None]
isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None]
isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None]
isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None]
lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None]
ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None]
left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None]
less: _UFunc_Nin2_Nout1[L['less'], L[23], None]
less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None]
log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None]
log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None]
log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None]
log: _UFunc_Nin1_Nout1[L['log'], L[10], None]
logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float]
logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float]
logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]]
logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None]
logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]]
logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]]
# `matmul` is a gufunc (generalized ufunc), hence the distinct protocol.
matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None]
maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None]
minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None]
# `mod` is an alias of `remainder`.
mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None]
multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]]
negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None]
nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None]
not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None]
positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None]
power: _UFunc_Nin2_Nout1[L['power'], L[18], None]
rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None]
radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None]
reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None]
remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None]
rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None]
sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None]
signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None]
sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None]
sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None]
spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None]
sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None]
square: _UFunc_Nin1_Nout1[L['square'], L[18], None]
subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None]
tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None]
tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None]
true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None]
# `np.abs` is an alias of `np.absolute` (shadows the builtin inside numpy).
abs = absolute
# Copy policy accepted by `astype(copy=...)` and friends; ALWAYS/IF_NEEDED
# are value-compatible with True/False for backwards compatibility.
class _CopyMode(enum.Enum):
    ALWAYS: L[True]
    IF_NEEDED: L[False]
    NEVER: L[2]
# Warnings
class ModuleDeprecationWarning(DeprecationWarning): ...
class VisibleDeprecationWarning(UserWarning): ...
class ComplexWarning(RuntimeWarning): ...
class RankWarning(UserWarning): ...
# Errors
class TooHardError(RuntimeError): ...
# Raised for out-of-bounds axis arguments; derives from both ValueError and
# IndexError for backwards compatibility with older exception types.
class AxisError(ValueError, IndexError):
    axis: None | int
    ndim: None | int
    # Either a pre-formatted message string, or an (axis, ndim) pair with an
    # optional message prefix.
    @overload
    def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ...
    @overload
    def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ...
_CallType = TypeVar("_CallType", bound=_ErrFunc | _SupportsWrite[str])
# Context manager / decorator for scoping floating-point error handling.
class errstate(Generic[_CallType], ContextDecorator):
    call: _CallType
    kwargs: _ErrDictOptional
    # Expand `**kwargs` into explicit keyword-only arguments
    def __init__(
        self,
        *,
        call: _CallType = ...,
        all: None | _ErrKind = ...,
        divide: None | _ErrKind = ...,
        over: None | _ErrKind = ...,
        under: None | _ErrKind = ...,
        invalid: None | _ErrKind = ...,
    ) -> None: ...
    def __enter__(self) -> None: ...
    def __exit__(
        self,
        exc_type: None | type[BaseException],
        exc_value: None | BaseException,
        traceback: None | TracebackType,
        /,
    ) -> None: ...
# Iterator yielding (multi-index, scalar) pairs; the overloads infer the
# scalar type from the builtin element type of the input.
class ndenumerate(Generic[_ScalarType]):
    iter: flatiter[NDArray[_ScalarType]]
    @overload
    def __new__(
        cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]],
    ) -> ndenumerate[_ScalarType]: ...
    @overload
    def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ...
    @overload
    def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ...
    @overload
    def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[bool_]: ...
    @overload
    def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ...
    @overload
    def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float_]: ...
    @overload
    def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex_]: ...
    def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ...
    def __iter__(self: _T) -> _T: ...
# Iterator over all multi-indices of a given shape; the shape may be passed
# as one tuple or as separate integer arguments.
class ndindex:
    @overload
    def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ...
    @overload
    def __init__(self, *shape: SupportsIndex) -> None: ...
    def __iter__(self: _T) -> _T: ...
    def __next__(self) -> _Shape: ...
# Generic file/URL opener with an optional local destination directory.
class DataSource:
    def __init__(
        self,
        destpath: None | str | os.PathLike[str] = ...,
    ) -> None: ...
    def __del__(self) -> None: ...
    def abspath(self, path: str) -> str: ...
    def exists(self, path: str) -> bool: ...
    # Whether the file-object is opened in string or bytes mode (by default)
    # depends on the file-extension of `path`
    def open(
        self,
        path: str,
        mode: str = ...,
        encoding: None | str = ...,
        newline: None | str = ...,
    ) -> IO[Any]: ...
# TODO: The type of each `__next__` and `iters` return-type depends
# on the length and dtype of `args`; we can't describe this behavior yet
# as we lack variadics (PEP 646).
@final
class broadcast:
    # Broadcasts any number of array-likes against one another and iterates
    # over the result element-wise.
    def __new__(cls, *args: ArrayLike) -> broadcast: ...
    @property
    def index(self) -> int: ...
    @property
    def iters(self) -> tuple[flatiter[Any], ...]: ...
    @property
    def nd(self) -> int: ...
    @property
    def ndim(self) -> int: ...
    @property
    def numiter(self) -> int: ...
    @property
    def shape(self) -> _Shape: ...
    @property
    def size(self) -> int: ...
    def __next__(self) -> tuple[Any, ...]: ...
    def __iter__(self: _T) -> _T: ...
    def reset(self) -> None: ...
@final
class busdaycalendar:
    # Business-day calendar defined by a weekly mask and a holiday list.
    def __new__(
        cls,
        weekmask: ArrayLike = ...,
        holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
    ) -> busdaycalendar: ...
    @property
    def weekmask(self) -> NDArray[bool_]: ...
    @property
    def holidays(self) -> NDArray[datetime64]: ...
# Machine limits for a floating-point type (eps, min/max, exponent range...).
class finfo(Generic[_FloatType]):
    dtype: dtype[_FloatType]
    bits: int
    eps: _FloatType
    epsneg: _FloatType
    iexp: int
    machep: int
    max: _FloatType
    maxexp: int
    min: _FloatType
    minexp: int
    negep: int
    nexp: int
    nmant: int
    precision: int
    resolution: _FloatType
    smallest_subnormal: _FloatType
    @property
    def smallest_normal(self) -> _FloatType: ...
    @property
    def tiny(self) -> _FloatType: ...
    # Complex inputs resolve to the precision of their floating components;
    # plain strings cannot be narrowed beyond `floating[Any]`.
    @overload
    def __new__(
        cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]
    ) -> finfo[floating[_NBit1]]: ...
    @overload
    def __new__(
        cls, dtype: complex | float | type[complex] | type[float]
    ) -> finfo[float_]: ...
    @overload
    def __new__(
        cls, dtype: str
    ) -> finfo[floating[Any]]: ...
# Machine limits for an integer type.
class iinfo(Generic[_IntType]):
    dtype: dtype[_IntType]
    kind: str
    bits: int
    key: str
    @property
    def min(self) -> int: ...
    @property
    def max(self) -> int: ...
    @overload
    def __new__(cls, dtype: _IntType | _DTypeLike[_IntType]) -> iinfo[_IntType]: ...
    @overload
    def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ...
    @overload
    def __new__(cls, dtype: str) -> iinfo[Any]: ...
# Helper that converts format/name/title specifications into a structured
# (void) dtype, as used by the `recarray` constructors.
class format_parser:
    dtype: dtype[void]
    def __init__(
        self,
        formats: DTypeLike,
        names: None | str | Sequence[str],
        titles: None | str | Sequence[str],
        aligned: bool = ...,
        byteorder: None | _ByteOrder = ...,
    ) -> None: ...
class recarray(ndarray[_ShapeType, _DType_co]):
    # Stub for np.recarray: an ndarray subclass whose fields are also
    # accessible as attributes.
    # NOTE: While not strictly mandatory, we're demanding here that arguments
    # for the `format_parser`- and `dtype`-based dtype constructors are
    # mutually exclusive
    @overload
    def __new__(
        subtype,
        shape: _ShapeLike,
        dtype: None = ...,
        buf: None | _SupportsBuffer = ...,
        offset: SupportsIndex = ...,
        strides: None | _ShapeLike = ...,
        *,
        formats: DTypeLike,
        names: None | str | Sequence[str] = ...,
        titles: None | str | Sequence[str] = ...,
        byteorder: None | _ByteOrder = ...,
        aligned: bool = ...,
        order: _OrderKACF = ...,
    ) -> recarray[Any, dtype[record]]: ...
    @overload
    def __new__(
        subtype,
        shape: _ShapeLike,
        dtype: DTypeLike,
        buf: None | _SupportsBuffer = ...,
        offset: SupportsIndex = ...,
        strides: None | _ShapeLike = ...,
        formats: None = ...,
        names: None = ...,
        titles: None = ...,
        byteorder: None = ...,
        aligned: L[False] = ...,
        order: _OrderKACF = ...,
    ) -> recarray[Any, dtype[Any]]: ...
    def __array_finalize__(self, obj: object) -> None: ...
    # Attribute access is dynamic: unknown attributes resolve to record fields.
    def __getattribute__(self, attr: str) -> Any: ...
    def __setattr__(self, attr: str, val: ArrayLike) -> None: ...
    # Indexing overloads: integer-style indexing yields a scalar-ish Any;
    # slice/ellipsis-style indexing preserves the (sub)array type; a field
    # name yields that field's array; a list of field names yields a recarray view.
    @overload
    def __getitem__(self, indx: (
        SupportsIndex
        | _ArrayLikeInt_co
        | tuple[SupportsIndex | _ArrayLikeInt_co, ...]
    )) -> Any: ...
    @overload
    def __getitem__(self: recarray[Any, dtype[void]], indx: (
        None
        | slice
        | ellipsis
        | SupportsIndex
        | _ArrayLikeInt_co
        | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
    )) -> recarray[Any, _DType_co]: ...
    @overload
    def __getitem__(self, indx: (
        None
        | slice
        | ellipsis
        | SupportsIndex
        | _ArrayLikeInt_co
        | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
    )) -> ndarray[Any, _DType_co]: ...
    @overload
    def __getitem__(self, indx: str) -> NDArray[Any]: ...
    @overload
    def __getitem__(self, indx: list[str]) -> recarray[_ShapeType, dtype[record]]: ...
    # field(): with no value, read the field; with a value, assign it in place.
    @overload
    def field(self, attr: int | str, val: None = ...) -> Any: ...
    @overload
    def field(self, attr: int | str, val: ArrayLike) -> None: ...
class record(void):
    # Stub for np.record: a single structured scalar whose fields can be
    # read and written as attributes as well as by key/index.
    def __getattribute__(self, attr: str) -> Any: ...
    def __setattr__(self, attr: str, val: ArrayLike) -> None: ...
    def pprint(self) -> str: ...
    @overload
    def __getitem__(self, key: str | SupportsIndex) -> Any: ...
    @overload
    def __getitem__(self, key: list[str]) -> record: ...
# Literal alias of the global flag strings accepted by nditer(flags=...).
_NDIterFlagsKind = L[
    "buffered",
    "c_index",
    "copy_if_overlap",
    "common_dtype",
    "delay_bufalloc",
    "external_loop",
    "f_index",
    "grow_inner", "growinner",
    "multi_index",
    "ranged",
    "refs_ok",
    "reduce_ok",
    "zerosize_ok",
]

# Literal alias of the per-operand flag strings accepted by nditer(op_flags=...).
_NDIterOpFlagsKind = L[
    "aligned",
    "allocate",
    "arraymask",
    "copy",
    "config",
    "nbo",
    "no_subtype",
    "no_broadcast",
    "overlap_assume_elementwise",
    "readonly",
    "readwrite",
    "updateifcopy",
    "virtual",
    "writeonly",
    "writemasked"
]
@final
class nditer:
    # Stub for np.nditer: the efficient multi-array iterator.  Usable as an
    # iterator, a sequence of operand views, and a context manager (which
    # releases buffers / writes back on exit).
    def __new__(
        cls,
        op: ArrayLike | Sequence[ArrayLike],
        flags: None | Sequence[_NDIterFlagsKind] = ...,
        op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ...,
        op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
        order: _OrderKACF = ...,
        casting: _CastingKind = ...,
        op_axes: None | Sequence[Sequence[SupportsIndex]] = ...,
        itershape: None | _ShapeLike = ...,
        buffersize: SupportsIndex = ...,
    ) -> nditer: ...
    def __enter__(self) -> nditer: ...
    def __exit__(
        self,
        exc_type: None | type[BaseException],
        exc_value: None | BaseException,
        traceback: None | TracebackType,
    ) -> None: ...
    def __iter__(self) -> nditer: ...
    def __next__(self) -> tuple[NDArray[Any], ...]: ...
    def __len__(self) -> int: ...
    def __copy__(self) -> nditer: ...
    # Integer indexing returns one operand view; slicing returns several.
    @overload
    def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ...
    @overload
    def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ...
    def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ...
    def close(self) -> None: ...
    def copy(self) -> nditer: ...
    def debug_print(self) -> None: ...
    def enable_external_loop(self) -> None: ...
    def iternext(self) -> bool: ...
    def remove_axis(self, i: SupportsIndex, /) -> None: ...
    def remove_multi_index(self) -> None: ...
    def reset(self) -> None: ...
    @property
    def dtypes(self) -> tuple[dtype[Any], ...]: ...
    @property
    def finished(self) -> bool: ...
    @property
    def has_delayed_bufalloc(self) -> bool: ...
    @property
    def has_index(self) -> bool: ...
    @property
    def has_multi_index(self) -> bool: ...
    @property
    def index(self) -> int: ...
    @property
    def iterationneedsapi(self) -> bool: ...
    @property
    def iterindex(self) -> int: ...
    @property
    def iterrange(self) -> tuple[int, ...]: ...
    @property
    def itersize(self) -> int: ...
    @property
    def itviews(self) -> tuple[NDArray[Any], ...]: ...
    @property
    def multi_index(self) -> tuple[int, ...]: ...
    @property
    def ndim(self) -> int: ...
    @property
    def nop(self) -> int: ...
    @property
    def operands(self) -> tuple[NDArray[Any], ...]: ...
    @property
    def shape(self) -> tuple[int, ...]: ...
    @property
    def value(self) -> tuple[NDArray[Any], ...]: ...
# Literal alias of the file-access modes accepted by memmap(mode=...);
# each long name is paired with its single-character shorthand.
_MemMapModeKind = L[
    "readonly", "r",
    "copyonwrite", "c",
    "readwrite", "r+",
    "write", "w+",
]
class memmap(ndarray[_ShapeType, _DType_co]):
    # Stub for np.memmap: an ndarray backed by a memory-mapped file.
    __array_priority__: ClassVar[float]
    filename: str | None
    offset: int
    mode: str
    # Overloads on `dtype`: omitted -> uint8; a known scalar type -> that
    # scalar's dtype; any other dtype-like -> dtype[Any].
    @overload
    def __new__(
        subtype,
        filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol,
        dtype: type[uint8] = ...,
        mode: _MemMapModeKind = ...,
        offset: int = ...,
        shape: None | int | tuple[int, ...] = ...,
        order: _OrderKACF = ...,
    ) -> memmap[Any, dtype[uint8]]: ...
    @overload
    def __new__(
        subtype,
        filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol,
        dtype: _DTypeLike[_ScalarType],
        mode: _MemMapModeKind = ...,
        offset: int = ...,
        shape: None | int | tuple[int, ...] = ...,
        order: _OrderKACF = ...,
    ) -> memmap[Any, dtype[_ScalarType]]: ...
    @overload
    def __new__(
        subtype,
        filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol,
        dtype: DTypeLike,
        mode: _MemMapModeKind = ...,
        offset: int = ...,
        shape: None | int | tuple[int, ...] = ...,
        order: _OrderKACF = ...,
    ) -> memmap[Any, dtype[Any]]: ...
    def __array_finalize__(self, obj: object) -> None: ...
    def __array_wrap__(
        self,
        array: memmap[_ShapeType, _DType_co],
        context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
    ) -> Any: ...
    # Flushes in-memory changes back to the backing file.
    def flush(self) -> None: ...
# TODO: Add a mypy plugin for managing functions whose output type is dependant
# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`)
class vectorize:
    # Stub for np.vectorize: wraps a Python callable so it broadcasts over
    # array inputs.  Call types are untypeable statically, hence Any.
    pyfunc: Callable[..., Any]
    cache: bool
    signature: None | str
    otypes: None | str
    excluded: set[int | str]
    __doc__: None | str
    def __init__(
        self,
        pyfunc: Callable[..., Any],
        otypes: None | str | Iterable[DTypeLike] = ...,
        doc: None | str = ...,
        excluded: None | Iterable[int | str] = ...,
        cache: bool = ...,
        signature: None | str = ...,
    ) -> None: ...
    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
class poly1d:
    # Stub for np.poly1d: a one-dimensional polynomial with operator overloads.
    @property
    def variable(self) -> str: ...
    @property
    def order(self) -> int: ...
    @property
    def o(self) -> int: ...
    @property
    def roots(self) -> NDArray[Any]: ...
    @property
    def r(self) -> NDArray[Any]: ...
    # `coeffs`, `c`, `coef`, and `coefficients` are all aliases for the same
    # coefficient array; each exposes a settable property.
    @property
    def coeffs(self) -> NDArray[Any]: ...
    @coeffs.setter
    def coeffs(self, value: NDArray[Any]) -> None: ...
    @property
    def c(self) -> NDArray[Any]: ...
    @c.setter
    def c(self, value: NDArray[Any]) -> None: ...
    @property
    def coef(self) -> NDArray[Any]: ...
    @coef.setter
    def coef(self, value: NDArray[Any]) -> None: ...
    @property
    def coefficients(self) -> NDArray[Any]: ...
    @coefficients.setter
    def coefficients(self, value: NDArray[Any]) -> None: ...
    # poly1d is mutable and defines __eq__ at runtime, so it is unhashable.
    __hash__: None  # type: ignore
    @overload
    def __array__(self, t: None = ...) -> NDArray[Any]: ...
    @overload
    def __array__(self, t: _DType) -> ndarray[Any, _DType]: ...
    # Evaluating at a scalar gives a scalar-ish Any; at a poly1d gives the
    # composed polynomial; at an array-like gives an array of values.
    @overload
    def __call__(self, val: _ScalarLike_co) -> Any: ...
    @overload
    def __call__(self, val: poly1d) -> poly1d: ...
    @overload
    def __call__(self, val: ArrayLike) -> NDArray[Any]: ...
    def __init__(
        self,
        c_or_r: ArrayLike,
        r: bool = ...,
        variable: None | str = ...,
    ) -> None: ...
    def __len__(self) -> int: ...
    def __neg__(self) -> poly1d: ...
    def __pos__(self) -> poly1d: ...
    def __mul__(self, other: ArrayLike) -> poly1d: ...
    def __rmul__(self, other: ArrayLike) -> poly1d: ...
    def __add__(self, other: ArrayLike) -> poly1d: ...
    def __radd__(self, other: ArrayLike) -> poly1d: ...
    def __pow__(self, val: _FloatLike_co) -> poly1d: ...  # Integral floats are accepted
    def __sub__(self, other: ArrayLike) -> poly1d: ...
    def __rsub__(self, other: ArrayLike) -> poly1d: ...
    def __div__(self, other: ArrayLike) -> poly1d: ...
    def __truediv__(self, other: ArrayLike) -> poly1d: ...
    def __rdiv__(self, other: ArrayLike) -> poly1d: ...
    def __rtruediv__(self, other: ArrayLike) -> poly1d: ...
    def __getitem__(self, val: int) -> Any: ...
    def __setitem__(self, key: int, val: Any) -> None: ...
    def __iter__(self) -> Iterator[Any]: ...
    def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ...
    def integ(
        self,
        m: SupportsInt | SupportsIndex = ...,
        k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
    ) -> poly1d: ...
class matrix(ndarray[_ShapeType, _DType_co]):
    # Stub for np.matrix: the (legacy) always-2-D ndarray subclass.
    # Reductions follow a common overload pattern: axis=None collapses to a
    # scalar-ish value, an explicit axis preserves the matrix type, and an
    # `out` array returns that array's own subclass.
    __array_priority__: ClassVar[float]
    def __new__(
        subtype,
        data: ArrayLike,
        dtype: DTypeLike = ...,
        copy: bool = ...,
    ) -> matrix[Any, Any]: ...
    def __array_finalize__(self, obj: object) -> None: ...
    @overload
    def __getitem__(self, key: (
        SupportsIndex
        | _ArrayLikeInt_co
        | tuple[SupportsIndex | _ArrayLikeInt_co, ...]
    )) -> Any: ...
    @overload
    def __getitem__(self, key: (
        None
        | slice
        | ellipsis
        | SupportsIndex
        | _ArrayLikeInt_co
        | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
    )) -> matrix[Any, _DType_co]: ...
    @overload
    def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ...
    @overload
    def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ...
    # For matrix, `*` means matrix multiplication (not elementwise).
    def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ...
    def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ...
    def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ...
    def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ...
    def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ...
    @overload
    def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
    @overload
    def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
    @overload
    def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
    @overload
    def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
    @overload
    def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ...
    @overload
    def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ...
    @overload
    def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ...
    @overload
    def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ...
    @overload
    def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ...
    @overload
    def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ...
    @overload
    def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
    @overload
    def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
    @overload
    def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def any(self, axis: None = ..., out: None = ...) -> bool_: ...
    @overload
    def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ...
    @overload
    def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def all(self, axis: None = ..., out: None = ...) -> bool_: ...
    @overload
    def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ...
    @overload
    def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
    @overload
    def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
    @overload
    def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
    @overload
    def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
    @overload
    def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ...
    @overload
    def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ...
    @overload
    def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ...
    @overload
    def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ...
    @overload
    def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    @overload
    def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
    @overload
    def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
    @overload
    def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
    def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ...
    def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> list[list[_T]]: ...  # type: ignore[typevar]
    def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ...
    def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ...
    # Short-hand properties: T (transpose), I (inverse), A (as 2-D ndarray),
    # A1 (flattened ndarray), H (conjugate transpose) — plus getX() equivalents.
    @property
    def T(self) -> matrix[Any, _DType_co]: ...
    @property
    def I(self) -> matrix[Any, Any]: ...
    @property
    def A(self) -> ndarray[_ShapeType, _DType_co]: ...
    @property
    def A1(self) -> ndarray[Any, _DType_co]: ...
    @property
    def H(self) -> matrix[Any, _DType_co]: ...
    def getT(self) -> matrix[Any, _DType_co]: ...
    def getI(self) -> matrix[Any, Any]: ...
    def getA(self) -> ndarray[_ShapeType, _DType_co]: ...
    def getA1(self) -> ndarray[Any, _DType_co]: ...
    def getH(self) -> matrix[Any, _DType_co]: ...
# Helper typevars/alias for chararray: constrained to the two string scalar
# types (str_ / bytes_) and their dtypes.
_CharType = TypeVar("_CharType", str_, bytes_)
_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_])
_CharArray = chararray[Any, dtype[_CharType]]
class chararray(ndarray[_ShapeType, _CharDType]):
    # Stub for np.char.chararray: ndarray of str_/bytes_ with vectorized
    # string methods.  Nearly every method is overloaded twice — once for
    # str_ arrays (taking str-like arguments) and once for bytes_ arrays
    # (taking bytes-like arguments) — so mixed str/bytes use is rejected.
    @overload
    def __new__(
        subtype,
        shape: _ShapeLike,
        itemsize: SupportsIndex | SupportsInt = ...,
        unicode: L[False] = ...,
        buffer: _SupportsBuffer = ...,
        offset: SupportsIndex = ...,
        strides: _ShapeLike = ...,
        order: _OrderKACF = ...,
    ) -> chararray[Any, dtype[bytes_]]: ...
    @overload
    def __new__(
        subtype,
        shape: _ShapeLike,
        itemsize: SupportsIndex | SupportsInt = ...,
        unicode: L[True] = ...,
        buffer: _SupportsBuffer = ...,
        offset: SupportsIndex = ...,
        strides: _ShapeLike = ...,
        order: _OrderKACF = ...,
    ) -> chararray[Any, dtype[str_]]: ...
    def __array_finalize__(self, obj: object) -> None: ...
    def __mul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
    def __rmul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
    def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ...
    # Elementwise comparisons yield boolean arrays.
    @overload
    def __eq__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __eq__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __ne__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __ne__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __ge__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __ge__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __le__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __le__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __gt__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __gt__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __lt__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> NDArray[bool_]: ...
    @overload
    def __lt__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> NDArray[bool_]: ...
    # Elementwise concatenation preserves the character kind.
    @overload
    def __add__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> _CharArray[str_]: ...
    @overload
    def __add__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> _CharArray[bytes_]: ...
    @overload
    def __radd__(
        self: _CharArray[str_],
        other: _ArrayLikeStr_co,
    ) -> _CharArray[str_]: ...
    @overload
    def __radd__(
        self: _CharArray[bytes_],
        other: _ArrayLikeBytes_co,
    ) -> _CharArray[bytes_]: ...
    # Vectorized counterparts of the str/bytes methods of the same names.
    @overload
    def center(
        self: _CharArray[str_],
        width: _ArrayLikeInt_co,
        fillchar: _ArrayLikeStr_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def center(
        self: _CharArray[bytes_],
        width: _ArrayLikeInt_co,
        fillchar: _ArrayLikeBytes_co = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def count(
        self: _CharArray[str_],
        sub: _ArrayLikeStr_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def count(
        self: _CharArray[bytes_],
        sub: _ArrayLikeBytes_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    # decode/encode convert between the bytes_ and str_ flavors.
    def decode(
        self: _CharArray[bytes_],
        encoding: None | str = ...,
        errors: None | str = ...,
    ) -> _CharArray[str_]: ...
    def encode(
        self: _CharArray[str_],
        encoding: None | str = ...,
        errors: None | str = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def endswith(
        self: _CharArray[str_],
        suffix: _ArrayLikeStr_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[bool_]: ...
    @overload
    def endswith(
        self: _CharArray[bytes_],
        suffix: _ArrayLikeBytes_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[bool_]: ...
    def expandtabs(
        self,
        tabsize: _ArrayLikeInt_co = ...,
    ) -> chararray[Any, _CharDType]: ...
    @overload
    def find(
        self: _CharArray[str_],
        sub: _ArrayLikeStr_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def find(
        self: _CharArray[bytes_],
        sub: _ArrayLikeBytes_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def index(
        self: _CharArray[str_],
        sub: _ArrayLikeStr_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def index(
        self: _CharArray[bytes_],
        sub: _ArrayLikeBytes_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def join(
        self: _CharArray[str_],
        seq: _ArrayLikeStr_co,
    ) -> _CharArray[str_]: ...
    @overload
    def join(
        self: _CharArray[bytes_],
        seq: _ArrayLikeBytes_co,
    ) -> _CharArray[bytes_]: ...
    @overload
    def ljust(
        self: _CharArray[str_],
        width: _ArrayLikeInt_co,
        fillchar: _ArrayLikeStr_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def ljust(
        self: _CharArray[bytes_],
        width: _ArrayLikeInt_co,
        fillchar: _ArrayLikeBytes_co = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def lstrip(
        self: _CharArray[str_],
        chars: None | _ArrayLikeStr_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def lstrip(
        self: _CharArray[bytes_],
        chars: None | _ArrayLikeBytes_co = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def partition(
        self: _CharArray[str_],
        sep: _ArrayLikeStr_co,
    ) -> _CharArray[str_]: ...
    @overload
    def partition(
        self: _CharArray[bytes_],
        sep: _ArrayLikeBytes_co,
    ) -> _CharArray[bytes_]: ...
    @overload
    def replace(
        self: _CharArray[str_],
        old: _ArrayLikeStr_co,
        new: _ArrayLikeStr_co,
        count: None | _ArrayLikeInt_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def replace(
        self: _CharArray[bytes_],
        old: _ArrayLikeBytes_co,
        new: _ArrayLikeBytes_co,
        count: None | _ArrayLikeInt_co = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def rfind(
        self: _CharArray[str_],
        sub: _ArrayLikeStr_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def rfind(
        self: _CharArray[bytes_],
        sub: _ArrayLikeBytes_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def rindex(
        self: _CharArray[str_],
        sub: _ArrayLikeStr_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def rindex(
        self: _CharArray[bytes_],
        sub: _ArrayLikeBytes_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[int_]: ...
    @overload
    def rjust(
        self: _CharArray[str_],
        width: _ArrayLikeInt_co,
        fillchar: _ArrayLikeStr_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def rjust(
        self: _CharArray[bytes_],
        width: _ArrayLikeInt_co,
        fillchar: _ArrayLikeBytes_co = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def rpartition(
        self: _CharArray[str_],
        sep: _ArrayLikeStr_co,
    ) -> _CharArray[str_]: ...
    @overload
    def rpartition(
        self: _CharArray[bytes_],
        sep: _ArrayLikeBytes_co,
    ) -> _CharArray[bytes_]: ...
    # split/rsplit return object arrays because element lists are ragged.
    @overload
    def rsplit(
        self: _CharArray[str_],
        sep: None | _ArrayLikeStr_co = ...,
        maxsplit: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[object_]: ...
    @overload
    def rsplit(
        self: _CharArray[bytes_],
        sep: None | _ArrayLikeBytes_co = ...,
        maxsplit: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[object_]: ...
    @overload
    def rstrip(
        self: _CharArray[str_],
        chars: None | _ArrayLikeStr_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def rstrip(
        self: _CharArray[bytes_],
        chars: None | _ArrayLikeBytes_co = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def split(
        self: _CharArray[str_],
        sep: None | _ArrayLikeStr_co = ...,
        maxsplit: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[object_]: ...
    @overload
    def split(
        self: _CharArray[bytes_],
        sep: None | _ArrayLikeBytes_co = ...,
        maxsplit: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[object_]: ...
    def splitlines(self, keepends: None | _ArrayLikeBool_co = ...) -> NDArray[object_]: ...
    @overload
    def startswith(
        self: _CharArray[str_],
        prefix: _ArrayLikeStr_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[bool_]: ...
    @overload
    def startswith(
        self: _CharArray[bytes_],
        prefix: _ArrayLikeBytes_co,
        start: _ArrayLikeInt_co = ...,
        end: None | _ArrayLikeInt_co = ...,
    ) -> NDArray[bool_]: ...
    @overload
    def strip(
        self: _CharArray[str_],
        chars: None | _ArrayLikeStr_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def strip(
        self: _CharArray[bytes_],
        chars: None | _ArrayLikeBytes_co = ...,
    ) -> _CharArray[bytes_]: ...
    @overload
    def translate(
        self: _CharArray[str_],
        table: _ArrayLikeStr_co,
        deletechars: None | _ArrayLikeStr_co = ...,
    ) -> _CharArray[str_]: ...
    @overload
    def translate(
        self: _CharArray[bytes_],
        table: _ArrayLikeBytes_co,
        deletechars: None | _ArrayLikeBytes_co = ...,
    ) -> _CharArray[bytes_]: ...
    def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
    # Case transformations preserve shape and character kind.
    def capitalize(self) -> chararray[_ShapeType, _CharDType]: ...
    def title(self) -> chararray[_ShapeType, _CharDType]: ...
    def swapcase(self) -> chararray[_ShapeType, _CharDType]: ...
    def lower(self) -> chararray[_ShapeType, _CharDType]: ...
    def upper(self) -> chararray[_ShapeType, _CharDType]: ...
    # Elementwise predicates yield boolean arrays of the same shape.
    def isalnum(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def isalpha(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def isdigit(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def islower(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def isspace(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def istitle(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def isupper(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def isnumeric(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
    def isdecimal(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
# NOTE: Deprecated
# class MachAr: ...

# Structural protocol for objects exporting the DLPack tensor interchange
# capsule; used to type np.from_dlpack's argument.
class _SupportsDLPack(Protocol[_T_contra]):
    def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ...

def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ...
| 150,723 | unknown | 33.201044 | 191 | 0.57881 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/__config__.py | # This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]

import os
import sys

# On Windows, register the bundled '.libs' directory so the OpenBLAS DLLs
# shipped inside the wheel can be found by the loader.
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')

if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
    os.add_dll_directory(extra_dll_dir)

# Build-time BLAS/LAPACK configuration dictionaries, baked in by setup.py.
# The library_dirs reflect the *build* machine's paths, not the install.
openblas64__info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__info'], 'libraries': ['openblas64__info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)]}
blas_ilp64_opt_info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__info'], 'libraries': ['openblas64__info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)]}
openblas64__lapack_info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__lapack_info'], 'libraries': ['openblas64__lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)]}
lapack_ilp64_opt_info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__lapack_info'], 'libraries': ['openblas64__lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)]}
def get_info(name):
    """Return the build-time configuration dict registered under *name*.

    Looks *name* up among this module's globals; if absent, falls back to
    ``name + "_info"``.  Returns an empty dict when neither exists.
    """
    namespace = globals()
    fallback = namespace.get(name + "_info", {})
    return namespace.get(name, fallback)
def show():
    """
    Show libraries in the system on which NumPy was built.
    Print information about various resources (libraries, library
    directories, include directories, etc.) in the system on which
    NumPy was built.
    See Also
    --------
    get_include : Returns the directory containing NumPy C
                  header files.
    Notes
    -----
    1. Classes specifying the information to be printed are defined
       in the `numpy.distutils.system_info` module.
       Information may include:
       * ``language``: language used to write the libraries (mostly
         C or f77)
       * ``libraries``: names of libraries found in the system
       * ``library_dirs``: directories containing the libraries
       * ``include_dirs``: directories containing library header files
       * ``src_dirs``: directories containing library source files
       * ``define_macros``: preprocessor macros used by
         ``distutils.setup``
       * ``baseline``: minimum CPU features required
       * ``found``: dispatched features supported in the system
       * ``not found``: dispatched features that are not supported
         in the system
    2. NumPy BLAS/LAPACK Installation Notes
       Installing a numpy wheel (``pip install numpy`` or force it
       via ``pip install numpy --only-binary :numpy: numpy``) includes
       an OpenBLAS implementation of the BLAS and LAPACK linear algebra
       APIs. In this case, ``library_dirs`` reports the original build
       time configuration as compiled with gcc/gfortran; at run time
       the OpenBLAS library is in
       ``site-packages/numpy.libs/`` (linux), or
       ``site-packages/numpy/.dylibs/`` (macOS), or
       ``site-packages/numpy/.libs/`` (windows).
       Installing numpy from source
       (``pip install numpy --no-binary numpy``) searches for BLAS and
       LAPACK dynamic link libraries at build time as influenced by
       environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
       NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
       or the optional file ``~/.numpy-site.cfg``.
       NumPy remembers those locations and expects to load the same
       libraries at run-time.
       In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
       library) is in the default build-time search order after
       'openblas'.
    Examples
    --------
    >>> import numpy as np
    >>> np.show_config()
    blas_opt_info:
        language = c
        define_macros = [('HAVE_CBLAS', None)]
        libraries = ['openblas', 'openblas']
        library_dirs = ['/usr/local/lib']
    """
    from numpy.core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )
    # Dump every public module-level dict (the *_info configs defined above);
    # private names and non-dict globals are skipped.
    for name,info_dict in globals().items():
        if name[0] == "_" or type(info_dict) is not type({}): continue
        print(name + ":")
        if not info_dict:
            print("  NOT AVAILABLE")
        for k,v in info_dict.items():
            v = str(v)
            # Truncate very long source lists to keep the output readable.
            if k == "sources" and len(v) > 200:
                v = v[:60] + " ...\n... " + v[-60:]
            print("    %s = %s" % (k,v))
    # Partition the dispatched SIMD features by whether this CPU supports them.
    features_found, features_not_found = [], []
    for feature in __cpu_dispatch__:
        if __cpu_features__[feature]:
            features_found.append(feature)
        else:
            features_not_found.append(feature)
    print("Supported SIMD extensions in this NumPy install:")
    print("    baseline = %s" % (','.join(__cpu_baseline__)))
    print("    found = %s" % (','.join(features_found)))
    print("    not found = %s" % (','.join(features_not_found)))
| 5,083 | Python | 42.827586 | 281 | 0.616762 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_philox.pyi | from typing import Any, TypedDict
from numpy import dtype, ndarray, uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _PhiloxInternal(TypedDict):
    # Inner counter/key pair of the Philox bit generator's state dict.
    counter: ndarray[Any, dtype[uint64]]
    key: ndarray[Any, dtype[uint64]]
class _PhiloxState(TypedDict):
    # Full serializable state dict exposed via Philox.state.
    bit_generator: str
    state: _PhiloxInternal
    buffer: ndarray[Any, dtype[uint64]]
    buffer_pos: int
    has_uint32: int
    uinteger: int
class Philox(BitGenerator):
    # Stub for the Philox (4x64) counter-based bit generator.
    def __init__(
        self,
        seed: None | _ArrayLikeInt_co | SeedSequence = ...,
        counter: None | _ArrayLikeInt_co = ...,
        key: None | _ArrayLikeInt_co = ...,
    ) -> None: ...
    @property
    def state(
        self,
    ) -> _PhiloxState: ...
    @state.setter
    def state(
        self,
        value: _PhiloxState,
    ) -> None: ...
    # jumped/advance return new/updated generators moved along the stream.
    def jumped(self, jumps: int = ...) -> Philox: ...
    def advance(self, delta: int) -> Philox: ...
| 978 | unknown | 25.459459 | 65 | 0.614519 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_mt19937.pyi | from typing import Any, TypedDict
from numpy import dtype, ndarray, uint32
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _MT19937Internal(TypedDict):
    # Inner key/position pair of the Mersenne Twister state dict.
    key: ndarray[Any, dtype[uint32]]
    pos: int
class _MT19937State(TypedDict):
    # Full serializable state dict exposed via MT19937.state.
    bit_generator: str
    state: _MT19937Internal
class MT19937(BitGenerator):
    # Stub for the MT19937 (Mersenne Twister) bit generator.
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    # Seeding path kept for compatibility with the legacy RandomState API.
    def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
    def jumped(self, jumps: int = ...) -> MT19937: ...
    @property
    def state(self) -> _MT19937State: ...
    @state.setter
    def state(self, value: _MT19937State) -> None: ...
| 724 | unknown | 30.521738 | 87 | 0.679558 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/bit_generator.pyi | import abc
from threading import Lock
from collections.abc import Callable, Mapping, Sequence
from typing import (
Any,
NamedTuple,
TypedDict,
TypeVar,
Union,
overload,
Literal,
)
from numpy import dtype, ndarray, uint32, uint64
from numpy._typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes
_T = TypeVar("_T")

# Dtype-like aliases restricted to uint32 / uint64, as accepted by
# SeedSequence.generate_state's `dtype` argument.
_DTypeLikeUint32 = Union[
    dtype[uint32],
    _SupportsDType[dtype[uint32]],
    type[uint32],
    _UInt32Codes,
]
_DTypeLikeUint64 = Union[
    dtype[uint64],
    _SupportsDType[dtype[uint64]],
    type[uint64],
    _UInt64Codes,
]
class _SeedSeqState(TypedDict):
    # Serializable state dict exposed via SeedSequence.state.
    entropy: None | int | Sequence[int]
    spawn_key: tuple[int, ...]
    pool_size: int
    n_children_spawned: int
class _Interface(NamedTuple):
    # Shape of the low-level function-pointer bundle returned by
    # BitGenerator.ctypes / BitGenerator.cffi; members are FFI handles.
    state_address: Any
    state: Any
    next_uint64: Any
    next_uint32: Any
    next_double: Any
    bit_generator: Any
class ISeedSequence(abc.ABC):
    # Abstract interface: anything that can generate seed words on demand.
    @abc.abstractmethod
    def generate_state(
        self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
    ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
class ISpawnableSeedSequence(ISeedSequence):
    """Seed sequence that can additionally spawn child sequences."""
    @abc.abstractmethod
    def spawn(self: _T, n_children: int) -> list[_T]: ...
class SeedlessSeedSequence(ISpawnableSeedSequence):
    """Concrete spawnable sequence stub — per the name, presumably one that
    carries no entropy input; confirm against bit_generator.pyx."""
    def generate_state(
        self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
    ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
    def spawn(self: _T, n_children: int) -> list[_T]: ...
class SeedSequence(ISpawnableSeedSequence):
    """Stub for ``numpy.random.SeedSequence``."""
    # Construction inputs, also surfaced through the ``state`` property.
    entropy: None | int | Sequence[int]
    spawn_key: tuple[int, ...]
    pool_size: int
    n_children_spawned: int
    # uint32 entropy pool array.
    pool: ndarray[Any, dtype[uint32]]
    def __init__(
        self,
        entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ...,
        *,
        spawn_key: Sequence[int] = ...,
        pool_size: int = ...,
        n_children_spawned: int = ...,
    ) -> None: ...
    def __repr__(self) -> str: ...
    @property
    def state(
        self,
    ) -> _SeedSeqState: ...
    # Produce ``n_words`` words of the requested unsigned dtype.
    def generate_state(
        self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
    ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
    # Derive ``n_children`` independent child sequences from this one.
    def spawn(self, n_children: int) -> list[SeedSequence]: ...
class BitGenerator(abc.ABC):
    """Abstract base stub for all bit generators (MT19937, PCG64, Philox, ...)."""
    lock: Lock
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    # Pickle support: state round-trips through a plain dict, and __reduce__
    # reconstructs via a callable taking the generator's string name.
    def __getstate__(self) -> dict[str, Any]: ...
    def __setstate__(self, state: dict[str, Any]) -> None: ...
    def __reduce__(
        self,
    ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ...
    # NOTE(review): decorator order is @abc.abstractmethod above @property;
    # type checkers normally expect @property outermost — confirm intended.
    @abc.abstractmethod
    @property
    def state(self) -> Mapping[str, Any]: ...
    @state.setter
    def state(self, value: Mapping[str, Any]) -> None: ...
    # Overloads: with output=True the result is an int (size=None) or a uint64
    # ndarray (size given); with output=False nothing is returned.
    @overload
    def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ...  # type: ignore[misc]
    @overload
    def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ...  # type: ignore[misc]
    @overload
    def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ...  # type: ignore[misc]
    def _benchmark(self, cnt: int, method: str = ...) -> None: ...
    # Low-level native interface views (state address + next_* function pointers).
    @property
    def ctypes(self) -> _Interface: ...
    @property
    def cffi(self) -> _Interface: ...
| 3,387 | unknown | 29.8 | 135 | 0.604074 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_generator.pyi | from collections.abc import Callable
from typing import Any, Union, overload, TypeVar, Literal
from numpy import (
bool_,
dtype,
float32,
float64,
int8,
int16,
int32,
int64,
int_,
ndarray,
uint,
uint8,
uint16,
uint32,
uint64,
)
from numpy.random import BitGenerator, SeedSequence
from numpy._typing import (
ArrayLike,
_ArrayLikeFloat_co,
_ArrayLikeInt_co,
_DoubleCodes,
_DTypeLikeBool,
_DTypeLikeInt,
_DTypeLikeUInt,
_Float32Codes,
_Float64Codes,
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_IntCodes,
_ShapeLike,
_SingleCodes,
_SupportsDType,
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_UIntCodes,
)
_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
# dtype-like specifiers accepted where float32 output is requested.
_DTypeLikeFloat32 = Union[
    dtype[float32],
    _SupportsDType[dtype[float32]],
    type[float32],
    _Float32Codes,
    _SingleCodes,
]
# dtype-like specifiers accepted where float64 output is requested
# (note that plain builtin ``float`` is included here).
_DTypeLikeFloat64 = Union[
    dtype[float64],
    _SupportsDType[dtype[float64]],
    type[float],
    type[float64],
    _Float64Codes,
    _DoubleCodes,
]
class Generator:
def __init__(self, bit_generator: BitGenerator) -> None: ...
def __repr__(self) -> str: ...
def __str__(self) -> str: ...
def __getstate__(self) -> dict[str, Any]: ...
def __setstate__(self, state: dict[str, Any]) -> None: ...
def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ...
@property
def bit_generator(self) -> BitGenerator: ...
def bytes(self, length: int) -> bytes: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
) -> float: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
*,
out: ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
out: None | ndarray[Any, dtype[float32]] = ...,
) -> ndarray[Any, dtype[float32]]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ...
@overload
def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ...
@overload
def standard_exponential( # type: ignore[misc]
self,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
method: Literal["zig", "inv"] = ...,
out: None = ...,
) -> float: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_exponential(
self,
*,
out: ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
*,
method: Literal["zig", "inv"] = ...,
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
method: Literal["zig", "inv"] = ...,
out: None | ndarray[Any, dtype[float32]] = ...,
) -> ndarray[Any, dtype[float32]]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
method: Literal["zig", "inv"] = ...,
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def random( # type: ignore[misc]
self,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
) -> float: ...
@overload
def random(
self,
*,
out: ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
*,
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
out: None | ndarray[Any, dtype[float32]] = ...,
) -> ndarray[Any, dtype[float32]]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def beta(
self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def exponential(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
) -> int: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: _DTypeLikeBool = ...,
endpoint: bool = ...,
) -> bool: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
endpoint: bool = ...,
) -> int: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[int64]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: _DTypeLikeBool = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[bool_]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[int8]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[int16]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[int32]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[int64]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[uint8]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[uint16]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[uint32]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[uint64]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[int_]]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
endpoint: bool = ...,
) -> ndarray[Any, dtype[uint]]: ...
# TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any]
@overload
def choice(
self,
a: int,
size: None = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> int: ...
@overload
def choice(
self,
a: int,
size: _ShapeLike = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> ndarray[Any, dtype[int64]]: ...
@overload
def choice(
self,
a: ArrayLike,
size: None = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> Any: ...
@overload
def choice(
self,
a: ArrayLike,
size: _ShapeLike = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> ndarray[Any, Any]: ...
@overload
def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def uniform(
self,
low: _ArrayLikeFloat_co = ...,
high: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def normal(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_gamma( # type: ignore[misc]
self,
shape: float,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
) -> float: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
*,
out: ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
out: None | ndarray[Any, dtype[float32]] = ...,
) -> ndarray[Any, dtype[float32]]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def gamma(
self,
shape: _ArrayLikeFloat_co,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def f(
self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_f(
self,
dfnum: _ArrayLikeFloat_co,
dfden: _ArrayLikeFloat_co,
nonc: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def chisquare(
self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_chisquare(
self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: None = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def vonmises(
self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def pareto(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def weibull(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def power(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
@overload
def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def laplace(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def gumbel(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def logistic(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def lognormal(
self,
mean: _ArrayLikeFloat_co = ...,
sigma: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def rayleigh(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def wald(
self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def triangular(
self,
left: _ArrayLikeFloat_co,
mode: _ArrayLikeFloat_co,
right: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def binomial(
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def negative_binomial(
self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
@overload
def poisson(
self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def zipf(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def geometric(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def hypergeometric(
self,
ngood: _ArrayLikeInt_co,
nbad: _ArrayLikeInt_co,
nsample: _ArrayLikeInt_co,
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[int64]]: ...
@overload
def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def logseries(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
def multivariate_normal(
self,
mean: _ArrayLikeFloat_co,
cov: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
check_valid: Literal["warn", "raise", "ignore"] = ...,
tol: float = ...,
*,
method: Literal["svd", "eigh", "cholesky"] = ...,
) -> ndarray[Any, dtype[float64]]: ...
def multinomial(
self, n: _ArrayLikeInt_co,
pvals: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
def multivariate_hypergeometric(
self,
colors: _ArrayLikeInt_co,
nsample: int,
size: None | _ShapeLike = ...,
method: Literal["marginals", "count"] = ...,
) -> ndarray[Any, dtype[int64]]: ...
def dirichlet(
self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
def permuted(
self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ...
) -> ndarray[Any, Any]: ...
def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ...
# Module-level convenience constructor: build a Generator from any accepted
# seed form (None, int array-likes, SeedSequence, an existing BitGenerator,
# or an existing Generator).
def default_rng(
    seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ...
) -> Generator: ...
| 21,682 | unknown | 32.932707 | 144 | 0.52592 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/__init__.py | """
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator Class implementing all of the random number distributions
default_rng Default constructor for ``Generator``
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
PCG64DXSM
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with previous versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
random_integers Uniformly distributed integers in a given range.
(deprecated, use ``integers(..., closed=True)`` instead)
random_sample        Alias for `random`
randint Uniformly distributed integers in a given range
seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
__all__ = [
'beta',
'binomial',
'bytes',
'chisquare',
'choice',
'dirichlet',
'exponential',
'f',
'gamma',
'geometric',
'get_state',
'gumbel',
'hypergeometric',
'laplace',
'logistic',
'lognormal',
'logseries',
'multinomial',
'multivariate_normal',
'negative_binomial',
'noncentral_chisquare',
'noncentral_f',
'normal',
'pareto',
'permutation',
'poisson',
'power',
'rand',
'randint',
'randn',
'random',
'random_integers',
'random_sample',
'ranf',
'rayleigh',
'sample',
'seed',
'set_state',
'shuffle',
'standard_cauchy',
'standard_exponential',
'standard_gamma',
'standard_normal',
'standard_t',
'triangular',
'uniform',
'vonmises',
'wald',
'weibull',
'zipf',
]
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
from . import _common
from . import _bounded_integers
from ._generator import Generator, default_rng
from .bit_generator import SeedSequence, BitGenerator
from ._mt19937 import MT19937
from ._pcg64 import PCG64, PCG64DXSM
from ._philox import Philox
from ._sfc64 import SFC64
from .mtrand import *
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
'BitGenerator']
def __RandomState_ctor():
    """Create a fresh ``RandomState`` for (un)pickling support.

    The particular state of the returned object is irrelevant: unpickling
    only needs a newly allocated ``RandomState`` whose state the pickle
    machinery can then overwrite.  ``seed=0`` simply makes the allocation
    deterministic.

    See https://github.com/numpy/numpy/issues/4763 for a detailed discussion.
    """
    return RandomState(seed=0)
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| 7,506 | Python | 33.754629 | 81 | 0.493472 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_pickle.py | from .mtrand import RandomState
from ._philox import Philox
from ._pcg64 import PCG64, PCG64DXSM
from ._sfc64 import SFC64
from ._generator import Generator
from ._mt19937 import MT19937
# Name -> class registry of every supported BitGenerator; the pickling
# helpers in this module look implementations up here by string name.
BitGenerators = {'MT19937': MT19937,
                 'PCG64': PCG64,
                 'PCG64DXSM': PCG64DXSM,
                 'Philox': Philox,
                 'SFC64': SFC64,
                 }
def __generator_ctor(bit_generator_name='MT19937'):
    """Pickle-support helper: build a ``Generator`` over a named BitGenerator.

    Parameters
    ----------
    bit_generator_name : str
        Name of the core BitGenerator class (a key of ``BitGenerators``).

    Returns
    -------
    Generator
        A freshly constructed Generator wrapping the named bit generator.

    Raises
    ------
    ValueError
        If ``bit_generator_name`` is not a registered BitGenerator.
    """
    bit_generator = BitGenerators.get(bit_generator_name)
    if bit_generator is None:
        raise ValueError(str(bit_generator_name)
                         + ' is not a known BitGenerator module.')
    return Generator(bit_generator())
def __bit_generator_ctor(bit_generator_name='MT19937'):
    """Pickle-support helper: instantiate a BitGenerator by name.

    Parameters
    ----------
    bit_generator_name : str
        Name of the BitGenerator class (a key of ``BitGenerators``).

    Returns
    -------
    BitGenerator
        A freshly constructed instance of the named class.

    Raises
    ------
    ValueError
        If ``bit_generator_name`` is not a registered BitGenerator.
    """
    bit_generator = BitGenerators.get(bit_generator_name)
    if bit_generator is None:
        raise ValueError(str(bit_generator_name)
                         + ' is not a known BitGenerator module.')
    return bit_generator()
def __randomstate_ctor(bit_generator_name='MT19937'):
    """Pickle-support helper: build a legacy ``RandomState`` over a named
    BitGenerator.

    Parameters
    ----------
    bit_generator_name : str
        Name of the core BitGenerator class (a key of ``BitGenerators``).

    Returns
    -------
    RandomState
        Legacy RandomState wrapping the named bit generator.

    Raises
    ------
    ValueError
        If ``bit_generator_name`` is not a registered BitGenerator.
    """
    bit_generator = BitGenerators.get(bit_generator_name)
    if bit_generator is None:
        raise ValueError(str(bit_generator_name)
                         + ' is not a known BitGenerator module.')
    return RandomState(bit_generator())
| 2,305 | Python | 26.452381 | 74 | 0.597831 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/setup.py | import os
import platform
import sys
from os.path import join
from numpy.distutils.system_info import platform_bits
# True when running on Windows with a Microsoft (MSVC) Python build;
# used below to select MSVC-specific compile flags.
is_msvc = (platform.platform().startswith('Windows') and
           platform.python_compiler().startswith('MS'))
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``numpy.random`` subpackage.

    Builds the ``npyrandom`` static library, the Cython-generated bit
    generator extensions, ``_generator``/``_bounded_integers`` and the
    legacy ``mtrand`` extension, and registers test/data directories.

    Parameters
    ----------
    parent_package : str
        Dotted name of the parent package (forwarded to ``Configuration``).
    top_path : str or None
        Repository root path (forwarded to ``Configuration``).

    Returns
    -------
    Configuration
        The populated configuration object.
    """
    from numpy.distutils.misc_util import Configuration, get_mathlibs
    config = Configuration('random', parent_package, top_path)
    def generate_libraries(ext, build_dir):
        # Deferred hook that appends the platform math libraries (plus the
        # Windows crypto/system libs) to an extension at build time.
        # NOTE(review): not referenced anywhere in this visible chunk --
        # confirm whether it is still used upstream.
        config_cmd = config.get_config_cmd()  # NOTE(review): appears unused here
        libs = get_mathlibs()
        if sys.platform == 'win32':
            libs.extend(['Advapi32', 'Kernel32'])
        ext.libraries.extend(libs)
        return None
    # enable unix large file support on 32 bit systems
    # (64 bit off_t, lseek -> lseek64 etc.)
    if sys.platform[:3] == 'aix':
        defs = [('_LARGE_FILES', None)]
    else:
        defs = [('_FILE_OFFSET_BITS', '64'),
                ('_LARGEFILE_SOURCE', '1'),
                ('_LARGEFILE64_SOURCE', '1')]
    defs.append(('NPY_NO_DEPRECATED_API', 0))
    config.add_subpackage('tests')
    config.add_data_dir('tests/data')
    config.add_data_dir('_examples')
    # Compiler/linker flags shared by every extension below.
    EXTRA_LINK_ARGS = []
    EXTRA_LIBRARIES = ['npyrandom']
    if os.name != 'nt':
        # Math lib
        EXTRA_LIBRARIES.append('m')
    # Some bit generators exclude GCC inlining
    EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__']
    # is_msvc / platform_bits are module-level values defined outside this view.
    if is_msvc and platform_bits == 32:
        # 32-bit windows requires explicit sse2 option
        EXTRA_COMPILE_ARGS += ['/arch:SSE2']
    elif not is_msvc:
        # Some bit generators require c99
        EXTRA_COMPILE_ARGS += ['-std=c99']
    if sys.platform == 'cygwin':
        # Export symbols without __declspec(dllexport) for using by cython.
        # Using __declspec(dllexport) does not export other necessary symbols
        # in Cygwin package's Cython environment, making it impossible to
        # import modules.
        EXTRA_LINK_ARGS += ['-Wl,--export-all-symbols']
    # Use legacy integer variable sizes
    LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
    PCG64_DEFS = []
    # One can force emulated 128-bit arithmetic if one wants.
    #PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
    depends = ['__init__.pxd', 'c_distributions.pxd', 'bit_generator.pxd']
    # npyrandom - a library like npymath
    npyrandom_sources = [
        'src/distributions/logfactorial.c',
        'src/distributions/distributions.c',
        'src/distributions/random_mvhg_count.c',
        'src/distributions/random_mvhg_marginals.c',
        'src/distributions/random_hypergeometric.c',
    ]
    def gl_if_msvc(build_cmd):
        """ Add flag if we are using MSVC compiler
        We can't see this in our scope, because we have not initialized the
        distutils build command, so use this deferred calculation to run when
        we are building the library.
        """
        # Keep in sync with numpy/core/setup.py
        if build_cmd.compiler.compiler_type == 'msvc':
            # explicitly disable whole-program optimization
            return ['/GL-']
        return []
    config.add_installed_library('npyrandom',
                        sources=npyrandom_sources,
                        install_dir='lib',
                        build_info={
                            'include_dirs' : [],  # empty list required for creating npyrandom.h
                            'extra_compiler_args': [gl_if_msvc],
                        })
    # Bit generators that ship a jump-ahead implementation (extra -jump.c source).
    for gen in ['mt19937']:
        # gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
        config.add_extension(f'_{gen}',
                             sources=[f'_{gen}.c',
                                      f'src/{gen}/{gen}.c',
                                      f'src/{gen}/{gen}-jump.c'],
                             include_dirs=['.', 'src', join('src', gen)],
                             libraries=EXTRA_LIBRARIES,
                             extra_compile_args=EXTRA_COMPILE_ARGS,
                             extra_link_args=EXTRA_LINK_ARGS,
                             depends=depends + [f'_{gen}.pyx'],
                             define_macros=defs,
                             )
    # Single-source bit generators; pcg64 optionally gets its emulation macros.
    for gen in ['philox', 'pcg64', 'sfc64']:
        # gen.pyx, src/gen/gen.c
        _defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
        config.add_extension(f'_{gen}',
                             sources=[f'_{gen}.c',
                                      f'src/{gen}/{gen}.c'],
                             include_dirs=['.', 'src', join('src', gen)],
                             libraries=EXTRA_LIBRARIES,
                             extra_compile_args=EXTRA_COMPILE_ARGS,
                             extra_link_args=EXTRA_LINK_ARGS,
                             depends=depends + [f'_{gen}.pyx',
                                                'bit_generator.pyx', 'bit_generator.pxd'],
                             define_macros=_defs,
                             )
    # Pure-Cython support modules (no extra C sources beyond the generated one).
    for gen in ['_common', 'bit_generator']:
        # gen.pyx
        config.add_extension(gen,
                             sources=[f'{gen}.c'],
                             libraries=EXTRA_LIBRARIES,
                             extra_compile_args=EXTRA_COMPILE_ARGS,
                             extra_link_args=EXTRA_LINK_ARGS,
                             include_dirs=['.', 'src'],
                             depends=depends + [f'{gen}.pyx', f'{gen}.pxd',],
                             define_macros=defs,
                             )
        config.add_data_files(f'{gen}.pxd')
    for gen in ['_generator', '_bounded_integers']:
        # gen.pyx, src/distributions/distributions.c
        config.add_extension(gen,
                             sources=[f'{gen}.c'],
                             libraries=EXTRA_LIBRARIES + ['npymath'],
                             extra_compile_args=EXTRA_COMPILE_ARGS,
                             include_dirs=['.', 'src'],
                             extra_link_args=EXTRA_LINK_ARGS,
                             depends=depends + [f'{gen}.pyx'],
                             define_macros=defs,
                             )
    config.add_data_files('_bounded_integers.pxd')
    # Legacy mtrand extension: links npymath directly and defines the legacy macros.
    mtrand_libs = ['m', 'npymath'] if os.name != 'nt' else ['npymath']
    config.add_extension('mtrand',
                         sources=['mtrand.c',
                                  'src/legacy/legacy-distributions.c',
                                  'src/distributions/distributions.c',
                                  ],
                         include_dirs=['.', 'src', 'src/legacy'],
                         libraries=mtrand_libs,
                         extra_compile_args=EXTRA_COMPILE_ARGS,
                         extra_link_args=EXTRA_LINK_ARGS,
                         depends=depends + ['mtrand.pyx'],
                         define_macros=defs + LEGACY_DEFS,
                         )
    config.add_data_files(*depends)
    config.add_data_files('*.pyi')
    return config
# Allow building this subpackage standalone via numpy.distutils.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
| 6,998 | Python | 40.170588 | 80 | 0.507288 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_pcg64.pyi | from typing import TypedDict
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _PCG64Internal(TypedDict):
    """Inner ``state`` mapping of a PCG64 state dict."""
    # Raw generator state and increment, exposed as arbitrary-precision ints.
    state: int
    inc: int
class _PCG64State(TypedDict):
    """Full state dict returned/accepted by ``PCG64.state``."""
    bit_generator: str
    state: _PCG64Internal
    # Cached-half-word bookkeeping used when producing 32-bit outputs.
    has_uint32: int
    uinteger: int
class PCG64(BitGenerator):
    """Stub for the PCG64 bit generator (jump/advance-capable)."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    def jumped(self, jumps: int = ...) -> PCG64: ...
    @property
    def state(
        self,
    ) -> _PCG64State: ...
    @state.setter
    def state(
        self,
        value: _PCG64State,
    ) -> None: ...
    def advance(self, delta: int) -> PCG64: ...
class PCG64DXSM(BitGenerator):
    """Stub for the PCG64DXSM variant; same interface as ``PCG64``."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
    @property
    def state(
        self,
    ) -> _PCG64State: ...
    @state.setter
    def state(
        self,
        value: _PCG64State,
    ) -> None: ...
    def advance(self, delta: int) -> PCG64DXSM: ...
| 1,091 | unknown | 24.395348 | 87 | 0.588451 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/__init__.pyi | from numpy._pytesttester import PytestTester
from numpy.random._generator import Generator as Generator
from numpy.random._generator import default_rng as default_rng
from numpy.random._mt19937 import MT19937 as MT19937
from numpy.random._pcg64 import (
PCG64 as PCG64,
PCG64DXSM as PCG64DXSM,
)
from numpy.random._philox import Philox as Philox
from numpy.random._sfc64 import SFC64 as SFC64
from numpy.random.bit_generator import BitGenerator as BitGenerator
from numpy.random.bit_generator import SeedSequence as SeedSequence
from numpy.random.mtrand import (
RandomState as RandomState,
beta as beta,
binomial as binomial,
bytes as bytes,
chisquare as chisquare,
choice as choice,
dirichlet as dirichlet,
exponential as exponential,
f as f,
gamma as gamma,
geometric as geometric,
get_state as get_state,
gumbel as gumbel,
hypergeometric as hypergeometric,
laplace as laplace,
logistic as logistic,
lognormal as lognormal,
logseries as logseries,
multinomial as multinomial,
multivariate_normal as multivariate_normal,
negative_binomial as negative_binomial,
noncentral_chisquare as noncentral_chisquare,
noncentral_f as noncentral_f,
normal as normal,
pareto as pareto,
permutation as permutation,
poisson as poisson,
power as power,
rand as rand,
randint as randint,
randn as randn,
random as random,
random_integers as random_integers,
random_sample as random_sample,
ranf as ranf,
rayleigh as rayleigh,
sample as sample,
seed as seed,
set_state as set_state,
shuffle as shuffle,
standard_cauchy as standard_cauchy,
standard_exponential as standard_exponential,
standard_gamma as standard_gamma,
standard_normal as standard_normal,
standard_t as standard_t,
triangular as triangular,
uniform as uniform,
vonmises as vonmises,
wald as wald,
weibull as weibull,
zipf as zipf,
)
# Public-API name list, package search path, and the PytestTester entry point.
__all__: list[str]
__path__: list[str]
test: PytestTester
| 2,055 | unknown | 27.957746 | 67 | 0.726034 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/mtrand.pyi | from collections.abc import Callable
from typing import Any, Union, overload, Literal
from numpy import (
bool_,
dtype,
float32,
float64,
int8,
int16,
int32,
int64,
int_,
ndarray,
uint,
uint8,
uint16,
uint32,
uint64,
)
from numpy.random.bit_generator import BitGenerator
from numpy._typing import (
ArrayLike,
_ArrayLikeFloat_co,
_ArrayLikeInt_co,
_DoubleCodes,
_DTypeLikeBool,
_DTypeLikeInt,
_DTypeLikeUInt,
_Float32Codes,
_Float64Codes,
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_IntCodes,
_ShapeLike,
_SingleCodes,
_SupportsDType,
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_UIntCodes,
)
# dtype-like aliases accepted wherever a float32/float64 dtype argument is
# allowed (dtype objects, dtype-providing protocols, scalar types, char codes).
_DTypeLikeFloat32 = Union[
    dtype[float32],
    _SupportsDType[dtype[float32]],
    type[float32],
    _Float32Codes,
    _SingleCodes,
]
_DTypeLikeFloat64 = Union[
    dtype[float64],
    _SupportsDType[dtype[float64]],
    type[float],
    type[float64],
    _Float64Codes,
    _DoubleCodes,
]
class RandomState:
    """Type stub for the legacy ``numpy.random.RandomState`` generator.

    Heavy use of ``@overload`` encodes the scalar-vs-array convention:
    ``size=None`` with scalar parameters returns a Python scalar, while
    array-like parameters or an explicit ``size`` return an ndarray.
    """
    # Underlying bit generator (MT19937 by default at runtime).
    _bit_generator: BitGenerator
    def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ...
    def __repr__(self) -> str: ...
    def __str__(self) -> str: ...
    def __getstate__(self) -> dict[str, Any]: ...
    def __setstate__(self, state: dict[str, Any]) -> None: ...
    def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ...
    def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ...
    # legacy=True additionally allows the historical 5-element state tuple.
    @overload
    def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ...
    @overload
    def get_state(
        self, legacy: Literal[True] = ...
    ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ...
    def set_state(
        self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]
    ) -> None: ...
    @overload
    def random_sample(self, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def random(self, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def beta(self, a: float, b: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def beta(
        self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def exponential(self, scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def exponential(
        self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_exponential(self, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def tomaxint(self, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ...
    # randint: one overload per accepted dtype so the return array type is precise.
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
    ) -> int: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
        size: None = ...,
        dtype: _DTypeLikeBool = ...,
    ) -> bool: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: int,
        high: None | int = ...,
        size: None = ...,
        dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
    ) -> int: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: _DTypeLikeBool = ...,
    ) -> ndarray[Any, dtype[bool_]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
    ) -> ndarray[Any, dtype[int8]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
    ) -> ndarray[Any, dtype[int16]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
    ) -> ndarray[Any, dtype[int32]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
    ) -> ndarray[Any, dtype[int64]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
    ) -> ndarray[Any, dtype[uint8]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
    ) -> ndarray[Any, dtype[uint16]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
    ) -> ndarray[Any, dtype[uint32]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
    ) -> ndarray[Any, dtype[uint64]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def randint(  # type: ignore[misc]
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
        dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
    ) -> ndarray[Any, dtype[uint]]: ...
    def bytes(self, length: int) -> bytes: ...
    @overload
    def choice(
        self,
        a: int,
        size: None = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> int: ...
    @overload
    def choice(
        self,
        a: int,
        size: _ShapeLike = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def choice(
        self,
        a: ArrayLike,
        size: None = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> Any: ...
    @overload
    def choice(
        self,
        a: ArrayLike,
        size: _ShapeLike = ...,
        replace: bool = ...,
        p: None | _ArrayLikeFloat_co = ...,
    ) -> ndarray[Any, Any]: ...
    @overload
    def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def uniform(
        self,
        low: _ArrayLikeFloat_co = ...,
        high: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def rand(self) -> float: ...
    @overload
    def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def randn(self) -> float: ...
    @overload
    def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def random_integers(
        self,
        low: _ArrayLikeInt_co,
        high: None | _ArrayLikeInt_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def standard_normal(self, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def standard_normal(  # type: ignore[misc]
        self, size: _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def normal(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_gamma(  # type: ignore[misc]
        self,
        shape: float,
        size: None = ...,
    ) -> float: ...
    @overload
    def standard_gamma(
        self,
        shape: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def gamma(
        self,
        shape: _ArrayLikeFloat_co,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def f(
        self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def noncentral_f(
        self,
        dfnum: _ArrayLikeFloat_co,
        dfden: _ArrayLikeFloat_co,
        nonc: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def chisquare(self, df: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def chisquare(
        self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def noncentral_chisquare(
        self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_t(self, df: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def standard_t(
        self, df: _ArrayLikeFloat_co, size: None = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_t(
        self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def vonmises(
        self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def pareto(self, a: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def pareto(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def weibull(self, a: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def weibull(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def power(self, a: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def power(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def standard_cauchy(self, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def laplace(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def gumbel(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def logistic(
        self,
        loc: _ArrayLikeFloat_co = ...,
        scale: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def lognormal(
        self,
        mean: _ArrayLikeFloat_co = ...,
        sigma: _ArrayLikeFloat_co = ...,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def rayleigh(self, scale: float = ..., size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def rayleigh(
        self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def wald(self, mean: float, scale: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def wald(
        self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ...  # type: ignore[misc]
    @overload
    def triangular(
        self,
        left: _ArrayLikeFloat_co,
        mode: _ArrayLikeFloat_co,
        right: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    @overload
    def binomial(self, n: int, p: float, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def binomial(
        self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def negative_binomial(
        self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def poisson(self, lam: float = ..., size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def poisson(
        self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def zipf(self, a: float, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def zipf(
        self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def geometric(self, p: float, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def geometric(
        self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def hypergeometric(
        self,
        ngood: _ArrayLikeInt_co,
        nbad: _ArrayLikeInt_co,
        nsample: _ArrayLikeInt_co,
        size: None | _ShapeLike = ...,
    ) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def logseries(self, p: float, size: None = ...) -> int: ...  # type: ignore[misc]
    @overload
    def logseries(
        self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    # Multivariate/sequence helpers: always return arrays, no scalar overloads.
    def multivariate_normal(
        self,
        mean: _ArrayLikeFloat_co,
        cov: _ArrayLikeFloat_co,
        size: None | _ShapeLike = ...,
        check_valid: Literal["warn", "raise", "ignore"] = ...,
        tol: float = ...,
    ) -> ndarray[Any, dtype[float64]]: ...
    def multinomial(
        self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[int_]]: ...
    def dirichlet(
        self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
    ) -> ndarray[Any, dtype[float64]]: ...
    def shuffle(self, x: ArrayLike) -> None: ...
    @overload
    def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ...
    @overload
    def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ...
# Hidden singleton RandomState backing the module-level convenience API;
# each module-level function is a bound method of this instance.
_rand: RandomState
beta = _rand.beta
binomial = _rand.binomial
bytes = _rand.bytes
chisquare = _rand.chisquare
choice = _rand.choice
dirichlet = _rand.dirichlet
exponential = _rand.exponential
f = _rand.f
gamma = _rand.gamma
get_state = _rand.get_state
geometric = _rand.geometric
gumbel = _rand.gumbel
hypergeometric = _rand.hypergeometric
laplace = _rand.laplace
logistic = _rand.logistic
lognormal = _rand.lognormal
logseries = _rand.logseries
multinomial = _rand.multinomial
multivariate_normal = _rand.multivariate_normal
negative_binomial = _rand.negative_binomial
noncentral_chisquare = _rand.noncentral_chisquare
noncentral_f = _rand.noncentral_f
normal = _rand.normal
pareto = _rand.pareto
permutation = _rand.permutation
poisson = _rand.poisson
power = _rand.power
rand = _rand.rand
randint = _rand.randint
randn = _rand.randn
random = _rand.random
random_integers = _rand.random_integers
random_sample = _rand.random_sample
rayleigh = _rand.rayleigh
seed = _rand.seed
set_state = _rand.set_state
shuffle = _rand.shuffle
standard_cauchy = _rand.standard_cauchy
standard_exponential = _rand.standard_exponential
standard_gamma = _rand.standard_gamma
standard_normal = _rand.standard_normal
standard_t = _rand.standard_t
triangular = _rand.triangular
uniform = _rand.uniform
vonmises = _rand.vonmises
wald = _rand.wald
weibull = _rand.weibull
zipf = _rand.zipf
# Two legacy that are trivial wrappers around random_sample
sample = _rand.random_sample
ranf = _rand.random_sample
| 19,616 | unknown | 33.720354 | 121 | 0.555975 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_sfc64.pyi | from typing import Any, TypedDict
from numpy import dtype as dtype
from numpy import ndarray as ndarray
from numpy import uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _SFC64Internal(TypedDict):
    """Inner ``state`` mapping of an SFC64 state dict."""
    # Raw generator words held in a uint64 ndarray.
    state: ndarray[Any, dtype[uint64]]
class _SFC64State(TypedDict):
    """Full state dict returned/accepted by ``SFC64.state``."""
    bit_generator: str
    state: _SFC64Internal
    # Cached-half-word bookkeeping used when producing 32-bit outputs.
    has_uint32: int
    uinteger: int
class SFC64(BitGenerator):
    """Stub for the SFC64 bit generator (no jump/advance support)."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    @property
    def state(
        self,
    ) -> _SFC64State: ...
    @state.setter
    def state(
        self,
        value: _SFC64State,
    ) -> None: ...
| 709 | unknown | 23.482758 | 87 | 0.669958 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_random.py | import warnings
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed:
    """Seeding of RandomState: known streams for fixed seeds, and rejection
    of seeds that are not (arrays of) unsigned 32-bit integers."""

    def test_scalar(self):
        # Fixed scalar seeds must reproduce known first draws.
        rs = np.random.RandomState(0)
        assert_equal(rs.randint(1000), 684)
        rs = np.random.RandomState(4294967295)
        assert_equal(rs.randint(1000), 419)

    def test_array(self):
        # Equivalent array-like seeds (range, ndarray, list) give the
        # expected first draw.
        for seed_value, expected in ((range(10), 468),
                                     (np.arange(10), 468),
                                     ([0], 973),
                                     ([4294967295], 265)):
            rs = np.random.RandomState(seed_value)
            assert_equal(rs.randint(1000), expected)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, np.random.RandomState, -0.5)
        assert_raises(ValueError, np.random.RandomState, -1)

    def test_invalid_array(self):
        # Every element of an array seed must fit in uint32.
        assert_raises(TypeError, np.random.RandomState, [-0.5])
        for bad_seed in ([-1], [4294967296],
                         [1, 2, 4294967296], [1, -2, 4294967296]):
            assert_raises(ValueError, np.random.RandomState, bad_seed)

    def test_invalid_array_shape(self):
        # gh-9832: seed arrays must be non-empty and one-dimensional.
        for bad_seed in (np.array([], dtype=np.int64),
                         [[1, 2, 3]],
                         [[1, 2, 3], [4, 5, 6]]):
            assert_raises(ValueError, np.random.RandomState, bad_seed)
class TestBinomial:
    """Edge cases of the binomial distribution."""

    def test_n_zero(self):
        # gh-3480: binomial(0, p) is identically zero for any p in [0, 1],
        # both for a scalar n and for an array of zero counts.
        n_array = np.zeros(2, dtype='int')
        for prob in (0, .5, 1):
            assert_(random.binomial(0, prob) == 0)
            assert_array_equal(random.binomial(n_array, prob), n_array)

    def test_p_is_nan(self):
        # gh-4571: a NaN probability must be rejected, not silently accepted.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
    """Multinomial sampling: argument validation plus an unrelated historical
    randint interval check kept in this class."""

    def test_basic(self):
        # Smoke test: a simple two-outcome pvals must not raise.
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # Outcomes with probability exactly zero are legal.
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        # randint honours a fully negative half-open interval [-5, -1).
        assert_(-5 <= random.randint(-5, -1) < -1)
        draws = random.randint(-5, -1, 5)
        assert_(np.all(-5 <= draws))
        assert_(np.all(draws < -1))

    def test_size(self):
        # gh-3173: ``size`` may be a numpy integer, list, tuple or array,
        # but not a float.
        pvals = [0.5, 0.5]
        for _ in range(3):
            assert_equal(np.random.multinomial(1, pvals, np.uint32(1)).shape,
                         (1, 2))
        for sz in ([2, 2], (2, 2), np.array((2, 2))):
            assert_equal(np.random.multinomial(1, pvals, sz).shape, (2, 2, 2))
        assert_raises(TypeError, np.random.multinomial, 1, pvals,
                      float(1))

    def test_multidimensional_pvals(self):
        # pvals must be one-dimensional.
        for bad_pvals in ([[0, 1]], [[0], [1]],
                          [[[0], [1]], [[1], [0]]],
                          np.array([[0, 1], [1, 0]])):
            assert_raises(ValueError, np.random.multinomial, 10, bad_pvals)
class TestSetState:
    """get_state/set_state round-tripping, including handling of the cached
    second Gaussian from the Box-Muller pair."""

    def setup_method(self):
        # Fresh generator and a snapshot of its pristine state per test.
        self.seed = 1234567890
        self.prng = random.RandomState(self.seed)
        self.state = self.prng.get_state()

    def test_basic(self):
        # Restoring the saved state replays the identical integer stream.
        before = self.prng.tomaxint(16)
        self.prng.set_state(self.state)
        after = self.prng.tomaxint(16)
        assert_(np.all(before == after))

    def test_gaussian_reset(self):
        # set_state must clear any cached every-other Gaussian.
        before = self.prng.standard_normal(size=3)
        self.prng.set_state(self.state)
        after = self.prng.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_gaussian_reset_in_media_res(self):
        # A state captured *with* a cached Gaussian must restore that cache.
        self.prng.standard_normal()
        mid_state = self.prng.get_state()
        before = self.prng.standard_normal(size=3)
        self.prng.set_state(mid_state)
        after = self.prng.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_backwards_compatibility(self):
        # Old-style state tuples lack the cached-Gaussian fields; both the
        # truncated and full forms must reproduce the same stream.
        legacy_state = self.state[:-2]
        x1 = self.prng.standard_normal(size=16)
        self.prng.set_state(legacy_state)
        x2 = self.prng.standard_normal(size=16)
        self.prng.set_state(self.state)
        x3 = self.prng.standard_normal(size=16)
        assert_(np.all(x1 == x2))
        assert_(np.all(x1 == x3))

    def test_negative_binomial(self):
        # Floating point arguments must be accepted without truncation.
        self.prng.negative_binomial(0.5, 0.5)
class TestRandint:
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a sha256 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it be round down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
    def test_respect_dtype_singleton(self):
        """Check the dtype of randint's result honors the requested dtype.

        NumPy scalar dtypes must yield arrays/scalars of that exact dtype,
        while native Python types (bool, int) must yield plain Python
        objects with no ``dtype`` attribute (gh-7203, gh-7284).
        """
        # See gh-7203
        # self.rfunc / self.itype are set by the enclosing test class
        # (presumably rfunc is np.random.randint and itype the integer
        # dtypes under test — confirm against the class setup).
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            assert_equal(sample.dtype, np.dtype(dt))
        for dt in (bool, int, np.compat.long):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            assert_(not hasattr(sample, 'dtype'))
            assert_equal(type(sample), dt)
class TestRandomDist:
    """Pin exact outputs of the legacy ``np.random`` API for a fixed seed.

    The ``desired`` values below are regression references generated by the
    original implementation; the legacy RandomState stream is guaranteed
    stable across NumPy versions, so any mismatch indicates a behavioral
    change in the generators or distribution algorithms.
    """
    # Make sure the random distribution returns the correct value for a
    # given seed
    def setup_method(self):
        # Shared fixed seed; each test reseeds the global RNG with it.
        self.seed = 1234567890
    def test_rand(self):
        np.random.seed(self.seed)
        actual = np.random.rand(3, 2)
        desired = np.array([[0.61879477158567997, 0.59162362775974664],
                            [0.88868358904449662, 0.89165480011560816],
                            [0.4575674820298663, 0.7781880808593471]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_randn(self):
        np.random.seed(self.seed)
        actual = np.random.randn(3, 2)
        desired = np.array([[1.34016345771863121, 1.73759122771936081],
                            [1.498988344300628, -0.2286433324536169],
                            [2.031033998682787, 2.17032494605655257]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_randint(self):
        np.random.seed(self.seed)
        actual = np.random.randint(-99, 99, size=(3, 2))
        desired = np.array([[31, 3],
                            [-52, 41],
                            [-48, -66]])
        assert_array_equal(actual, desired)
    def test_random_integers(self):
        # random_integers is deprecated: exactly one DeprecationWarning is
        # expected, and its output must match randint's over the same range.
        np.random.seed(self.seed)
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            actual = np.random.random_integers(-99, 99, size=(3, 2))
            assert_(len(w) == 1)
        desired = np.array([[31, 3],
                            [-52, 41],
                            [-48, -66]])
        assert_array_equal(actual, desired)
    def test_random_integers_max_int(self):
        # Tests whether random_integers can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            actual = np.random.random_integers(np.iinfo('l').max,
                                               np.iinfo('l').max)
            assert_(len(w) == 1)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)
    def test_random_integers_deprecated(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            # DeprecationWarning raised with high == None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max)
            # DeprecationWarning raised with high != None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max, np.iinfo('l').max)
    def test_random(self):
        np.random.seed(self.seed)
        actual = np.random.random((3, 2))
        desired = np.array([[0.61879477158567997, 0.59162362775974664],
                            [0.88868358904449662, 0.89165480011560816],
                            [0.4575674820298663, 0.7781880808593471]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_choice_uniform_replace(self):
        np.random.seed(self.seed)
        actual = np.random.choice(4, 4)
        desired = np.array([2, 3, 2, 3])
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_replace(self):
        np.random.seed(self.seed)
        actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([1, 1, 2, 2])
        assert_array_equal(actual, desired)
    def test_choice_uniform_noreplace(self):
        np.random.seed(self.seed)
        actual = np.random.choice(4, 3, replace=False)
        desired = np.array([0, 1, 3])
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_noreplace(self):
        np.random.seed(self.seed)
        actual = np.random.choice(4, 3, replace=False,
                                  p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([2, 3, 1])
        assert_array_equal(actual, desired)
    def test_choice_noninteger(self):
        np.random.seed(self.seed)
        actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['c', 'd', 'c', 'd'])
        assert_array_equal(actual, desired)
    def test_choice_exceptions(self):
        # Invalid population, size, replace and probability arguments must
        # all raise ValueError.
        sample = np.random.choice
        assert_raises(ValueError, sample, -1, 3)
        assert_raises(ValueError, sample, 3., 3)
        assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
        assert_raises(ValueError, sample, [], 3)
        assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
                      p=[[0.25, 0.25], [0.25, 0.25]])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
        assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
        assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
        # gh-13087
        assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], 2,
                      replace=False, p=[1, 0, 0])
    def test_choice_return_shape(self):
        """choice returns a scalar for scalar size and shape-matching arrays
        for tuple sizes, including 0-d and zero-length edge cases."""
        p = [0.1, 0.9]
        # Check scalar
        assert_(np.isscalar(np.random.choice(2, replace=True)))
        assert_(np.isscalar(np.random.choice(2, replace=False)))
        assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
        assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
        assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
        assert_(np.random.choice([None], replace=True) is None)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(np.random.choice(arr, replace=True) is a)
        # Check 0-d array
        s = tuple()
        assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
        assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
        assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
        assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
        assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
        assert_(np.random.choice([None], s, replace=True).ndim == 0)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(np.random.choice(arr, s, replace=True).item() is a)
        # Check multi dimensional array
        s = (2, 3)
        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(np.random.choice(6, s, replace=True).shape, s)
        assert_equal(np.random.choice(6, s, replace=False).shape, s)
        assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
        # Check zero-size
        assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
        assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
        assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
        assert_equal(np.random.choice(0, size=0).shape, (0,))
        assert_equal(np.random.choice([], size=(0,)).shape, (0,))
        assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
                     (3, 0, 4))
        assert_raises(ValueError, np.random.choice, [], 10)
    def test_choice_nan_probabilities(self):
        # Non-numeric probabilities must raise, not propagate NaN.
        a = np.array([42, 1, 2])
        p = [None, None, None]
        assert_raises(ValueError, np.random.choice, a, p=p)
    def test_bytes(self):
        np.random.seed(self.seed)
        actual = np.random.bytes(10)
        desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
        assert_equal(actual, desired)
    def test_shuffle(self):
        # Test lists, arrays (of various dtypes), and multidimensional versions
        # of both, c-contiguous or not:
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-11442
                     lambda x: (np.asarray([(i, i) for i in x],
                                           [("a", int), ("b", int)])
                                .view(np.recarray)),
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object), ("b", np.int32)])]:
            np.random.seed(self.seed)
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            np.random.shuffle(alist)
            actual = alist
            desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
            assert_array_equal(actual, desired)
    def test_shuffle_masked(self):
        # gh-3263
        # Shuffling a masked array must permute data/mask together, so the
        # multiset of unmasked values is preserved.
        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
        a_orig = a.copy()
        b_orig = b.copy()
        for i in range(50):
            np.random.shuffle(a)
            assert_equal(
                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
            np.random.shuffle(b)
            assert_equal(
                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
    @pytest.mark.parametrize("random",
            [np.random, np.random.RandomState(), np.random.default_rng()])
    def test_shuffle_untyped_warning(self, random):
        # Create a dict works like a sequence but isn't one
        values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
        with pytest.warns(UserWarning,
                          match="you are shuffling a 'dict' object") as rec:
            random.shuffle(values)
        assert "test_random" in rec[0].filename
    @pytest.mark.parametrize("random",
            [np.random, np.random.RandomState(), np.random.default_rng()])
    @pytest.mark.parametrize("use_array_like", [True, False])
    def test_shuffle_no_object_unpacking(self, random, use_array_like):
        class MyArr(np.ndarray):
            pass
        items = [
            None, np.array([3]), np.float64(3), np.array(10), np.float64(7)
        ]
        arr = np.array(items, dtype=object)
        item_ids = {id(i) for i in items}
        if use_array_like:
            arr = arr.view(MyArr)
        # The array was created fine, and did not modify any objects:
        assert all(id(i) in item_ids for i in arr)
        if use_array_like and not isinstance(random, np.random.Generator):
            # The old API gives incorrect results, but warns about it.
            with pytest.warns(UserWarning,
                              match="Shuffling a one dimensional array.*"):
                random.shuffle(arr)
        else:
            random.shuffle(arr)
            assert all(id(i) in item_ids for i in arr)
    def test_shuffle_memoryview(self):
        # gh-18273
        # allow graceful handling of memoryviews
        # (treat the same as arrays)
        np.random.seed(self.seed)
        a = np.arange(5).data
        np.random.shuffle(a)
        assert_equal(np.asarray(a), [0, 1, 4, 3, 2])
        rng = np.random.RandomState(self.seed)
        rng.shuffle(a)
        assert_equal(np.asarray(a), [0, 1, 2, 3, 4])
        rng = np.random.default_rng(self.seed)
        rng.shuffle(a)
        assert_equal(np.asarray(a), [4, 1, 0, 3, 2])
    def test_shuffle_not_writeable(self):
        a = np.zeros(3)
        a.flags.writeable = False
        with pytest.raises(ValueError, match='read-only'):
            np.random.shuffle(a)
    def test_beta(self):
        np.random.seed(self.seed)
        actual = np.random.beta(.1, .9, size=(3, 2))
        desired = np.array(
                [[1.45341850513746058e-02, 5.31297615662868145e-04],
                 [1.85366619058432324e-06, 4.19214516800110563e-03],
                 [1.58405155108498093e-04, 1.26252891949397652e-04]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_binomial(self):
        np.random.seed(self.seed)
        actual = np.random.binomial(100, .456, size=(3, 2))
        desired = np.array([[37, 43],
                            [42, 48],
                            [46, 45]])
        assert_array_equal(actual, desired)
    def test_chisquare(self):
        np.random.seed(self.seed)
        actual = np.random.chisquare(50, size=(3, 2))
        desired = np.array([[63.87858175501090585, 68.68407748911370447],
                            [65.77116116901505904, 47.09686762438974483],
                            [72.3828403199695174, 74.18408615260374006]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_dirichlet(self):
        np.random.seed(self.seed)
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
        desired = np.array([[[0.54539444573611562, 0.45460555426388438],
                             [0.62345816822039413, 0.37654183177960598]],
                            [[0.55206000085785778, 0.44793999914214233],
                             [0.58964023305154301, 0.41035976694845688]],
                            [[0.59266909280647828, 0.40733090719352177],
                             [0.56974431743975207, 0.43025568256024799]]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_dirichlet_size(self):
        # gh-3173
        # size may be an int-like scalar, list, tuple or array; floats raise.
        p = np.array([51.72840233779265162, 39.74494232180943953])
        assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
        assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
        assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
        assert_raises(TypeError, np.random.dirichlet, p, float(1))
    def test_dirichlet_bad_alpha(self):
        # gh-2089
        alpha = np.array([5.4e-01, -1.0e-16])
        assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
        # gh-15876
        assert_raises(ValueError, random.dirichlet, [[5, 1]])
        assert_raises(ValueError, random.dirichlet, [[5], [1]])
        assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
        assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
    def test_exponential(self):
        np.random.seed(self.seed)
        actual = np.random.exponential(1.1234, size=(3, 2))
        desired = np.array([[1.08342649775011624, 1.00607889924557314],
                            [2.46628830085216721, 2.49668106809923884],
                            [0.68717433461363442, 1.69175666993575979]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_exponential_0(self):
        assert_equal(np.random.exponential(scale=0), 0)
        assert_raises(ValueError, np.random.exponential, scale=-0.)
    def test_f(self):
        np.random.seed(self.seed)
        actual = np.random.f(12, 77, size=(3, 2))
        desired = np.array([[1.21975394418575878, 1.75135759791559775],
                            [1.44803115017146489, 1.22108959480396262],
                            [1.02176975757740629, 1.34431827623300415]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_gamma(self):
        np.random.seed(self.seed)
        actual = np.random.gamma(5, 3, size=(3, 2))
        desired = np.array([[24.60509188649287182, 28.54993563207210627],
                            [26.13476110204064184, 12.56988482927716078],
                            [31.71863275789960568, 33.30143302795922011]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_gamma_0(self):
        assert_equal(np.random.gamma(shape=0, scale=0), 0)
        assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
    def test_geometric(self):
        np.random.seed(self.seed)
        actual = np.random.geometric(.123456789, size=(3, 2))
        desired = np.array([[8, 7],
                            [17, 17],
                            [5, 12]])
        assert_array_equal(actual, desired)
    def test_gumbel(self):
        np.random.seed(self.seed)
        actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[0.19591898743416816, 0.34405539668096674],
                            [-1.4492522252274278, -1.47374816298446865],
                            [1.10651090478803416, -0.69535848626236174]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_gumbel_0(self):
        assert_equal(np.random.gumbel(scale=0), 0)
        assert_raises(ValueError, np.random.gumbel, scale=-0.)
    def test_hypergeometric(self):
        np.random.seed(self.seed)
        actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
        desired = np.array([[10, 10],
                            [10, 10],
                            [9, 9]])
        assert_array_equal(actual, desired)
        # Test nbad = 0
        actual = np.random.hypergeometric(5, 0, 3, size=4)
        desired = np.array([3, 3, 3, 3])
        assert_array_equal(actual, desired)
        actual = np.random.hypergeometric(15, 0, 12, size=4)
        desired = np.array([12, 12, 12, 12])
        assert_array_equal(actual, desired)
        # Test ngood = 0
        actual = np.random.hypergeometric(0, 5, 3, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
        actual = np.random.hypergeometric(0, 15, 12, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
    def test_laplace(self):
        np.random.seed(self.seed)
        actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[0.66599721112760157, 0.52829452552221945],
                            [3.12791959514407125, 3.18202813572992005],
                            [-0.05391065675859356, 1.74901336242837324]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_laplace_0(self):
        assert_equal(np.random.laplace(scale=0), 0)
        assert_raises(ValueError, np.random.laplace, scale=-0.)
    def test_logistic(self):
        np.random.seed(self.seed)
        actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[1.09232835305011444, 0.8648196662399954],
                            [4.27818590694950185, 4.33897006346929714],
                            [-0.21682183359214885, 2.63373365386060332]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_lognormal(self):
        np.random.seed(self.seed)
        actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
        desired = np.array([[16.50698631688883822, 36.54846706092654784],
                            [22.67886599981281748, 0.71617561058995771],
                            [65.72798501792723869, 86.84341601437161273]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_lognormal_0(self):
        assert_equal(np.random.lognormal(sigma=0), 1)
        assert_raises(ValueError, np.random.lognormal, sigma=-0.)
    def test_logseries(self):
        np.random.seed(self.seed)
        actual = np.random.logseries(p=.923456789, size=(3, 2))
        desired = np.array([[2, 2],
                            [6, 17],
                            [3, 6]])
        assert_array_equal(actual, desired)
    def test_multinomial(self):
        np.random.seed(self.seed)
        actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
        desired = np.array([[[4, 3, 5, 4, 2, 2],
                             [5, 2, 8, 2, 2, 1]],
                            [[3, 4, 3, 6, 0, 4],
                             [2, 1, 4, 3, 6, 4]],
                            [[4, 4, 2, 5, 2, 3],
                             [4, 3, 4, 2, 3, 4]]])
        assert_array_equal(actual, desired)
    def test_multivariate_normal(self):
        np.random.seed(self.seed)
        mean = (.123456789, 10)
        cov = [[1, 0], [0, 1]]
        size = (3, 2)
        actual = np.random.multivariate_normal(mean, cov, size)
        desired = np.array([[[1.463620246718631, 11.73759122771936],
                             [1.622445133300628, 9.771356667546383]],
                            [[2.154490787682787, 12.170324946056553],
                             [1.719909438201865, 9.230548443648306]],
                            [[0.689515026297799, 9.880729819607714],
                             [-0.023054015651998, 9.201096623542879]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check for default size, was raising deprecation warning
        actual = np.random.multivariate_normal(mean, cov)
        desired = np.array([0.895289569463708, 9.17180864067987])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check that non positive-semidefinite covariance warns with
        # RuntimeWarning
        mean = [0, 0]
        cov = [[1, 2], [2, 1]]
        assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
        # and that it doesn't warn with RuntimeWarning check_valid='ignore'
        assert_no_warnings(np.random.multivariate_normal, mean, cov,
                           check_valid='ignore')
        # and that it raises with RuntimeWarning check_valid='raises'
        assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
                      check_valid='raise')
        # A float32 covariance must not trigger a spurious PSD warning.
        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
        with suppress_warnings() as sup:
            np.random.multivariate_normal(mean, cov)
            w = sup.record(RuntimeWarning)
            assert len(w) == 0
    def test_negative_binomial(self):
        np.random.seed(self.seed)
        actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
        desired = np.array([[848, 841],
                            [892, 611],
                            [779, 647]])
        assert_array_equal(actual, desired)
    def test_noncentral_chisquare(self):
        np.random.seed(self.seed)
        actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
        desired = np.array([[23.91905354498517511, 13.35324692733826346],
                            [31.22452661329736401, 16.60047399466177254],
                            [5.03461598262724586, 17.94973089023519464]])
        assert_array_almost_equal(actual, desired, decimal=14)
        actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
        desired = np.array([[1.47145377828516666, 0.15052899268012659],
                            [0.00943803056963588, 1.02647251615666169],
                            [0.332334982684171, 0.15451287602753125]])
        assert_array_almost_equal(actual, desired, decimal=14)
        np.random.seed(self.seed)
        actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
        desired = np.array([[9.597154162763948, 11.725484450296079],
                            [10.413711048138335, 3.694475922923986],
                            [13.484222138963087, 14.377255424602957]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f(self):
        np.random.seed(self.seed)
        actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                        size=(3, 2))
        desired = np.array([[1.40598099674926669, 0.34207973179285761],
                            [3.57715069265772545, 7.92632662577829805],
                            [0.43741599463544162, 1.1774208752428319]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_normal(self):
        np.random.seed(self.seed)
        actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[2.80378370443726244, 3.59863924443872163],
                            [3.121433477601256, -0.33382987590723379],
                            [4.18552478636557357, 4.46410668111310471]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_normal_0(self):
        assert_equal(np.random.normal(scale=0), 0)
        assert_raises(ValueError, np.random.normal, scale=-0.)
    def test_pareto(self):
        np.random.seed(self.seed)
        actual = np.random.pareto(a=.123456789, size=(3, 2))
        desired = np.array(
                [[2.46852460439034849e+03, 1.41286880810518346e+03],
                 [5.28287797029485181e+07, 6.57720981047328785e+07],
                 [1.40840323350391515e+02, 1.98390255135251704e+05]])
        # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
        # matrix differs by 24 nulps. Discussion:
        # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
        # Consensus is that this is probably some gcc quirk that affects
        # rounding but not in any important way, so we just use a looser
        # tolerance on this test:
        np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
    def test_poisson(self):
        np.random.seed(self.seed)
        actual = np.random.poisson(lam=.123456789, size=(3, 2))
        desired = np.array([[0, 0],
                            [1, 0],
                            [0, 0]])
        assert_array_equal(actual, desired)
    def test_poisson_exceptions(self):
        # Both negative and too-large lambda values must raise, for scalar
        # and array-valued lam alike.
        lambig = np.iinfo('l').max
        lamneg = -1
        assert_raises(ValueError, np.random.poisson, lamneg)
        assert_raises(ValueError, np.random.poisson, [lamneg]*10)
        assert_raises(ValueError, np.random.poisson, lambig)
        assert_raises(ValueError, np.random.poisson, [lambig]*10)
    def test_power(self):
        np.random.seed(self.seed)
        actual = np.random.power(a=.123456789, size=(3, 2))
        desired = np.array([[0.02048932883240791, 0.01424192241128213],
                            [0.38446073748535298, 0.39499689943484395],
                            [0.00177699707563439, 0.13115505880863756]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_rayleigh(self):
        np.random.seed(self.seed)
        actual = np.random.rayleigh(scale=10, size=(3, 2))
        desired = np.array([[13.8882496494248393, 13.383318339044731],
                            [20.95413364294492098, 21.08285015800712614],
                            [11.06066537006854311, 17.35468505778271009]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_rayleigh_0(self):
        assert_equal(np.random.rayleigh(scale=0), 0)
        assert_raises(ValueError, np.random.rayleigh, scale=-0.)
    def test_standard_cauchy(self):
        np.random.seed(self.seed)
        actual = np.random.standard_cauchy(size=(3, 2))
        desired = np.array([[0.77127660196445336, -6.55601161955910605],
                            [0.93582023391158309, -2.07479293013759447],
                            [-4.74601644297011926, 0.18338989290760804]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential(self):
        np.random.seed(self.seed)
        actual = np.random.standard_exponential(size=(3, 2))
        desired = np.array([[0.96441739162374596, 0.89556604882105506],
                            [2.1953785836319808, 2.22243285392490542],
                            [0.6116915921431676, 1.50592546727413201]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_gamma(self):
        np.random.seed(self.seed)
        actual = np.random.standard_gamma(shape=3, size=(3, 2))
        desired = np.array([[5.50841531318455058, 6.62953470301903103],
                            [5.93988484943779227, 2.31044849402133989],
                            [7.54838614231317084, 8.012756093271868]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gamma_0(self):
        assert_equal(np.random.standard_gamma(shape=0), 0)
        assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
    def test_standard_normal(self):
        np.random.seed(self.seed)
        actual = np.random.standard_normal(size=(3, 2))
        desired = np.array([[1.34016345771863121, 1.73759122771936081],
                            [1.498988344300628, -0.2286433324536169],
                            [2.031033998682787, 2.17032494605655257]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_t(self):
        np.random.seed(self.seed)
        actual = np.random.standard_t(df=10, size=(3, 2))
        desired = np.array([[0.97140611862659965, -0.08830486548450577],
                            [1.36311143689505321, -0.55317463909867071],
                            [-0.18473749069684214, 0.61181537341755321]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_triangular(self):
        np.random.seed(self.seed)
        actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
                                      size=(3, 2))
        desired = np.array([[12.68117178949215784, 12.4129206149193152],
                            [16.20131377335158263, 16.25692138747600524],
                            [11.20400690911820263, 14.4978144835829923]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_uniform(self):
        np.random.seed(self.seed)
        actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
        desired = np.array([[6.99097932346268003, 6.73801597444323974],
                            [9.50364421400426274, 9.53130618907631089],
                            [5.48995325769805476, 8.47493103280052118]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_uniform_range_bounds(self):
        fmin = np.finfo('float').min
        fmax = np.finfo('float').max
        func = np.random.uniform
        assert_raises(OverflowError, func, -np.inf, 0)
        assert_raises(OverflowError, func, 0, np.inf)
        assert_raises(OverflowError, func, fmin, fmax)
        assert_raises(OverflowError, func, [-np.inf], [0])
        assert_raises(OverflowError, func, [0], [np.inf])
        # (fmax / 1e17) - fmin is within range, so this should not throw
        # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
        # DBL_MAX by increasing fmin a bit
        np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
    def test_scalar_exception_propagation(self):
        # Tests that exceptions are correctly propagated in distributions
        # when called with objects that throw exceptions when converted to
        # scalars.
        #
        # Regression test for gh: 8865
        class ThrowingFloat(np.ndarray):
            def __float__(self):
                raise TypeError
        throwing_float = np.array(1.0).view(ThrowingFloat)
        assert_raises(TypeError, np.random.uniform, throwing_float,
                      throwing_float)
        class ThrowingInteger(np.ndarray):
            def __int__(self):
                raise TypeError
            __index__ = __int__
        throwing_int = np.array(1).view(ThrowingInteger)
        assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
    def test_vonmises(self):
        np.random.seed(self.seed)
        actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
        desired = np.array([[2.28567572673902042, 2.89163838442285037],
                            [0.38198375564286025, 2.57638023113890746],
                            [1.19153771588353052, 1.83509849681825354]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_vonmises_small(self):
        # check infinite loop, gh-4720
        np.random.seed(self.seed)
        r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
        np.testing.assert_(np.isfinite(r).all())
    def test_wald(self):
        np.random.seed(self.seed)
        actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
        desired = np.array([[3.82935265715889983, 5.13125249184285526],
                            [0.35045403618358717, 1.50832396872003538],
                            [0.24124319895843183, 0.22031101461955038]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_weibull(self):
        np.random.seed(self.seed)
        actual = np.random.weibull(a=1.23, size=(3, 2))
        desired = np.array([[0.97097342648766727, 0.91422896443565516],
                            [1.89517770034962929, 1.91414357960479564],
                            [0.67057783752390987, 1.39494046635066793]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_weibull_0(self):
        np.random.seed(self.seed)
        assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
        assert_raises(ValueError, np.random.weibull, a=-0.)
    def test_zipf(self):
        np.random.seed(self.seed)
        actual = np.random.zipf(a=1.23, size=(3, 2))
        desired = np.array([[66, 29],
                            [1, 1],
                            [3, 13]])
        assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
    def setup_method(self):
        # Fixed seed applied (via setSeed) before every draw in this class.
        self.seed = 123456789
    def setSeed(self):
        """Reseed the global legacy RNG with the class-wide fixed seed."""
        np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
    def test_uniform(self):
        """Broadcasting low or high to length 3 yields the same stream."""
        low = [0]
        high = [1]
        uniform = np.random.uniform
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])
        self.setSeed()
        actual = uniform(low * 3, high)
        assert_array_almost_equal(actual, desired, decimal=14)
        self.setSeed()
        actual = uniform(low, high * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_normal(self):
        """Broadcasting loc or scale gives identical draws; a negative
        scale anywhere in the broadcast raises ValueError."""
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        normal = np.random.normal
        desired = np.array([2.2129019979039612,
                            2.1283977976520019,
                            1.8417114045748335])
        self.setSeed()
        actual = normal(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc * 3, bad_scale)
        self.setSeed()
        actual = normal(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, bad_scale * 3)
    def test_beta(self):
        """Broadcasting a or b gives identical draws; negative parameters
        raise ValueError on either side of the broadcast."""
        a = [1]
        b = [2]
        bad_a = [-1]
        bad_b = [-2]
        beta = np.random.beta
        desired = np.array([0.19843558305989056,
                            0.075230336409423643,
                            0.24976865978980844])
        self.setSeed()
        actual = beta(a * 3, b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a * 3, b)
        assert_raises(ValueError, beta, a * 3, bad_b)
        self.setSeed()
        actual = beta(a, b * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a, b * 3)
        assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
    def test_gamma(self):
        """Broadcasting shape or scale gives identical draws; negative
        parameters raise ValueError on either side of the broadcast."""
        shape = [1]
        scale = [2]
        bad_shape = [-1]
        bad_scale = [-2]
        gamma = np.random.gamma
        desired = np.array([1.5221370731769048,
                            1.5277256455738331,
                            1.4248762625178359])
        self.setSeed()
        actual = gamma(shape * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape * 3, scale)
        assert_raises(ValueError, gamma, shape * 3, bad_scale)
        self.setSeed()
        actual = gamma(shape, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, scale * 3)
        assert_raises(ValueError, gamma, shape, bad_scale * 3)
    def test_f(self):
        """Broadcasting dfnum or dfden gives identical draws; negative
        degrees of freedom raise ValueError on either side."""
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [-1]
        bad_dfden = [-2]
        f = np.random.f
        desired = np.array([0.80038951638264799,
                            0.86768719635363512,
                            2.7251095168386801])
        self.setSeed()
        actual = f(dfnum * 3, dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
        assert_raises(ValueError, f, dfnum * 3, bad_dfden)
        self.setSeed()
        actual = f(dfnum, dfden * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
        assert_raises(ValueError, f, dfnum, bad_dfden * 3)
    def test_noncentral_f(self):
        """Broadcasting any of dfnum/dfden/nonc gives identical draws;
        an invalid value in any broadcast position raises ValueError."""
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [-1]
        bad_nonc = [-2]
        nonc_f = np.random.noncentral_f
        desired = np.array([9.1393943263705211,
                            13.025456344595602,
                            8.8018098359100545])
        self.setSeed()
        actual = nonc_f(dfnum * 3, dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
        self.setSeed()
        actual = nonc_f(dfnum, dfden * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
        self.setSeed()
        actual = nonc_f(dfnum, dfden, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
    def test_noncentral_f_small_df(self):
        """Regression check for df < 1 in both degrees of freedom."""
        self.setSeed()
        desired = np.array([6.869638627492048, 0.785880199263955])
        actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
        assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
    def test_noncentral_chisquare(self):
        # Broadcast df and nonc each in turn; negative values must raise.
        df = [1]
        nonc = [2]
        bad_df = [-1]
        bad_nonc = [-2]
        nonc_chi = np.random.noncentral_chisquare
        desired = np.array([9.0015599467913763,
                            4.5804135049718742,
                            6.0872302432834564])

        self.setSeed()
        actual = nonc_chi(df * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)

        self.setSeed()
        actual = nonc_chi(df, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
    def test_standard_t(self):
        # Broadcast df; negative df must raise ValueError.
        df = [1]
        bad_df = [-1]
        t = np.random.standard_t
        desired = np.array([3.0702872575217643,
                            5.8560725167361607,
                            1.0274791436474273])

        self.setSeed()
        actual = t(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, t, bad_df * 3)
    def test_vonmises(self):
        # Broadcast mu and kappa each in turn; negative kappa must raise.
        mu = [2]
        kappa = [1]
        bad_kappa = [-1]
        vonmises = np.random.vonmises
        desired = np.array([2.9883443664201312,
                            -2.7064099483995943,
                            -1.8672476700665914])

        self.setSeed()
        actual = vonmises(mu * 3, kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)

        self.setSeed()
        actual = vonmises(mu, kappa * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
    def test_pareto(self):
        # Broadcast a; negative a must raise ValueError.
        a = [1]
        bad_a = [-1]
        pareto = np.random.pareto
        desired = np.array([1.1405622680198362,
                            1.1465519762044529,
                            1.0389564467453547])

        self.setSeed()
        actual = pareto(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, pareto, bad_a * 3)
    def test_weibull(self):
        # Broadcast a; negative a must raise ValueError.
        a = [1]
        bad_a = [-1]
        weibull = np.random.weibull
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = weibull(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, weibull, bad_a * 3)
    def test_power(self):
        # Broadcast a; negative a must raise ValueError.
        a = [1]
        bad_a = [-1]
        power = np.random.power
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])

        self.setSeed()
        actual = power(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, power, bad_a * 3)
    def test_laplace(self):
        # Broadcast loc and scale each in turn; negative scale must raise.
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        laplace = np.random.laplace
        desired = np.array([0.067921356028507157,
                            0.070715642226971326,
                            0.019290950698972624])

        self.setSeed()
        actual = laplace(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc * 3, bad_scale)

        self.setSeed()
        actual = laplace(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, bad_scale * 3)
    def test_gumbel(self):
        # Broadcast loc and scale each in turn; negative scale must raise.
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        gumbel = np.random.gumbel
        desired = np.array([0.2730318639556768,
                            0.26936705726291116,
                            0.33906220393037939])

        self.setSeed()
        actual = gumbel(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc * 3, bad_scale)

        self.setSeed()
        actual = gumbel(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, bad_scale * 3)
    def test_logistic(self):
        # Broadcast loc and scale each in turn; negative scale must raise.
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        logistic = np.random.logistic
        desired = np.array([0.13152135837586171,
                            0.13675915696285773,
                            0.038216792802833396])

        self.setSeed()
        actual = logistic(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc * 3, bad_scale)

        self.setSeed()
        actual = logistic(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc, bad_scale * 3)
    def test_lognormal(self):
        # Broadcast mean and sigma each in turn; negative sigma must raise.
        mean = [0]
        sigma = [1]
        bad_sigma = [-1]
        lognormal = np.random.lognormal
        desired = np.array([9.1422086044848427,
                            8.4013952870126261,
                            6.3073234116578671])

        self.setSeed()
        actual = lognormal(mean * 3, sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)

        self.setSeed()
        actual = lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
    def test_rayleigh(self):
        # Broadcast scale; negative scale must raise ValueError.
        scale = [1]
        bad_scale = [-1]
        rayleigh = np.random.rayleigh
        desired = np.array([1.2337491937897689,
                            1.2360119924878694,
                            1.1936818095781789])

        self.setSeed()
        actual = rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, rayleigh, bad_scale * 3)
    def test_wald(self):
        # Broadcast mean and scale each in turn; non-positive mean and
        # negative scale must raise, both for arrays and scalars.
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        wald = np.random.wald
        desired = np.array([0.11873681120271318,
                            0.12450084820795027,
                            0.9096122728408238])

        self.setSeed()
        actual = wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean * 3, scale)
        assert_raises(ValueError, wald, mean * 3, bad_scale)

        self.setSeed()
        actual = wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean, scale * 3)
        assert_raises(ValueError, wald, mean, bad_scale * 3)
        # scalar arguments go through a separate validation path
        assert_raises(ValueError, wald, 0.0, 1)
        assert_raises(ValueError, wald, 0.5, 0.0)
    def test_triangular(self):
        # Broadcast left/mode/right each in turn; left > right,
        # mode > right, and left == mode == right must all raise.
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # unpack [3, 3] so both "two" bad values equal right's element
        bad_left_two, bad_mode_two = right * 2
        triangular = np.random.triangular
        desired = np.array([2.03339048710429,
                            2.0347400359389356,
                            2.0095991069536208])

        self.setSeed()
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
                      right)

        self.setSeed()
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
                      right)

        self.setSeed()
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
                      right * 3)
    def test_binomial(self):
        # Broadcast n and p each in turn; negative n and p outside [0, 1]
        # must raise ValueError.
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        binom = np.random.binomial
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)

        self.setSeed()
        actual = binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)
    def test_negative_binomial(self):
        # Broadcast n and p each in turn; negative n and p outside [0, 1]
        # must raise ValueError.
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        neg_binom = np.random.negative_binomial
        desired = np.array([1, 0, 1])

        self.setSeed()
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)

        self.setSeed()
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
    def test_poisson(self):
        # Broadcast lam; negative lam and lam beyond the implementation
        # limit (_poisson_lam_max) must raise ValueError.
        max_lam = np.random.RandomState()._poisson_lam_max
        lam = [1]
        bad_lam_one = [-1]
        bad_lam_two = [max_lam * 2]
        poisson = np.random.poisson
        desired = np.array([1, 1, 0])

        self.setSeed()
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)
    def test_zipf(self):
        # Broadcast a; a <= 1 and NaN must raise ValueError.
        a = [2]
        bad_a = [0]
        zipf = np.random.zipf
        desired = np.array([2, 2, 1])

        self.setSeed()
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)
        # NaN comparison warnings are irrelevant to the check
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, zipf, np.nan)
            assert_raises(ValueError, zipf, [0, 0, np.nan])
    def test_geometric(self):
        # Broadcast p; p outside [0, 1] must raise ValueError.
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        geom = np.random.geometric
        desired = np.array([2, 2, 2])

        self.setSeed()
        actual = geom(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geom, bad_p_one * 3)
        assert_raises(ValueError, geom, bad_p_two * 3)
    def test_hypergeometric(self):
        # Broadcast ngood/nbad/nsample each in turn; negative counts and
        # nsample outside [1, ngood + nbad] must raise ValueError.
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [0]
        bad_nsample_two = [4]
        hypergeom = np.random.hypergeometric
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = hypergeom(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
    def test_logseries(self):
        # Broadcast p; p outside (0, 1) must raise ValueError.
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        logseries = np.random.logseries
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread:
    # make sure each state produces the same sequence even in threads
    def setup_method(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run ``function(RandomState(seed), out)`` once per seed, both on
        worker threads and serially, and check both fill identical arrays.
        """
        from threading import Thread

        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)

        # threaded generation.  Use plain loops to start/join: the original
        # list comprehensions built throwaway lists purely for side effects.
        threads = [Thread(target=function, args=(np.random.RandomState(s), o))
                   for s, o in zip(self.seeds, out1)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # the same generation, serially
        for s, o in zip(self.seeds, out2):
            function(np.random.RandomState(s), o)

        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1/6.]*6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
    # Single-element array arguments must produce shape-(1,) output,
    # whether passed as arrays or mixed with scalars (see Issue #4263).
    def setup_method(self):
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        funcs = (np.random.exponential, np.random.standard_gamma,
                 np.random.chisquare, np.random.standard_t,
                 np.random.pareto, np.random.weibull,
                 np.random.power, np.random.rayleigh,
                 np.random.poisson, np.random.zipf,
                 np.random.geometric, np.random.logseries)

        probfuncs = (np.random.geometric, np.random.logseries)

        for func in funcs:
            if func in probfuncs:  # p < 1.0
                out = func(np.array([0.5]))

            else:
                out = func(self.argOne)

            assert_equal(out.shape, self.tgtShape)

    def test_two_arg_funcs(self):
        funcs = (np.random.uniform, np.random.normal,
                 np.random.beta, np.random.gamma,
                 np.random.f, np.random.noncentral_chisquare,
                 np.random.vonmises, np.random.laplace,
                 np.random.gumbel, np.random.logistic,
                 np.random.lognormal, np.random.wald,
                 np.random.binomial, np.random.negative_binomial)

        probfuncs = (np.random.binomial, np.random.negative_binomial)

        for func in funcs:
            if func in probfuncs:  # p <= 1
                argTwo = np.array([0.5])

            else:
                argTwo = self.argTwo

            # all combinations of array/scalar arguments must broadcast
            # to a shape-(1,) result
            out = func(self.argOne, argTwo)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne[0], argTwo)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne, argTwo[0])
            assert_equal(out.shape, self.tgtShape)

# TODO: Uncomment once randint can broadcast arguments
#    def test_randint(self):
#        itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
#                 np.int32, np.uint32, np.int64, np.uint64]
#        func = np.random.randint
#        high = np.array([1])
#        low = np.array([0])
#
#        for dt in itype:
#            out = func(low, high, dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)
#
#            out = func(low[0], high, dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)
#
#            out = func(low, high[0], dtype=dt)
#            self.assert_equal(out.shape, self.tgtShape)

    def test_three_arg_funcs(self):
        funcs = [np.random.noncentral_f, np.random.triangular,
                 np.random.hypergeometric]

        for func in funcs:
            out = func(self.argOne, self.argTwo, self.argThree)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne[0], self.argTwo, self.argThree)
            assert_equal(out.shape, self.tgtShape)

            out = func(self.argOne, self.argTwo[0], self.argThree)
            assert_equal(out.shape, self.tgtShape)
| 69,988 | Python | 39.085338 | 92 | 0.567955 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_extending.py | import os
import pytest
import shutil
import subprocess
import sys
import warnings
import numpy as np
from numpy.distutils.misc_util import exec_mod_from_location
# Optional build-toolchain dependencies.  Each name is bound to None when
# the package is missing or unusable so the tests below can skip themselves.
try:
    import cffi
except ImportError:
    cffi = None

if sys.flags.optimize > 1:
    # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
    # cffi cannot succeed
    cffi = None

try:
    with warnings.catch_warnings(record=True) as w:
        # numba issue gh-4733
        warnings.filterwarnings('always', '', DeprecationWarning)
        import numba
except ImportError:
    numba = None

try:
    import cython
    from Cython.Compiler.Version import version as cython_version
except ImportError:
    cython = None
else:
    from numpy.compat import _pep440
    # Cython 0.29.30 is required for Python 3.11 and there are
    # other fixes in the 0.29 series that are needed even for earlier
    # Python versions.
    # Note: keep in sync with the one in pyproject.toml
    required_version = '0.29.30'
    if _pep440.parse(cython_version) < _pep440.Version(required_version):
        # too old or wrong cython, skip the test
        cython = None
@pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.mark.slow
def test_cython(tmp_path):
    """Build the Cython examples bundled with numpy.random in a temp copy
    of the package, check the generated C used numpy's __init__.pxd, then
    import the built extensions and exercise one of them.
    """
    srcdir = os.path.join(os.path.dirname(__file__), '..')
    shutil.copytree(srcdir, tmp_path / 'random')
    # build the examples and "install" them into a temporary directory
    build_dir = tmp_path / 'random' / '_examples' / 'cython'
    subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
                           '--prefix', str(tmp_path / 'installdir'),
                           '--single-version-externally-managed',
                           '--record', str(tmp_path/ 'tmp_install_log.txt'),
                           ],
                          cwd=str(build_dir),
                          )
    # gh-16162: make sure numpy's __init__.pxd was used for cython
    # not really part of this test, but it is a convenient place to check
    with open(build_dir / 'extending.c') as fid:
        txt_to_find = 'NumPy API declarations from "numpy/__init__.pxd"'
        for i, line in enumerate(fid):
            if txt_to_find in line:
                break
        else:
            assert False, ("Could not find '{}' in C file, "
                           "wrong pxd used".format(txt_to_find))
    # get the path to the so's
    so1 = so2 = None
    with open(tmp_path /'tmp_install_log.txt') as fid:
        for line in fid:
            if 'extending.' in line:
                so1 = line.strip()
            if 'extending_distributions' in line:
                so2 = line.strip()
    assert so1 is not None
    assert so2 is not None
    # import the so's without adding the directory to sys.path
    exec_mod_from_location('extending', so1)
    extending_distributions = exec_mod_from_location(
            'extending_distributions', so2)
    # actually test the cython c-extension
    from numpy.random import PCG64
    values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
    assert values.shape == (10,)
    assert values.dtype == np.float64
@pytest.mark.skipif(numba is None or cffi is None,
                    reason="requires numba and cffi")
def test_numba():
    # Smoke test: the numba example must import (and thus compile) cleanly.
    from numpy.random._examples.numba import extending  # noqa: F401
@pytest.mark.skipif(cffi is None, reason="requires cffi")
def test_cffi():
    # Smoke test: the cffi example must import cleanly.
    from numpy.random._examples.cffi import extending  # noqa: F401
| 3,488 | Python | 35.34375 | 78 | 0.62586 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_regression.py | import sys
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
from numpy import random
import numpy as np
class TestRegression:
    # Regression tests for the module-level np.random API; each test
    # references the ticket/issue it guards against.

    def test_VonMises_range(self):
        # Make sure generated random variables are in [-pi, pi].
        # Regression test for ticket #986.
        for mu in np.linspace(-7., 7., 5):
            r = random.mtrand.vonmises(mu, 1, 50)
            assert_(np.all(r > -np.pi) and np.all(r <= np.pi))

    def test_hypergeometric_range(self):
        # Test for ticket #921
        assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
        assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))

        # Test for ticket #5623
        args = [
            (2**20 - 2, 2**20 - 2, 2**20 - 2),  # Check for 32-bit systems
        ]
        is_64bits = sys.maxsize > 2**32
        if is_64bits and sys.platform != 'win32':
            # Check for 64-bit systems
            args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
        for arg in args:
            assert_(np.random.hypergeometric(*arg) > 0)

    def test_logseries_convergence(self):
        # Test for ticket #923
        N = 1000
        np.random.seed(0)
        rvsn = np.random.logseries(0.8, size=N)
        # these two frequency counts should be close to theoretical
        # numbers with this large sample
        # theoretical large N result is 0.49706795
        freq = np.sum(rvsn == 1) / N
        msg = f'Frequency was {freq:f}, should be > 0.45'
        assert_(freq > 0.45, msg)
        # theoretical large N result is 0.19882718
        freq = np.sum(rvsn == 2) / N
        msg = f'Frequency was {freq:f}, should be < 0.23'
        assert_(freq < 0.23, msg)

    def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
            assert_array_equal(np.array(shuffled, dtype=object), expected)

    def test_call_within_randomstate(self):
        # Check that custom RandomState does not call into global state
        m = np.random.RandomState()
        res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
        for i in range(3):
            np.random.seed(i)
            m.seed(4321)
            # If m.state is not honored, the result will change
            assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)

    def test_multivariate_normal_size_types(self):
        # Test for multivariate_normal issue with 'size' argument.
        # Check that the multivariate_normal size argument can be a
        # numpy integer.
        np.random.multivariate_normal([0], [[0]], size=1)
        np.random.multivariate_normal([0], [[0]], size=np.int_(1))
        np.random.multivariate_normal([0], [[0]], size=np.int64(1))

    def test_beta_small_parameters(self):
        # Test that beta with small a and b parameters does not produce
        # NaNs due to roundoff errors causing 0 / 0, gh-5851
        np.random.seed(1234567890)
        x = np.random.beta(0.0001, 0.0001, size=100)
        assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta')

    def test_choice_sum_of_probs_tolerance(self):
        # The sum of probs should be 1.0 with some tolerance.
        # For low precision dtypes the tolerance was too tight.
        # See numpy github issue 6123.
        np.random.seed(1234)
        a = [1, 2, 3]
        counts = [4, 4, 2]
        for dt in np.float16, np.float32, np.float64:
            probs = np.array(counts, dtype=dt) / sum(counts)
            c = np.random.choice(a, p=probs)
            assert_(c in a)
            assert_raises(ValueError, np.random.choice, a, p=probs*0.9)

    def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)], dtype=object)

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_permutation_subclass(self):
        # permutation must round-trip ndarray subclasses and objects
        # implementing __array__ without mutating the input
        class N(np.ndarray):
            pass

        np.random.seed(1)
        orig = np.arange(3).view(N)
        perm = np.random.permutation(orig)
        assert_array_equal(perm, np.array([0, 2, 1]))
        assert_array_equal(orig, np.arange(3).view(N))

        class M:
            a = np.arange(5)

            def __array__(self):
                return self.a

        np.random.seed(1)
        m = M()
        perm = np.random.permutation(m)
        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
        assert_array_equal(m.__array__(), np.arange(5))
| 5,439 | Python | 35.266666 | 77 | 0.561684 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_randomstate_regression.py | import sys
import pytest
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
import numpy as np
from numpy import random
class TestRegression:
    # Regression tests mirroring test_regression.py but exercising the
    # legacy ``numpy.random`` module-level functions directly.

    def test_VonMises_range(self):
        # Make sure generated random variables are in [-pi, pi].
        # Regression test for ticket #986.
        for mu in np.linspace(-7., 7., 5):
            r = random.vonmises(mu, 1, 50)
            assert_(np.all(r > -np.pi) and np.all(r <= np.pi))

    def test_hypergeometric_range(self):
        # Test for ticket #921
        assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
        assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))

        # Test for ticket #5623
        args = [
            (2**20 - 2, 2**20 - 2, 2**20 - 2),  # Check for 32-bit systems
        ]
        is_64bits = sys.maxsize > 2**32
        if is_64bits and sys.platform != 'win32':
            # Check for 64-bit systems
            args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
        for arg in args:
            assert_(random.hypergeometric(*arg) > 0)

    def test_logseries_convergence(self):
        # Test for ticket #923
        N = 1000
        random.seed(0)
        rvsn = random.logseries(0.8, size=N)
        # these two frequency counts should be close to theoretical
        # numbers with this large sample
        # theoretical large N result is 0.49706795
        freq = np.sum(rvsn == 1) / N
        msg = f'Frequency was {freq:f}, should be > 0.45'
        assert_(freq > 0.45, msg)
        # theoretical large N result is 0.19882718
        freq = np.sum(rvsn == 2) / N
        msg = f'Frequency was {freq:f}, should be < 0.23'
        assert_(freq < 0.23, msg)

    def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
            assert_array_equal(np.array(shuffled, dtype=object), expected)

    def test_call_within_randomstate(self):
        # Check that custom RandomState does not call into global state
        m = random.RandomState()
        res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
        for i in range(3):
            random.seed(i)
            m.seed(4321)
            # If m.state is not honored, the result will change
            assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)

    def test_multivariate_normal_size_types(self):
        # Test for multivariate_normal issue with 'size' argument.
        # Check that the multivariate_normal size argument can be a
        # numpy integer.
        random.multivariate_normal([0], [[0]], size=1)
        random.multivariate_normal([0], [[0]], size=np.int_(1))
        random.multivariate_normal([0], [[0]], size=np.int64(1))

    def test_beta_small_parameters(self):
        # Test that beta with small a and b parameters does not produce
        # NaNs due to roundoff errors causing 0 / 0, gh-5851
        random.seed(1234567890)
        x = random.beta(0.0001, 0.0001, size=100)
        assert_(not np.any(np.isnan(x)), 'Nans in random.beta')

    def test_choice_sum_of_probs_tolerance(self):
        # The sum of probs should be 1.0 with some tolerance.
        # For low precision dtypes the tolerance was too tight.
        # See numpy github issue 6123.
        random.seed(1234)
        a = [1, 2, 3]
        counts = [4, 4, 2]
        for dt in np.float16, np.float32, np.float64:
            probs = np.array(counts, dtype=dt) / sum(counts)
            c = random.choice(a, p=probs)
            assert_(c in a)
            assert_raises(ValueError, random.choice, a, p=probs*0.9)

    def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)], dtype=object)

        for _ in range(1000):
            random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_permutation_subclass(self):
        # permutation must round-trip ndarray subclasses and objects
        # implementing __array__ without mutating the input
        class N(np.ndarray):
            pass

        random.seed(1)
        orig = np.arange(3).view(N)
        perm = random.permutation(orig)
        assert_array_equal(perm, np.array([0, 2, 1]))
        assert_array_equal(orig, np.arange(3).view(N))

        class M:
            a = np.arange(5)

            def __array__(self):
                return self.a

        random.seed(1)
        m = M()
        perm = random.permutation(m)
        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
        assert_array_equal(m.__array__(), np.arange(5))

    def test_warns_byteorder(self):
        # GH 13159
        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
        with pytest.deprecated_call(match='non-native byteorder is not'):
            random.randint(0, 200, size=10, dtype=other_byteord_dt)

    def test_named_argument_initialization(self):
        # GH 13669
        rs1 = np.random.RandomState(123456789)
        rs2 = np.random.RandomState(seed=123456789)
        assert rs1.randint(0, 100) == rs2.randint(0, 100)

    def test_choice_retun_dtype(self):
        # GH 9867
        c = np.random.choice(10, p=[.1]*10, size=2)
        assert c.dtype == np.dtype(int)
        c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
        assert c.dtype == np.dtype(int)
        c = np.random.choice(10, size=2)
        assert c.dtype == np.dtype(int)
        c = np.random.choice(10, replace=False, size=2)
        assert c.dtype == np.dtype(int)

    @pytest.mark.skipif(np.iinfo('l').max < 2**32,
                        reason='Cannot test with 32-bit C long')
    def test_randint_117(self):
        # GH 14189
        random.seed(0)
        expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
                             2588848963, 3684848379, 2340255427, 3638918503,
                             1819583497, 2678185683], dtype='int64')
        actual = random.randint(2**32, size=10)
        assert_array_equal(actual, expected)

    def test_p_zero_stream(self):
        # Regression test for gh-14522.  Ensure that future versions
        # generate the same variates as version 1.16.
        np.random.seed(12345)
        assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
                           [0, 0, 0, 1, 1])

    def test_n_zero_stream(self):
        # Regression test for gh-14522.  Ensure that future versions
        # generate the same variates as version 1.16.
        np.random.seed(8675309)
        expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                             [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
        assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
                           expected)
def test_multinomial_empty():
    """gh-20483: an empty pvals sequence must yield a trailing 0 axis."""
    cases = [
        ((10, []), {}, (0,)),
        ((3, []), {'size': (7, 5, 3)}, (7, 5, 3, 0)),
    ]
    for args, kwargs, expected_shape in cases:
        assert random.multinomial(*args, **kwargs).shape == expected_shape
def test_multinomial_1d_pval():
    """gh-20483: a scalar pvals argument must be rejected with TypeError."""
    expected_msg = "pvals must be a 1-d"
    with pytest.raises(TypeError, match=expected_msg):
        random.multinomial(10, 0.3)
| 7,917 | Python | 35.488479 | 77 | 0.563092 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_randomstate.py | import hashlib
import pickle
import sys
import warnings
import numpy as np
import pytest
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy.random import MT19937, PCG64
from numpy import random
# Arguments used to draw from each integer-valued distribution when
# hashing its output stream in the stream-compatibility tests.
INT_FUNCS = {'binomial': (100.0, 0.6),
             'geometric': (.5,),
             'hypergeometric': (20, 20, 10),
             'logseries': (.5,),
             'multinomial': (20, np.ones(6) / 6.0),
             'negative_binomial': (100, .5),
             'poisson': (10.0,),
             'zipf': (2,),
             }

if np.iinfo(int).max < 2**32:
    # Windows and some 32-bit platforms, e.g., ARM
    # Expected digests when the default integer type is 32 bits wide.
    INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263',
                       'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb',
                       'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf',
                       'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67',
                       'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3',
                       'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824',
                       'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7',
                       'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f',
                       }
else:
    # Expected digests when the default integer type is 64 bits wide.
    INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112',
                       'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9',
                       'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657',
                       'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db',
                       'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605',
                       'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61',
                       'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4',
                       'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45',
                       }
@pytest.fixture(scope='module', params=INT_FUNCS)
def int_func(request):
    # Yields (name, call_args, expected_hash) for each integer distribution.
    return (request.param, INT_FUNCS[request.param],
            INT_FUNC_HASHES[request.param])
def assert_mt19937_state_equal(a, b):
    """Assert that two MT19937 state dicts describe the same state.

    Compares the bit-generator name, the raw state (key array and
    position), and the cached-Gaussian fields, in that order.
    """
    assert_equal(a['bit_generator'], b['bit_generator'])
    for field in ('key', 'pos'):
        assert_array_equal(a['state'][field], b['state'][field])
    for field in ('has_gauss', 'gauss'):
        assert_equal(a[field], b[field])
class TestSeed:
    """Seeding semantics of the legacy RandomState."""

    def test_scalar(self):
        # A scalar seed fully determines the stream.
        for seed, expected in ((0, 684), (4294967295, 419)):
            assert_equal(random.RandomState(seed).randint(1000), expected)

    def test_array(self):
        # Array-like seeds: equivalent sequences give identical streams.
        for seed, expected in ((range(10), 468),
                               (np.arange(10), 468),
                               ([0], 973),
                               ([4294967295], 265)):
            assert_equal(random.RandomState(seed).randint(1000), expected)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, random.RandomState, -0.5)
        assert_raises(ValueError, random.RandomState, -1)

    def test_invalid_array(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, random.RandomState, [-0.5])
        for bad_seed in ([-1], [4294967296],
                         [1, 2, 4294967296], [1, -2, 4294967296]):
            assert_raises(ValueError, random.RandomState, bad_seed)

    def test_invalid_array_shape(self):
        # gh-9832: empty or multi-dimensional seed arrays are rejected
        for bad_seed in (np.array([], dtype=np.int64),
                         [[1, 2, 3]],
                         [[1, 2, 3], [4, 5, 6]]):
            assert_raises(ValueError, random.RandomState, bad_seed)

    def test_cannot_seed(self):
        # A RandomState wrapping a non-MT19937 bit generator cannot be
        # re-seeded through the legacy seed() API.
        rs = random.RandomState(PCG64(0))
        with assert_raises(TypeError):
            rs.seed(1234)

    def test_invalid_initialization(self):
        # Passing the bit-generator *class* (not an instance) is an error.
        assert_raises(ValueError, random.RandomState, MT19937)
class TestBinomial:
    """Edge cases of the legacy binomial sampler."""

    def test_n_zero(self):
        # binomial(0, p) is identically zero for any p in [0, 1],
        # for both scalar and array n.  This addresses issue #3480.
        n_zeros = np.zeros(2, dtype='int')
        for prob in (0, .5, 1):
            assert_(random.binomial(0, prob) == 0)
            assert_array_equal(random.binomial(n_zeros, prob), n_zeros)

    def test_p_is_nan(self):
        # Issue #4571: a NaN probability must be rejected.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random.seed(1432985819)
non_contig = random.multinomial(100, pvals=pvals)
random.seed(1432985819)
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multinomial_pvals_float32(self):
x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
pvals = x / x.sum()
match = r"[\w\s]*pvals array is cast to 64-bit floating"
with pytest.raises(ValueError, match=match):
random.multinomial(1, pvals)
class TestSetState:
    """Round-trips of the generator state through get_state/set_state,
    including the cached second Gaussian and legacy tuple formats.  The
    tests depend on the exact order of draws against a fixed seed."""
    def setup_method(self):
        """Capture a fresh state so each test can rewind the stream."""
        self.seed = 1234567890
        self.random_state = random.RandomState(self.seed)
        self.state = self.random_state.get_state()
    def test_basic(self):
        """Restoring the saved state replays the same integer stream."""
        old = self.random_state.tomaxint(16)
        self.random_state.set_state(self.state)
        new = self.random_state.tomaxint(16)
        assert_(np.all(old == new))
    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        old = self.random_state.standard_normal(size=3)
        self.random_state.set_state(self.state)
        new = self.random_state.standard_normal(size=3)
        assert_(np.all(old == new))
    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.
        self.random_state.standard_normal()
        state = self.random_state.get_state()
        old = self.random_state.standard_normal(size=3)
        self.random_state.set_state(state)
        new = self.random_state.standard_normal(size=3)
        assert_(np.all(old == new))
    def test_backwards_compatibility(self):
        # Make sure we can accept old state tuples that do not have the
        # cached Gaussian value.
        old_state = self.state[:-2]
        x1 = self.random_state.standard_normal(size=16)
        self.random_state.set_state(old_state)
        x2 = self.random_state.standard_normal(size=16)
        self.random_state.set_state(self.state)
        x3 = self.random_state.standard_normal(size=16)
        assert_(np.all(x1 == x2))
        assert_(np.all(x1 == x3))
    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.random_state.negative_binomial(0.5, 0.5)
    def test_get_state_warning(self):
        """Legacy-format get_state on a non-MT19937 generator warns and
        falls back to the dict form."""
        rs = random.RandomState(PCG64())
        with suppress_warnings() as sup:
            w = sup.record(RuntimeWarning)
            state = rs.get_state()
            assert_(len(w) == 1)
            assert isinstance(state, dict)
            assert state['bit_generator'] == 'PCG64'
    def test_invalid_legacy_state_setting(self):
        """Unknown generator names and malformed state dicts are rejected."""
        state = self.random_state.get_state()
        new_state = ('Unknown', ) + state[1:]
        assert_raises(ValueError, self.random_state.set_state, new_state)
        assert_raises(TypeError, self.random_state.set_state,
                      np.array(new_state, dtype=object))
        state = self.random_state.get_state(legacy=False)
        del state['bit_generator']
        assert_raises(ValueError, self.random_state.set_state, state)
    def test_pickle(self):
        """Pickling preserves the full state, including the cached Gaussian."""
        self.random_state.seed(0)
        self.random_state.random_sample(100)
        self.random_state.standard_normal()
        pickled = self.random_state.get_state(legacy=False)
        assert_equal(pickled['has_gauss'], 1)
        rs_unpick = pickle.loads(pickle.dumps(self.random_state))
        unpickled = rs_unpick.get_state(legacy=False)
        assert_mt19937_state_equal(pickled, unpickled)
    def test_state_setting(self):
        """__getstate__/__setstate__ round-trip the attribute state."""
        attr_state = self.random_state.__getstate__()
        self.random_state.standard_normal()
        self.random_state.__setstate__(attr_state)
        state = self.random_state.get_state(legacy=False)
        assert_mt19937_state_equal(attr_state, state)
    def test_repr(self):
        """repr names the wrapped bit generator."""
        assert repr(self.random_state).startswith('RandomState(MT19937)')
class TestRandint:
    """Bounds handling, dtype support and bit-exact repeatability of the
    legacy ``randint``.  Stream hashes are pinned per dtype."""
    rfunc = random.randint
    # valid integer/boolean types
    itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]
    def test_unsupported_type(self):
        """Floating dtypes are not valid for randint."""
        assert_raises(TypeError, self.rfunc, 1, dtype=float)
    def test_bounds_checking(self):
        """Out-of-range and inverted bounds raise for every supported dtype."""
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
    def test_rng_zero_and_extremes(self):
        """A width-1 interval always returns its single admissible value."""
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = (lbnd + ubnd)//2
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
    def test_full_range(self):
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            try:
                self.rfunc(lbnd, ubnd, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))
    def test_in_bounds_fuzz(self):
        # Don't use fixed seed
        random.seed()
        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)
        vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)
    def test_repeatability(self):
        # We use a sha256 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
               'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
               'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
               'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
               'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
               'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
               'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
               'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
               'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
        for dt in self.itype[1:]:
            random.seed(1234)
            # view as little endian for hash
            if sys.byteorder == 'little':
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
            res = hashlib.sha256(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
        random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
        res = hashlib.sha256(val).hexdigest()
        assert_(tgt[np.dtype(bool).name] == res)
    @pytest.mark.skipif(np.iinfo('l').max < 2**32,
                        reason='Cannot test with 32-bit C long')
    def test_repeatability_32bit_boundary_broadcasting(self):
        """Broadcast bounds straddling 2**32 reproduce a pinned array."""
        desired = np.array([[[3992670689, 2438360420, 2557845020],
                             [4107320065, 4142558326, 3216529513],
                             [1605979228, 2807061240, 665605495]],
                            [[3211410639, 4128781000, 457175120],
                             [1712592594, 1282922662, 3081439808],
                             [3997822960, 2008322436, 1563495165]],
                            [[1398375547, 4269260146, 115316740],
                             [3414372578, 3437564012, 2112038651],
                             [3572980305, 2260248732, 3908238631]],
                            [[2561372503, 223155946, 3127879445],
                             [ 441282060, 3514786552, 2148440361],
                             [1629275283, 3479737011, 3003195987]],
                            [[ 412181688, 940383289, 3047321305],
                             [2978368172, 764731833, 2282559898],
                             [ 105711276, 720447391, 3596512484]]])
        for size in [None, (5, 3, 3)]:
            random.seed(12345)
            x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1],
                           size=size)
            assert_array_equal(x, desired if size is not None else desired[0])
    def test_int64_uint64_corner_case(self):
        # When stored in Numpy arrays, `lbnd` is casted
        # as np.int64, and `ubnd` is casted as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it be round down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.
        dt = np.int64
        tgt = np.iinfo(np.int64).max
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
        # None of these function calls should
        # generate a ValueError now.
        actual = random.randint(lbnd, ubnd, dtype=dt)
        assert_equal(actual, tgt)
    def test_respect_dtype_singleton(self):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            assert_equal(sample.dtype, np.dtype(dt))
        for dt in (bool, int, np.compat.long):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            assert_(not hasattr(sample, 'dtype'))
            assert_equal(type(sample), dt)
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
    def setup_method(self):
        """Fixed seed each test reseeds from, for reproducible draws."""
        self.seed = 1234567890
    def test_rand(self):
        """rand(3, 2) from the fixed seed reproduces a pinned matrix."""
        random.seed(self.seed)
        actual = random.rand(3, 2)
        desired = np.array([[0.61879477158567997, 0.59162362775974664],
                            [0.88868358904449662, 0.89165480011560816],
                            [0.4575674820298663, 0.7781880808593471]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_rand_singleton(self):
        """A no-argument rand() yields the first scalar of the stream."""
        random.seed(self.seed)
        actual = random.rand()
        desired = 0.61879477158567997
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_randn(self):
        """randn reproduces pinned normals; the scalar form matches [0, 0]."""
        random.seed(self.seed)
        actual = random.randn(3, 2)
        desired = np.array([[1.34016345771863121, 1.73759122771936081],
                            [1.498988344300628, -0.2286433324536169],
                            [2.031033998682787, 2.17032494605655257]])
        assert_array_almost_equal(actual, desired, decimal=15)
        random.seed(self.seed)
        actual = random.randn()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
    def test_randint(self):
        """randint over [-99, 99) reproduces a pinned integer matrix."""
        random.seed(self.seed)
        actual = random.randint(-99, 99, size=(3, 2))
        desired = np.array([[31, 3],
                            [-52, 41],
                            [-48, -66]])
        assert_array_equal(actual, desired)
    def test_random_integers(self):
        """Deprecated random_integers warns once and matches the randint
        stream; the one-argument form is offset by the lower bound."""
        random.seed(self.seed)
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            actual = random.random_integers(-99, 99, size=(3, 2))
            assert_(len(w) == 1)
        desired = np.array([[31, 3],
                            [-52, 41],
                            [-48, -66]])
        assert_array_equal(actual, desired)
        random.seed(self.seed)
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            actual = random.random_integers(198, size=(3, 2))
            assert_(len(w) == 1)
        assert_array_equal(actual, desired + 100)
    def test_tomaxint(self):
        """tomaxint pins different streams for 32- vs 64-bit C long."""
        random.seed(self.seed)
        rs = random.RandomState(self.seed)
        actual = rs.tomaxint(size=(3, 2))
        if np.iinfo(int).max == 2147483647:
            desired = np.array([[1328851649, 731237375],
                                [1270502067, 320041495],
                                [1908433478, 499156889]], dtype=np.int64)
        else:
            desired = np.array([[5707374374421908479, 5456764827585442327],
                                [8196659375100692377, 8224063923314595285],
                                [4220315081820346526, 7177518203184491332]],
                               dtype=np.int64)
        assert_equal(actual, desired)
        rs.seed(self.seed)
        actual = rs.tomaxint()
        assert_equal(actual, desired[0, 0])
    def test_random_integers_max_int(self):
        # Tests whether random_integers can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            actual = random.random_integers(np.iinfo('l').max,
                                            np.iinfo('l').max)
            assert_(len(w) == 1)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)
        with suppress_warnings() as sup:
            w = sup.record(DeprecationWarning)
            typer = np.dtype('l').type
            actual = random.random_integers(typer(np.iinfo('l').max),
                                            typer(np.iinfo('l').max))
            assert_(len(w) == 1)
        assert_equal(actual, desired)
    def test_random_integers_deprecated(self):
        """Both call forms of random_integers raise DeprecationWarning
        when warnings are promoted to errors."""
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            # DeprecationWarning raised with high == None
            assert_raises(DeprecationWarning,
                          random.random_integers,
                          np.iinfo('l').max)
            # DeprecationWarning raised with high != None
            assert_raises(DeprecationWarning,
                          random.random_integers,
                          np.iinfo('l').max, np.iinfo('l').max)
    def test_random_sample(self):
        """random_sample matches the rand stream for array and scalar."""
        random.seed(self.seed)
        actual = random.random_sample((3, 2))
        desired = np.array([[0.61879477158567997, 0.59162362775974664],
                            [0.88868358904449662, 0.89165480011560816],
                            [0.4575674820298663, 0.7781880808593471]])
        assert_array_almost_equal(actual, desired, decimal=15)
        random.seed(self.seed)
        actual = random.random_sample()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
    def test_choice_uniform_replace(self):
        """Uniform choice with replacement, pinned draws."""
        random.seed(self.seed)
        actual = random.choice(4, 4)
        desired = np.array([2, 3, 2, 3])
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_replace(self):
        """Weighted choice with replacement, pinned draws."""
        random.seed(self.seed)
        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([1, 1, 2, 2])
        assert_array_equal(actual, desired)
    def test_choice_uniform_noreplace(self):
        """Uniform choice without replacement, pinned draws."""
        random.seed(self.seed)
        actual = random.choice(4, 3, replace=False)
        desired = np.array([0, 1, 3])
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_noreplace(self):
        """Weighted choice without replacement, pinned draws."""
        random.seed(self.seed)
        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([2, 3, 1])
        assert_array_equal(actual, desired)
    def test_choice_noninteger(self):
        """choice over a non-numeric population, pinned draws."""
        random.seed(self.seed)
        actual = random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['c', 'd', 'c', 'd'])
        assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
    def test_choice_return_shape(self):
        """choice returns a scalar for size=None, a 0-d array for size=(),
        the requested shape otherwise, and handles zero-size requests."""
        p = [0.1, 0.9]
        # Check scalar
        assert_(np.isscalar(random.choice(2, replace=True)))
        assert_(np.isscalar(random.choice(2, replace=False)))
        assert_(np.isscalar(random.choice(2, replace=True, p=p)))
        assert_(np.isscalar(random.choice(2, replace=False, p=p)))
        assert_(np.isscalar(random.choice([1, 2], replace=True)))
        assert_(random.choice([None], replace=True) is None)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, replace=True) is a)
        # Check 0-d array
        s = tuple()
        assert_(not np.isscalar(random.choice(2, s, replace=True)))
        assert_(not np.isscalar(random.choice(2, s, replace=False)))
        assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
        assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
        assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
        assert_(random.choice([None], s, replace=True).ndim == 0)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, s, replace=True).item() is a)
        # Check multi dimensional array
        s = (2, 3)
        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(random.choice(6, s, replace=True).shape, s)
        assert_equal(random.choice(6, s, replace=False).shape, s)
        assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
        # Check zero-size
        assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
        assert_equal(random.randint(0, -10, size=0).shape, (0,))
        assert_equal(random.randint(10, 10, size=0).shape, (0,))
        assert_equal(random.choice(0, size=0).shape, (0,))
        assert_equal(random.choice([], size=(0,)).shape, (0,))
        assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
                     (3, 0, 4))
        assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
    def test_choice_p_non_contiguous(self):
        """A strided p view must sample identically to a contiguous copy."""
        p = np.ones(10) / 5
        p[1::2] = 3.0
        random.seed(self.seed)
        non_contig = random.choice(5, 3, p=p[::2])
        random.seed(self.seed)
        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
        assert_array_equal(non_contig, contig)
    def test_bytes(self):
        """bytes(10) reproduces a pinned byte string from the fixed seed."""
        random.seed(self.seed)
        actual = random.bytes(10)
        desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
        assert_equal(actual, desired)
    def test_shuffle(self):
        # Test lists, arrays (of various dtypes), and multidimensional versions
        # of both, c-contiguous or not:
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-11442
                     lambda x: (np.asarray([(i, i) for i in x],
                                           [("a", int), ("b", int)])
                                .view(np.recarray)),
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object, (1,)),
                                           ("b", np.int32, (1,))])]:
            random.seed(self.seed)
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            random.shuffle(alist)
            actual = alist
            desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
            assert_array_equal(actual, desired)
    def test_shuffle_masked(self):
        # gh-3263
        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
        a_orig = a.copy()
        b_orig = b.copy()
        for i in range(50):
            random.shuffle(a)
            assert_equal(
                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
            random.shuffle(b)
            assert_equal(
                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_invalid_objects(self):
x = np.array(3)
assert_raises(TypeError, random.shuffle, x)
    def test_permutation(self):
        """permutation matches the shuffle stream for lists, 2-D columns
        and integer arguments; strings and floats are rejected."""
        random.seed(self.seed)
        alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        actual = random.permutation(alist)
        desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
        assert_array_equal(actual, desired)
        random.seed(self.seed)
        arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
        actual = random.permutation(arr_2d)
        assert_array_equal(actual, np.atleast_2d(desired).T)
        random.seed(self.seed)
        bad_x_str = "abcd"
        assert_raises(IndexError, random.permutation, bad_x_str)
        random.seed(self.seed)
        bad_x_float = 1.2
        assert_raises(IndexError, random.permutation, bad_x_float)
        integer_val = 10
        desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
        random.seed(self.seed)
        actual = random.permutation(integer_val)
        assert_array_equal(actual, desired)
    def test_beta(self):
        """beta(.1, .9) reproduces pinned draws from the fixed seed."""
        random.seed(self.seed)
        actual = random.beta(.1, .9, size=(3, 2))
        desired = np.array(
            [[1.45341850513746058e-02, 5.31297615662868145e-04],
             [1.85366619058432324e-06, 4.19214516800110563e-03],
             [1.58405155108498093e-04, 1.26252891949397652e-04]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_binomial(self):
        """binomial truncates float n; array and scalar forms pinned."""
        random.seed(self.seed)
        actual = random.binomial(100.123, .456, size=(3, 2))
        desired = np.array([[37, 43],
                            [42, 48],
                            [46, 45]])
        assert_array_equal(actual, desired)
        random.seed(self.seed)
        actual = random.binomial(100.123, .456)
        desired = 37
        assert_array_equal(actual, desired)
    def test_chisquare(self):
        """chisquare(50) reproduces pinned draws from the fixed seed."""
        random.seed(self.seed)
        actual = random.chisquare(50, size=(3, 2))
        desired = np.array([[63.87858175501090585, 68.68407748911370447],
                            [65.77116116901505904, 47.09686762438974483],
                            [72.3828403199695174, 74.18408615260374006]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_dirichlet(self):
        """dirichlet pinned draws; negative alpha rejected; scalar form
        matches the first row of the sized draw."""
        random.seed(self.seed)
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha, size=(3, 2))
        desired = np.array([[[0.54539444573611562, 0.45460555426388438],
                             [0.62345816822039413, 0.37654183177960598]],
                            [[0.55206000085785778, 0.44793999914214233],
                             [0.58964023305154301, 0.41035976694845688]],
                            [[0.59266909280647828, 0.40733090719352177],
                             [0.56974431743975207, 0.43025568256024799]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        bad_alpha = np.array([5.4e-01, -1.0e-16])
        assert_raises(ValueError, random.dirichlet, bad_alpha)
        random.seed(self.seed)
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha)
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
    def test_dirichlet_alpha_non_contiguous(self):
        """A strided alpha view must sample like a contiguous copy."""
        a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
        alpha = a[::2]
        random.seed(self.seed)
        non_contig = random.dirichlet(alpha, size=(3, 2))
        random.seed(self.seed)
        contig = random.dirichlet(np.ascontiguousarray(alpha),
                                  size=(3, 2))
        assert_array_almost_equal(non_contig, contig)
    def test_exponential(self):
        """exponential(1.1234) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.exponential(1.1234, size=(3, 2))
        desired = np.array([[1.08342649775011624, 1.00607889924557314],
                            [2.46628830085216721, 2.49668106809923884],
                            [0.68717433461363442, 1.69175666993575979]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
    def test_f(self):
        """f(12, 77) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.f(12, 77, size=(3, 2))
        desired = np.array([[1.21975394418575878, 1.75135759791559775],
                            [1.44803115017146489, 1.22108959480396262],
                            [1.02176975757740629, 1.34431827623300415]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_gamma(self):
        """gamma(5, 3) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.gamma(5, 3, size=(3, 2))
        desired = np.array([[24.60509188649287182, 28.54993563207210627],
                            [26.13476110204064184, 12.56988482927716078],
                            [31.71863275789960568, 33.30143302795922011]])
        assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
    def test_geometric(self):
        """geometric(.123456789) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.geometric(.123456789, size=(3, 2))
        desired = np.array([[8, 7],
                            [17, 17],
                            [5, 12]])
        assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
    def test_gumbel(self):
        """gumbel(loc, scale) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[0.19591898743416816, 0.34405539668096674],
                            [-1.4492522252274278, -1.47374816298446865],
                            [1.10651090478803416, -0.69535848626236174]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
    def test_hypergeometric(self):
        """Pinned draws, plus the degenerate nbad=0 / ngood=0 corners."""
        random.seed(self.seed)
        actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
        desired = np.array([[10, 10],
                            [10, 10],
                            [9, 9]])
        assert_array_equal(actual, desired)
        # Test nbad = 0
        actual = random.hypergeometric(5, 0, 3, size=4)
        desired = np.array([3, 3, 3, 3])
        assert_array_equal(actual, desired)
        actual = random.hypergeometric(15, 0, 12, size=4)
        desired = np.array([12, 12, 12, 12])
        assert_array_equal(actual, desired)
        # Test ngood = 0
        actual = random.hypergeometric(0, 5, 3, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
        actual = random.hypergeometric(0, 15, 12, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
    def test_laplace(self):
        """laplace(loc, scale) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[0.66599721112760157, 0.52829452552221945],
                            [3.12791959514407125, 3.18202813572992005],
                            [-0.05391065675859356, 1.74901336242837324]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
    def test_logistic(self):
        """logistic(loc, scale) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[1.09232835305011444, 0.8648196662399954],
                            [4.27818590694950185, 4.33897006346929714],
                            [-0.21682183359214885, 2.63373365386060332]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_lognormal(self):
        """lognormal(mean, sigma) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
        desired = np.array([[16.50698631688883822, 36.54846706092654784],
                            [22.67886599981281748, 0.71617561058995771],
                            [65.72798501792723869, 86.84341601437161273]])
        assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
    def test_logseries(self):
        """logseries(p=.923456789) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.logseries(p=.923456789, size=(3, 2))
        desired = np.array([[2, 2],
                            [6, 17],
                            [3, 6]])
        assert_array_equal(actual, desired)
    def test_logseries_zero(self):
        """p == 0 is the degenerate case: the sampler returns 1."""
        assert random.logseries(0) == 1
    @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.])
    def test_logseries_exceptions(self, value):
        """p outside [0, 1) raises for scalar, contiguous and strided input."""
        with np.errstate(invalid="ignore"):
            with pytest.raises(ValueError):
                random.logseries(value)
            with pytest.raises(ValueError):
                # contiguous path:
                random.logseries(np.array([value] * 10))
            with pytest.raises(ValueError):
                # non-contiguous path:
                random.logseries(np.array([value] * 10)[::2])
    def test_multinomial(self):
        """multinomial(20, uniform over 6) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[4, 3, 5, 4, 2, 2],
                             [5, 2, 8, 2, 2, 1]],
                            [[3, 4, 3, 6, 0, 4],
                             [2, 1, 4, 3, 6, 4]],
                            [[4, 4, 2, 5, 2, 3],
                             [4, 3, 4, 2, 3, 4]]])
        assert_array_equal(actual, desired)
    def test_multivariate_normal(self):
        """Pinned draws, default size, covariance validity checking
        (warn/ignore/raise) and mean/cov shape validation."""
        random.seed(self.seed)
        mean = (.123456789, 10)
        cov = [[1, 0], [0, 1]]
        size = (3, 2)
        actual = random.multivariate_normal(mean, cov, size)
        desired = np.array([[[1.463620246718631, 11.73759122771936],
                             [1.622445133300628, 9.771356667546383]],
                            [[2.154490787682787, 12.170324946056553],
                             [1.719909438201865, 9.230548443648306]],
                            [[0.689515026297799, 9.880729819607714],
                             [-0.023054015651998, 9.201096623542879]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check for default size, was raising deprecation warning
        actual = random.multivariate_normal(mean, cov)
        desired = np.array([0.895289569463708, 9.17180864067987])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check that non positive-semidefinite covariance warns with
        # RuntimeWarning
        mean = [0, 0]
        cov = [[1, 2], [2, 1]]
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        # and that it doesn't warn with RuntimeWarning check_valid='ignore'
        assert_no_warnings(random.multivariate_normal, mean, cov,
                           check_valid='ignore')
        # and that it raises with RuntimeWarning check_valid='raises'
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')
        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
        with suppress_warnings() as sup:
            random.multivariate_normal(mean, cov)
            w = sup.record(RuntimeWarning)
            assert len(w) == 0
        mu = np.zeros(2)
        cov = np.eye(2)
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='other')
        assert_raises(ValueError, random.multivariate_normal,
                      np.zeros((2, 1, 1)), cov)
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.empty((3, 2)))
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.eye(3))
    def test_negative_binomial(self):
        """negative_binomial(100, .12345) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
        desired = np.array([[848, 841],
                            [892, 611],
                            [779, 647]])
        assert_array_equal(actual, desired)
    def test_negative_binomial_exceptions(self):
        """NaN probabilities raise, for scalar and array arguments alike."""
        with suppress_warnings() as sup:
            sup.record(RuntimeWarning)
            assert_raises(ValueError, random.negative_binomial, 100, np.nan)
            assert_raises(ValueError, random.negative_binomial, 100,
                          [np.nan] * 10)
    def test_noncentral_chisquare(self):
        """Pinned draws for large, small and zero noncentrality."""
        random.seed(self.seed)
        actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
        desired = np.array([[23.91905354498517511, 13.35324692733826346],
                            [31.22452661329736401, 16.60047399466177254],
                            [5.03461598262724586, 17.94973089023519464]])
        assert_array_almost_equal(actual, desired, decimal=14)
        actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
        desired = np.array([[1.47145377828516666, 0.15052899268012659],
                            [0.00943803056963588, 1.02647251615666169],
                            [0.332334982684171, 0.15451287602753125]])
        assert_array_almost_equal(actual, desired, decimal=14)
        random.seed(self.seed)
        actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
        desired = np.array([[9.597154162763948, 11.725484450296079],
                            [10.413711048138335, 3.694475922923986],
                            [13.484222138963087, 14.377255424602957]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f(self):
        """noncentral_f(5, 2, 1) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                     size=(3, 2))
        desired = np.array([[1.40598099674926669, 0.34207973179285761],
                            [3.57715069265772545, 7.92632662577829805],
                            [0.43741599463544162, 1.1774208752428319]])
        assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
    def test_normal(self):
        """normal(loc, scale) reproduces pinned draws."""
        random.seed(self.seed)
        actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[2.80378370443726244, 3.59863924443872163],
                            [3.121433477601256, -0.33382987590723379],
                            [4.18552478636557357, 4.46410668111310471]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_normal_0(self):
        """scale=0 degenerates to loc (default 0); negative zero scale is rejected."""
        assert_equal(random.normal(scale=0), 0)
        assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random.seed(self.seed)
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random.seed(self.seed)
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random.seed(self.seed)
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random.seed(self.seed)
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
    def test_rayleigh_0(self):
        """scale=0 is degenerate (returns 0); negative zero scale is rejected."""
        assert_equal(random.rayleigh(scale=0), 0)
        assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random.seed(self.seed)
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random.seed(self.seed)
actual = random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
random.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gamma_0(self):
        """shape=0 is degenerate (returns 0); negative zero shape is rejected."""
        assert_equal(random.standard_gamma(shape=0), 0)
        assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random.seed(self.seed)
actual = random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn_singleton(self):
random.seed(self.seed)
actual = random.randn()
desired = np.array(1.34016345771863121)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
random.seed(self.seed)
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random.seed(self.seed)
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random.seed(self.seed)
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random.seed(self.seed)
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_large(self):
# guard against changes in RandomState when Generator is fixed
random.seed(self.seed)
actual = random.vonmises(mu=0., kappa=1e7, size=3)
desired = np.array([4.634253748521111e-04,
3.558873596114509e-04,
-2.337119622577433e-04])
assert_array_almost_equal(actual, desired, decimal=8)
def test_vonmises_nan(self):
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random.seed(self.seed)
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random.seed(self.seed)
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_weibull_0(self):
        """a=0 yields all zeros; negative zero a is rejected."""
        random.seed(self.seed)
        assert_equal(random.weibull(a=0, size=12), np.zeros(12))
        assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random.seed(self.seed)
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast:
    """Each distribution is exercised with one parameter expanded to length 3
    (`param * 3`) while the others stay length 1; all variants must yield the
    same three reference draws after reseeding, and invalid parameter values
    must raise ValueError in every broadcast position."""
    # tests that functions that broadcast behave
    # correctly when presented with non-scalar arguments
    def setup_method(self):
        self.seed = 123456789
    def set_seed(self):
        # Reset the global stream so each broadcast variant sees identical draws.
        random.seed(self.seed)
    def test_uniform(self):
        low = [0]
        high = [1]
        uniform = random.uniform
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])
        self.set_seed()
        actual = uniform(low * 3, high)
        assert_array_almost_equal(actual, desired, decimal=14)
        self.set_seed()
        actual = uniform(low, high * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_normal(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        normal = random.normal
        desired = np.array([2.2129019979039612,
                            2.1283977976520019,
                            1.8417114045748335])
        self.set_seed()
        actual = normal(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc * 3, bad_scale)
        self.set_seed()
        actual = normal(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, bad_scale * 3)
    def test_beta(self):
        a = [1]
        b = [2]
        bad_a = [-1]
        bad_b = [-2]
        beta = random.beta
        desired = np.array([0.19843558305989056,
                            0.075230336409423643,
                            0.24976865978980844])
        self.set_seed()
        actual = beta(a * 3, b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a * 3, b)
        assert_raises(ValueError, beta, a * 3, bad_b)
        self.set_seed()
        actual = beta(a, b * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a, b * 3)
        assert_raises(ValueError, beta, a, bad_b * 3)
    def test_exponential(self):
        scale = [1]
        bad_scale = [-1]
        exponential = random.exponential
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])
        self.set_seed()
        actual = exponential(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, exponential, bad_scale * 3)
    def test_standard_gamma(self):
        shape = [1]
        bad_shape = [-1]
        std_gamma = random.standard_gamma
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])
        self.set_seed()
        actual = std_gamma(shape * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, std_gamma, bad_shape * 3)
    def test_gamma(self):
        shape = [1]
        scale = [2]
        bad_shape = [-1]
        bad_scale = [-2]
        gamma = random.gamma
        desired = np.array([1.5221370731769048,
                            1.5277256455738331,
                            1.4248762625178359])
        self.set_seed()
        actual = gamma(shape * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape * 3, scale)
        assert_raises(ValueError, gamma, shape * 3, bad_scale)
        self.set_seed()
        actual = gamma(shape, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, scale * 3)
        assert_raises(ValueError, gamma, shape, bad_scale * 3)
    def test_f(self):
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [-1]
        bad_dfden = [-2]
        f = random.f
        desired = np.array([0.80038951638264799,
                            0.86768719635363512,
                            2.7251095168386801])
        self.set_seed()
        actual = f(dfnum * 3, dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
        assert_raises(ValueError, f, dfnum * 3, bad_dfden)
        self.set_seed()
        actual = f(dfnum, dfden * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
        assert_raises(ValueError, f, dfnum, bad_dfden * 3)
    def test_noncentral_f(self):
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [-1]
        bad_nonc = [-2]
        nonc_f = random.noncentral_f
        desired = np.array([9.1393943263705211,
                            13.025456344595602,
                            8.8018098359100545])
        self.set_seed()
        actual = nonc_f(dfnum * 3, dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
        self.set_seed()
        actual = nonc_f(dfnum, dfden * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
        self.set_seed()
        actual = nonc_f(dfnum, dfden, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
    def test_noncentral_f_small_df(self):
        self.set_seed()
        desired = np.array([6.869638627492048, 0.785880199263955])
        actual = random.noncentral_f(0.9, 0.9, 2, size=2)
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_chisquare(self):
        df = [1]
        bad_df = [-1]
        chisquare = random.chisquare
        desired = np.array([0.57022801133088286,
                            0.51947702108840776,
                            0.1320969254923558])
        self.set_seed()
        actual = chisquare(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, chisquare, bad_df * 3)
    def test_noncentral_chisquare(self):
        df = [1]
        nonc = [2]
        bad_df = [-1]
        bad_nonc = [-2]
        nonc_chi = random.noncentral_chisquare
        desired = np.array([9.0015599467913763,
                            4.5804135049718742,
                            6.0872302432834564])
        self.set_seed()
        actual = nonc_chi(df * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
        self.set_seed()
        actual = nonc_chi(df, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
    def test_standard_t(self):
        df = [1]
        bad_df = [-1]
        t = random.standard_t
        desired = np.array([3.0702872575217643,
                            5.8560725167361607,
                            1.0274791436474273])
        self.set_seed()
        actual = t(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, t, bad_df * 3)
        assert_raises(ValueError, random.standard_t, bad_df * 3)
    def test_vonmises(self):
        mu = [2]
        kappa = [1]
        bad_kappa = [-1]
        vonmises = random.vonmises
        desired = np.array([2.9883443664201312,
                            -2.7064099483995943,
                            -1.8672476700665914])
        self.set_seed()
        actual = vonmises(mu * 3, kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
        self.set_seed()
        actual = vonmises(mu, kappa * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
    def test_pareto(self):
        a = [1]
        bad_a = [-1]
        pareto = random.pareto
        desired = np.array([1.1405622680198362,
                            1.1465519762044529,
                            1.0389564467453547])
        self.set_seed()
        actual = pareto(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, pareto, bad_a * 3)
        assert_raises(ValueError, random.pareto, bad_a * 3)
    def test_weibull(self):
        a = [1]
        bad_a = [-1]
        weibull = random.weibull
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])
        self.set_seed()
        actual = weibull(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, weibull, bad_a * 3)
        assert_raises(ValueError, random.weibull, bad_a * 3)
    def test_power(self):
        a = [1]
        bad_a = [-1]
        power = random.power
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])
        self.set_seed()
        actual = power(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, power, bad_a * 3)
        assert_raises(ValueError, random.power, bad_a * 3)
    def test_laplace(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        laplace = random.laplace
        desired = np.array([0.067921356028507157,
                            0.070715642226971326,
                            0.019290950698972624])
        self.set_seed()
        actual = laplace(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc * 3, bad_scale)
        self.set_seed()
        actual = laplace(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, bad_scale * 3)
    def test_gumbel(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        gumbel = random.gumbel
        desired = np.array([0.2730318639556768,
                            0.26936705726291116,
                            0.33906220393037939])
        self.set_seed()
        actual = gumbel(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc * 3, bad_scale)
        self.set_seed()
        actual = gumbel(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, bad_scale * 3)
    def test_logistic(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        logistic = random.logistic
        desired = np.array([0.13152135837586171,
                            0.13675915696285773,
                            0.038216792802833396])
        self.set_seed()
        actual = logistic(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc * 3, bad_scale)
        self.set_seed()
        actual = logistic(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc, bad_scale * 3)
        # scale=0 is degenerate: the draw equals loc exactly.
        assert_equal(random.logistic(1.0, 0.0), 1.0)
    def test_lognormal(self):
        mean = [0]
        sigma = [1]
        bad_sigma = [-1]
        lognormal = random.lognormal
        desired = np.array([9.1422086044848427,
                            8.4013952870126261,
                            6.3073234116578671])
        self.set_seed()
        actual = lognormal(mean * 3, sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
        assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
        self.set_seed()
        actual = lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
        assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
    def test_rayleigh(self):
        scale = [1]
        bad_scale = [-1]
        rayleigh = random.rayleigh
        desired = np.array([1.2337491937897689,
                            1.2360119924878694,
                            1.1936818095781789])
        self.set_seed()
        actual = rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, rayleigh, bad_scale * 3)
    def test_wald(self):
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        wald = random.wald
        desired = np.array([0.11873681120271318,
                            0.12450084820795027,
                            0.9096122728408238])
        self.set_seed()
        actual = wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean * 3, scale)
        assert_raises(ValueError, wald, mean * 3, bad_scale)
        assert_raises(ValueError, random.wald, bad_mean * 3, scale)
        assert_raises(ValueError, random.wald, mean * 3, bad_scale)
        self.set_seed()
        actual = wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean, scale * 3)
        assert_raises(ValueError, wald, mean, bad_scale * 3)
        assert_raises(ValueError, wald, 0.0, 1)
        assert_raises(ValueError, wald, 0.5, 0.0)
    def test_triangular(self):
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # right * 2 == [3, 3]: both bound and mode equal right, making the
        # left/mode pair invalid together.
        bad_left_two, bad_mode_two = right * 2
        triangular = random.triangular
        desired = np.array([2.03339048710429,
                            2.0347400359389356,
                            2.0095991069536208])
        self.set_seed()
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
                      right)
        self.set_seed()
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
                      right)
        self.set_seed()
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
                      right * 3)
        assert_raises(ValueError, triangular, 10., 0., 20.)
        assert_raises(ValueError, triangular, 10., 25., 20.)
        assert_raises(ValueError, triangular, 10., 10., 10.)
    def test_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        binom = random.binomial
        desired = np.array([1, 1, 1])
        self.set_seed()
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)
        self.set_seed()
        actual = binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)
    def test_negative_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        neg_binom = random.negative_binomial
        desired = np.array([1, 0, 1])
        self.set_seed()
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
        self.set_seed()
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
    def test_poisson(self):
        # lam above RandomState's documented _poisson_lam_max must raise.
        max_lam = random.RandomState()._poisson_lam_max
        lam = [1]
        bad_lam_one = [-1]
        bad_lam_two = [max_lam * 2]
        poisson = random.poisson
        desired = np.array([1, 1, 0])
        self.set_seed()
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)
    def test_zipf(self):
        a = [2]
        bad_a = [0]
        zipf = random.zipf
        desired = np.array([2, 2, 1])
        self.set_seed()
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, zipf, np.nan)
            assert_raises(ValueError, zipf, [0, 0, np.nan])
    def test_geometric(self):
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        geom = random.geometric
        desired = np.array([2, 2, 2])
        self.set_seed()
        actual = geom(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geom, bad_p_one * 3)
        assert_raises(ValueError, geom, bad_p_two * 3)
    def test_hypergeometric(self):
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [0]
        bad_nsample_two = [4]
        hypergeom = random.hypergeometric
        desired = np.array([1, 1, 1])
        self.set_seed()
        actual = hypergeom(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
        self.set_seed()
        actual = hypergeom(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
        self.set_seed()
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
        assert_raises(ValueError, hypergeom, -1, 10, 20)
        assert_raises(ValueError, hypergeom, 10, -1, 20)
        assert_raises(ValueError, hypergeom, 10, 10, 0)
        assert_raises(ValueError, hypergeom, 10, 10, 25)
    def test_logseries(self):
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        logseries = random.logseries
        desired = np.array([1, 1, 1])
        self.set_seed()
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread:
    """Drawing from per-thread RandomState instances must reproduce the
    serially generated sequences exactly."""
    # make sure each state produces the same sequence even in threads
    def setup_method(self):
        self.seeds = range(4)
    def check_function(self, function, sz):
        # Run `function(RandomState(seed), out_row)` once per seed, first in
        # parallel threads and then serially, and compare the filled buffers.
        from threading import Thread
        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)
        # threaded generation
        t = [Thread(target=function, args=(random.RandomState(s), o))
             for s, o in zip(self.seeds, out1)]
        [x.start() for x in t]
        [x.join() for x in t]
        # the same serial
        for s, o in zip(self.seeds, out2):
            function(random.RandomState(s), o)
        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)
    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))
    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))
    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
    """Length-1 array parameters must broadcast to shape-(1,) results
    (regression coverage for gh-4263)."""
    def setup_method(self):
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        funcs = (random.exponential, random.standard_gamma,
                 random.chisquare, random.standard_t,
                 random.pareto, random.weibull,
                 random.power, random.rayleigh,
                 random.poisson, random.zipf,
                 random.geometric, random.logseries)
        probfuncs = (random.geometric, random.logseries)
        for func in funcs:
            # Probability-parameter distributions need p < 1.0.
            arg = np.array([0.5]) if func in probfuncs else self.argOne
            assert_equal(func(arg).shape, self.tgtShape)

    def test_two_arg_funcs(self):
        funcs = (random.uniform, random.normal,
                 random.beta, random.gamma,
                 random.f, random.noncentral_chisquare,
                 random.vonmises, random.laplace,
                 random.gumbel, random.logistic,
                 random.lognormal, random.wald,
                 random.binomial, random.negative_binomial)
        probfuncs = (random.binomial, random.negative_binomial)
        for func in funcs:
            # Probability-parameter distributions need p <= 1.
            second = np.array([0.5]) if func in probfuncs else self.argTwo
            # array/array, scalar/array, array/scalar all broadcast to (1,).
            for a, b in ((self.argOne, second),
                         (self.argOne[0], second),
                         (self.argOne, second[0])):
                assert_equal(func(a, b).shape, self.tgtShape)

    def test_three_arg_funcs(self):
        funcs = [random.noncentral_f, random.triangular,
                 random.hypergeometric]
        for func in funcs:
            for args in ((self.argOne, self.argTwo, self.argThree),
                         (self.argOne[0], self.argTwo, self.argThree),
                         (self.argOne, self.argTwo[0], self.argThree)):
                assert_equal(func(*args).shape, self.tgtShape)
# Ensure returned array dtype is correct for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, sha256 = int_func
f = getattr(random, fname)
actual = f(*args, size=2)
assert_(actual.dtype == np.dtype('l'))
def test_integer_repeat(int_func):
random.seed(123456789)
fname, args, sha256 = int_func
f = getattr(random, fname)
val = f(*args, size=1000000)
if sys.byteorder != 'little':
val = val.byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(res == sha256)
def test_broadcast_size_error():
    # GH-16833
    # A requested `size` must be broadcast-compatible with the parameter
    # shapes; (2, 1) cannot hold two broadcast parameters of shape (2,).
    with pytest.raises(ValueError):
        random.binomial(1, [0.3, 0.7], size=(2, 1))
    with pytest.raises(ValueError):
        random.binomial([1, 2], 0.3, size=(2, 1))
    with pytest.raises(ValueError):
        random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
| 81,906 | Python | 39.308563 | 111 | 0.571228 |
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_generator_mt19937_regressions.py
from numpy.testing import (assert_, assert_array_equal)
import numpy as np
import pytest
from numpy.random import Generator, MT19937
# Module-level generator; tests that need determinism rebind a seeded one.
mt19937 = Generator(MT19937())
class TestRegression:
def test_vonmises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
assert_(mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
mt19937 = Generator(MT19937(0))
rvsn = mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
mt19937 = Generator(MT19937(12345))
shuffled = np.array(t, dtype=object)
mt19937.shuffle(shuffled)
expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom BitGenerator does not call into global state
res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
for i in range(3):
mt19937 = Generator(MT19937(i))
m = Generator(MT19937(4321))
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
mt19937.multivariate_normal([0], [[0]], size=1)
mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
mt19937 = Generator(MT19937(1234567890))
x = mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
mt19937 = Generator(MT19937(1234))
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
mt19937.choice(a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
mt19937 = Generator(MT19937(1234))
a = np.array(['a', 'a' * 1000])
for _ in range(100):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
mt19937 = Generator(MT19937(1234))
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
    def test_permutation_subclass(self):
        """permutation() must handle ndarray subclasses and objects that
        expose __array__ without mutating the input."""
        # An ndarray subclass: output is permuted, input left untouched.
        class N(np.ndarray):
            pass
        mt19937 = Generator(MT19937(1))
        orig = np.arange(3).view(N)
        perm = mt19937.permutation(orig)
        assert_array_equal(perm, np.array([2, 0, 1]))
        assert_array_equal(orig, np.arange(3).view(N))
        # An arbitrary object convertible via __array__: same guarantees.
        class M:
            a = np.arange(5)
            def __array__(self):
                return self.a
        mt19937 = Generator(MT19937(1))
        m = M()
        perm = mt19937.permutation(m)
        assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
        assert_array_equal(m.__array__(), np.arange(5))
    def test_gamma_0(self):
        """standard_gamma(0.0) must return exactly 0.0, scalar or array."""
        # NOTE(review): uses a module-level `mt19937` generator defined
        # elsewhere in this file -- confirm it is seeded at import time.
        assert mt19937.standard_gamma(0.0) == 0.0
        assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
        # dtype='float' maps to float32 for standard_gamma.
        actual = mt19937.standard_gamma([0.0], dtype='float')
        expected = np.array([0.], dtype=np.float32)
        assert_array_equal(actual, expected)
| 5,639 | Python | 36.350993 | 77 | 0.577762 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_direct.py | import os
from os.path import join
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
assert_raises)
import pytest
from numpy.random import (
Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence,
SFC64, default_rng
)
from numpy.random._common import interface
# Feature detection for the optional cffi/ctypes interface tests.
try:
    import cffi  # noqa: F401
    MISSING_CFFI = False
except ImportError:
    MISSING_CFFI = True
try:
    import ctypes  # noqa: F401
    MISSING_CTYPES = False
except ImportError:
    # Bug fix: this branch previously set False, so the ctypes tests
    # would run (and fail) on builds without ctypes.
    MISSING_CTYPES = True
if sys.flags.optimize > 1:
    # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
    # cffi cannot succeed
    MISSING_CFFI = True
# Directory holding the reference CSV datasets used by the tests below.
pwd = os.path.dirname(os.path.abspath(__file__))
def assert_state_equal(actual, target):
    """Recursively assert that two bit-generator state dicts are equal."""
    for key, value in actual.items():
        if isinstance(value, dict):
            # Nested state mapping: recurse.
            assert_state_equal(value, target[key])
        elif isinstance(value, np.ndarray):
            assert_array_equal(value, target[key])
        else:
            assert value == target[key]
def uint32_to_float32(u):
    """Map uint32 draws onto float32 uniforms in [0, 1) (top 24 bits)."""
    scaled = (u >> np.uint32(8)) * (1.0 / 2**24)
    return scaled.astype(np.float32)
def uniform32_from_uint64(x):
    """Split 64-bit draws into 32-bit halves (low word first) and map
    each half to a float32 uniform."""
    values = np.uint64(x)
    mask = np.uint64(0xffffffff)
    low = np.array(values & mask, dtype=np.uint32)
    high = np.array(values >> np.uint64(32), dtype=np.uint32)
    interleaved = np.column_stack([low, high]).ravel()
    return uint32_to_float32(interleaved)
def uniform32_from_uint53(x):
    """Reduce 53-bit draws to 32-bit words, then map to float32 uniforms."""
    shifted = np.uint64(x) >> np.uint64(16)
    words = np.uint32(shifted & np.uint64(0xffffffff))
    return uint32_to_float32(words)
def uniform32_from_uint32(x):
    # 32-bit draws already match the converter's input width.
    return uint32_to_float32(x)
def uniform32_from_uint(x, bits):
    """Dispatch to the float32 reference converter for a bit width."""
    converters = {
        64: uniform32_from_uint64,
        53: uniform32_from_uint53,
        32: uniform32_from_uint32,
    }
    if bits not in converters:
        raise NotImplementedError
    return converters[bits](x)
def uniform_from_uint(x, bits):
    # Reference float64 conversion for the given bit width.
    # NOTE(review): falls through (returns None) for unsupported widths;
    # callers in this file only ever pass 64/63/53/32.
    if bits in (64, 63, 53):
        return uniform_from_uint64(x)
    elif bits == 32:
        return uniform_from_uint32(x)
def uniform_from_uint64(x):
    """Top 53 bits of each 64-bit draw, scaled into [0, 1)."""
    top53 = x >> np.uint64(11)
    return top53 * (1.0 / 9007199254740992.0)
def uniform_from_uint32(x):
    """Combine consecutive 32-bit draws (27 + 26 bits) into float64
    uniforms in [0, 1), one output value per input pair."""
    n = len(x)
    out = np.empty(n // 2)
    for i in range(0, n, 2):
        hi = x[i] >> 5
        lo = x[i + 1] >> 6
        out[i // 2] = (hi * 67108864.0 + lo) / 9007199254740992.0
    return out
def uniform_from_dsfmt(x):
    # dSFMT emits doubles in [1, 2); shift them into [0, 1).
    return x.view(np.double) - 1.0
def gauss_from_uint(x, n, bits):
    """Reference Gaussians computed from raw draws via the polar method.

    Converts the raw stream ``x`` to float64 uniforms for the given bit
    width (or dSFMT doubles for ``bits == 'dsfmt'``), then applies the
    same polar rejection transform as the legacy generator, returning
    ``n`` values.
    """
    if bits in (64, 63):
        doubles = uniform_from_uint64(x)
    elif bits == 32:
        doubles = uniform_from_uint32(x)
    else:  # bits == 'dsfmt'
        doubles = uniform_from_dsfmt(x)
    gauss = []
    loc = 0
    x1 = x2 = 0.0
    while len(gauss) < n:
        r2 = 2
        while r2 >= 1.0 or r2 == 0.0:
            # Draw a point in [-1, 1)^2; retry until it lands strictly
            # inside the unit circle and away from the origin.
            x1 = 2.0 * doubles[loc] - 1.0
            x2 = 2.0 * doubles[loc + 1] - 1.0
            r2 = x1 * x1 + x2 * x2
            loc += 2
        f = np.sqrt(-2.0 * np.log(r2) / r2)
        # Output order is (x2, x1) -- matches the C implementation.
        gauss.append(f * x2)
        gauss.append(f * x1)
    return gauss[:n]
def test_seedsequence():
    """SeedSequence state round-trips and abstract bases are sealed."""
    from numpy.random.bit_generator import (ISeedSequence,
                                            ISpawnableSeedSequence,
                                            SeedlessSeedSequence)
    first = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
    first.spawn(10)
    clone = SeedSequence(**first.state)
    assert_equal(first.state, clone.state)
    assert_equal(first.n_children_spawned, clone.n_children_spawned)
    # The interfaces cannot be instantiated themselves.
    for abstract in (ISeedSequence, ISpawnableSeedSequence):
        assert_raises(TypeError, abstract)
    dummy = SeedlessSeedSequence()
    assert_raises(NotImplementedError, dummy.generate_state, 10)
    assert len(dummy.spawn(10)) == 10
class Base:
    """Shared reference tests run against every BitGenerator subclass.

    Concrete subclasses configure the generator class, its bit width and
    the expected-output CSV datasets in ``setup_class``.
    """
    # Defaults; overridden per subclass in setup_class.
    dtype = np.uint64
    data2 = data1 = {}
    @classmethod
    def setup_class(cls):
        # Placeholder configuration -- each concrete test class overrides
        # these with its own generator, bit width and reference datasets.
        cls.bit_generator = PCG64
        cls.bits = 64
        cls.dtype = np.uint64
        cls.seed_error_type = TypeError
        cls.invalid_init_types = []
        cls.invalid_init_values = []
    @classmethod
    def _read_csv(cls, filename):
        # Reference file format: first line "seed, s1, s2, ...", remaining
        # lines carry one expected raw draw in the last comma field.
        with open(filename) as csv:
            seed = csv.readline()
            seed = seed.split(',')
            seed = [int(s.strip(), 0) for s in seed[1:]]
            data = []
            for line in csv:
                data.append(int(line.split(',')[-1].strip(), 0))
            return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
    def test_raw(self):
        # Raw draws must exactly match the reference stream, for both
        # vector and scalar requests, on both datasets.
        bit_generator = self.bit_generator(*self.data1['seed'])
        uints = bit_generator.random_raw(1000)
        assert_equal(uints, self.data1['data'])
        bit_generator = self.bit_generator(*self.data1['seed'])
        uints = bit_generator.random_raw()
        assert_equal(uints, self.data1['data'][0])
        bit_generator = self.bit_generator(*self.data2['seed'])
        uints = bit_generator.random_raw(1000)
        assert_equal(uints, self.data2['data'])
    def test_random_raw(self):
        # output=False advances the stream but returns nothing.
        bit_generator = self.bit_generator(*self.data1['seed'])
        uints = bit_generator.random_raw(output=False)
        assert uints is None
        uints = bit_generator.random_raw(1000, output=False)
        assert uints is None
    def test_gauss_inv(self):
        # Legacy RandomState gaussians must match the polar-method
        # reference computed directly from the raw stream.
        n = 25
        rs = RandomState(self.bit_generator(*self.data1['seed']))
        gauss = rs.standard_normal(n)
        assert_allclose(gauss,
                        gauss_from_uint(self.data1['data'], n, self.bits))
        rs = RandomState(self.bit_generator(*self.data2['seed']))
        gauss = rs.standard_normal(25)
        assert_allclose(gauss,
                        gauss_from_uint(self.data2['data'], n, self.bits))
    def test_uniform_double(self):
        # float64 uniforms must match the reference conversion of the
        # raw integer stream.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        vals = uniform_from_uint(self.data1['data'], self.bits)
        uniforms = rs.random(len(vals))
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float64)
        rs = Generator(self.bit_generator(*self.data2['seed']))
        vals = uniform_from_uint(self.data2['data'], self.bits)
        uniforms = rs.random(len(vals))
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float64)
    def test_uniform_float(self):
        # float32 uniforms follow the 24-bit reference conversion.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        vals = uniform32_from_uint(self.data1['data'], self.bits)
        uniforms = rs.random(len(vals), dtype=np.float32)
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float32)
        rs = Generator(self.bit_generator(*self.data2['seed']))
        vals = uniform32_from_uint(self.data2['data'], self.bits)
        uniforms = rs.random(len(vals), dtype=np.float32)
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float32)
    def test_repr(self):
        # repr() includes the class name and the object's address.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        assert 'Generator' in repr(rs)
        assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs)
    def test_str(self):
        # str() names the generator and bit generator, not the address.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        assert 'Generator' in str(rs)
        assert str(self.bit_generator.__name__) in str(rs)
        assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs)
    def test_pickle(self):
        # Pickling must round-trip the full state; SeedSequence too.
        import pickle
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        bitgen_pkl = pickle.dumps(bit_generator)
        reloaded = pickle.loads(bitgen_pkl)
        reloaded_state = reloaded.state
        assert_array_equal(Generator(bit_generator).standard_normal(1000),
                           Generator(reloaded).standard_normal(1000))
        assert bit_generator is not reloaded
        assert_state_equal(reloaded_state, state)
        ss = SeedSequence(100)
        aa = pickle.loads(pickle.dumps(ss))
        assert_equal(ss.state, aa.state)
    def test_invalid_state_type(self):
        # Assigning a non-dict state must raise TypeError.
        bit_generator = self.bit_generator(*self.data1['seed'])
        with pytest.raises(TypeError):
            bit_generator.state = {'1'}
    def test_invalid_state_value(self):
        # A state dict naming the wrong bit generator must be rejected.
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        state['bit_generator'] = 'otherBitGenerator'
        with pytest.raises(ValueError):
            bit_generator.state = state
    def test_invalid_init_type(self):
        # Seeds of an unsupported type raise TypeError.
        bit_generator = self.bit_generator
        for st in self.invalid_init_types:
            with pytest.raises(TypeError):
                bit_generator(*st)
    def test_invalid_init_values(self):
        # Out-of-range seeds raise ValueError/OverflowError.
        bit_generator = self.bit_generator
        for st in self.invalid_init_values:
            with pytest.raises((ValueError, OverflowError)):
                bit_generator(*st)
    def test_benchmark(self):
        # The private benchmark hook accepts known modes only.
        bit_generator = self.bit_generator(*self.data1['seed'])
        bit_generator._benchmark(1)
        bit_generator._benchmark(1, 'double')
        with pytest.raises(ValueError):
            bit_generator._benchmark(1, 'int32')
    @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
    def test_cffi(self):
        # The cffi interface is created lazily and then cached.
        bit_generator = self.bit_generator(*self.data1['seed'])
        cffi_interface = bit_generator.cffi
        assert isinstance(cffi_interface, interface)
        other_cffi_interface = bit_generator.cffi
        assert other_cffi_interface is cffi_interface
    @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
    def test_ctypes(self):
        # The ctypes interface is created lazily and then cached.
        bit_generator = self.bit_generator(*self.data1['seed'])
        ctypes_interface = bit_generator.ctypes
        assert isinstance(ctypes_interface, interface)
        other_ctypes_interface = bit_generator.ctypes
        assert other_ctypes_interface is ctypes_interface
    def test_getstate(self):
        # __getstate__ must agree with the .state property.
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        alt_state = bit_generator.__getstate__()
        assert_state_equal(state, alt_state)
class TestPhilox(Base):
    """Philox reference-data tests."""
    @classmethod
    def setup_class(cls):
        cls.bit_generator = Philox
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(
            join(pwd, './data/philox-testset-1.csv'))
        cls.data2 = cls._read_csv(
            join(pwd, './data/philox-testset-2.csv'))
        cls.seed_error_type = TypeError
        cls.invalid_init_types = []
        cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]
    def test_set_key(self):
        # Constructing from an extracted counter/key pair must reproduce
        # the state of the seeded generator.
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        keyed = self.bit_generator(counter=state['state']['counter'],
                                   key=state['state']['key'])
        assert_state_equal(bit_generator.state, keyed.state)
class TestPCG64(Base):
    """PCG64 reference-data tests."""
    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
        cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
        cls.seed_error_type = (ValueError, TypeError)
        cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
        cls.invalid_init_values = [(-1,)]
    def test_advance_symmetry(self):
        # Adding multiples of 2**128 to an advance step must not change
        # the resulting stream position.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        state = rs.bit_generator.state
        step = -0x9e3779b97f4a7c150000000000000000
        rs.bit_generator.advance(step)
        val_neg = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(2**128 + step)
        val_pos = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(10 * 2**128 + step)
        val_big = rs.integers(10)
        assert val_neg == val_pos
        assert val_big == val_pos
    def test_advange_large(self):
        # NOTE(review): "advange" typo kept -- renaming would change the
        # test id.  Verifies advance() against precomputed state values.
        rs = Generator(self.bit_generator(38219308213743))
        pcg = rs.bit_generator
        state = pcg.state["state"]
        initial_state = 287608843259529770491897792873167516365
        assert state["state"] == initial_state
        pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
        state = pcg.state["state"]
        advanced_state = 135275564607035429730177404003164635391
        assert state["state"] == advanced_state
class TestPCG64DXSM(Base):
    """PCG64DXSM reference-data tests."""
    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64DXSM
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv'))
        cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv'))
        cls.seed_error_type = (ValueError, TypeError)
        cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
        cls.invalid_init_values = [(-1,)]
    def test_advance_symmetry(self):
        # Adding multiples of 2**128 to an advance step must not change
        # the resulting stream position.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        state = rs.bit_generator.state
        step = -0x9e3779b97f4a7c150000000000000000
        rs.bit_generator.advance(step)
        val_neg = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(2**128 + step)
        val_pos = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(10 * 2**128 + step)
        val_big = rs.integers(10)
        assert val_neg == val_pos
        assert val_big == val_pos
    def test_advange_large(self):
        # NOTE(review): "advange" typo kept to preserve the test id.
        # Verifies advance() against precomputed state integers.
        rs = Generator(self.bit_generator(38219308213743))
        pcg = rs.bit_generator
        state = pcg.state
        initial_state = 287608843259529770491897792873167516365
        assert state["state"]["state"] == initial_state
        pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
        state = pcg.state["state"]
        advanced_state = 277778083536782149546677086420637664879
        assert state["state"] == advanced_state
class TestMT19937(Base):
    """MT19937 reference-data tests (32-bit output)."""
    @classmethod
    def setup_class(cls):
        cls.bit_generator = MT19937
        cls.bits = 32
        cls.dtype = np.uint32
        cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
        cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
        cls.seed_error_type = ValueError
        cls.invalid_init_types = []
        cls.invalid_init_values = [(-1,)]
    def test_seed_float_array(self):
        # Float seeds (scalar or array-like) are rejected outright.
        assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
        assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
        assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
        assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
        assert_raises(TypeError, self.bit_generator, [np.pi])
        assert_raises(TypeError, self.bit_generator, [0, np.pi])
    def test_state_tuple(self):
        # The legacy tuple state form (id, key, pos) -- optionally with
        # two legacy trailing entries -- is accepted by the state setter.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        bit_generator = rs.bit_generator
        state = bit_generator.state
        desired = rs.integers(2 ** 16)
        tup = (state['bit_generator'], state['state']['key'],
               state['state']['pos'])
        bit_generator.state = tup
        actual = rs.integers(2 ** 16)
        assert_equal(actual, desired)
        tup = tup + (0, 0.0)
        bit_generator.state = tup
        actual = rs.integers(2 ** 16)
        assert_equal(actual, desired)
class TestSFC64(Base):
    """SFC64 reference-data tests (all checks inherited from Base)."""
    @classmethod
    def setup_class(cls):
        cls.bit_generator = SFC64
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(
            join(pwd, './data/sfc64-testset-1.csv'))
        cls.data2 = cls._read_csv(
            join(pwd, './data/sfc64-testset-2.csv'))
        cls.seed_error_type = (ValueError, TypeError)
        cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
        cls.invalid_init_values = [(-1,)]
class TestDefaultRNG:
    """default_rng() construction semantics."""

    def test_seed(self):
        # Every supported seed form must yield a PCG64-backed Generator.
        for args in ((), (None,), (1234,), ([1234, 5678],)):
            generator = default_rng(*args)
            assert isinstance(generator.bit_generator, PCG64)

    def test_passthrough(self):
        # A BitGenerator gets wrapped; an existing Generator is returned
        # unchanged, still driving the same BitGenerator.
        bit_gen = Philox()
        wrapped = default_rng(bit_gen)
        assert wrapped.bit_generator is bit_gen
        same = default_rng(wrapped)
        assert same is wrapped
        assert same.bit_generator is bit_gen
| 16,429 | Python | 33.300626 | 83 | 0.601132 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_smoke.py | import pickle
from functools import partial
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_, assert_array_equal
from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64)
@pytest.fixture(scope='module',
                params=(np.bool_, np.int8, np.int16, np.int32, np.int64,
                        np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
    # Parametrized fixture: dependent tests run once per bool/int dtype.
    return request.param
def params_0(f):
    """Smoke-check a zero-parameter sampler's size handling."""
    # No size -> scalar result.
    assert_(np.isscalar(f()))
    # Positional int / tuple sizes -> matching output shapes.
    for size in (10, (10, 10), (10, 10, 10)):
        shape = (size,) if isinstance(size, int) else size
        assert_(f(size).shape == shape)
    # Keyword size is accepted too.
    assert_(f(size=(5, 5)).shape == (5, 5))
def params_1(f, bounded=False):
    """Smoke-check a one-parameter sampler with scalar and array
    arguments, including broadcasting against an explicit size.

    With ``bounded=True`` all arguments are scaled into (0, 1) for
    distributions whose parameter must lie in that range.
    """
    scalar = 5.0
    vec = np.arange(2.0, 12.0)
    mat = np.arange(2.0, 102.0).reshape((10, 10))
    cube = np.arange(2.0, 1002.0).reshape((10, 10, 10))
    pair = np.array([2.0, 3.0])
    col = np.arange(2.0, 12.0).reshape((1, 10, 1))
    if bounded:
        scalar = 0.5
        vec = vec / (1.5 * vec.max())
        mat = mat / (1.5 * mat.max())
        cube = cube / (1.5 * cube.max())
        pair = pair / (1.5 * pair.max())
        col = col / (1.5 * col.max())
    # Scalar, with and without an explicit size.
    f(scalar)
    f(scalar, size=(10, 10))
    # 1d / 2d / 3d parameter arrays.
    f(vec)
    f(mat)
    f(cube)
    # 1d parameter with a matching size.
    f(vec, size=10)
    # Broadcasting the parameter against the requested size.
    f(pair, size=(10, 2))
    f(col, size=(10, 10, 10))
def comp_state(state1, state2):
    """Recursively compare two bit-generator states; truthy when equal."""
    if isinstance(state1, dict):
        same = True
        for key in state1:
            same &= comp_state(state1[key], state2[key])
        return same
    if type(state1) != type(state2):
        # Mismatched types are never equal.
        return False
    if (isinstance(state1, (list, tuple, np.ndarray)) and
            isinstance(state2, (list, tuple, np.ndarray))):
        same = True
        for item1, item2 in zip(state1, state2):
            same &= comp_state(item1, item2)
        return same
    return state1 == state2
def warmup(rg, n=None):
    """Advance *rg* by drawing from a spread of distributions and dtypes.

    The exact call order matters: tests capture the state after warmup
    and expect a second generator warmed the same way to replay it.
    """
    if n is None:
        # Randomized draw count so successive warmups consume
        # different amounts of the stream.
        n = 11 + np.random.randint(0, 20)
    rg.standard_normal(n)
    rg.standard_normal(n)
    rg.standard_normal(n, dtype=np.float32)
    rg.standard_normal(n, dtype=np.float32)
    rg.integers(0, 2 ** 24, n, dtype=np.uint64)
    rg.integers(0, 2 ** 48, n, dtype=np.uint64)
    rg.standard_gamma(11.0, n)
    rg.standard_gamma(11.0, n, dtype=np.float32)
    rg.random(n, dtype=np.float64)
    rg.random(n, dtype=np.float32)
class RNG:
    @classmethod
    def setup_class(cls):
        # Overridden in test classes. Place holder to silence IDE noise
        cls.bit_generator = PCG64
        cls.advance = None
        cls.seed = [12345]
        cls.rg = Generator(cls.bit_generator(*cls.seed))
        # Snapshot used by _reset_state() to rewind the shared generator.
        cls.initial_state = cls.rg.bit_generator.state
        cls.seed_vector_bits = 64
        cls._extra_setup()
    @classmethod
    def _extra_setup(cls):
        # Shared parameter arrays exercised by the broadcasting tests.
        cls.vec_1d = np.arange(2.0, 102.0)
        cls.vec_2d = np.arange(2.0, 102.0)[None, :]
        cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
        cls.seed_error = TypeError
    def _reset_state(self):
        # Rewind the shared generator to the state captured in setup_class.
        self.rg.bit_generator.state = self.initial_state
def test_init(self):
rg = Generator(self.bit_generator())
state = rg.bit_generator.state
rg.standard_normal(1)
rg.standard_normal(1)
rg.bit_generator.state = state
new_state = rg.bit_generator.state
assert_(comp_state(state, new_state))
    def test_advance(self):
        # advance() must move the stream; skipped for generators that
        # do not expose it.
        state = self.rg.bit_generator.state
        if hasattr(self.rg.bit_generator, 'advance'):
            self.rg.bit_generator.advance(self.advance)
            assert_(not comp_state(state, self.rg.bit_generator.state))
        else:
            bitgen_name = self.rg.bit_generator.__class__.__name__
            pytest.skip(f'Advance is not supported by {bitgen_name}')
    def test_jump(self):
        # jumped() returns a new bit generator moved along the stream and
        # must be deterministic given the starting state.
        state = self.rg.bit_generator.state
        if hasattr(self.rg.bit_generator, 'jumped'):
            bit_gen2 = self.rg.bit_generator.jumped()
            jumped_state = bit_gen2.state
            assert_(not comp_state(state, jumped_state))
            # Consume some draws, rewind, jump again: same destination.
            self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
            self.rg.bit_generator.state = state
            bit_gen3 = self.rg.bit_generator.jumped()
            rejumped_state = bit_gen3.state
            assert_(comp_state(jumped_state, rejumped_state))
        else:
            bitgen_name = self.rg.bit_generator.__class__.__name__
            # Only SFC64 is allowed to lack "jumped".
            if bitgen_name not in ('SFC64',):
                raise AttributeError(f'no "jumped" in {bitgen_name}')
            pytest.skip(f'Jump is not supported by {bitgen_name}')
def test_uniform(self):
r = self.rg.uniform(-1.0, 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
    def test_uniform_array(self):
        # Bounds broadcasting: array low, both arrays, and array high.
        r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
        assert_(len(r) == 10)
        assert_((r > -1).all())
        assert_((r <= 0).all())
        r = self.rg.uniform(np.array([-1.0] * 10),
                            np.array([0.0] * 10), size=10)
        assert_(len(r) == 10)
        assert_((r > -1).all())
        assert_((r <= 0).all())
        r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
        assert_(len(r) == 10)
        assert_((r > -1).all())
        assert_((r <= 0).all())
    def test_random(self):
        # Shape smoke test for random() across the size forms.
        assert_(len(self.rg.random(10)) == 10)
        params_0(self.rg.random)
    def test_standard_normal_zig(self):
        # Shape smoke test.  NOTE(review): the "_zig" suffix presumably
        # refers to the ziggurat path -- behavior here is identical to
        # test_standard_normal.
        assert_(len(self.rg.standard_normal(10)) == 10)
    def test_standard_normal(self):
        # Shape smoke test across size forms.
        assert_(len(self.rg.standard_normal(10)) == 10)
        params_0(self.rg.standard_normal)
    def test_standard_gamma(self):
        # Scalar and array shape parameters, plus the params_1 size sweep.
        assert_(len(self.rg.standard_gamma(10, 10)) == 10)
        assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
        params_1(self.rg.standard_gamma)
    def test_standard_exponential(self):
        # Shape smoke test across size forms.
        assert_(len(self.rg.standard_exponential(10)) == 10)
        params_0(self.rg.standard_exponential)
    def test_standard_exponential_float(self):
        # float32 output dtype is honored across size forms.
        randoms = self.rg.standard_exponential(10, dtype='float32')
        assert_(len(randoms) == 10)
        assert randoms.dtype == np.float32
        params_0(partial(self.rg.standard_exponential, dtype='float32'))
    def test_standard_exponential_float_log(self):
        # float32 output via the inverse-CDF method.
        randoms = self.rg.standard_exponential(10, dtype='float32',
                                               method='inv')
        assert_(len(randoms) == 10)
        assert randoms.dtype == np.float32
        params_0(partial(self.rg.standard_exponential, dtype='float32',
                         method='inv'))
    def test_standard_cauchy(self):
        # Shape smoke test across size forms.
        assert_(len(self.rg.standard_cauchy(10)) == 10)
        params_0(self.rg.standard_cauchy)
    def test_standard_t(self):
        # Shape smoke test with the one-parameter sweep.
        assert_(len(self.rg.standard_t(10, 10)) == 10)
        params_1(self.rg.standard_t)
    def test_binomial(self):
        # Counts must always be non-negative.
        assert_(self.rg.binomial(10, .5) >= 0)
        assert_(self.rg.binomial(1000, .5) >= 0)
    def test_reset_state(self):
        # Restoring a saved state replays the identical draw.
        state = self.rg.bit_generator.state
        int_1 = self.rg.integers(2**31)
        self.rg.bit_generator.state = state
        int_2 = self.rg.integers(2**31)
        assert_(int_1 == int_2)
    def test_entropy_init(self):
        # Two unseeded generators must not share a state.
        rg = Generator(self.bit_generator())
        rg2 = Generator(self.bit_generator())
        assert_(not comp_state(rg.bit_generator.state,
                               rg2.bit_generator.state))
    def test_seed(self):
        # Equal seeds produce identical streams and states.
        rg = Generator(self.bit_generator(*self.seed))
        rg2 = Generator(self.bit_generator(*self.seed))
        rg.random()
        rg2.random()
        assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
    def test_reset_state_gauss(self):
        # A restored state reproduces normal draws exactly, even on a
        # generator of the same type seeded differently.
        rg = Generator(self.bit_generator(*self.seed))
        rg.standard_normal()
        state = rg.bit_generator.state
        n1 = rg.standard_normal(size=10)
        rg2 = Generator(self.bit_generator())
        rg2.bit_generator.state = state
        n2 = rg2.standard_normal(size=10)
        assert_array_equal(n1, n2)
    def test_reset_state_uint32(self):
        # A restored state reproduces uint32 integer draws exactly.
        rg = Generator(self.bit_generator(*self.seed))
        rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
        state = rg.bit_generator.state
        n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
        rg2 = Generator(self.bit_generator())
        rg2.bit_generator.state = state
        n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
        assert_array_equal(n1, n2)
    def test_reset_state_float(self):
        # A restored state reproduces float32 uniform draws exactly.
        rg = Generator(self.bit_generator(*self.seed))
        rg.random(dtype='float32')
        state = rg.bit_generator.state
        n1 = rg.random(size=10, dtype='float32')
        rg2 = Generator(self.bit_generator())
        rg2.bit_generator.state = state
        n2 = rg2.random(size=10, dtype='float32')
        assert_((n1 == n2).all())
def test_shuffle(self):
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
assert_((original != permuted).any())
    def test_permutation(self):
        # permutation() returns a reordered copy of the input.
        original = np.arange(200, 0, -1)
        permuted = self.rg.permutation(original)
        assert_((original != permuted).any())
    def test_beta(self):
        # Scalar/array parameter combinations, including 2d broadcasting.
        vals = self.rg.beta(2.0, 2.0, 10)
        assert_(len(vals) == 10)
        vals = self.rg.beta(np.array([2.0] * 10), 2.0)
        assert_(len(vals) == 10)
        vals = self.rg.beta(2.0, np.array([2.0] * 10))
        assert_(len(vals) == 10)
        vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
        assert_(len(vals) == 10)
        vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
        assert_(vals.shape == (10, 10))
    def test_bytes(self):
        # bytes(n) returns exactly n bytes.
        vals = self.rg.bytes(10)
        assert_(len(vals) == 10)
    def test_chisquare(self):
        # Shape smoke test with the one-parameter sweep.
        vals = self.rg.chisquare(2.0, 10)
        assert_(len(vals) == 10)
        params_1(self.rg.chisquare)
    def test_exponential(self):
        # Shape smoke test with the one-parameter sweep.
        vals = self.rg.exponential(2.0, 10)
        assert_(len(vals) == 10)
        params_1(self.rg.exponential)
    def test_f(self):
        # Shape smoke test.
        vals = self.rg.f(3, 1000, 10)
        assert_(len(vals) == 10)
    def test_gamma(self):
        # Shape smoke test.
        vals = self.rg.gamma(3, 2, 10)
        assert_(len(vals) == 10)
def test_geometric(self):
vals = self.rg.geometric(0.5, 10)
assert_(len(vals) == 10)
params_1(self.rg.exponential, bounded=True)
    def test_gumbel(self):
        # Shape smoke test.
        vals = self.rg.gumbel(2.0, 2.0, 10)
        assert_(len(vals) == 10)
    def test_laplace(self):
        # Shape smoke test.
        vals = self.rg.laplace(2.0, 2.0, 10)
        assert_(len(vals) == 10)
    def test_logitic(self):
        # Shape smoke test for logistic().  NOTE(review): method name has
        # a typo ("logitic"); kept to preserve the test id.
        vals = self.rg.logistic(2.0, 2.0, 10)
        assert_(len(vals) == 10)
    def test_logseries(self):
        # Shape smoke test.
        vals = self.rg.logseries(0.5, 10)
        assert_(len(vals) == 10)
    def test_negative_binomial(self):
        # Shape smoke test.
        vals = self.rg.negative_binomial(10, 0.2, 10)
        assert_(len(vals) == 10)
    def test_noncentral_chisquare(self):
        # Shape smoke test.
        vals = self.rg.noncentral_chisquare(10, 2, 10)
        assert_(len(vals) == 10)
    def test_noncentral_f(self):
        # Broadcasting each of the three parameters in turn.
        vals = self.rg.noncentral_f(3, 1000, 2, 10)
        assert_(len(vals) == 10)
        vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
        assert_(len(vals) == 10)
        vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
        assert_(len(vals) == 10)
        vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
        assert_(len(vals) == 10)
    def test_normal(self):
        # Shape smoke test.
        vals = self.rg.normal(10, 0.2, 10)
        assert_(len(vals) == 10)
    def test_pareto(self):
        # Shape smoke test.
        vals = self.rg.pareto(3.0, 10)
        assert_(len(vals) == 10)
    def test_poisson(self):
        # Scalar and array rates, plus the one-parameter sweep.
        vals = self.rg.poisson(10, 10)
        assert_(len(vals) == 10)
        vals = self.rg.poisson(np.array([10] * 10))
        assert_(len(vals) == 10)
        params_1(self.rg.poisson)
    def test_power(self):
        # Shape smoke test.
        vals = self.rg.power(0.2, 10)
        assert_(len(vals) == 10)
    def test_integers(self):
        # Shape smoke test.
        vals = self.rg.integers(10, 20, 10)
        assert_(len(vals) == 10)
    def test_rayleigh(self):
        # Shape smoke test with the bounded parameter sweep.
        vals = self.rg.rayleigh(0.2, 10)
        assert_(len(vals) == 10)
        params_1(self.rg.rayleigh, bounded=True)
    def test_vonmises(self):
        # Shape smoke test.
        vals = self.rg.vonmises(10, 0.2, 10)
        assert_(len(vals) == 10)
    def test_wald(self):
        # Shape smoke test.
        vals = self.rg.wald(1.0, 1.0, 10)
        assert_(len(vals) == 10)
    def test_weibull(self):
        # Shape smoke test.
        vals = self.rg.weibull(1.0, 10)
        assert_(len(vals) == 10)
    def test_zipf(self):
        # Parameter broadcasting against the shared 1d/2d/matrix arrays.
        vals = self.rg.zipf(10, 10)
        assert_(len(vals) == 10)
        vals = self.rg.zipf(self.vec_1d)
        assert_(len(vals) == 100)
        vals = self.rg.zipf(self.vec_2d)
        assert_(vals.shape == (1, 100))
        vals = self.rg.zipf(self.mat)
        assert_(vals.shape == (100, 100))
    def test_hypergeometric(self):
        # Scalar parameters give a scalar; array parameters broadcast.
        vals = self.rg.hypergeometric(25, 25, 20)
        assert_(np.isscalar(vals))
        vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
        assert_(vals.shape == (10,))
    def test_triangular(self):
        # Scalar parameters give a scalar; array mode broadcasts.
        vals = self.rg.triangular(-5, 0, 5)
        assert_(np.isscalar(vals))
        vals = self.rg.triangular(-5, np.array([0] * 10), 5)
        assert_(vals.shape == (10,))
def test_multivariate_normal(self):
mean = [0, 0]
cov = [[1, 0], [0, 100]] # diagonal covariance
x = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
x_zig = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
x_inv = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
assert_((x_zig != x_inv).any())
    def test_multinomial(self):
        # One draw gives the category vector; size stacks draws.
        vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
        assert_(vals.shape == (2,))
        vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
        assert_(vals.shape == (10, 2))
    def test_dirichlet(self):
        # size draws, each a length-3 simplex sample.
        s = self.rg.dirichlet((10, 5, 3), 20)
        assert_(s.shape == (20, 3))
def test_pickle(self):
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_((type(self.rg) == type(unpick)))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_((type(self.rg) == type(unpick)))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
    def test_seed_array(self):
        # Vector seeding: a length-1 array matches the scalar seed,
        # while longer vectors must produce distinct states.
        if self.seed_vector_bits is None:
            bitgen_name = self.bit_generator.__name__
            pytest.skip(f'Vector seeding is not supported by {bitgen_name}')
        if self.seed_vector_bits == 32:
            dtype = np.uint32
        else:
            dtype = np.uint64
        seed = np.array([1], dtype=dtype)
        bg = self.bit_generator(seed)
        state1 = bg.state
        bg = self.bit_generator(1)
        state2 = bg.state
        assert_(comp_state(state1, state2))
        seed = np.arange(4, dtype=dtype)
        bg = self.bit_generator(seed)
        state1 = bg.state
        bg = self.bit_generator(seed[0])
        state2 = bg.state
        assert_(not comp_state(state1, state2))
        seed = np.arange(1500, dtype=dtype)
        bg = self.bit_generator(seed)
        state1 = bg.state
        bg = self.bit_generator(seed[0])
        state2 = bg.state
        assert_(not comp_state(state1, state2))
        # Seeds spanning the full representable bit range.
        seed = 2 ** np.mod(np.arange(1500, dtype=dtype),
                           self.seed_vector_bits - 1) + 1
        bg = self.bit_generator(seed)
        state1 = bg.state
        bg = self.bit_generator(seed[0])
        state2 = bg.state
        assert_(not comp_state(state1, state2))
    def test_uniform_float(self):
        # The float32 uniform stream is reproducible from a restored state.
        rg = Generator(self.bit_generator(12345))
        warmup(rg)
        state = rg.bit_generator.state
        r1 = rg.random(11, dtype=np.float32)
        rg2 = Generator(self.bit_generator())
        warmup(rg2)
        rg2.bit_generator.state = state
        r2 = rg2.random(11, dtype=np.float32)
        assert_array_equal(r1, r2)
        assert_equal(r1.dtype, np.float32)
        assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
    def test_gamma_floats(self):
        # The float32 gamma stream is reproducible from a restored state.
        rg = Generator(self.bit_generator())
        warmup(rg)
        state = rg.bit_generator.state
        r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)
        rg2 = Generator(self.bit_generator())
        warmup(rg2)
        rg2.bit_generator.state = state
        r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)
        assert_array_equal(r1, r2)
        assert_equal(r1.dtype, np.float32)
        assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
    def test_normal_floats(self):
        # The float32 normal stream is reproducible from a restored state.
        rg = Generator(self.bit_generator())
        warmup(rg)
        state = rg.bit_generator.state
        r1 = rg.standard_normal(11, dtype=np.float32)
        rg2 = Generator(self.bit_generator())
        warmup(rg2)
        rg2.bit_generator.state = state
        r2 = rg2.standard_normal(11, dtype=np.float32)
        assert_array_equal(r1, r2)
        assert_equal(r1.dtype, np.float32)
        assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
    def test_normal_zig_floats(self):
        # Same check as test_normal_floats (historical duplicate name).
        rg = Generator(self.bit_generator())
        warmup(rg)
        state = rg.bit_generator.state
        r1 = rg.standard_normal(11, dtype=np.float32)
        rg2 = Generator(self.bit_generator())
        warmup(rg2)
        rg2.bit_generator.state = state
        r2 = rg2.standard_normal(11, dtype=np.float32)
        assert_array_equal(r1, r2)
        assert_equal(r1.dtype, np.float32)
        assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
    def test_output_fill(self):
        # out= filling must match size=-based generation, for both
        # float64 and float32.
        rg = self.rg
        state = rg.bit_generator.state
        size = (31, 7, 97)
        existing = np.empty(size)
        rg.bit_generator.state = state
        rg.standard_normal(out=existing)
        rg.bit_generator.state = state
        direct = rg.standard_normal(size=size)
        assert_equal(direct, existing)
        # Passing both out= and a matching size= is allowed.
        sized = np.empty(size)
        rg.bit_generator.state = state
        rg.standard_normal(out=sized, size=sized.shape)
        existing = np.empty(size, dtype=np.float32)
        rg.bit_generator.state = state
        rg.standard_normal(out=existing, dtype=np.float32)
        rg.bit_generator.state = state
        direct = rg.standard_normal(size=size, dtype=np.float32)
        assert_equal(direct, existing)
    def test_output_filling_uniform(self):
        # out= filling matches size=-based random() generation.
        rg = self.rg
        state = rg.bit_generator.state
        size = (31, 7, 97)
        existing = np.empty(size)
        rg.bit_generator.state = state
        rg.random(out=existing)
        rg.bit_generator.state = state
        direct = rg.random(size=size)
        assert_equal(direct, existing)
        existing = np.empty(size, dtype=np.float32)
        rg.bit_generator.state = state
        rg.random(out=existing, dtype=np.float32)
        rg.bit_generator.state = state
        direct = rg.random(size=size, dtype=np.float32)
        assert_equal(direct, existing)
    def test_output_filling_exponential(self):
        # out= filling matches size=-based standard_exponential.
        rg = self.rg
        state = rg.bit_generator.state
        size = (31, 7, 97)
        existing = np.empty(size)
        rg.bit_generator.state = state
        rg.standard_exponential(out=existing)
        rg.bit_generator.state = state
        direct = rg.standard_exponential(size=size)
        assert_equal(direct, existing)
        existing = np.empty(size, dtype=np.float32)
        rg.bit_generator.state = state
        rg.standard_exponential(out=existing, dtype=np.float32)
        rg.bit_generator.state = state
        direct = rg.standard_exponential(size=size, dtype=np.float32)
        assert_equal(direct, existing)
    def test_output_filling_gamma(self):
        # out= filling matches size=-based standard_gamma.
        rg = self.rg
        state = rg.bit_generator.state
        size = (31, 7, 97)
        existing = np.zeros(size)
        rg.bit_generator.state = state
        rg.standard_gamma(1.0, out=existing)
        rg.bit_generator.state = state
        direct = rg.standard_gamma(1.0, size=size)
        assert_equal(direct, existing)
        existing = np.zeros(size, dtype=np.float32)
        rg.bit_generator.state = state
        rg.standard_gamma(1.0, out=existing, dtype=np.float32)
        rg.bit_generator.state = state
        direct = rg.standard_gamma(1.0, size=size, dtype=np.float32)
        assert_equal(direct, existing)
    def test_output_filling_gamma_broadcast(self):
        # out= filling matches size=-based generation when the shape
        # parameter broadcasts over the output.
        rg = self.rg
        state = rg.bit_generator.state
        size = (31, 7, 97)
        mu = np.arange(97.0) + 1.0
        existing = np.zeros(size)
        rg.bit_generator.state = state
        rg.standard_gamma(mu, out=existing)
        rg.bit_generator.state = state
        direct = rg.standard_gamma(mu, size=size)
        assert_equal(direct, existing)
        existing = np.zeros(size, dtype=np.float32)
        rg.bit_generator.state = state
        rg.standard_gamma(mu, out=existing, dtype=np.float32)
        rg.bit_generator.state = state
        direct = rg.standard_gamma(mu, size=size, dtype=np.float32)
        assert_equal(direct, existing)
def test_output_fill_error(self):
rg = self.rg
size = (31, 7, 97)
existing = np.empty(size)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_normal(out=existing[::3])
existing = np.empty(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float64)
existing = np.zeros(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float64)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32)
existing = np.zeros(size, dtype=np.float64)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3])
def test_integers_broadcast(self, dtype):
if dtype == np.bool_:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
self._reset_state()
a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
self._reset_state()
b = self.rg.integers([lower] * 10, upper, dtype=dtype)
assert_equal(a, b)
self._reset_state()
c = self.rg.integers(lower, upper, size=10, dtype=dtype)
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
[lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
e = self.rg.integers(
np.array([lower] * 10), np.array([upper] * 10), size=10,
dtype=dtype)
assert_equal(a, e)
self._reset_state()
a = self.rg.integers(0, upper, size=10, dtype=dtype)
self._reset_state()
b = self.rg.integers([upper] * 10, dtype=dtype)
assert_equal(a, b)
def test_integers_numpy(self, dtype):
high = np.array([1])
low = np.array([0])
out = self.rg.integers(low, high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low[0], high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low, high[0], dtype=dtype)
assert out.shape == (1,)
def test_integers_broadcast_errors(self, dtype):
if dtype == np.bool_:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
with pytest.raises(ValueError):
self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([0], [0], dtype=dtype)
class TestMT19937(RNG):
    """Run the shared RNG smoke tests against the MT19937 bit generator."""

    @classmethod
    def setup_class(cls):
        cls.bit_generator = MT19937
        cls.advance = None  # MT19937 exposes no advance() to exercise
        cls.seed = [(1 << 21) + (1 << 16) + (1 << 5) + 1]
        cls.rg = Generator(cls.bit_generator(*cls.seed))
        cls.initial_state = cls.rg.bit_generator.state
        cls.seed_vector_bits = 32
        cls._extra_setup()
        cls.seed_error = ValueError

    def test_numpy_state(self):
        """A legacy RandomState state tuple can be assigned directly."""
        legacy = np.random.RandomState()
        legacy.standard_normal(99)  # advance past the initial state
        old_state = legacy.get_state()
        self.rg.bit_generator.state = old_state
        round_tripped = self.rg.bit_generator.state
        assert_((old_state[1] == round_tripped['state']['key']).all())
        assert_(old_state[2] == round_tripped['state']['pos'])
class TestPhilox(RNG):
    """Run the shared RNG smoke tests against the Philox bit generator."""

    @classmethod
    def setup_class(cls):
        cls.bit_generator = Philox
        # Advance amount with bits set in several words of the counter.
        cls.advance = (1 << 63) + (1 << 31) + (1 << 15) + 1
        cls.seed = [12345]
        cls.rg = Generator(cls.bit_generator(*cls.seed))
        cls.initial_state = cls.rg.bit_generator.state
        cls.seed_vector_bits = 64
        cls._extra_setup()
class TestSFC64(RNG):
    """Run the shared RNG smoke tests against the SFC64 bit generator."""

    @classmethod
    def setup_class(cls):
        cls.bit_generator = SFC64
        cls.advance = None  # no advance() exercised for SFC64
        cls.seed = [12345]
        cls.rg = Generator(cls.bit_generator(*cls.seed))
        cls.initial_state = cls.rg.bit_generator.state
        cls.seed_vector_bits = 192
        cls._extra_setup()
class TestPCG64(RNG):
    """Run the shared RNG smoke tests against the PCG64 bit generator."""

    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64
        # Advance amount with bits set in several words of the counter.
        cls.advance = (1 << 63) + (1 << 31) + (1 << 15) + 1
        cls.seed = [12345]
        cls.rg = Generator(cls.bit_generator(*cls.seed))
        cls.initial_state = cls.rg.bit_generator.state
        cls.seed_vector_bits = 64
        cls._extra_setup()
class TestPCG64DXSM(RNG):
    """Run the shared RNG smoke tests against the PCG64DXSM bit generator."""

    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64DXSM
        # Advance amount with bits set in several words of the counter.
        cls.advance = (1 << 63) + (1 << 31) + (1 << 15) + 1
        cls.seed = [12345]
        cls.rg = Generator(cls.bit_generator(*cls.seed))
        cls.initial_state = cls.rg.bit_generator.state
        cls.seed_vector_bits = 64
        cls._extra_setup()
class TestDefaultRNG(RNG):
    """Run the shared RNG smoke tests against ``np.random.default_rng``.

    This duplicates some tests that directly instantiate a fresh
    Generator(), but that's okay.
    """

    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64
        cls.advance = (1 << 63) + (1 << 31) + (1 << 15) + 1
        cls.seed = [12345]
        cls.rg = np.random.default_rng(*cls.seed)
        cls.initial_state = cls.rg.bit_generator.state
        cls.seed_vector_bits = 64
        cls._extra_setup()

    def test_default_is_pcg64(self):
        # In order to change the default BitGenerator, we'll go through
        # a deprecation cycle to move to a different function.
        assert_(isinstance(self.rg.bit_generator, PCG64))

    def test_seed(self):
        """Valid seeds are accepted; negative seeds raise ``ValueError``."""
        accepted = [
            (),
            (None,),
            (12345,),
            (0,),
            (43660444402423911716352051725018508569,),
            ([43660444402423911716352051725018508569,
              279705150948142787361475340226491943209],),
        ]
        for args in accepted:
            np.random.default_rng(*args)
        for bad_seed in (-1, [12345, -1]):
            with pytest.raises(ValueError):
                np.random.default_rng(bad_seed)
| 28,183 | Python | 33.412698 | 78 | 0.570947 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_seed_sequence.py | import numpy as np
from numpy.testing import assert_array_equal, assert_array_compare
from numpy.random import SeedSequence
def test_reference_data():
    """ Check that SeedSequence generates data the same as the C++ reference.

    https://gist.github.com/imneme/540829265469e673d045
    """
    inputs = [
        [3735928559, 195939070, 229505742, 305419896],
        [3668361503, 4165561550, 1661411377, 3634257570],
        [164546577, 4166754639, 1765190214, 1303880213],
        [446610472, 3941463886, 522937693, 1882353782],
        [1864922766, 1719732118, 3882010307, 1776744564],
        [4141682960, 3310988675, 553637289, 902896340],
        [1134851934, 2352871630, 3699409824, 2648159817],
        [1240956131, 3107113773, 1283198141, 1924506131],
        [2669565031, 579818610, 3042504477, 2774880435],
        [2766103236, 2883057919, 4029656435, 862374500],
    ]
    outputs = [
        [3914649087, 576849849, 3593928901, 2229911004],
        [2240804226, 3691353228, 1365957195, 2654016646],
        [3562296087, 3191708229, 1147942216, 3726991905],
        [1403443605, 3591372999, 1291086759, 441919183],
        [1086200464, 2191331643, 560336446, 3658716651],
        [3249937430, 2346751812, 847844327, 2996632307],
        [2584285912, 4034195531, 3523502488, 169742686],
        [959045797, 3875435559, 1886309314, 359682705],
        [3978441347, 432478529, 3223635119, 138903045],
        [296367413, 4262059219, 13109864, 3283683422],
    ]
    outputs64 = [
        [2477551240072187391, 9577394838764454085],
        [15854241394484835714, 11398914698975566411],
        [13708282465491374871, 16007308345579681096],
        [15424829579845884309, 1898028439751125927],
        [9411697742461147792, 15714068361935982142],
        [10079222287618677782, 12870437757549876199],
        [17326737873898640088, 729039288628699544],
        [16644868984619524261, 1544825456798124994],
        [1857481142255628931, 596584038813451439],
        [18305404959516669237, 14103312907920476776],
    ]
    for seed, expected32, expected64 in zip(inputs, outputs, outputs64):
        ss = SeedSequence(seed)
        # 32-bit words are the native output; 64-bit words pack two of them.
        expected32 = np.array(expected32, dtype=np.uint32)
        assert_array_equal(ss.generate_state(len(expected32)), expected32)
        assert_array_equal(
            ss.generate_state(len(expected64), dtype=np.uint64), expected64)
def test_zero_padding():
    """ Ensure that the implicit zero-padding does not cause problems.
    """
    # Large integers are inserted in little-endian fashion, so a seed
    # shifted into higher words must not produce trailing 0s that collide
    # with the unshifted seed.
    plain = SeedSequence(42)
    shifted = SeedSequence(42 << 32)
    assert_array_compare(np.not_equal,
                         plain.generate_state(4),
                         shifted.generate_state(4))

    # Backwards compatibility with the original 0.17 release for small
    # integers and no spawn key.
    expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988],
                          dtype=np.uint32)
    assert_array_equal(SeedSequence(42).generate_state(4), expected42)

    # Regression test for gh-16539: the implicit 0s must not conflict
    # with spawn keys.
    assert_array_compare(np.not_equal,
                         SeedSequence(42, spawn_key=(0,)).generate_state(4),
                         expected42)
| 3,311 | Python | 39.888888 | 79 | 0.681365 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.