# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_scope_model.py

import omni.ui as ui


class LayerScopeModel(ui.AbstractValueModel):
    def __init__(self, layer_model):
        super().__init__()
        self._layer_model = layer_model

    def destroy(self):
        self._layer_model = None

    def get_value_as_bool(self):
        # False means local mode
        return self._layer_model.global_muteness_scope

    def set_value(self, value):
        self._layer_model.global_muteness_scope = value
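
Each of the model files in this section follows the same pattern: a small `ui.AbstractValueModel` subclass that adapts one piece of layer state for an omni.ui widget. A minimal usage sketch, assuming the standard omni.ui model/widget binding (the builder function below is hypothetical, not part of the extension):

```python
import omni.ui as ui

def build_scope_checkbox(layer_model) -> ui.CheckBox:
    # The checkbox pulls its display state through get_value_as_bool() and
    # pushes user toggles back through set_value().
    model = LayerScopeModel(layer_model)
    return ui.CheckBox(model)
```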

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_live_update_model.py

import omni.ui as ui


class LayerLiveUpdateModel(ui.AbstractValueModel):
    def __init__(self, usd_context, layer_item):
        super().__init__()
        self._usd_context = usd_context
        self._layer_item = layer_item

    def destroy(self):
        self._usd_context = None
        self._layer_item = None

    def get_value_as_bool(self):
        return self._layer_item.is_in_live_session

    def set_value(self, value):
        pass

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_auto_authoring.py

import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils


class LayerAutoAuthoringModel(ui.AbstractValueModel):
    def __init__(self, layer_model):
        super().__init__()
        self._layer_model = layer_model

    def destroy(self):
        self._layer_model = None

    def get_value_as_bool(self):
        # False means auto authoring mode is disabled.
        return self._layer_model.auto_authoring_mode

    def set_value(self, value):
        LayerModelUtils.set_auto_authoring_mode(self._layer_model, value)

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/save_model.py

import weakref
import omni
import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils


class SaveModel(ui.AbstractValueModel):
    def __init__(self, layer_item):
        super().__init__()
        self._layer_item = layer_item

    def destroy(self):
        self._layer_item = None

    def get_value_as_bool(self):
        return self._layer_item.dirty or (self._layer_item.is_live_session_layer and self._layer_item.has_content)

    def set_value(self, value):
        if (
            value
            or not self._layer_item.dirty
            or self._layer_item.missing
            or not self._layer_item.editable
            or self._layer_item.is_live_session_layer
        ):
            return
        LayerModelUtils.save_layer(self._layer_item)
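
Note the inverted guard in `set_value`: a toggle widget bound to this model flips the boolean on click, so a click on a dirty layer arrives as `set_value(False)`, which is the case that triggers the save. A sketch of that binding, assuming the usual omni.ui ToolButton/model pattern (the builder function is hypothetical):

```python
import omni.ui as ui

def build_save_button(layer_item) -> ui.ToolButton:
    # get_value_as_bool() is True while the layer is dirty; clicking the
    # button flips the value to False, and SaveModel.set_value(False)
    # performs the actual save.
    model = SaveModel(layer_item)
    return ui.ToolButton(model, name="save")
```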

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/live_session_user_model.py

import omni.ui as ui
import omni.kit.usd.layers as layers


class LiveSessionUserModel(ui.AbstractValueModel):
    def __init__(self, peer_user: layers.LiveSessionUser):
        super().__init__()
        self._peer_user = peer_user

    @property
    def peer_user(self):
        return self._peer_user

    @peer_user.setter
    def peer_user(self, value):
        self._peer_user = value

    def destroy(self):
        self._peer_user = None

    def get_value_as_string(self):
        if self._peer_user:
            return layers.get_short_user_name(self._peer_user.user_name)
        else:
            return ""

    def set_value(self, value):
        # Cannot change the user name.
        pass
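
Since `set_value` is a no-op, this model is effectively read-only: a field bound to it displays the short user name and discards edits. A sketch, assuming the standard omni.ui field/model pattern (the builder function is hypothetical):

```python
import omni.ui as ui

def build_user_name_field(peer_user) -> ui.StringField:
    # The field renders get_value_as_string(); edits are dropped by the
    # model's no-op set_value(), and enabled=False blocks input entirely.
    model = LiveSessionUserModel(peer_user)
    return ui.StringField(model, enabled=False)
```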

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/save_all_model.py

import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils


class SaveAllModel(ui.AbstractValueModel):
    def __init__(self, layer_model):
        super().__init__()
        self._layer_model = layer_model

    def destroy(self):
        self._layer_model = None

    def get_value_as_bool(self):
        return self._layer_model.has_dirty_layers()

    def set_value(self, value):
        if value:
            return
        LayerModelUtils.save_model(self._layer_model)

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/prim_model.py

import re
from pxr import Sdf, Usd
from typing import List
from typing import Optional
import omni.usd
import omni.ui as ui
import omni.kit.usd.layers as layers
from omni.kit.widget.stage.stage_model import StageModel, StageItem
from ..layer_icons import LayerIcons


class PrimItem(StageItem):  # pragma: no cover
    def __init__(
        self,
        path: Sdf.Path,
        stage: Usd.Stage,
        stage_model: StageModel,
        flat=False,
        root_identifier=None,
        load_payloads=False,
        check_missing_references=False,
    ):
        super().__init__(
            path,
            stage,
            stage_model,
            flat=flat,
            root_identifier=root_identifier,
            load_payloads=load_payloads,
            check_missing_references=check_missing_references,
        )
        usd_context = omni.usd.get_context()
        links = omni.kit.usd.layers.get_spec_layer_links(usd_context, path, False)
        if links:
            self._linked = True
        else:
            self._linked = False
        self._linked_image = None
        self._locked = omni.kit.usd.layers.is_spec_locked(usd_context, path)
        self._locked_image = None

    @property
    def locked(self):
        return self._locked

    @locked.setter
    def locked(self, value: bool):
        if value != self._locked:
            self._locked = value
            if self._locked_image:
                filename = LayerIcons().get("lock") if value else LayerIcons().get("lock_open")
                image_style = {"": {"image_url": f"{filename}"}}
                self._locked_image.set_style(image_style)

    @property
    def linked(self):
        return self._linked

    @linked.setter
    def linked(self, value: bool):
        if value != self._linked:
            self._linked = value
            if self._linked_image:
                self._linked_image.visible = value

    def set_linked_image(self, image: ui.Image):
        self._linked_image = image

    def set_locked_image(self, image: ui.Image):
        self._locked_image = image


class PrimModel(StageModel):  # pragma: no cover
    def __init__(self, stage: Usd.Stage):
        super().__init__(stage, flat=None, load_payloads=False, check_missing_references=False)
        # Replace the root item with PrimItem.
        if self._root:
            self._root = PrimItem(
                Sdf.Path.absoluteRootPath,
                self.stage,
                self,
                False,
                self.stage.GetRootLayer().identifier,
                load_payloads=self.load_payloads,
                check_missing_references=self.check_missing_references,
            )
        self._layers = layers.get_layers()
        self._specs_linking = self._layers.get_specs_linking()
        self._specs_locking = self._layers.get_specs_locking()
        self._layers_event_subscription = self._layers.get_event_stream().create_subscription_to_pop(
            self._on_layer_events, name="Layers Prim Model"
        )

    def destroy(self):
        self._layers_event_subscription = None
        self._layers = None
        self._specs_linking = None
        self._specs_locking = None
        super().destroy()

    # This is copied from StageModel._get_stage_item_from_cache() with two
    # differences from the base implementation. If the base implementation
    # changes, this copy should be updated to match.
    def _get_stage_item_from_cache(self, path: Sdf.Path, create_if_not_existed=False):
        stage_item = super()._get_stage_item_from_cache(path, False)
        if stage_item:
            return stage_item
        elif not create_if_not_existed:
            return None
        # Creates a new stage item.
        stage_item = super()._get_stage_item_from_cache(path, True)
        if not stage_item:
            return None
        # Replaces it with the customized PrimItem.
        stage_item = PrimItem(
            path,
            self.stage,
            self,
            self.flat,
            load_payloads=self.load_payloads,
            check_missing_references=self.check_missing_references,
        )
        super()._remove_stage_item_from_cache(path)
        super()._cache_stage_item(stage_item)
        return stage_item

    def find(self, path: Sdf.Path):
        """Return item with the given path"""
        path = Sdf.Path(path)
        if path == Sdf.Path.absoluteRootPath:
            return self.root
        return super()._get_stage_item_from_cache(path)

    def get_item_value_model_count(self, item):
        """Reimplemented from AbstractItemModel"""
        return 3

    def get_item_value_model(self, item, column_id):
        """Reimplemented from AbstractItemModel"""
        if item is None:
            item = self.root
        if not item:
            return None
        if column_id == 2:
            return item.name_model

    def drop_accepted(self, target_item, source):
        return False

    def drop(self, target_item, source):
        return

    def _on_layer_events(self, event):
        payload = layers.get_layer_event_payload(event)
        if not payload:
            return
        if payload.event_type == layers.LayerEventType.SPECS_LINKING_CHANGED:
            for _, spec_paths in payload.layer_spec_paths.items():
                self._on_spec_links_changed(spec_paths)
        elif payload.event_type == layers.LayerEventType.SPECS_LOCKING_CHANGED:
            self._on_spec_locks_changed(payload.identifiers_or_spec_paths)

    def _on_spec_links_changed(self, spec_paths: List[str]):
        for spec_path in spec_paths:
            item = self.find(spec_path)
            if item:
                item.linked = self._specs_linking.is_spec_linked(spec_path)

    def _on_spec_locks_changed(self, spec_paths: List[str]):
        for spec_path in spec_paths:
            item = self.find(spec_path)
            if item:
                item.locked = self._specs_locking.is_spec_locked(spec_path)
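
Unlike the value models above, `PrimModel` is an `AbstractItemModel` (via `StageModel`), so it backs a tree widget rather than a single control. A sketch of hooking it up, assuming the standard omni.ui TreeView/model pattern (the widget options shown are illustrative):

```python
import omni.ui as ui

def build_prim_tree(stage) -> ui.TreeView:
    # The TreeView asks the model for get_item_value_model_count() /
    # get_item_value_model() per column; drop_accepted()/drop() above
    # disable drag-and-drop for this view.
    model = PrimModel(stage)
    return ui.TreeView(model, root_visible=False, header_visible=False)
```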

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/muteness_model.py

import weakref
import omni.ui as ui
import omni.kit.usd.layers as layers


class MutenessModel(ui.AbstractValueModel):
    def __init__(self, usd_context, layer_item, local: bool):
        super().__init__()
        self.local = local
        self._layer_item = layer_item
        self._usd_context = usd_context

    def destroy(self):
        self._layer_item = None
        self._usd_context = None

    def get_value_as_bool(self):
        if self.local:
            return self._layer_item.locally_muted
        else:
            return self._layer_item.globally_muted

    def set_value(self, value):
        self._layer_item.muted = value

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/prim_name_model.py

import omni
import weakref
import omni.ui as ui


class PrimNameModel(ui.AbstractValueModel):
    def __init__(self, prim_item):
        super().__init__()
        self._prim_item = prim_item

    def destroy(self):
        self._prim_item = None

    def get_value_as_string(self):
        return self._prim_item.name

    def set_value(self, value):
        # Cannot change the prim name.
        pass

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/lock_model.py

import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils


class LockModel(ui.AbstractValueModel):
    def __init__(self, layer_item):
        super().__init__()
        self._layer_item = layer_item

    def destroy(self):
        self._layer_item = None

    def get_value_as_bool(self):
        return self._layer_item.locked

    def set_value(self, value):
        LayerModelUtils.lock_layer(self._layer_item, value)

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/models/layer_latest_model.py

import omni.ui as ui
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils


class LayerLatestModel(ui.AbstractValueModel):
    def __init__(self, usd_context, layer_item):
        super().__init__()
        self._usd_context = usd_context
        self._layer_item = layer_item

    def destroy(self):
        self._usd_context = None
        self._layer_item = None

    def get_value_as_bool(self):
        return not self._layer_item.latest

    def set_value(self, _):
        if not self._layer_item.latest:
            LayerModelUtils.reload_layer(self._layer_item)

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_selection.py

import omni.kit.test
import os
import uuid
import omni.client
import omni.kit.commands
from .base import TestLayerUIBase
from omni.kit.widget.layers import LayerUtils
from pxr import Sdf, Usd


class TestLayerSelection(TestLayerUIBase):
    async def setUp(self):
        await super().setUp()
        self.stage = await self.prepare_empty_stage()

    async def tearDown(self):
        self.layers_instance.show_window(None, True)
        await self.usd_context.close_stage_async()
        await super().tearDown()

    async def _wait(self, frames=4):
        for i in range(frames):
            await self.app.next_update_async()

    async def test_layer_selection(self):
        layer1 = Sdf.Layer.CreateAnonymous()
        layer2 = Sdf.Layer.CreateAnonymous()
        self.stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
        self.stage.GetRootLayer().subLayerPaths.append(layer2.identifier)
        await self._wait()
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        self.assertEqual(len(root_layer_item.sublayers), 2)

        selected_item = None

        def on_selected(item):
            nonlocal selected_item
            selected_item = item

        self.layers_instance.add_layer_selection_changed_fn(on_selected)

        # Select two layers by simulating UI clicks.
        layer_window = self.layers_instance._window
        layer_tree_view = layer_window._layer_view
        layer_tree_view.selection = root_layer_item.sublayers
        await self._wait()
        # get_current_focused_layer_item returns None for a multiple selection.
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), None)
        self.assertEqual(self.layers_instance.get_selected_items(), root_layer_item.sublayers)
        self.assertEqual(selected_item, None)

        layer_tree_view.selection = [root_layer_item.sublayers[0]]
        await self._wait()
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), root_layer_item.sublayers[0])
        self.assertEqual(self.layers_instance.get_selected_items(), [root_layer_item.sublayers[0]])
        self.assertEqual(selected_item, root_layer_item.sublayers[0])

        layer_tree_view.selection = []
        await self._wait()
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), None)
        self.assertEqual(self.layers_instance.get_selected_items(), [])
        self.assertEqual(selected_item, None)

        # Manually set the focused layer item.
        self.layers_instance.set_current_focused_layer_item(root_layer_item.sublayers[0].identifier)
        await self._wait()
        self.assertEqual(self.layers_instance.get_current_focused_layer_item(), root_layer_item.sublayers[0])
        self.assertEqual(layer_tree_view.selection, [root_layer_item.sublayers[0]])
        self.assertEqual(self.layers_instance.get_selected_items(), [root_layer_item.sublayers[0]])
        self.assertEqual(selected_item, root_layer_item.sublayers[0])

        # After the listener is removed, it should no longer receive change events.
        selected_item = None
        self.layers_instance.remove_layer_selection_changed_fn(on_selected)
        self.layers_instance.set_current_focused_layer_item(root_layer_item.sublayers[0].identifier)
        await self._wait()
        self.assertEqual(selected_item, None)

        self.layers_instance.add_layer_selection_changed_fn(on_selected)
        self.layers_instance.show_window(None, False)
        await self._wait()
        # When the window is hidden, it cannot focus any layer item.
        self.assertEqual(root_layer_item.sublayers, [])
        self.layers_instance.set_current_focused_layer_item(root_layer_item)
        await self._wait()
        self.assertEqual(selected_item, None)

        self.layers_instance.show_window(None, True)
        await self._wait()
        layer_model = self.layers_instance.get_layer_model()
        # Old items are released after the window is hidden, so re-fetch them.
        root_layer_item = layer_model.root_layer_item
        self.layers_instance.set_current_focused_layer_item(root_layer_item.sublayers[0].identifier)
        await self._wait()
        self.assertEqual(selected_item, root_layer_item.sublayers[0])

    async def test_prim_selection(self):
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
        await self._wait()
        self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)

        omni.kit.commands.execute("SelectAll")
        await self._wait()
        all_prim_paths = []
        for prim in self.stage.TraverseAll():
            all_prim_paths.append(prim.GetPath())
        all_root_specs = root_layer_item.absolute_root_spec.children
        layer_window = self.layers_instance._window
        layer_tree_view = layer_window._layer_view
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set(all_root_specs), set(selections))

        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertEqual(len(selections), 0)

        selection_api = self.usd_context.get_selection()
        selection_api.set_selected_prim_paths([str(path) for path in all_prim_paths[10:20]], False)
        await self._wait()
        selections = layer_tree_view.selection
        all_selected_paths = []
        for selection in selections:
            all_selected_paths.append(selection.path)
        self.assertEqual(set(all_prim_paths[10:20]), set(all_selected_paths))

    async def test_prim_selection_with_ui(self):
        layer_model = self.layers_instance.get_layer_model()
        root_layer_item = layer_model.root_layer_item
        prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10])
        await self._wait()
        all_root_specs = root_layer_item.absolute_root_spec.children
        layer_window = self.layers_instance._window
        layer_tree_view = layer_window._layer_view

        # Select items one by one.
        layer_tree_view.selection = [all_root_specs[0]]
        layer_tree_view.selection = [all_root_specs[1]]
        layer_tree_view.selection = [all_root_specs[2]]

        # Undo back to item 1.
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set([all_root_specs[1]]), set(selections))

        # Undo back to item 0.
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set([all_root_specs[0]]), set(selections))

        # Empty selection, as at startup.
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) == 0)

        # Select all and undo.
        layer_tree_view.selection = all_root_specs
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) == 0)

        # OM-45941: Selecting an item and then removing it will not trigger a new command.
        layer_tree_view.selection = [all_root_specs[0]]
        layer_tree_view.selection = [all_root_specs[1]]
        stage = omni.usd.get_context().get_stage()
        LayerUtils.remove_prim_spec(stage.GetRootLayer(), all_root_specs[1].path)
        await self._wait()
        omni.kit.undo.undo()
        await self._wait()
        selections = layer_tree_view.selection
        self.assertTrue(len(selections) != 0)
        self.assertEqual(set([all_root_specs[0]]), set(selections))

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_performance.py

import omni.kit.test
import time
from .base import TestLayerUIBase
from pxr import Sdf, Usd


class TestLayerPerformance(TestLayerUIBase):
    async def setUp(self):
        await super().setUp()
        self.stage = await self.prepare_empty_stage()

    async def tearDown(self):
        await self.usd_context.close_stage_async()
        await super().tearDown()

    async def test_create_10_sublayers(self):
        start_time = time.time()
        root_layer = self.stage.GetRootLayer()
        self.create_sublayers(root_layer, [10])
        print(f"Time taken to create 10 sublayers: {time.time() - start_time}")

    async def test_search_1000_prim_specs(self):
        temp_layer = Sdf.Layer.CreateAnonymous()
        temp_stage = Usd.Stage.Open(temp_layer)
        self.create_prim_specs(temp_stage, Sdf.Path.absoluteRootPath, [1000])
        await self.usd_context.attach_stage_async(temp_stage)
        start_time = time.time()
        layer_model = self.layers_instance.get_layer_model()
        layer_model.filter_by_text("xform")
        print(f"Time taken to search 1000 prim specs: {time.time() - start_time}")
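
The timings above use wall-clock `time.time()`; `time.perf_counter()` is a monotonic drop-in alternative that is better suited to short measurements like these (a sketch, not what the test currently uses):

```python
import time

start = time.perf_counter()
# ... the operation under test ...
print(f"Time taken: {time.perf_counter() - start:.3f}s")
```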

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_live_session.py

import carb
import omni.kit.test
import omni.usd
import omni.client
import unittest
import omni.kit.app
import omni.kit.widget.live_session_management as lsm
from .base import TestLayerUIBase
from omni.kit.usd.layers.tests.mock_utils import (
    MockLiveSyncingApi, join_new_simulated_user, quit_simulated_user,
    quit_all_simulated_users
)
from omni.kit.usd.layers import get_layers, get_layer_event_payload, LayerEventType, LayerUtils
from pxr import Usd, Sdf


class TestLiveSession(TestLayerUIBase):
    # Before running each test
    async def setUp(self):
        await super().setUp()
        layers = get_layers()
        self.live_syncing = layers.get_live_syncing()

    async def tearDown(self):
        await super().tearDown()

    async def wait(self, frames=10):
        for i in range(frames):
            await self.app.next_update_async()

    async def test_non_omniverse_stage(self):
        import omni.kit.ui_test as ui_test

        # A non-omniverse stage cannot start a live session.
        await self.usd_context.new_stage_async()
        await ui_test.find("Layer").focus()
        await ui_test.human_delay(100)
        window = ui_test.find("Live Session")
        self.assertFalse(window)

    async def __create_fake_stage(self):
        format = Sdf.FileFormat.FindByExtension(".usd")
        # Sdf.Layer.New does not save the layer, so it won't fail.
        # This can be used to test layer identifiers with the omniverse://
        # scheme without touching a real server.
        layer = Sdf.Layer.New(format, "omniverse://__fake_omniverse_server__/test/test.usd")
        stage = self.usd_context.get_stage()
        stage.GetRootLayer().subLayerPaths.append(layer.identifier)
        return stage, layer

    @MockLiveSyncingApi
    async def test_session_management(self):
        _, layer = await self.__create_fake_stage()
        import omni.kit.ui_test as ui_test

        await ui_test.find("Layer").focus()
        await ui_test.human_delay(100)
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.select_context_menu("Create Session")
        await ui_test.human_delay(100)
        window = ui_test.find("Live Session")
        self.assertTrue(window)
        create_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='create_session_radio_button'")
        join_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='join_session_radio_button'")
        self.assertTrue(create_session_button)
        self.assertTrue(join_session_button)
        session_name_field = ui_test.find("Live Session//Frame/**/StringField[*].name=='new_session_name_field'")
        self.assertTrue(session_name_field)
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await self.wait()
        session_name_field.model.set_value("")

        # An empty session name is not valid.
        await confirm_button.click()
        await ui_test.human_delay(100)

        # An invalid session name will fail to join.
        await ui_test.human_delay(100)
        session_name_field.model.set_value("11111test_session")
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())
        session_name_field.model.set_value("")
        await ui_test.human_delay(100)
        await session_name_field.input("test_session.,m,mn,m")
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())

        # A valid session name.
        session_name_field.model.set_value("")
        await ui_test.human_delay(100)
        await session_name_field.input("test_session")
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertTrue(self.live_syncing.is_stage_in_live_session())
        current_live_session = self.live_syncing.get_current_live_session(layer.identifier)
        self.assertEqual(current_live_session.name, "test_session")
        layer_model = self.layers_instance.get_layer_model()
        self.assertTrue(layer_model.get_layer_item_by_identifier(current_live_session.root))

        join_new_simulated_user("user0", "user0", layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/ZStack[*].identifier=='user0'")
        self.assertTrue(user_layout)

        # Creates another user.
        join_new_simulated_user("user1", "user1", layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/ZStack[*].identifier=='user1'")
        self.assertTrue(user_layout)

        # A user quitting should remove its icon.
        quit_simulated_user("user1", layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/ZStack[*].identifier=='user1'")
        self.assertFalse(user_layout)

        # Joining another 10 users shows an ellipsis, since at most 3 are displayed.
        all_user_ids = []
        for i in range(10):
            index = i + 10
            user_id = f"user{index}"
            all_user_ids.append(user_id)
            join_new_simulated_user(user_id, user_id, layer_identifier=layer.identifier)
        await self.wait(20)
        user_layout = ui_test.find("Layer//Frame/**/Label[*].text=='...'")
        self.assertTrue(user_layout)

        # Initialize the mouse outside of the list, so it doesn't accidentally
        # hover on the wrong thing at the start.
        await ui_test.emulate_mouse_move(ui_test.Vec2(0, 0))
        await ui_test.emulate_mouse_move(user_layout.center)
        await self.wait(100)
        # The check below is disabled for now; the tooltip is only shown for
        # coverage and to ensure there are no scripting errors, since ui_test
        # cannot find the tooltip frame.
        # for user_id in all_user_ids:
        #     user_layout = ui_test.find(f"Layer//Frame/**/HStack[*].identifier=='{user_id}'")
        #     self.assertTrue(user_layout)

        quit_all_simulated_users(layer_identifier=layer.identifier)
        await self.wait()
        user_layout = ui_test.find("Layer//Frame/**/Label[*].text=='...'")
        self.assertFalse(user_layout)

        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Leave Session")
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())

        # Open the session dialog again.
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        await ui_test.human_delay(100)
        window = ui_test.find("Live Session")
        self.assertTrue(window)
        create_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='create_session_radio_button'")
        join_session_button = ui_test.find("Live Session//Frame/**/RadioButton[*].name=='join_session_radio_button'")
        self.assertTrue(create_session_button)
        self.assertTrue(join_session_button)

        # Clicking the confirm button immediately joins the session.
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await confirm_button.click()
        await ui_test.human_delay(100)
        self.assertTrue(self.live_syncing.is_stage_in_live_session())
        self.assertEqual(current_live_session.name, "test_session")

        # Quit the session.
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.select_context_menu("Leave Session")
        await ui_test.human_delay(100)
        self.assertFalse(self.live_syncing.is_stage_in_live_session())

        # Cancel button test.
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        cancel_button = ui_test.find("Live Session//Frame/**/Button[*].name=='cancel_button'")
        self.assertTrue(cancel_button)
        await cancel_button.click()
        self.assertFalse(self.live_syncing.is_stage_in_live_session())

        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await confirm_button.click()
        self.assertTrue(self.live_syncing.is_stage_in_live_session())

        # Test the leave session menu.
        await ui_test.human_delay(100)
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        # Selecting 'Leave Session' asks for confirmation before leaving.
        await ui_test.select_context_menu("Leave Session")
        await ui_test.human_delay(100)
        await confirm_button.click()
        self.assertFalse(self.live_syncing.is_stage_in_live_session())

        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        await ui_test.human_delay(100)
        await ui_test.select_context_menu("Join Session")
        confirm_button = ui_test.find("Live Session//Frame/**/Button[*].name=='confirm_button'")
        self.assertTrue(confirm_button)
        await confirm_button.click()
        self.assertTrue(self.live_syncing.is_stage_in_live_session())

        carb.settings.get_settings().set(lsm.VIEWER_ONLY_MODE_SETTING, True)
        self.assertTrue(lsm.is_viewer_only_mode())
        await ui_test.human_delay(100)
        live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
        self.assertTrue(live_update_button)
        await live_update_button.right_click()
        # In viewer-only mode, the 'End and Merge' menu item is unavailable.
        with self.assertRaises(Exception):
            await ui_test.select_context_menu("End and Merge")
        carb.settings.get_settings().set(lsm.VIEWER_ONLY_MODE_SETTING, False)
        self.assertFalse(lsm.is_viewer_only_mode())

        # Join the session and make some changes to test the end-session dialog.
        for confirm_or_cancel in [False, True]:
            await self.wait()
            live_update_button = ui_test.find("Layer//Frame/**/ToolButton[*].name=='live_update'")
            self.assertTrue(live_update_button)
            await live_update_button.right_click()
            # Selecting 'End and Merge' opens the merge options dialog.
            await ui_test.select_context_menu("End and Merge")
            await self.wait()
            confirm_button = ui_test.find("Merge Options//Frame/**/Button[*].name=='confirm_button'")
            cancel_button = ui_test.find("Merge Options//Frame/**/Button[*].name=='cancel_button'")
            self.assertTrue(confirm_button)
            self.assertTrue(cancel_button)
            if confirm_or_cancel:
                await confirm_button.click()
                await self.wait()
                self.assertFalse(self.live_syncing.is_stage_in_live_session())
            else:
                await cancel_button.click()
                self.assertTrue(self.live_syncing.is_stage_in_live_session())

        self.live_syncing.stop_all_live_sessions()
        await self.wait()

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/drag_drop_single.py

## Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
import omni.kit.test
import omni.usd
import omni.kit.ui_test as ui_test
from omni.kit.test.async_unittest import AsyncTestCase
from omni.kit.test_suite.helpers import open_stage, wait_stage_loading, get_test_data_path, get_prims, arrange_windows
from omni.kit.window.content_browser.test_helper import ContentBrowserTestHelper


class DragDropFileStageSingle(AsyncTestCase):
    # Before running each test
    async def setUp(self):
        await arrange_windows("Layer", 800, 600)
        await open_stage(get_test_data_path(__name__, "bound_shapes.usda"))
        await wait_stage_loading()

    # After running each test
    async def tearDown(self):
        await wait_stage_loading()

    async def test_l1_drag_drop_single_usd_stage(self):
        usd_context = omni.usd.get_context()
        stage = usd_context.get_stage()
        await ui_test.find("Content").focus()
        layer_window = ui_test.find("Layer")
        await layer_window.focus()

        # Verify prims.
        paths = [prim.GetPath().pathString for prim in get_prims(stage) if not omni.usd.is_hidden_type(prim)]
        self.assertEqual(paths, ['/World', '/World/defaultLight', '/World/Cone', '/World/Cube', '/World/Sphere', '/World/Cylinder', '/World/Looks', '/World/Looks/OmniPBR', '/World/Looks/OmniPBR/Shader', '/World/Looks/OmniGlass', '/World/Looks/OmniGlass/Shader', '/World/Looks/OmniSurface_Plastic', '/World/Looks/OmniSurface_Plastic/Shader'])

        # Drag/drop files onto the stage window.
        drag_target = layer_window.position + ui_test.Vec2(layer_window.size.x / 2, layer_window.size.y / 2)
        async with ContentBrowserTestHelper() as content_browser_helper:
            await content_browser_helper.toggle_grid_view_async(show_grid_view=False)
            await ui_test.human_delay(50)
            for file_path in ["4Lights.usda", "quatCube.usda"]:
                usd_path = get_test_data_path(__name__, file_path)
                await content_browser_helper.drag_and_drop_tree_view(usd_path, drag_target=drag_target)

        # Verify prims.
        paths = [prim.GetPath().pathString for prim in get_prims(stage) if not omni.usd.is_hidden_type(prim)]
        self.assertEqual(paths, ['/Stage', '/Stage/SphereLight_01', '/Stage/SphereLight_02', '/Stage/SphereLight_03', '/Stage/SphereLight_00', '/Stage/Cube', '/World', '/World/defaultLight', '/World/Cone', '/World/Cube', '/World/Sphere', '/World/Cylinder', '/World/Looks', '/World/Looks/OmniPBR', '/World/Looks/OmniPBR/Shader', '/World/Looks/OmniGlass', '/World/Looks/OmniGlass/Shader', '/World/Looks/OmniSurface_Plastic', '/World/Looks/OmniSurface_Plastic/Shader'])

# omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_material_watcher.py

# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omni.kit.test
import omni.usd
import omni.kit.app
from pxr import Usd, UsdShade, Sdf
from omni.kit.test_suite.helpers import get_test_data_path
from omni.kit.usd.layers import LayerUtils


class TestMaterialWatcher(omni.kit.test.AsyncTestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._test_scene = get_test_data_path(__name__, "ColorsSaltBox/SaltBox.usda")

    async def setUp(self):
        await omni.usd.get_context().new_stage_async()
        self.stage = omni.usd.get_context().get_stage()
        self.stage.GetRootLayer().subLayerPaths.append(self._test_scene)
        self.context = omni.usd.get_context()
        self.root_prim_path = Sdf.Path("/SaltBox")
        self.shader_prim_path = Sdf.Path("/SaltBox/Looks/SaltBox_Paper1/Shader")
        self.app = omni.kit.app.get_app()
        self.selection = self.context.get_selection()
        self.diffuse_constant_color_path = self.shader_prim_path.AppendProperty("inputs:diffuse_color_constant")
        self.albedo_brightness_path = self.shader_prim_path.AppendProperty("inputs:albedo_brightness")
        self.session_layer = self.stage.GetSessionLayer()
        self.stage.SetEditTarget(Usd.EditTarget(self.stage.GetRootLayer()))
        renderer = "rtx"
        if renderer not in self.context.get_attached_hydra_engine_names():
            omni.usd.add_hydra_engine(renderer, self.context)
        await self._wait()
        self.context.add_to_pending_creating_mdl_paths(str(self.shader_prim_path), True, True)
        await self._wait(2)

    async def tearDown(self):
        pass

    async def _wait(self, frames=5):
        for _ in range(frames):
            await self.app.next_update_async()

    async def test_referenced_material(self):
        await self.context.new_stage_async()
        self.stage = self.context.get_stage()
        self.session_layer = self.stage.GetSessionLayer()
        layer = Sdf.Layer.CreateAnonymous()
        self.stage.GetRootLayer().subLayerPaths.append(layer.identifier)
        # Writes materials into the sublayer to keep the edit target empty.
        with Usd.EditContext(self.stage, layer):
            root_prim = self.stage.DefinePrim("/SaltBox")
            root_prim.GetReferences().AddReference(self._test_scene)
        await self._wait()
        # OM-84443: Populating material params for a referenced prim will not
        # write them all back to the root layer.
        self.selection.set_selected_prim_paths([str(self.shader_prim_path)], False)
        await self._wait()
        shader_prim = UsdShade.Shader.Get(self.stage, self.shader_prim_path)
        self.assertTrue(root_prim)
        self.assertTrue(shader_prim)
        await self.__check_material(root_prim, shader_prim)

    async def test_material_watcher(self):
        root_prim = self.stage.GetPrimAtPath(self.root_prim_path)
        shader_prim = UsdShade.Shader.Get(self.stage, self.shader_prim_path)
        self.assertTrue(root_prim)
        self.assertTrue(shader_prim)
        # Populating material params.
        self.selection.set_selected_prim_paths([str(self.shader_prim_path)], False)
        await self._wait()
        await self.__check_material(root_prim, shader_prim)

    async def __check_material(self, root_prim, shader_prim):
        # Checking params in the session layer.
        shader_prim_spec = self.session_layer.GetPrimAtPath(self.shader_prim_path)
        self.assertTrue(shader_prim_spec)
        # Checks that params are not written to the current edit target after selection.
        root_layer = self.stage.GetRootLayer()
        self.assertFalse(root_layer.GetPrimAtPath(self.shader_prim_path))
        # It's already overridden in USD, so it's empty in the session layer.
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, None)
        constant_color_composed = self.stage.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_composed)
        self.assertEqual(constant_color_composed.Get(), (0, 0, 1))
        albedo_brightness_spec = self.session_layer.GetPropertyAtPath(self.albedo_brightness_path)
        self.assertFalse(albedo_brightness_spec.default)

        variant_set = root_prim.GetVariantSet("materials")
        self.assertTrue(variant_set)
        variant_set.SetVariantSelection("renderLow")
        await self._wait()
        # It's not overridden in USD, so the default value will be populated
        # into the session layer.
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, (0.2, 0.2, 0.2))
        self.assertFalse(albedo_brightness_spec.default)

        # Setting a value on the root layer will remove the overrides in the session layer.
        constant_color_composed = self.stage.GetPropertyAtPath(self.diffuse_constant_color_path)
        constant_color_composed.Set((0.5, 0.5, 0.5))
        await self._wait()
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(not constant_color_spec or not constant_color_spec.default)

        # Removing the override causes the session layer to populate the default value.
        LayerUtils.remove_prim_spec(self.stage.GetRootLayer(), self.shader_prim_path)
        await self._wait()
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertEqual(constant_color_spec.default, (0.2, 0.2, 0.2))

        # Switching it back removes the default value in the session layer.
        variant_set.SetVariantSelection("renderHigh")
        await self._wait()
        # It's overridden in USD again, so the session layer spec is empty.
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, None)
        self.assertFalse(albedo_brightness_spec.default)

        # OM-110849: A default value set in the root layer and then edited with
        # an EditContext on the session layer will not be removed.
        constant_color_composed = self.stage.GetPropertyAtPath(self.diffuse_constant_color_path)
        constant_color_composed.Set((0.5, 0.5, 0.5))
        await self._wait()
        with Usd.EditContext(self.stage, self.session_layer):
            constant_color_composed.Set((0.2, 0.2, 0.2))
        await self._wait()
        constant_color_spec = self.session_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, (0.2, 0.2, 0.2))
        constant_color_spec = root_layer.GetPropertyAtPath(self.diffuse_constant_color_path)
        self.assertTrue(constant_color_spec)
        self.assertEqual(constant_color_spec.default, (0.5, 0.5, 0.5))
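
The session-layer behavior these assertions exercise reduces to a small standalone example: edits authored through a `Usd.EditContext` targeting the session layer never dirty the root layer (the stage and prim path below are hypothetical):

```python
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
cube = UsdGeom.Cube.Define(stage, "/World/Cube")
with Usd.EditContext(stage, stage.GetSessionLayer()):
    cube.GetSizeAttr().Set(2.0)  # authored into the session layer only
# The override lives in the session layer, not the root layer.
assert stage.GetSessionLayer().GetPropertyAtPath("/World/Cube.size")
assert not stage.GetRootLayer().GetPropertyAtPath("/World/Cube.size")
```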
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_usd_events.py | import omni.kit.test
import os
import uuid
import omni.client
import omni.kit.commands
import omni.kit.usd.layers as layers
from stat import S_IREAD, S_IWRITE
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.layers.prim_spec_item import PrimSpecSpecifier
from .base import TestLayerUIBase
from pxr import Sdf, UsdGeom, Usd
class TestLayerUsdEvents(TestLayerUIBase):
"""Tests for layer model refresh reacted to usd stage changes."""
async def setUp(self):
await super().setUp()
self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
self.test_folder += "/"
await omni.client.create_folder_async(self.test_folder)
self.stage = await self.prepare_empty_stage()
async def tearDown(self):
await self.usd_context.close_stage_async()
await omni.client.delete_async(self.test_folder)
await super().tearDown()
async def test_empty_stage(self):
root_layer = self.stage.GetRootLayer()
session_layer = self.stage.GetSessionLayer()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
session_layer_item = layer_model.session_layer_item
self.assertTrue(root_layer_item)
self.assertTrue(session_layer_item)
self.check_layer_regular_fields(
root_layer_item, "Root Layer", root_layer.identifier,
is_edit_target=True, reserved=True,
from_session_layer=False, anonymous=True,
)
self.check_layer_regular_fields(
session_layer_item, "Session Layer", session_layer.identifier,
is_edit_target=False, reserved=True,
from_session_layer=True, anonymous=True,
)
async def test_create_sublayers(self):
root_layer = self.stage.GetRootLayer()
session_layer = self.stage.GetSessionLayer()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
session_layer_item = layer_model.session_layer_item
_, identifiers_map = self.create_sublayers(root_layer, [2, 5, 4, 2])
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(root_layer_item, identifiers_map)
_, identifiers_map = self.create_sublayers(session_layer, [2, 5, 4, 2])
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(session_layer_item, identifiers_map)
async def test_edit_target_change(self):
root_layer = self.stage.GetRootLayer()
session_layer = self.stage.GetSessionLayer()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
session_layer_item = layer_model.session_layer_item
LayerUtils.set_edit_target(self.stage, session_layer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.assertTrue(session_layer_item.is_edit_target)
self.assertFalse(root_layer_item.is_edit_target)
LayerUtils.set_edit_target(self.stage, root_layer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.assertTrue(root_layer_item.is_edit_target)
self.assertFalse(session_layer_item.is_edit_target)
async def test_layer_misc_properties(self):
root_layer = self.stage.GetRootLayer()
root_layer.subLayerPaths.insert(0, "../invalid_path.usd")
await self.app.next_update_async()
await self.app.next_update_async()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
self.assertEqual(len(root_layer_item.sublayers), 1)
missing_layer = root_layer_item.sublayers[0]
self.check_layer_regular_fields(
missing_layer, "invalid_path.usd", missing_layer.identifier,
missing=True, anonymous=False, parent=root_layer_item
)
read_only_usd = omni.client.combine_urls(self.test_folder, "read_only.usd")
read_only_layer = Sdf.Layer.CreateNew(read_only_usd)
read_only_layer.Save()
read_only_layer = None
self.assertTrue(os.path.exists(read_only_usd))
os.chmod(read_only_usd, S_IREAD)
read_only_layer = Sdf.Layer.FindOrOpen(read_only_usd)
root_layer.subLayerPaths.append(read_only_usd)
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
read_only_item = root_layer_item.sublayers[1]
self.check_layer_regular_fields(
read_only_item, "read_only.usd", read_only_layer.identifier,
read_only=True, parent=root_layer_item, anonymous=False
)
# Change the write permission back so it could be removed.
os.chmod(read_only_usd, S_IWRITE)
dirty_layer_usd = omni.client.combine_urls(self.test_folder, "dirty_layer.usd")
dirty_layer = Sdf.Layer.CreateNew(dirty_layer_usd)
dirty_layer.Save()
root_layer.subLayerPaths.append(dirty_layer_usd)
await self.app.next_update_async()
await self.app.next_update_async()
self.assertTrue(len(root_layer_item.sublayers) == 3)
dirty_layer_item = root_layer_item.sublayers[2]
self.check_layer_regular_fields(
dirty_layer_item, "dirty_layer.usd", dirty_layer.identifier,
dirty=False, parent=root_layer_item, anonymous=False
)
# Change something
customLayerData = dirty_layer.customLayerData
customLayerData["test"] = 1
dirty_layer.customLayerData = customLayerData
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
dirty_layer_item, "dirty_layer.usd", dirty_layer.identifier,
dirty=True, parent=root_layer_item, anonymous=False
)
async def test_layer_local_mute_events(self):
root_layer = self.stage.GetRootLayer()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
sublayers_map, _ = self.create_sublayers(root_layer, [1, 1, 1])
await self.app.next_update_async()
await self.app.next_update_async()
level_0_sublayer = sublayers_map[root_layer.identifier][0]
level_1_sublayer = sublayers_map[level_0_sublayer.identifier][0]
level_2_sublayer = sublayers_map[level_1_sublayer.identifier][0]
level_0_item = root_layer_item.sublayers[0]
level_1_item = level_0_item.sublayers[0]
level_2_item = level_1_item.sublayers[0]
self.stage.MuteLayer(level_2_sublayer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
parent=level_1_item, anonymous=True, muted=True, muted_or_parent_muted=True,
)
self.assertTrue(level_2_item.locally_muted)
self.stage.UnmuteLayer(level_2_sublayer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
parent=level_1_item, anonymous=True, muted=False, muted_or_parent_muted=False
)
self.assertFalse(level_2_item.locally_muted)
self.stage.MuteLayer(level_0_sublayer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
parent=root_layer_item, anonymous=True, muted=True, muted_or_parent_muted=True,
sublayer_list=[level_1_sublayer.identifier]
)
self.check_layer_regular_fields(
level_1_item, level_1_sublayer.identifier, level_1_sublayer.identifier,
parent=level_0_item, anonymous=True, muted=False, muted_or_parent_muted=True,
sublayer_list=[level_2_sublayer.identifier]
)
self.check_layer_regular_fields(
level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
parent=level_1_item, anonymous=True, muted=False, muted_or_parent_muted=True
)
self.stage.UnmuteLayer(level_0_sublayer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_1_item, level_1_sublayer.identifier, level_1_sublayer.identifier,
parent=level_0_item, anonymous=True, muted=False, muted_or_parent_muted=False,
sublayer_list=[level_2_sublayer.identifier]
)
self.check_layer_regular_fields(
level_2_item, level_2_sublayer.identifier, level_2_sublayer.identifier,
parent=level_1_item, anonymous=True, muted=False, muted_or_parent_muted=False
)
async def test_layer_global_mute_events(self):
root_layer = self.stage.GetRootLayer()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
layers_state = layers.get_layers().get_layers_state()
layers_state.set_muteness_scope(True)
sublayers_map, _ = self.create_sublayers(root_layer, [1])
await self.app.next_update_async()
await self.app.next_update_async()
level_0_sublayer = sublayers_map[root_layer.identifier][0]
level_0_item = root_layer_item.sublayers[0]
self.stage.MuteLayer(level_0_sublayer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
parent=root_layer_item, anonymous=True, muted=True, muted_or_parent_muted=True,
)
self.assertTrue(level_0_item.globally_muted)
self.stage.UnmuteLayer(level_0_sublayer.identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
parent=root_layer_item, anonymous=True, muted=False, muted_or_parent_muted=False,
)
self.assertFalse(level_0_item.globally_muted)
LayerUtils.set_layer_global_muteness(root_layer, level_0_sublayer.identifier, True)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
parent=root_layer_item, anonymous=True, muted=True, muted_or_parent_muted=True,
)
self.assertTrue(level_0_item.globally_muted)
LayerUtils.set_layer_global_muteness(root_layer, level_0_sublayer.identifier, False)
await self.app.next_update_async()
await self.app.next_update_async()
self.check_layer_regular_fields(
level_0_item, level_0_sublayer.identifier, level_0_sublayer.identifier,
parent=root_layer_item, anonymous=True, muted=False, muted_or_parent_muted=False,
)
self.assertFalse(level_0_item.globally_muted)
async def test_sublayer_edits(self):
root_layer = self.stage.GetRootLayer()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
_, identifiers_map = self.create_sublayers(root_layer, [3, 3, 3])
await self.app.next_update_async()
await self.app.next_update_async()
level_0_sublayer0_identifier = identifiers_map[root_layer.identifier][0]
level_0_sublayer1_identifier = identifiers_map[root_layer.identifier][1]
level_0_sublayer2_identifier = identifiers_map[root_layer.identifier][2]
# Layer refresh after remove.
omni.kit.commands.execute("RemoveSublayer", layer_identifier=root_layer.identifier, sublayer_position=1)
complete_sublayers = identifiers_map[root_layer.identifier][:]
identifiers_map[root_layer.identifier] = [level_0_sublayer0_identifier, level_0_sublayer2_identifier]
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(root_layer_item, identifiers_map)
omni.kit.undo.undo()
await self.app.next_update_async()
await self.app.next_update_async()
identifiers_map[root_layer.identifier] = complete_sublayers
self.check_sublayer_tree(root_layer_item, identifiers_map)
# Layer refresh after create.
# Create layer before second sublayer of root layer.
omni.kit.commands.execute(
"CreateSublayer",
layer_identifier=root_layer.identifier,
sublayer_position=1,
new_layer_path="",
transfer_root_content=False,
create_or_insert=True,
layer_name="",
)
new_layer_identifier = self.stage.GetRootLayer().subLayerPaths[1]
new_layer = Sdf.Layer.FindOrOpen(new_layer_identifier)
await self.app.next_update_async()
await self.app.next_update_async()
self.assertTrue(root_layer_item.sublayers[1].identifier == new_layer_identifier)
omni.kit.undo.undo()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(root_layer_item, identifiers_map)
# Layer refresh after move.
omni.kit.commands.execute(
"MoveSublayer",
from_parent_layer_identifier=root_layer.identifier,
from_sublayer_position=2,
to_parent_layer_identifier=root_layer.identifier,
to_sublayer_position=0,
remove_source=True,
)
complete_sublayers = identifiers_map[root_layer.identifier][:]
identifiers_map[root_layer.identifier] = [
level_0_sublayer2_identifier,
level_0_sublayer0_identifier,
level_0_sublayer1_identifier
]
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(root_layer_item, identifiers_map)
omni.kit.undo.undo()
identifiers_map[root_layer.identifier] = complete_sublayers
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(root_layer_item, identifiers_map)
# Layer refresh after replace.
omni.kit.commands.execute(
"ReplaceSublayer",
layer_identifier=root_layer.identifier,
sublayer_position=1,
new_layer_path=new_layer_identifier,
)
complete_sublayers = identifiers_map[root_layer.identifier][:]
identifiers_map[root_layer.identifier] = [
level_0_sublayer0_identifier,
new_layer_identifier,
level_0_sublayer2_identifier
]
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(root_layer_item, identifiers_map)
omni.kit.undo.undo()
identifiers_map[root_layer.identifier] = complete_sublayers
await self.app.next_update_async()
await self.app.next_update_async()
self.check_sublayer_tree(root_layer_item, identifiers_map)
async def test_prim_specs_create(self):
session_layer = self.stage.GetSessionLayer()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
session_layer_item = layer_model.session_layer_item
prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)
LayerUtils.set_edit_target(self.stage, session_layer.identifier)
prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_prim_spec_tree(session_layer_item.absolute_root_spec, prim_spec_paths)
async def test_prim_specs_edits(self):
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
prim1_of_root = root_layer_item.prim_specs[1].path
omni.kit.commands.execute(
"RemovePrimSpec",
layer_identifier=root_layer_item.identifier,
prim_spec_path=prim1_of_root
)
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
changed_prim_specs = prim_spec_paths.copy()
for path in prim_spec_paths:
if path.HasPrefix(prim1_of_root):
changed_prim_specs.discard(path)
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, changed_prim_specs)
omni.kit.undo.undo()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)
async def test_layer_flush(self):
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
session_layer_item = layer_model.session_layer_item
prim_spec_paths = self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [10, 5, 4, 2])
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, prim_spec_paths)
session_layer_prim_spec_paths = self.get_all_prim_spec_paths(session_layer_item.absolute_root_spec)
root_layer_item.layer.TransferContent(session_layer_item.layer)
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, session_layer_prim_spec_paths)
async def test_prim_spec_type_name_change(self):
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
self.stage.DefinePrim("/test")
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.assertEqual(root_layer_item.prim_specs[0].path, Sdf.Path("/test"))
self.assertEqual(root_layer_item.prim_specs[0].type_name, "")
UsdGeom.Cube.Define(self.stage, root_layer_item.prim_specs[0].path)
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.assertEqual(root_layer_item.prim_specs[0].type_name, "Cube")
async def test_parenting_prim_refresh(self):
# Test for https://nvidia-omniverse.atlassian.net/browse/OM-34957
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
# Creates 3 prims
prim_spec_paths = list(self.create_prim_specs(self.stage, Sdf.Path.absoluteRootPath, [3]))
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, set(prim_spec_paths))
        # Moves the first two prims to be children of the 3rd one.
new_path0 = prim_spec_paths[2].AppendElementString(prim_spec_paths[0].name)
new_path1 = prim_spec_paths[2].AppendElementString(prim_spec_paths[1].name)
omni.kit.commands.execute("MovePrim", path_from=prim_spec_paths[0], path_to=new_path0)
omni.kit.commands.execute("MovePrim", path_from=prim_spec_paths[1], path_to=new_path1)
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
self.assertEqual(len(root_layer_item.absolute_root_spec.children), 1)
self.assertEqual(root_layer_item.absolute_root_spec.children[0].path, prim_spec_paths[2])
self.assertEqual(len(root_layer_item.absolute_root_spec.children[0].children), 2)
self.check_prim_spec_children(root_layer_item.absolute_root_spec.children[0], set([new_path0, new_path1]))
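        # The undo below reverts only the second MovePrim; the first move remains applied.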
omni.kit.undo.undo()
await self.app.next_update_async()
await self.app.next_update_async()
self.assertEqual(len(root_layer_item.absolute_root_spec.children), 2)
self.check_prim_spec_children(root_layer_item.absolute_root_spec, set(prim_spec_paths[1:3]))
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, set([new_path0, prim_spec_paths[1], prim_spec_paths[2]]))
omni.kit.undo.undo()
await self.app.next_update_async()
await self.app.next_update_async()
self.check_prim_spec_tree(root_layer_item.absolute_root_spec, set(prim_spec_paths))
async def test_specifier_reference(self):
# Test for https://nvidia-omniverse.atlassian.net/browse/OM-34957
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
layer_content = '''\
#sdf 1.0
def "test_prim" (
prepend references = @../invalid/reference2.usd@
)
{
}
'''
root_layer_item.layer.ImportFromString(layer_content)
await self.app.next_update_async()
await self.app.next_update_async()
test_prim_spec = root_layer_item.absolute_root_spec.children[0]
self.assertEqual(test_prim_spec.specifier, PrimSpecSpecifier.DEF_WITH_REFERENCE)
stage = self.stage
test_prim = stage.GetPrimAtPath("/test_prim")
ref_and_layers = omni.usd.get_composed_references_from_prim(test_prim)
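        # Convert each composed reference into an equivalent payload, preserving
        # the asset path, prim path and layer offset.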
for reference, layer in ref_and_layers:
with Usd.EditContext(stage, layer):
payload = Sdf.Payload(assetPath=reference.assetPath.replace("\\", "/"), primPath=reference.primPath, layerOffset=reference.layerOffset)
omni.kit.commands.execute("RemoveReference", stage=stage, prim_path=test_prim.GetPath(), reference=reference)
omni.kit.commands.execute("AddPayload", stage=stage, prim_path=test_prim.GetPath(), payload=payload)
await self.app.next_update_async()
await self.app.next_update_async()
self.assertEqual(test_prim_spec.specifier, PrimSpecSpecifier.DEF_WITH_PAYLOAD)
ref_and_layers = omni.usd.get_composed_payloads_from_prim(test_prim)
for payload, layer in ref_and_layers:
with Usd.EditContext(stage, layer):
reference = Sdf.Reference(assetPath=payload.assetPath.replace("\\", "/"), primPath=payload.primPath, layerOffset=payload.layerOffset)
omni.kit.commands.execute("RemovePayload", stage=stage, prim_path=test_prim.GetPath(), payload=payload)
omni.kit.commands.execute("AddReference", stage=stage, prim_path=test_prim.GetPath(), reference=reference)
await self.app.next_update_async()
await self.app.next_update_async()
self.assertEqual(test_prim_spec.specifier, PrimSpecSpecifier.DEF_WITH_REFERENCE)

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_path_utils.py
import omni.kit.test
import omni.usd
from omni.kit.widget.layers.path_utils import PathUtils
class TestPathUtils(omni.kit.test.AsyncTestCase):
def test_utils(self):
path = "omniverse://test-server/invalid_path"
self.assertTrue(PathUtils.is_omni_path(path))
path = "c:/file.usd"
self.assertFalse(PathUtils.is_omni_path(path))
def test_is_live_layer(self):
path = "omniverse://test-server/test.live"
self.assertTrue(PathUtils.is_omni_live(path))
path = "c:/file.usd"
self.assertFalse(PathUtils.is_omni_live(path))

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/base.py
import carb
import omni
import omni.kit.test
import omni.usd
import omni.client
import omni.kit.widget.layers
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.layers.prim_spec_item import PrimSpecItem, PrimSpecSpecifier
from pxr import Sdf, Usd, UsdGeom, Gf
class TestLayerNonUIBase(omni.kit.test.AsyncTestCase):
# Before running each test
async def setUp(self):
self.usd_context = omni.usd.get_context()
self.app = omni.kit.app.get_app()
await omni.usd.get_context().new_stage_async()
async def tearDown(self):
await omni.usd.get_context().close_stage_async()
class TestLayerUIBase(omni.kit.test.AsyncTestCase):
"""Tests for layer model refresh reacted to usd stage changes."""
# Before running each test
async def setUp(self):
await omni.usd.get_context().new_stage_async()
self.usd_context = omni.usd.get_context()
self.layers_instance = omni.kit.widget.layers.get_instance()
self.app = omni.kit.app.get_app()
token = carb.tokens.get_tokens_interface()
self.temp_dir = token.resolve("${temp}")
if not self.temp_dir.endswith("/"):
self.temp_dir += "/"
async def tearDown(self):
if self.layers_instance.get_layer_model():
self.layers_instance.get_layer_model().spec_linking_mode = False
if self.usd_context.get_stage():
await omni.usd.get_context().close_stage_async()
def check_prim_spec_regular_fields(
self, prim_spec_item, name, path, type_name="",
specifier=PrimSpecSpecifier.DEF_ONLY, children=[],
has_missing_reference=False, instanceable=False,
filtered=False, has_children=False
):
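        # Verifies the item's basic fields and that the set of all of its
        # descendant spec paths matches the expected `children` list exactly.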
self.assertEqual(prim_spec_item.name, name)
self.assertEqual(prim_spec_item.path, path)
self.assertEqual(prim_spec_item.type_name, type_name)
self.assertEqual(prim_spec_item.specifier, specifier)
prim_spec_paths = self.get_all_prim_spec_paths(prim_spec_item)
paths = set([])
for path in children:
paths.add(Sdf.Path(path))
prim_spec_paths.discard(prim_spec_item.path)
self.assertEqual(prim_spec_paths, paths)
self.assertEqual(prim_spec_item.has_missing_reference, has_missing_reference)
self.assertEqual(prim_spec_item.instanceable, instanceable)
self.assertEqual(prim_spec_item.filtered, filtered)
self.assertEqual(prim_spec_item.has_children, has_children)
def check_layer_regular_fields(
self, layer_item, name, identifier, missing=False, is_edit_target=False,
reserved=False, read_only=False, sublayer_list=[], muted=False,
muted_or_parent_muted=False, from_session_layer=False,
dirty=False, anonymous=False, filtered=False,
prim_spec_list=[], parent=None
):
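        # Verifies the layer item's state flags, its sublayer identifiers
        # (in order) and the flat set of prim spec paths it holds.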
self.assertEqual(layer_item.name, name)
self.assertEqual(layer_item.identifier, identifier)
self.assertEqual(layer_item.missing, missing)
self.assertEqual(layer_item.is_edit_target, is_edit_target)
self.assertEqual(layer_item.reserved, reserved)
self.assertEqual(layer_item.read_only_on_disk, read_only)
self.assertEqual(layer_item.from_session_layer, from_session_layer)
self.assertEqual(layer_item.muted, muted)
self.assertEqual(layer_item.editable, not muted and not layer_item.read_only_on_disk and not layer_item.locked)
if not anonymous:
self.assertEqual(layer_item.dirty, dirty)
self.assertEqual(layer_item.anonymous, anonymous)
self.assertEqual(layer_item.filtered, filtered)
self.assertEqual(layer_item.muted_or_parent_muted, muted_or_parent_muted)
self.assertEqual(layer_item.parent, parent)
paths = self.get_all_sublayer_identifiers(layer_item)
self.assertEqual(paths, sublayer_list)
prim_spec_paths = self.get_all_prim_spec_paths(layer_item.absolute_root_spec)
expected_paths = set(prim_spec_list)
self.assertEqual(prim_spec_paths, expected_paths)
def create_flat_sublayers(self, root_layer, num):
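        # Creates `num` anonymous sublayers under root_layer, returning both the
        # layer handles and their identifiers in insertion order.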
sublayers = []
identifiers = []
for i in range(num):
layer = LayerUtils.create_sublayer(root_layer, i, "")
sublayers.append(layer)
identifiers.append(layer.identifier)
return sublayers, identifiers
def create_flat_prim_specs(self, stage, parent_path, num):
prim_spec_paths = set([])
for i in range(num):
prim = stage.DefinePrim(parent_path.AppendElementString(f"xform{i}"), "Xform")
translation = Gf.Vec3d(-200, 0.0, 0.0)
common_api = UsdGeom.XformCommonAPI(prim)
common_api.SetTranslate(translation)
prim_spec_paths.add(prim.GetPath())
return prim_spec_paths
def get_all_prim_spec_items(self, prim_spec_item):
prim_specs = set([])
q = [prim_spec_item]
if prim_spec_item.path != Sdf.Path.absoluteRootPath:
prim_specs.add(prim_spec_item)
while len(q) > 0:
item = q.pop()
specs = item.children
for spec in specs:
prim_specs.add(spec)
q.append(spec)
return prim_specs
def get_all_prim_spec_paths(self, prim_spec_item):
specs = self.get_all_prim_spec_items(prim_spec_item)
paths = [spec.path for spec in specs]
return set(paths)
def get_all_sublayer_identifiers(self, layer_item):
paths = []
for sublayer in layer_item.sublayers:
paths.append(sublayer.identifier)
return paths
def create_sublayers(self, root_layer, level=[]):
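        # Recursively builds a sublayer tree: level[0] sublayers are created under
        # root_layer, and level[1:] is applied to each of them in turn. Returns
        # maps from a parent identifier to its sublayers / sublayer identifiers.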
if not level:
return {}, {}
sublayers, identifiers = self.create_flat_sublayers(root_layer, level[0])
sublayers_map = {}
identifiers_map = {}
sublayers_map[root_layer.identifier] = sublayers
identifiers_map[root_layer.identifier] = identifiers
for sublayer in sublayers:
lm, im = self.create_sublayers(sublayer, level[1:])
sublayers_map.update(lm)
identifiers_map.update(im)
return sublayers_map, identifiers_map
def create_prim_specs(self, stage, parent_prim_path, level=[]):
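        # Recursively builds a prim tree: e.g. level=[10, 5] defines 10 prims under
        # parent_prim_path, each with 5 children. Returns the flat set of all
        # created prim paths.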
if not level:
return set([])
prim_spec_paths = self.create_flat_prim_specs(stage, parent_prim_path, level[0])
all_child_spec_paths = set([])
for prim_spec_path in prim_spec_paths:
all_child_spec_paths.update(self.create_prim_specs(stage, prim_spec_path, level[1:]))
prim_spec_paths.update(all_child_spec_paths)
return prim_spec_paths
def check_sublayer_tree(self, layer_item, identifiers_map):
layer_identifiers = identifiers_map.get(layer_item.identifier, [])
sublayer_paths = self.get_all_sublayer_identifiers(layer_item)
self.assertEqual(
sublayer_paths, layer_identifiers,
f"{layer_item.identifier}'s sublayers does not match"
)
for sublayer_item in layer_item.sublayers:
self.check_sublayer_tree(sublayer_item, identifiers_map)
def check_prim_spec_children(self, prim_spec_item: PrimSpecItem, expected_children_prim_paths):
paths = set({})
for child in prim_spec_item.children:
paths.add(child.path)
self.assertEqual(paths, set(expected_children_prim_paths))
def check_prim_spec_tree(self, prim_spec_item, expected_prim_paths):
paths = self.get_all_prim_spec_paths(prim_spec_item)
self.assertEqual(paths, expected_prim_paths)
async def prepare_empty_stage(self):
root_layer = Sdf.Layer.CreateAnonymous("__root__")
stage = Usd.Stage.Open(root_layer)
await self.usd_context.attach_stage_async(stage)
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
return stage

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_prim_spec_item.py
import omni.kit.test
import os
import uuid
import omni.client
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.layers.layer_settings import LayerSettings
from omni.kit.widget.layers.prim_spec_item import PrimSpecSpecifier
from .base import TestLayerUIBase
from pxr import Sdf, UsdGeom
class TestLayerPrimSpecItemAPI(TestLayerUIBase):
"""Tests for layer model refresh reacted to usd stage changes."""
async def setUp(self):
await super().setUp()
self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
self.test_folder += "/"
await omni.client.create_folder_async(self.test_folder)
self.enable_missing_reference = LayerSettings().show_missing_reference
LayerSettings().show_missing_reference = True
self.stage = await self.prepare_empty_stage()
async def tearDown(self):
LayerSettings().show_missing_reference = self.enable_missing_reference
await self.usd_context.close_stage_async()
await omni.client.delete_async(self.test_folder)
await super().tearDown()
async def test_prim_spec_item_properties(self):
temp_layer = Sdf.Layer.CreateAnonymous()
typeless_prim = self.stage.DefinePrim("/test")
cube_prim = self.stage.DefinePrim("/test/cube", "Cube")
prim_with_reference = self.stage.DefinePrim("/test/reference", "Xform")
prim_with_reference.GetReferences().AddReference(temp_layer.identifier)
# Add invalid reference
prim_with_reference.GetReferences().AddReference("../invalid_reference.usd")
instanced_prim = self.stage.DefinePrim("/test/instanced", "Xform")
instanced_prim.SetInstanceable(True)
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
self.assertEqual(len(root_layer_item.prim_specs), 1)
typeless_prim_item = root_layer_item.prim_specs[0]
self.assertEqual(len(typeless_prim_item.children), 3)
cube_prim_item = typeless_prim_item.children[0]
prim_with_reference_item = typeless_prim_item.children[1]
instanced_prim_item = typeless_prim_item.children[2]
self.check_prim_spec_regular_fields(
typeless_prim_item, "test", "/test",
children=["/test/cube", "/test/reference", "/test/instanced"],
has_children=True
)
self.check_prim_spec_regular_fields(
cube_prim_item, "cube", "/test/cube", type_name="Cube"
)
self.check_prim_spec_regular_fields(
prim_with_reference_item, "reference", "/test/reference", type_name="Xform",
specifier=PrimSpecSpecifier.DEF_WITH_REFERENCE,
has_missing_reference=True
)
self.check_prim_spec_regular_fields(
instanced_prim_item, "instanced", "/test/instanced", type_name="Xform",
instanceable=True
)
async def test_prim_spec_item_filter(self):
self.stage.DefinePrim("/test/filter/keyword1/keyword2")
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
await self.app.next_update_async()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
self.assertEqual(len(root_layer_item.prim_specs), 1)
test_prim = root_layer_item.prim_specs[0]
filter_prim = test_prim.children[0]
keyword1_prim = filter_prim.children[0]
keyword2_prim = keyword1_prim.children[0]
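        # Prefiltering flags items whose name matches the keyword, plus their
        # ancestors; descendants of a match are not flagged.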
root_layer_item.prefilter("keyword1")
self.assertTrue(layer_model.can_item_have_children(root_layer_item))
self.assertTrue(test_prim.filtered)
self.assertTrue(filter_prim.filtered)
self.assertTrue(keyword1_prim.filtered)
self.assertFalse(keyword2_prim.filtered)

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_layer_model.py
import omni.kit.test
import os
import uuid
import omni.client
import tempfile
from .base import TestLayerUIBase
from pxr import Sdf, Usd
from omni.kit.usd.layers import LayerUtils
class TestLayerModelAPI(TestLayerUIBase):
async def setUp(self):
await super().setUp()
self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
self.test_folder += "/"
await omni.client.create_folder_async(self.test_folder)
self.stage = await self.prepare_empty_stage()
async def tearDown(self):
if self.usd_context.get_stage():
await self.usd_context.close_stage_async()
await omni.client.delete_async(self.test_folder)
await super().tearDown()
async def test_authoring_mode_switch(self):
layer_model = self.layers_instance.get_layer_model()
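        # The three edit modes (normal, auto-authoring, spec-linking) are
        # mutually exclusive; enabling one clears the others.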
layer_model.auto_authoring_mode = True
self.assertTrue(layer_model.auto_authoring_mode)
self.assertFalse(layer_model.normal_mode)
self.assertFalse(layer_model.spec_linking_mode)
layer_model.auto_authoring_mode = False
self.assertFalse(layer_model.auto_authoring_mode)
self.assertFalse(layer_model.spec_linking_mode)
self.assertTrue(layer_model.normal_mode)
layer_model.spec_linking_mode = True
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
self.assertFalse(layer_model.auto_authoring_mode)
self.assertTrue(layer_model.spec_linking_mode)
self.assertFalse(layer_model.normal_mode)
layer_model.spec_linking_mode = False
self.assertFalse(layer_model.auto_authoring_mode)
self.assertFalse(layer_model.spec_linking_mode)
self.assertTrue(layer_model.normal_mode)
async def test_api(self):
layer_model = self.layers_instance.get_layer_model()
        # Test the API calls to make sure they do not throw errors.
        # They are simply called here without further checking since the
        # test wrapper will catch console errors on failure. Functionality
        # tests are covered in test.command.py already.
layer_model.flatten_all_layers()
with tempfile.TemporaryDirectory() as tmpdirname:
# save the file
tmp_file_path = os.path.join(tmpdirname, "tmp.usda")
tmp_file_path2 = os.path.join(tmpdirname, "tmp2.usda")
result = await omni.usd.get_context().save_as_stage_async(tmp_file_path)
self.assertTrue(result)
new_sublayer = Sdf.Layer.CreateNew(tmp_file_path2)
new_sublayer.Save()
stage = omni.usd.get_context().get_stage()
stage.GetRootLayer().subLayerPaths.append(new_sublayer.identifier)
stage.SetEditTarget(Usd.EditTarget(new_sublayer))
def on_save_done(success, error_str, saved_layers):
self.assertTrue(success)
self.assertEqual(saved_layers, [tmp_file_path])
layer_model.save_layers([tmp_file_path])
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
# Close stage and re-open it to see if edit target is saved correctly
await omni.usd.get_context().close_stage_async()
await omni.usd.get_context().open_stage_async(tmp_file_path)
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
stage = omni.usd.get_context().get_stage()
self.assertEqual(stage.GetEditTarget().GetLayer(), new_sublayer)
async def _wait(self, frames=2):
for i in range(frames):
await omni.kit.app.get_app().next_update_async()
async def test_layer_move_and_reload(self):
usd_context = omni.usd.get_context()
with tempfile.TemporaryDirectory() as tmpdirname:
# save the file
tmp_file_path = os.path.join(tmpdirname, "tmp.usd")
result = await usd_context.save_as_stage_async(tmp_file_path)
self.assertTrue(result)
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
sublayer1 = Sdf.Layer.CreateAnonymous()
sublayer2 = Sdf.Layer.CreateAnonymous()
sublayer3 = Sdf.Layer.CreateAnonymous()
stage.GetRootLayer().subLayerPaths.append(sublayer1.identifier)
stage.GetRootLayer().subLayerPaths.append(sublayer2.identifier)
stage.GetRootLayer().subLayerPaths.append(sublayer3.identifier)
await self._wait()
root_layer_item = layer_model.root_layer_item
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
self.assertEqual(sublayer1_item.identifier, sublayer1.identifier)
self.assertEqual(sublayer2_item.identifier, sublayer2.identifier)
self.assertEqual(sublayer3_item.identifier, sublayer3.identifier)
root_layer = stage.GetRootLayer()
LayerUtils.move_layer(root_layer.identifier, 0, root_layer.identifier, 1, True)
root_layer.Save()
await self._wait()
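            # Reloading from disk discards in-memory state; the refreshed model
            # should surface the saved (reordered) sublayer list.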
root_layer.Reload(True)
root_layer = None
stage = None
root_layer_item = layer_model.root_layer_item
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
self.assertEqual(sublayer1_item.identifier, sublayer2.identifier)
self.assertEqual(sublayer2_item.identifier, sublayer1.identifier)
self.assertEqual(sublayer3_item.identifier, sublayer3.identifier)
await usd_context.close_stage_async()
async def test_drag_and_drop_sublayer(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
sublayer1 = Sdf.Layer.CreateAnonymous()
sublayer2 = Sdf.Layer.CreateAnonymous()
sublayer3 = Sdf.Layer.CreateAnonymous()
stage.GetRootLayer().subLayerPaths.append(sublayer1.identifier)
stage.GetRootLayer().subLayerPaths.append(sublayer2.identifier)
stage.GetRootLayer().subLayerPaths.append(sublayer3.identifier)
await self._wait()
root_layer_item = layer_model.root_layer_item
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
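        # Drop location semantics, as the assertions below exercise: -1 drops onto
        # the target item itself (re-parenting the source as its sublayer), while
        # an index >= 0 inserts among the target's siblings.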
self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, -1))
self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, 0))
self.assertTrue(layer_model.drop_accepted(sublayer1_item, sublayer2_item, -1))
self.assertTrue(layer_model.drop_accepted(sublayer1_item, sublayer2_item, 0))
self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, -1))
self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, 0))
LayerUtils.set_layer_lock_status(stage.GetRootLayer(), sublayer1_item.identifier, True)
self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, -1))
self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer1_item, 0))
self.assertFalse(layer_model.drop_accepted(sublayer1_item, sublayer2_item, -1))
self.assertTrue(layer_model.drop_accepted(sublayer1_item, sublayer2_item, 0))
self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, -1))
self.assertTrue(layer_model.drop_accepted(sublayer2_item, sublayer1_item, 0))
layer_model.drop(sublayer2_item, sublayer3_item, 1)
await self._wait()
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
self.assertEqual(sublayer1_item.identifier, sublayer1.identifier)
self.assertEqual(sublayer2_item.identifier, sublayer3.identifier)
self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
layer_model.drop(sublayer2_item, sublayer1_item, 2)
await self._wait()
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
self.assertEqual(sublayer1_item.identifier, sublayer3.identifier)
self.assertEqual(sublayer2_item.identifier, sublayer1.identifier)
self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
layer_model.drop(sublayer3_item, sublayer2_item, -1)
await self._wait()
self.assertEqual(len(root_layer_item.sublayers), 2)
self.assertEqual(len(sublayer3_item.sublayers), 1)
self.assertEqual(sublayer3_item.sublayers[0].identifier, sublayer2_item.identifier)

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_legacy_layer_cpp_bindings.py
import carb
import omni.kit.test
import omni.kit.undo
import omni.kit.commands
import omni.timeline
import omni.usd
import tempfile
import omni.client
from omni.kit.widget.layers import LayerUtils
from pxr import Sdf, UsdGeom
from .base import TestLayerNonUIBase
class TestCppBindings(TestLayerNonUIBase):
async def setUp(self):
await super().setUp()
self.previous_retry_values = omni.client.set_retries(0, 0, 0)
async def tearDown(self):
omni.client.set_retries(*self.previous_retry_values)
await super().tearDown()
def check_sublayers(self, sublayer_paths, expected_layer_identifiers):
sublayer_paths = sorted(sublayer_paths)
expected_layer_identifiers = sorted(expected_layer_identifiers)
self.assertTrue(
sublayer_paths == expected_layer_identifiers,
f"Sublayers array does not match, got: {sublayer_paths}, expected: {expected_layer_identifiers}",
)
async def test_layers_state_apis(self):
layers = self.usd_context.get_layers()
stage = self.usd_context.get_stage()
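        # The muteness scope toggles between local (per-context) and global
        # muting, as the assertions below verify.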
self.assertFalse(layers.is_layer_muteness_global())
layers.set_layer_muteness_scope(True)
self.assertTrue(layers.is_layer_muteness_global())
layers.set_layer_muteness_scope(False)
self.assertFalse(layers.is_layer_muteness_global())
layer2 = Sdf.Layer.CreateAnonymous()
LayerUtils.insert_sublayer(stage.GetRootLayer(), 0, layer2.identifier)
self.assertFalse(layers.is_layer_locally_muted(layer2.identifier))
self.assertFalse(layers.is_layer_globally_muted(layer2.identifier))
stage = self.usd_context.get_stage()
stage.MuteLayer(layer2.identifier)
await omni.kit.app.get_app().next_update_async()
layers.set_layer_muteness_scope(False)
self.assertTrue(layers.is_layer_locally_muted(layer2.identifier))
self.assertFalse(layers.is_layer_globally_muted(layer2.identifier))
self.assertFalse(omni.usd.is_layer_globally_muted(self.usd_context, layer2.identifier))
layers.set_layer_muteness_scope(True)
self.assertTrue(layers.is_layer_muteness_global())
self.assertTrue(layers.is_layer_locally_muted(layer2.identifier))
self.assertFalse(layers.is_layer_globally_muted(layer2.identifier))
self.assertEqual(layers.get_layer_edit_mode(), omni.usd.LayerEditMode.NORMAL)
layers.set_layer_edit_mode(omni.usd.LayerEditMode.AUTO_AUTHORING)
self.assertEqual(layers.get_layer_edit_mode(), omni.usd.LayerEditMode.AUTO_AUTHORING)
layers.set_layer_edit_mode(omni.usd.LayerEditMode.NORMAL)
self.assertEqual(layers.get_layer_edit_mode(), omni.usd.LayerEditMode.NORMAL)
self.assertFalse(layers.is_layer_locked(layer2.identifier))
layers.set_layer_lock_state(layer2.identifier, True)
self.assertTrue(layers.is_layer_locked(layer2.identifier))
layers.set_layer_lock_state(layer2.identifier, False)
self.assertFalse(layers.is_layer_locked(layer2.identifier))
self.assertFalse(layers.is_layer_locked_by_other(layer2.identifier))
self.assertEqual(layers.get_layer_lock_user_name(layer2.identifier), "")
self.assertTrue(layers.is_layer_writable(layer2.identifier))
self.assertFalse(layers.is_layer_savable(layer2.identifier))
layers.set_layer_edit_mode(omni.usd.LayerEditMode.AUTO_AUTHORING)
layers.set_default_edit_layer_identifier(layer2.identifier)
self.assertEqual(layers.get_default_edit_layer_identifier(), layer2.identifier)
self.assertFalse(layers.is_auto_authoring_layer(layer2.identifier))
edit_target = stage.GetEditTarget()
self.assertTrue(layers.is_auto_authoring_layer(edit_target.GetLayer().identifier))
with omni.usd.active_authoring_layer_context(self.usd_context):
edit_target = stage.GetEditTarget()
self.assertFalse(layers.is_auto_authoring_layer(edit_target.GetLayer().identifier))
self.assertEqual(edit_target.GetLayer().identifier, layer2.identifier)
async def test_get_layer_name(self):
layers = self.usd_context.get_layers()
self.assertEqual("abc.usd", layers.get_layer_name("c:/a/b/abc.usd"))
self.assertEqual("abc.usda", layers.get_layer_name("c:/a/b/abc.usda"))
self.assertEqual("abc.usda", layers.get_layer_name("omniverse://ov-invalid-fake-server/a/b/abc.usda"))
layer = Sdf.Layer.CreateAnonymous()
self.assertEqual(layer.identifier, layers.get_layer_name(layer.identifier))
self.assertEqual("", layers.get_layer_name(""))
self.assertEqual("a b c.usda", layers.get_layer_name("omniverse://ov-invalid-fake-server/a/b/a%20b%20c.usda"))
async def test_get_used_sublayers(self):
layers = self.usd_context.get_layers()
stage = self.usd_context.get_stage()
root_layer = stage.GetRootLayer()
sublayers = layers.get_used_sublayers()
self.check_sublayers(sublayers, [root_layer.identifier])
layer0 = Sdf.Layer.CreateAnonymous()
LayerUtils.insert_sublayer(root_layer, 0, layer0.identifier)
sublayers = layers.get_used_sublayers()
self.check_sublayers(sublayers, [root_layer.identifier, layer0.identifier])
layer1 = Sdf.Layer.CreateAnonymous()
LayerUtils.insert_sublayer(root_layer, 1, layer1.identifier)
sublayers = layers.get_used_sublayers()
self.check_sublayers(sublayers, [root_layer.identifier, layer0.identifier, layer1.identifier])
layer2 = Sdf.Layer.CreateAnonymous()
layer3 = Sdf.Layer.CreateAnonymous()
layer4 = Sdf.Layer.CreateAnonymous()
layer5 = Sdf.Layer.CreateAnonymous()
LayerUtils.insert_sublayer(layer2, 0, layer3.identifier)
LayerUtils.insert_sublayer(layer2, 1, layer4.identifier)
LayerUtils.insert_sublayer(layer4, 0, layer5.identifier)
LayerUtils.insert_sublayer(root_layer, 2, layer2.identifier)
sublayers = layers.get_used_sublayers()
self.check_sublayers(
sublayers,
[
root_layer.identifier,
layer0.identifier,
layer1.identifier,
layer2.identifier,
layer3.identifier,
layer4.identifier,
layer5.identifier,
],
)
# Removes layer0
LayerUtils.remove_sublayer(root_layer, 0)
sublayers = layers.get_used_sublayers()
self.check_sublayers(
sublayers,
[
root_layer.identifier,
layer1.identifier,
layer2.identifier,
layer3.identifier,
layer4.identifier,
layer5.identifier,
],
)
        # Removing layer2 also removes layer3, layer4 and layer5
LayerUtils.remove_sublayer(root_layer, 1)
sublayers = layers.get_used_sublayers()
self.check_sublayers(sublayers, [root_layer.identifier, layer1.identifier])
async def test_get_dirty_sublayers(self):
usd_context = omni.usd.get_context()
layers = usd_context.get_layers()
stage = usd_context.get_stage()
root_layer = stage.GetRootLayer()
with tempfile.TemporaryDirectory() as tempdir:
format = Sdf.FileFormat.FindByExtension(".usd")
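            # Sdf.Layer.New creates in-memory layers with file-backed identifiers
            # that have not been saved to disk yet.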
layer0 = Sdf.Layer.New(format, f"{tempdir}/1.usd")
layer1 = Sdf.Layer.New(format, f"{tempdir}/2.usd")
layer2 = Sdf.Layer.New(format, f"{tempdir}/3.usd")
layer3 = Sdf.Layer.New(format, f"{tempdir}/4.usd")
layer4 = Sdf.Layer.New(format, f"{tempdir}/5.usd")
layer5 = Sdf.Layer.New(format, f"{tempdir}/6.usd")
LayerUtils.insert_sublayer(root_layer, 0, layer0.identifier, False)
LayerUtils.insert_sublayer(root_layer, 0, layer1.identifier, False)
LayerUtils.insert_sublayer(layer2, 0, layer3.identifier, False)
LayerUtils.insert_sublayer(layer2, 0, layer4.identifier, False)
LayerUtils.insert_sublayer(layer4, 0, layer5.identifier, False)
LayerUtils.insert_sublayer(root_layer, 0, layer2.identifier, False)
sublayers = layers.get_used_sublayers()
self.check_sublayers(
sublayers,
[
root_layer.identifier,
layer0.identifier,
layer1.identifier,
layer2.identifier,
layer3.identifier,
layer4.identifier,
layer5.identifier,
],
)
            # Checks the dirtiness of layers: since layer2 and layer4 have been
            # touched, they should be dirty at this moment.
dirty_sublayers = layers.get_dirty_sublayers()
self.check_sublayers(dirty_sublayers, [layer2.identifier, layer4.identifier])
# Touches layer1
LayerUtils.set_edit_target(stage, layer1.identifier)
UsdGeom.Mesh.Define(stage, "/root/test")
dirty_sublayers = layers.get_dirty_sublayers()
self.check_sublayers(dirty_sublayers, [layer1.identifier, layer2.identifier, layer4.identifier])

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/__init__.py
from .test_misc import *
from .test_usd_events import *
from .test_prim_spec_item import *
from .test_performance import *
from .test_layer_model import *
from .test_path_utils import *
from .test_extension import *
from .test_layer_mode_utils import *
from .test_selection import *
from .drag_drop_single import *
from .drag_drop_multi import *
from .test_live_session import *
from .test_context_menu import *
from .test_window_ui_states import *
from .test_material_watcher import *
from .test_hotkey import *
# Legacy tests
from .test_legacy_layer_cpp_bindings import *
from .test_legacy_edit_mode import *

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_layer_mode_utils.py
import omni.kit.test
import os
import uuid
import omni.client
import omni.kit.ui
from omni.kit.widget.layers.layer_settings import LayerSettings
from omni.kit.widget.layers.layer_model_utils import LayerModelUtils
from omni.kit.widget.prompt import PromptManager
from omni.kit.usd.layers import LayerUtils
from .base import TestLayerUIBase
from pxr import Sdf, Usd, UsdGeom, Gf
class TestLayerModelUtils(TestLayerUIBase):
async def setUp(self):
await super().setUp()
self.previous_retry_values = omni.client.set_retries(0, 0, 0)
self.stage = await self.prepare_empty_stage()
self.old_warning_enabled = LayerSettings().show_merge_or_flatten_warning
self.test_folder = omni.client.combine_urls(self.temp_dir, str(uuid.uuid1()))
self.test_folder += "/"
await omni.client.create_folder_async(self.test_folder)
async def tearDown(self):
LayerSettings().show_merge_or_flatten_warning = self.old_warning_enabled
await self.usd_context.close_stage_async()
await omni.client.delete_async(self.test_folder)
omni.client.set_retries(*self.previous_retry_values)
await super().tearDown()
async def _wait(self, frames=2):
for i in range(frames):
await omni.kit.app.get_app().next_update_async()
async def test_merge_layers(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
layer0 = Sdf.Layer.CreateAnonymous()
layer1 = Sdf.Layer.CreateAnonymous()
stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
await self._wait()
# Enable prompt and try to merge with ok button.
LayerSettings().show_merge_or_flatten_warning = True
LayerModelUtils.merge_layer_down(layer_model.root_layer_item.sublayers[0])
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
self.assertTrue(prompt)
prompt._on_ok_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer1.identifier)
omni.kit.undo.undo()
await self._wait()
# Enable prompt and cancel merge
LayerModelUtils.merge_layer_down(layer_model.root_layer_item.sublayers[0])
await self._wait()
prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
self.assertTrue(prompt)
prompt._on_cancel_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 2)
self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer0.identifier)
self.assertEqual(layer_model.root_layer_item.sublayers[1].identifier, layer1.identifier)
omni.kit.undo.undo()
await self._wait()
# Disable prompt and try to merge
LayerSettings().show_merge_or_flatten_warning = False
LayerModelUtils.merge_layer_down(layer_model.root_layer_item.sublayers[0])
await self._wait()
prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
self.assertFalse(prompt)
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer1.identifier)
# Make sure that all prompts are released
self.assertEqual(len(PromptManager._prompts), 0)
async def test_flatten_layers(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
layer0 = Sdf.Layer.CreateAnonymous()
layer1 = Sdf.Layer.CreateAnonymous()
stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
await self._wait()
# Enable prompt and try to flatten with ok button.
LayerSettings().show_merge_or_flatten_warning = True
LayerModelUtils.flatten_all_layers(layer_model)
await self._wait()
prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
self.assertTrue(prompt)
prompt._on_ok_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
omni.kit.undo.undo()
await self._wait()
# Enable prompt and cancel flatten
LayerModelUtils.flatten_all_layers(layer_model)
await self._wait()
prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
self.assertTrue(prompt)
prompt._on_cancel_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 2)
self.assertEqual(layer_model.root_layer_item.sublayers[0].identifier, layer0.identifier)
self.assertEqual(layer_model.root_layer_item.sublayers[1].identifier, layer1.identifier)
omni.kit.undo.undo()
await self._wait()
        # Disable prompt and try to flatten
LayerSettings().show_merge_or_flatten_warning = False
LayerModelUtils.flatten_all_layers(layer_model)
await self._wait()
prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
self.assertFalse(prompt)
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
async def test_layer_lock(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
format = Sdf.FileFormat.FindByExtension(".usd")
layer0 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test.usd")
layer1 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test2.usd")
layer2 = Sdf.Layer.CreateAnonymous()
stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
layer0.subLayerPaths.append(layer1.identifier)
layer0.subLayerPaths.append(layer2.identifier)
await self._wait(10)
LayerModelUtils.lock_layer(layer_model.root_layer_item.sublayers[0], True)
await self._wait()
self.assertTrue(layer_model.root_layer_item.sublayers[0].locked)
self.assertTrue(layer_model.root_layer_item.sublayers[0].sublayers[0].locked)
        # Anonymous layers cannot be locked.
self.assertFalse(layer_model.root_layer_item.sublayers[0].sublayers[1].locked)
async def test_move_sublayer(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
sublayer1 = Sdf.Layer.CreateAnonymous()
sublayer2 = Sdf.Layer.CreateAnonymous()
sublayer3 = Sdf.Layer.CreateAnonymous()
stage.GetRootLayer().subLayerPaths.append(sublayer1.identifier)
stage.GetRootLayer().subLayerPaths.append(sublayer2.identifier)
stage.GetRootLayer().subLayerPaths.append(sublayer3.identifier)
await self._wait()
root_layer_item = layer_model.root_layer_item
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, -1))
self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, 0))
self.assertTrue(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, -1))
self.assertTrue(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, 0))
self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, -1))
self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, 0))
LayerUtils.set_layer_lock_status(stage.GetRootLayer(), sublayer1_item.identifier, True)
self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, -1))
self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer1_item, 0))
self.assertFalse(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, -1))
self.assertTrue(LayerModelUtils.can_move_layer(sublayer1_item, sublayer2_item, 0))
self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, -1))
self.assertTrue(LayerModelUtils.can_move_layer(sublayer2_item, sublayer1_item, 0))
# Cannot move it as sublayer1 is locked
LayerModelUtils.move_layer(sublayer1_item, sublayer2_item, -1)
await self._wait()
self.assertEqual(len(sublayer1_item.sublayers), 0)
LayerModelUtils.move_layer(root_layer_item, sublayer3_item, 1)
await self._wait()
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
self.assertEqual(sublayer1_item.identifier, sublayer1.identifier)
self.assertEqual(sublayer2_item.identifier, sublayer3.identifier)
self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
LayerModelUtils.move_layer(root_layer_item, sublayer1_item, 2)
await self._wait()
sublayer1_item = root_layer_item.sublayers[0]
sublayer2_item = root_layer_item.sublayers[1]
sublayer3_item = root_layer_item.sublayers[2]
self.assertEqual(sublayer1_item.identifier, sublayer3.identifier)
self.assertEqual(sublayer2_item.identifier, sublayer1.identifier)
self.assertEqual(sublayer3_item.identifier, sublayer2.identifier)
LayerModelUtils.move_layer(sublayer3_item, sublayer2_item, -1)
await self._wait()
self.assertEqual(len(root_layer_item.sublayers), 2)
self.assertEqual(len(sublayer3_item.sublayers), 1)
self.assertEqual(sublayer3_item.sublayers[0].identifier, sublayer2_item.identifier)
async def test_remove_sublayers(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
format = Sdf.FileFormat.FindByExtension(".usd")
layer0 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test.usd")
stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
layer0.customLayerData["abc"] = "test"
with Usd.EditContext(stage, layer0):
UsdGeom.Cube.Define(stage, "/prim/test")
self.assertTrue(layer0.dirty)
await self._wait()
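        # Removing a dirty (unsaved) layer should pop a confirmation prompt
        # before the sublayer is actually dropped.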
LayerModelUtils.remove_layer(layer_model.root_layer_item.sublayers[0])
await self._wait()
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
self.assertTrue(prompt)
prompt._on_ok_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
omni.kit.undo.undo()
await self._wait()
LayerModelUtils.remove_layer(layer_model.root_layer_item.sublayers[0])
await self._wait()
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
self.assertTrue(prompt)
prompt._on_cancel_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
omni.kit.undo.undo()
await self._wait()
    async def test_remove_multiple_sublayers(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
format = Sdf.FileFormat.FindByExtension(".usd")
layer0 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test.usd")
layer1 = Sdf.Layer.New(format, "omniverse://omni-fake-invalid-server/test/test1.usd")
stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
stage.GetRootLayer().subLayerPaths.append(layer1.identifier)
layer0.customLayerData["abc"] = "test"
with Usd.EditContext(stage, layer0):
UsdGeom.Cube.Define(stage, "/prim/test")
self.assertTrue(layer0.dirty)
await self._wait()
LayerModelUtils.remove_layers(layer_model.root_layer_item.sublayers)
await self._wait()
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
self.assertTrue(prompt)
prompt._on_ok_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
omni.kit.undo.undo()
await self._wait()
LayerModelUtils.remove_layers(layer_model.root_layer_item.sublayers)
await self._wait()
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Removing Layer')
self.assertTrue(prompt)
prompt._on_cancel_button_fn()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 2)
omni.kit.undo.undo()
await self._wait()
def _skip_existing_file_prompt(self, click_yes=False):
prompt = PromptManager.query_prompt_by_title(f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Overwrite')
if prompt:
if click_yes:
prompt._on_ok_button_fn()
else:
prompt._on_cancel_button_fn()
async def test_create_sublayer(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
def _skip_transfer_content_prompt():
prompt = PromptManager.query_prompt_by_title("Transfer Content")
if prompt:
prompt._on_cancel_button_fn()
# First create
LayerModelUtils.create_sublayer(layer_model.root_layer_item, 0)
await self._wait()
from omni.kit.widget.layers.layer_model_utils import _file_picker
self.assertTrue(_file_picker)
path = os.path.join(self.test_folder, "test.usd")
_file_picker._on_file_open([path])
_file_picker.hide()
self._skip_existing_file_prompt(True)
_skip_transfer_content_prompt()
status, _ = omni.client.stat(path)
self.assertEqual(status, omni.client.Result.OK)
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
# Change the content of it for further comparison
with Usd.EditContext(stage, layer_model.root_layer_item.sublayers[0].layer):
UsdGeom.Cube.Define(stage, "/world/test")
stage.Save()
omni.kit.undo.undo()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 0)
def _check_content(old_content):
# Make sure stage is saved and content is there for further comparison
layer = Sdf.Layer.FindOrOpen(path)
self.assertTrue(layer)
prim = layer.GetPrimAtPath("/world/test")
if old_content:
self.assertTrue(prim)
else:
self.assertFalse(prim)
layer = None
_check_content(True)
# Open create file dialog and cancel it
LayerModelUtils.create_sublayer(layer_model.root_layer_item, 0)
from omni.kit.widget.layers.layer_model_utils import _file_picker
self.assertTrue(_file_picker)
_file_picker._on_cancel_open()
_file_picker.hide()
_check_content(True)
# Second create with override
LayerModelUtils.create_sublayer(layer_model.root_layer_item, 0)
from omni.kit.widget.layers.layer_model_utils import _file_picker
self.assertTrue(_file_picker)
_file_picker._on_file_open([path])
_file_picker.hide()
self._skip_existing_file_prompt(True)
_skip_transfer_content_prompt()
_check_content(False)
def _create_layer(self, path):
layer = Sdf.Layer.FindOrOpen(path)
if not layer:
layer = Sdf.Layer.CreateNew(path)
return layer
async def test_insert_sublayer(self):
layer_model = self.layers_instance.get_layer_model()
        # Create layers to be inserted
path = os.path.join(self.test_folder, "test.usd")
self._create_layer(path)
path2 = os.path.join(self.test_folder, "test2.usd")
self._create_layer(path2)
path3 = os.path.join(self.test_folder, "test3.usd")
self._create_layer(path3)
LayerModelUtils.insert_sublayer(layer_model.root_layer_item, 0)
await self._wait()
        # Only the first open will succeed; inserting the same layer again is rejected.
from omni.kit.widget.layers.layer_model_utils import _file_picker
for i in range(3):
self.assertTrue(_file_picker)
_file_picker._on_file_open([path])
_file_picker.hide()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 1)
# Insert multiple layers at the same time
_file_picker._on_file_open([path2, path3])
_file_picker.hide()
await self._wait()
self.assertEqual(len(layer_model.root_layer_item.sublayers), 3)
all_sublayers = []
for sublayer_item in layer_model.root_layer_item.sublayers:
all_sublayers.append(os.path.normpath(sublayer_item.identifier))
expected_sublayers = [os.path.normpath(path), os.path.normpath(path2), os.path.normpath(path3)]
self.assertEqual(set(all_sublayers), set(expected_sublayers))
async def test_move_prim_spec(self):
layer_model = self.layers_instance.get_layer_model()
stage = layer_model.usd_context.get_stage()
layer0 = Sdf.Layer.CreateAnonymous()
stage.GetRootLayer().subLayerPaths.append(layer0.identifier)
await self._wait()
cube_prim_path = "/World/test"
root_item = layer_model.root_layer_item
sublayer_item0 = layer_model.root_layer_item.sublayers[0]
with Usd.EditContext(stage, root_item.layer):
cube = UsdGeom.Cube.Define(stage, cube_prim_path)
cube_prim = cube.GetPrim()
await self._wait()
# Move prim without conflict
world_prim = root_item.absolute_root_spec.children[0]
self.assertTrue(root_item.layer.GetPrimAtPath(cube_prim_path))
LayerModelUtils.move_prim_spec(layer_model, sublayer_item0, world_prim)
world_prim = None
await self._wait()
self.assertFalse(root_item.layer.GetPrimAtPath(cube_prim_path))
omni.kit.undo.undo()
await self._wait()
self.assertTrue(root_item.layer.GetPrimAtPath(cube_prim_path))
with Usd.EditContext(stage, sublayer_item0.layer):
UsdGeom.XformCommonAPI(cube_prim).SetTranslate(Gf.Vec3d(0, 0, 0))
await self._wait()
self.assertTrue(sublayer_item0.layer.GetPrimAtPath(cube_prim_path))
# Move prim with conflict
world_prim = root_item.absolute_root_spec.children[0]
LayerModelUtils.move_prim_spec(layer_model, sublayer_item0, world_prim)
        # It should show a prompt to remind the user
prompt = PromptManager.query_prompt_by_title(
f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Merge Prim Spec',
)
self.assertTrue(prompt)
# Cancel it and make sure it's not moved
prompt._on_cancel_button_fn()
self.assertTrue(root_item.layer.GetPrimAtPath(cube_prim_path))
LayerModelUtils.move_prim_spec(layer_model, sublayer_item0, world_prim)
prompt = PromptManager.query_prompt_by_title(
f'{omni.kit.ui.get_custom_glyph_code("${glyphs}/exclamation.svg")} Merge Prim Spec',
)
self.assertTrue(prompt)
# Confirm it and make sure it's moved
prompt._on_ok_button_fn()
self.assertFalse(root_item.layer.GetPrimAtPath(cube_prim_path))
async def test_layer_save_as(self):
# Test for https://nvidia-omniverse.atlassian.net/browse/OM-35016
layer_model = self.layers_instance.get_layer_model()
LayerModelUtils.save_layer_as(layer_model.root_layer_item)
await self._wait(10)
from omni.kit.widget.layers.layer_model_utils import _file_picker
self.assertTrue(_file_picker)
self.assertEqual(_file_picker.get_current_filename(), layer_model.root_layer_item.layer.GetDisplayName())
saved_file = os.path.join(self.test_folder, "test_layer_save_as.usd")
_file_picker._on_file_open([saved_file])
_file_picker.hide()
self.assertTrue(os.path.exists(saved_file))

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_extension.py
import omni.kit.test
from pxr import Sdf
from .base import TestLayerUIBase
class TestLayerExtension(TestLayerUIBase):
async def setUp(self):
await super().setUp()
self.stage = await self.prepare_empty_stage()
async def tearDown(self):
await self.usd_context.close_stage_async()
async def test_layer_insert(self):
layer = Sdf.Layer.CreateAnonymous()
root_layer = self.stage.GetRootLayer()
self.layers_instance._on_icon_menu_click(None, layer.identifier)
self.assertEqual(len(root_layer.subLayerPaths), 1)
self.assertEqual(root_layer.subLayerPaths[0], layer.identifier)
        # Don't allow inserting the root layer into itself
self.layers_instance._on_icon_menu_click(None, root_layer.identifier)
self.assertEqual(len(root_layer.subLayerPaths), 1)
self.assertEqual(root_layer.subLayerPaths[0], layer.identifier)
        # Don't allow inserting a duplicate layer
self.layers_instance._on_icon_menu_click(None, layer.identifier)
self.assertEqual(len(root_layer.subLayerPaths), 1)
self.assertEqual(root_layer.subLayerPaths[0], layer.identifier)

omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_context_menu.py
import omni.kit.test
import os
import tempfile
import shutil
import omni.client
import omni.kit.app
from .base import TestLayerUIBase
from pxr import Usd, Sdf
from stat import S_IREAD, S_IWRITE
from omni.kit.usd.layers import LayerUtils
from omni.kit.widget.prompt import PromptManager
class TestContextMenu(TestLayerUIBase):
# Before running each test
async def setUp(self):
await super().setUp()
self.stage = self.usd_context.get_stage()
        # Use mkdtemp so the directory persists until tearDown's rmtree call;
        # tempfile.TemporaryDirectory().name would be removed as soon as the
        # temporary object is garbage collected.
        self._temp_dir = tempfile.mkdtemp()
self._writable_layer_path = os.path.join(self._temp_dir, "writable.usd")
self._writable_layer = Sdf.Layer.CreateNew(self._writable_layer_path)
self._writable_layer.Save()
self._readonly_layer_path = os.path.join(self._temp_dir, "readonly.usd")
layer = Sdf.Layer.CreateNew(self._readonly_layer_path)
layer.Save()
layer = None
os.chmod(self._readonly_layer_path, S_IREAD)
self._readonly_layer = Sdf.Layer.FindOrOpen(self._readonly_layer_path)
# Prepare stage
root_layer = self.stage.GetRootLayer()
root_layer.subLayerPaths.append(self._readonly_layer_path)
root_layer.subLayerPaths.append(self._writable_layer_path)
await self.wait()
await self._hide_prompt()
import omni.kit.ui_test as ui_test
await ui_test.find("Layer").focus()
async def tearDown(self):
await super().tearDown()
self._writable_layer = None
self._readonly_layer = None
self.stage = None
os.chmod(self._readonly_layer_path, S_IWRITE)
shutil.rmtree(self._temp_dir)
async def wait(self, frames=10):
for i in range(frames):
await self.app.next_update_async()
def _find_all_layer_items(self):
import omni.kit.ui_test as ui_test
writable_item = ui_test.find("Layer//Frame/**/Label[*].text=='writable.usd'")
self.assertTrue(writable_item)
readonly_item = ui_test.find("Layer//Frame/**/Label[*].text=='readonly.usd'")
self.assertTrue(readonly_item)
root_item = ui_test.find("Layer//Frame/**/Label[*].text=='Root Layer (Authoring Layer)'")
self.assertTrue(root_item)
return root_item, writable_item, readonly_item
async def test_set_authoring_layer(self):
import omni.kit.ui_test as ui_test
root_item, writable_item, readonly_item = self._find_all_layer_items()
await writable_item.right_click()
await ui_test.select_context_menu("Set Authoring Layer")
self.assertEqual(self._writable_layer.identifier, self.stage.GetEditTarget().GetLayer().identifier)
await readonly_item.right_click()
        # This menu item cannot be found for a read-only layer.
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu("Set Authoring Layer")
self.assertEqual(self._writable_layer.identifier, self.stage.GetEditTarget().GetLayer().identifier)
        # Double-clicking to change the authoring layer will also fail.
await readonly_item.double_click()
self.assertEqual(self._writable_layer.identifier, self.stage.GetEditTarget().GetLayer().identifier)
# Switch back to root layer
await root_item.double_click()
        self.assertEqual(self.stage.GetRootLayer().identifier, self.stage.GetEditTarget().GetLayer().identifier)
        # Muting a layer and trying to set it as the authoring layer will also fail
self.stage.MuteLayer(self._writable_layer.identifier)
await self.wait()
await writable_item.right_click()
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu("Set Authoring Layer")
        self.assertEqual(self.stage.GetRootLayer().identifier, self.stage.GetEditTarget().GetLayer().identifier)
self.stage.UnmuteLayer(self._writable_layer.identifier)
await self.wait()
        # Locking a layer and trying to set it as the authoring layer will also fail
LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, True)
await self.wait()
await writable_item.right_click()
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu("Set Authoring Layer")
        self.assertEqual(self.stage.GetRootLayer().identifier, self.stage.GetEditTarget().GetLayer().identifier)
LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, False)
await self.wait()
async def _hide_prompt(self):
prompt = PromptManager.query_prompt_by_title("Flatten All Layers")
if prompt:
prompt.visible = False
prompt = PromptManager.query_prompt_by_title("Merge Layer Down")
if prompt:
prompt.visible = False
async def _test_menu_item(
self,
item_name,
file_picker_name=None,
allow_read_only=False,
allow_mute=False,
allow_lock=False,
select_multiple=False
):
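        # Exercises a context-menu item against the writable layer, then verifies
        # its availability for read-only, muted and locked layers according to
        # the allow_* flags.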
import omni.kit.ui_test as ui_test
root_item, writable_item, readonly_item = self._find_all_layer_items()
if select_multiple:
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
layer_window = self.layers_instance._window
layer_tree_view = layer_window._layer_view
# Select two sublayers: writable and readonly sublayer of root.
layer_tree_view.selection = root_layer_item.sublayers
await self.wait()
# Ensure they are selected.
self.assertEqual(len(self.layers_instance.get_selected_items()), 2)
await writable_item.right_click()
await ui_test.select_context_menu(item_name)
await ui_test.human_delay()
if file_picker_name:
await self.wait()
file_picker = ui_test.find(file_picker_name)
await file_picker.focus()
self.assertTrue(file_picker)
file_picker.window.visible = False
# Special treatment for flatten sublayers
await self._hide_prompt()
await readonly_item.right_click()
        # This menu item cannot be found for a read-only layer.
if not allow_read_only:
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu(item_name)
else:
await ui_test.select_context_menu(item_name)
# Special treatment for flatten sublayers
await self._hide_prompt()
        # Muting the layer makes the menu item unavailable as well, unless allow_mute is set
self.stage.MuteLayer(self._writable_layer.identifier)
await self.wait()
await writable_item.right_click()
if not allow_mute:
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu(item_name)
else:
await ui_test.select_context_menu(item_name)
# Special treatment for flatten sublayers
await self._hide_prompt()
self.stage.UnmuteLayer(self._writable_layer.identifier)
await self.wait()
        # Locking the layer makes the menu item unavailable as well, unless allow_lock is set
LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, True)
await self.wait()
await writable_item.right_click()
if not allow_lock:
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu(item_name)
else:
await ui_test.select_context_menu(item_name)
# Special treatment for flatten sublayers
await self._hide_prompt()
LayerUtils.set_layer_lock_status(self.stage.GetRootLayer(), self._writable_layer.identifier, False)
await self.wait()
async def test_copy_url_link(self):
import omni.kit.ui_test as ui_test
root_item, _, _ = self._find_all_layer_items()
await root_item.right_click()
await ui_test.select_context_menu("Copy URL Link")
import omni.kit.clipboard
url = omni.kit.clipboard.paste()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
self.assertEqual(url, root_layer_item.identifier)
async def test_collapse_expand_tree(self):
import omni.kit.ui_test as ui_test
root_item, _, _ = self._find_all_layer_items()
await root_item.right_click()
await ui_test.select_context_menu("Collapse Tree")
await root_item.right_click()
await ui_test.select_context_menu("Expand Tree")
async def test_set_edit_layer(self):
import omni.kit.ui_test as ui_test
menu_name = "Set Default Edit Layer"
_, writable_item, readonly_item = self._find_all_layer_items()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
layer_model.auto_authoring_mode = True
await self.wait()
await writable_item.right_click()
await ui_test.select_context_menu(menu_name)
layer = Sdf.Find(layer_model.default_edit_layer)
self.assertEqual(layer, self._writable_layer)
await readonly_item.right_click()
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu(menu_name)
async def test_refresh_references_or_payloads(self):
import omni.kit.ui_test as ui_test
prim = self.stage.DefinePrim("/reference0", "Xform")
prim.GetReferences().AddReference(self._writable_layer.identifier)
prim = self.stage.DefinePrim("/payload0", "Xform")
prim.GetPayloads().AddPayload(self._writable_layer.identifier)
prim = self.stage.DefinePrim("/reference_and_payload0", "Xform")
prim.GetReferences().AddReference(self._writable_layer.identifier)
prim.GetPayloads().AddPayload(self._writable_layer.identifier)
await self.wait()
reference_widget = ui_test.find("Layer//Frame/**/Label[*].text=='reference0'")
payload_widget = ui_test.find("Layer//Frame/**/Label[*].text=='payload0'")
reference_and_payload_widget = ui_test.find("Layer//Frame/**/Label[*].text=='reference_and_payload0'")
all_widgets = [reference_widget, payload_widget, reference_and_payload_widget]
all_menu_names = ["Refresh Reference", "Refresh Payload", "Refresh Payload & Reference"]
        import asyncio
        for prim_item, menu_name in zip(all_widgets, all_menu_names):
            await prim_item.right_click()
            await asyncio.sleep(3.0)
            await ui_test.select_context_menu(menu_name)
async def test_save_sublayer(self):
import omni.kit.ui_test as ui_test
menu_name = "Save"
for layer in [self.stage.GetRootLayer(), self._writable_layer, self._readonly_layer]:
Sdf.CreatePrimInLayer(layer, "/test")
await self.wait()
root_item, writable_item, readonly_item = self._find_all_layer_items()
await writable_item.right_click()
await ui_test.select_context_menu(menu_name)
# When it's not dirty, the menu item is not shown.
# Cannot save readonly layer
# Cannot save anonymous layer
for layer_item in [root_item, writable_item, readonly_item]:
await layer_item.right_click()
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu(menu_name)
async def test_find_in_browser(self):
import omni.kit.ui_test as ui_test
menu_name = "Find in Content Browser"
root_item, writable_item, readonly_item = self._find_all_layer_items()
for layer_item in [writable_item, readonly_item]:
await layer_item.right_click()
await ui_test.select_context_menu(menu_name)
# Cannot browse anonymous layer
await root_item.right_click()
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu(menu_name)
async def test_move_selection(self):
import omni.kit.ui_test as ui_test
menu_name = "Move Selections To This Layer"
_, writable_item, readonly_item = self._find_all_layer_items()
prim0 = self.stage.DefinePrim("/reference0", "Xform")
prim1 = self.stage.DefinePrim("/payload0", "Xform")
await self.wait()
self.usd_context.get_selection().set_selected_prim_paths([str(prim0.GetPath()), str(prim1.GetPath())], True)
# Cannot modify readonly layer
await readonly_item.right_click()
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu(menu_name)
await writable_item.right_click()
await ui_test.select_context_menu(menu_name)
await self.wait()
root_layer = self.stage.GetRootLayer()
self.assertFalse(root_layer.GetPrimAtPath(prim0.GetPath()))
self.assertFalse(root_layer.GetPrimAtPath(prim1.GetPath()))
self.assertTrue(self._writable_layer.GetPrimAtPath(prim0.GetPath()))
self.assertTrue(self._writable_layer.GetPrimAtPath(prim1.GetPath()))
async def _test_menu_without_selection(self, menu_name):
"""Right click on the empty area of layer window will pop up context menu also."""
import omni.kit.ui_test as ui_test
window = ui_test.find("Layer")
await window.bring_to_front()
await ui_test.emulate_mouse_move(ui_test.Vec2(-100, -100), human_delay_speed=10)
await ui_test.emulate_mouse_move(window.center)
await ui_test.emulate_mouse_click(right_click=True)
await ui_test.select_context_menu(menu_name)
async def test_create_sublayer_without_selection(self):
await self._test_menu_without_selection("Create Sublayer")
async def test_insert_sublayer_without_selection(self):
await self._test_menu_without_selection("Insert Sublayer")
async def test_create_sublayer(self):
await self._test_menu_item("Create Sublayer", "Create Sublayer")
async def test_insert_sublayer(self):
await self._test_menu_item("Insert Sublayer", "Insert Sublayer")
async def test_save_a_copy(self):
await self._test_menu_item("Save a Copy", "Save Layer As", True, True, True)
async def test_save_as(self):
await self._test_menu_item("Save As", "Save Layer As", True, False, True)
async def test_remove_sublayer(self):
await self._test_menu_item("Remove Layer", None, True, False, False)
async def test_remove_multiple_sublayers(self):
await self._test_menu_item("Remove Layer", None, False, False, False, True)
async def test_flatten_sublayers(self):
await self._test_menu_item("Flatten Sublayers", None, True, True, False)
async def test_reload_sublayer(self):
await self._test_menu_item("Reload Layer", None, True, False, False)
async def test_merge_layer_down(self):
import omni.kit.ui_test as ui_test
root_item, writable_item, readonly_item = self._find_all_layer_items()
layer = Sdf.Layer.CreateAnonymous()
self.stage.GetRootLayer().subLayerPaths.append(layer.identifier)
await self._test_menu_item("Merge Down One", None, False, False, False)
await writable_item.right_click()
await ui_test.select_context_menu("Merge Down One")
async def test_remove_prim(self):
index = 0
for layer in [self._writable_layer, self._readonly_layer]:
with Usd.EditContext(self.stage, layer):
self.stage.DefinePrim(f"/prim{index}")
index += 1
self.stage.DefinePrim(f"/prim{index}")
index += 1
await self.wait()
self.assertTrue(self._writable_layer.GetPrimAtPath("/prim0"))
self.assertTrue(self._writable_layer.GetPrimAtPath("/prim1"))
self.assertTrue(self._readonly_layer.GetPrimAtPath("/prim2"))
self.assertTrue(self._readonly_layer.GetPrimAtPath("/prim3"))
import omni.kit.ui_test as ui_test
# Select and delete single prim.
LayerUtils.set_edit_target(self.stage, self._writable_layer.identifier)
await self.wait()
omni.kit.commands.execute(
"SelectPrims",
old_selected_paths=[],
new_selected_paths=["/prim0"],
expand_in_stage=True
)
await self.wait()
prim0 = ui_test.find("Layer//Frame/**/Label[*].text=='prim0'")
await prim0.right_click()
await ui_test.select_context_menu("Delete")
self.assertFalse(self._writable_layer.GetPrimAtPath("/prim0"))
self.assertTrue(self._writable_layer.GetPrimAtPath("/prim1"))
omni.kit.undo.undo()
await self.wait()
# Select and delete multiple prims.
omni.kit.commands.execute(
"SelectPrims",
old_selected_paths=[],
new_selected_paths=["/prim0", "/prim1"],
expand_in_stage=True
)
await self.wait()
prim0 = ui_test.find("Layer//Frame/**/Label[*].text=='prim0'")
await prim0.right_click()
await ui_test.select_context_menu("Delete")
self.assertFalse(self._writable_layer.GetPrimAtPath("/prim0"))
self.assertFalse(self._writable_layer.GetPrimAtPath("/prim1"))
# Cannot remove prims in read-only layer.
LayerUtils.set_edit_target(self.stage, self._readonly_layer.identifier)
await self.wait()
omni.kit.commands.execute(
"SelectPrims",
old_selected_paths=[],
new_selected_paths=["/prim2"],
expand_in_stage=True
)
await self.wait()
prim2 = ui_test.find("Layer//Frame/**/Label[*].text=='prim2'")
await prim2.right_click()
with self.assertRaises(Exception) as context:
await ui_test.select_context_menu("Delete")
| 18,118 | Python | 38.561135 | 124 | 0.642952 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_misc.py | import os
import string
import random
import unittest
import carb
import omni
import omni.kit.test
import omni.usd
import omni.client
import omni.kit.widget.layers
from pathlib import Path
from omni.kit.usd.layers import LayerUtils
from pxr import Sdf, Usd, UsdGeom
from .base import TestLayerNonUIBase
class TestLayerMisc(TestLayerNonUIBase):
def get_random_string(self):
return "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
async def test_layer_release(self):
# Test fix for https://nvidia-omniverse.atlassian.net/browse/OM-18672
current_path = Path(__file__).parent
test_data_path = current_path.parent.parent.parent.parent.parent.joinpath("data")
sublayer_path = str(test_data_path.joinpath("sublayer.usd"))
usd_context = omni.usd.get_context()
await usd_context.new_stage_async()
stage = usd_context.get_stage()
root_layer = stage.GetRootLayer()
sublayer = LayerUtils.insert_sublayer(root_layer, 0, sublayer_path)
identifier = sublayer.identifier
        self.assertIsNotNone(sublayer)
sublayer = None # Release the ref count
        # Remove the sublayer from the layer stack, which also releases its reference in the Layer Window
LayerUtils.remove_sublayer(root_layer, 0)
sublayer = Sdf.Find(identifier)
        self.assertIsNone(sublayer)
sublayer = LayerUtils.insert_sublayer(root_layer, 0, sublayer_path)
identifier = sublayer.identifier
        self.assertIsNotNone(sublayer)
sublayer = None # Release the ref count
# Reopen stage to see if the sublayer has been released
await usd_context.new_stage_async()
sublayer = Sdf.Find(identifier)
        self.assertIsNone(sublayer)
async def test_layer_dirtiness_after_save(self):
usd_context = omni.usd.get_context()
await usd_context.new_stage_async()
# Manually set current edit target identifier
self.layers_instance = omni.kit.widget.layers.get_instance()
layer_model = self.layers_instance.get_layer_model()
layer_model._edit_target_identifier = usd_context.get_stage_url()
token = carb.tokens.get_tokens_interface()
temp_dir = token.resolve("${temp}")
temp_usd = os.path.join(temp_dir, f"{self.get_random_string()}.usd")
temp_usd = omni.client.normalize_url(temp_usd)
success, _, saved_layers = await usd_context.save_layers_async(temp_usd, [])
self.assertTrue(success)
self.assertEqual(len(saved_layers), 1)
        # Wait a few frames for the layer model's update event to author the edit target.
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
await omni.kit.app.get_app().next_update_async()
        # Manually trigger the update to author the edit target into the root layer.
layer_model._pending_changed_edit_target = temp_usd
layer_model._on_update(0.0)
        # The root layer should not be dirty after saving.
stage = usd_context.get_stage()
root_layer = stage.GetRootLayer()
self.assertFalse(root_layer.dirty)
async def test_create_sublayer_with_stage_axis(self):
usd_context = omni.usd.get_context()
for axis in [UsdGeom.Tokens.y, UsdGeom.Tokens.z]:
await usd_context.new_stage_async()
stage = usd_context.get_stage()
UsdGeom.SetStageUpAxis(stage, axis)
sublayer = Sdf.Layer.CreateAnonymous()
omni.kit.commands.execute(
"CreateSublayer",
layer_identifier=stage.GetRootLayer().identifier,
sublayer_position=0,
new_layer_path=sublayer.identifier,
transfer_root_content=False,
create_or_insert=True,
)
sublayer_stage = Usd.Stage.Open(sublayer)
self.assertEqual(UsdGeom.GetStageUpAxis(sublayer_stage), axis)
| 4,150 | Python | 38.533333 | 96 | 0.655181 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_window_ui_states.py | import omni.kit.test
import os
import tempfile
import shutil
import omni.client
import omni.kit.app
from .base import TestLayerNonUIBase
from pxr import Usd, Sdf
from stat import S_IREAD, S_IWRITE
from omni.kit.usd.layers import LayerUtils, get_layers
from omni.kit.widget.prompt import PromptManager
class TestWindowUiStates(TestLayerNonUIBase):
# Before running each test
async def setUp(self):
await super().setUp()
self.stage = self.usd_context.get_stage()
self._temp_dir = tempfile.TemporaryDirectory().name
self._writable_layer_path = os.path.join(self._temp_dir, "writable.usd")
self._writable_layer = Sdf.Layer.CreateNew(self._writable_layer_path)
self._writable_layer.Save()
self._readonly_layer_path = os.path.join(self._temp_dir, "readonly.usd")
layer = Sdf.Layer.CreateNew(self._readonly_layer_path)
layer.Save()
layer = None
os.chmod(self._readonly_layer_path, S_IREAD)
self._readonly_layer = Sdf.Layer.FindOrOpen(self._readonly_layer_path)
# Prepare stage
root_layer = self.stage.GetRootLayer()
root_layer.subLayerPaths.append(self._readonly_layer_path)
root_layer.subLayerPaths.append(self._writable_layer_path)
import omni.kit.ui_test as ui_test
await ui_test.find("Layer").focus()
async def tearDown(self):
await super().tearDown()
self._writable_layer = None
self._readonly_layer = None
self.stage = None
os.chmod(self._readonly_layer_path, S_IWRITE)
shutil.rmtree(self._temp_dir)
async def test_mute(self):
import omni.kit.ui_test as ui_test
local_mute_items = ui_test.find_all("Layer//Frame/**/ToolButton[*].identifier=='local_mute'")
global_mute_items = ui_test.find_all("Layer//Frame/**/ToolButton[*].identifier=='global_mute'")
# Root layer has no mute button.
self.assertEqual(len(local_mute_items), 2)
self.assertEqual(len(global_mute_items), 2)
for global_scope in [False, True]:
layers = get_layers()
layers_state = layers.get_layers_state()
layers_state.set_muteness_scope(global_scope)
# Local mute
# Mute readonly layer
await local_mute_items[0].click()
self.assertEqual(self.stage.IsLayerMuted(self._readonly_layer.identifier), not global_scope)
self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))
# Unmute
await local_mute_items[0].click()
self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
# Mute writable layer
await local_mute_items[1].click()
self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
self.assertEqual(self.stage.IsLayerMuted(self._writable_layer.identifier), not global_scope)
# Unmute
await local_mute_items[1].click()
self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))
            # Global mute
# Mute readonly layer
await global_mute_items[0].click()
self.assertEqual(self.stage.IsLayerMuted(self._readonly_layer.identifier), global_scope)
self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))
# Unmute
await global_mute_items[0].click()
self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
# Mute writable layer
await global_mute_items[1].click()
self.assertFalse(self.stage.IsLayerMuted(self._readonly_layer.identifier))
self.assertEqual(self.stage.IsLayerMuted(self._writable_layer.identifier), global_scope)
# Unmute
await global_mute_items[1].click()
self.assertFalse(self.stage.IsLayerMuted(self._writable_layer.identifier))
async def test_lock(self):
import omni.kit.ui_test as ui_test
lock_items = ui_test.find_all("Layer//Frame/**/ToolButton[*].identifier=='lock'")
        # Neither the root layer nor the read-only layer has a lock button.
self.assertEqual(len(lock_items), 1)
layers = get_layers()
layers_state = layers.get_layers_state()
await lock_items[0].click()
self.assertTrue(layers_state.is_layer_locked(self._writable_layer.identifier))
await lock_items[0].click()
self.assertFalse(layers_state.is_layer_locked(self._writable_layer.identifier))
| 4,583 | Python | 36.884297 | 104 | 0.64892 |
omniverse-code/kit/exts/omni.kit.widget.layers/omni/kit/widget/layers/tests/test_hotkey.py | import carb
import omni.kit.app
from .base import TestLayerUIBase
from omni.kit.test_suite.helpers import arrange_windows
class TestHotkey(TestLayerUIBase):
# Before running each test
async def setUp(self):
await super().setUp()
await arrange_windows("Layer", 800, 600)
self.stage = self.usd_context.get_stage()
async def tearDown(self):
await super().tearDown()
async def _wait(self, frames=4):
for i in range(frames):
await self.app.next_update_async()
async def test_remove_prim_with_hot_key(self):
self.stage.DefinePrim("/cube", "Cube")
self.stage.DefinePrim("/cube2", "Cube")
await self._wait()
layer_model = self.layers_instance.get_layer_model()
root_layer_item = layer_model.root_layer_item
layer_window = self.layers_instance._window
layer_tree_view = layer_window._layer_view
all_root_specs = root_layer_item.absolute_root_spec.children
self.assertTrue(len(all_root_specs) != 0)
layer_tree_view.selection = all_root_specs
await self._wait()
self.assertEqual(len(self.layers_instance.get_selected_items()), 2)
        # FIXME: Not sure why there are two dangling windows that are visible underneath.
import omni.kit.ui_test as ui_test
window = ui_test.find("Create Sublayer")
if window:
window.window.visible = False
window = ui_test.find("Insert Sublayer")
if window:
window.window.visible = False
window = ui_test.find("Save Layer As")
if window:
window.window.visible = False
window = ui_test.find("Layer")
await window.bring_to_front()
await ui_test.emulate_mouse_move(ui_test.Vec2(-100, -100), human_delay_speed=10)
await ui_test.emulate_mouse_move(window.center)
await omni.kit.ui_test.emulate_keyboard_press(carb.input.KeyboardInput.DEL)
await self._wait()
self.assertFalse(self.stage.GetPrimAtPath("/cube"))
self.assertFalse(self.stage.GetPrimAtPath("/cube2"))
| 2,117 | Python | 32.619047 | 89 | 0.640529 |
omniverse-code/kit/exts/omni.kit.pip_archive/omni/kit/pip_archive/tests/__init__.py | from .test_pip_archive import *
| 32 | Python | 15.499992 | 31 | 0.75 |
omniverse-code/kit/exts/omni.kit.pip_archive/omni/kit/pip_archive/tests/test_pip_archive.py | import omni.kit.test
import omni.kit.pipapi
class TestPipArchive(omni.kit.test.AsyncTestCase):
async def test_pip_archive(self):
        # Take one of the packages from deps/pip.toml; it should be prebundled and available without needing the online index
omni.kit.pipapi.install("numpy", version="1.19.0", use_online_index=False)
import numpy
self.assertIsNotNone(numpy)
| 406 | Python | 32.916664 | 129 | 0.721675 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/_pyrsistent_version.py | __version__ = '0.19.3'
| 23 | Python | 10.999995 | 22 | 0.478261 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/png.py | #!/usr/bin/env python
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <[email protected]>
# Portions Copyright (C) 2009 David Jones <[email protected]>
# And probably portions Copyright (C) 2006 Nicko van Someren <[email protected]>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The ``png`` module can read and write PNG files.
Installation and Overview
-------------------------
``pip install pypng``
For help, type ``import png; help(png)`` in your python interpreter.
A good place to start is the :class:`Reader` and :class:`Writer` classes.
Coverage of PNG formats is fairly complete;
all allowable bit depths (1/2/4/8/16/24/32/48/64 bits per pixel) and
colour combinations are supported:
- greyscale (1/2/4/8/16 bit);
- RGB, RGBA, LA (greyscale with alpha) with 8/16 bits per channel;
- colour mapped images (1/2/4/8 bit).
Interlaced images,
which support a progressive display when downloading,
are supported for both reading and writing.
A number of optional chunks can be specified (when writing)
and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
The ``sBIT`` chunk can be used to specify precision for
non-native bit depths.
Requires Python 3.5 or higher.
Installation is trivial,
but see the ``README.txt`` file (with the source distribution) for details.
Full use of all features will need some reading of the PNG specification
http://www.w3.org/TR/2003/REC-PNG-20031110/.
The package also comes with command line utilities.
- ``pripamtopng`` converts
`Netpbm <http://netpbm.sourceforge.net/>`_ PAM/PNM files to PNG;
- ``pripngtopam`` converts PNG to file PAM/PNM.
There are a few more for simple PNG manipulations.
Spelling and Terminology
------------------------
Generally British English spelling is used in the documentation.
So that's "greyscale" and "colour".
This not only matches the author's native language,
it's also used by the PNG specification.
Colour Models
-------------
The major colour models supported by PNG (and hence by PyPNG) are:
- greyscale;
- greyscale--alpha;
- RGB;
- RGB--alpha.
Also referred to using the abbreviations: L, LA, RGB, RGBA.
Each letter codes a single channel:
*L* is for Luminance or Luma or Lightness (greyscale images);
*A* stands for Alpha, the opacity channel
(used for transparency effects, but higher values are more opaque,
so it makes sense to call it opacity);
*R*, *G*, *B* stand for Red, Green, Blue (colour image).
Lists, arrays, sequences, and so on
-----------------------------------
When getting pixel data out of this module (reading) and
presenting data to this module (writing) there are
a number of ways the data could be represented as a Python value.
The preferred format is a sequence of *rows*,
with each row being a sequence of *values*.
In this format, the values are in pixel order,
with all the values from all the pixels in a row
being concatenated into a single sequence for that row.
Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
has RGB components:
Sequence of rows::
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Each row appears as its own list,
but the pixels are flattened so that three values for one pixel
simply follow the three values for the previous pixel.
This is preferred because
it provides a good compromise between space and convenience.
PyPNG regards itself as at liberty to replace any sequence type with
any sufficiently compatible other sequence type;
in practice each row is an array (``bytearray`` or ``array.array``).
To allow streaming the outer list is sometimes
an iterator rather than an explicit list.
An alternative format is a single array holding all the values.
Array of values::
[R,G,B, R,G,B, R,G,B,
R,G,B, R,G,B, R,G,B]
The entire image is one single giant sequence of colour values.
Generally an array will be used (to save space), not a list.
The top row comes first,
and within each row the pixels are ordered from left-to-right.
Within a pixel the values appear in the order R-G-B-A
(or L-A for greyscale--alpha).
There is another format, which should only be used with caution.
It is mentioned because it is used internally,
is close to what lies inside a PNG file itself,
and has some support from the public API.
This format is called *packed*.
When packed, each row is a sequence of bytes (integers from 0 to 255),
just as it is before PNG scanline filtering is applied.
When the bit depth is 8 this is the same as a sequence of rows;
when the bit depth is less than 8 (1, 2 and 4),
several pixels are packed into each byte;
when the bit depth is 16 each pixel value is decomposed into 2 bytes
(and `packed` is a misnomer).
This format is used by the :meth:`Writer.write_packed` method.
It isn't usually a convenient format,
but may be just right if the source data for
the PNG image comes from something that uses a similar format
(for example, 1-bit BMPs, or another PNG file).
"""
__version__ = "0.20220715.0"
import collections
import io # For io.BytesIO
import itertools
import math
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import re
import struct
import sys
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
import zlib
from array import array
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
# The xstart, ystart, xstep, ystep for the Adam7 interlace passes.
adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
def adam7_generate(width, height):
"""
Generate the coordinates for the reduced scanlines
of an Adam7 interlaced image
of size `width` by `height` pixels.
Yields a generator for each pass,
and each pass generator yields a series of (x, y, xstep) triples,
each one identifying a reduced scanline consisting of
pixels starting at (x, y) and taking every xstep pixel to the right.
"""
for xstart, ystart, xstep, ystep in adam7:
if xstart >= width:
continue
yield ((xstart, y, xstep) for y in range(ystart, height, ystep))
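# Illustrative sketch of consuming adam7_generate: each pass must be
# iterated before advancing the outer generator, because the inner
# generator expressions read the loop variables lazily.
# For an 8x8 image the first pass yields only (0, 0, 8): the single
# reduced scanline containing the top-left pixel.
#     for reduced_pass in adam7_generate(8, 8):
#         for x, y, xstep in reduced_pass:
#             pass  # process the reduced scanline starting at (x, y)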
# Models the 'pHYs' chunk (used by the Reader)
Resolution = collections.namedtuple('_Resolution', 'x y unit_is_meter')
def group(s, n):
return list(zip(* [iter(s)] * n))
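# Sketch: group chops a flat sequence into n-tuples (truncating leftovers).
#     group([1, 2, 3, 4], 2)  # -> [(1, 2), (3, 4)]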
def isarray(x):
return isinstance(x, array)
def check_palette(palette):
"""
Check a palette argument (to the :class:`Writer` class) for validity.
Returns the palette as a list if okay;
raises an exception otherwise.
"""
# None is the default and is allowed.
if palette is None:
return None
p = list(palette)
if not (0 < len(p) <= 256):
raise ProtocolError(
"a palette must have between 1 and 256 entries,"
" see https://www.w3.org/TR/PNG/#11PLTE")
seen_triple = False
for i, t in enumerate(p):
if len(t) not in (3, 4):
raise ProtocolError(
"palette entry %d: entries must be 3- or 4-tuples." % i)
if len(t) == 3:
seen_triple = True
if seen_triple and len(t) == 4:
raise ProtocolError(
"palette entry %d: all 4-tuples must precede all 3-tuples" % i)
for x in t:
if int(x) != x or not(0 <= x <= 255):
raise ProtocolError(
"palette entry %d: "
"values must be integer: 0 <= x <= 255" % i)
return p
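# Sketch: a valid two-entry RGB palette is returned unchanged (as a list).
#     check_palette([(255, 0, 0), (0, 255, 0)])  # -> [(255, 0, 0), (0, 255, 0)]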
def check_sizes(size, width, height):
"""
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ProtocolError(
"size argument should be a pair (width, height) instead is %r" % (size,))
if width is not None and width != size[0]:
raise ProtocolError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ProtocolError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size
def check_color(c, greyscale, which):
"""
Checks that a colour argument for transparent or background options
is the right form.
Returns the colour
(which, if it's a bare integer, is "corrected" to a 1-tuple).
"""
if c is None:
return c
if greyscale:
try:
len(c)
except TypeError:
c = (c,)
if len(c) != 1:
raise ProtocolError("%s for greyscale must be 1-tuple" % which)
if not is_natural(c[0]):
raise ProtocolError(
"%s colour for greyscale must be integer" % which)
else:
if not (len(c) == 3 and
is_natural(c[0]) and
is_natural(c[1]) and
is_natural(c[2])):
raise ProtocolError(
"%s colour must be a triple of integers" % which)
return c
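# Sketch: a bare integer greyscale colour is "corrected" to a 1-tuple,
# while an RGB triple passes through unchanged.
#     check_color(128, True, 'background')            # -> (128,)
#     check_color((255, 0, 0), False, 'transparent')  # -> (255, 0, 0)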
class Error(Exception):
def __str__(self):
return self.__class__.__name__ + ': ' + ' '.join(self.args)
class FormatError(Error):
"""
Problem with input file format.
In other words, PNG file does not conform to
the specification in some way and is invalid.
"""
class ProtocolError(Error):
"""
Problem with the way the programming interface has been used,
or the data presented to it.
"""
class ChunkError(FormatError):
pass
class Default:
"""The default for the greyscale parameter."""
class Writer:
"""
PNG encoder in pure Python.
"""
def __init__(self, width=None, height=None,
size=None,
greyscale=Default,
alpha=False,
bitdepth=8,
palette=None,
transparent=None,
background=None,
gamma=None,
compression=None,
interlace=False,
planes=None,
colormap=None,
maxval=None,
chunk_limit=2**20,
x_pixels_per_unit=None,
y_pixels_per_unit=None,
unit_is_meter=False):
"""
Create a PNG encoder object.
Arguments:
width, height
Image size in pixels, as two separate arguments.
size
Image size (w,h) in pixels, as single argument.
greyscale
Pixels are greyscale, not RGB.
alpha
Input data has alpha channel (RGBA or LA).
bitdepth
Bit depth: from 1 to 16 (for each channel).
palette
Create a palette for a colour mapped image (colour type 3).
transparent
Specify a transparent colour (create a ``tRNS`` chunk).
background
Specify a default background colour (create a ``bKGD`` chunk).
gamma
Specify a gamma value (create a ``gAMA`` chunk).
compression
zlib compression level: 0 (none) to 9 (more compressed);
default: -1 or None.
interlace
Create an interlaced image.
chunk_limit
Write multiple ``IDAT`` chunks to save memory.
x_pixels_per_unit
Number of pixels a unit along the x axis (write a
`pHYs` chunk).
y_pixels_per_unit
Number of pixels a unit along the y axis (write a
`pHYs` chunk). Along with `x_pixel_unit`, this gives
the pixel size ratio.
unit_is_meter
`True` to indicate that the unit (for the `pHYs`
chunk) is metre.
The image size (in pixels) can be specified either by using the
`width` and `height` arguments, or with the single `size`
argument.
If `size` is used it should be a pair (*width*, *height*).
The `greyscale` argument indicates whether input pixels
are greyscale (when true), or colour (when false).
The default is true unless `palette=` is used.
The `alpha` argument (a boolean) specifies
whether input pixels have an alpha channel (or not).
`bitdepth` specifies the bit depth of the source pixel values.
Each channel may have a different bit depth.
Each source pixel must have values that are
an integer between 0 and ``2**bitdepth-1``, where
`bitdepth` is the bit depth for the corresponding channel.
For example, 8-bit images have values between 0 and 255.
PNG only stores images with bit depths of
1,2,4,8, or 16 (the same for all channels).
When `bitdepth` is not one of these values or where
channels have different bit depths,
the next highest valid bit depth is selected,
and an ``sBIT`` (significant bits) chunk is generated
that specifies the original precision of the source image.
In this case the supplied pixel values will be rescaled to
fit the range of the selected bit depth.
The PNG file format supports many bit depth / colour model
combinations, but not all.
The details are somewhat arcane
(refer to the PNG specification for full details).
Briefly:
Bit depths < 8 (1,2,4) are only allowed with greyscale and
colour mapped images;
colour mapped images cannot have bit depth 16.
For colour mapped images
(in other words, when the `palette` argument is specified)
the `bitdepth` argument must match one of
the valid PNG bit depths: 1, 2, 4, or 8.
(It is valid to have a PNG image with a palette and
an ``sBIT`` chunk, but the meaning is slightly different;
it would be awkward to use the `bitdepth` argument for this.)
The `palette` option, when specified,
causes a colour mapped image to be created:
the PNG colour type is set to 3;
`greyscale` must not be true; `alpha` must not be true;
`transparent` must not be set.
The bit depth must be 1,2,4, or 8.
When a colour mapped image is created,
the pixel values are palette indexes and
the `bitdepth` argument specifies the size of these indexes
(not the size of the colour values in the palette).
The palette argument value should be a sequence of 3- or
4-tuples.
3-tuples specify RGB palette entries;
4-tuples specify RGBA palette entries.
All the 4-tuples (if present) must come before all the 3-tuples.
A ``PLTE`` chunk is created;
if there are 4-tuples then a ``tRNS`` chunk is created as well.
The ``PLTE`` chunk will contain all the RGB triples in the same
sequence;
the ``tRNS`` chunk will contain the alpha channel for
all the 4-tuples, in the same sequence.
Palette entries are always 8-bit.
If specified, the `transparent` and `background` parameters must be
a tuple with one element for each channel in the image.
Either a 3-tuple of integer (RGB) values for a colour image, or
a 1-tuple of a single integer for a greyscale image.
If specified, the `gamma` parameter must be a positive number
(generally, a `float`).
A ``gAMA`` chunk will be created.
Note that this will not change the values of the pixels as
they appear in the PNG file,
they are assumed to have already
been converted appropriately for the gamma specified.
The `compression` argument specifies the compression level to
be used by the ``zlib`` module.
Values from 1 to 9 (highest) specify compression.
0 means no compression.
-1 and ``None`` both mean that the ``zlib`` module uses
the default level of compression (which is generally acceptable).
If `interlace` is true then an interlaced image is created
(using PNG's so far only interlace method, *Adam7*).
This does not affect how the pixels should be passed in,
rather it changes how they are arranged into the PNG file.
On slow connexions interlaced images can be
partially decoded by the browser to give
a rough view of the image that is
successively refined as more image data appears.
.. note ::
Enabling the `interlace` option requires the entire image
to be processed in working memory.
`chunk_limit` is used to limit the amount of memory used whilst
compressing the image.
In order to avoid using large amounts of memory,
multiple ``IDAT`` chunks may be created.
"""
# At the moment the `planes` argument is ignored;
# its purpose is to act as a dummy so that
# ``Writer(x, y, **info)`` works, where `info` is a dictionary
# returned by Reader.read and friends.
# Ditto for `colormap`.
width, height = check_sizes(size, width, height)
del size
if not is_natural(width) or not is_natural(height):
raise ProtocolError("width and height must be integers")
if width <= 0 or height <= 0:
raise ProtocolError("width and height must be greater than zero")
# http://www.w3.org/TR/PNG/#7Integers-and-byte-order
if width > 2 ** 31 - 1 or height > 2 ** 31 - 1:
raise ProtocolError("width and height cannot exceed 2**31-1")
if alpha and transparent is not None:
raise ProtocolError(
"transparent colour not allowed with alpha channel")
# bitdepth is either single integer, or tuple of integers.
# Convert to tuple.
try:
len(bitdepth)
except TypeError:
bitdepth = (bitdepth, )
for b in bitdepth:
valid = is_natural(b) and 1 <= b <= 16
if not valid:
raise ProtocolError(
"each bitdepth %r must be a positive integer <= 16" %
(bitdepth,))
# Calculate channels, and
# expand bitdepth to be one element per channel.
palette = check_palette(palette)
alpha = bool(alpha)
colormap = bool(palette)
if greyscale is Default and palette:
greyscale = False
greyscale = bool(greyscale)
if colormap:
color_planes = 1
planes = 1
else:
color_planes = (3, 1)[greyscale]
planes = color_planes + alpha
if len(bitdepth) == 1:
bitdepth *= planes
bitdepth, self.rescale = check_bitdepth_rescale(
palette,
bitdepth,
transparent, alpha, greyscale)
# These are assertions, because above logic should have
# corrected or raised all problematic cases.
if bitdepth < 8:
assert greyscale or palette
assert not alpha
if bitdepth > 8:
assert not palette
transparent = check_color(transparent, greyscale, 'transparent')
background = check_color(background, greyscale, 'background')
# It's important that the true boolean values
# (greyscale, alpha, colormap, interlace) are converted
# to bool because Iverson's convention is relied upon later on.
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamma = gamma
self.greyscale = greyscale
self.alpha = alpha
self.colormap = colormap
self.bitdepth = int(bitdepth)
self.compression = compression
self.chunk_limit = chunk_limit
self.interlace = bool(interlace)
self.palette = palette
self.x_pixels_per_unit = x_pixels_per_unit
self.y_pixels_per_unit = y_pixels_per_unit
self.unit_is_meter = bool(unit_is_meter)
self.color_type = (4 * self.alpha +
2 * (not greyscale) +
1 * self.colormap)
assert self.color_type in (0, 2, 3, 4, 6)
self.color_planes = color_planes
self.planes = planes
# :todo: fix for bitdepth < 8
self.psize = (self.bitdepth / 8) * self.planes
def write(self, outfile, rows):
"""
Write a PNG image to the output file.
`rows` should be an iterable that yields each row
(each row is a sequence of values).
The rows should be the rows of the original image,
so there should be ``self.height`` rows of
``self.width * self.planes`` values.
If `interlace` is specified (when creating the instance),
then an interlaced PNG file will be written.
Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing requires the entire image to be in working memory.
"""
# Values per row
vpr = self.width * self.planes
def check_rows(rows):
"""
Yield each row in rows,
but check each row first (for correct width).
"""
for i, row in enumerate(rows):
try:
wrong_length = len(row) != vpr
except TypeError:
# When using an itertools.ichain object or
# other generator not supporting __len__,
# we set this to False to skip the check.
wrong_length = False
if wrong_length:
# Note: row numbers start at 0.
raise ProtocolError(
"Expected %d values but got %d values, in row %d" %
(vpr, len(row), i))
yield row
if self.interlace:
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, itertools.chain(*check_rows(rows)))
return self.write_array(outfile, a)
nrows = self.write_passes(outfile, check_rows(rows))
if nrows != self.height:
raise ProtocolError(
"rows supplied (%d) does not match height (%d)" %
(nrows, self.height))
return nrows
def write_passes(self, outfile, rows):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file.
For straightlaced images, this is the usual top to bottom ordering.
For interlaced images the rows should have been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row
(each row being a sequence of values).
"""
# Ensure rows are scaled (to 4-/8-/16-bit),
# and packed into bytes.
if self.rescale:
rows = rescale_rows(rows, self.rescale)
if self.bitdepth < 8:
rows = pack_rows(rows, self.bitdepth)
elif self.bitdepth == 16:
rows = unpack_rows(rows)
return self.write_packed(outfile, rows)
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`.
`rows` should be an iterator that yields each packed row;
a packed row being a sequence of packed bytes.
The rows have a filter byte prefixed and
are then compressed into one or more IDAT chunks.
They are not processed any further,
so if bitdepth is other than 1, 2, 4, 8, 16,
the pixel values should have been scaled
before passing them to this method.
This method does work for interlaced images but it is best avoided.
For interlaced images, the rows should be
presented in the order that they appear in the file.
"""
self.write_preamble(outfile)
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# data accumulates bytes to be compressed for the IDAT chunk;
# it's compressed when sufficiently large.
data = bytearray()
        # Hoist i out of the for loop's scope. Set to -1 because the
        # for loop sets i to 0 on its first pass.
        i = -1
for i, row in enumerate(rows):
# Add "None" filter type.
# Currently, it's essential that this filter type be used
# for every scanline as
# we do not mark the first row of a reduced pass image;
# that means we could accidentally compute
# the wrong filtered scanline if we used
# "up", "average", or "paeth" on such a line.
data.append(0)
data.extend(row)
if len(data) > self.chunk_limit:
compressed = compressor.compress(data)
if len(compressed):
write_chunk(outfile, b'IDAT', compressed)
data = bytearray()
compressed = compressor.compress(bytes(data))
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, b'IDAT', compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, b'IEND')
return i + 1
def write_preamble(self, outfile):
# http://www.w3.org/TR/PNG/#5PNG-file-signature
# This is the first write that is made when
# writing a PNG file.
# This one, and only this one, is checked for TypeError,
# which generally indicates that we are writing bytes
# into a text stream.
try:
outfile.write(signature)
except TypeError as e:
raise ProtocolError("PNG must be written to a binary stream") from e
# http://www.w3.org/TR/PNG/#11IHDR
write_chunk(outfile, b'IHDR',
struct.pack("!2I5B", self.width, self.height,
self.bitdepth, self.color_type,
0, 0, self.interlace))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11gAMA
if self.gamma is not None:
write_chunk(outfile, b'gAMA',
struct.pack("!L", int(round(self.gamma * 1e5))))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11sBIT
if self.rescale:
write_chunk(
outfile, b'sBIT',
struct.pack('%dB' % self.planes,
* [s[0] for s in self.rescale]))
# :chunk:order: Without a palette (PLTE chunk),
# ordering is relatively relaxed.
# With one, gAMA chunk must precede PLTE chunk
# which must precede tRNS and bKGD.
# See http://www.w3.org/TR/PNG/#5ChunkOrdering
if self.palette:
p, t = make_palette_chunks(self.palette)
write_chunk(outfile, b'PLTE', p)
if t:
# tRNS chunk is optional;
# Only needed if palette entries have alpha.
write_chunk(outfile, b'tRNS', t)
# http://www.w3.org/TR/PNG/#11tRNS
if self.transparent is not None:
if self.greyscale:
fmt = "!1H"
else:
fmt = "!3H"
write_chunk(outfile, b'tRNS',
struct.pack(fmt, *self.transparent))
# http://www.w3.org/TR/PNG/#11bKGD
if self.background is not None:
if self.greyscale:
fmt = "!1H"
else:
fmt = "!3H"
write_chunk(outfile, b'bKGD',
struct.pack(fmt, *self.background))
# http://www.w3.org/TR/PNG/#11pHYs
if (self.x_pixels_per_unit is not None and
self.y_pixels_per_unit is not None):
tup = (self.x_pixels_per_unit,
self.y_pixels_per_unit,
int(self.unit_is_meter))
write_chunk(outfile, b'pHYs', struct.pack("!LLB", *tup))
def write_array(self, outfile, pixels):
"""
Write an array that holds all the image values
as a PNG file on the output file.
See also :meth:`write` method.
"""
if self.interlace:
            if not isarray(pixels):
# Coerce to array type
fmt = 'BH'[self.bitdepth > 8]
pixels = array(fmt, pixels)
return self.write_passes(
outfile,
self.array_scanlines_interlace(pixels)
)
else:
return self.write_passes(
outfile,
self.array_scanlines(pixels)
)
def array_scanlines(self, pixels):
"""
Generates rows (each a sequence of values) from
a single array of values.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
def array_scanlines_interlace(self, pixels):
"""
Generator for interlaced scanlines from an array.
`pixels` is the full source image as a single array of values.
The generator yields each scanline of the reduced passes in turn,
each scanline being a sequence of values.
"""
# http://www.w3.org/TR/PNG/#8InterlaceMethods
# Array type.
fmt = 'BH'[self.bitdepth > 8]
# Value per row
vpr = self.width * self.planes
# Each iteration generates a scanline starting at (x, y)
# and consisting of every xstep pixels.
for lines in adam7_generate(self.width, self.height):
for x, y, xstep in lines:
# Pixels per row (of reduced image)
ppr = int(math.ceil((self.width - x) / float(xstep)))
# Values per row (of reduced image)
reduced_row_len = ppr * self.planes
if xstep == 1:
# Easy case: line is a simple slice.
offset = y * vpr
yield pixels[offset: offset + vpr]
continue
# We have to step by xstep,
# which we can do one plane at a time
# using the step in Python slices.
row = array(fmt)
# There's no easier way to set the length of an array
row.extend(pixels[0:reduced_row_len])
offset = y * vpr + x * self.planes
end_offset = (y + 1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
row[i::self.planes] = \
pixels[offset + i: end_offset: skip]
yield row
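# Minimal usage sketch for Writer (illustrative only): write a 2x2
# 8-bit greyscale chequer to a file named 'tiny.png' (a made-up name).
#     w = Writer(width=2, height=2, greyscale=True, bitdepth=8)
#     with open('tiny.png', 'wb') as f:
#         w.write(f, [[0, 255], [255, 0]])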
def write_chunk(outfile, tag, data=b''):
"""
Write a PNG chunk to the output file, including length and
checksum.
"""
data = bytes(data)
# http://www.w3.org/TR/PNG/#5Chunk-layout
outfile.write(struct.pack("!I", len(data)))
outfile.write(tag)
outfile.write(data)
checksum = zlib.crc32(tag)
checksum = zlib.crc32(data, checksum)
checksum &= 2 ** 32 - 1
outfile.write(struct.pack("!I", checksum))
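# Sketch: an empty IEND chunk occupies exactly 12 bytes on disk:
# 4-byte length (zero), 4-byte tag, no data, 4-byte CRC.
#     buf = io.BytesIO()
#     write_chunk(buf, b'IEND')
#     assert len(buf.getvalue()) == 12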
def write_chunks(out, chunks):
"""Create a PNG file by writing out the chunks."""
out.write(signature)
for chunk in chunks:
write_chunk(out, *chunk)
def rescale_rows(rows, rescale):
"""
Take each row in rows (an iterator) and yield
a fresh row with the pixels scaled according to
the rescale parameters in the list `rescale`.
Each element of `rescale` is a tuple of
(source_bitdepth, target_bitdepth),
with one element per channel.
"""
# One factor for each channel
fs = [float(2 ** s[1] - 1)/float(2 ** s[0] - 1)
for s in rescale]
# Assume all target_bitdepths are the same
target_bitdepths = set(s[1] for s in rescale)
assert len(target_bitdepths) == 1
(target_bitdepth, ) = target_bitdepths
typecode = 'BH'[target_bitdepth > 8]
# Number of channels
n_chans = len(rescale)
for row in rows:
rescaled_row = array(typecode, iter(row))
for i in range(n_chans):
channel = array(
typecode,
(int(round(fs[i] * x)) for x in row[i::n_chans]))
rescaled_row[i::n_chans] = channel
yield rescaled_row
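# Sketch: one 2-bit channel rescaled to 8 bits; every value is scaled by
# (2**8 - 1) / (2**2 - 1) == 85, so 3 maps to 255.
#     out = list(rescale_rows([[0, 1, 2, 3]], [(2, 8)]))
#     # out[0] == array('B', [0, 85, 170, 255])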
def pack_rows(rows, bitdepth):
"""Yield packed rows that are a byte array.
Each byte is packed with the values from several pixels.
"""
assert bitdepth < 8
assert 8 % bitdepth == 0
# samples per byte
spb = int(8 / bitdepth)
def make_byte(block):
"""Take a block of (2, 4, or 8) values,
and pack them into a single byte.
"""
res = 0
for v in block:
res = (res << bitdepth) + v
return res
for row in rows:
a = bytearray(row)
        # Add padding bytes so we can group into a whole
        # number of spb-tuples.
n = float(len(a))
extra = math.ceil(n / spb) * spb - n
a.extend([0] * int(extra))
# Pack into bytes.
# Each block is the samples for one byte.
blocks = group(a, spb)
yield bytearray(make_byte(block) for block in blocks)
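# Sketch: eight 1-bit samples pack into a single byte, first sample in
# the most significant bit.
#     packed = list(pack_rows([[1, 0, 1, 1, 0, 0, 0, 1]], 1))
#     # packed[0] == bytearray([0b10110001])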
def unpack_rows(rows):
"""Unpack each row from being 16-bits per value,
to being a sequence of bytes.
"""
for row in rows:
fmt = '!%dH' % len(row)
yield bytearray(struct.pack(fmt, *row))
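# Sketch: one 16-bit value 258 (0x0102) unpacks to two big-endian bytes.
#     rows = list(unpack_rows([[258]]))
#     # rows[0] == bytearray(b'\x01\x02')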
def make_palette_chunks(palette):
"""
Create the byte sequences for a ``PLTE`` and
if necessary a ``tRNS`` chunk.
Returned as a pair (*p*, *t*).
*t* will be ``None`` if no ``tRNS`` chunk is necessary.
"""
p = bytearray()
t = bytearray()
for x in palette:
p.extend(x[0:3])
if len(x) > 3:
t.append(x[3])
if t:
return p, t
return p, None
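# Sketch: one RGBA entry followed by one RGB entry; the alpha bytes of
# the 4-tuples become the tRNS data.
#     p, t = make_palette_chunks([(255, 0, 0, 127), (0, 255, 0)])
#     # p == bytearray(b'\xff\x00\x00\x00\xff\x00'); t == bytearray(b'\x7f')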
def check_bitdepth_rescale(
palette, bitdepth, transparent, alpha, greyscale):
"""
Returns (bitdepth, rescale) pair.
"""
if palette:
if len(bitdepth) != 1:
raise ProtocolError(
"with palette, only a single bitdepth may be used")
(bitdepth, ) = bitdepth
if bitdepth not in (1, 2, 4, 8):
raise ProtocolError(
"with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ProtocolError("transparent and palette not compatible")
if alpha:
raise ProtocolError("alpha and palette not compatible")
if greyscale:
raise ProtocolError("greyscale and palette not compatible")
return bitdepth, None
# No palette, check for sBIT chunk generation.
if greyscale and not alpha:
# Single channel, L.
(bitdepth,) = bitdepth
if bitdepth in (1, 2, 4, 8, 16):
return bitdepth, None
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5, 6, 7)
targetbitdepth = 8
return targetbitdepth, [(bitdepth, targetbitdepth)]
assert alpha or not greyscale
depth_set = tuple(set(bitdepth))
if depth_set in [(8,), (16,)]:
# No sBIT required.
(bitdepth, ) = depth_set
return bitdepth, None
targetbitdepth = (8, 16)[max(bitdepth) > 8]
return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
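# Sketch: a 5-bit greyscale image (no palette, no alpha) is promoted to
# 8 bits, with a rescale entry that later drives the sBIT chunk.
#     check_bitdepth_rescale(None, (5,), None, False, True)  # -> (8, [(5, 8)])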
# Regex for decoding mode string
RegexModeDecode = re.compile("(LA?|RGBA?);?([0-9]*)", flags=re.IGNORECASE)
def from_array(a, mode=None, info={}):
"""
Create a PNG :class:`Image` object from a 2-dimensional array.
One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
Unless they are specified using the *info* parameter,
the PNG's height and width are taken from the array size.
The first axis is the height; the second axis is the
ravelled width and channel index.
The array is treated is a sequence of rows,
each row being a sequence of values (``width*channels`` in number).
So an RGB image that is 16 pixels high and 8 wide will
occupy a 2-dimensional array that is 16x24
(each row will be 8*3 = 24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth
(overriding how this function normally derives the bit depth,
see below).
Appending ``';16'`` to the mode will cause the PNG to be
16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array,
but it can be any suitable Python sequence.
For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.
The exact rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension.
It's slightly more complicated than that because
an iterator of rows can be used, and it all still works.
Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from
the array element's datatype
(but if *mode* specifies a bitdepth then that is used instead).
The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects.
A 1 byte datatype will give a bit depth of 8,
a 2 byte datatype will give a bit depth of 16.
If the datatype does not have an implicit size,
like the above example where it is a plain Python list of lists,
then a default of 8 is used.
The *info* parameter is a dictionary that can
be used to specify metadata (in the same style as
the arguments to the :class:`png.Writer` class).
For this function the keys that are useful are:
height
overrides the height derived from the array dimensions and
allows *a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype
(but must match *mode* if that also specifies a bit depth).
Generally anything specified in the *info* dictionary will
override any implicit choices that this function would otherwise make,
but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and
false when mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
match = RegexModeDecode.match(mode)
if not match:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode, bitdepth = match.groups()
if bitdepth:
bitdepth = int(bitdepth)
# Colour format.
if 'greyscale' in info:
if bool(info['greyscale']) != ('L' in mode):
raise ProtocolError("info['greyscale'] should match mode.")
info['greyscale'] = 'L' in mode
alpha = 'A' in mode
if 'alpha' in info:
if bool(info['alpha']) != alpha:
raise ProtocolError("info['alpha'] should match mode.")
info['alpha'] = alpha
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get("bitdepth") and bitdepth != info['bitdepth']:
raise ProtocolError(
"bitdepth (%d) should match bitdepth of info (%d)." %
(bitdepth, info['bitdepth']))
info['bitdepth'] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
width, height = check_sizes(
info.get("size"),
info.get("width"),
info.get("height"))
if width:
info["width"] = width
if height:
info["height"] = height
if "height" not in info:
try:
info['height'] = len(a)
except TypeError:
raise ProtocolError(
"len(a) does not work, supply info['height'] instead.")
planes = len(mode)
if 'planes' in info:
if info['planes'] != planes:
raise Error("info['planes'] should match mode.")
    # In order to work out whether the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a, t = itertools.tee(a)
row = next(t)
del t
testelement = row
if 'width' not in info:
width = len(row) // planes
info['width'] = width
if 'bitdepth' not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's datatype,
# use a default of 8.
bitdepth = 8
else:
# If we got here without exception,
# we now assume that the array is a numpy array.
if dtype.kind == 'b':
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info['bitdepth'] = bitdepth
for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
assert thing in info
return Image(a, info)
# So that refugees from PIL feel more at home. Not documented.
fromarray = from_array
class Image:
"""A PNG image. You can create an :class:`Image` object from
an array of pixels by calling :meth:`png.from_array`. It can be
saved to disk with the :meth:`save` method.
"""
def __init__(self, rows, info):
"""
.. note ::
The constructor is not public. Please do not call it.
"""
self.rows = rows
self.info = info
def save(self, file):
"""Save the image to the named *file*.
See `.write()` if you already have an open file object.
In general, you can only call this method once;
after it has been called the first time the PNG image is written,
the source data will have been streamed, and
cannot be streamed again.
"""
w = Writer(**self.info)
with open(file, 'wb') as fd:
w.write(fd, self.rows)
def stream(self):
"""Stream the rows into a list, so that the rows object
can be accessed multiple times, or randomly.
"""
self.rows = list(self.rows)
def write(self, file):
"""Write the image to the open file object.
See `.save()` if you have a filename.
In general, you can only call this method once;
after it has been called the first time the PNG image is written,
the source data will have been streamed, and
cannot be streamed again.
"""
w = Writer(**self.info)
w.write(file, self.rows)
class Reader:
"""
    PNG decoder in pure Python.
"""
def __init__(self, _guess=None, filename=None, file=None, bytes=None):
"""
The constructor expects exactly one keyword argument.
If you supply a positional argument instead,
it will guess the input type.
Choose from the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``bytes`` or ``bytearray`` with PNG data.
"""
keywords_supplied = (
(_guess is not None) +
(filename is not None) +
(file is not None) +
(bytes is not None))
if keywords_supplied != 1:
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type).
# See preamble method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
bytes = _guess
elif isinstance(_guess, str):
filename = _guess
elif hasattr(_guess, 'read'):
file = _guess
if bytes is not None:
self.file = io.BytesIO(bytes)
elif filename is not None:
self.file = open(filename, "rb")
elif file is not None:
self.file = file
else:
raise ProtocolError("expecting filename, file or bytes array")
def chunk(self, lenient=False):
"""
Read the next PNG chunk from the input file;
returns a (*type*, *data*) tuple.
*type* is the chunk's type as a byte string
(all PNG chunk types are 4 bytes long).
*data* is the chunk's data content, as a byte string.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
# http://www.w3.org/TR/PNG/#5Chunk-layout
if not self.atchunk:
self.atchunk = self._chunk_len_type()
if not self.atchunk:
raise ChunkError("No more chunks.")
length, type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError(
'Chunk %s too short for required %i octets.'
% (type, length))
checksum = self.file.read(4)
if len(checksum) != 4:
raise ChunkError('Chunk %s too short for checksum.' % type)
verify = zlib.crc32(type)
verify = zlib.crc32(data, verify)
verify = struct.pack('!I', verify)
if checksum != verify:
(a, ) = struct.unpack('!I', checksum)
(b, ) = struct.unpack('!I', verify)
message = ("Checksum error in %s chunk: 0x%08X != 0x%08X."
% (type.decode('ascii'), a, b))
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t, v = self.chunk()
yield t, v
if t == b'IEND':
break
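    # Editor's note: a sketch of iterating chunks to list their types;
    # a minimal file would yield [b'IHDR', b'IDAT', b'IEND']:
    #
    #     types = [t for t, data in Reader(bytes=png_bytes).chunks()]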
def undo_filter(self, filter_type, scanline, previous):
"""
Undo the filter for a scanline.
`scanline` is a sequence of bytes that
does not include the initial filter type byte.
        `previous` is the decoded previous scanline
(for straightlaced images this is the previous pixel row,
but for interlaced images, it is
the previous scanline in the reduced image,
which in general is not the previous pixel row in the final image).
When there is no previous scanline
(the first row of a straightlaced image,
or the first row in one of the passes in an interlaced image),
then this argument should be ``None``.
The scanline will have the effects of filtering removed;
the result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
result = scanline
if filter_type == 0:
return result
if filter_type not in (1, 2, 3, 4):
raise FormatError(
'Invalid PNG Filter Type. '
'See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = bytearray([0] * len(scanline))
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
fn = (None,
undo_filter_sub,
undo_filter_up,
undo_filter_average,
undo_filter_paeth)[filter_type]
fn(fu, scanline, previous, result)
return result
def _deinterlace(self, raw):
"""
Read raw pixel data, undo filters, deinterlace, and flatten.
Return a single array of values.
"""
# Values per row (of the target image)
vpr = self.width * self.planes
# Values per image
vpi = vpr * self.height
# Interleaving writes to the output array randomly
# (well, not quite), so the entire output array must be in memory.
# Make a result array, and make it big enough.
if self.bitdepth > 8:
a = array('H', [0] * vpi)
else:
a = bytearray([0] * vpi)
source_offset = 0
for lines in adam7_generate(self.width, self.height):
# The previous (reconstructed) scanline.
# `None` at the beginning of a pass
# to indicate that there is no previous line.
recon = None
for x, y, xstep in lines:
# Pixels per row (reduced pass image)
ppr = int(math.ceil((self.width - x) / float(xstep)))
# Row size in bytes for this pass.
row_size = int(math.ceil(self.psize * ppr))
filter_type = raw[source_offset]
source_offset += 1
scanline = raw[source_offset: source_offset + row_size]
source_offset += row_size
recon = self.undo_filter(filter_type, scanline, recon)
# Convert so that there is one element per pixel value
flat = self._bytes_to_values(recon, width=ppr)
if xstep == 1:
assert x == 0
offset = y * vpr
a[offset: offset + vpr] = flat
else:
offset = y * vpr + x * self.planes
end_offset = (y + 1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
a[offset + i: end_offset: skip] = \
flat[i:: self.planes]
return a
def _iter_bytes_to_values(self, byte_rows):
"""
Iterator that yields each scanline;
each scanline being a sequence of values.
`byte_rows` should be an iterator that yields
the bytes of each row in turn.
"""
for row in byte_rows:
yield self._bytes_to_values(row)
def _bytes_to_values(self, bs, width=None):
"""Convert a packed row of bytes into a row of values.
Result will be a freshly allocated object,
not shared with the argument.
"""
if self.bitdepth == 8:
return bytearray(bs)
if self.bitdepth == 16:
return array('H',
struct.unpack('!%dH' % (len(bs) // 2), bs))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8 // self.bitdepth
out = bytearray()
mask = 2**self.bitdepth - 1
shifts = [self.bitdepth * i
for i in reversed(list(range(spb)))]
for o in bs:
out.extend([mask & (o >> i) for i in shifts])
return out[:width]
def _iter_straight_packed(self, byte_blocks):
"""Iterator that undoes the effect of filtering;
yields each row as a sequence of packed bytes.
Assumes input is straightlaced.
`byte_blocks` should be an iterable that yields the raw bytes
in blocks of arbitrary size.
"""
# length of row, in bytes
rb = self.row_bytes
a = bytearray()
# The previous (reconstructed) scanline.
# None indicates first line of image.
recon = None
for some_bytes in byte_blocks:
a.extend(some_bytes)
while len(a) >= rb + 1:
filter_type = a[0]
scanline = a[1: rb + 1]
del a[: rb + 1]
recon = self.undo_filter(filter_type, scanline, recon)
yield recon
if len(a) != 0:
# :file:format We get here with a file format error:
# when the available bytes (after decompressing) do not
# pack into exact rows.
raise FormatError('Wrong size for decompressed IDAT chunk.')
assert len(a) == 0
def validate_signature(self):
"""
If signature (header) has not been read then read and
validate it; otherwise do nothing.
No signature (empty read()) will raise EOFError;
An invalid signature will raise FormatError.
        EOFError is raised so that a program reading multiple PNG files
        from the same stream can distinguish the end of the stream
        from a non-PNG or corrupted PNG file.
"""
if self.signature:
return
self.signature = self.file.read(8)
if len(self.signature) == 0:
raise EOFError("End of PNG stream.")
if self.signature != signature:
raise FormatError("PNG file has invalid signature.")
def preamble(self, lenient=False):
"""
Extract the image metadata by reading
the initial part of the PNG file up to
the start of the ``IDAT`` chunk.
All the chunks that precede the ``IDAT`` chunk are
read and either processed for metadata or discarded.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self._chunk_len_type()
if self.atchunk is None:
raise FormatError('This PNG file has no IDAT chunks.')
if self.atchunk[1] == b'IDAT':
return
self.process_chunk(lenient=lenient)
def _chunk_len_type(self):
"""
Reads just enough of the input to
determine the next chunk's length and type;
return a (*length*, *type*) pair where *type* is a byte sequence.
If there are no more chunks, ``None`` is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length, type = struct.unpack('!I4s', x)
if length > 2 ** 31 - 1:
raise FormatError('Chunk %s is too large: %d.' % (type, length))
# Check that all bytes are in valid ASCII range.
# https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-layout
type_bytes = set(bytearray(type))
        if not (type_bytes <= set(range(65, 91)) | set(range(97, 123))):
raise FormatError(
'Chunk %r has invalid Chunk Type.'
% list(type))
return length, type
def process_chunk(self, lenient=False):
"""
Process the next chunk and its data.
This only processes the following chunk types:
``IHDR``, ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``.
All other chunk types are ignored.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = '_process_' + type.decode('ascii')
m = getattr(self, method, None)
if m:
m(data)
def _process_IHDR(self, data):
# http://www.w3.org/TR/PNG/#11IHDR
if len(data) != 13:
raise FormatError('IHDR chunk has incorrect length.')
(self.width, self.height, self.bitdepth, self.color_type,
self.compression, self.filter,
self.interlace) = struct.unpack("!2I5B", data)
check_bitdepth_colortype(self.bitdepth, self.color_type)
if self.compression != 0:
raise FormatError(
"Unknown compression method %d" % self.compression)
if self.filter != 0:
raise FormatError(
"Unknown filter method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
% self.filter)
if self.interlace not in (0, 1):
raise FormatError(
"Unknown interlace method %d, see "
"http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods"
" ."
% self.interlace)
# Derived values
# http://www.w3.org/TR/PNG/#6Colour-values
colormap = bool(self.color_type & 1)
        greyscale = not (self.color_type & 2)
alpha = bool(self.color_type & 4)
color_planes = (3, 1)[greyscale or colormap]
planes = color_planes + alpha
self.colormap = colormap
self.greyscale = greyscale
self.alpha = alpha
self.color_planes = color_planes
self.planes = planes
self.psize = float(self.bitdepth) / float(8) * planes
if int(self.psize) == self.psize:
self.psize = int(self.psize)
self.row_bytes = int(math.ceil(self.width * self.psize))
# Stores PLTE chunk if present, and is used to check
# chunk ordering constraints.
self.plte = None
# Stores tRNS chunk if present, and is used to check chunk
# ordering constraints.
self.trns = None
# Stores sBIT chunk if present.
self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2 ** self.bitdepth) * 3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
# http://www.w3.org/TR/PNG/#11tRNS
self.trns = data
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before tRNS chunk.")
else:
if len(data) > len(self.plte) / 3:
# Was warning, but promoted to Error as it
# would otherwise cause pain later on.
raise FormatError("tRNS chunk is too long.")
else:
if self.alpha:
raise FormatError(
"tRNS chunk is not valid with colour type %d." %
self.color_type)
try:
self.transparent = \
struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def _process_pHYs(self, data):
# http://www.w3.org/TR/PNG/#11pHYs
self.phys = data
fmt = "!LLB"
if len(data) != struct.calcsize(fmt):
raise FormatError("pHYs chunk has incorrect length.")
self.x_pixels_per_unit, self.y_pixels_per_unit, unit = \
struct.unpack(fmt, data)
self.unit_is_meter = bool(unit)
def read(self, lenient=False):
"""
Read the PNG file and decode it.
Returns (`width`, `height`, `rows`, `info`).
May use excessive memory.
`rows` is a sequence of rows;
each row is a sequence of values.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
def iteridat():
"""Iterator that yields all the ``IDAT`` chunks as strings."""
while True:
type, data = self.chunk(lenient=lenient)
if type == b'IEND':
# http://www.w3.org/TR/PNG/#11IEND
break
if type != b'IDAT':
continue
# type == b'IDAT'
# http://www.w3.org/TR/PNG/#11IDAT
if self.colormap and not self.plte:
warnings.warn("PLTE chunk is required before IDAT chunk")
yield data
self.preamble(lenient=lenient)
raw = decompress(iteridat())
if self.interlace:
def rows_from_interlace():
"""Yield each row from an interlaced PNG."""
# It's important that this iterator doesn't read
# IDAT chunks until it yields the first row.
bs = bytearray(itertools.chain(*raw))
arraycode = 'BH'[self.bitdepth > 8]
# Like :meth:`group` but
# producing an array.array object for each row.
values = self._deinterlace(bs)
vpr = self.width * self.planes
for i in range(0, len(values), vpr):
row = array(arraycode, values[i:i+vpr])
yield row
rows = rows_from_interlace()
else:
rows = self._iter_bytes_to_values(self._iter_straight_packed(raw))
info = dict()
for attr in 'greyscale alpha planes bitdepth interlace'.split():
info[attr] = getattr(self, attr)
info['size'] = (self.width, self.height)
for attr in 'gamma transparent background'.split():
a = getattr(self, attr, None)
if a is not None:
info[attr] = a
if getattr(self, 'x_pixels_per_unit', None):
info['physical'] = Resolution(self.x_pixels_per_unit,
self.y_pixels_per_unit,
self.unit_is_meter)
if self.plte:
info['palette'] = self.palette()
return self.width, self.height, rows, info
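    # Editor's note: a typical `read` call, sketched with a placeholder
    # file name; `rows` is an iterator, so materialize it to reuse it:
    #
    #     width, height, rows, info = Reader(filename='in.png').read()
    #     pixels = list(rows)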
def read_flat(self):
"""
Read a PNG file and decode it into a single array of values.
Returns (*width*, *height*, *values*, *info*).
May use excessive memory.
`values` is a single array.
The :meth:`read` method is more stream-friendly than this,
because it returns a sequence of rows.
"""
x, y, pixel, info = self.read()
arraycode = 'BH'[info['bitdepth'] > 8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, info
def palette(self, alpha='natural'):
"""
Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks.
These chunks should have already been processed (for example,
by calling the :meth:`preamble` method).
All the tuples are the same size:
3-tuples if there is no ``tRNS`` chunk,
4-tuples when there is a ``tRNS`` chunk.
Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
"""
if not self.plte:
raise FormatError(
"Required PLTE chunk is missing in colour type 3 image.")
plte = group(array('B', self.plte), 3)
if self.trns or alpha == 'force':
trns = array('B', self.trns or [])
trns.extend([255] * (len(plte) - len(trns)))
plte = list(map(operator.add, plte, group(trns, 1)))
return plte
def asDirect(self):
"""
Returns the image data as a direct representation of
an ``x * y * planes`` array.
This removes the need for callers to deal with
palettes and transparency themselves.
Images with a palette (colour type 3) are converted to RGB or RGBA;
images with transparency (a ``tRNS`` chunk) are converted to
LA or RGBA as appropriate.
When returned in this format the pixel values represent
the colour value directly without needing to refer
to palettes or transparency information.
Like the :meth:`read` method this method returns a 4-tuple:
(*width*, *height*, *rows*, *info*)
This method normally returns pixel values with
the bit depth they have in the source image, but
when the source PNG has an ``sBIT`` chunk it is inspected and
can reduce the bit depth of the result pixels;
pixel values will be reduced according to the bit depth
specified in the ``sBIT`` chunk.
PNG nerds should note a single result bit depth is
used for all channels:
the maximum of the ones specified in the ``sBIT`` chunk.
An RGB565 image will be rescaled to 6-bit RGB666.
The *info* dictionary that is returned reflects
the `direct` format and not the original source image.
For example, an RGB source image with a ``tRNS`` chunk
to represent a transparent colour,
will start with ``planes=3`` and ``alpha=False`` for the
source image,
but the *info* dictionary returned by this method
will have ``planes=4`` and ``alpha=True`` because
an alpha channel is synthesized and added.
*rows* is a sequence of rows;
each row being a sequence of values
(like the :meth:`read` method).
All the other aspects of the image data are not changed.
"""
self.preamble()
# Simple case, no conversion necessary.
if not self.colormap and not self.trns and not self.sbit:
return self.read()
x, y, pixels, info = self.read()
if self.colormap:
info['colormap'] = False
info['alpha'] = bool(self.trns)
info['bitdepth'] = 8
info['planes'] = 3 + bool(self.trns)
plte = self.palette()
def iterpal(pixels):
for row in pixels:
row = [plte[x] for x in row]
yield array('B', itertools.chain(*row))
pixels = iterpal(pixels)
elif self.trns:
# It would be nice if there was some reasonable way
# of doing this without generating a whole load of
            # intermediate tuples. But tuples do seem like the
# easiest way, with no other way clearly much simpler or
# much faster. (Actually, the L to LA conversion could
# perhaps go faster (all those 1-tuples!), but I still
# wonder whether the code proliferation is worth it)
it = self.transparent
maxval = 2 ** info['bitdepth'] - 1
planes = info['planes']
info['alpha'] = True
info['planes'] += 1
typecode = 'BH'[info['bitdepth'] > 8]
def itertrns(pixels):
for row in pixels:
# For each row we group it into pixels, then form a
# characterisation vector that says whether each
# pixel is opaque or not. Then we convert
# True/False to 0/maxval (by multiplication),
# and add it as the extra channel.
row = group(row, planes)
opa = map(it.__ne__, row)
opa = map(maxval.__mul__, opa)
opa = list(zip(opa)) # convert to 1-tuples
yield array(
typecode,
itertools.chain(*map(operator.add, row, opa)))
pixels = itertrns(pixels)
targetbitdepth = None
if self.sbit:
sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
targetbitdepth = max(sbit)
if targetbitdepth > info['bitdepth']:
raise Error('sBIT chunk %r exceeds bitdepth %d' %
(sbit, self.bitdepth))
if min(sbit) <= 0:
raise Error('sBIT chunk %r has a 0-entry' % sbit)
if targetbitdepth:
shift = info['bitdepth'] - targetbitdepth
info['bitdepth'] = targetbitdepth
def itershift(pixels):
for row in pixels:
yield [p >> shift for p in row]
pixels = itershift(pixels)
return x, y, pixels, info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width, height, pixels, info = get()
maxval = 2**info['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
info['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield [int(round(x * factor)) for x in row]
if maxval == targetmaxval:
return width, height, pixels, info
else:
return width, height, iterscale(), info
def asRGB8(self):
"""
Return the image data as an RGB pixels with 8-bits per sample.
This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that
they are all between 0 and 255 (8-bit).
In the case where the source image has a bit depth < 8
the transformation preserves all the information;
where the source image has bit depth > 8, then
rescaling to 8-bit values loses precision.
No dithering is performed.
Like :meth:`asRGB`,
an alpha channel in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *rows*, *info*).
*width*, *height*, *info* are as per the :meth:`read` method.
*rows* is the pixel data as a sequence of rows.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""
Return the image data as RGBA pixels with 8-bits per sample.
This method is similar to :meth:`asRGB8` and :meth:`asRGBA`:
The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255.
The alpha channel is synthesized if necessary
(with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
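    # Editor's note: `asRGBA8` is a convenient normalizer, sketched here
    # with a placeholder file name; any source format becomes 8-bit RGBA:
    #
    #     width, height, rows, info = Reader(filename='in.png').asRGBA8()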
def asRGB(self):
"""
Return image as RGB pixels.
RGB colour images are passed through unchanged;
greyscales are expanded into RGB triplets
(there is a small speed overhead for doing this).
An alpha channel in the source image will raise an exception.
The return values are as for the :meth:`read` method except that
the *info* reflect the returned pixels, not the source image.
In particular,
for this method ``info['greyscale']`` will be ``False``.
"""
width, height, pixels, info = self.asDirect()
if info['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not info['greyscale']:
return width, height, pixels, info
info['greyscale'] = False
info['planes'] = 3
if info['bitdepth'] > 8:
def newarray():
return array('H', [0])
else:
def newarray():
return bytearray([0])
def iterrgb():
for row in pixels:
a = newarray() * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width, height, iterrgb(), info
def asRGBA(self):
"""
Return image as RGBA pixels.
Greyscales are expanded into RGB triplets;
an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method except that
the *info* reflect the returned pixels, not the source image.
In particular, for this method
``info['greyscale']`` will be ``False``, and
``info['alpha']`` will be ``True``.
"""
width, height, pixels, info = self.asDirect()
if info['alpha'] and not info['greyscale']:
return width, height, pixels, info
typecode = 'BH'[info['bitdepth'] > 8]
maxval = 2**info['bitdepth'] - 1
maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
if info['bitdepth'] > 8:
def newarray():
return array('H', maxbuffer)
else:
def newarray():
return bytearray(maxbuffer)
if info['alpha'] and info['greyscale']:
# LA to RGBA
def convert():
for row in pixels:
# Create a fresh target row, then copy L channel
# into first three target channels, and A channel
# into fourth channel.
a = newarray()
convert_la_to_rgba(row, a)
yield a
elif info['greyscale']:
# L to RGBA
def convert():
for row in pixels:
a = newarray()
convert_l_to_rgba(row, a)
yield a
else:
assert not info['alpha'] and not info['greyscale']
# RGB to RGBA
def convert():
for row in pixels:
a = newarray()
convert_rgb_to_rgba(row, a)
yield a
info['alpha'] = True
info['greyscale'] = False
info['planes'] = 4
return width, height, convert(), info
def decompress(data_blocks):
"""
`data_blocks` should be an iterable that
yields the compressed data (from the ``IDAT`` chunks).
This yields decompressed byte strings.
"""
# Currently, with no max_length parameter to decompress,
# this routine will do one yield per IDAT chunk: Not very
# incremental.
d = zlib.decompressobj()
# Each IDAT chunk is passed to the decompressor, then any
# remaining state is decompressed out.
for data in data_blocks:
# :todo: add a max_length argument here to limit output size.
yield bytearray(d.decompress(data))
yield bytearray(d.flush())
def check_bitdepth_colortype(bitdepth, colortype):
"""
Check that `bitdepth` and `colortype` are both valid,
and specified in a valid combination.
    Returns ``None`` if the combination is valid; raises ``FormatError`` if not.
"""
if bitdepth not in (1, 2, 4, 8, 16):
raise FormatError("invalid bit depth %d" % bitdepth)
if colortype not in (0, 2, 3, 4, 6):
raise FormatError("invalid colour type %d" % colortype)
# Check indexed (palettized) images have 8 or fewer bits
# per pixel; check only indexed or greyscale images have
# fewer than 8 bits per pixel.
if colortype & 1 and bitdepth > 8:
raise FormatError(
"Indexed images (colour type %d) cannot"
" have bitdepth > 8 (bit depth %d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (colortype, bitdepth))
if bitdepth < 8 and colortype not in (0, 3):
raise FormatError(
"Illegal combination of bit depth (%d)"
" and colour type (%d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
% (bitdepth, colortype))
def is_natural(x):
"""A non-negative integer."""
try:
is_integer = int(x) == x
except (TypeError, ValueError):
return False
return is_integer and x >= 0
def undo_filter_sub(filter_unit, scanline, previous, result):
"""Undo sub filter."""
ai = 0
    # The loop starts at index filter_unit. Observe that the initial
    # part of the result is already filled in correctly with
    # scanline.
for i in range(filter_unit, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def undo_filter_up(filter_unit, scanline, previous, result):
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def undo_filter_average(filter_unit, scanline, previous, result):
"""Undo up filter."""
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def undo_filter_paeth(filter_unit, scanline, previous, result):
"""Undo Paeth filter."""
# Also used for ci.
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
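# Editor's note: a tiny worked example of undoing the sub filter, under
# the assumption of one byte per pixel (filter_unit == 1). The bytes
# are illustrative: filtered [1, 1, 1] decodes to the ramp [1, 2, 3].
def _example_undo_sub():
    scanline = bytearray([1, 1, 1])
    result = bytearray(scanline)   # undo_filter_* write into `result`
    undo_filter_sub(1, scanline, None, result)
    return result                  # bytearray(b'\x01\x02\x03')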
def convert_la_to_rgba(row, result):
    """
    Convert a greyscale+alpha (LA) row to RGBA:
    the L channel is copied into the first three result channels,
    and the A channel into the fourth.
    """
for i in range(3):
result[i::4] = row[0::2]
result[3::4] = row[1::2]
def convert_l_to_rgba(row, result):
"""
Convert a grayscale image to RGBA.
This method assumes the alpha channel in result is
already correctly initialized.
"""
for i in range(3):
result[i::4] = row
def convert_rgb_to_rgba(row, result):
"""
Convert an RGB image to RGBA.
This method assumes the alpha channel in result is
already correctly initialized.
"""
for i in range(3):
result[i::4] = row[i::3]
# Only reason to include this in this module is that
# several utilities need it, and it is small.
def binary_stdin():
"""
A sys.stdin that returns bytes.
"""
return sys.stdin.buffer
def binary_stdout():
"""
A sys.stdout that accepts bytes.
"""
stdout = sys.stdout.buffer
# On Windows the C runtime file orientation needs changing.
if sys.platform == "win32":
import msvcrt
import os
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return stdout
def cli_open(path):
if path == "-":
return binary_stdin()
return open(path, "rb")
def main(argv):
"""
    Run the command line interface, which simply reports the version.
"""
print(__version__, __file__)
if __name__ == '__main__':
try:
main(sys.argv)
except Error as e:
print(e, file=sys.stderr)
| 82,781 | Python | 33.884956 | 85 | 0.576727 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/six.py | # Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.16.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
if PY34:
from importlib.util import spec_from_loader
else:
spec_from_loader = None
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
    This class implements a PEP 302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def find_spec(self, fullname, path, target=None):
if fullname in self.known_modules:
return spec_from_loader(fullname, self)
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
        Return True if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
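# Editor's note: a hedged sketch of registering a custom move with
# `add_move`; the `mock` mapping below is illustrative.
def _example_register_move():
    add_move(MovedModule("mock", "mock", "unittest.mock"))
    # Afterwards `moves.mock` resolves to `mock` on Python 2 and to
    # `unittest.mock` on Python 3.
    return moves.mock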
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
_assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
def assertNotRegex(self, *args, **kwargs):
return getattr(self, _assertNotRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # This does exactly what the :func:`py3:functools.update_wrapper`
# function does on Python versions after 3.2. It sets the ``__wrapped__``
# attribute on ``wrapper`` object and it doesn't raise an error if any of
# the attributes mentioned in ``assigned`` and ``updated`` are missing on
# ``wrapped`` object.
def _update_wrapper(wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
continue
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
wrapper.__wrapped__ = wrapped
return wrapper
_update_wrapper.__doc__ = functools.update_wrapper.__doc__
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
return functools.partial(_update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
wraps.__doc__ = functools.wraps.__doc__
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
if sys.version_info[:2] >= (3, 7):
# This version introduced PEP 560 that requires a bit
# of extra care (we mimic what is done by __build_class__).
resolved_bases = types.resolve_bases(bases)
if resolved_bases is not bases:
d['__orig_bases__'] = bases
else:
resolved_bases = bases
return meta(name, resolved_bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
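# Editor's note: a minimal sketch of `with_metaclass`; `Meta` and
# `Base` are throwaway names for the example.
def _example_with_metaclass():
    class Meta(type):
        pass

    class Base(object):
        pass

    class MyClass(with_metaclass(Meta, Base)):
        pass

    # The temporary class replaces itself, so the real metaclass wins.
    assert type(MyClass) is Meta
    return MyClass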
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, binary_type):
return s
if isinstance(s, text_type):
return s.encode(encoding, errors)
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
# Optimization: Fast return for the common case.
if type(s) is str:
return s
if PY2 and isinstance(s, text_type):
return s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
return s.decode(encoding, errors)
elif not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
return s
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
"""
A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
        # inserted an importer with a different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| 34,549 | Python | 33.584585 | 118 | 0.624649 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/idna_ssl.py | import ssl
import sys
import idna
__version__ = '1.1.0'
real_match_hostname = ssl.match_hostname
PY_370 = sys.version_info >= (3, 7, 0)
def patched_match_hostname(cert, hostname):
try:
hostname = idna.encode(hostname, uts46=True).decode('ascii')
except UnicodeError:
hostname = hostname.encode('idna').decode('ascii')
return real_match_hostname(cert, hostname)
def patch_match_hostname():
if PY_370:
return
if hasattr(ssl.match_hostname, 'patched'):
return
ssl.match_hostname = patched_match_hostname
ssl.match_hostname.patched = True
def reset_match_hostname():
if PY_370:
return
if not hasattr(ssl.match_hostname, 'patched'):
return
ssl.match_hostname = real_match_hostname
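# Illustrative sketch (not part of idna_ssl): the intended call pattern.
# On Python >= 3.7 both helpers are no-ops because the stdlib already
# handles IDNA hostnames. ``_demo_patch_cycle`` is a hypothetical function.
def _demo_patch_cycle():
    patch_match_hostname()       # install the IDNA-aware wrapper (idempotent)
    try:
        pass                     # ... perform TLS handshakes here ...
    finally:
        reset_match_hostname()   # restore the original implementation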
| 779 | Python | 18.02439 | 68 | 0.654685 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/typing_extensions.py | import abc
import collections
import contextlib
import sys
import typing
import collections.abc as collections_abc
import operator
# These are used by Protocol implementation
# We use internal typing helpers here, but this significantly reduces
# code duplication. (Also this is only until Protocol is in typing.)
from typing import Generic, Callable, TypeVar, Tuple
# After PEP 560, internal typing API was substantially reworked.
# This is especially important for the Protocol class, which uses internal
# APIs quite extensively.
PEP_560 = sys.version_info[:3] >= (3, 7, 0)
if PEP_560:
GenericMeta = TypingMeta = type
from typing import _GenericAlias
else:
from typing import GenericMeta, TypingMeta
OLD_GENERICS = False
try:
from typing import _type_vars, _next_in_mro, _type_check
except ImportError:
OLD_GENERICS = True
try:
from typing import _subs_tree # noqa
SUBS_TREE = True
except ImportError:
SUBS_TREE = False
try:
from typing import _tp_cache
except ImportError:
def _tp_cache(x):
return x
try:
from typing import _TypingEllipsis, _TypingEmpty
except ImportError:
class _TypingEllipsis:
pass
class _TypingEmpty:
pass
# The two functions below are copies of typing internal helpers.
# They are needed by _ProtocolMeta
def _no_slots_copy(dct):
dict_copy = dict(dct)
if '__slots__' in dict_copy:
for slot in dict_copy['__slots__']:
dict_copy.pop(slot, None)
return dict_copy
def _check_generic(cls, parameters):
if not cls.__parameters__:
raise TypeError("%s is not a generic class" % repr(cls))
alen = len(parameters)
elen = len(cls.__parameters__)
if alen != elen:
raise TypeError("Too %s parameters for %s; actual %s, expected %s" %
("many" if alen > elen else "few", repr(cls), alen, elen))
if hasattr(typing, '_generic_new'):
_generic_new = typing._generic_new
else:
# Note: The '_generic_new(...)' function is used as a part of the
# process of creating a generic type and was added to the typing module
# as of Python 3.5.3.
#
# We've defined '_generic_new(...)' below to exactly match the behavior
# implemented in older versions of 'typing' bundled with Python 3.5.0 to
# 3.5.2. This helps eliminate redundancy when defining collection types
# like 'Deque' later.
#
# See https://github.com/python/typing/pull/308 for more details -- in
# particular, compare and contrast the definition of types like
# 'typing.List' before and after the merge.
def _generic_new(base_cls, cls, *args, **kwargs):
return base_cls.__new__(cls, *args, **kwargs)
# See https://github.com/python/typing/pull/439
if hasattr(typing, '_geqv'):
from typing import _geqv
_geqv_defined = True
else:
_geqv = None
_geqv_defined = False
if sys.version_info[:2] >= (3, 6):
import _collections_abc
_check_methods_in_mro = _collections_abc._check_methods
else:
def _check_methods_in_mro(C, *methods):
mro = C.__mro__
for method in methods:
for B in mro:
if method in B.__dict__:
if B.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'ClassVar',
'Concatenate',
'Final',
'ParamSpec',
'Type',
# ABCs (from collections.abc).
# The following are added depending on presence
# of their non-generic counterparts in stdlib:
# 'Awaitable',
# 'AsyncIterator',
# 'AsyncIterable',
# 'Coroutine',
# 'AsyncGenerator',
# 'AsyncContextManager',
# 'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsIndex',
# One-off things.
'final',
'IntVar',
'Literal',
'NewType',
'overload',
'Text',
'TypeAlias',
'TypeGuard',
'TYPE_CHECKING',
]
# Annotated relies on substitution trees of PEP 560. It will not work for
# versions of typing older than 3.5.3
HAVE_ANNOTATED = PEP_560 or SUBS_TREE
if PEP_560:
__all__.extend(["get_args", "get_origin", "get_type_hints"])
if HAVE_ANNOTATED:
__all__.append("Annotated")
# Protocols are hard to backport to the original version of typing 3.5.0
HAVE_PROTOCOLS = sys.version_info[:3] != (3, 5, 0)
if HAVE_PROTOCOLS:
__all__.extend(['Protocol', 'runtime', 'runtime_checkable'])
# TODO
if hasattr(typing, 'NoReturn'):
NoReturn = typing.NoReturn
elif hasattr(typing, '_FinalTypingBase'):
class _NoReturn(typing._FinalTypingBase, _root=True):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("NoReturn cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("NoReturn cannot be used with issubclass().")
NoReturn = _NoReturn(_root=True)
else:
class _NoReturnMeta(typing.TypingMeta):
"""Metaclass for NoReturn"""
def __new__(cls, name, bases, namespace, _root=False):
return super().__new__(cls, name, bases, namespace, _root=_root)
def __instancecheck__(self, obj):
raise TypeError("NoReturn cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("NoReturn cannot be used with issubclass().")
class NoReturn(typing.Final, metaclass=_NoReturnMeta, _root=True):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
__slots__ = ()
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = typing.TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = typing.TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
if hasattr(typing, 'ClassVar'):
ClassVar = typing.ClassVar
elif hasattr(typing, '_FinalTypingBase'):
class _ClassVar(typing._FinalTypingBase, _root=True):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
'{} accepts only single type.'.format(cls.__name__[1:])),
_root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _ClassVar):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
ClassVar = _ClassVar(_root=True)
else:
class _ClassVarMeta(typing.TypingMeta):
"""Metaclass for ClassVar"""
def __new__(cls, name, bases, namespace, tp=None, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
if tp is not None:
self.__type__ = tp
return self
def __instancecheck__(self, obj):
raise TypeError("ClassVar cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("ClassVar cannot be used with issubclass().")
def __getitem__(self, item):
cls = type(self)
if self.__type__ is not None:
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
param = typing._type_check(
item,
'{} accepts only single type.'.format(cls.__name__[1:]))
return cls(self.__name__, self.__bases__,
dict(self.__dict__), tp=param, _root=True)
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(self.__name__, self.__bases__,
dict(self.__dict__), tp=self.__type__,
_root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, ClassVar):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
class ClassVar(typing.Final, metaclass=_ClassVarMeta, _root=True):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
__type__ = None
# On older versions of typing there is an internal class named "Final".
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
elif sys.version_info[:2] >= (3, 7):
class _FinalForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
'{} accepts only single type'.format(self._name))
return _GenericAlias(self, (item,))
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
elif hasattr(typing, '_FinalTypingBase'):
class _Final(typing._FinalTypingBase, _root=True):
"""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
'{} accepts only single type.'.format(cls.__name__[1:])),
_root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _Final):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
Final = _Final(_root=True)
else:
class _FinalMeta(typing.TypingMeta):
"""Metaclass for Final"""
def __new__(cls, name, bases, namespace, tp=None, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
if tp is not None:
self.__type__ = tp
return self
def __instancecheck__(self, obj):
raise TypeError("Final cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Final cannot be used with issubclass().")
def __getitem__(self, item):
cls = type(self)
if self.__type__ is not None:
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
param = typing._type_check(
item,
'{} accepts only single type.'.format(cls.__name__[1:]))
return cls(self.__name__, self.__bases__,
dict(self.__dict__), tp=param, _root=True)
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(self.__name__, self.__bases__,
dict(self.__dict__), tp=self.__type__,
_root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, Final):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
class Final(typing.Final, metaclass=_FinalMeta, _root=True):
"""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
__type__ = None
if hasattr(typing, 'final'):
final = typing.final
else:
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties.
"""
return f
def IntVar(name):
return TypeVar(name)
if hasattr(typing, 'Literal'):
Literal = typing.Literal
elif sys.version_info[:2] >= (3, 7):
class _LiteralForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return _GenericAlias(self, parameters)
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
elif hasattr(typing, '_FinalTypingBase'):
class _Literal(typing._FinalTypingBase, _root=True):
"""A type that can be used to indicate to type checkers that the
corresponding value has a value literally equivalent to the
provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to the
value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime checking
verifying that the parameter is actually a value instead of a type.
"""
__slots__ = ('__values__',)
def __init__(self, values=None, **kwds):
self.__values__ = values
def __getitem__(self, values):
cls = type(self)
if self.__values__ is None:
if not isinstance(values, tuple):
values = (values,)
return cls(values, _root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
return self
def __repr__(self):
r = super().__repr__()
if self.__values__ is not None:
r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__)))
return r
def __hash__(self):
return hash((type(self).__name__, self.__values__))
def __eq__(self, other):
if not isinstance(other, _Literal):
return NotImplemented
if self.__values__ is not None:
return self.__values__ == other.__values__
return self is other
Literal = _Literal(_root=True)
else:
class _LiteralMeta(typing.TypingMeta):
"""Metaclass for Literal"""
def __new__(cls, name, bases, namespace, values=None, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
if values is not None:
self.__values__ = values
return self
def __instancecheck__(self, obj):
raise TypeError("Literal cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Literal cannot be used with issubclass().")
def __getitem__(self, item):
cls = type(self)
if self.__values__ is not None:
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
if not isinstance(item, tuple):
item = (item,)
return cls(self.__name__, self.__bases__,
dict(self.__dict__), values=item, _root=True)
def _eval_type(self, globalns, localns):
return self
def __repr__(self):
r = super().__repr__()
if self.__values__ is not None:
r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__)))
return r
def __hash__(self):
return hash((type(self).__name__, self.__values__))
def __eq__(self, other):
if not isinstance(other, Literal):
return NotImplemented
if self.__values__ is not None:
return self.__values__ == other.__values__
return self is other
class Literal(typing.Final, metaclass=_LiteralMeta, _root=True):
"""A type that can be used to indicate to type checkers that the
corresponding value has a value literally equivalent to the
provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to the
value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime checking
verifying that the parameter is actually a value instead of a type.
"""
__values__ = None
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
"""
return _overload_dummy
# This is not a real generic class. Don't use outside annotations.
if hasattr(typing, 'Type'):
Type = typing.Type
else:
# Internal type variable used for Type[].
CT_co = typing.TypeVar('CT_co', covariant=True, bound=type)
class Type(typing.Generic[CT_co], extra=type):
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
__slots__ = ()
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
def _define_guard(type_name):
"""
Returns True if the given type isn't defined in typing but
is defined in collections_abc.
Adds the type to __all__ if the collection is found in either
    typing or collections_abc.
"""
if hasattr(typing, type_name):
__all__.append(type_name)
globals()[type_name] = getattr(typing, type_name)
return False
elif hasattr(collections_abc, type_name):
__all__.append(type_name)
return True
else:
return False
class _ExtensionsGenericMeta(GenericMeta):
def __subclasscheck__(self, subclass):
"""This mimics a more modern GenericMeta.__subclasscheck__() logic
(that does not have problems with recursion) to work around interactions
between collections, typing, and typing_extensions on older
versions of Python, see https://github.com/python/typing/issues/501.
"""
if sys.version_info[:3] >= (3, 5, 3) or sys.version_info[:3] < (3, 5, 0):
if self.__origin__ is not None:
if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
raise TypeError("Parameterized generics cannot be used with class "
"or instance checks")
return False
if not self.__extra__:
return super().__subclasscheck__(subclass)
res = self.__extra__.__subclasshook__(subclass)
if res is not NotImplemented:
return res
if self.__extra__ in subclass.__mro__:
return True
for scls in self.__extra__.__subclasses__():
if isinstance(scls, GenericMeta):
continue
if issubclass(subclass, scls):
return True
return False
if _define_guard('Awaitable'):
class Awaitable(typing.Generic[T_co], metaclass=_ExtensionsGenericMeta,
extra=collections_abc.Awaitable):
__slots__ = ()
if _define_guard('Coroutine'):
class Coroutine(Awaitable[V_co], typing.Generic[T_co, T_contra, V_co],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.Coroutine):
__slots__ = ()
if _define_guard('AsyncIterable'):
class AsyncIterable(typing.Generic[T_co],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.AsyncIterable):
__slots__ = ()
if _define_guard('AsyncIterator'):
class AsyncIterator(AsyncIterable[T_co],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.AsyncIterator):
__slots__ = ()
if hasattr(typing, 'Deque'):
Deque = typing.Deque
elif _geqv_defined:
class Deque(collections.deque, typing.MutableSequence[T],
metaclass=_ExtensionsGenericMeta,
extra=collections.deque):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Deque):
return collections.deque(*args, **kwds)
return _generic_new(collections.deque, cls, *args, **kwds)
else:
class Deque(collections.deque, typing.MutableSequence[T],
metaclass=_ExtensionsGenericMeta,
extra=collections.deque):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is Deque:
return collections.deque(*args, **kwds)
return _generic_new(collections.deque, cls, *args, **kwds)
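# Illustrative sketch (not part of typing_extensions): the alias defined
# above parameterizes like its typing counterpart. ``_DemoIntDeque`` is a
# hypothetical alias used only for demonstration.
_DemoIntDeque = Deque[int]   # annotation-friendly parameterized alias
# Deque() still constructs a plain collections.deque at runtime.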
if hasattr(typing, 'ContextManager'):
ContextManager = typing.ContextManager
elif hasattr(contextlib, 'AbstractContextManager'):
class ContextManager(typing.Generic[T_co],
metaclass=_ExtensionsGenericMeta,
extra=contextlib.AbstractContextManager):
__slots__ = ()
else:
class ContextManager(typing.Generic[T_co]):
__slots__ = ()
def __enter__(self):
return self
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
return None
@classmethod
def __subclasshook__(cls, C):
if cls is ContextManager:
# In Python 3.6+, it is possible to set a method to None to
# explicitly indicate that the class does not implement an ABC
# (https://bugs.python.org/issue25958), but we do not support
# that pattern here because this fallback class is only used
# in Python 3.5 and earlier.
if (any("__enter__" in B.__dict__ for B in C.__mro__) and
any("__exit__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
if hasattr(typing, 'AsyncContextManager'):
AsyncContextManager = typing.AsyncContextManager
__all__.append('AsyncContextManager')
elif hasattr(contextlib, 'AbstractAsyncContextManager'):
class AsyncContextManager(typing.Generic[T_co],
metaclass=_ExtensionsGenericMeta,
extra=contextlib.AbstractAsyncContextManager):
__slots__ = ()
__all__.append('AsyncContextManager')
elif sys.version_info[:2] >= (3, 5):
exec("""
class AsyncContextManager(typing.Generic[T_co]):
__slots__ = ()
async def __aenter__(self):
return self
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_value, traceback):
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncContextManager:
return _check_methods_in_mro(C, "__aenter__", "__aexit__")
return NotImplemented
__all__.append('AsyncContextManager')
""")
if hasattr(typing, 'DefaultDict'):
DefaultDict = typing.DefaultDict
elif _geqv_defined:
class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.defaultdict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, DefaultDict):
return collections.defaultdict(*args, **kwds)
return _generic_new(collections.defaultdict, cls, *args, **kwds)
else:
class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.defaultdict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is DefaultDict:
return collections.defaultdict(*args, **kwds)
return _generic_new(collections.defaultdict, cls, *args, **kwds)
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
elif _geqv_defined:
class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.OrderedDict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, OrderedDict):
return collections.OrderedDict(*args, **kwds)
return _generic_new(collections.OrderedDict, cls, *args, **kwds)
else:
class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.OrderedDict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is OrderedDict:
return collections.OrderedDict(*args, **kwds)
return _generic_new(collections.OrderedDict, cls, *args, **kwds)
if hasattr(typing, 'Counter'):
Counter = typing.Counter
elif (3, 5, 0) <= sys.version_info[:3] <= (3, 5, 1):
assert _geqv_defined
_TInt = typing.TypeVar('_TInt')
class _CounterMeta(typing.GenericMeta):
"""Metaclass for Counter"""
def __getitem__(self, item):
return super().__getitem__((item, int))
class Counter(collections.Counter,
typing.Dict[T, int],
metaclass=_CounterMeta,
extra=collections.Counter):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Counter):
return collections.Counter(*args, **kwds)
return _generic_new(collections.Counter, cls, *args, **kwds)
elif _geqv_defined:
class Counter(collections.Counter,
typing.Dict[T, int],
metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Counter):
return collections.Counter(*args, **kwds)
return _generic_new(collections.Counter, cls, *args, **kwds)
else:
class Counter(collections.Counter,
typing.Dict[T, int],
metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is Counter:
return collections.Counter(*args, **kwds)
return _generic_new(collections.Counter, cls, *args, **kwds)
if hasattr(typing, 'ChainMap'):
ChainMap = typing.ChainMap
__all__.append('ChainMap')
elif hasattr(collections, 'ChainMap'):
# ChainMap only exists in 3.3+
if _geqv_defined:
class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.ChainMap):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, ChainMap):
return collections.ChainMap(*args, **kwds)
return _generic_new(collections.ChainMap, cls, *args, **kwds)
else:
class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.ChainMap):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is ChainMap:
return collections.ChainMap(*args, **kwds)
return _generic_new(collections.ChainMap, cls, *args, **kwds)
__all__.append('ChainMap')
if _define_guard('AsyncGenerator'):
class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
metaclass=_ExtensionsGenericMeta,
extra=collections_abc.AsyncGenerator):
__slots__ = ()
if hasattr(typing, 'NewType'):
NewType = typing.NewType
else:
def NewType(name, tp):
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy function that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
def new_type(x):
return x
new_type.__name__ = name
new_type.__supertype__ = tp
return new_type
if hasattr(typing, 'Text'):
Text = typing.Text
else:
Text = str
if hasattr(typing, 'TYPE_CHECKING'):
TYPE_CHECKING = typing.TYPE_CHECKING
else:
# Constant that's True when type checking, but False here.
TYPE_CHECKING = False
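# Illustrative sketch (not part of typing_extensions): the canonical
# TYPE_CHECKING guard. The import below never runs at runtime (the flag is
# False), but static type checkers treat the branch as taken.
if TYPE_CHECKING:
    import array as _demo_checker_only_import   # hypothetical example import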
def _gorg(cls):
"""This function exists for compatibility with old typing versions."""
assert isinstance(cls, GenericMeta)
if hasattr(cls, '_gorg'):
return cls._gorg
while cls.__origin__ is not None:
cls = cls.__origin__
return cls
if OLD_GENERICS:
def _next_in_mro(cls): # noqa
"""This function exists for compatibility with old typing versions."""
next_in_mro = object
for i, c in enumerate(cls.__mro__[:-1]):
if isinstance(c, GenericMeta) and _gorg(c) is Generic:
next_in_mro = cls.__mro__[i + 1]
return next_in_mro
_PROTO_WHITELIST = ['Callable', 'Awaitable',
'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
'ContextManager', 'AsyncContextManager']
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if (not attr.startswith('_abc_') and attr not in (
'__abstractmethods__', '__annotations__', '__weakref__',
'_is_protocol', '_is_runtime_protocol', '__dict__',
'__args__', '__slots__',
'__next_in_mro__', '__parameters__', '__origin__',
'__orig_bases__', '__extra__', '__tree_hash__',
'__doc__', '__subclasshook__', '__init__', '__new__',
'__module__', '_MutableMapping__marker', '_gorg')):
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
elif HAVE_PROTOCOLS and not PEP_560:
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
class _ProtocolMeta(GenericMeta):
"""Internal metaclass for Protocol.
This exists so Protocol classes can be generic without deriving
from Generic.
"""
if not OLD_GENERICS:
def __new__(cls, name, bases, namespace,
tvars=None, args=None, origin=None, extra=None, orig_bases=None):
# This is just a version copied from GenericMeta.__new__ that
# includes "Protocol" special treatment. (Comments removed for brevity.)
assert extra is None # Protocols should not have extra
if tvars is not None:
assert origin is not None
assert all(isinstance(t, TypeVar) for t in tvars), tvars
else:
tvars = _type_vars(bases)
gvars = None
for base in bases:
if base is Generic:
raise TypeError("Cannot inherit from plain Generic")
if (isinstance(base, GenericMeta) and
base.__origin__ in (Generic, Protocol)):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] or"
" Protocol[...] multiple times.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
raise TypeError(
"Some type variables (%s) "
"are not listed in %s[%s]" %
(", ".join(str(t) for t in tvars if t not in gvarset),
"Generic" if any(b.__origin__ is Generic
for b in bases) else "Protocol",
", ".join(str(g) for g in gvars)))
tvars = gvars
initial_bases = bases
if (extra is not None and type(extra) is abc.ABCMeta and
extra not in bases):
bases = (extra,) + bases
bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
for b in bases)
if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
bases = tuple(b for b in bases if b is not Generic)
namespace.update({'__origin__': origin, '__extra__': extra})
self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
_root=True)
super(GenericMeta, self).__setattr__('_gorg',
self if not origin else
_gorg(origin))
self.__parameters__ = tvars
self.__args__ = tuple(... if a is _TypingEllipsis else
() if a is _TypingEmpty else
a for a in args) if args else None
self.__next_in_mro__ = _next_in_mro(self)
if orig_bases is None:
self.__orig_bases__ = initial_bases
elif origin is not None:
self._abc_registry = origin._abc_registry
self._abc_cache = origin._abc_cache
if hasattr(self, '_subs_tree'):
self.__tree_hash__ = (hash(self._subs_tree()) if origin else
super(GenericMeta, self).__hash__())
return self
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol or
isinstance(b, _ProtocolMeta) and
b.__origin__ is Protocol
for b in cls.__bases__)
if cls._is_protocol:
for base in cls.__mro__[1:]:
if not (base in (object, Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, TypingMeta) and base._is_protocol or
isinstance(base, GenericMeta) and
base.__origin__ is Generic):
raise TypeError('Protocols can only inherit from other'
' protocols, got %r' % base)
cls.__init__ = _no_init
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
def __instancecheck__(self, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(self, '_is_protocol', False) or
_is_callable_members_only(self)) and
issubclass(instance.__class__, self)):
return True
if self._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(self, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(self)):
return True
return super(GenericMeta, self).__instancecheck__(instance)
def __subclasscheck__(self, cls):
if self.__origin__ is not None:
if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
raise TypeError("Parameterized generics cannot be used with class "
"or instance checks")
return False
if (self.__dict__.get('_is_protocol', None) and
not self.__dict__.get('_is_runtime_protocol', None)):
if sys._getframe(1).f_globals['__name__'] in ['abc',
'functools',
'typing']:
return False
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if (self.__dict__.get('_is_runtime_protocol', None) and
not _is_callable_members_only(self)):
if sys._getframe(1).f_globals['__name__'] in ['abc',
'functools',
'typing']:
return super(GenericMeta, self).__subclasscheck__(cls)
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
return super(GenericMeta, self).__subclasscheck__(cls)
if not OLD_GENERICS:
@_tp_cache
def __getitem__(self, params):
# We also need to copy this from GenericMeta.__getitem__ to get
# special treatment of "Protocol". (Comments removed for brevity.)
if not isinstance(params, tuple):
params = (params,)
if not params and _gorg(self) is not Tuple:
raise TypeError(
"Parameter list to %s[...] cannot be empty" % self.__qualname__)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
if self in (Generic, Protocol):
if not all(isinstance(p, TypeVar) for p in params):
raise TypeError(
"Parameters to %r[...] must all be type variables" % self)
if len(set(params)) != len(params):
raise TypeError(
"Parameters to %r[...] must all be unique" % self)
tvars = params
args = params
elif self in (Tuple, Callable):
tvars = _type_vars(params)
args = params
elif self.__origin__ in (Generic, Protocol):
raise TypeError("Cannot subscript already-subscripted %s" %
repr(self))
else:
_check_generic(self, params)
tvars = _type_vars(params)
args = params
prepend = (self,) if self.__origin__ is None else ()
return self.__class__(self.__name__,
prepend + self.__bases__,
_no_slots_copy(self.__dict__),
tvars=tvars,
args=args,
origin=self,
extra=self.__extra__,
orig_bases=self.__orig_bases__)
class Protocol(metaclass=_ProtocolMeta):
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
        @typing_extensions.runtime act as simple-minded runtime protocols that
        check only the presence of the given attributes, ignoring their type
        signatures.
Protocol classes can be generic, they are defined as::
class GenProto({bases}):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if _gorg(cls) is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can be used only as a base class")
if OLD_GENERICS:
return _generic_new(_next_in_mro(cls), cls, *args, **kwds)
return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
if Protocol.__doc__ is not None:
Protocol.__doc__ = Protocol.__doc__.format(bases="Protocol, Generic[T]" if
OLD_GENERICS else "Protocol[T]")
elif PEP_560:
from typing import _type_check, _collect_type_vars # noqa
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
class _ProtocolMeta(abc.ABCMeta):
# This metaclass is a bit unfortunate and exists only because of the lack
# of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(metaclass=_ProtocolMeta):
# There is quite a lot of overlapping code with typing.Generic.
# Unfortunately it is hard to avoid this while these live in two different
# modules. The duplicated code will be removed when Protocol is moved to typing.
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
        @typing_extensions.runtime act as simple-minded runtime protocols that
        check only the presence of the given attributes, ignoring their type
        signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if cls is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can only be used as a base class")
return super().__new__(cls)
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not Tuple:
raise TypeError(
"Parameter list to {}[...] cannot be empty".format(cls.__qualname__))
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
if cls is Protocol:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, TypeVar) for p in params):
i = 0
while isinstance(params[i], TypeVar):
i += 1
raise TypeError(
"Parameters to Protocol[...] must all be type variables."
" Parameter {} is {}".format(i + 1, params[i]))
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Protocol[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
_check_generic(cls, params)
return _GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
tvars = []
if '__orig_bases__' in cls.__dict__:
error = Generic in cls.__orig_bases__
else:
error = Generic in cls.__bases__
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...] and/or Protocol[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, _GenericAlias) and
base.__origin__ in (Generic, Protocol)):
# for error messages
the_base = 'Generic' if base.__origin__ is Generic else 'Protocol'
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...]"
" and/or Protocol[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError("Some type variables ({}) are"
" not listed in {}[{}]".format(s_vars,
the_base, s_args))
tvars = gvars
cls.__parameters__ = tuple(tvars)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not getattr(cls, '_is_runtime_protocol', False):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if not _is_callable_members_only(cls):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols.
if not cls._is_protocol:
return
# Check consistency of bases.
for base in cls.__bases__:
if not (base in (object, Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, _ProtocolMeta) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
' protocols, got %r' % base)
cls.__init__ = _no_init
if hasattr(typing, 'runtime_checkable'):
runtime_checkable = typing.runtime_checkable
elif HAVE_PROTOCOLS:
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol, so that it
can be used with isinstance() and issubclass(). Raise TypeError
if applied to a non-protocol class.
This allows a simple-minded structural check very similar to the
one-offs in collections.abc such as Hashable.
"""
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
' got %r' % cls)
cls._is_runtime_protocol = True
return cls
if HAVE_PROTOCOLS:
# Exists for backwards compatibility.
runtime = runtime_checkable
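# Illustrative sketch (not part of typing_extensions): a runtime-checkable
# protocol usable with isinstance(). ``_DemoClosable`` is a hypothetical
# demonstration class; the check verifies only that a callable ``close``
# attribute exists, not its signature.
if HAVE_PROTOCOLS:
    @runtime_checkable
    class _DemoClosable(Protocol):
        def close(self):
            ...
    # e.g. isinstance(io.StringIO(), _DemoClosable) would evaluate to True.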
if hasattr(typing, 'SupportsIndex'):
SupportsIndex = typing.SupportsIndex
elif HAVE_PROTOCOLS:
@runtime_checkable
class SupportsIndex(Protocol):
__slots__ = ()
@abc.abstractmethod
def __index__(self) -> int:
pass
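# Illustrative sketch (not part of typing_extensions): SupportsIndex is
# runtime-checkable, so isinstance() works against it wherever it was
# defined above. ``_demo_is_indexable`` is a hypothetical flag.
if 'SupportsIndex' in globals():
    _demo_is_indexable = isinstance(3, SupportsIndex)   # True: int has __index__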
if sys.version_info >= (3, 9, 2):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
TypedDict = typing.TypedDict
else:
def _check_fails(cls, other):
try:
if sys._getframe(1).f_globals['__name__'] not in ['abc',
'functools',
'typing']:
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
except (AttributeError, ValueError):
pass
return False
def _dict_new(*args, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
return dict(*args, **kwargs)
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
def _typeddict_new(*args, total=True, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
if args:
            typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
elif '_typename' in kwargs:
typename = kwargs.pop('_typename')
import warnings
warnings.warn("Passing '_typename' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError("TypedDict.__new__() missing 1 required positional "
"argument: '_typename'")
if args:
try:
                fields, = args  # allow the "_fields" keyword to be passed
except ValueError:
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
'positional arguments but {} '
'were given'.format(len(args) + 2))
elif '_fields' in kwargs and len(kwargs) == 1:
fields = kwargs.pop('_fields')
import warnings
warnings.warn("Passing '_fields' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
fields = None
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
try:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return _TypedDictMeta(typename, (), ns, total=total)
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
class _TypedDictMeta(type):
def __init__(cls, name, bases, ns, total=True):
# In Python 3.4 and 3.5 the __init__ method also needs to support the
# keyword arguments.
# See https://www.python.org/dev/peps/pep-0487/#implementation-details
super(_TypedDictMeta, cls).__init__(name, bases, ns)
def __new__(cls, name, bases, ns, total=True):
# Create new typed dict class object.
# This method is called directly when TypedDict is subclassed,
# or via _typeddict_new when TypedDict is instantiated. This way
# TypedDict supports all three syntaxes described in its docstring.
# Subclasses and instances of TypedDict return actual dictionaries
# via _dict_new.
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns)
annotations = {}
own_annotations = ns.get('__annotations__', {})
own_annotation_keys = set(own_annotations.keys())
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
if total:
required_keys.update(own_annotation_keys)
else:
optional_keys.update(own_annotation_keys)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__instancecheck__ = __subclasscheck__ = _check_fails
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
    The class syntax is only supported in Python 3.6+, while the two other
    syntax forms work for Python 2.7 and 3.2+.
"""
# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
get_type_hints = typing.get_type_hints
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
elif PEP_560:
class _AnnotatedAlias(typing._GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
        instantiating it is the same as instantiating the underlying type,
        and binding it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return "typing_extensions.Annotated[{}, {}]".format(
typing._type_repr(self.__origin__),
", ".join(repr(a) for a in self.__metadata__)
)
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
if self.__origin__ != other.__origin__:
return False
return self.__metadata__ == other.__metadata__
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
        The first argument to Annotated must be a valid type (and will be
        stored in the __origin__ field); the remaining arguments are kept
        as a tuple in the __metadata__ field.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@_tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
msg = "Annotated[t, ...]: t must be a type."
origin = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
"Cannot subclass {}.Annotated".format(cls.__module__)
)
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
res = t.copy_with(stripped_args)
res._special = t._special
return res
return t
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if include_extras:
return hint
return {k: _strip_annotations(t) for k, t in hint.items()}
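    # Illustrative sketch (not part of typing_extensions): how the
    # include_extras flag changes the result above. ``_demo_hints`` is a
    # hypothetical helper added only for demonstration; nothing calls it.
    def _demo_hints():
        def scale(x):
            pass
        scale.__annotations__ = {'x': Annotated[int, 'unit:px']}
        assert get_type_hints(scale) == {'x': int}            # metadata stripped
        rich = get_type_hints(scale, include_extras=True)     # metadata kept
        return rich['x'].__metadata__                         # -> ('unit:px',)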
elif HAVE_ANNOTATED:
def _is_dunder(name):
"""Returns True if name is a __dunder_variable_name__."""
return len(name) > 4 and name.startswith('__') and name.endswith('__')
# Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
# checks, argument expansion etc. are done on the _subs_tree. As a result we
# can't provide a get_type_hints function that strips out annotations.
class AnnotatedMeta(typing.GenericMeta):
"""Metaclass for Annotated"""
def __new__(cls, name, bases, namespace, **kwargs):
if any(b is not object for b in bases):
raise TypeError("Cannot subclass " + str(Annotated))
return super().__new__(cls, name, bases, namespace, **kwargs)
@property
def __metadata__(self):
return self._subs_tree()[2]
def _tree_repr(self, tree):
cls, origin, metadata = tree
if not isinstance(origin, tuple):
tp_repr = typing._type_repr(origin)
else:
tp_repr = origin[0]._tree_repr(origin)
metadata_reprs = ", ".join(repr(arg) for arg in metadata)
return '%s[%s, %s]' % (cls, tp_repr, metadata_reprs)
def _subs_tree(self, tvars=None, args=None): # noqa
if self is Annotated:
return Annotated
res = super()._subs_tree(tvars=tvars, args=args)
# Flatten nested Annotated
if isinstance(res[1], tuple) and res[1][0] is Annotated:
sub_tp = res[1][1]
sub_annot = res[1][2]
return (Annotated, sub_tp, sub_annot + res[2])
return res
def _get_cons(self):
"""Return the class used to create instance of this type."""
if self.__origin__ is None:
raise TypeError("Cannot get the underlying type of a "
"non-specialized Annotated type.")
tree = self._subs_tree()
while isinstance(tree, tuple) and tree[0] is Annotated:
tree = tree[1]
if isinstance(tree, tuple):
return tree[0]
else:
return tree
@_tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
if self.__origin__ is not None: # specializing an instantiated type
return super().__getitem__(params)
elif not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be instantiated "
"with at least two arguments (a type and an "
"annotation).")
else:
msg = "Annotated[t, ...]: t must be a type."
tp = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return self.__class__(
self.__name__,
self.__bases__,
_no_slots_copy(self.__dict__),
tvars=_type_vars((tp,)),
# Metadata is a tuple so it won't be touched by _replace_args et al.
args=(tp, metadata),
origin=self,
)
def __call__(self, *args, **kwargs):
cons = self._get_cons()
result = cons(*args, **kwargs)
try:
result.__orig_class__ = self
except AttributeError:
pass
return result
def __getattr__(self, attr):
# For simplicity we just don't relay all dunder names
if self.__origin__ is not None and not _is_dunder(attr):
return getattr(self._get_cons(), attr)
raise AttributeError(attr)
def __setattr__(self, attr, value):
if _is_dunder(attr) or attr.startswith('_abc_'):
super().__setattr__(attr, value)
elif self.__origin__ is None:
raise AttributeError(attr)
else:
setattr(self._get_cons(), attr, value)
def __instancecheck__(self, obj):
raise TypeError("Annotated cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Annotated cannot be used with issubclass().")
class Annotated(metaclass=AnnotatedMeta):
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type; the remaining
arguments are kept as a tuple in the __metadata__ field.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those, only Python 3.9 versions will do.
# Similarly, Python 3.9's implementation doesn't support ParamSpecArgs and
# ParamSpecKwargs.
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
elif PEP_560:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = _GenericAlias
try:
# 3.9+
from typing import GenericAlias
except ImportError:
GenericAlias = _GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (_GenericAlias, GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is Generic:
return Generic
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (_GenericAlias, GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return ()
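# Illustrative sketch (added for this document; not part of typing_extensions):
# the introspection results promised by the get_origin/get_args docstrings above.
def _demo_introspection():
    from typing import Dict, Union
    assert get_origin(Dict[str, int]) is dict
    assert get_args(Dict[str, int]) == (str, int)
    assert get_origin(int) is None and get_args(int) == ()
    assert get_args(Union[int, str]) == (int, str)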
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
elif sys.version_info[:2] >= (3, 9):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeAliasForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError("{} is not subscriptable".format(self))
elif sys.version_info[:2] >= (3, 7):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
elif hasattr(typing, '_FinalTypingBase'):
class _TypeAliasMeta(typing.TypingMeta):
"""Metaclass for TypeAlias"""
def __repr__(self):
return 'typing_extensions.TypeAlias'
class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("TypeAlias cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("TypeAlias cannot be used with issubclass().")
def __repr__(self):
return 'typing_extensions.TypeAlias'
TypeAlias = _TypeAliasBase(_root=True)
else:
class _TypeAliasMeta(typing.TypingMeta):
"""Metaclass for TypeAlias"""
def __instancecheck__(self, obj):
raise TypeError("TypeAlias cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("TypeAlias cannot be used with issubclass().")
def __call__(self, *args, **kwargs):
raise TypeError("Cannot instantiate TypeAlias")
class TypeAlias(metaclass=_TypeAliasMeta, _root=True):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
__slots__ = ()
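# Illustrative sketch (added for this document): at runtime a TypeAlias-marked
# assignment is an ordinary assignment; the marker exists only for static checkers.
def _demo_type_alias():
    from typing import Callable
    Predicate: TypeAlias = Callable[..., bool]   # checkers treat this as an alias
    is_positive: Predicate = lambda x: x > 0
    return is_positive(3)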
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return "{}.args".format(self.__origin__.__name__)
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return "{}.kwargs".format(self.__origin__.__name__)
if hasattr(typing, 'ParamSpec'):
ParamSpec = typing.ParamSpec
else:
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class ParamSpec(list):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or as the first argument to ``Callable``. In Python 3.10 and higher,
they are also supported in user-defined Generics at runtime.
See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
# Trick Generic __parameters__.
__class__ = TypeVar
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
super().__init__([self])
self.__name__ = name
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
else:
self.__bound__ = None
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
# Hack to get typing._type_check to pass.
def __call__(self, *args, **kwargs):
pass
if not PEP_560:
# Only needed in 3.6 and lower.
def _get_type_vars(self, tvars):
if self not in tvars:
tvars.append(self)
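# Illustrative sketch (added for this document; not part of typing_extensions):
# a runnable version of the decorator pattern from the ParamSpec docstring above.
# Evaluating Callable[P, T] at runtime here relies on the list-inheritance
# workaround noted in the class comment.
def _demo_paramspec():
    from typing import Callable, TypeVar
    T = TypeVar('T')
    P = ParamSpec('P')
    def add_logging(f: Callable[P, T]) -> Callable[P, T]:
        def inner(*args: P.args, **kwargs: P.kwargs) -> T:
            print(f.__name__, "was called")
            return f(*args, **kwargs)
        return inner
    @add_logging
    def add_two(x: float, y: float) -> float:
        return x + y
    assert add_two(1.0, 2.0) == 3.0
    assert P.args.__origin__ is P and repr(P.kwargs) == "P.kwargs"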
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class _ConcatenateGenericAlias(list):
# Trick Generic into looking into this for __parameters__.
if PEP_560:
__class__ = typing._GenericAlias
elif sys.version_info[:3] == (3, 5, 2):
__class__ = typing.TypingMeta
else:
__class__ = typing._TypingBase
# Flag in 3.8.
_special = False
# Attribute in 3.6 and earlier.
if sys.version_info[:3] == (3, 5, 2):
_gorg = typing.GenericMeta
else:
_gorg = typing.Generic
def __init__(self, origin, args):
super().__init__(args)
self.__origin__ = origin
self.__args__ = args
def __repr__(self):
_type_repr = typing._type_repr
return '{origin}[{args}]' \
.format(origin=_type_repr(self.__origin__),
args=', '.join(_type_repr(arg) for arg in self.__args__))
def __hash__(self):
return hash((self.__origin__, self.__args__))
# Hack to get typing._type_check to pass in Generic.
def __call__(self, *args, **kwargs):
pass
@property
def __parameters__(self):
return tuple(tp for tp in self.__args__ if isinstance(tp, (TypeVar, ParamSpec)))
if not PEP_560:
# Only required in 3.6 and lower.
def _get_type_vars(self, tvars):
if self.__origin__ and self.__parameters__:
typing._get_type_vars(self.__parameters__, tvars)
@_tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
elif sys.version_info[:2] >= (3, 9):
@_TypeAliasForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
elif sys.version_info[:2] >= (3, 7):
class _ConcatenateForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
elif hasattr(typing, '_FinalTypingBase'):
class _ConcatenateAliasMeta(typing.TypingMeta):
"""Metaclass for Concatenate."""
def __repr__(self):
return 'typing_extensions.Concatenate'
class _ConcatenateAliasBase(typing._FinalTypingBase,
metaclass=_ConcatenateAliasMeta,
_root=True):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("Concatenate cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Concatenate cannot be used with issubclass().")
def __repr__(self):
return 'typing_extensions.Concatenate'
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateAliasBase(_root=True)
# For 3.5.0 - 3.5.2
else:
class _ConcatenateAliasMeta(typing.TypingMeta):
"""Metaclass for Concatenate."""
def __instancecheck__(self, obj):
raise TypeError("TypeAlias cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("TypeAlias cannot be used with issubclass().")
def __call__(self, *args, **kwargs):
raise TypeError("Cannot instantiate TypeAlias")
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
class Concatenate(metaclass=_ConcatenateAliasMeta, _root=True):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
__slots__ = ()
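# Illustrative sketch (added for this document; not part of typing_extensions):
# Concatenate prepends fixed positional argument types to a ParamSpec; the
# resulting alias records them in __args__, with the ParamSpec required last.
def _demo_concatenate():
    P = ParamSpec('P')
    alias = Concatenate[int, str, P]
    assert alias.__args__ == (int, str, P)
    try:
        Concatenate[int, str]            # last parameter must be a ParamSpec
    except TypeError:
        pass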
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
elif sys.version_info[:2] >= (3, 9):
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeGuardForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, '{} accepts only a single type.'.format(self))
return _GenericAlias(self, (item,))
elif sys.version_info[:2] >= (3, 7):
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
'{} accepts only a single type'.format(self._name))
return _GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
elif hasattr(typing, '_FinalTypingBase'):
class _TypeGuard(typing._FinalTypingBase, _root=True):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
'{} accepts only a single type.'.format(cls.__name__[1:])),
_root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _TypeGuard):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
TypeGuard = _TypeGuard(_root=True)
else:
class _TypeGuardMeta(typing.TypingMeta):
"""Metaclass for TypeGuard"""
def __new__(cls, name, bases, namespace, tp=None, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
if tp is not None:
self.__type__ = tp
return self
def __instancecheck__(self, obj):
raise TypeError("TypeGuard cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("TypeGuard cannot be used with issubclass().")
def __getitem__(self, item):
cls = type(self)
if self.__type__ is not None:
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
param = typing._type_check(
item,
'{} accepts only a single type.'.format(cls.__name__[1:]))
return cls(self.__name__, self.__bases__,
dict(self.__dict__), tp=param, _root=True)
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(self.__name__, self.__bases__,
dict(self.__dict__), tp=self.__type__,
_root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not hasattr(other, "__type__"):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
class TypeGuard(typing.Final, metaclass=_TypeGuardMeta, _root=True):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
__type__ = None
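# Illustrative sketch (added for this document; not part of typing_extensions):
# a user-defined type guard as documented above. At runtime the guard simply
# returns a bool; the narrowing it advertises only affects static checkers.
# The string annotation avoids subscripting TypeGuard on older code paths.
def _demo_typeguard():
    from typing import List
    def is_str_list(val: List[object]) -> "TypeGuard[List[str]]":
        return all(isinstance(x, str) for x in val)
    def upper_all(val: List[object]):
        if is_str_list(val):   # a checker narrows val to List[str] in this branch
            return [x.upper() for x in val]
        return None
    assert upper_all(["a", "b"]) == ["A", "B"]
    assert upper_all(["a", 1]) is None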
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/matlib.py
import warnings
# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
# 2020-01-23, numpy 1.19.0 PendingDeprecatonWarning
warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
"The matrix subclass is not the recommended way to represent "
"matrices or deal with linear algebra (see "
"https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
"Please adjust your code to use regular ndarray. ",
PendingDeprecationWarning, stacklevel=2)
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# Matlib.py contains all functions in the numpy namespace with a few
# replacements. See doc/source/reference/routines.matlib.rst for details.
# Need * as we're copying the numpy namespace.
from numpy import * # noqa: F403
__version__ = np.__version__
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
"""Return a new matrix of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty matrix.
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
See Also
--------
empty_like, zeros
Notes
-----
`empty`, unlike `zeros`, does not set the matrix values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.empty((2, 2)) # filled with random data
matrix([[ 6.76425276e-320, 9.79033856e-307], # random
[ 7.39337286e-309, 3.22135945e-309]])
>>> np.matlib.empty((2, 2), dtype=int)
matrix([[ 6600475, 0], # random
[ 6586976, 22740995]])
"""
return ndarray.__new__(matrix, shape, dtype, order=order)
def ones(shape, dtype=None, order='C'):
"""
Matrix of ones.
Return a matrix of given shape and type, filled with ones.
Parameters
----------
shape : {sequence of ints, int}
Shape of the matrix
dtype : data-type, optional
The desired data-type for the matrix, default is np.float64.
order : {'C', 'F'}, optional
Whether to store matrix in C- or Fortran-contiguous order,
default is 'C'.
Returns
-------
out : matrix
Matrix of ones of given shape, dtype, and order.
See Also
--------
ones : Array of ones.
matlib.zeros : Zero matrix.
Notes
-----
If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
`out` becomes a single row matrix of shape ``(1,N)``.
Examples
--------
>>> np.matlib.ones((2,3))
matrix([[1., 1., 1.],
[1., 1., 1.]])
>>> np.matlib.ones(2)
matrix([[1., 1.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
a.fill(1)
return a
def zeros(shape, dtype=None, order='C'):
"""
Return a matrix of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the matrix
dtype : data-type, optional
The desired data-type for the matrix, default is float.
order : {'C', 'F'}, optional
Whether to store the result in C- or Fortran-contiguous order,
default is 'C'.
Returns
-------
out : matrix
Zero matrix of given shape, dtype, and order.
See Also
--------
numpy.zeros : Equivalent array function.
matlib.ones : Return a matrix of ones.
Notes
-----
If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
`out` becomes a single row matrix of shape ``(1,N)``.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.zeros((2, 3))
matrix([[0., 0., 0.],
[0., 0., 0.]])
>>> np.matlib.zeros(2)
matrix([[0., 0.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
a.fill(0)
return a
def identity(n,dtype=None):
"""
Returns the square identity matrix of given size.
Parameters
----------
n : int
Size of the returned identity matrix.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : matrix
`n` x `n` matrix with its main diagonal set to one,
and all other elements zero.
See Also
--------
numpy.identity : Equivalent array function.
matlib.eye : More general matrix identity function.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.identity(3, dtype=int)
matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
a = array([1]+n*[0], dtype=dtype)
b = empty((n, n), dtype=dtype)
b.flat = a
return b
def eye(n,M=None, k=0, dtype=float, order='C'):
"""
Return a matrix with ones on the diagonal and zeros elsewhere.
Parameters
----------
n : int
Number of rows in the output.
M : int, optional
Number of columns in the output, defaults to `n`.
k : int, optional
Index of the diagonal: 0 refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : dtype, optional
Data-type of the returned matrix.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : matrix
A `n` x `M` matrix where all elements are equal to zero,
except for the `k`-th diagonal, whose values are equal to one.
See Also
--------
numpy.eye : Equivalent array function.
identity : Square identity matrix.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.eye(3, k=1, dtype=float)
matrix([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
def rand(*args):
"""
Return a matrix of random values with given shape.
Create a matrix of the given shape and populate it with
random samples from a uniform distribution over ``[0, 1)``.
Parameters
----------
\\*args : Arguments
Shape of the output.
If given as N integers, each integer specifies the size of one
dimension.
If given as a tuple, this tuple gives the complete shape.
Returns
-------
out : ndarray
The matrix of random values with shape given by `\\*args`.
See Also
--------
randn, numpy.random.RandomState.rand
Examples
--------
>>> np.random.seed(123)
>>> import numpy.matlib
>>> np.matlib.rand(2, 3)
matrix([[0.69646919, 0.28613933, 0.22685145],
[0.55131477, 0.71946897, 0.42310646]])
>>> np.matlib.rand((2, 3))
matrix([[0.9807642 , 0.68482974, 0.4809319 ],
[0.39211752, 0.34317802, 0.72904971]])
If the first argument is a tuple, other arguments are ignored:
>>> np.matlib.rand((2, 3), 4)
matrix([[0.43857224, 0.0596779 , 0.39804426],
[0.73799541, 0.18249173, 0.17545176]])
"""
if isinstance(args[0], tuple):
args = args[0]
return asmatrix(np.random.rand(*args))
def randn(*args):
"""
Return a random matrix with data from the "standard normal" distribution.
`randn` generates a matrix filled with random floats sampled from a
univariate "normal" (Gaussian) distribution of mean 0 and variance 1.
Parameters
----------
\\*args : Arguments
Shape of the output.
If given as N integers, each integer specifies the size of one
dimension. If given as a tuple, this tuple gives the complete shape.
Returns
-------
Z : matrix of floats
A matrix of floating-point samples drawn from the standard normal
distribution.
See Also
--------
rand, numpy.random.RandomState.randn
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use:
``sigma * np.matlib.randn(...) + mu``
Examples
--------
>>> np.random.seed(123)
>>> import numpy.matlib
>>> np.matlib.randn(1)
matrix([[-1.0856306]])
>>> np.matlib.randn(1, 2, 3)
matrix([[ 0.99734545, 0.2829785 , -1.50629471],
[-0.57860025, 1.65143654, -2.42667924]])
Two-by-four matrix of samples from :math:`N(3, 6.25)`:
>>> 2.5 * np.matlib.randn((2, 4)) + 3
matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462],
[2.76322758, 6.72847407, 1.40274501, 1.8900451 ]])
"""
if isinstance(args[0], tuple):
args = args[0]
return asmatrix(np.random.randn(*args))
def repmat(a, m, n):
"""
Repeat a 0-D to 2-D array or matrix MxN times.
Parameters
----------
a : array_like
The array or matrix to be repeated.
m, n : int
The number of times `a` is repeated along the first and second axes.
Returns
-------
out : ndarray
The result of repeating `a`.
Examples
--------
>>> import numpy.matlib
>>> a0 = np.array(1)
>>> np.matlib.repmat(a0, 2, 3)
array([[1, 1, 1],
[1, 1, 1]])
>>> a1 = np.arange(4)
>>> np.matlib.repmat(a1, 2, 2)
array([[0, 1, 2, 3, 0, 1, 2, 3],
[0, 1, 2, 3, 0, 1, 2, 3]])
>>> a2 = np.asmatrix(np.arange(6).reshape(2, 3))
>>> np.matlib.repmat(a2, 2, 3)
matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5]])
"""
a = asanyarray(a)
ndim = a.ndim
if ndim == 0:
origrows, origcols = (1, 1)
elif ndim == 1:
origrows, origcols = (1, a.shape[0])
else:
origrows, origcols = a.shape
rows = origrows * m
cols = origcols * n
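# Repeat the flattened input m times, reshape back to the original column
# count so the copies stack vertically, then repeat each row n times; the
# final (rows, cols) reshape completes the horizontal tiling.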
c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0)
return c.reshape(rows, cols)
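# Illustrative sketch (added for this document; not part of numpy.matlib): for
# 2-D input, repmat(a, m, n) agrees with np.tile(a, (m, n)).
def _demo_repmat():
    import numpy as np
    a = np.arange(6).reshape(2, 3)
    assert (repmat(a, 2, 3) == np.tile(a, (2, 3))).all()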
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_distributor_init.py
'''
Helper to preload windows dlls to prevent dll not found errors.
Once a DLL is preloaded, its namespace is made available to any
subsequent DLL. This file originated in the numpy-wheels repo,
and is created as part of the scripts that build the wheel.
'''
import os
import glob
if os.name == 'nt':
# convention for storing / loading the DLL from
# numpy/.libs/, if present
try:
from ctypes import WinDLL
basedir = os.path.dirname(__file__)
except:
pass
else:
libs_dir = os.path.abspath(os.path.join(basedir, '.libs'))
DLL_filenames = []
if os.path.isdir(libs_dir):
for filename in glob.glob(os.path.join(libs_dir,
'*openblas*dll')):
# NOTE: would it change behavior to load ALL
# DLLs at this path vs. the name restriction?
WinDLL(os.path.abspath(filename))
DLL_filenames.append(filename)
if len(DLL_filenames) > 1:
import warnings
warnings.warn("loaded more than 1 DLL from .libs:"
"\n%s" % "\n".join(DLL_filenames),
stacklevel=1)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_pytesttester.py
"""
Pytest test running.
This module implements the ``test()`` function for NumPy modules. The usual
boiler plate for doing that is to put the following in the module
``__init__.py`` file::
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
Warnings filtering and other runtime settings should be dealt with in the
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
whether or not that file is found as follows:
* ``pytest.ini`` is present (develop mode)
All warnings except those explicitly filtered out are raised as error.
* ``pytest.ini`` is absent (release mode)
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
warnings are passed through.
In practice, tests run from the numpy repo are run in develop mode. That
includes the standard ``python runtests.py`` invocation.
This module is imported by every numpy subpackage, so lies at the top level to
simplify circular import issues. For the same reason, it contains no numpy
imports at module scope, instead importing numpy within function calls.
"""
import sys
import os
__all__ = ['PytestTester']
def _show_numpy_info():
import numpy as np
print("NumPy version %s" % np.__version__)
relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
print("NumPy relaxed strides checking option:", relaxed_strides)
info = np.lib.utils._opt_info()
print("NumPy CPU features: ", (info if info else 'nothing enabled'))
class PytestTester:
"""
Pytest test runner.
A test function is typically added to a package's __init__.py like so::
from numpy._pytesttester import PytestTester
test = PytestTester(__name__).test
del PytestTester
Calling this test function finds and runs all tests associated with the
module and all its sub-modules.
Attributes
----------
module_name : str
Full path to the package to test.
Parameters
----------
module_name : module name
The name of the module to test.
Notes
-----
Unlike the previous ``nose``-based implementation, this class is not
publicly exposed as it performs some ``numpy``-specific warning
suppression.
"""
def __init__(self, module_name):
self.module_name = module_name
def __call__(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False, durations=-1, tests=None):
"""
Run tests for module using pytest.
Parameters
----------
label : {'fast', 'full'}, optional
Identifies the tests to run. When set to 'fast', tests decorated
with `pytest.mark.slow` are skipped, when 'full', the slow marker
is ignored.
verbose : int, optional
Verbosity value for test outputs, in the range 1-3. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to pytests.
doctests : bool, optional
.. note:: Not supported
coverage : bool, optional
If True, report coverage of NumPy code. Default is False.
Requires installation of (pip) pytest-cov.
durations : int, optional
If < 0, do nothing. If 0, report the time of all tests. If > 0,
report the times of the slowest `durations` tests. Default is -1.
tests : test or list of tests
Tests to be executed with pytest '--pyargs'
Returns
-------
result : bool
Return True on success, false otherwise.
Notes
-----
Each NumPy module exposes `test` in its namespace to run all tests for
it. For example, to run all tests for numpy.lib:
>>> np.lib.test() #doctest: +SKIP
Examples
--------
>>> result = np.lib.test() #doctest: +SKIP
...
1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
>>> result
True
"""
import pytest
import warnings
module = sys.modules[self.module_name]
module_path = os.path.abspath(module.__path__[0])
# setup the pytest arguments
pytest_args = ["-l"]
# offset verbosity. The "-q" cancels a "-v".
pytest_args += ["-q"]
with warnings.catch_warnings():
warnings.simplefilter("always")
# Filter out distutils cpu warnings (could be localized to
# distutils tests). ASV has problems with top level import,
# so fetch module for suppression here.
from numpy.distutils import cpuinfo
with warnings.catch_warnings(record=True):
# Ignore the warning from importing the array_api submodule. This
# warning is done on import, so it would break pytest collection,
# but importing it early here prevents the warning from being
# issued when it imported again.
import numpy.array_api
# Filter out annoying import messages. Want these in both develop and
# release mode.
pytest_args += [
"-W ignore:Not importing directory",
"-W ignore:numpy.dtype size changed",
"-W ignore:numpy.ufunc size changed",
"-W ignore::UserWarning:cpuinfo",
]
# When testing matrices, ignore their PendingDeprecationWarnings
pytest_args += [
"-W ignore:the matrix subclass is not",
"-W ignore:Importing from numpy.matlib is",
]
if doctests:
raise ValueError("Doctests not supported")
if extra_argv:
pytest_args += list(extra_argv)
if verbose > 1:
pytest_args += ["-" + "v"*(verbose - 1)]
if coverage:
pytest_args += ["--cov=" + module_path]
if label == "fast":
# not importing at the top level to avoid circular import of module
from numpy.testing import IS_PYPY
if IS_PYPY:
pytest_args += ["-m", "not slow and not slow_pypy"]
else:
pytest_args += ["-m", "not slow"]
elif label != "full":
pytest_args += ["-m", label]
if durations >= 0:
pytest_args += ["--durations=%s" % durations]
if tests is None:
tests = [self.module_name]
pytest_args += ["--pyargs"] + list(tests)
# run tests.
_show_numpy_info()
try:
code = pytest.main(pytest_args)
except SystemExit as exc:
code = exc.code
return code == 0
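# Illustrative sketch (added for this document; not part of numpy): wiring the
# tester up by hand, mirroring the boilerplate in the module docstring.
# Actually invoking it requires pytest to be installed.
def _demo_pytest_tester():
    tester = PytestTester(__name__)
    return tester(label="fast", verbose=1)   # True when the collected tests pass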
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ctypeslib.py
"""
============================
``ctypes`` Utility Functions
============================
See Also
--------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
Our result type: an ndarray that must be of type double, 1-dimensional,
and C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #doctest: +SKIP
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
'as_ctypes_type']
import os
from numpy import (
integer, ndarray, dtype as _dtype, asarray, frombuffer
)
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
def _dummy(*args, **kwds):
"""
Dummy object that raises an ImportError if ctypes is not available.
Raises
------
ImportError
If ctypes is not available.
"""
raise ImportError("ctypes is not available.")
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
from numpy import intp as c_intp
_ndptr_base = object
else:
import numpy.core._internal as nic
c_intp = nic._getintp_ctype()
del nic
_ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
"""
It is possible to load a library using
>>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
But there are cross-platform considerations, such as library file extensions,
plus the fact that Windows will just load the first library it finds with that name.
NumPy supplies the load_library function as a convenience.
.. versionchanged:: 1.20.0
Allow libname and loader_path to take any
:term:`python:path-like object`.
Parameters
----------
libname : path-like
Name of the library, which can have 'lib' as a prefix,
but without an extension.
loader_path : path-like
Where the library can be found.
Returns
-------
ctypes.cdll[libpath] : library object
A ctypes library object
Raises
------
OSError
If there is no library with the expected extension, or the
library is defective and cannot be loaded.
"""
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work "
"with ctypes < 1.0.1", stacklevel=2)
# Convert path-like objects into strings
libname = os.fsdecode(libname)
loader_path = os.fsdecode(loader_path)
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
from numpy.distutils.misc_util import get_shared_lib_extension
so_ext = get_shared_lib_extension()
libname_ext = [libname + so_ext]
# mac, windows and linux >= py3.2 shared library and loadable
# module have different extensions so try both
so_ext2 = get_shared_lib_extension(is_python_ext=True)
if not so_ext2 == so_ext:
libname_ext.insert(0, libname + so_ext2)
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
for ln in libname_ext:
libpath = os.path.join(libdir, ln)
if os.path.exists(libpath):
try:
return ctypes.cdll[libpath]
except OSError:
## defective lib file
raise
## if no successful return in the libname_ext loop:
raise OSError("no file with expected extension")
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'WRITEBACKIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(_ndptr_base):
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError("argument must be an ndarray")
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError("array must have data type %s" % cls._dtype_)
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError("array must have %d dimension(s)" % cls._ndim_)
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError("array must have shape %s" % str(cls._shape_))
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError("array must have flags %s" %
_flags_fromnum(cls._flags_))
return obj.ctypes
class _concrete_ndptr(_ndptr):
"""
Like _ndptr, but with `_shape_` and `_dtype_` specified.
Notably, this means the pointer has enough information to reconstruct
the array, which is not generally true.
"""
def _check_retval_(self):
"""
This method is called when this class is used as the .restype
attribute for a shared-library function, to automatically wrap the
pointer into an array.
"""
return self.contents
@property
def contents(self):
"""
Get an ndarray viewing the data pointed to by this pointer.
This mirrors the `contents` attribute of a normal ctypes pointer
"""
full_dtype = _dtype((self._dtype_, self._shape_))
full_ctype = ctypes.c_char * full_dtype.itemsize
buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : str or tuple of str
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- WRITEBACKIFCOPY / X
Returns
-------
klass : ndpointer type object
A type object, which is an ``_ndptr`` instance containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
... #doctest: +SKIP
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
... #doctest: +SKIP
"""
# normalize dtype to an Optional[dtype]
if dtype is not None:
dtype = _dtype(dtype)
# normalize flags to an Optional[int]
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
except Exception as e:
raise TypeError("invalid flags specification") from e
num = _num_fromflags(flags)
# normalize shape to an Optional[tuple]
if shape is not None:
try:
shape = tuple(shape)
except TypeError:
# single integer -> 1-tuple
shape = (shape,)
cache_key = (dtype, ndim, shape, num)
try:
return _pointer_type_cache[cache_key]
except KeyError:
pass
# produce a name for the new type
if dtype is None:
name = 'any'
elif dtype.names is not None:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
name += "_"+"x".join(str(x) for x in shape)
if flags is not None:
name += "_"+"_".join(flags)
if dtype is not None and shape is not None:
base = _concrete_ndptr
else:
base = _ndptr
klass = type("ndpointer_%s"%name, (base,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
_pointer_type_cache[cache_key] = klass
return klass
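# Illustrative sketch (added for this document; not part of numpy): ndpointer
# types validate arrays through from_param, so the checks can be exercised
# without a real C library.
def _demo_ndpointer():
    import numpy as np
    ptr_t = ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS')
    ptr_t.from_param(np.zeros(4))                 # matches: float64, 1-D, contiguous
    try:
        ptr_t.from_param(np.zeros(4, dtype=np.float32))
    except TypeError:
        pass                                      # wrong dtype is rejected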
if ctypes is not None:
def _ctype_ndarray(element_type, shape):
""" Create an ndarray of the given element type and shape """
for dim in shape[::-1]:
element_type = dim * element_type
# prevent the type name include np.ctypeslib
element_type.__module__ = None
return element_type
def _get_scalar_type_map():
"""
Return a dictionary mapping native endian scalar dtype to ctypes types
"""
ct = ctypes
simple_types = [
ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
ct.c_float, ct.c_double,
ct.c_bool,
]
return {_dtype(ctype): ctype for ctype in simple_types}
_scalar_type_map = _get_scalar_type_map()
def _ctype_from_dtype_scalar(dtype):
# swapping twice ensures that `=` is promoted to <, >, or |
dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
dtype_native = dtype.newbyteorder('=')
try:
ctype = _scalar_type_map[dtype_native]
except KeyError as e:
raise NotImplementedError(
"Converting {!r} to a ctypes type".format(dtype)
) from None
if dtype_with_endian.byteorder == '>':
ctype = ctype.__ctype_be__
elif dtype_with_endian.byteorder == '<':
ctype = ctype.__ctype_le__
return ctype
def _ctype_from_dtype_subarray(dtype):
element_dtype, shape = dtype.subdtype
ctype = _ctype_from_dtype(element_dtype)
return _ctype_ndarray(ctype, shape)
def _ctype_from_dtype_structured(dtype):
# extract offsets of each field
field_data = []
for name in dtype.names:
field_dtype, offset = dtype.fields[name][:2]
field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
# ctypes doesn't care about field order
field_data = sorted(field_data, key=lambda f: f[0])
if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
# union, if multiple fields all at address 0
size = 0
_fields_ = []
for offset, name, ctype in field_data:
_fields_.append((name, ctype))
size = max(size, ctypes.sizeof(ctype))
# pad to the right size
if dtype.itemsize != size:
_fields_.append(('', ctypes.c_char * dtype.itemsize))
# we inserted manual padding, so always `_pack_`
return type('union', (ctypes.Union,), dict(
_fields_=_fields_,
_pack_=1,
__module__=None,
))
else:
last_offset = 0
_fields_ = []
for offset, name, ctype in field_data:
padding = offset - last_offset
if padding < 0:
raise NotImplementedError("Overlapping fields")
if padding > 0:
_fields_.append(('', ctypes.c_char * padding))
_fields_.append((name, ctype))
last_offset = offset + ctypes.sizeof(ctype)
padding = dtype.itemsize - last_offset
if padding > 0:
_fields_.append(('', ctypes.c_char * padding))
# we inserted manual padding, so always `_pack_`
return type('struct', (ctypes.Structure,), dict(
_fields_=_fields_,
_pack_=1,
__module__=None,
))
def _ctype_from_dtype(dtype):
if dtype.fields is not None:
return _ctype_from_dtype_structured(dtype)
elif dtype.subdtype is not None:
return _ctype_from_dtype_subarray(dtype)
else:
return _ctype_from_dtype_scalar(dtype)
def as_ctypes_type(dtype):
r"""
Convert a dtype into a ctypes type.
Parameters
----------
dtype : dtype
The dtype to convert
Returns
-------
ctype
A ctype scalar, union, array, or struct
Raises
------
NotImplementedError
If the conversion is not possible
Notes
-----
This function does not losslessly round-trip in either direction.
``np.dtype(as_ctypes_type(dt))`` will:
- insert padding fields
- reorder fields to be sorted by offset
- discard field titles
``as_ctypes_type(np.dtype(ctype))`` will:
- discard the class names of `ctypes.Structure`\ s and
`ctypes.Union`\ s
- convert single-element `ctypes.Union`\ s into single-element
`ctypes.Structure`\ s
- insert padding fields
"""
return _ctype_from_dtype(_dtype(dtype))
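# Illustrative sketch (added for this document; not part of numpy): a
# structured dtype converts to a ctypes Structure of the same size and layout.
def _demo_as_ctypes_type():
    import numpy as np
    dt = np.dtype([('x', np.int32), ('y', np.float64)])
    struct_t = as_ctypes_type(dt)
    assert ctypes.sizeof(struct_t) == dt.itemsize
    s = struct_t(x=2, y=0.5)
    assert (s.x, s.y) == (2, 0.5)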
def as_array(obj, shape=None):
"""
Create a numpy array from a ctypes array or POINTER.
The numpy array shares the memory with the ctypes object.
The shape parameter must be given if converting from a ctypes POINTER.
The shape parameter is ignored if converting from a ctypes array
"""
if isinstance(obj, ctypes._Pointer):
# convert pointers to an array of the desired shape
if shape is None:
raise TypeError(
'as_array() requires a shape argument when called on a '
'pointer')
p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
obj = ctypes.cast(obj, p_arr_type).contents
return asarray(obj)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
# can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
# dtype.itemsize (gh-14214)
ctype_scalar = as_ctypes_type(ai["typestr"])
result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
result = result_type.from_address(addr)
result.__keep = obj
return result
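# Illustrative sketch (added for this document; not part of numpy): as_ctypes
# and as_array both share memory with the source, so writes through one view
# are visible through the others.
def _demo_shared_memory():
    import numpy as np
    a = np.arange(4, dtype=np.int32)
    c = as_ctypes(a)          # ctypes array over a's buffer
    c[0] = 99
    assert a[0] == 99
    b = as_array(c)           # ndarray over the same buffer
    assert b[0] == 99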
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/__init__.py
"""
NumPy
=====
Provides
1. An array object of arbitrary homogeneous items
2. Fast mathematical operations over arrays
3. Linear Algebra, Fourier Transforms, Random Number Generation
How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
`the NumPy homepage <https://numpy.org>`_.
We recommend exploring the docstrings using
`IPython <https://ipython.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
The docstring examples assume that `numpy` has been imported as `np`::
>>> import numpy as np
Code snippets are indicated by three greater-than signs::
>>> x = 42
>>> x = x + 1
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
of the help() page. Ufuncs are implemented in C, not Python, for speed.
The native Python help() does not know how to view their help, but our
np.info() function does.
To search for documents containing a keyword, do::
>>> np.lookfor('keyword')
... # doctest: +SKIP
General-purpose documents like a glossary and help on the basic concepts
of numpy are available under the ``doc`` sub-module::
>>> from numpy import doc
>>> help(doc)
... # doctest: +SKIP
Available subpackages
---------------------
lib
Basic functions used by several sub-packages.
random
Core Random Tools
linalg
Core Linear Algebra Tools
fft
Core FFT routines
polynomial
Polynomial tools
testing
NumPy testing tools
distutils
Enhancements to distutils with support for
Fortran compilers support and more.
Utilities
---------
test
Run numpy unittests
show_config
Show numpy build configuration
dual
Overwrite certain functions with high-performance SciPy tools.
Note: `numpy.dual` is deprecated. Use the functions from NumPy or Scipy
directly instead of importing them from `numpy.dual`.
matlib
Make everything matrices.
__version__
NumPy version string
Viewing documentation using IPython
-----------------------------------
Start IPython with the NumPy profile (``ipython -p numpy``), which will
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
paste examples into the shell. To see which functions are available in
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
down the list. To view the docstring for a function, use
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
the source code).
Copies vs. in-place operation
-----------------------------
Most of the functions in `numpy` return a copy of the array argument
(e.g., `np.sort`). In-place versions of these functions are often
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
import sys
import warnings
from ._globals import (
ModuleDeprecationWarning, VisibleDeprecationWarning,
_NoValue, _CopyMode
)
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
__NUMPY_SETUP__
except NameError:
__NUMPY_SETUP__ = False
if __NUMPY_SETUP__:
sys.stderr.write('Running from numpy source directory.\n')
else:
try:
from numpy.__config__ import show as show_config
except ImportError as e:
msg = """Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there."""
raise ImportError(msg) from e
__all__ = ['ModuleDeprecationWarning',
'VisibleDeprecationWarning']
# mapping of {name: (value, deprecation_msg)}
__deprecated_attrs__ = {}
# Allow distributors to run custom init code
from . import _distributor_init
from . import core
from .core import *
from . import compat
from . import lib
# NOTE: to be revisited following future namespace cleanup.
# See gh-14454 and gh-15672 for discussion.
from .lib import *
from . import linalg
from . import fft
from . import polynomial
from . import random
from . import ctypeslib
from . import ma
from . import matrixlib as _mat
from .matrixlib import *
# Deprecations introduced in NumPy 1.20.0, 2020-06-06
import builtins as _builtins
_msg = (
"`np.{n}` is a deprecated alias for the builtin `{n}`. "
"To silence this warning, use `{n}` by itself. Doing this will not "
"modify any behavior and is safe. {extended_msg}\n"
"Deprecated in NumPy 1.20; for more details and guidance: "
"https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
_specific_msg = (
"If you specifically wanted the numpy scalar type, use `np.{}` here.")
_int_extended_msg = (
"When replacing `np.{}`, you may wish to use e.g. `np.int64` "
"or `np.int32` to specify the precision. If you wish to review "
"your current use, check the release note link for "
"additional information.")
_type_info = [
("object", ""), # The NumPy scalar only exists by name.
("bool", _specific_msg.format("bool_")),
("float", _specific_msg.format("float64")),
("complex", _specific_msg.format("complex128")),
("str", _specific_msg.format("str_")),
("int", _int_extended_msg.format("int"))]
__deprecated_attrs__.update({
n: (getattr(_builtins, n), _msg.format(n=n, extended_msg=extended_msg))
for n, extended_msg in _type_info
})
# Numpy 1.20.0, 2020-10-19
__deprecated_attrs__["typeDict"] = (
core.numerictypes.typeDict,
"`np.typeDict` is a deprecated alias for `np.sctypeDict`."
)
# NumPy 1.22, 2021-10-20
__deprecated_attrs__["MachAr"] = (
core._machar.MachAr,
"`np.MachAr` is deprecated (NumPy 1.22)."
)
_msg = (
"`np.{n}` is a deprecated alias for `np.compat.{n}`. "
"To silence this warning, use `np.compat.{n}` by itself. "
"In the likely event your code does not need to work on Python 2 "
"you can use the builtin `{n2}` for which `np.compat.{n}` is itself "
"an alias. Doing this will not modify any behaviour and is safe. "
"{extended_msg}\n"
"Deprecated in NumPy 1.20; for more details and guidance: "
"https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
__deprecated_attrs__["long"] = (
getattr(compat, "long"),
_msg.format(n="long", n2="int",
extended_msg=_int_extended_msg.format("long")))
__deprecated_attrs__["unicode"] = (
getattr(compat, "unicode"),
_msg.format(n="unicode", n2="str",
extended_msg=_specific_msg.format("str_")))
del _msg, _specific_msg, _int_extended_msg, _type_info, _builtins
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
# Remove one of the two occurrences of `issubdtype`, which is exposed as
# both `numpy.core.issubdtype` and `numpy.lib.issubdtype`.
__all__.remove('issubdtype')
# These are exported by np.core, but are replaced by the builtins below
# remove them to ensure that we don't end up with `np.long == np.int_`,
# which would be a breaking change.
del long, unicode
__all__.remove('long')
__all__.remove('unicode')
# Remove things that are in the numpy.lib but not in the numpy namespace
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
# that prevents adding more things to the main namespace by accident.
# The list below will grow until the `from .lib import *` fixme above is
# taken care of
__all__.remove('Arrayterator')
del Arrayterator
# These names were removed in NumPy 1.20. For at least one release,
# attempts to access these names in the numpy namespace will trigger
# a warning, and calling the function will raise an exception.
_financial_names = ['fv', 'ipmt', 'irr', 'mirr', 'nper', 'npv', 'pmt',
'ppmt', 'pv', 'rate']
__expired_functions__ = {
name: (f'In accordance with NEP 32, the function {name} was removed '
'from NumPy version 1.20. A replacement for this function '
'is available in the numpy_financial library: '
'https://pypi.org/project/numpy-financial')
for name in _financial_names}
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
# oldnumeric and numarray were removed in 1.9. In case some packages import
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
numarray = 'removed'
def __getattr__(attr):
# Warn for expired attributes, and return a dummy function
# that always raises an exception.
try:
msg = __expired_functions__[attr]
except KeyError:
pass
else:
warnings.warn(msg, DeprecationWarning, stacklevel=2)
def _expired(*args, **kwds):
raise RuntimeError(msg)
return _expired
# Emit warnings for deprecated attributes
try:
val, msg = __deprecated_attrs__[attr]
except KeyError:
pass
else:
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return val
# Importing Tester requires importing all of UnitTest, which is not a
# cheap import. Since it is mainly used in test suites, we lazily
# import it here to save on the order of 10 ms of import time for
# most users.
#
# The previous way Tester was imported also had a side effect of adding
# the full `numpy.testing` namespace
if attr == 'testing':
import numpy.testing as testing
return testing
elif attr == 'Tester':
from .testing import Tester
return Tester
raise AttributeError("module {!r} has no attribute "
"{!r}".format(__name__, attr))
def __dir__():
return list(globals().keys() | {'Tester', 'testing'})
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
There are some cases e.g. with wrong BLAS ABI that cause wrong
results under specific runtime conditions that are not necessarily
achieved during test suite runs, and it is useful to catch those early.
See https://github.com/numpy/numpy/issues/8577 and other
similar bug reports.
"""
try:
x = ones(2, dtype=float32)
if not abs(x.dot(x) - 2.0) < 1e-5:
raise AssertionError()
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
"pass simple sanity checks. This can be caused for example "
"by incorrect BLAS library being linked in, or by mixing "
"package managers (pip, conda, apt, ...). Search closed "
"numpy issues for similar problems.")
raise RuntimeError(msg.format(__file__)) from None
_sanity_check()
del _sanity_check
def _mac_os_check():
"""
Quick Sanity check for Mac OS look for accelerate build bugs.
Testing numpy polyfit calls init_dgelsd(LAPACK)
"""
try:
c = array([3., 2., 1.])
x = linspace(0, 2, 5)
y = polyval(c, x)
_ = polyfit(x, y, 2, cov=True)
except ValueError:
pass
import sys
if sys.platform == "darwin":
with warnings.catch_warnings(record=True) as w:
_mac_os_check()
# Raise a runtime error if the test failed; check for warnings to build the error message
error_message = ""
if len(w) > 0:
error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
msg = (
"Polyfit sanity test emitted a warning, most likely due "
"to using a buggy Accelerate backend."
"\nIf you compiled yourself, more information is available at:"
"\nhttps://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries"
"\nOtherwise report this to the vendor "
"that provided NumPy.\n{}\n".format(error_message))
raise RuntimeError(msg)
del _mac_os_check
# We usually use madvise hugepages support, but on some old kernels it
# is slow and thus better avoided.
# Specifically kernel version 4.6 had a bug fix which probably fixed this:
# https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
import os
use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
if sys.platform == "linux" and use_hugepage is None:
# If there is an issue with parsing the kernel version,
# set use_hugepage to 0. Using LooseVersion would handle
# the kernel version parsing better, but it is avoided since it
# would increase the import time. See: #16679 for related discussion.
try:
use_hugepage = 1
kernel_version = os.uname().release.split(".")[:2]
kernel_version = tuple(int(v) for v in kernel_version)
if kernel_version < (4, 6):
use_hugepage = 0
except ValueError:
use_hugepage = 0
elif use_hugepage is None:
# This is not Linux, so it should not matter, just enable anyway
use_hugepage = 1
else:
use_hugepage = int(use_hugepage)
# Note that this will currently only make a difference on Linux
core.multiarray._set_madvise_hugepage(use_hugepage)
# Give a warning if NumPy is reloaded or imported on a sub-interpreter
# We do this from python, since the C-module may not be reloaded and
# it is tidier organized.
core.multiarray._multiarray_umath._reload_guard()
# Tell PyInstaller where to find hook-numpy.py
def _pyinstaller_hooks_dir():
from pathlib import Path
return [str(Path(__file__).with_name("_pyinstaller").resolve())]
# get the version using versioneer
from .version import __version__, git_revision as __git_version__
| 15,398 | Python | 35.664286 | 105 | 0.628004 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/version.py | from __future__ import annotations
from ._version import get_versions
__all__ = ['version', '__version__', 'full_version', 'git_revision', 'release']
vinfo: dict[str, str] = get_versions()
version = vinfo["version"]
__version__ = vinfo.get("closest-tag", vinfo["version"])
full_version = vinfo['version']
git_revision = vinfo['full-revisionid']
release = 'dev0' not in version and '+' not in version
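# For illustration: a tagged build such as "1.22.4" gives release=True,
# while a development build such as "1.23.0.dev0+171.g1234567" contains
# both 'dev0' and '+' and gives release=False.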
short_version = vinfo['version'].split("+")[0]
del get_versions, vinfo
| 475 | Python | 28.749998 | 79 | 0.68 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/setup.py | #!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numpy', parent_package, top_path)
config.add_subpackage('array_api')
config.add_subpackage('compat')
config.add_subpackage('core')
config.add_subpackage('distutils')
config.add_subpackage('doc')
config.add_subpackage('f2py')
config.add_subpackage('fft')
config.add_subpackage('lib')
config.add_subpackage('linalg')
config.add_subpackage('ma')
config.add_subpackage('matrixlib')
config.add_subpackage('polynomial')
config.add_subpackage('random')
config.add_subpackage('testing')
config.add_subpackage('typing')
config.add_subpackage('_typing')
config.add_data_dir('doc')
config.add_data_files('py.typed')
config.add_data_files('*.pyi')
config.add_subpackage('tests')
config.add_subpackage('_pyinstaller')
config.make_config_py() # installs __config__.py
return config
if __name__ == '__main__':
print('This is the wrong setup.py file to run')
| 1,101 | Python | 32.393938 | 61 | 0.682107 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/dual.py | """
.. deprecated:: 1.20
*This module is deprecated. Instead of importing functions from*
``numpy.dual``, *the functions should be imported directly from NumPy
or SciPy*.
Aliases for functions which may be accelerated by SciPy.
SciPy_ can be built to use accelerated or otherwise improved libraries
for FFTs, linear algebra, and special functions. This module allows
developers to transparently support these accelerated functions when
SciPy is available but still support users who have only installed
NumPy.
.. _SciPy : https://www.scipy.org
"""
import warnings
warnings.warn('The module numpy.dual is deprecated. Instead of using dual, '
'use the functions directly from numpy or scipy.',
category=DeprecationWarning,
stacklevel=2)
# This module should be used for functions both in numpy and scipy if
# you want to use the numpy version if available but the scipy version
# otherwise.
# Usage --- from numpy.dual import fft, inv
__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2',
'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals',
'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0']
import numpy.linalg as linpkg
import numpy.fft as fftpkg
from numpy.lib import i0
import sys
fft = fftpkg.fft
ifft = fftpkg.ifft
fftn = fftpkg.fftn
ifftn = fftpkg.ifftn
fft2 = fftpkg.fft2
ifft2 = fftpkg.ifft2
norm = linpkg.norm
inv = linpkg.inv
svd = linpkg.svd
solve = linpkg.solve
det = linpkg.det
eig = linpkg.eig
eigvals = linpkg.eigvals
eigh = linpkg.eigh
eigvalsh = linpkg.eigvalsh
lstsq = linpkg.lstsq
pinv = linpkg.pinv
cholesky = linpkg.cholesky
_restore_dict = {}
def register_func(name, func):
if name not in __all__:
raise ValueError("{} not a dual function.".format(name))
f = sys._getframe(0).f_globals
_restore_dict[name] = f[name]
f[name] = func
def restore_func(name):
if name not in __all__:
raise ValueError("{} not a dual function.".format(name))
try:
val = _restore_dict[name]
except KeyError:
return
else:
sys._getframe(0).f_globals[name] = val
def restore_all():
for name in _restore_dict.keys():
restore_func(name)
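def _demo_register_func():
    # Minimal sketch of the register/restore mechanism. `_fast_inv` is a
    # stand-in for an accelerated implementation (e.g. one that SciPy
    # might provide); any callable with a compatible signature would do.
    import numpy as np

    def _fast_inv(a):
        return np.linalg.inv(a)  # placeholder "accelerated" routine

    register_func('inv', _fast_inv)
    assert inv is _fast_inv          # module-level `inv` now dispatches here
    restore_func('inv')
    assert inv is linpkg.inv         # the original binding is restored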
| 2,214 | Python | 25.369047 | 77 | 0.67841 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_globals.py | """
Module defining global singleton classes.
This module raises a RuntimeError if an attempt to reload it is made. In that
way the identities of the classes defined here are fixed and will remain so
even if numpy itself is reloaded. In particular, a function like the following
will still work correctly after numpy is reloaded::
def foo(arg=np._NoValue):
if arg is np._NoValue:
...
That was not the case when the singleton classes were defined in the numpy
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
motivated this module.
"""
import enum
__all__ = [
'ModuleDeprecationWarning', 'VisibleDeprecationWarning',
'_NoValue', '_CopyMode'
]
# Disallow reloading this module so as to preserve the identities of the
# classes defined here.
if '_is_loaded' in globals():
raise RuntimeError('Reloading numpy._globals is not allowed')
_is_loaded = True
class ModuleDeprecationWarning(DeprecationWarning):
"""Module deprecation warning.
The nose tester turns ordinary Deprecation warnings into test failures.
That makes it hard to deprecate whole modules, because they get
imported by default. So this is a special Deprecation warning that the
nose tester will let pass without making tests fail.
"""
ModuleDeprecationWarning.__module__ = 'numpy'
class VisibleDeprecationWarning(UserWarning):
"""Visible deprecation warning.
By default, python will not show deprecation warnings, so this class
can be used when a very visible warning is helpful, for example because
the usage is most likely a user bug.
"""
VisibleDeprecationWarning.__module__ = 'numpy'
class _NoValueType:
"""Special keyword value.
The instance of this class may be used as the default value assigned to a
keyword if no other obvious default (e.g., `None`) is suitable.
Common reasons for using this keyword are:
- A new keyword is added to a function, and that function forwards its
inputs to another function or method which can be defined outside of
NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
keyword was added that could only be forwarded if the user explicitly
specified ``keepdims``; downstream array libraries may not have added
the same keyword, so adding ``x.std(..., keepdims=keepdims)``
unconditionally could have broken previously working code.
- A keyword is being deprecated, and a deprecation warning must only be
emitted when the keyword is used.
"""
__instance = None
def __new__(cls):
# ensure that only one instance exists
if not cls.__instance:
cls.__instance = super().__new__(cls)
return cls.__instance
# needed for python 2 to preserve identity through a pickle
def __reduce__(self):
return (self.__class__, ())
def __repr__(self):
return "<no value>"
_NoValue = _NoValueType()
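def _demo_no_value():
    # Minimal sketch of the sentinel pattern `_NoValue` supports; the
    # function `_f` and its `keepdims` keyword are illustrative only.
    assert _NoValueType() is _NoValue   # only one instance ever exists

    def _f(arr, keepdims=_NoValue):
        if keepdims is _NoValue:
            # The caller did not pass the keyword, so it is safe to omit
            # it when forwarding to code that may not accept it.
            return sum(arr)
        return sum(arr)  # here one would forward keepdims=keepdims

    assert _f([1, 2, 3]) == 6
    assert _f([1, 2, 3], keepdims=False) == 6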
class _CopyMode(enum.Enum):
"""
An enumeration for the copy modes supported
by numpy.copy() and numpy.array(). The following three modes are supported,
- ALWAYS: This means that a deep copy of the input
array will always be taken.
- IF_NEEDED: This means that a deep copy of the input
array will be taken only if necessary.
- NEVER: This means that the deep copy will never be taken.
If a copy cannot be avoided then a `ValueError` will be
raised.
Note that the buffer-protocol could in theory do copies. NumPy currently
assumes an object exporting the buffer protocol will never do this.
"""
ALWAYS = True
IF_NEEDED = False
NEVER = 2
def __bool__(self):
# For backwards compatibility
if self == _CopyMode.ALWAYS:
return True
if self == _CopyMode.IF_NEEDED:
return False
raise ValueError(f"{self} is neither True nor False.")
_CopyMode.__module__ = 'numpy'
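def _demo_copy_mode():
    # Minimal sketch of _CopyMode's backwards-compatible truthiness:
    # ALWAYS and IF_NEEDED coerce to the booleans that legacy `copy=`
    # arguments expect, while NEVER refuses to be treated as a bool.
    assert bool(_CopyMode.ALWAYS) is True
    assert bool(_CopyMode.IF_NEEDED) is False
    try:
        bool(_CopyMode.NEVER)
    except ValueError:
        pass  # NEVER is neither True nor False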
| 4,012 | Python | 29.869231 | 79 | 0.679711 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/conftest.py | """
Pytest configuration and fixtures for the Numpy test suite.
"""
import os
import tempfile
import hypothesis
import pytest
import numpy
from numpy.core._multiarray_tests import get_fpu_mode
_old_fpu_mode = None
_collect_results = {}
# Use a known and persistent tmpdir for hypothesis' caches, which
# can be automatically cleared by the OS or user.
hypothesis.configuration.set_hypothesis_home_dir(
os.path.join(tempfile.gettempdir(), ".hypothesis")
)
# We register two custom profiles for Numpy - for details see
# https://hypothesis.readthedocs.io/en/latest/settings.html
# The first is designed for our own CI runs; the latter also
# forces determinism and is designed for use via np.test()
hypothesis.settings.register_profile(
name="numpy-profile", deadline=None, print_blob=True,
)
hypothesis.settings.register_profile(
name="np.test() profile",
deadline=None, print_blob=True, database=None, derandomize=True,
suppress_health_check=hypothesis.HealthCheck.all(),
)
# Note that the default profile is chosen based on the presence
# of pytest.ini, but can be overridden by passing the
# --hypothesis-profile=NAME argument to pytest.
_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
hypothesis.settings.load_profile(
"numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
)
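# For example, to force the deterministic profile on a local checkout one
# could run (command shown for illustration only):
#
#     pytest --hypothesis-profile="np.test() profile" numpy/core/tests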
def pytest_configure(config):
config.addinivalue_line("markers",
"valgrind_error: Tests that are known to error under valgrind.")
config.addinivalue_line("markers",
"leaks_references: Tests that are known to leak references.")
config.addinivalue_line("markers",
"slow: Tests that are very slow.")
config.addinivalue_line("markers",
"slow_pypy: Tests that are very slow on pypy.")
def pytest_addoption(parser):
parser.addoption("--available-memory", action="store", default=None,
help=("Set amount of memory available for running the "
"test suite. This can result to tests requiring "
"especially large amounts of memory to be skipped. "
"Equivalent to setting environment variable "
"NPY_AVAILABLE_MEM. Default: determined"
"automatically."))
def pytest_sessionstart(session):
available_mem = session.config.getoption('available_memory')
if available_mem is not None:
os.environ['NPY_AVAILABLE_MEM'] = available_mem
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
"""
Check FPU precision mode was not changed during test collection.
The clumsy way we do it here is mainly necessary because numpy
still uses yield tests, which can execute code at test collection
time.
"""
global _old_fpu_mode
mode = get_fpu_mode()
if _old_fpu_mode is None:
_old_fpu_mode = mode
elif mode != _old_fpu_mode:
_collect_results[item] = (_old_fpu_mode, mode)
_old_fpu_mode = mode
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
"""
Check FPU precision mode was not changed during the test.
"""
old_mode = get_fpu_mode()
yield
new_mode = get_fpu_mode()
if old_mode != new_mode:
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
" during the test".format(old_mode, new_mode))
collect_result = _collect_results.get(request.node)
if collect_result is not None:
old_mode, new_mode = collect_result
raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
" when collecting the test".format(old_mode,
new_mode))
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace['np'] = numpy
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
monkeypatch.setenv('PYTHONHASHSEED', '0')
| 4,032 | Python | 32.608333 | 79 | 0.659474 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/__config__.py | # This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.add_dll_directory(extra_dll_dir)
openblas64__info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__info'], 'libraries': ['openblas64__info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)]}
blas_ilp64_opt_info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__info'], 'libraries': ['openblas64__info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)]}
openblas64__lapack_info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__lapack_info'], 'libraries': ['openblas64__lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)]}
lapack_ilp64_opt_info={'library_dirs': ['D:\\a\\numpy\\numpy\\build\\openblas64__lapack_info'], 'libraries': ['openblas64__lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)]}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
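def _demo_get_info():
    # Minimal usage sketch for `get_info`; the helper name is illustrative
    # only. Both the short and the suffixed name resolve to the same dict,
    # and unknown names fall back to an empty dict.
    assert get_info('blas_ilp64_opt') == get_info('blas_ilp64_opt_info')
    assert get_info('no_such_library') == {}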
def show():
"""
Show libraries in the system on which NumPy was built.
Print information about various resources (libraries, library
directories, include directories, etc.) in the system on which
NumPy was built.
See Also
--------
get_include : Returns the directory containing NumPy C
header files.
Notes
-----
1. Classes specifying the information to be printed are defined
in the `numpy.distutils.system_info` module.
Information may include:
* ``language``: language used to write the libraries (mostly
C or f77)
* ``libraries``: names of libraries found in the system
* ``library_dirs``: directories containing the libraries
* ``include_dirs``: directories containing library header files
* ``src_dirs``: directories containing library source files
* ``define_macros``: preprocessor macros used by
``distutils.setup``
* ``baseline``: minimum CPU features required
* ``found``: dispatched features supported in the system
* ``not found``: dispatched features that are not supported
in the system
2. NumPy BLAS/LAPACK Installation Notes
Installing a numpy wheel (``pip install numpy`` or force it
via ``pip install numpy --only-binary :numpy: numpy``) includes
an OpenBLAS implementation of the BLAS and LAPACK linear algebra
APIs. In this case, ``library_dirs`` reports the original build
time configuration as compiled with gcc/gfortran; at run time
the OpenBLAS library is in
``site-packages/numpy.libs/`` (linux), or
``site-packages/numpy/.dylibs/`` (macOS), or
``site-packages/numpy/.libs/`` (windows).
Installing numpy from source
(``pip install numpy --no-binary numpy``) searches for BLAS and
LAPACK dynamic link libraries at build time as influenced by
environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
or the optional file ``~/.numpy-site.cfg``.
NumPy remembers those locations and expects to load the same
libraries at run-time.
In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
library) is in the default build-time search order after
'openblas'.
Examples
--------
>>> import numpy as np
>>> np.show_config()
blas_opt_info:
language = c
define_macros = [('HAVE_CBLAS', None)]
libraries = ['openblas', 'openblas']
library_dirs = ['/usr/local/lib']
"""
from numpy.core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
features_found.append(feature)
else:
features_not_found.append(feature)
print("Supported SIMD extensions in this NumPy install:")
print(" baseline = %s" % (','.join(__cpu_baseline__)))
print(" found = %s" % (','.join(features_found)))
print(" not found = %s" % (','.join(features_not_found)))
| 5,083 | Python | 42.827586 | 281 | 0.616762 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/__init__.py | """
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator Class implementing all of the random number distributions
default_rng Default constructor for ``Generator``
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
PCG64DXSM
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with previous versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
random_integers Uniformly distributed integers in a given range.
(deprecated, use ``integers(..., endpoint=True)`` instead)
random_sample Uniformly distributed floats over ``[0, 1)``.
randint Uniformly distributed integers in a given range
seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
__all__ = [
'beta',
'binomial',
'bytes',
'chisquare',
'choice',
'dirichlet',
'exponential',
'f',
'gamma',
'geometric',
'get_state',
'gumbel',
'hypergeometric',
'laplace',
'logistic',
'lognormal',
'logseries',
'multinomial',
'multivariate_normal',
'negative_binomial',
'noncentral_chisquare',
'noncentral_f',
'normal',
'pareto',
'permutation',
'poisson',
'power',
'rand',
'randint',
'randn',
'random',
'random_integers',
'random_sample',
'ranf',
'rayleigh',
'sample',
'seed',
'set_state',
'shuffle',
'standard_cauchy',
'standard_exponential',
'standard_gamma',
'standard_normal',
'standard_t',
'triangular',
'uniform',
'vonmises',
'wald',
'weibull',
'zipf',
]
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
from . import _common
from . import _bounded_integers
from ._generator import Generator, default_rng
from .bit_generator import SeedSequence, BitGenerator
from ._mt19937 import MT19937
from ._pcg64 import PCG64, PCG64DXSM
from ._philox import Philox
from ._sfc64 import SFC64
from .mtrand import *
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
'BitGenerator']
def __RandomState_ctor():
"""Return a RandomState instance.
This function exists solely to assist (un)pickling.
Note that the state of the RandomState returned here is irrelevant, as this
function's entire purpose is to return a newly allocated RandomState whose
state a pickle can then set. Consequently, the RandomState returned by this
function is a freshly allocated instance with seed=0.
See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
"""
return RandomState(seed=0)
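def _demo_randomstate_pickle():
    # Minimal sketch of why __RandomState_ctor exists: unpickling first
    # calls the ctor to get a blank RandomState, then restores the saved
    # state, so the round-tripped generator continues the original stream.
    import pickle
    rs = RandomState(12345)
    rs.random_sample(3)                  # advance the stream a little
    clone = pickle.loads(pickle.dumps(rs))
    assert (clone.random_sample(5) == rs.random_sample(5)).all()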
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| 7,506 | Python | 33.754629 | 81 | 0.493472 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_pickle.py | from .mtrand import RandomState
from ._philox import Philox
from ._pcg64 import PCG64, PCG64DXSM
from ._sfc64 import SFC64
from ._generator import Generator
from ._mt19937 import MT19937
BitGenerators = {'MT19937': MT19937,
'PCG64': PCG64,
'PCG64DXSM': PCG64DXSM,
'Philox': Philox,
'SFC64': SFC64,
}
def __generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a Generator object
Parameters
----------
bit_generator_name : str
String containing the core BitGenerator
Returns
-------
rg : Generator
Generator using the named core BitGenerator
"""
if bit_generator_name in BitGenerators:
bit_generator = BitGenerators[bit_generator_name]
else:
raise ValueError(str(bit_generator_name) + ' is not a known '
'BitGenerator module.')
return Generator(bit_generator())
def __bit_generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a bit generator object
Parameters
----------
bit_generator_name : str
String containing the name of the BitGenerator
Returns
-------
bit_generator : BitGenerator
BitGenerator instance
"""
if bit_generator_name in BitGenerators:
bit_generator = BitGenerators[bit_generator_name]
else:
raise ValueError(str(bit_generator_name) + ' is not a known '
'BitGenerator module.')
return bit_generator()
def __randomstate_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a legacy RandomState-like object
Parameters
----------
bit_generator_name : str
String containing the core BitGenerator
Returns
-------
rs : RandomState
Legacy RandomState using the named core BitGenerator
"""
if bit_generator_name in BitGenerators:
bit_generator = BitGenerators[bit_generator_name]
else:
raise ValueError(str(bit_generator_name) + ' is not a known '
'BitGenerator module.')
return RandomState(bit_generator())
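def _demo_ctors():
    # Minimal sketch of the pickling helpers above (illustrative only):
    # each ctor rebuilds a fresh object from the BitGenerator's string
    # name, exactly as pickle does before restoring the saved state.
    gen = __generator_ctor('PCG64')
    assert isinstance(gen.bit_generator, PCG64)
    bg = __bit_generator_ctor('Philox')
    assert isinstance(bg, Philox)
    rs = __randomstate_ctor('MT19937')
    assert isinstance(rs, RandomState)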
| 2,305 | Python | 26.452381 | 74 | 0.597831 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/setup.py | import os
import platform
import sys
from os.path import join
from numpy.distutils.system_info import platform_bits
is_msvc = (platform.platform().startswith('Windows') and
platform.python_compiler().startswith('MS'))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
if sys.platform == 'win32':
libs.extend(['Advapi32', 'Kernel32'])
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
if sys.platform[:3] == 'aix':
defs = [('_LARGE_FILES', None)]
else:
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1')]
defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_subpackage('tests')
config.add_data_dir('tests/data')
config.add_data_dir('_examples')
EXTRA_LINK_ARGS = []
EXTRA_LIBRARIES = ['npyrandom']
if os.name != 'nt':
# Math lib
EXTRA_LIBRARIES.append('m')
# Some bit generators exclude GCC inlining
EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__']
if is_msvc and platform_bits == 32:
# 32-bit windows requires explicit sse2 option
EXTRA_COMPILE_ARGS += ['/arch:SSE2']
elif not is_msvc:
# Some bit generators require c99
EXTRA_COMPILE_ARGS += ['-std=c99']
if sys.platform == 'cygwin':
# Export symbols without __declspec(dllexport) for using by cython.
# Using __declspec(dllexport) does not export other necessary symbols
# in Cygwin package's Cython environment, making it impossible to
# import modules.
EXTRA_LINK_ARGS += ['-Wl,--export-all-symbols']
# Use legacy integer variable sizes
LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
PCG64_DEFS = []
# One can force emulated 128-bit arithmetic if one wants.
#PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
depends = ['__init__.pxd', 'c_distributions.pxd', 'bit_generator.pxd']
# npyrandom - a library like npymath
npyrandom_sources = [
'src/distributions/logfactorial.c',
'src/distributions/distributions.c',
'src/distributions/random_mvhg_count.c',
'src/distributions/random_mvhg_marginals.c',
'src/distributions/random_hypergeometric.c',
]
def gl_if_msvc(build_cmd):
""" Add flag if we are using MSVC compiler
We cannot detect the compiler in this scope, because the distutils
build command has not been initialized yet, so we defer the
calculation until the library is actually being built.
"""
# Keep in sync with numpy/core/setup.py
if build_cmd.compiler.compiler_type == 'msvc':
# explicitly disable whole-program optimization
return ['/GL-']
return []
config.add_installed_library('npyrandom',
sources=npyrandom_sources,
install_dir='lib',
build_info={
'include_dirs' : [], # empty list required for creating npyrandom.h
'extra_compiler_args': [gl_if_msvc],
})
for gen in ['mt19937']:
# gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
config.add_extension(f'_{gen}',
sources=[f'_{gen}.c',
f'src/{gen}/{gen}.c',
f'src/{gen}/{gen}-jump.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'_{gen}.pyx'],
define_macros=defs,
)
for gen in ['philox', 'pcg64', 'sfc64']:
# gen.pyx, src/gen/gen.c
_defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
config.add_extension(f'_{gen}',
sources=[f'_{gen}.c',
f'src/{gen}/{gen}.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'_{gen}.pyx',
'bit_generator.pyx', 'bit_generator.pxd'],
define_macros=_defs,
)
for gen in ['_common', 'bit_generator']:
# gen.pyx
config.add_extension(gen,
sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
include_dirs=['.', 'src'],
depends=depends + [f'{gen}.pyx', f'{gen}.pxd',],
define_macros=defs,
)
config.add_data_files(f'{gen}.pxd')
for gen in ['_generator', '_bounded_integers']:
# gen.pyx, src/distributions/distributions.c
config.add_extension(gen,
sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES + ['npymath'],
extra_compile_args=EXTRA_COMPILE_ARGS,
include_dirs=['.', 'src'],
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'{gen}.pyx'],
define_macros=defs,
)
config.add_data_files('_bounded_integers.pxd')
mtrand_libs = ['m', 'npymath'] if os.name != 'nt' else ['npymath']
config.add_extension('mtrand',
sources=['mtrand.c',
'src/legacy/legacy-distributions.c',
'src/distributions/distributions.c',
],
include_dirs=['.', 'src', 'src/legacy'],
libraries=mtrand_libs,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + ['mtrand.pyx'],
define_macros=defs + LEGACY_DEFS,
)
config.add_data_files(*depends)
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 6,998 | Python | 40.170588 | 80 | 0.507288 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_random.py | import warnings
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed:
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState,
np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
def test_multidimensional_pvals(self):
assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]])
assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]])
assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]])
assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestSetState:
def setup_method(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint:
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a sha256 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in NumPy arrays, `lbnd` is cast
# as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when NumPy tries to compare both numbers,
# it casts both to np.float64, since there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` then equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup_method(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random(self):
np.random.seed(self.seed)
actual = np.random.random((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
assert_equal(np.random.choice(0, size=0).shape, (0,))
assert_equal(np.random.choice([], size=(0,)).shape, (0,))
assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, np.random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
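        # (None entries become NaN when cast to a float probability
        # array, which must raise)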
assert_raises(ValueError, np.random.choice, a, p=p)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object), ("b", np.int32)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
@pytest.mark.parametrize("random",
[np.random, np.random.RandomState(), np.random.default_rng()])
def test_shuffle_untyped_warning(self, random):
        # Create a dict that works like a sequence but isn't one
values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
with pytest.warns(UserWarning,
match="you are shuffling a 'dict' object") as rec:
random.shuffle(values)
assert "test_random" in rec[0].filename
@pytest.mark.parametrize("random",
[np.random, np.random.RandomState(), np.random.default_rng()])
@pytest.mark.parametrize("use_array_like", [True, False])
def test_shuffle_no_object_unpacking(self, random, use_array_like):
class MyArr(np.ndarray):
pass
items = [
None, np.array([3]), np.float64(3), np.array(10), np.float64(7)
]
arr = np.array(items, dtype=object)
item_ids = {id(i) for i in items}
if use_array_like:
arr = arr.view(MyArr)
# The array was created fine, and did not modify any objects:
assert all(id(i) in item_ids for i in arr)
if use_array_like and not isinstance(random, np.random.Generator):
# The old API gives incorrect results, but warns about it.
with pytest.warns(UserWarning,
match="Shuffling a one dimensional array.*"):
random.shuffle(arr)
else:
random.shuffle(arr)
assert all(id(i) in item_ids for i in arr)
def test_shuffle_memoryview(self):
# gh-18273
# allow graceful handling of memoryviews
# (treat the same as arrays)
np.random.seed(self.seed)
a = np.arange(5).data
np.random.shuffle(a)
assert_equal(np.asarray(a), [0, 1, 4, 3, 2])
rng = np.random.RandomState(self.seed)
rng.shuffle(a)
assert_equal(np.asarray(a), [0, 1, 2, 3, 4])
rng = np.random.default_rng(self.seed)
rng.shuffle(a)
assert_equal(np.asarray(a), [4, 1, 0, 3, 2])
def test_shuffle_not_writeable(self):
a = np.zeros(3)
a.flags.writeable = False
with pytest.raises(ValueError, match='read-only'):
np.random.shuffle(a)
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
np.random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
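        # (one "nulp" is a unit-in-the-last-place spacing between
        # adjacent representable floats, so nulp=30 allows about 30
        # representable values of slack)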
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
        # (fmax / 1e17) - fmin is within range, so this should not throw.
        # Account for i386 extended precision, where DBL_MAX / 1e17 +
        # DBL_MAX > DBL_MAX, by increasing fmin a bit.
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
__index__ = __int__
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
np.random.seed(self.seed)
assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
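# A minimal sketch of the reproducibility pattern the tests above rely
# on: re-seeding the legacy global state replays the exact same stream,
# which is what keeps the hard-coded `desired` arrays stable. (The
# helper name is illustrative only.)
def _reseed_replays_stream_sketch():
    np.random.seed(1234)
    first = np.random.random(3)
    np.random.seed(1234)
    assert np.array_equal(first, np.random.random(3))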
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
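    # e.g. np.random.normal(loc=[0., 0., 0.], scale=1.) broadcasts the
    # scalar scale against the length-3 loc and yields three variates.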
def setup_method(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.setSeed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, wald, 0.0, 1)
assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
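        # (right * 2 == [3, 3], so both unpack to 3: left == mode ==
        # right, which is invalid)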
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
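# A minimal sketch of the broadcasting contract exercised by
# TestBroadcast: list-valued parameters set the output shape, and a
# single invalid entry fails the whole call. (The helper name is
# illustrative only.)
def _broadcast_contract_sketch():
    vals = np.random.normal(loc=[0.0, 1.0, 2.0], scale=1.0)
    assert vals.shape == (3,)
    with pytest.raises(ValueError):
        np.random.normal(loc=[0.0, 1.0, 2.0], scale=[1.0, -1.0, 1.0])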
class TestThread:
# make sure each state produces the same sequence even in threads
def setup_method(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
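# A minimal sketch of the determinism property TestThread depends on:
# identically seeded RandomState instances yield identical streams, so
# threaded and serial output can be compared element for element. (The
# helper name is illustrative only.)
def _seeded_streams_match_sketch():
    out1 = np.random.RandomState(0).normal(size=4)
    out2 = np.random.RandomState(0).normal(size=4)
    assert np.array_equal(out1, out2)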
# See Issue #4263
class TestSingleEltArrayInput:
def setup_method(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
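# A minimal sketch of the single-element-array rule checked by
# TestSingleEltArrayInput: a shape-(1,) parameter gives a shape-(1,)
# result rather than a scalar. (The helper name is illustrative only.)
def _single_elt_shape_sketch():
    out = np.random.exponential(np.array([2.0]))
    assert out.shape == (1,)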
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_extending.py
import os
import pytest
import shutil
import subprocess
import sys
import warnings
import numpy as np
from numpy.distutils.misc_util import exec_mod_from_location
try:
import cffi
except ImportError:
cffi = None
if sys.flags.optimize > 1:
# no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
# cffi cannot succeed
cffi = None
try:
with warnings.catch_warnings(record=True) as w:
# numba issue gh-4733
warnings.filterwarnings('always', '', DeprecationWarning)
import numba
except ImportError:
numba = None
try:
import cython
from Cython.Compiler.Version import version as cython_version
except ImportError:
cython = None
else:
from numpy.compat import _pep440
# Cython 0.29.30 is required for Python 3.11 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with the one in pyproject.toml
required_version = '0.29.30'
if _pep440.parse(cython_version) < _pep440.Version(required_version):
# too old or wrong cython, skip the test
cython = None
@pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.mark.slow
def test_cython(tmp_path):
srcdir = os.path.join(os.path.dirname(__file__), '..')
shutil.copytree(srcdir, tmp_path / 'random')
# build the examples and "install" them into a temporary directory
build_dir = tmp_path / 'random' / '_examples' / 'cython'
subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
'--prefix', str(tmp_path / 'installdir'),
'--single-version-externally-managed',
                           '--record', str(tmp_path / 'tmp_install_log.txt'),
],
cwd=str(build_dir),
)
# gh-16162: make sure numpy's __init__.pxd was used for cython
# not really part of this test, but it is a convenient place to check
with open(build_dir / 'extending.c') as fid:
txt_to_find = 'NumPy API declarations from "numpy/__init__.pxd"'
for i, line in enumerate(fid):
if txt_to_find in line:
break
else:
assert False, ("Could not find '{}' in C file, "
"wrong pxd used".format(txt_to_find))
# get the path to the so's
so1 = so2 = None
    with open(tmp_path / 'tmp_install_log.txt') as fid:
for line in fid:
if 'extending.' in line:
so1 = line.strip()
if 'extending_distributions' in line:
so2 = line.strip()
assert so1 is not None
assert so2 is not None
# import the so's without adding the directory to sys.path
exec_mod_from_location('extending', so1)
extending_distributions = exec_mod_from_location(
'extending_distributions', so2)
# actually test the cython c-extension
from numpy.random import PCG64
values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
assert values.shape == (10,)
assert values.dtype == np.float64
@pytest.mark.skipif(numba is None or cffi is None,
reason="requires numba and cffi")
def test_numba():
from numpy.random._examples.numba import extending # noqa: F401
@pytest.mark.skipif(cffi is None, reason="requires cffi")
def test_cffi():
from numpy.random._examples.cffi import extending # noqa: F401
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_regression.py
import sys
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
from numpy import random
import numpy as np
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
# Check for 64-bit systems
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(np.random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
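        # (logseries pmf: P(k) = -p**k / (k * log(1 - p)); with p = 0.8
        # this gives P(1) ~= 0.4971 and P(2) ~= 0.1988)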
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
np.random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = np.random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
np.random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
np.random.multivariate_normal([0], [[0]], size=1)
np.random.multivariate_normal([0], [[0]], size=np.int_(1))
np.random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
np.random.seed(1234567890)
x = np.random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
np.random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = np.random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, np.random.choice, a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
np.random.seed(1234)
a = np.array(['a', 'a' * 1000])
for _ in range(100):
np.random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
np.random.seed(1234)
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
np.random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
np.random.seed(1)
orig = np.arange(3).view(N)
perm = np.random.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
np.random.seed(1)
m = M()
perm = np.random.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_randomstate_regression.py
import sys
import pytest
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
import numpy as np
from numpy import random
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
# Check for 64-bit systems
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
random.seed(0)
rvsn = random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
random.multivariate_normal([0], [[0]], size=1)
random.multivariate_normal([0], [[0]], size=np.int_(1))
random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
random.seed(1234567890)
x = random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, random.choice, a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
random.seed(1234)
a = np.array(['a', 'a' * 1000])
for _ in range(100):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
random.seed(1234)
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
random.seed(1)
orig = np.arange(3).view(N)
perm = random.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
random.seed(1)
m = M()
perm = random.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
def test_warns_byteorder(self):
# GH 13159
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.deprecated_call(match='non-native byteorder is not'):
random.randint(0, 200, size=10, dtype=other_byteord_dt)
def test_named_argument_initialization(self):
# GH 13669
rs1 = np.random.RandomState(123456789)
rs2 = np.random.RandomState(seed=123456789)
assert rs1.randint(0, 100) == rs2.randint(0, 100)
    def test_choice_return_dtype(self):
# GH 9867
c = np.random.choice(10, p=[.1]*10, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, replace=False, size=2)
assert c.dtype == np.dtype(int)
@pytest.mark.skipif(np.iinfo('l').max < 2**32,
reason='Cannot test with 32-bit C long')
def test_randint_117(self):
# GH 14189
random.seed(0)
expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
2588848963, 3684848379, 2340255427, 3638918503,
1819583497, 2678185683], dtype='int64')
actual = random.randint(2**32, size=10)
assert_array_equal(actual, expected)
def test_p_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(12345)
assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
[0, 0, 0, 1, 1])
def test_n_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(8675309)
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
expected)
def test_multinomial_empty():
# gh-20483
# Ensure that empty p-vals are correctly handled
assert random.multinomial(10, []).shape == (0,)
assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0)
def test_multinomial_1d_pval():
# gh-20483
with pytest.raises(TypeError, match="pvals must be a 1-d"):
random.multinomial(10, 0.3)
# omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_randomstate.py
import hashlib
import pickle
import sys
import warnings
import numpy as np
import pytest
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy.random import MT19937, PCG64
from numpy import random
INT_FUNCS = {'binomial': (100.0, 0.6),
'geometric': (.5,),
'hypergeometric': (20, 20, 10),
'logseries': (.5,),
'multinomial': (20, np.ones(6) / 6.0),
'negative_binomial': (100, .5),
'poisson': (10.0,),
'zipf': (2,),
}
if np.iinfo(int).max < 2**32:
# Windows and some 32-bit platforms, e.g., ARM
INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263',
'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb',
'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf',
'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67',
'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3',
'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824',
'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7',
'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f',
}
else:
INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112',
'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9',
'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657',
'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db',
'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605',
'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61',
'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4',
'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45',
}
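# A hypothetical illustration of the hashing idea behind the tables
# above (assumed from the hashlib import; not guaranteed to reproduce
# the stored digests): digest the raw bytes of a fixed-seed draw.
def _draw_digest_sketch():
    vals = random.RandomState(12345).binomial(100.0, 0.6, size=1000)
    return hashlib.sha256(vals.tobytes()).hexdigest()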
@pytest.fixture(scope='module', params=INT_FUNCS)
def int_func(request):
return (request.param, INT_FUNCS[request.param],
INT_FUNC_HASHES[request.param])
def assert_mt19937_state_equal(a, b):
assert_equal(a['bit_generator'], b['bit_generator'])
assert_array_equal(a['state']['key'], b['state']['key'])
assert_array_equal(a['state']['pos'], b['state']['pos'])
assert_equal(a['has_gauss'], b['has_gauss'])
assert_equal(a['gauss'], b['gauss'])
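# A minimal sketch of the round trip this helper supports: capture a
# RandomState's dict-form state, advance the stream, restore, and
# compare field by field. (The helper name is illustrative only; the
# legacy=False form of get_state is assumed to return the dict shape
# compared above.)
def _state_roundtrip_sketch():
    rs = random.RandomState(0)
    state = rs.get_state(legacy=False)
    rs.standard_normal()
    rs.set_state(state)
    assert_mt19937_state_equal(state, rs.get_state(legacy=False))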
class TestSeed:
def test_scalar(self):
s = random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, -0.5)
assert_raises(ValueError, random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, [-0.5])
assert_raises(ValueError, random.RandomState, [-1])
assert_raises(ValueError, random.RandomState, [4294967296])
assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, random.RandomState, np.array([],
dtype=np.int64))
assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, random.RandomState, [[1, 2, 3],
[4, 5, 6]])
def test_cannot_seed(self):
rs = random.RandomState(PCG64(0))
with assert_raises(TypeError):
rs.seed(1234)
def test_invalid_initialization(self):
assert_raises(ValueError, random.RandomState, MT19937)
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random.seed(1432985819)
non_contig = random.multinomial(100, pvals=pvals)
random.seed(1432985819)
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multinomial_pvals_float32(self):
x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
pvals = x / x.sum()
match = r"[\w\s]*pvals array is cast to 64-bit floating"
with pytest.raises(ValueError, match=match):
random.multinomial(1, pvals)
class TestSetState:
def setup_method(self):
self.seed = 1234567890
self.random_state = random.RandomState(self.seed)
self.state = self.random_state.get_state()
def test_basic(self):
old = self.random_state.tomaxint(16)
self.random_state.set_state(self.state)
new = self.random_state.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(self.state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.random_state.standard_normal()
state = self.random_state.get_state()
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.random_state.standard_normal(size=16)
self.random_state.set_state(old_state)
x2 = self.random_state.standard_normal(size=16)
self.random_state.set_state(self.state)
x3 = self.random_state.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.random_state.negative_binomial(0.5, 0.5)
def test_get_state_warning(self):
rs = random.RandomState(PCG64())
with suppress_warnings() as sup:
w = sup.record(RuntimeWarning)
state = rs.get_state()
assert_(len(w) == 1)
assert isinstance(state, dict)
assert state['bit_generator'] == 'PCG64'
def test_invalid_legacy_state_setting(self):
state = self.random_state.get_state()
new_state = ('Unknown', ) + state[1:]
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state,
np.array(new_state, dtype=object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
def test_pickle(self):
self.random_state.seed(0)
self.random_state.random_sample(100)
self.random_state.standard_normal()
pickled = self.random_state.get_state(legacy=False)
assert_equal(pickled['has_gauss'], 1)
rs_unpick = pickle.loads(pickle.dumps(self.random_state))
unpickled = rs_unpick.get_state(legacy=False)
assert_mt19937_state_equal(pickled, unpickled)
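    # Illustrative sketch (not collected by pytest): pickling captures the
    # full generator state, so a round-tripped copy continues the exact
    # same stream as the original.
    def _sketch_pickle_roundtrip_stream(self):
        rs = random.RandomState(12345)
        clone = pickle.loads(pickle.dumps(rs))
        assert_array_equal(rs.random_sample(5), clone.random_sample(5))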
def test_state_setting(self):
attr_state = self.random_state.__getstate__()
self.random_state.standard_normal()
self.random_state.__setstate__(attr_state)
state = self.random_state.get_state(legacy=False)
assert_mt19937_state_equal(attr_state, state)
def test_repr(self):
assert repr(self.random_state).startswith('RandomState(MT19937)')
class TestRandint:
rfunc = random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
# We use a sha256 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
for dt in self.itype[1:]:
random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
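    # Illustrative sketch (not collected by pytest): how the reference
    # hashes above are produced -- seed, draw 1000 variates, normalize the
    # byte order to little endian, and hash the raw bytes.
    def _sketch_hash_stream(self, dt=np.int32):
        random.seed(1234)
        val = self.rfunc(0, 6, size=1000, dtype=dt)
        if sys.byteorder != 'little':
            val = val.byteswap()
        return hashlib.sha256(val.view(np.int8)).hexdigest()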
@pytest.mark.skipif(np.iinfo('l').max < 2**32,
reason='Cannot test with 32-bit C long')
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[3992670689, 2438360420, 2557845020],
[4107320065, 4142558326, 3216529513],
[1605979228, 2807061240, 665605495]],
[[3211410639, 4128781000, 457175120],
[1712592594, 1282922662, 3081439808],
[3997822960, 2008322436, 1563495165]],
[[1398375547, 4269260146, 115316740],
[3414372578, 3437564012, 2112038651],
[3572980305, 2260248732, 3908238631]],
[[2561372503, 223155946, 3127879445],
[ 441282060, 3514786552, 2148440361],
[1629275283, 3479737011, 3003195987]],
[[ 412181688, 940383289, 3047321305],
[2978368172, 764731833, 2282559898],
[ 105711276, 720447391, 3596512484]]])
for size in [None, (5, 3, 3)]:
random.seed(12345)
x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_corner_case(self):
        # When stored in NumPy arrays, `lbnd` is cast
        # to np.int64, and `ubnd` is cast to np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when NumPy compares the two numbers,
        # it casts both to np.float64 since there is
        # no integer superset of np.int64 and np.uint64.
        # `lbnd` (2**63 - 1) cannot be represented exactly in
        # np.float64 and is rounded up to 2**63, so the cast
        # `lbnd` compared equal to `ubnd`, leading to a
        # spurious ValueError.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
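    # Illustrative sketch (not collected by pytest): the rounding pitfall
    # described above, shown directly -- 2**63 - 1 is not representable in
    # float64 and rounds up to 2**63, so the mixed int64/uint64 comparison
    # spuriously sees lbnd == ubnd.
    def _sketch_float64_comparison_pitfall(self):
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
        assert float(lbnd) == float(ubnd)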
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup_method(self):
self.seed = 1234567890
def test_rand(self):
random.seed(self.seed)
actual = random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rand_singleton(self):
random.seed(self.seed)
actual = random.rand()
desired = 0.61879477158567997
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
random.seed(self.seed)
actual = random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.randn()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_randint(self):
random.seed(self.seed)
actual = random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(198, size=(3, 2))
assert_(len(w) == 1)
assert_array_equal(actual, desired + 100)
def test_tomaxint(self):
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
if np.iinfo(int).max == 2147483647:
desired = np.array([[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889]], dtype=np.int64)
else:
desired = np.array([[5707374374421908479, 5456764827585442327],
[8196659375100692377, 8224063923314595285],
[4220315081820346526, 7177518203184491332]],
dtype=np.int64)
assert_equal(actual, desired)
rs.seed(self.seed)
actual = rs.tomaxint()
assert_equal(actual, desired[0, 0])
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
typer = np.dtype('l').type
actual = random.random_integers(typer(np.iinfo('l').max),
typer(np.iinfo('l').max))
assert_(len(w) == 1)
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
random.seed(self.seed)
actual = random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.random_sample()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_choice_uniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random.seed(self.seed)
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.randint(0, -10, size=0).shape, (0,))
assert_equal(random.randint(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random.seed(self.seed)
non_contig = random.choice(5, 3, p=p[::2])
random.seed(self.seed)
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_bytes(self):
random.seed(self.seed)
actual = random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_invalid_objects(self):
x = np.array(3)
assert_raises(TypeError, random.shuffle, x)
def test_permutation(self):
random.seed(self.seed)
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
assert_array_equal(actual, desired)
random.seed(self.seed)
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
random.seed(self.seed)
bad_x_str = "abcd"
assert_raises(IndexError, random.permutation, bad_x_str)
random.seed(self.seed)
bad_x_float = 1.2
assert_raises(IndexError, random.permutation, bad_x_float)
integer_val = 10
desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
random.seed(self.seed)
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_beta(self):
random.seed(self.seed)
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random.seed(self.seed)
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
random.seed(self.seed)
actual = random.binomial(100.123, .456)
desired = 37
assert_array_equal(actual, desired)
def test_chisquare(self):
random.seed(self.seed)
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random.seed(self.seed)
non_contig = random.dirichlet(alpha, size=(3, 2))
random.seed(self.seed)
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random.seed(self.seed)
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random.seed(self.seed)
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random.seed(self.seed)
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random.seed(self.seed)
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random.seed(self.seed)
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random.seed(self.seed)
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random.seed(self.seed)
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random.seed(self.seed)
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random.seed(self.seed)
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random.seed(self.seed)
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_logseries_zero(self):
assert random.logseries(0) == 1
@pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.])
def test_logseries_exceptions(self, value):
with np.errstate(invalid="ignore"):
with pytest.raises(ValueError):
random.logseries(value)
with pytest.raises(ValueError):
# contiguous path:
random.logseries(np.array([value] * 10))
with pytest.raises(ValueError):
# non-contiguous path:
random.logseries(np.array([value] * 10)[::2])
def test_multinomial(self):
random.seed(self.seed)
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check the default size, which used to raise a deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        # and that it doesn't warn when check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
        # and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random.seed(self.seed)
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random.seed(self.seed)
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random.seed(self.seed)
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
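    # Illustrative sketch (not collected by pytest): what the nulp
    # tolerance above means -- values within N units in the last place
    # (representable float64 steps) of each other are accepted.
    def _sketch_nulp_tolerance(self):
        x = np.float64(1.0)
        y = x + 5 * np.spacing(x)  # 5 ulps above 1.0
        np.testing.assert_array_almost_equal_nulp(x, y, nulp=6)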
def test_poisson(self):
random.seed(self.seed)
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random.seed(self.seed)
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random.seed(self.seed)
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random.seed(self.seed)
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random.seed(self.seed)
actual = random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
random.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random.seed(self.seed)
actual = random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn_singleton(self):
random.seed(self.seed)
actual = random.randn()
desired = np.array(1.34016345771863121)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
random.seed(self.seed)
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random.seed(self.seed)
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random.seed(self.seed)
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
        # (fmax / 1e17) - fmin is within range, so this should not throw.
        # Account for i386 extended precision, where DBL_MAX / 1e17 + DBL_MAX
        # can exceed DBL_MAX without overflowing, by increasing fmin a bit.
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random.seed(self.seed)
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_large(self):
# guard against changes in RandomState when Generator is fixed
random.seed(self.seed)
actual = random.vonmises(mu=0., kappa=1e7, size=3)
desired = np.array([4.634253748521111e-04,
3.558873596114509e-04,
-2.337119622577433e-04])
assert_array_almost_equal(actual, desired, decimal=8)
def test_vonmises_nan(self):
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random.seed(self.seed)
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random.seed(self.seed)
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random.seed(self.seed)
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random.seed(self.seed)
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup_method(self):
self.seed = 123456789
def set_seed(self):
random.seed(self.seed)
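    # Illustrative sketch (not collected by pytest): the broadcasting under
    # test -- a length-3 argument against a length-1 argument behaves as if
    # both were length 3, yielding three variates.
    def _sketch_broadcast_shape(self):
        self.set_seed()
        out = random.normal([0, 0, 0], [1])
        assert out.shape == (3,)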
def test_uniform(self):
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.set_seed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.set_seed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.set_seed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.set_seed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.set_seed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.set_seed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.set_seed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.set_seed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.set_seed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.set_seed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.set_seed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.set_seed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.set_seed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.set_seed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.set_seed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.set_seed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.set_seed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.set_seed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.set_seed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.set_seed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.set_seed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.set_seed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.set_seed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.set_seed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.set_seed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
self.set_seed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.set_seed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.set_seed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
self.set_seed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, wald, 0.0, 1)
assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.set_seed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
self.set_seed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
self.set_seed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = random.binomial
desired = np.array([1, 1, 1])
self.set_seed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.set_seed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = random.negative_binomial
desired = np.array([1, 0, 1])
self.set_seed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.set_seed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = random.poisson
desired = np.array([1, 1, 0])
self.set_seed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = random.zipf
desired = np.array([2, 2, 1])
self.set_seed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = random.geometric
desired = np.array([2, 2, 2])
self.set_seed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = random.hypergeometric
desired = np.array([1, 1, 1])
self.set_seed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, 0)
assert_raises(ValueError, hypergeom, 10, 10, 25)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = random.logseries
desired = np.array([1, 1, 1])
self.set_seed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
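# A minimal, illustrative sketch (not part of the upstream suite) of the
# broadcasting contract the tests above repeat for each distribution:
# array parameters broadcast element-wise, and a single invalid element
# anywhere in the broadcast raises ValueError.  Seed and values here are
# arbitrary assumptions.
def _parameter_broadcast_demo():
    rs = random.RandomState(12345)
    draws = rs.binomial([10, 20, 30], 0.5)  # n of shape (3,) against scalar p
    assert draws.shape == (3,)
    assert_raises(ValueError, rs.binomial, [10, -1, 30], 0.5)  # one bad n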
class TestThread:
# make sure each state produces the same sequence even in threads
def setup_method(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
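    # Illustrative helper (underscore-prefixed so pytest will not collect
    # it): the thread-safety pattern check_function exercises is one
    # RandomState per thread, never a shared stream.  Sizes are arbitrary.
    def _per_thread_state_demo(self):
        from threading import Thread
        out = np.empty((len(self.seeds), 100))
        def fill(state, buf):
            buf[...] = state.normal(size=100)
        threads = [Thread(target=fill, args=(random.RandomState(s), row))
                   for s, row in zip(self.seeds, out)]
        [t.start() for t in threads]
        [t.join() for t in threads]
        return out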
# See Issue #4263
class TestSingleEltArrayInput:
def setup_method(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
# Ensure returned array dtype is correct for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, sha256 = int_func
f = getattr(random, fname)
actual = f(*args, size=2)
assert_(actual.dtype == np.dtype('l'))
def test_integer_repeat(int_func):
random.seed(123456789)
fname, args, sha256 = int_func
f = getattr(random, fname)
val = f(*args, size=1000000)
if sys.byteorder != 'little':
val = val.byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(res == sha256)
def test_broadcast_size_error():
# GH-16833
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
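# Illustrative counterpart to test_broadcast_size_error (values arbitrary):
# a size that the parameter shapes can broadcast to is accepted, unlike the
# (2, 1) sizes rejected above.
def _broadcast_size_demo():
    out = random.binomial([1, 2], [0.3, 0.7], size=(3, 2))
    assert out.shape == (3, 2)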
| 81,906 | Python | 39.308563 | 111 | 0.571228 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_generator_mt19937_regressions.py | from numpy.testing import (assert_, assert_array_equal)
import numpy as np
import pytest
from numpy.random import Generator, MT19937
mt19937 = Generator(MT19937())
class TestRegression:
def test_vonmises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
assert_(mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
mt19937 = Generator(MT19937(0))
rvsn = mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
mt19937 = Generator(MT19937(12345))
shuffled = np.array(t, dtype=object)
mt19937.shuffle(shuffled)
expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom BitGenerator does not call into global state
res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
for i in range(3):
mt19937 = Generator(MT19937(i))
m = Generator(MT19937(4321))
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
mt19937.multivariate_normal([0], [[0]], size=1)
mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
mt19937 = Generator(MT19937(1234567890))
x = mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
mt19937 = Generator(MT19937(1234))
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
mt19937.choice(a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
mt19937 = Generator(MT19937(1234))
a = np.array(['a', 'a' * 1000])
for _ in range(100):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
mt19937 = Generator(MT19937(1234))
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
mt19937 = Generator(MT19937(1))
orig = np.arange(3).view(N)
perm = mt19937.permutation(orig)
assert_array_equal(perm, np.array([2, 0, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
mt19937 = Generator(MT19937(1))
m = M()
perm = mt19937.permutation(m)
assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
assert_array_equal(m.__array__(), np.arange(5))
def test_gamma_0(self):
assert mt19937.standard_gamma(0.0) == 0.0
assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
actual = mt19937.standard_gamma([0.0], dtype='float')
expected = np.array([0.], dtype=np.float32)
assert_array_equal(actual, expected)
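# Illustrative sketch (seed arbitrary, not an upstream test): permutation
# returns a shuffled copy and leaves its argument untouched, while shuffle
# works in place, which is the distinction the subclass tests above rely on.
def _permutation_vs_shuffle_demo():
    rg = Generator(MT19937(0))
    orig = np.arange(5)
    perm = rg.permutation(orig)
    assert_array_equal(orig, np.arange(5))   # original left unchanged
    rg.shuffle(orig)                         # shuffle mutates in place
    assert_array_equal(np.sort(orig), np.arange(5))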
| 5,639 | Python | 36.350993 | 77 | 0.577762 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_direct.py | import os
from os.path import join
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
assert_raises)
import pytest
from numpy.random import (
Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence,
SFC64, default_rng
)
from numpy.random._common import interface
try:
import cffi # noqa: F401
MISSING_CFFI = False
except ImportError:
MISSING_CFFI = True
try:
import ctypes # noqa: F401
MISSING_CTYPES = False
except ImportError:
    MISSING_CTYPES = True
if sys.flags.optimize > 1:
# no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
# cffi cannot succeed
MISSING_CFFI = True
pwd = os.path.dirname(os.path.abspath(__file__))
def assert_state_equal(actual, target):
for key in actual:
if isinstance(actual[key], dict):
assert_state_equal(actual[key], target[key])
elif isinstance(actual[key], np.ndarray):
assert_array_equal(actual[key], target[key])
else:
assert actual[key] == target[key]
def uint32_to_float32(u):
return ((u >> np.uint32(8)) * (1.0 / 2**24)).astype(np.float32)
def uniform32_from_uint64(x):
x = np.uint64(x)
upper = np.array(x >> np.uint64(32), dtype=np.uint32)
lower = np.uint64(0xffffffff)
lower = np.array(x & lower, dtype=np.uint32)
joined = np.column_stack([lower, upper]).ravel()
return uint32_to_float32(joined)
def uniform32_from_uint53(x):
x = np.uint64(x) >> np.uint64(16)
x = np.uint32(x & np.uint64(0xffffffff))
return uint32_to_float32(x)
def uniform32_from_uint32(x):
return uint32_to_float32(x)
def uniform32_from_uint(x, bits):
if bits == 64:
return uniform32_from_uint64(x)
elif bits == 53:
return uniform32_from_uint53(x)
elif bits == 32:
return uniform32_from_uint32(x)
else:
raise NotImplementedError
def uniform_from_uint(x, bits):
if bits in (64, 63, 53):
return uniform_from_uint64(x)
elif bits == 32:
return uniform_from_uint32(x)
def uniform_from_uint64(x):
return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
def uniform_from_uint32(x):
out = np.empty(len(x) // 2)
for i in range(0, len(x), 2):
a = x[i] >> 5
b = x[i + 1] >> 6
out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
return out
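def _double_from_bits_demo():
    # Illustrative sketch (input values arbitrary): both helpers above
    # implement the standard 53-bit recipe, where 9007199254740992.0 is
    # 2**53, so every output lands in [0, 1).
    x = np.array([0, 2**64 - 1], dtype=np.uint64)
    vals = uniform_from_uint64(x)
    assert vals.min() >= 0.0 and vals.max() < 1.0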
def uniform_from_dsfmt(x):
return x.view(np.double) - 1.0
def gauss_from_uint(x, n, bits):
if bits in (64, 63):
doubles = uniform_from_uint64(x)
elif bits == 32:
doubles = uniform_from_uint32(x)
else: # bits == 'dsfmt'
doubles = uniform_from_dsfmt(x)
gauss = []
loc = 0
x1 = x2 = 0.0
while len(gauss) < n:
r2 = 2
while r2 >= 1.0 or r2 == 0.0:
x1 = 2.0 * doubles[loc] - 1.0
x2 = 2.0 * doubles[loc + 1] - 1.0
r2 = x1 * x1 + x2 * x2
loc += 2
f = np.sqrt(-2.0 * np.log(r2) / r2)
gauss.append(f * x2)
gauss.append(f * x1)
return gauss[:n]
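def _polar_gauss_demo(n=4, seed=12345):
    # Illustrative sketch (seed and n are arbitrary assumptions): the same
    # Marsaglia polar transform as gauss_from_uint, but fed from a live
    # Generator instead of recorded bit-generator output.
    rg = Generator(PCG64(seed))
    gauss = []
    while len(gauss) < n:
        x1, x2 = 2.0 * rg.random(2) - 1.0
        r2 = x1 * x1 + x2 * x2
        if r2 >= 1.0 or r2 == 0.0:
            continue  # reject points outside the unit circle
        f = np.sqrt(-2.0 * np.log(r2) / r2)
        gauss.extend([f * x2, f * x1])
    return gauss[:n]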
def test_seedsequence():
from numpy.random.bit_generator import (ISeedSequence,
ISpawnableSeedSequence,
SeedlessSeedSequence)
s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
s1.spawn(10)
s2 = SeedSequence(**s1.state)
assert_equal(s1.state, s2.state)
assert_equal(s1.n_children_spawned, s2.n_children_spawned)
# The interfaces cannot be instantiated themselves.
assert_raises(TypeError, ISeedSequence)
assert_raises(TypeError, ISpawnableSeedSequence)
dummy = SeedlessSeedSequence()
assert_raises(NotImplementedError, dummy.generate_state, 10)
assert len(dummy.spawn(10)) == 10
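def _seedsequence_spawn_demo():
    # Illustrative sketch (entropy arbitrary): spawn() derives independent
    # child sequences, and the recorded state rebuilds the parent exactly,
    # mirroring what test_seedsequence asserts above.
    parent = SeedSequence(12345)
    children = parent.spawn(3)
    streams = [Generator(PCG64(child)) for child in children]
    rebuilt = SeedSequence(**parent.state)
    assert_equal(parent.state, rebuilt.state)
    return streams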
class Base:
dtype = np.uint64
data2 = data1 = {}
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.seed_error_type = TypeError
cls.invalid_init_types = []
cls.invalid_init_values = []
@classmethod
def _read_csv(cls, filename):
with open(filename) as csv:
seed = csv.readline()
seed = seed.split(',')
seed = [int(s.strip(), 0) for s in seed[1:]]
data = []
for line in csv:
data.append(int(line.split(',')[-1].strip(), 0))
return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
def test_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data1['data'])
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw()
assert_equal(uints, self.data1['data'][0])
bit_generator = self.bit_generator(*self.data2['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data2['data'])
def test_random_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(output=False)
assert uints is None
uints = bit_generator.random_raw(1000, output=False)
assert uints is None
def test_gauss_inv(self):
n = 25
rs = RandomState(self.bit_generator(*self.data1['seed']))
gauss = rs.standard_normal(n)
assert_allclose(gauss,
gauss_from_uint(self.data1['data'], n, self.bits))
rs = RandomState(self.bit_generator(*self.data2['seed']))
gauss = rs.standard_normal(25)
assert_allclose(gauss,
gauss_from_uint(self.data2['data'], n, self.bits))
def test_uniform_double(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
def test_uniform_float(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform32_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform32_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
def test_repr(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in repr(rs)
assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs)
def test_str(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in str(rs)
assert str(self.bit_generator.__name__) in str(rs)
assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs)
def test_pickle(self):
import pickle
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
bitgen_pkl = pickle.dumps(bit_generator)
reloaded = pickle.loads(bitgen_pkl)
reloaded_state = reloaded.state
assert_array_equal(Generator(bit_generator).standard_normal(1000),
Generator(reloaded).standard_normal(1000))
assert bit_generator is not reloaded
assert_state_equal(reloaded_state, state)
ss = SeedSequence(100)
aa = pickle.loads(pickle.dumps(ss))
assert_equal(ss.state, aa.state)
def test_invalid_state_type(self):
bit_generator = self.bit_generator(*self.data1['seed'])
with pytest.raises(TypeError):
bit_generator.state = {'1'}
def test_invalid_state_value(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
state['bit_generator'] = 'otherBitGenerator'
with pytest.raises(ValueError):
bit_generator.state = state
def test_invalid_init_type(self):
bit_generator = self.bit_generator
for st in self.invalid_init_types:
with pytest.raises(TypeError):
bit_generator(*st)
def test_invalid_init_values(self):
bit_generator = self.bit_generator
for st in self.invalid_init_values:
with pytest.raises((ValueError, OverflowError)):
bit_generator(*st)
def test_benchmark(self):
bit_generator = self.bit_generator(*self.data1['seed'])
bit_generator._benchmark(1)
bit_generator._benchmark(1, 'double')
with pytest.raises(ValueError):
bit_generator._benchmark(1, 'int32')
@pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
def test_cffi(self):
bit_generator = self.bit_generator(*self.data1['seed'])
cffi_interface = bit_generator.cffi
assert isinstance(cffi_interface, interface)
other_cffi_interface = bit_generator.cffi
assert other_cffi_interface is cffi_interface
@pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
def test_ctypes(self):
bit_generator = self.bit_generator(*self.data1['seed'])
ctypes_interface = bit_generator.ctypes
assert isinstance(ctypes_interface, interface)
other_ctypes_interface = bit_generator.ctypes
assert other_ctypes_interface is ctypes_interface
def test_getstate(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
alt_state = bit_generator.__getstate__()
assert_state_equal(state, alt_state)
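    # Illustrative helper (not collected by pytest): bit-generator state is
    # a plain dict that can be stashed and restored to replay a stream
    # exactly.  The draw count is arbitrary.
    def _state_roundtrip_demo(self):
        bg = self.bit_generator(*self.data1['seed'])
        saved = bg.state
        first = bg.random_raw(5)
        bg.state = saved
        assert_array_equal(bg.random_raw(5), first)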
class TestPhilox(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/philox-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/philox-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_init_types = []
cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]
def test_set_key(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
keyed = self.bit_generator(counter=state['state']['counter'],
key=state['state']['key'])
assert_state_equal(bit_generator.state, keyed.state)
class TestPCG64(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_advance_symmetry(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
state = rs.bit_generator.state
step = -0x9e3779b97f4a7c150000000000000000
rs.bit_generator.advance(step)
val_neg = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(2**128 + step)
val_pos = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(10 * 2**128 + step)
val_big = rs.integers(10)
assert val_neg == val_pos
assert val_big == val_pos
    def test_advance_large(self):
rs = Generator(self.bit_generator(38219308213743))
pcg = rs.bit_generator
state = pcg.state["state"]
initial_state = 287608843259529770491897792873167516365
assert state["state"] == initial_state
pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
state = pcg.state["state"]
advanced_state = 135275564607035429730177404003164635391
assert state["state"] == advanced_state
class TestPCG64DXSM(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64DXSM
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_advance_symmetry(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
state = rs.bit_generator.state
step = -0x9e3779b97f4a7c150000000000000000
rs.bit_generator.advance(step)
val_neg = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(2**128 + step)
val_pos = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(10 * 2**128 + step)
val_big = rs.integers(10)
assert val_neg == val_pos
assert val_big == val_pos
    def test_advance_large(self):
rs = Generator(self.bit_generator(38219308213743))
pcg = rs.bit_generator
state = pcg.state
initial_state = 287608843259529770491897792873167516365
assert state["state"]["state"] == initial_state
pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
state = pcg.state["state"]
advanced_state = 277778083536782149546677086420637664879
assert state["state"] == advanced_state
class TestMT19937(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.bits = 32
cls.dtype = np.uint32
cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
cls.seed_error_type = ValueError
cls.invalid_init_types = []
cls.invalid_init_values = [(-1,)]
def test_seed_float_array(self):
assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
assert_raises(TypeError, self.bit_generator, [np.pi])
assert_raises(TypeError, self.bit_generator, [0, np.pi])
def test_state_tuple(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
bit_generator = rs.bit_generator
state = bit_generator.state
desired = rs.integers(2 ** 16)
tup = (state['bit_generator'], state['state']['key'],
state['state']['pos'])
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
tup = tup + (0, 0.0)
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
class TestSFC64(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/sfc64-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/sfc64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
class TestDefaultRNG:
def test_seed(self):
for args in [(), (None,), (1234,), ([1234, 5678],)]:
rg = default_rng(*args)
assert isinstance(rg.bit_generator, PCG64)
def test_passthrough(self):
bg = Philox()
rg = default_rng(bg)
assert rg.bit_generator is bg
rg2 = default_rng(rg)
assert rg2 is rg
assert rg2.bit_generator is bg
| 16,429 | Python | 33.300626 | 83 | 0.601132 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_smoke.py | import pickle
from functools import partial
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_, assert_array_equal
from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64)
@pytest.fixture(scope='module',
params=(np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
return request.param
def params_0(f):
val = f()
assert_(np.isscalar(val))
val = f(10)
assert_(val.shape == (10,))
val = f((10, 10))
assert_(val.shape == (10, 10))
val = f((10, 10, 10))
assert_(val.shape == (10, 10, 10))
val = f(size=(5, 5))
assert_(val.shape == (5, 5))
def params_1(f, bounded=False):
a = 5.0
b = np.arange(2.0, 12.0)
c = np.arange(2.0, 102.0).reshape((10, 10))
d = np.arange(2.0, 1002.0).reshape((10, 10, 10))
e = np.array([2.0, 3.0])
g = np.arange(2.0, 12.0).reshape((1, 10, 1))
if bounded:
a = 0.5
b = b / (1.5 * b.max())
c = c / (1.5 * c.max())
d = d / (1.5 * d.max())
e = e / (1.5 * e.max())
g = g / (1.5 * g.max())
# Scalar
f(a)
# Scalar - size
f(a, size=(10, 10))
# 1d
f(b)
# 2d
f(c)
# 3d
f(d)
# 1d size
f(b, size=10)
# 2d - size - broadcast
f(e, size=(10, 2))
# 3d - size
f(g, size=(10, 10, 10))
def comp_state(state1, state2):
identical = True
if isinstance(state1, dict):
for key in state1:
identical &= comp_state(state1[key], state2[key])
    elif type(state1) != type(state2):
        identical = False
else:
if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance(
state2, (list, tuple, np.ndarray))):
for s1, s2 in zip(state1, state2):
identical &= comp_state(s1, s2)
else:
identical &= state1 == state2
return identical
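def _comp_state_demo():
    # Illustrative sketch (seed arbitrary): identically seeded generators
    # compare equal under comp_state until one of them draws.
    a = Generator(PCG64(42))
    b = Generator(PCG64(42))
    assert comp_state(a.bit_generator.state, b.bit_generator.state)
    a.random()
    assert not comp_state(a.bit_generator.state, b.bit_generator.state)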
def warmup(rg, n=None):
if n is None:
n = 11 + np.random.randint(0, 20)
rg.standard_normal(n)
rg.standard_normal(n)
rg.standard_normal(n, dtype=np.float32)
rg.standard_normal(n, dtype=np.float32)
rg.integers(0, 2 ** 24, n, dtype=np.uint64)
rg.integers(0, 2 ** 48, n, dtype=np.uint64)
rg.standard_gamma(11.0, n)
rg.standard_gamma(11.0, n, dtype=np.float32)
rg.random(n, dtype=np.float64)
rg.random(n, dtype=np.float32)
class RNG:
@classmethod
def setup_class(cls):
        # Overridden in test classes. Placeholder to silence IDE noise.
cls.bit_generator = PCG64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
@classmethod
def _extra_setup(cls):
cls.vec_1d = np.arange(2.0, 102.0)
cls.vec_2d = np.arange(2.0, 102.0)[None, :]
cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
cls.seed_error = TypeError
def _reset_state(self):
self.rg.bit_generator.state = self.initial_state
def test_init(self):
rg = Generator(self.bit_generator())
state = rg.bit_generator.state
rg.standard_normal(1)
rg.standard_normal(1)
rg.bit_generator.state = state
new_state = rg.bit_generator.state
assert_(comp_state(state, new_state))
def test_advance(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'advance'):
self.rg.bit_generator.advance(self.advance)
assert_(not comp_state(state, self.rg.bit_generator.state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
pytest.skip(f'Advance is not supported by {bitgen_name}')
def test_jump(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'jumped'):
bit_gen2 = self.rg.bit_generator.jumped()
jumped_state = bit_gen2.state
assert_(not comp_state(state, jumped_state))
self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
self.rg.bit_generator.state = state
bit_gen3 = self.rg.bit_generator.jumped()
rejumped_state = bit_gen3.state
assert_(comp_state(jumped_state, rejumped_state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
if bitgen_name not in ('SFC64',):
raise AttributeError(f'no "jumped" in {bitgen_name}')
pytest.skip(f'Jump is not supported by {bitgen_name}')
def test_uniform(self):
r = self.rg.uniform(-1.0, 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_uniform_array(self):
r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(np.array([-1.0] * 10),
np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_random(self):
assert_(len(self.rg.random(10)) == 10)
params_0(self.rg.random)
def test_standard_normal_zig(self):
assert_(len(self.rg.standard_normal(10)) == 10)
def test_standard_normal(self):
assert_(len(self.rg.standard_normal(10)) == 10)
params_0(self.rg.standard_normal)
def test_standard_gamma(self):
assert_(len(self.rg.standard_gamma(10, 10)) == 10)
assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
params_1(self.rg.standard_gamma)
def test_standard_exponential(self):
assert_(len(self.rg.standard_exponential(10)) == 10)
params_0(self.rg.standard_exponential)
def test_standard_exponential_float(self):
randoms = self.rg.standard_exponential(10, dtype='float32')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32'))
def test_standard_exponential_float_log(self):
randoms = self.rg.standard_exponential(10, dtype='float32',
method='inv')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32',
method='inv'))
def test_standard_cauchy(self):
assert_(len(self.rg.standard_cauchy(10)) == 10)
params_0(self.rg.standard_cauchy)
def test_standard_t(self):
assert_(len(self.rg.standard_t(10, 10)) == 10)
params_1(self.rg.standard_t)
def test_binomial(self):
assert_(self.rg.binomial(10, .5) >= 0)
assert_(self.rg.binomial(1000, .5) >= 0)
def test_reset_state(self):
state = self.rg.bit_generator.state
int_1 = self.rg.integers(2**31)
self.rg.bit_generator.state = state
int_2 = self.rg.integers(2**31)
assert_(int_1 == int_2)
def test_entropy_init(self):
rg = Generator(self.bit_generator())
rg2 = Generator(self.bit_generator())
assert_(not comp_state(rg.bit_generator.state,
rg2.bit_generator.state))
def test_seed(self):
rg = Generator(self.bit_generator(*self.seed))
rg2 = Generator(self.bit_generator(*self.seed))
rg.random()
rg2.random()
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_reset_state_gauss(self):
rg = Generator(self.bit_generator(*self.seed))
rg.standard_normal()
state = rg.bit_generator.state
n1 = rg.standard_normal(size=10)
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.standard_normal(size=10)
assert_array_equal(n1, n2)
def test_reset_state_uint32(self):
rg = Generator(self.bit_generator(*self.seed))
rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
state = rg.bit_generator.state
n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
assert_array_equal(n1, n2)
def test_reset_state_float(self):
rg = Generator(self.bit_generator(*self.seed))
rg.random(dtype='float32')
state = rg.bit_generator.state
n1 = rg.random(size=10, dtype='float32')
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.random(size=10, dtype='float32')
assert_((n1 == n2).all())
    def test_shuffle(self):
        original = np.arange(200, 0, -1)
        shuffled = original.copy()
        self.rg.shuffle(shuffled)
        assert_((original != shuffled).any())
def test_permutation(self):
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
assert_((original != permuted).any())
def test_beta(self):
vals = self.rg.beta(2.0, 2.0, 10)
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), 2.0)
assert_(len(vals) == 10)
vals = self.rg.beta(2.0, np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
assert_(vals.shape == (10, 10))
def test_bytes(self):
vals = self.rg.bytes(10)
assert_(len(vals) == 10)
def test_chisquare(self):
vals = self.rg.chisquare(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.chisquare)
def test_exponential(self):
vals = self.rg.exponential(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.exponential)
def test_f(self):
vals = self.rg.f(3, 1000, 10)
assert_(len(vals) == 10)
def test_gamma(self):
vals = self.rg.gamma(3, 2, 10)
assert_(len(vals) == 10)
def test_geometric(self):
vals = self.rg.geometric(0.5, 10)
assert_(len(vals) == 10)
        params_1(self.rg.geometric, bounded=True)
def test_gumbel(self):
vals = self.rg.gumbel(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_laplace(self):
vals = self.rg.laplace(2.0, 2.0, 10)
assert_(len(vals) == 10)
    def test_logistic(self):
vals = self.rg.logistic(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_logseries(self):
vals = self.rg.logseries(0.5, 10)
assert_(len(vals) == 10)
def test_negative_binomial(self):
vals = self.rg.negative_binomial(10, 0.2, 10)
assert_(len(vals) == 10)
def test_noncentral_chisquare(self):
vals = self.rg.noncentral_chisquare(10, 2, 10)
assert_(len(vals) == 10)
def test_noncentral_f(self):
vals = self.rg.noncentral_f(3, 1000, 2, 10)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
assert_(len(vals) == 10)
def test_normal(self):
vals = self.rg.normal(10, 0.2, 10)
assert_(len(vals) == 10)
def test_pareto(self):
vals = self.rg.pareto(3.0, 10)
assert_(len(vals) == 10)
def test_poisson(self):
vals = self.rg.poisson(10, 10)
assert_(len(vals) == 10)
vals = self.rg.poisson(np.array([10] * 10))
assert_(len(vals) == 10)
params_1(self.rg.poisson)
def test_power(self):
vals = self.rg.power(0.2, 10)
assert_(len(vals) == 10)
def test_integers(self):
vals = self.rg.integers(10, 20, 10)
assert_(len(vals) == 10)
def test_rayleigh(self):
vals = self.rg.rayleigh(0.2, 10)
assert_(len(vals) == 10)
params_1(self.rg.rayleigh, bounded=True)
def test_vonmises(self):
vals = self.rg.vonmises(10, 0.2, 10)
assert_(len(vals) == 10)
def test_wald(self):
vals = self.rg.wald(1.0, 1.0, 10)
assert_(len(vals) == 10)
def test_weibull(self):
vals = self.rg.weibull(1.0, 10)
assert_(len(vals) == 10)
def test_zipf(self):
vals = self.rg.zipf(10, 10)
assert_(len(vals) == 10)
vals = self.rg.zipf(self.vec_1d)
assert_(len(vals) == 100)
vals = self.rg.zipf(self.vec_2d)
assert_(vals.shape == (1, 100))
vals = self.rg.zipf(self.mat)
assert_(vals.shape == (100, 100))
def test_hypergeometric(self):
vals = self.rg.hypergeometric(25, 25, 20)
assert_(np.isscalar(vals))
vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
assert_(vals.shape == (10,))
def test_triangular(self):
vals = self.rg.triangular(-5, 0, 5)
assert_(np.isscalar(vals))
vals = self.rg.triangular(-5, np.array([0] * 10), 5)
assert_(vals.shape == (10,))
def test_multivariate_normal(self):
mean = [0, 0]
cov = [[1, 0], [0, 100]] # diagonal covariance
x = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
x_zig = self.rg.multivariate_normal(mean, cov, 5000)
        assert_(x_zig.shape == (5000, 2))
        x_inv = self.rg.multivariate_normal(mean, cov, 5000)
        assert_(x_inv.shape == (5000, 2))
assert_((x_zig != x_inv).any())
def test_multinomial(self):
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
assert_(vals.shape == (2,))
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
assert_(vals.shape == (10, 2))
def test_dirichlet(self):
s = self.rg.dirichlet((10, 5, 3), 20)
assert_(s.shape == (20, 3))
def test_pickle(self):
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_((type(self.rg) == type(unpick)))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_((type(self.rg) == type(unpick)))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
def test_seed_array(self):
if self.seed_vector_bits is None:
bitgen_name = self.bit_generator.__name__
pytest.skip(f'Vector seeding is not supported by {bitgen_name}')
if self.seed_vector_bits == 32:
dtype = np.uint32
else:
dtype = np.uint64
seed = np.array([1], dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(1)
state2 = bg.state
assert_(comp_state(state1, state2))
seed = np.arange(4, dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = np.arange(1500, dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = 2 ** np.mod(np.arange(1500, dtype=dtype),
self.seed_vector_bits - 1) + 1
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
def test_uniform_float(self):
rg = Generator(self.bit_generator(12345))
warmup(rg)
state = rg.bit_generator.state
r1 = rg.random(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.random(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_gamma_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_zig_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_output_fill(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.standard_normal(out=existing)
rg.bit_generator.state = state
direct = rg.standard_normal(size=size)
assert_equal(direct, existing)
sized = np.empty(size)
rg.bit_generator.state = state
rg.standard_normal(out=sized, size=sized.shape)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_normal(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_normal(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_uniform(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.random(out=existing)
rg.bit_generator.state = state
direct = rg.random(size=size)
assert_equal(direct, existing)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.random(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.random(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_exponential(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.standard_exponential(out=existing)
rg.bit_generator.state = state
direct = rg.standard_exponential(size=size)
assert_equal(direct, existing)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_exponential(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_exponential(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_gamma(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.zeros(size)
rg.bit_generator.state = state
rg.standard_gamma(1.0, out=existing)
rg.bit_generator.state = state
direct = rg.standard_gamma(1.0, size=size)
assert_equal(direct, existing)
existing = np.zeros(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_gamma(1.0, out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_gamma(1.0, size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_gamma_broadcast(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
mu = np.arange(97.0) + 1.0
existing = np.zeros(size)
rg.bit_generator.state = state
rg.standard_gamma(mu, out=existing)
rg.bit_generator.state = state
direct = rg.standard_gamma(mu, size=size)
assert_equal(direct, existing)
existing = np.zeros(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_gamma(mu, out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_gamma(mu, size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_fill_error(self):
rg = self.rg
size = (31, 7, 97)
existing = np.empty(size)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_normal(out=existing[::3])
existing = np.empty(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float64)
existing = np.zeros(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float64)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32)
existing = np.zeros(size, dtype=np.float64)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3])
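    def _output_fill_demo(self):
        # Illustrative helper (shape arbitrary): ``out=`` reuses an
        # existing buffer, and as the error cases above enforce, its dtype
        # and layout must match the request exactly.
        buf = np.empty((4, 3))
        self.rg.standard_normal(out=buf)
        return buf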
def test_integers_broadcast(self, dtype):
if dtype == np.bool_:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
self._reset_state()
a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
self._reset_state()
b = self.rg.integers([lower] * 10, upper, dtype=dtype)
assert_equal(a, b)
self._reset_state()
c = self.rg.integers(lower, upper, size=10, dtype=dtype)
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
[lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
e = self.rg.integers(
np.array([lower] * 10), np.array([upper] * 10), size=10,
dtype=dtype)
assert_equal(a, e)
self._reset_state()
a = self.rg.integers(0, upper, size=10, dtype=dtype)
self._reset_state()
b = self.rg.integers([upper] * 10, dtype=dtype)
assert_equal(a, b)
def test_integers_numpy(self, dtype):
high = np.array([1])
low = np.array([0])
out = self.rg.integers(low, high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low[0], high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low, high[0], dtype=dtype)
assert out.shape == (1,)
def test_integers_broadcast_errors(self, dtype):
if dtype == np.bool_:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
with pytest.raises(ValueError):
self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([0], [0], dtype=dtype)
class TestMT19937(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.advance = None
cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 32
cls._extra_setup()
cls.seed_error = ValueError
def test_numpy_state(self):
nprg = np.random.RandomState()
nprg.standard_normal(99)
state = nprg.get_state()
self.rg.bit_generator.state = state
state2 = self.rg.bit_generator.state
assert_((state[1] == state2['state']['key']).all())
assert_((state[2] == state2['state']['pos']))
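def _shared_mt19937_demo():
    # Illustrative sketch (seed arbitrary): a Generator and a legacy
    # RandomState can wrap the same MT19937 instance, so draws from either
    # advance one shared stream.
    bg = MT19937(12345)
    g = Generator(bg)
    rs = np.random.RandomState(bg)
    return g.standard_normal(), rs.standard_normal()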
class TestPhilox(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestSFC64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 192
cls._extra_setup()
class TestPCG64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestPCG64DXSM(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64DXSM
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestDefaultRNG(RNG):
@classmethod
def setup_class(cls):
# This will duplicate some tests that directly instantiate a fresh
# Generator(), but that's okay.
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = np.random.default_rng(*cls.seed)
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
def test_default_is_pcg64(self):
# In order to change the default BitGenerator, we'll go through
# a deprecation cycle to move to a different function.
assert_(isinstance(self.rg.bit_generator, PCG64))
def test_seed(self):
np.random.default_rng()
np.random.default_rng(None)
np.random.default_rng(12345)
np.random.default_rng(0)
np.random.default_rng(43660444402423911716352051725018508569)
np.random.default_rng([43660444402423911716352051725018508569,
279705150948142787361475340226491943209])
with pytest.raises(ValueError):
np.random.default_rng(-1)
with pytest.raises(ValueError):
np.random.default_rng([12345, -1])
| 28,183 | Python | 33.412698 | 78 | 0.570947 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_seed_sequence.py | import numpy as np
from numpy.testing import assert_array_equal, assert_array_compare
from numpy.random import SeedSequence
def test_reference_data():
""" Check that SeedSequence generates data the same as the C++ reference.
https://gist.github.com/imneme/540829265469e673d045
"""
inputs = [
[3735928559, 195939070, 229505742, 305419896],
[3668361503, 4165561550, 1661411377, 3634257570],
[164546577, 4166754639, 1765190214, 1303880213],
[446610472, 3941463886, 522937693, 1882353782],
[1864922766, 1719732118, 3882010307, 1776744564],
[4141682960, 3310988675, 553637289, 902896340],
[1134851934, 2352871630, 3699409824, 2648159817],
[1240956131, 3107113773, 1283198141, 1924506131],
[2669565031, 579818610, 3042504477, 2774880435],
[2766103236, 2883057919, 4029656435, 862374500],
]
outputs = [
[3914649087, 576849849, 3593928901, 2229911004],
[2240804226, 3691353228, 1365957195, 2654016646],
[3562296087, 3191708229, 1147942216, 3726991905],
[1403443605, 3591372999, 1291086759, 441919183],
[1086200464, 2191331643, 560336446, 3658716651],
[3249937430, 2346751812, 847844327, 2996632307],
[2584285912, 4034195531, 3523502488, 169742686],
[959045797, 3875435559, 1886309314, 359682705],
[3978441347, 432478529, 3223635119, 138903045],
[296367413, 4262059219, 13109864, 3283683422],
]
outputs64 = [
[2477551240072187391, 9577394838764454085],
[15854241394484835714, 11398914698975566411],
[13708282465491374871, 16007308345579681096],
[15424829579845884309, 1898028439751125927],
[9411697742461147792, 15714068361935982142],
[10079222287618677782, 12870437757549876199],
[17326737873898640088, 729039288628699544],
[16644868984619524261, 1544825456798124994],
[1857481142255628931, 596584038813451439],
[18305404959516669237, 14103312907920476776],
]
for seed, expected, expected64 in zip(inputs, outputs, outputs64):
expected = np.array(expected, dtype=np.uint32)
ss = SeedSequence(seed)
state = ss.generate_state(len(expected))
assert_array_equal(state, expected)
state64 = ss.generate_state(len(expected64), dtype=np.uint64)
assert_array_equal(state64, expected64)
def test_zero_padding():
""" Ensure that the implicit zero-padding does not cause problems.
"""
# Ensure that large integers are inserted in little-endian fashion to avoid
# trailing 0s.
ss0 = SeedSequence(42)
ss1 = SeedSequence(42 << 32)
assert_array_compare(
np.not_equal,
ss0.generate_state(4),
ss1.generate_state(4))
# Ensure backwards compatibility with the original 0.17 release for small
# integers and no spawn key.
expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988],
dtype=np.uint32)
assert_array_equal(SeedSequence(42).generate_state(4), expected42)
# Regression test for gh-16539 to ensure that the implicit 0s don't
# conflict with spawn keys.
assert_array_compare(
np.not_equal,
SeedSequence(42, spawn_key=(0,)).generate_state(4),
expected42)
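def _spawn_key_demo():
    # Illustrative sketch (entropy arbitrary): the spawn key participates
    # in the hashing, so identical entropy with different spawn keys must
    # yield different pool states, as the gh-16539 regression test requires.
    base = SeedSequence(42)
    child = SeedSequence(42, spawn_key=(0,))
    assert not np.array_equal(base.generate_state(4),
                              child.generate_state(4))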
| 3,311 | Python | 39.888888 | 79 | 0.681365 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/tests/test_generator_mt19937.py | import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence, RandomState
random = Generator(MT19937())
JUMP_TEST_DATA = [
{
"seed": 0,
"steps": 10,
"initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9},
"jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598},
},
{
"seed":384908324,
"steps":312,
"initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311},
"jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276},
},
{
"seed": [839438204, 980239840, 859048019, 821],
"steps": 511,
"initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510},
"jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475},
},
]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
return request.param
class TestSeed:
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
    def test_noninstantiated_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
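def _binomial_n_zero_demo():
    # Illustrative sketch (p arbitrary): with n == 0 every draw is zero
    # regardless of p, the corner case test_n_zero pins down above.
    rg = Generator(MT19937(0))
    assert rg.binomial(0, 0.7) == 0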
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multinomial_pvals_float32(self):
x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
pvals = x / x.sum()
random = Generator(MT19937(1432985819))
match = r"[\w\s]*pvals array is cast to 64-bit floating"
with pytest.raises(ValueError, match=match):
random.multinomial(1, pvals)
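def _multinomial_counts_demo():
    # Illustrative sketch (seed and pvals arbitrary): pvals describe one
    # draw of n items across the categories, so the counts always sum to n.
    rg = Generator(MT19937(0))
    counts = rg.multinomial(100, np.array([0.2, 0.3, 0.5]))
    assert counts.sum() == 100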
class TestMultivariateHypergeometric:
def setup_method(self):
self.seed = 8675309
def test_argument_validation(self):
# Error cases...
# `colors` must be a 1-d sequence
assert_raises(ValueError, random.multivariate_hypergeometric,
10, 4)
# Negative nsample
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], -1)
# Negative color
assert_raises(ValueError, random.multivariate_hypergeometric,
[-1, 2, 3], 2)
# nsample exceeds sum(colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], 10)
# nsample exceeds sum(colors) (edge case of empty colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[], 1)
# Validation errors associated with very large values in colors.
assert_raises(ValueError, random.multivariate_hypergeometric,
[999999999, 101], 5, 1, 'marginals')
int64_info = np.iinfo(np.int64)
max_int64 = int64_info.max
max_int64_index = max_int64 // int64_info.dtype.itemsize
assert_raises(ValueError, random.multivariate_hypergeometric,
[max_int64_index - 100, 101], 5, 1, 'count')
@pytest.mark.parametrize('method', ['count', 'marginals'])
def test_edge_cases(self, method):
# Set the seed, but in fact, all the results in this test are
# deterministic, so we don't really need this.
random = Generator(MT19937(self.seed))
x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([], 0, method=method)
assert_array_equal(x, [])
x = random.multivariate_hypergeometric([], 0, size=1, method=method)
assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
assert_array_equal(x, [3, 0, 0])
colors = [1, 1, 0, 1, 1]
x = random.multivariate_hypergeometric(colors, sum(colors),
method=method)
assert_array_equal(x, colors)
x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
method=method)
assert_array_equal(x, [[3, 4, 5]]*3)
# Cases for nsample:
# nsample < 10
# 10 <= nsample < colors.sum()/2
# colors.sum()/2 < nsample < colors.sum() - 10
# colors.sum() - 10 < nsample < colors.sum()
@pytest.mark.parametrize('nsample', [8, 25, 45, 55])
@pytest.mark.parametrize('method', ['count', 'marginals'])
@pytest.mark.parametrize('size', [5, (2, 3), 150000])
def test_typical_cases(self, nsample, method, size):
random = Generator(MT19937(self.seed))
colors = np.array([10, 5, 20, 25])
sample = random.multivariate_hypergeometric(colors, nsample, size,
method=method)
if isinstance(size, int):
expected_shape = (size,) + colors.shape
else:
expected_shape = size + colors.shape
assert_equal(sample.shape, expected_shape)
assert_((sample >= 0).all())
assert_((sample <= colors).all())
assert_array_equal(sample.sum(axis=-1),
np.full(size, fill_value=nsample, dtype=int))
if isinstance(size, int) and size >= 100000:
# This sample is large enough to compare its mean to
# the expected values.
assert_allclose(sample.mean(axis=0),
nsample * colors / colors.sum(),
rtol=1e-3, atol=0.005)
def test_repeatability1(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
method='count')
expected = np.array([[2, 1, 2],
[2, 1, 2],
[1, 1, 3],
[2, 0, 3],
[2, 1, 2]])
assert_array_equal(sample, expected)
def test_repeatability2(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 50,
size=5,
method='marginals')
expected = np.array([[ 9, 17, 24],
[ 7, 13, 30],
[ 9, 15, 26],
[ 9, 17, 24],
[12, 14, 24]])
assert_array_equal(sample, expected)
def test_repeatability3(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 12,
size=5,
method='marginals')
expected = np.array([[2, 3, 7],
[5, 3, 4],
[2, 5, 5],
[5, 3, 4],
[1, 5, 6]])
assert_array_equal(sample, expected)
class TestSetState:
def setup_method(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
    def test_gaussian_reset_in_medias_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
# We use a sha256 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3',
'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1',
'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
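    def test_repeatability_hash_sketch(self):
        # Illustrative sketch (added; not part of the original suite):
        # regenerating the int64 entry of `tgt` above.  Assumes a
        # little-endian host; big-endian hosts must byteswap first.
        if sys.byteorder != 'little':
            pytest.skip('sketch assumes a little-endian host')
        val = Generator(MT19937(1234)).integers(0, 6, size=1000,
                                                dtype=np.int64)
        expected = ('14e224389ac4580bfbdccb5697d6190b'
                    '496f91227cf67df60989de3d546389b1')
        assert_(hashlib.sha256(val).hexdigest() == expected)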
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
            # scalar bounds give the reference stream for the broadcast calls
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
@pytest.mark.parametrize(
'bound, expected',
[(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
3769704066, 1170797179, 4108474671])),
(2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
3769704067, 1170797180, 4108474672])),
(2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
1831631863, 1215661561, 3869512430]))]
)
def test_repeatability_32bit_boundary(self, bound, expected):
for size in [None, len(expected)]:
random = Generator(MT19937(1234))
x = random.integers(bound, size=size)
assert_equal(x, expected if size is not None else expected[0])
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[1622936284, 3620788691, 1659384060],
[1417365545, 760222891, 1909653332],
[3788118662, 660249498, 4092002593]],
[[3625610153, 2979601262, 3844162757],
[ 685800658, 120261497, 2694012896],
[1207779440, 1586594375, 3854335050]],
[[3004074748, 2310761796, 3012642217],
[2067714190, 2786677879, 1363865881],
[ 791663441, 1867303284, 2169727960]],
[[1939603804, 1250951100, 298950036],
[1040128489, 3791912209, 3317053765],
[3155528714, 61360675, 2305155588]],
[[ 817688762, 1335621943, 3288952434],
[1770890872, 1102951817, 1957607470],
[3099996017, 798043451, 48334215]]])
for size in [None, (5, 3, 3)]:
random = Generator(MT19937(12345))
x = random.integers([[-1], [0], [1]],
[2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=object)
high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64.
        # `lbnd` (2**63 - 1) is not exactly representable in
        # np.float64 and rounds up to 2**63, which equals the
        # cast `ubnd`, so the check spuriously raised a
        # ValueError.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
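    def test_int64_uint64_float64_rounding_sketch(self):
        # Illustrative sketch (added; not part of the original suite) of
        # the rounding described above: np.int64 max is not exactly
        # representable in np.float64, so casting both bounds to float64
        # for comparison makes them compare equal.
        lbnd = np.int64(np.iinfo(np.int64).max)                  # 2**63 - 1
        ubnd = np.uint64(np.iinfo(np.int64).max) + np.uint64(1)  # 2**63
        assert np.float64(lbnd) == np.float64(ubnd)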
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
# chi2max is the maximum acceptable chi-squared value.
@pytest.mark.slow
@pytest.mark.parametrize('sample_size,high,dtype,chi2max',
[(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
(5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
(10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
(50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
])
def test_integers_small_dtype_chisquared(self, sample_size, high,
dtype, chi2max):
# Regression test for gh-14774.
samples = random.integers(high, size=sample_size, dtype=dtype)
values, counts = np.unique(samples, return_counts=True)
expected = sample_size / high
chi2 = ((counts - expected)**2 / expected).sum()
assert chi2 < chi2max
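    def test_chisquared_statistic_sketch(self):
        # Illustrative sketch (added; not part of the original suite):
        # the statistic above is Pearson's chi-squared for a uniform
        # null, sum((observed - expected)**2 / expected), with
        # high - 1 degrees of freedom.  A tiny worked example:
        counts = np.array([26.0, 24.0, 25.0, 25.0])
        expected = counts.sum() / len(counts)  # 25.0
        chi2 = ((counts - expected)**2 / expected).sum()
        assert_allclose(chi2, 2 / 25)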
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup_method(self):
self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
        # Tests whether integers with endpoint=True can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
@pytest.mark.parametrize('dtype, uint_view_type',
[(np.float32, np.uint32),
(np.float64, np.uint64)])
def test_random_distribution_of_lsb(self, dtype, uint_view_type):
random = Generator(MT19937(self.seed))
sample = random.random(100000, dtype=dtype)
num_ones_in_lsb = np.count_nonzero(sample.view(uint_view_type) & 1)
# The probability of a 1 in the least significant bit is 0.25.
# With a sample size of 100000, the probability that num_ones_in_lsb
# is outside the following range is less than 5e-11.
assert 24100 < num_ones_in_lsb < 25900
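    def test_random_lsb_bounds_sketch(self):
        # Illustrative sketch (added; not part of the original suite):
        # where the 0.25 and the [24100, 25900] window above come from.
        # Only samples in [0.5, 1) keep a uniformly random mantissa LSB;
        # smaller samples are normalized with trailing zero bits, so
        # P(lsb == 1) = 1/2 * 1/2 = 0.25.  The window is then roughly
        # the mean +/- 6.6 binomial standard deviations:
        n, p = 100000, 0.25
        sigma = np.sqrt(n * p * (1 - p))  # ~137
        assert n * p - 6.6 * sigma > 24100 - 10
        assert n * p + 6.6 * sigma < 25900 + 10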
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.sha256(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=1)
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=-1)
assert_array_equal(actual, desired)
def test_shuffle_custom_axis_empty(self):
random = Generator(MT19937(self.seed))
desired = np.array([]).reshape((0, 6))
for axis in (0, 1):
actual = np.array([]).reshape((0, 6))
random.shuffle(actual, axis=axis)
assert_array_equal(actual, desired)
def test_shuffle_axis_nonsquare(self):
y1 = np.arange(20).reshape(2, 10)
y2 = y1.copy()
random = Generator(MT19937(self.seed))
random.shuffle(y1, axis=1)
random = Generator(MT19937(self.seed))
random.shuffle(y2.T)
assert_array_equal(y1, y2)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
arr = np.array(3)
assert_raises(TypeError, random.shuffle, arr)
arr = np.ones((3, 2))
assert_raises(np.AxisError, random.shuffle, arr, 2)
def test_shuffle_not_writeable(self):
random = Generator(MT19937(self.seed))
a = np.zeros(5)
a.flags.writeable = False
with pytest.raises(ValueError, match='read-only'):
random.shuffle(a)
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
bad_x_str = "abcd"
assert_raises(np.AxisError, random.permutation, bad_x_str)
bad_x_float = 1.2
assert_raises(np.AxisError, random.permutation, bad_x_float)
random = Generator(MT19937(self.seed))
integer_val = 10
desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
@pytest.mark.parametrize("dtype", [int, object])
@pytest.mark.parametrize("axis, expected",
[(None, np.array([[3, 7, 0, 9, 10, 11],
[8, 4, 2, 5, 1, 6]])),
(0, np.array([[6, 1, 2, 9, 10, 11],
[0, 7, 8, 3, 4, 5]])),
(1, np.array([[ 5, 3, 4, 0, 2, 1],
[11, 9, 10, 6, 8, 7]]))])
def test_permuted(self, dtype, axis, expected):
random = Generator(MT19937(self.seed))
x = np.arange(12).reshape(2, 6).astype(dtype)
random.permuted(x, axis=axis, out=x)
assert_array_equal(x, expected)
random = Generator(MT19937(self.seed))
x = np.arange(12).reshape(2, 6).astype(dtype)
y = random.permuted(x, axis=axis)
assert y.dtype == dtype
assert_array_equal(y, expected)
def test_permuted_with_strides(self):
random = Generator(MT19937(self.seed))
x0 = np.arange(22).reshape(2, 11)
x1 = x0.copy()
x = x0[:, ::3]
y = random.permuted(x, axis=1, out=x)
expected = np.array([[0, 9, 3, 6],
[14, 20, 11, 17]])
assert_array_equal(y, expected)
x1[:, ::3] = expected
# Verify that the original x0 was modified in-place as expected.
assert_array_equal(x1, x0)
def test_permuted_empty(self):
y = random.permuted([])
assert_array_equal(y, [])
@pytest.mark.parametrize('outshape', [(2, 3), 5])
def test_permuted_out_with_wrong_shape(self, outshape):
a = np.array([1, 2, 3])
out = np.zeros(outshape, dtype=a.dtype)
with pytest.raises(ValueError, match='same shape'):
random.permuted(a, out=out)
def test_permuted_out_with_wrong_type(self):
out = np.zeros((3, 5), dtype=np.int32)
x = np.ones((3, 5))
with pytest.raises(TypeError, match='Cannot cast'):
random.permuted(x, axis=1, out=out)
def test_permuted_not_writeable(self):
x = np.zeros((2, 5))
x.flags.writeable = False
with pytest.raises(ValueError, match='read-only'):
random.permuted(x, axis=1, out=x)
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_dirichlet_small_alpha(self):
eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
alpha = eps * np.array([1., 1.0e-3])
random = Generator(MT19937(self.seed))
actual = random.dirichlet(alpha, size=(3, 2))
expected = np.array([
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]]
])
assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
def test_dirichlet_moderately_small_alpha(self):
        # Use alpha.max() < 0.1 to trigger the stick-breaking code path
alpha = np.array([0.02, 0.04, 0.03])
exact_mean = alpha / alpha.sum()
random = Generator(MT19937(self.seed))
sample = random.dirichlet(alpha, size=20000000)
sample_mean = sample.mean(axis=0)
assert_allclose(sample_mean, exact_mean, rtol=1e-3)
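    def test_dirichlet_stick_breaking_sketch(self):
        # Hedged sketch (added; not part of the original suite) of the
        # beta stick-breaking construction referenced above: draw
        # x_i ~ Beta(alpha_i, alpha_{i+1} + ... + alpha_k) and give each
        # component that fraction of the remaining stick.  This is one
        # standard way to build a Dirichlet sample; the exact code path
        # inside numpy may differ.
        alpha = np.array([0.02, 0.04, 0.03])
        random = Generator(MT19937(self.seed))
        remaining = 1.0
        out = np.empty_like(alpha)
        for i in range(len(alpha) - 1):
            frac = random.beta(alpha[i], alpha[i + 1:].sum())
            out[i] = remaining * frac
            remaining *= 1.0 - frac
        out[-1] = remaining
        assert_allclose(out.sum(), 1.0)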
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[1, 11],
[1, 12],
[11, 17]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_zero(self):
random = Generator(MT19937(self.seed))
assert random.logseries(0) == 1
@pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.])
def test_logseries_exceptions(self, value):
random = Generator(MT19937(self.seed))
with np.errstate(invalid="ignore"):
with pytest.raises(ValueError):
random.logseries(value)
with pytest.raises(ValueError):
# contiguous path:
random.logseries(np.array([value] * 10))
with pytest.raises(ValueError):
# non-contiguous path:
random.logseries(np.array([value] * 10)[::2])
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check the default size; this previously raised a deprecation warning
actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check that non-symmetric covariance input raises an exception
        # when check_valid='raise' with the default svd method.
mean = [0, 0]
cov = [[1, 2], [1, 2]]
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
method='eigh')
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
        # and that it doesn't emit a RuntimeWarning when check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
        # and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise', method='eigh')
# check degenerate samples from singular covariance matrix
cov = [[1, 1], [1, 1]]
if method in ('svd', 'eigh'):
samples = random.multivariate_normal(mean, cov, size=(3, 2),
method=method)
assert_array_almost_equal(samples[..., 0], samples[..., 1],
decimal=6)
else:
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
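    def test_multivariate_normal_factor_sketch(self):
        # Hedged sketch (added; not part of the original suite) of the
        # factorization idea behind the method choices above: if
        # cov = L @ L.T (e.g. a Cholesky factor), then
        # mean + z @ L.T with z ~ N(0, I) has covariance cov.
        random = Generator(MT19937(self.seed))
        mean = np.array([1.0, 2.0])
        cov = np.array([[2.0, 1.0], [1.0, 2.0]])
        L = np.linalg.cholesky(cov)
        z = random.standard_normal(size=(100000, 2))
        s = mean + z @ L.T
        assert np.all(np.abs(np.cov(s.T) - cov) < 0.1)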
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(self, method):
random = Generator(MT19937(self.seed))
n_s = 1000
mean = np.array([1, 2])
cov = np.array([[2, 1], [1, 2]])
s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
s_center = s - mean
cov_emp = (s_center.T @ s_center) / (n_s - 1)
# these are pretty loose and are only designed to detect major errors
assert np.all(np.abs(s_center.mean(-2)) < 0.1)
assert np.all(np.abs(cov_emp - cov) < 0.2)
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_negative_binomial_p0_exception(self):
# Verify that p=0 raises an exception.
with assert_raises(ValueError):
x = random.negative_binomial(1, 0)
def test_negative_binomial_invalid_p_n_combination(self):
# Verify that values of p and n that would result in an overflow
# or infinite loop raise an exception.
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 2**62, 0.1)
assert_raises(ValueError, random.negative_binomial, [2**62], [0.1])
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
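    # Added note (hedged, not part of the original suite): a "nulp" is a
    # unit in the last place; np.spacing(x) gives the size of one ulp at
    # x, so the 30-nulp tolerance above corresponds, near 1.0, to about
    # 30 * np.spacing(1.0) (= 30 * np.finfo(np.float64).eps ~= 6.7e-15).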
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[4.19494429102666, 16.66920198906598],
[3.67184544902662, 17.74695521962917],
[16.27935397855501, 21.08355560691792]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gamma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gamma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
        # (fmax / 1e17) - fmin is within range, so this should not throw.
        # To account for i386 extended precision, where
        # DBL_MAX / 1e17 + DBL_MAX > DBL_MAX, increase fmin a bit.
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
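    def test_uniform_range_overflow_sketch(self):
        # Hedged sketch (added; not part of the original suite): the
        # OverflowErrors above arise because uniform() needs a finite
        # range, conceptually low + (high - low) * u, and high - low
        # overflows float64 for the extreme bounds:
        fmin = np.finfo('float').min
        fmax = np.finfo('float').max
        with np.errstate(over='ignore'):
            assert np.isinf(np.float64(fmax) - np.float64(fmin))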
def test_uniform_zero_range(self):
func = random.uniform
result = func(1.5, 1.5)
assert_allclose(result, 1.5)
result = func([0.0, np.pi], [0.0, np.pi])
assert_allclose(result, [0.0, np.pi])
result = func([[2145.12], [2145.12]], [2145.12, 2145.12])
assert_allclose(result, 2145.12 + np.zeros((2, 2)))
def test_uniform_neg_range(self):
func = random.uniform
assert_raises(ValueError, func, 2, 1)
assert_raises(ValueError, func, [1, 2], [1, 1])
assert_raises(ValueError, func, [[0, 1],[2, 3]], 2)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
        # check for the infinite loop reported in gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
@pytest.mark.parametrize("kappa", [1e4, 1e15])
def test_vonmises_large_kappa(self, kappa):
random = Generator(MT19937(self.seed))
rs = RandomState(random.bit_generator)
state = random.bit_generator.state
random_state_vals = rs.vonmises(0, kappa, size=10)
random.bit_generator.state = state
gen_vals = random.vonmises(0, kappa, size=10)
if kappa < 1e6:
assert_allclose(random_state_vals, gen_vals)
else:
assert np.all(random_state_vals != gen_vals)
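    # Added note (hedged, not part of the original suite): for large
    # kappa the von Mises density concentrates around mu and is well
    # approximated by a (wrapped) normal with sigma = 1 / sqrt(kappa);
    # the divergence from RandomState checked above is consistent with
    # Generator switching to such an approximation for huge kappa.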
@pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2])
@pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15])
def test_vonmises_large_kappa_range(self, mu, kappa):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu, kappa, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup_method(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437, 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array(
[1.1597068009872629,
0.6539188836253857,
1.1981526554349398]
)
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [[1 / 6.] * 6] * 2)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([[5], [20]], [[1 / 6.] * 6] * 2)
desired = np.array([[[0, 0, 2, 1, 2, 0],
[0, 0, 2, 1, 1, 1]],
[[4, 2, 3, 3, 5, 3],
[7, 2, 2, 1, 4, 4]]], dtype=np.int64)
assert_array_equal(actual, desired)
@pytest.mark.parametrize("n", [10,
np.array([10, 10]),
np.array([[[10]], [[10]]])
]
)
def test_multinomial_pval_broadcast(self, n):
random = Generator(MT19937(self.seed))
pvals = np.array([1 / 4] * 4)
actual = random.multinomial(n, pvals)
n_shape = tuple() if isinstance(n, int) else n.shape
expected_shape = n_shape + (4,)
assert actual.shape == expected_shape
pvals = np.vstack([pvals, pvals])
actual = random.multinomial(n, pvals)
expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + (4,)
assert actual.shape == expected_shape
pvals = np.vstack([[pvals], [pvals]])
actual = random.multinomial(n, pvals)
expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1])
assert actual.shape == expected_shape + (4,)
actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape)
assert actual.shape == (3, 2) + expected_shape + (4,)
with pytest.raises(ValueError):
# Ensure that size is not broadcast
actual = random.multinomial(n, pvals, size=(1,) * 6)
def test_invalid_pvals_broadcast(self):
random = Generator(MT19937(self.seed))
pvals = [[1 / 6] * 6, [1 / 4] * 6]
assert_raises(ValueError, random.multinomial, 1, pvals)
assert_raises(ValueError, random.multinomial, 6, 0.5)
def test_empty_outputs(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6)
assert actual.shape == (10, 0, 6, 6)
actual = random.multinomial(12, np.empty((10, 0, 10)))
assert actual.shape == (10, 0, 10)
actual = random.multinomial(np.empty((3, 0, 7), "i8"),
np.empty((3, 0, 7, 4)))
assert actual.shape == (3, 0, 7, 4)
class TestThread:
# make sure each state produces the same sequence even in threads
def setup_method(self):
self.seeds = range(4)
def check_function(self, function, sz):
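# Run ``function`` once per seed in worker threads (filling out1) and once
# serially (filling out2); both orderings must yield identical streams.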
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
def setup_method(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
# Each config contains the initial seed, a number of raw steps
# the sha256 hashes of the initial and the final states' keys and
# the position of the initial and the final state.
# These were produced using the original C implementation.
seed = config["seed"]
steps = config["steps"]
mt19937 = MT19937(seed)
# Burn-in: advance the stream by `steps` raw draws before jumping
mt19937.random_raw(steps)
key = mt19937.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
assert sha256.hexdigest() == config["initial"]["key_sha256"]
jumped = mt19937.jumped()
key = jumped.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
assert sha256.hexdigest() == config["jumped"]["key_sha256"]
def test_broadcast_size_error():
mu = np.ones(3)
sigma = np.ones((4, 3))
size = (10, 4, 2)
assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=size)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(1, 3))
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(4, 1, 1))
# 1 arg
shape = np.ones((4, 3))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=size)
with pytest.raises(ValueError):
random.standard_gamma(shape, size=(3,))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=3)
# Check out
out = np.empty(size)
with pytest.raises(ValueError):
random.standard_gamma(shape, out=out)
# 2 arg
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.multinomial([2, 2], [.3, .7], size=(2, 1))
# 3 arg
a = random.chisquare(5, size=3)
b = random.chisquare(5, size=(4, 3))
c = random.chisquare(5, size=(5, 4, 3))
assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
def test_broadcast_size_scalar():
mu = np.ones(3)
sigma = np.ones(3)
random.normal(mu, sigma, size=3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=2)
def test_ragged_shuffle():
# GH 18142
seq = [[], [], 1]
gen = Generator(MT19937(0))
assert_no_warnings(gen.shuffle, seq)
assert seq == [1, [], []]
@pytest.mark.parametrize("high", [-2, [-2]])
@pytest.mark.parametrize("endpoint", [True, False])
def test_single_arg_integer_exception(high, endpoint):
# GH 14333
gen = Generator(MT19937(0))
msg = 'high < 0' if endpoint else 'high <= 0'
with pytest.raises(ValueError, match=msg):
gen.integers(high, endpoint=endpoint)
msg = 'low > high' if endpoint else 'low >= high'
with pytest.raises(ValueError, match=msg):
gen.integers(-1, high, endpoint=endpoint)
with pytest.raises(ValueError, match=msg):
gen.integers([-1], high, endpoint=endpoint)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
def test_c_contig_req_out(dtype):
# GH 18704
out = np.empty((2, 3), order="F", dtype=dtype)
shape = [1, 2, 3]
with pytest.raises(ValueError, match="Supplied output array"):
random.standard_gamma(shape, out=out, dtype=dtype)
with pytest.raises(ValueError, match="Supplied output array"):
random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dist", [random.standard_normal, random.random])
def test_contig_req_out(dist, order, dtype):
# GH 18704
out = np.empty((2, 3), dtype=dtype, order=order)
variates = dist(out=out, dtype=dtype)
assert variates is out
variates = dist(out=out, dtype=dtype, size=out.shape)
assert variates is out
| 113,856 | Python | 41.106879 | 114 | 0.567559 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_examples/cython/setup.py | #!/usr/bin/env python3
"""
Build the Cython demonstrations of low-level access to NumPy random
Usage: python setup.py build_ext -i
"""
from os.path import dirname, join, abspath
from setuptools import setup
from setuptools.extension import Extension
import numpy as np
from Cython.Build import cythonize
path = dirname(__file__)
src_dir = join(dirname(path), '..', 'src')
defs = [('NPY_NO_DEPRECATED_API', 0)]
inc_path = np.get_include()
# Add paths for npyrandom and npymath libraries:
lib_path = [
abspath(join(np.get_include(), '..', '..', 'random', 'lib')),
abspath(join(np.get_include(), '..', 'lib'))
]
extending = Extension("extending",
sources=[join('.', 'extending.pyx')],
include_dirs=[
np.get_include(),
join(path, '..', '..')
],
define_macros=defs,
)
distributions = Extension("extending_distributions",
sources=[join('.', 'extending_distributions.pyx')],
include_dirs=[inc_path],
library_dirs=lib_path,
libraries=['npyrandom', 'npymath'],
define_macros=defs,
)
extensions = [extending, distributions]
setup(
ext_modules=cythonize(extensions)
)
| 1,401 | Python | 28.829787 | 77 | 0.533191 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_examples/cffi/extending.py | """
Use cffi to access any of the underlying C functions from distributions.h
"""
import os
import numpy as np
import cffi
from .parse import parse_distributions_h
ffi = cffi.FFI()
inc_dir = os.path.join(np.get_include(), 'numpy')
# Basic numpy types
ffi.cdef('''
typedef intptr_t npy_intp;
typedef unsigned char npy_bool;
''')
parse_distributions_h(ffi, inc_dir)
lib = ffi.dlopen(np.random._generator.__file__)
# Compare the distributions.h random_standard_normal_fill to
# Generator.standard_normal
bit_gen = np.random.PCG64()
rng = np.random.Generator(bit_gen)
state = bit_gen.state
interface = rng.bit_generator.cffi
n = 100
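# Allocate a C double[100] buffer owned by cffi and fill it directly through
# the C-level distributions API, bypassing the Python Generator methods.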
vals_cffi = ffi.new('double[%d]' % n)
lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
# reset the state
bit_gen.state = state
vals = rng.standard_normal(n)
for i in range(n):
assert vals[i] == vals_cffi[i]
| 880 | Python | 20.487804 | 73 | 0.714773 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_examples/cffi/parse.py | import os
def parse_distributions_h(ffi, inc_dir):
"""
Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef
Read the function declarations without the "#define ..." macros that will
be filled in when loading the library.
"""
with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
s = []
for line in fid:
# massage the include file
if line.strip().startswith('#'):
continue
s.append(line)
ffi.cdef('\n'.join(s))
with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
s = []
in_skip = 0
ignoring = False
for line in fid:
# check for and remove extern "C" guards
if ignoring:
if line.strip().startswith('#endif'):
ignoring = False
continue
if line.strip().startswith('#ifdef __cplusplus'):
ignoring = True
# massage the include file
if line.strip().startswith('#'):
continue
# skip any inlined function definition
# which starts with 'static NPY_INLINE xxx(...) {'
# and ends with a closing '}'
if line.strip().startswith('static NPY_INLINE'):
in_skip += line.count('{')
continue
elif in_skip > 0:
in_skip += line.count('{')
in_skip -= line.count('}')
continue
# replace defines with their value or remove them
line = line.replace('DECLDIR', '')
line = line.replace('NPY_INLINE', '')
line = line.replace('RAND_INT_TYPE', 'int64_t')
s.append(line)
ffi.cdef('\n'.join(s))
| 1,829 | Python | 31.678571 | 78 | 0.50082 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_examples/numba/extending.py | import numpy as np
import numba as nb
from numpy.random import PCG64
from timeit import timeit
bit_gen = PCG64()
next_d = bit_gen.cffi.next_double
state_addr = bit_gen.cffi.state_address
def normals(n, state):
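    # Marsaglia polar method: sample points uniformly in [-1, 1)^2, reject
    # those outside the unit disc, and transform each accepted pair into two
    # independent standard normal deviates.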
out = np.empty(n)
for i in range((n + 1) // 2):
x1 = 2.0 * next_d(state) - 1.0
x2 = 2.0 * next_d(state) - 1.0
r2 = x1 * x1 + x2 * x2
while r2 >= 1.0 or r2 == 0.0:
x1 = 2.0 * next_d(state) - 1.0
x2 = 2.0 * next_d(state) - 1.0
r2 = x1 * x1 + x2 * x2
f = np.sqrt(-2.0 * np.log(r2) / r2)
out[2 * i] = f * x1
if 2 * i + 1 < n:
out[2 * i + 1] = f * x2
return out
# Compile using Numba
normalsj = nb.jit(normals, nopython=True)
# Numba requires the raw state address, not the state object itself
n = 10000
def numbacall():
return normalsj(n, state_addr)
rg = np.random.Generator(PCG64())
def numpycall():
return rg.normal(size=n)
# Check that the functions work
r1 = numbacall()
r2 = numpycall()
assert r1.shape == (n,)
assert r1.shape == r2.shape
t1 = timeit(numbacall, number=1000)
print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms')
t2 = timeit(numpycall, number=1000)
print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms')
# example 2
next_u32 = bit_gen.ctypes.next_uint32
ctypes_state = bit_gen.ctypes.state
@nb.jit(nopython=True)
def bounded_uint(lb, ub, state):
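    # Rejection sampling: grow ``mask`` into the smallest all-ones bit
    # pattern covering ``delta``, then redraw masked 32-bit words until one
    # falls inside [0, delta].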
mask = delta = ub - lb
mask |= mask >> 1
mask |= mask >> 2
mask |= mask >> 4
mask |= mask >> 8
mask |= mask >> 16
val = next_u32(state) & mask
while val > delta:
val = next_u32(state) & mask
return lb + val
print(bounded_uint(323, 2394691, ctypes_state.value))
@nb.jit(nopython=True)
def bounded_uints(lb, ub, n, state):
out = np.empty(n, dtype=np.uint32)
for i in range(n):
out[i] = bounded_uint(lb, ub, state)
bounded_uints(323, 2394691, 10000000, ctypes_state.value)
| 1,957 | Python | 22.035294 | 68 | 0.594788 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/random/_examples/numba/extending_distributions.py | r"""
Building the required library in this example requires a source distribution
of NumPy or clone of the NumPy git repository since distributions.c is not
included in binary distributions.
On *nix, execute in numpy/random/src/distributions
export PYTHON_VERSION=3.8  # Python version
export PYTHON_INCLUDE=#path to Python's include folder, usually \
${PYTHON_HOME}/include/python${PYTHON_VERSION}m
export NUMPY_INCLUDE=#path to numpy's include folder, usually \
${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
gcc -shared -o libdistributions.so -fPIC distributions.c \
-I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
mv libdistributions.so ../../_examples/numba/
On Windows
rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
set PYTHON_HOME=c:\Anaconda
set PYTHON_VERSION=38
cl.exe /LD .\distributions.c -DDLL_EXPORT \
-I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
-I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
move distributions.dll ../../_examples/numba/
"""
import os
import numba as nb
import numpy as np
from cffi import FFI
from numpy.random import PCG64
ffi = FFI()
if os.path.exists('./distributions.dll'):
lib = ffi.dlopen('./distributions.dll')
elif os.path.exists('./libdistributions.so'):
lib = ffi.dlopen('./libdistributions.so')
else:
raise RuntimeError('Required DLL/so file was not found.')
ffi.cdef("""
double random_standard_normal(void *bitgen_state);
""")
x = PCG64()
xffi = x.cffi
bit_generator = xffi.bit_generator
random_standard_normal = lib.random_standard_normal
def normals(n, bit_generator):
out = np.empty(n)
for i in range(n):
out[i] = random_standard_normal(bit_generator)
return out
normalsj = nb.jit(normals, nopython=True)
# Numba requires a memory address for void *
# Can also get address from x.ctypes.bit_generator.value
bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
norm = normalsj(1000, bit_generator_address)
print(norm[:12])
| 2,034 | Python | 28.92647 | 79 | 0.735497 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/__init__.py | """
============================
Typing (:mod:`numpy.typing`)
============================
.. versionadded:: 1.20
Large parts of the NumPy API have PEP-484-style type annotations. In
addition a number of type aliases are available to users, most prominently
the two below:
- `ArrayLike`: objects that can be converted to arrays
- `DTypeLike`: objects that can be converted to dtypes
.. _typing-extensions: https://pypi.org/project/typing-extensions/
Mypy plugin
-----------
.. versionadded:: 1.21
.. automodule:: numpy.typing.mypy_plugin
.. currentmodule:: numpy.typing
Differences from the runtime NumPy API
--------------------------------------
NumPy is very flexible. Trying to describe the full range of
possibilities statically would result in types that are not very
helpful. For that reason, the typed NumPy API is often stricter than
the runtime NumPy API. This section describes some notable
differences.
ArrayLike
~~~~~~~~~
The `ArrayLike` type tries to avoid creating object arrays. For
example,
.. code-block:: python
>>> np.array(x**2 for x in range(10))
array(<generator object <genexpr> at ...>, dtype=object)
is valid NumPy code which will create a 0-dimensional object
array. Type checkers will complain about the above example when using
the NumPy types however. If you really intended to do the above, then
you can either use a ``# type: ignore`` comment:
.. code-block:: python
>>> np.array(x**2 for x in range(10)) # type: ignore
or explicitly type the array like object as `~typing.Any`:
.. code-block:: python
>>> from typing import Any
>>> array_like: Any = (x**2 for x in range(10))
>>> np.array(array_like)
array(<generator object <genexpr> at ...>, dtype=object)
ndarray
~~~~~~~
It's possible to mutate the dtype of an array at runtime. For example,
the following code is valid:
.. code-block:: python
>>> x = np.array([1, 2])
>>> x.dtype = np.bool_
This sort of mutation is not allowed by the types. Users who want to
write statically typed code should instead use the `numpy.ndarray.view`
method to create a view of the array with a different dtype.
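For example (a minimal sketch; the output shown assumes a 64-bit input
array):

.. code-block:: python

    >>> x = np.array([1, 2], dtype=np.int64)
    >>> x.view(np.uint64)
    array([1, 2], dtype=uint64)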
DTypeLike
~~~~~~~~~
The `DTypeLike` type tries to avoid creation of dtype objects using a
dictionary of fields like the one below:
.. code-block:: python
>>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)})
Although this is valid NumPy code, the type checker will complain about it,
since its usage is discouraged.
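A structured dtype can instead be specified with a list of tuples, for
example (a minimal sketch of the encouraged form):

.. code-block:: python

    >>> x = np.dtype([("field1", np.float64), ("field2", np.int64, (3,))])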
Please see :ref:`Data type objects <arrays.dtypes>` for details.
Number precision
~~~~~~~~~~~~~~~~
The precision of `numpy.number` subclasses is treated as a covariant generic
parameter (see :class:`~NBitBase`), simplifying the annotating of processes
involving precision-based casting.
.. code-block:: python
>>> from typing import TypeVar
>>> import numpy as np
>>> import numpy.typing as npt
>>> T = TypeVar("T", bound=npt.NBitBase)
>>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
... ...
Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
runtime, they're not necessarily considered as sub-classes.
Timedelta64
~~~~~~~~~~~
The `~numpy.timedelta64` class is not considered a subclass of
`~numpy.signedinteger`, the former only inheriting from `~numpy.generic`
during static type checking.
0D arrays
~~~~~~~~~
During runtime numpy aggressively casts any passed 0D arrays into their
corresponding `~numpy.generic` instance. Until the introduction of shape
typing (see :pep:`646`) it is unfortunately not possible to make the
necessary distinction between 0D and >0D arrays. While thus not strictly
correct, all operations that can potentially perform a 0D-array -> scalar
cast are currently annotated as exclusively returning an `ndarray`.
If it is known in advance that an operation *will* perform a
0D-array -> scalar cast, then one can consider manually remedying the
situation with either `typing.cast` or a ``# type: ignore`` comment.
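For example, a minimal sketch (``func`` is a hypothetical callable
annotated as returning an array but known to return a scalar here):

.. code-block:: python

    >>> from typing import cast
    >>> import numpy as np
    >>> scalar = cast(np.float64, func(np.array(1.0)))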
Record array dtypes
~~~~~~~~~~~~~~~~~~~
The dtype of `numpy.recarray`, and the `numpy.rec` functions in general,
can be specified in one of two ways:
* Directly via the ``dtype`` argument.
* With up to five helper arguments that operate via `numpy.format_parser`:
``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``.
These two approaches are currently typed as being mutually exclusive,
*i.e.* if ``dtype`` is specified then one may not specify ``formats``.
While this mutual exclusivity is not (strictly) enforced during runtime,
combining both dtype specifiers can lead to unexpected or even downright
buggy behavior.
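For example, a minimal sketch of the two (mutually exclusive) styles:

.. code-block:: python

    >>> np.rec.fromrecords([(1, 1.5)], dtype=[("a", "i8"), ("b", "f8")])
    >>> np.rec.fromrecords([(1, 1.5)], formats=["i8", "f8"], names=["a", "b"])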
API
---
"""
# NOTE: The API section will be appended with additional entries
# further down in this file
from numpy._typing import (
ArrayLike,
DTypeLike,
NBitBase,
NDArray,
)
__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
if __doc__ is not None:
from numpy._typing._add_docstring import _docstrings
__doc__ += _docstrings
__doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n'
del _docstrings
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| 5,231 | Python | 28.727273 | 79 | 0.699293 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/setup.py | def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('typing', parent_package, top_path)
config.add_subpackage('tests')
config.add_data_dir('tests/data')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 374 | Python | 30.249997 | 62 | 0.708556 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/mypy_plugin.py | """A mypy_ plugin for managing a number of platform-specific annotations.
Its functionality can be split into three distinct parts:
* Assigning the (platform-dependent) precisions of certain `~numpy.number`
subclasses, including the likes of `~numpy.int_`, `~numpy.intp` and
`~numpy.longlong`. See the documentation on
:ref:`scalar types <arrays.scalars.built-in>` for a comprehensive overview
of the affected classes. Without the plugin the precision of all relevant
classes will be inferred as `~typing.Any`.
* Removing all extended-precision `~numpy.number` subclasses that are
unavailable for the platform in question. Most notably this includes the
likes of `~numpy.float128` and `~numpy.complex256`. Without the plugin *all*
extended-precision types will, as far as mypy is concerned, be available
to all platforms.
* Assigning the (platform-dependent) precision of `~numpy.ctypeslib.c_intp`.
Without the plugin the type will default to `ctypes.c_int64`.
.. versionadded:: 1.22
Examples
--------
To enable the plugin, one must add it to their mypy `configuration file`_:
.. code-block:: ini
[mypy]
plugins = numpy.typing.mypy_plugin
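
With the plugin enabled, mypy resolves the platform-dependent precisions
concretely: on a typical 64-bit platform, for example, `~numpy.intp` is
inferred as ``signedinteger[_64Bit]`` rather than ``signedinteger[Any]``
(the exact width is platform-dependent).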
.. _mypy: http://mypy-lang.org/
.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html
"""
from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
try:
import mypy.types
from mypy.types import Type
from mypy.plugin import Plugin, AnalyzeTypeContext
from mypy.nodes import MypyFile, ImportFrom, Statement
from mypy.build import PRI_MED
_HookFunc = Callable[[AnalyzeTypeContext], Type]
MYPY_EX: None | ModuleNotFoundError = None
except ModuleNotFoundError as ex:
MYPY_EX = ex
__all__: list[str] = []
def _get_precision_dict() -> dict[str, str]:
names = [
("_NBitByte", np.byte),
("_NBitShort", np.short),
("_NBitIntC", np.intc),
("_NBitIntP", np.intp),
("_NBitInt", np.int_),
("_NBitLongLong", np.longlong),
("_NBitHalf", np.half),
("_NBitSingle", np.single),
("_NBitDouble", np.double),
("_NBitLongDouble", np.longdouble),
]
ret = {}
for name, typ in names:
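        # dtype.itemsize is in bytes; the ``numpy._<n>Bit`` aliases are
        # keyed by bit width, hence the factor of 8.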
n: int = 8 * typ().dtype.itemsize
ret[f'numpy._typing._nbit.{name}'] = f"numpy._{n}Bit"
return ret
def _get_extended_precision_list() -> list[str]:
extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble]
extended_names = {
"uint128",
"uint256",
"int128",
"int256",
"float80",
"float96",
"float128",
"float256",
"complex160",
"complex192",
"complex256",
"complex512",
}
return [i.__name__ for i in extended_types if i.__name__ in extended_names]
def _get_c_intp_name() -> str:
# Adapted from `np.core._internal._getintp_ctype`
char = np.dtype('p').char
if char == 'i':
return "c_int"
elif char == 'l':
return "c_long"
elif char == 'q':
return "c_longlong"
else:
return "c_long"
#: A dictionary mapping type-aliases in `numpy._typing._nbit` to
#: concrete `numpy.typing.NBitBase` subclasses.
_PRECISION_DICT: Final = _get_precision_dict()
#: A list with the names of all extended precision `np.number` subclasses.
_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list()
#: The name of the ctypes equivalent of `np.intp`
_C_INTP: Final = _get_c_intp_name()
def _hook(ctx: AnalyzeTypeContext) -> Type:
"""Replace a type-alias with a concrete ``NBitBase`` subclass."""
typ, _, api = ctx
name = typ.name.split(".")[-1]
name_new = _PRECISION_DICT[f"numpy._typing._nbit.{name}"]
return api.named_type(name_new)
if TYPE_CHECKING or MYPY_EX is None:
def _index(iterable: Iterable[Statement], id: str) -> int:
"""Identify the first ``ImportFrom`` instance the specified `id`."""
for i, value in enumerate(iterable):
if getattr(value, "id", None) == id:
return i
raise ValueError("Failed to identify a `ImportFrom` instance "
f"with the following id: {id!r}")
def _override_imports(
file: MypyFile,
module: str,
imports: list[tuple[str, None | str]],
) -> None:
"""Override the first `module`-based import with new `imports`."""
# Construct a new `from module import y` statement
import_obj = ImportFrom(module, 0, names=imports)
import_obj.is_top_level = True
# Replace the first `module`-based import statement with `import_obj`
for lst in [file.defs, file.imports]: # type: list[Statement]
i = _index(lst, module)
lst[i] = import_obj
class _NumpyPlugin(Plugin):
"""A mypy plugin for handling versus numpy-specific typing tasks."""
def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc:
"""Set the precision of platform-specific `numpy.number`
subclasses.
For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
"""
if fullname in _PRECISION_DICT:
return _hook
return None
def get_additional_deps(
self, file: MypyFile
) -> list[tuple[int, str, int]]:
"""Handle all import-based overrides.
* Import platform-specific extended-precision `numpy.number`
subclasses (*e.g.* `numpy.float96`, `numpy.float128` and
`numpy.complex256`).
* Import the appropriate `ctypes` equivalent to `numpy.intp`.
"""
ret = [(PRI_MED, file.fullname, -1)]
if file.fullname == "numpy":
_override_imports(
file, "numpy._typing._extended_precision",
imports=[(v, v) for v in _EXTENDED_PRECISION_LIST],
)
elif file.fullname == "numpy.ctypeslib":
_override_imports(
file, "ctypes",
imports=[(_C_INTP, "_c_intp")],
)
return ret
def plugin(version: str) -> type[_NumpyPlugin]:
"""An entry-point for mypy."""
return _NumpyPlugin
else:
def plugin(version: str) -> type[_NumpyPlugin]:
"""An entry-point for mypy."""
raise MYPY_EX
| 6,479 | Python | 31.727273 | 79 | 0.605186 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/test_runtime.py | """Test the runtime usage of `numpy.typing`."""
from __future__ import annotations
import sys
from typing import (
get_type_hints,
Union,
NamedTuple,
get_args,
get_origin,
Any,
)
import pytest
import numpy as np
import numpy.typing as npt
import numpy._typing as _npt
class TypeTup(NamedTuple):
typ: type
args: tuple[type, ...]
origin: None | type
if sys.version_info >= (3, 9):
NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray)
else:
NDArrayTup = TypeTup(npt.NDArray, (), None)
TYPES = {
"ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union),
"DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union),
"NBitBase": TypeTup(npt.NBitBase, (), None),
"NDArray": NDArrayTup,
}
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
def test_get_args(name: type, tup: TypeTup) -> None:
"""Test `typing.get_args`."""
typ, ref = tup.typ, tup.args
out = get_args(typ)
assert out == ref
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
def test_get_origin(name: type, tup: TypeTup) -> None:
"""Test `typing.get_origin`."""
typ, ref = tup.typ, tup.origin
out = get_origin(typ)
assert out == ref
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
def test_get_type_hints(name: type, tup: TypeTup) -> None:
"""Test `typing.get_type_hints`."""
typ = tup.typ
# Explicitly set `__annotations__` in order to circumvent the
# stringification performed by `from __future__ import annotations`
def func(a): pass
func.__annotations__ = {"a": typ, "return": None}
out = get_type_hints(func)
ref = {"a": typ, "return": type(None)}
assert out == ref
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
def test_get_type_hints_str(name: type, tup: TypeTup) -> None:
"""Test `typing.get_type_hints` with string-representation of types."""
typ_str, typ = f"npt.{name}", tup.typ
# Explicitly set `__annotations__` in order to circumvent the
# stringification performed by `from __future__ import annotations`
def func(a): pass
func.__annotations__ = {"a": typ_str, "return": None}
out = get_type_hints(func)
ref = {"a": typ, "return": type(None)}
assert out == ref
def test_keys() -> None:
"""Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced."""
keys = TYPES.keys()
ref = set(npt.__all__)
assert keys == ref
PROTOCOLS: dict[str, tuple[type[Any], object]] = {
"_SupportsDType": (_npt._SupportsDType, np.int64(1)),
"_SupportsArray": (_npt._SupportsArray, np.arange(10)),
"_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)),
"_NestedSequence": (_npt._NestedSequence, [1]),
}
@pytest.mark.parametrize("cls,obj", PROTOCOLS.values(), ids=PROTOCOLS.keys())
class TestRuntimeProtocol:
def test_isinstance(self, cls: type[Any], obj: object) -> None:
assert isinstance(obj, cls)
assert not isinstance(None, cls)
def test_issubclass(self, cls: type[Any], obj: object) -> None:
if cls is _npt._SupportsDType:
pytest.xfail(
"Protocols with non-method members don't support issubclass()"
)
assert issubclass(type(obj), cls)
assert not issubclass(type(None), cls)
| 3,375 | Python | 28.614035 | 78 | 0.632296 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/test_generic_alias.py | from __future__ import annotations
import sys
import copy
import types
import pickle
import weakref
from typing import TypeVar, Any, Union, Callable
import pytest
import numpy as np
from numpy._typing._generic_alias import _GenericAlias
from typing_extensions import Unpack
ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
T1 = TypeVar("T1")
T2 = TypeVar("T2")
DType = _GenericAlias(np.dtype, (ScalarType,))
NDArray = _GenericAlias(np.ndarray, (Any, DType))
# NOTE: The `npt._GenericAlias` *class* isn't quite stable on python >=3.11.
# This is not a problem during runtime (as it's 3.8-exclusive), but we still
# need it for the >=3.9 in order to verify its semantics match
# `types.GenericAlias` replacement. xref numpy/numpy#21526
if sys.version_info >= (3, 9):
DType_ref = types.GenericAlias(np.dtype, (ScalarType,))
NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref))
FuncType = Callable[["_GenericAlias | types.GenericAlias"], Any]
else:
DType_ref = Any
NDArray_ref = Any
FuncType = Callable[["_GenericAlias"], Any]
GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS)
BUFFER = np.array([1], dtype=np.int64)
BUFFER.setflags(write=False)
def _get_subclass_mro(base: type) -> tuple[type, ...]:
class Subclass(base): # type: ignore[misc,valid-type]
pass
return Subclass.__mro__[1:]
class TestGenericAlias:
"""Tests for `numpy._typing._generic_alias._GenericAlias`."""
@pytest.mark.parametrize("name,func", [
("__init__", lambda n: n),
("__init__", lambda n: _GenericAlias(np.ndarray, Any)),
("__init__", lambda n: _GenericAlias(np.ndarray, (Any,))),
("__init__", lambda n: _GenericAlias(np.ndarray, (Any, Any))),
("__init__", lambda n: _GenericAlias(np.ndarray, T1)),
("__init__", lambda n: _GenericAlias(np.ndarray, (T1,))),
("__init__", lambda n: _GenericAlias(np.ndarray, (T1, T2))),
("__origin__", lambda n: n.__origin__),
("__args__", lambda n: n.__args__),
("__parameters__", lambda n: n.__parameters__),
("__mro_entries__", lambda n: n.__mro_entries__([object])),
("__hash__", lambda n: hash(n)),
("__repr__", lambda n: repr(n)),
("__getitem__", lambda n: n[np.float64]),
("__getitem__", lambda n: n[ScalarType][np.float64]),
("__getitem__", lambda n: n[Union[np.int64, ScalarType]][np.float64]),
("__getitem__", lambda n: n[Union[T1, T2]][np.float32, np.float64]),
("__eq__", lambda n: n == n),
("__ne__", lambda n: n != np.ndarray),
("__call__", lambda n: n((1,), np.int64, BUFFER)),
("__call__", lambda n: n(shape=(1,), dtype=np.int64, buffer=BUFFER)),
("subclassing", lambda n: _get_subclass_mro(n)),
("pickle", lambda n: n == pickle.loads(pickle.dumps(n))),
])
def test_pass(self, name: str, func: FuncType) -> None:
"""Compare `types.GenericAlias` with its numpy-based backport.
Check whether ``func`` runs as intended and that both `GenericAlias`
and `_GenericAlias` return the same result.
"""
value = func(NDArray)
if sys.version_info >= (3, 9):
value_ref = func(NDArray_ref)
assert value == value_ref
@pytest.mark.parametrize("name,func", [
("__copy__", lambda n: n == copy.copy(n)),
("__deepcopy__", lambda n: n == copy.deepcopy(n)),
])
def test_copy(self, name: str, func: FuncType) -> None:
value = func(NDArray)
# xref bpo-45167
GE_398 = (
sys.version_info[:2] == (3, 9) and sys.version_info >= (3, 9, 8)
)
if GE_398 or sys.version_info >= (3, 10, 1):
value_ref = func(NDArray_ref)
assert value == value_ref
def test_dir(self) -> None:
value = dir(NDArray)
if sys.version_info < (3, 9):
return
# A number attributes only exist in `types.GenericAlias` in >= 3.11
if sys.version_info < (3, 11, 0, "beta", 3):
value.remove("__typing_unpacked_tuple_args__")
if sys.version_info < (3, 11, 0, "beta", 1):
value.remove("__unpacked__")
assert value == dir(NDArray_ref)
@pytest.mark.parametrize("name,func,dev_version", [
("__iter__", lambda n: len(list(n)), ("beta", 1)),
("__iter__", lambda n: next(iter(n)), ("beta", 1)),
("__unpacked__", lambda n: n.__unpacked__, ("beta", 1)),
("Unpack", lambda n: Unpack[n], ("beta", 1)),
# The right operand should now have `__unpacked__ = True`,
# and they are thus now longer equivalent
("__ne__", lambda n: n != next(iter(n)), ("beta", 1)),
# >= beta3
("__typing_unpacked_tuple_args__",
lambda n: n.__typing_unpacked_tuple_args__, ("beta", 3)),
# >= beta4
("__class__", lambda n: n.__class__ == type(n), ("beta", 4)),
])
def test_py311_features(
self,
name: str,
func: FuncType,
dev_version: tuple[str, int],
) -> None:
"""Test Python 3.11 features."""
value = func(NDArray)
if sys.version_info >= (3, 11, 0, *dev_version):
value_ref = func(NDArray_ref)
assert value == value_ref
def test_weakref(self) -> None:
"""Test ``__weakref__``."""
value = weakref.ref(NDArray)()
if sys.version_info >= (3, 9, 1): # xref bpo-42332
value_ref = weakref.ref(NDArray_ref)()
assert value == value_ref
@pytest.mark.parametrize("name", GETATTR_NAMES)
def test_getattr(self, name: str) -> None:
"""Test that `getattr` wraps around the underlying type,
aka ``__origin__``.
"""
value = getattr(NDArray, name)
value_ref1 = getattr(np.ndarray, name)
if sys.version_info >= (3, 9):
value_ref2 = getattr(NDArray_ref, name)
assert value == value_ref1 == value_ref2
else:
assert value == value_ref1
@pytest.mark.parametrize("name,exc_type,func", [
("__getitem__", TypeError, lambda n: n[()]),
("__getitem__", TypeError, lambda n: n[Any, Any]),
("__getitem__", TypeError, lambda n: n[Any][Any]),
("isinstance", TypeError, lambda n: isinstance(np.array(1), n)),
("issublass", TypeError, lambda n: issubclass(np.ndarray, n)),
("setattr", AttributeError, lambda n: setattr(n, "__origin__", int)),
("setattr", AttributeError, lambda n: setattr(n, "test", int)),
("getattr", AttributeError, lambda n: getattr(n, "test")),
])
def test_raise(
self,
name: str,
exc_type: type[BaseException],
func: FuncType,
) -> None:
"""Test operations that are supposed to raise."""
with pytest.raises(exc_type):
func(NDArray)
if sys.version_info >= (3, 9):
with pytest.raises(exc_type):
func(NDArray_ref)
| 7,030 | Python | 36.201058 | 78 | 0.559744 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/test_isfile.py | import os
from pathlib import Path
import numpy as np
from numpy.testing import assert_
ROOT = Path(np.__file__).parents[0]
FILES = [
ROOT / "py.typed",
ROOT / "__init__.pyi",
ROOT / "ctypeslib.pyi",
ROOT / "core" / "__init__.pyi",
ROOT / "distutils" / "__init__.pyi",
ROOT / "f2py" / "__init__.pyi",
ROOT / "fft" / "__init__.pyi",
ROOT / "lib" / "__init__.pyi",
ROOT / "linalg" / "__init__.pyi",
ROOT / "ma" / "__init__.pyi",
ROOT / "matrixlib" / "__init__.pyi",
ROOT / "polynomial" / "__init__.pyi",
ROOT / "random" / "__init__.pyi",
ROOT / "testing" / "__init__.pyi",
]
class TestIsFile:
def test_isfile(self):
"""Test if all ``.pyi`` files are properly installed."""
for file in FILES:
assert_(os.path.isfile(file))
| 812 | Python | 25.225806 | 64 | 0.524631 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/test_typing.py | from __future__ import annotations
import importlib.util
import itertools
import os
import re
import shutil
from collections import defaultdict
from collections.abc import Iterator
from typing import IO, TYPE_CHECKING
import pytest
import numpy as np
import numpy.typing as npt
from numpy.typing.mypy_plugin import (
_PRECISION_DICT,
_EXTENDED_PRECISION_LIST,
_C_INTP,
)
try:
from mypy import api
except ImportError:
NO_MYPY = True
else:
NO_MYPY = False
if TYPE_CHECKING:
# We need this as annotation, but it's located in a private namespace.
# As a compromise, do *not* import it during runtime
from _pytest.mark.structures import ParameterSet
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
MISC_DIR = os.path.join(DATA_DIR, "misc")
MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
#: A dictionary with file names as keys and lists of the mypy stdout as values.
#: To-be populated by `run_mypy`.
OUTPUT_MYPY: dict[str, list[str]] = {}
def _key_func(key: str) -> str:
"""Split at the first occurrence of the ``:`` character.
Windows drive-letters (*e.g.* ``C:``) are ignored herein.
"""
drive, tail = os.path.splitdrive(key)
return os.path.join(drive, tail.split(":", 1)[0])
def _strip_filename(msg: str) -> str:
"""Strip the filename from a mypy message."""
_, tail = os.path.splitdrive(msg)
return tail.split(":", 1)[-1]
def strip_func(match: re.Match[str]) -> str:
"""`re.sub` helper function for stripping module names."""
return match.groups()[1]
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.fixture(scope="module", autouse=True)
def run_mypy() -> None:
"""Clears the cache and run mypy before running any of the typing tests.
The mypy results are cached in `OUTPUT_MYPY` for further use.
The cache refresh can be skipped using
NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
"""
if (
os.path.isdir(CACHE_DIR)
and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True))
):
shutil.rmtree(CACHE_DIR)
for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
# Run mypy
stdout, stderr, exit_code = api.run([
"--config-file",
MYPY_INI,
"--cache-dir",
CACHE_DIR,
directory,
])
if stderr:
pytest.fail(f"Unexpected mypy standard error\n\n{stderr}")
elif exit_code not in {0, 1}:
pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}")
stdout = stdout.replace('*', '')
# Parse the output
iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
def get_test_cases(directory: str) -> Iterator[ParameterSet]:
for root, _, files in os.walk(directory):
for fname in files:
short_fname, ext = os.path.splitext(fname)
if ext in (".pyi", ".py"):
fullpath = os.path.join(root, fname)
yield pytest.param(fullpath, id=short_fname)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path) -> None:
# Alias `OUTPUT_MYPY` so that it appears in the local namespace
output_mypy = OUTPUT_MYPY
if path in output_mypy:
msg = "Unexpected mypy output\n\n"
msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
raise AssertionError(msg)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path: str) -> None:
__tracebackhide__ = True
with open(path) as fin:
lines = fin.readlines()
errors = defaultdict(lambda: "")
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
error_line = _strip_filename(error_line).split("\n", 1)[0]
match = re.match(
r"(?P<lineno>\d+): (error|note): .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected error line format: {error_line}")
lineno = int(match.group('lineno'))
errors[lineno] += f'{error_line}\n'
for i, line in enumerate(lines):
lineno = i + 1
if (
line.startswith('#')
or (" E:" not in line and lineno not in errors)
):
continue
target_line = lines[lineno - 1]
if "# E:" in target_line:
expression, _, marker = target_line.partition(" # E: ")
expected_error = errors[lineno].strip()
marker = marker.strip()
_test_fail(path, expression, marker, expected_error, lineno)
else:
pytest.fail(
f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}"
)
_FAIL_MSG1 = """Extra error at line {}
Expression: {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expression: {}
Expected error: {!r}
Observed error: {!r}
"""
def _test_fail(
path: str,
expression: str,
error: str,
expected_error: None | str,
lineno: int,
) -> None:
if expected_error is None:
raise AssertionError(_FAIL_MSG1.format(lineno, expression, error))
elif error not in expected_error:
raise AssertionError(_FAIL_MSG2.format(
lineno, expression, expected_error, error
))
def _construct_ctypes_dict() -> dict[str, str]:
dct = {
"ubyte": "c_ubyte",
"ushort": "c_ushort",
"uintc": "c_uint",
"uint": "c_ulong",
"ulonglong": "c_ulonglong",
"byte": "c_byte",
"short": "c_short",
"intc": "c_int",
"int_": "c_long",
"longlong": "c_longlong",
"single": "c_float",
"double": "c_double",
"longdouble": "c_longdouble",
}
# Match `ctypes` names to the first ctypes type with a given kind and
# precision, e.g. {"c_double": "c_double", "c_longdouble": "c_double"}
# if both types represent 64-bit floats.
    # In this context "first" is defined by the order of `dct`.
ret = {}
visited: dict[tuple[str, int], str] = {}
for np_name, ct_name in dct.items():
np_scalar = getattr(np, np_name)()
# Find the first `ctypes` type for a given `kind`/`itemsize` combo
key = (np_scalar.dtype.kind, np_scalar.dtype.itemsize)
ret[ct_name] = visited.setdefault(key, f"ctypes.{ct_name}")
return ret
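# Illustrative sketch of the deduplication above (results are platform
# dependent): on a typical 64-bit Linux build `int_` (c_long) and
# `longlong` (c_longlong) share kind "i" and itemsize 8, so both
# "c_long" and "c_longlong" map to "ctypes.c_long", the first entry
# encountered in `dct`.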
def _construct_format_dict() -> dict[str, str]:
dct = {k.split(".")[-1]: v.replace("numpy", "numpy._typing") for
k, v in _PRECISION_DICT.items()}
return {
"uint8": "numpy.unsignedinteger[numpy._typing._8Bit]",
"uint16": "numpy.unsignedinteger[numpy._typing._16Bit]",
"uint32": "numpy.unsignedinteger[numpy._typing._32Bit]",
"uint64": "numpy.unsignedinteger[numpy._typing._64Bit]",
"uint128": "numpy.unsignedinteger[numpy._typing._128Bit]",
"uint256": "numpy.unsignedinteger[numpy._typing._256Bit]",
"int8": "numpy.signedinteger[numpy._typing._8Bit]",
"int16": "numpy.signedinteger[numpy._typing._16Bit]",
"int32": "numpy.signedinteger[numpy._typing._32Bit]",
"int64": "numpy.signedinteger[numpy._typing._64Bit]",
"int128": "numpy.signedinteger[numpy._typing._128Bit]",
"int256": "numpy.signedinteger[numpy._typing._256Bit]",
"float16": "numpy.floating[numpy._typing._16Bit]",
"float32": "numpy.floating[numpy._typing._32Bit]",
"float64": "numpy.floating[numpy._typing._64Bit]",
"float80": "numpy.floating[numpy._typing._80Bit]",
"float96": "numpy.floating[numpy._typing._96Bit]",
"float128": "numpy.floating[numpy._typing._128Bit]",
"float256": "numpy.floating[numpy._typing._256Bit]",
"complex64": ("numpy.complexfloating"
"[numpy._typing._32Bit, numpy._typing._32Bit]"),
"complex128": ("numpy.complexfloating"
"[numpy._typing._64Bit, numpy._typing._64Bit]"),
"complex160": ("numpy.complexfloating"
"[numpy._typing._80Bit, numpy._typing._80Bit]"),
"complex192": ("numpy.complexfloating"
"[numpy._typing._96Bit, numpy._typing._96Bit]"),
"complex256": ("numpy.complexfloating"
"[numpy._typing._128Bit, numpy._typing._128Bit]"),
"complex512": ("numpy.complexfloating"
"[numpy._typing._256Bit, numpy._typing._256Bit]"),
"ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]",
"ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]",
"uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]",
"uintp": f"numpy.unsignedinteger[{dct['_NBitIntP']}]",
"uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]",
"ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]",
"byte": f"numpy.signedinteger[{dct['_NBitByte']}]",
"short": f"numpy.signedinteger[{dct['_NBitShort']}]",
"intc": f"numpy.signedinteger[{dct['_NBitIntC']}]",
"intp": f"numpy.signedinteger[{dct['_NBitIntP']}]",
"int_": f"numpy.signedinteger[{dct['_NBitInt']}]",
"longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]",
"half": f"numpy.floating[{dct['_NBitHalf']}]",
"single": f"numpy.floating[{dct['_NBitSingle']}]",
"double": f"numpy.floating[{dct['_NBitDouble']}]",
"longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]",
"csingle": ("numpy.complexfloating"
f"[{dct['_NBitSingle']}, {dct['_NBitSingle']}]"),
"cdouble": ("numpy.complexfloating"
f"[{dct['_NBitDouble']}, {dct['_NBitDouble']}]"),
"clongdouble": (
"numpy.complexfloating"
f"[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]"
),
# numpy.typing
"_NBitInt": dct['_NBitInt'],
# numpy.ctypeslib
"c_intp": f"ctypes.{_C_INTP}"
}
#: A dictionary with all supported format keys (as keys)
#: and matching values.
FORMAT_DICT: dict[str, str] = _construct_format_dict()
FORMAT_DICT.update(_construct_ctypes_dict())
def _parse_reveals(file: IO[str]) -> tuple[npt.NDArray[np.str_], list[str]]:
"""Extract and parse all ``" # E: "`` comments from the passed
file-like object.
All format keys will be substituted for their respective value
from `FORMAT_DICT`, *e.g.* ``"{float64}"`` becomes
``"numpy.floating[numpy._typing._64Bit]"``.
"""
string = file.read().replace("*", "")
# Grab all `# E:`-based comments and matching expressions
expression_array, _, comments_array = np.char.partition(
string.split("\n"), sep=" # E: "
).T
comments = "/n".join(comments_array)
# Only search for the `{*}` pattern within comments, otherwise
# there is the risk of accidentally grabbing dictionaries and sets
key_set = set(re.findall(r"\{(.*?)\}", comments))
kwargs = {
k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for
k in key_set
}
fmt_str = comments.format(**kwargs)
return expression_array, fmt_str.split("/n")
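# Illustrative sketch (assumed input, not from the test data): a line such as
# `reveal_type(x)  # E: {float64}` is partitioned into the expression
# "reveal_type(x) " and the comment "{float64}", which then formats to
# "numpy.floating[numpy._typing._64Bit]" via FORMAT_DICT.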
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path: str) -> None:
"""Validate that mypy correctly infers the return-types of
the expressions in `path`.
"""
__tracebackhide__ = True
with open(path) as fin:
expression_array, reveal_list = _parse_reveals(fin)
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
error_line = _strip_filename(error_line)
match = re.match(
r"(?P<lineno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group('lineno')) - 1
assert "Revealed type is" in error_line
marker = reveal_list[lineno]
expression = expression_array[lineno]
_test_reveal(path, expression, marker, error_line, 1 + lineno)
_REVEAL_MSG = """Reveal mismatch at line {}
Expression: {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
_STRIP_PATTERN = re.compile(r"(\w+\.)+(\w+)")
def _test_reveal(
path: str,
expression: str,
reveal: str,
expected_reveal: str,
lineno: int,
) -> None:
"""Error-reporting helper function for `test_reveal`."""
stripped_reveal = _STRIP_PATTERN.sub(strip_func, reveal)
stripped_expected_reveal = _STRIP_PATTERN.sub(strip_func, expected_reveal)
if stripped_reveal not in stripped_expected_reveal:
raise AssertionError(
_REVEAL_MSG.format(lineno,
expression,
stripped_expected_reveal,
stripped_reveal)
)
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_code_runs(path: str) -> None:
"""Validate that the code in `path` properly during runtime."""
path_without_extension, _ = os.path.splitext(path)
dirname, filename = path.split(os.sep)[-2:]
spec = importlib.util.spec_from_file_location(
f"{dirname}.{filename}", path
)
assert spec is not None
assert spec.loader is not None
test_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(test_module)
LINENO_MAPPING = {
3: "uint128",
4: "uint256",
6: "int128",
7: "int256",
9: "float80",
10: "float96",
11: "float128",
12: "float256",
14: "complex160",
15: "complex192",
16: "complex256",
17: "complex512",
}
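# NOTE (clarifying comment; an assumption about the fixture layout): the keys
# above are the 1-based line numbers within `misc/extended_precision.pyi` at
# which each extended-precision alias is exercised.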
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
def test_extended_precision() -> None:
path = os.path.join(MISC_DIR, "extended_precision.pyi")
output_mypy = OUTPUT_MYPY
assert path in output_mypy
with open(path, "r") as f:
expression_list = f.readlines()
for _msg in output_mypy[path]:
*_, _lineno, msg_typ, msg = _msg.split(":")
msg = _strip_filename(msg)
lineno = int(_lineno)
expression = expression_list[lineno - 1].rstrip("\n")
msg_typ = msg_typ.strip()
assert msg_typ in {"error", "note"}
if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST:
if msg_typ == "error":
raise ValueError(f"Unexpected reveal line format: {lineno}")
else:
marker = FORMAT_DICT[LINENO_MAPPING[lineno]]
_test_reveal(path, expression, marker, msg, lineno)
else:
if msg_typ == "error":
marker = "Module has no attribute"
_test_fail(path, expression, marker, msg, lineno)
| 15,312 | Python | 32.58114 | 79 | 0.597309 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/multiarray.py | import numpy as np
import numpy.typing as npt
AR_f8: npt.NDArray[np.float64] = np.array([1.0])
AR_i4 = np.array([1], dtype=np.int32)
AR_u1 = np.array([1], dtype=np.uint8)
AR_LIKE_f = [1.5]
AR_LIKE_i = [1]
b_f8 = np.broadcast(AR_f8)
b_i4_f8_f8 = np.broadcast(AR_i4, AR_f8, AR_f8)
next(b_f8)
b_f8.reset()
b_f8.index
b_f8.iters
b_f8.nd
b_f8.ndim
b_f8.numiter
b_f8.shape
b_f8.size
next(b_i4_f8_f8)
b_i4_f8_f8.reset()
b_i4_f8_f8.ndim
b_i4_f8_f8.index
b_i4_f8_f8.iters
b_i4_f8_f8.nd
b_i4_f8_f8.numiter
b_i4_f8_f8.shape
b_i4_f8_f8.size
np.inner(AR_f8, AR_i4)
np.where([True, True, False])
np.where([True, True, False], 1, 0)
np.lexsort([0, 1, 2])
np.can_cast(np.dtype("i8"), int)
np.can_cast(AR_f8, "f8")
np.can_cast(AR_f8, np.complex128, casting="unsafe")
np.min_scalar_type([1])
np.min_scalar_type(AR_f8)
np.result_type(int, AR_i4)
np.result_type(AR_f8, AR_u1)
np.result_type(AR_f8, np.complex128)
np.dot(AR_LIKE_f, AR_i4)
np.dot(AR_u1, 1)
np.dot(1.5j, 1)
np.dot(AR_u1, 1, out=AR_f8)
np.vdot(AR_LIKE_f, AR_i4)
np.vdot(AR_u1, 1)
np.vdot(1.5j, 1)
np.bincount(AR_i4)
np.copyto(AR_f8, [1.6])
np.putmask(AR_f8, [True], 1.5)
np.packbits(AR_i4)
np.packbits(AR_u1)
np.unpackbits(AR_u1)
np.shares_memory(1, 2)
np.shares_memory(AR_f8, AR_f8, max_work=1)
np.may_share_memory(1, 2)
np.may_share_memory(AR_f8, AR_f8, max_work=1)
| 1,331 | Python | 16.298701 | 51 | 0.658152 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/array_like.py | from __future__ import annotations
from typing import Any
import numpy as np
from numpy._typing import ArrayLike, _SupportsArray
x1: ArrayLike = True
x2: ArrayLike = 5
x3: ArrayLike = 1.0
x4: ArrayLike = 1 + 1j
x5: ArrayLike = np.int8(1)
x6: ArrayLike = np.float64(1)
x7: ArrayLike = np.complex128(1)
x8: ArrayLike = np.array([1, 2, 3])
x9: ArrayLike = [1, 2, 3]
x10: ArrayLike = (1, 2, 3)
x11: ArrayLike = "foo"
x12: ArrayLike = memoryview(b'foo')
class A:
def __array__(self, dtype: None | np.dtype[Any] = None) -> np.ndarray:
return np.array([1, 2, 3])
x13: ArrayLike = A()
scalar: _SupportsArray = np.int64(1)
scalar.__array__()
array: _SupportsArray = np.array(1)
array.__array__()
a: _SupportsArray = A()
a.__array__()
a.__array__()
# Escape hatch for when you mean to make something like an object
# array.
object_array_scalar: Any = (i for i in range(10))
np.array(object_array_scalar)
| 916 | Python | 20.833333 | 74 | 0.662664 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/dtype.py | import numpy as np
dtype_obj = np.dtype(np.str_)
void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)])
np.dtype(dtype=np.int64)
np.dtype(int)
np.dtype("int")
np.dtype(None)
np.dtype((int, 2))
np.dtype((int, (1,)))
np.dtype({"names": ["a", "b"], "formats": [int, float]})
np.dtype({"names": ["a"], "formats": [int], "titles": [object]})
np.dtype({"names": ["a"], "formats": [int], "titles": [object()]})
np.dtype([("name", np.unicode_, 16), ("grades", np.float64, (2,)), ("age", "int32")])
np.dtype(
{
"names": ["a", "b"],
"formats": [int, float],
"itemsize": 9,
"aligned": False,
"titles": ["x", "y"],
"offsets": [0, 1],
}
)
np.dtype((np.float_, float))
class Test:
dtype = np.dtype(float)
np.dtype(Test())
# Methods and attributes
dtype_obj.base
dtype_obj.subdtype
dtype_obj.newbyteorder()
dtype_obj.type
dtype_obj.name
dtype_obj.names
dtype_obj * 0
dtype_obj * 2
0 * dtype_obj
2 * dtype_obj
void_dtype_obj["f0"]
void_dtype_obj[0]
void_dtype_obj[["f0", "f1"]]
void_dtype_obj[["f0"]]
| 1,073 | Python | 17.517241 | 85 | 0.572227 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/lib_utils.py | from __future__ import annotations
from io import StringIO
import numpy as np
FILE = StringIO()
AR = np.arange(10, dtype=np.float64)
def func(a: int) -> bool: ...
np.deprecate(func)
np.deprecate()
np.deprecate_with_doc("test")
np.deprecate_with_doc(None)
np.byte_bounds(AR)
np.byte_bounds(np.float64())
np.info(1, output=FILE)
np.source(np.interp, output=FILE)
np.lookfor("binary representation", output=FILE)
| 420 | Python | 15.192307 | 48 | 0.719048 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/ufunclike.py | from __future__ import annotations
from typing import Any
import numpy as np
class Object:
def __ceil__(self) -> Object:
return self
def __floor__(self) -> Object:
return self
def __ge__(self, value: object) -> bool:
return True
def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]:
ret = np.empty((), dtype=object)
ret[()] = self
return ret
AR_LIKE_b = [True, True, False]
AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
AR_LIKE_i = [1, 2, 3]
AR_LIKE_f = [1.0, 2.0, 3.0]
AR_LIKE_O = [Object(), Object(), Object()]
AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5")
np.fix(AR_LIKE_b)
np.fix(AR_LIKE_u)
np.fix(AR_LIKE_i)
np.fix(AR_LIKE_f)
np.fix(AR_LIKE_O)
np.fix(AR_LIKE_f, out=AR_U)
np.isposinf(AR_LIKE_b)
np.isposinf(AR_LIKE_u)
np.isposinf(AR_LIKE_i)
np.isposinf(AR_LIKE_f)
np.isposinf(AR_LIKE_f, out=AR_U)
np.isneginf(AR_LIKE_b)
np.isneginf(AR_LIKE_u)
np.isneginf(AR_LIKE_i)
np.isneginf(AR_LIKE_f)
np.isneginf(AR_LIKE_f, out=AR_U)
| 1,039 | Python | 21.127659 | 66 | 0.616939 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/index_tricks.py | from __future__ import annotations
from typing import Any
import numpy as np
AR_LIKE_b = [[True, True], [True, True]]
AR_LIKE_i = [[1, 2], [3, 4]]
AR_LIKE_f = [[1.0, 2.0], [3.0, 4.0]]
AR_LIKE_U = [["1", "2"], ["3", "4"]]
AR_i8: np.ndarray[Any, np.dtype[np.int64]] = np.array(AR_LIKE_i, dtype=np.int64)
np.ndenumerate(AR_i8)
np.ndenumerate(AR_LIKE_f)
np.ndenumerate(AR_LIKE_U)
np.ndenumerate(AR_i8).iter
np.ndenumerate(AR_LIKE_f).iter
np.ndenumerate(AR_LIKE_U).iter
next(np.ndenumerate(AR_i8))
next(np.ndenumerate(AR_LIKE_f))
next(np.ndenumerate(AR_LIKE_U))
iter(np.ndenumerate(AR_i8))
iter(np.ndenumerate(AR_LIKE_f))
iter(np.ndenumerate(AR_LIKE_U))
iter(np.ndindex(1, 2, 3))
next(np.ndindex(1, 2, 3))
np.unravel_index([22, 41, 37], (7, 6))
np.unravel_index([31, 41, 13], (7, 6), order='F')
np.unravel_index(1621, (6, 7, 8, 9))
np.ravel_multi_index(AR_LIKE_i, (7, 6))
np.ravel_multi_index(AR_LIKE_i, (7, 6), order='F')
np.ravel_multi_index(AR_LIKE_i, (4, 6), mode='clip')
np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=('clip', 'wrap'))
np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))
np.mgrid[1:1:2]
np.mgrid[1:1:2, None:10]
np.ogrid[1:1:2]
np.ogrid[1:1:2, None:10]
np.index_exp[0:1]
np.index_exp[0:1, None:3]
np.index_exp[0, 0:1, ..., [0, 1, 3]]
np.s_[0:1]
np.s_[0:1, None:3]
np.s_[0, 0:1, ..., [0, 1, 3]]
np.ix_(AR_LIKE_b[0])
np.ix_(AR_LIKE_i[0], AR_LIKE_f[0])
np.ix_(AR_i8[0])
np.fill_diagonal(AR_i8, 5)
np.diag_indices(4)
np.diag_indices(2, 3)
np.diag_indices_from(AR_i8)
| 1,492 | Python | 21.96923 | 80 | 0.623995 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/literal.py | from __future__ import annotations
from functools import partial
from collections.abc import Callable
import pytest # type: ignore
import numpy as np
AR = np.array(0)
AR.setflags(write=False)
KACF = frozenset({None, "K", "A", "C", "F"})
ACF = frozenset({None, "A", "C", "F"})
CF = frozenset({None, "C", "F"})
order_list: list[tuple[frozenset, Callable]] = [
(KACF, partial(np.ndarray, 1)),
(KACF, AR.tobytes),
(KACF, partial(AR.astype, int)),
(KACF, AR.copy),
(ACF, partial(AR.reshape, 1)),
(KACF, AR.flatten),
(KACF, AR.ravel),
(KACF, partial(np.array, 1)),
(CF, partial(np.zeros, 1)),
(CF, partial(np.ones, 1)),
(CF, partial(np.empty, 1)),
(CF, partial(np.full, 1, 1)),
(KACF, partial(np.zeros_like, AR)),
(KACF, partial(np.ones_like, AR)),
(KACF, partial(np.empty_like, AR)),
(KACF, partial(np.full_like, AR, 1)),
(KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__
(ACF, partial(np.reshape, AR, 1)),
(KACF, partial(np.ravel, AR)),
(KACF, partial(np.asarray, 1)),
(KACF, partial(np.asanyarray, 1)),
]
for order_set, func in order_list:
for order in order_set:
func(order=order)
invalid_orders = KACF - order_set
for order in invalid_orders:
with pytest.raises(ValueError):
func(order=order)
| 1,331 | Python | 26.749999 | 60 | 0.600301 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/ufunc_config.py | """Typing tests for `numpy.core._ufunc_config`."""
import numpy as np
def func1(a: str, b: int) -> None: ...
def func2(a: str, b: int, c: float = ...) -> None: ...
def func3(a: str, b: int) -> int: ...
class Write1:
def write(self, a: str) -> None: ...
class Write2:
def write(self, a: str, b: int = ...) -> None: ...
class Write3:
def write(self, a: str) -> int: ...
_err_default = np.geterr()
_bufsize_default = np.getbufsize()
_errcall_default = np.geterrcall()
try:
np.seterr(all=None)
np.seterr(divide="ignore")
np.seterr(over="warn")
np.seterr(under="call")
np.seterr(invalid="raise")
np.geterr()
np.setbufsize(4096)
np.getbufsize()
np.seterrcall(func1)
np.seterrcall(func2)
np.seterrcall(func3)
np.seterrcall(Write1())
np.seterrcall(Write2())
np.seterrcall(Write3())
np.geterrcall()
with np.errstate(call=func1, all="call"):
pass
with np.errstate(call=Write1(), divide="log", over="log"):
pass
finally:
np.seterr(**_err_default)
np.setbufsize(_bufsize_default)
np.seterrcall(_errcall_default)
| 1,120 | Python | 20.980392 | 62 | 0.60625 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py | import numpy as np
nd1 = np.array([[1, 2], [3, 4]])
# reshape
nd1.reshape(4)
nd1.reshape(2, 2)
nd1.reshape((2, 2))
nd1.reshape((2, 2), order="C")
nd1.reshape(4, order="C")
# resize
nd1.resize()
nd1.resize(4)
nd1.resize(2, 2)
nd1.resize((2, 2))
nd1.resize((2, 2), refcheck=True)
nd1.resize(4, refcheck=True)
nd2 = np.array([[1, 2], [3, 4]])
# transpose
nd2.transpose()
nd2.transpose(1, 0)
nd2.transpose((1, 0))
# swapaxes
nd2.swapaxes(0, 1)
# flatten
nd2.flatten()
nd2.flatten("C")
# ravel
nd2.ravel()
nd2.ravel("C")
# squeeze
nd2.squeeze()
nd3 = np.array([[1, 2]])
nd3.squeeze(0)
nd4 = np.array([[[1, 2]]])
nd4.squeeze((0, 1))
| 640 | Python | 12.354166 | 33 | 0.61875 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/fromnumeric.py | """Tests for :mod:`numpy.core.fromnumeric`."""
import numpy as np
A = np.array(True, ndmin=2, dtype=bool)
B = np.array(1.0, ndmin=2, dtype=np.float32)
A.setflags(write=False)
B.setflags(write=False)
a = np.bool_(True)
b = np.float32(1.0)
c = 1.0
d = np.array(1.0, dtype=np.float32) # writeable
np.take(a, 0)
np.take(b, 0)
np.take(c, 0)
np.take(A, 0)
np.take(B, 0)
np.take(A, [0])
np.take(B, [0])
np.reshape(a, 1)
np.reshape(b, 1)
np.reshape(c, 1)
np.reshape(A, 1)
np.reshape(B, 1)
np.choose(a, [True, True])
np.choose(A, [1.0, 1.0])
np.repeat(a, 1)
np.repeat(b, 1)
np.repeat(c, 1)
np.repeat(A, 1)
np.repeat(B, 1)
np.swapaxes(A, 0, 0)
np.swapaxes(B, 0, 0)
np.transpose(a)
np.transpose(b)
np.transpose(c)
np.transpose(A)
np.transpose(B)
np.partition(a, 0, axis=None)
np.partition(b, 0, axis=None)
np.partition(c, 0, axis=None)
np.partition(A, 0)
np.partition(B, 0)
np.argpartition(a, 0)
np.argpartition(b, 0)
np.argpartition(c, 0)
np.argpartition(A, 0)
np.argpartition(B, 0)
np.sort(A, 0)
np.sort(B, 0)
np.argsort(A, 0)
np.argsort(B, 0)
np.argmax(A)
np.argmax(B)
np.argmax(A, axis=0)
np.argmax(B, axis=0)
np.argmin(A)
np.argmin(B)
np.argmin(A, axis=0)
np.argmin(B, axis=0)
np.searchsorted(A[0], 0)
np.searchsorted(B[0], 0)
np.searchsorted(A[0], [0])
np.searchsorted(B[0], [0])
np.resize(a, (5, 5))
np.resize(b, (5, 5))
np.resize(c, (5, 5))
np.resize(A, (5, 5))
np.resize(B, (5, 5))
np.squeeze(a)
np.squeeze(b)
np.squeeze(c)
np.squeeze(A)
np.squeeze(B)
np.diagonal(A)
np.diagonal(B)
np.trace(A)
np.trace(B)
np.ravel(a)
np.ravel(b)
np.ravel(c)
np.ravel(A)
np.ravel(B)
np.nonzero(A)
np.nonzero(B)
np.shape(a)
np.shape(b)
np.shape(c)
np.shape(A)
np.shape(B)
np.compress([True], a)
np.compress([True], b)
np.compress([True], c)
np.compress([True], A)
np.compress([True], B)
np.clip(a, 0, 1.0)
np.clip(b, -1, 1)
np.clip(a, 0, None)
np.clip(b, None, 1)
np.clip(c, 0, 1)
np.clip(A, 0, 1)
np.clip(B, 0, 1)
np.clip(B, [0, 1], [1, 2])
np.sum(a)
np.sum(b)
np.sum(c)
np.sum(A)
np.sum(B)
np.sum(A, axis=0)
np.sum(B, axis=0)
np.all(a)
np.all(b)
np.all(c)
np.all(A)
np.all(B)
np.all(A, axis=0)
np.all(B, axis=0)
np.all(A, keepdims=True)
np.all(B, keepdims=True)
np.any(a)
np.any(b)
np.any(c)
np.any(A)
np.any(B)
np.any(A, axis=0)
np.any(B, axis=0)
np.any(A, keepdims=True)
np.any(B, keepdims=True)
np.cumsum(a)
np.cumsum(b)
np.cumsum(c)
np.cumsum(A)
np.cumsum(B)
np.ptp(b)
np.ptp(c)
np.ptp(B)
np.ptp(B, axis=0)
np.ptp(B, keepdims=True)
np.amax(a)
np.amax(b)
np.amax(c)
np.amax(A)
np.amax(B)
np.amax(A, axis=0)
np.amax(B, axis=0)
np.amax(A, keepdims=True)
np.amax(B, keepdims=True)
np.amin(a)
np.amin(b)
np.amin(c)
np.amin(A)
np.amin(B)
np.amin(A, axis=0)
np.amin(B, axis=0)
np.amin(A, keepdims=True)
np.amin(B, keepdims=True)
np.prod(a)
np.prod(b)
np.prod(c)
np.prod(A)
np.prod(B)
np.prod(a, dtype=None)
np.prod(A, dtype=None)
np.prod(A, axis=0)
np.prod(B, axis=0)
np.prod(A, keepdims=True)
np.prod(B, keepdims=True)
np.prod(b, out=d)
np.prod(B, out=d)
np.cumprod(a)
np.cumprod(b)
np.cumprod(c)
np.cumprod(A)
np.cumprod(B)
np.ndim(a)
np.ndim(b)
np.ndim(c)
np.ndim(A)
np.ndim(B)
np.size(a)
np.size(b)
np.size(c)
np.size(A)
np.size(B)
np.around(a)
np.around(b)
np.around(c)
np.around(A)
np.around(B)
np.mean(a)
np.mean(b)
np.mean(c)
np.mean(A)
np.mean(B)
np.mean(A, axis=0)
np.mean(B, axis=0)
np.mean(A, keepdims=True)
np.mean(B, keepdims=True)
np.mean(b, out=d)
np.mean(B, out=d)
np.std(a)
np.std(b)
np.std(c)
np.std(A)
np.std(B)
np.std(A, axis=0)
np.std(B, axis=0)
np.std(A, keepdims=True)
np.std(B, keepdims=True)
np.std(b, out=d)
np.std(B, out=d)
np.var(a)
np.var(b)
np.var(c)
np.var(A)
np.var(B)
np.var(A, axis=0)
np.var(B, axis=0)
np.var(A, keepdims=True)
np.var(B, keepdims=True)
np.var(b, out=d)
np.var(B, out=d)
| 3,742 | Python | 13.340996 | 48 | 0.643506 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/arrayprint.py | import numpy as np
AR = np.arange(10)
AR.setflags(write=False)
with np.printoptions():
np.set_printoptions(
precision=1,
threshold=2,
edgeitems=3,
linewidth=4,
suppress=False,
nanstr="Bob",
infstr="Bill",
formatter={},
sign="+",
floatmode="unique",
)
np.get_printoptions()
str(AR)
np.array2string(
AR,
max_line_width=5,
precision=2,
suppress_small=True,
separator=";",
prefix="test",
threshold=5,
floatmode="fixed",
suffix="?",
legacy="1.13",
)
np.format_float_scientific(1, precision=5)
np.format_float_positional(1, trim="k")
np.array_repr(AR)
np.array_str(AR)
| 766 | Python | 19.18421 | 46 | 0.530026 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/numeric.py | """
Tests for :mod:`numpy.core.numeric`.
Does not include tests which fall under ``array_constructors``.
"""
from __future__ import annotations
import numpy as np
class SubClass(np.ndarray):
...
i8 = np.int64(1)
A = np.arange(27).reshape(3, 3, 3)
B: list[list[list[int]]] = A.tolist()
C = np.empty((27, 27)).view(SubClass)
np.count_nonzero(i8)
np.count_nonzero(A)
np.count_nonzero(B)
np.count_nonzero(A, keepdims=True)
np.count_nonzero(A, axis=0)
np.isfortran(i8)
np.isfortran(A)
np.argwhere(i8)
np.argwhere(A)
np.flatnonzero(i8)
np.flatnonzero(A)
np.correlate(B[0][0], A.ravel(), mode="valid")
np.correlate(A.ravel(), A.ravel(), mode="same")
np.convolve(B[0][0], A.ravel(), mode="valid")
np.convolve(A.ravel(), A.ravel(), mode="same")
np.outer(i8, A)
np.outer(B, A)
np.outer(A, A)
np.outer(A, A, out=C)
np.tensordot(B, A)
np.tensordot(A, A)
np.tensordot(A, A, axes=0)
np.tensordot(A, A, axes=(0, 1))
np.isscalar(i8)
np.isscalar(A)
np.isscalar(B)
np.roll(A, 1)
np.roll(A, (1, 2))
np.roll(B, 1)
np.rollaxis(A, 0, 1)
np.moveaxis(A, 0, 1)
np.moveaxis(A, (0, 1), (1, 2))
np.cross(B, A)
np.cross(A, A)
np.indices([0, 1, 2])
np.indices([0, 1, 2], sparse=False)
np.indices([0, 1, 2], sparse=True)
np.binary_repr(1)
np.base_repr(1)
np.allclose(i8, A)
np.allclose(B, A)
np.allclose(A, A)
np.isclose(i8, A)
np.isclose(B, A)
np.isclose(A, A)
np.array_equal(i8, A)
np.array_equal(B, A)
np.array_equal(A, A)
np.array_equiv(i8, A)
np.array_equiv(B, A)
np.array_equiv(A, A)
| 1,490 | Python | 15.384615 | 63 | 0.648993 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/modules.py | import numpy as np
from numpy import f2py
np.char
np.ctypeslib
np.emath
np.fft
np.lib
np.linalg
np.ma
np.matrixlib
np.polynomial
np.random
np.rec
np.testing
np.version
np.lib.format
np.lib.mixins
np.lib.scimath
np.lib.stride_tricks
np.ma.extras
np.polynomial.chebyshev
np.polynomial.hermite
np.polynomial.hermite_e
np.polynomial.laguerre
np.polynomial.legendre
np.polynomial.polynomial
np.__path__
np.__version__
np.__git_version__
np.__all__
np.char.__all__
np.ctypeslib.__all__
np.emath.__all__
np.lib.__all__
np.ma.__all__
np.random.__all__
np.rec.__all__
np.testing.__all__
f2py.__all__
| 595 | Python | 12.545454 | 24 | 0.731092 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/einsumfunc.py | from __future__ import annotations
from typing import Any
import numpy as np
AR_LIKE_b = [True, True, True]
AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
AR_LIKE_i = [1, 2, 3]
AR_LIKE_f = [1.0, 2.0, 3.0]
AR_LIKE_c = [1j, 2j, 3j]
AR_LIKE_U = ["1", "2", "3"]
OUT_f: np.ndarray[Any, np.dtype[np.float64]] = np.empty(3, dtype=np.float64)
OUT_c: np.ndarray[Any, np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128)
np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)
np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)
np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)
np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)
np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)
np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)
np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)
np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")
np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")
np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c)
np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=int, casting="unsafe", out=OUT_f)
np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)
np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)
np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)
np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)
np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)
np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)
np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)
| 1,370 | Python | 36.054053 | 82 | 0.619708 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/warnings_and_errors.py | import numpy as np
np.AxisError("test")
np.AxisError(1, ndim=2)
np.AxisError(1, ndim=2, msg_prefix="error")
np.AxisError(1, ndim=2, msg_prefix=None)
| 150 | Python | 20.571426 | 43 | 0.72 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/flatiter.py | import numpy as np
a = np.empty((2, 2)).flat
a.base
a.copy()
a.coords
a.index
iter(a)
next(a)
a[0]
a[[0, 1, 2]]
a[...]
a[:]
a.__array__()
a.__array__(np.dtype(np.float64))
| 174 | Python | 9.294117 | 33 | 0.563218 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/arithmetic.py | from __future__ import annotations
from typing import Any
import numpy as np
c16 = np.complex128(1)
f8 = np.float64(1)
i8 = np.int64(1)
u8 = np.uint64(1)
c8 = np.complex64(1)
f4 = np.float32(1)
i4 = np.int32(1)
u4 = np.uint32(1)
dt = np.datetime64(1, "D")
td = np.timedelta64(1, "D")
b_ = np.bool_(1)
b = bool(1)
c = complex(1)
f = float(1)
i = int(1)
class Object:
def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]:
ret = np.empty((), dtype=object)
ret[()] = self
return ret
def __sub__(self, value: Any) -> Object:
return self
def __rsub__(self, value: Any) -> Object:
return self
def __floordiv__(self, value: Any) -> Object:
return self
def __rfloordiv__(self, value: Any) -> Object:
return self
def __mul__(self, value: Any) -> Object:
return self
def __rmul__(self, value: Any) -> Object:
return self
def __pow__(self, value: Any) -> Object:
return self
def __rpow__(self, value: Any) -> Object:
return self
AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True])
AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32)
AR_i: np.ndarray[Any, np.dtype[np.int64]] = np.array([1])
AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0])
AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1j])
AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64(1, "D")])
AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64(1, "D")])
AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([Object()])
AR_LIKE_b = [True]
AR_LIKE_u = [np.uint32(1)]
AR_LIKE_i = [1]
AR_LIKE_f = [1.0]
AR_LIKE_c = [1j]
AR_LIKE_m = [np.timedelta64(1, "D")]
AR_LIKE_M = [np.datetime64(1, "D")]
AR_LIKE_O = [Object()]
# Array subtractions
AR_b - AR_LIKE_u
AR_b - AR_LIKE_i
AR_b - AR_LIKE_f
AR_b - AR_LIKE_c
AR_b - AR_LIKE_m
AR_b - AR_LIKE_O
AR_LIKE_u - AR_b
AR_LIKE_i - AR_b
AR_LIKE_f - AR_b
AR_LIKE_c - AR_b
AR_LIKE_m - AR_b
AR_LIKE_M - AR_b
AR_LIKE_O - AR_b
AR_u - AR_LIKE_b
AR_u - AR_LIKE_u
AR_u - AR_LIKE_i
AR_u - AR_LIKE_f
AR_u - AR_LIKE_c
AR_u - AR_LIKE_m
AR_u - AR_LIKE_O
AR_LIKE_b - AR_u
AR_LIKE_u - AR_u
AR_LIKE_i - AR_u
AR_LIKE_f - AR_u
AR_LIKE_c - AR_u
AR_LIKE_m - AR_u
AR_LIKE_M - AR_u
AR_LIKE_O - AR_u
AR_i - AR_LIKE_b
AR_i - AR_LIKE_u
AR_i - AR_LIKE_i
AR_i - AR_LIKE_f
AR_i - AR_LIKE_c
AR_i - AR_LIKE_m
AR_i - AR_LIKE_O
AR_LIKE_b - AR_i
AR_LIKE_u - AR_i
AR_LIKE_i - AR_i
AR_LIKE_f - AR_i
AR_LIKE_c - AR_i
AR_LIKE_m - AR_i
AR_LIKE_M - AR_i
AR_LIKE_O - AR_i
AR_f - AR_LIKE_b
AR_f - AR_LIKE_u
AR_f - AR_LIKE_i
AR_f - AR_LIKE_f
AR_f - AR_LIKE_c
AR_f - AR_LIKE_O
AR_LIKE_b - AR_f
AR_LIKE_u - AR_f
AR_LIKE_i - AR_f
AR_LIKE_f - AR_f
AR_LIKE_c - AR_f
AR_LIKE_O - AR_f
AR_c - AR_LIKE_b
AR_c - AR_LIKE_u
AR_c - AR_LIKE_i
AR_c - AR_LIKE_f
AR_c - AR_LIKE_c
AR_c - AR_LIKE_O
AR_LIKE_b - AR_c
AR_LIKE_u - AR_c
AR_LIKE_i - AR_c
AR_LIKE_f - AR_c
AR_LIKE_c - AR_c
AR_LIKE_O - AR_c
AR_m - AR_LIKE_b
AR_m - AR_LIKE_u
AR_m - AR_LIKE_i
AR_m - AR_LIKE_m
AR_LIKE_b - AR_m
AR_LIKE_u - AR_m
AR_LIKE_i - AR_m
AR_LIKE_m - AR_m
AR_LIKE_M - AR_m
AR_M - AR_LIKE_b
AR_M - AR_LIKE_u
AR_M - AR_LIKE_i
AR_M - AR_LIKE_m
AR_M - AR_LIKE_M
AR_LIKE_M - AR_M
AR_O - AR_LIKE_b
AR_O - AR_LIKE_u
AR_O - AR_LIKE_i
AR_O - AR_LIKE_f
AR_O - AR_LIKE_c
AR_O - AR_LIKE_O
AR_LIKE_b - AR_O
AR_LIKE_u - AR_O
AR_LIKE_i - AR_O
AR_LIKE_f - AR_O
AR_LIKE_c - AR_O
AR_LIKE_O - AR_O
AR_u += AR_b
AR_u += AR_u
AR_u += 1 # Allowed during runtime as long as the object is 0D and >=0
# Array floor division
AR_b // AR_LIKE_b
AR_b // AR_LIKE_u
AR_b // AR_LIKE_i
AR_b // AR_LIKE_f
AR_b // AR_LIKE_O
AR_LIKE_b // AR_b
AR_LIKE_u // AR_b
AR_LIKE_i // AR_b
AR_LIKE_f // AR_b
AR_LIKE_O // AR_b
AR_u // AR_LIKE_b
AR_u // AR_LIKE_u
AR_u // AR_LIKE_i
AR_u // AR_LIKE_f
AR_u // AR_LIKE_O
AR_LIKE_b // AR_u
AR_LIKE_u // AR_u
AR_LIKE_i // AR_u
AR_LIKE_f // AR_u
AR_LIKE_m // AR_u
AR_LIKE_O // AR_u
AR_i // AR_LIKE_b
AR_i // AR_LIKE_u
AR_i // AR_LIKE_i
AR_i // AR_LIKE_f
AR_i // AR_LIKE_O
AR_LIKE_b // AR_i
AR_LIKE_u // AR_i
AR_LIKE_i // AR_i
AR_LIKE_f // AR_i
AR_LIKE_m // AR_i
AR_LIKE_O // AR_i
AR_f // AR_LIKE_b
AR_f // AR_LIKE_u
AR_f // AR_LIKE_i
AR_f // AR_LIKE_f
AR_f // AR_LIKE_O
AR_LIKE_b // AR_f
AR_LIKE_u // AR_f
AR_LIKE_i // AR_f
AR_LIKE_f // AR_f
AR_LIKE_m // AR_f
AR_LIKE_O // AR_f
AR_m // AR_LIKE_u
AR_m // AR_LIKE_i
AR_m // AR_LIKE_f
AR_m // AR_LIKE_m
AR_LIKE_m // AR_m
AR_O // AR_LIKE_b
AR_O // AR_LIKE_u
AR_O // AR_LIKE_i
AR_O // AR_LIKE_f
AR_O // AR_LIKE_O
AR_LIKE_b // AR_O
AR_LIKE_u // AR_O
AR_LIKE_i // AR_O
AR_LIKE_f // AR_O
AR_LIKE_O // AR_O
# Inplace multiplication
AR_b *= AR_LIKE_b
AR_u *= AR_LIKE_b
AR_u *= AR_LIKE_u
AR_i *= AR_LIKE_b
AR_i *= AR_LIKE_u
AR_i *= AR_LIKE_i
AR_f *= AR_LIKE_b
AR_f *= AR_LIKE_u
AR_f *= AR_LIKE_i
AR_f *= AR_LIKE_f
AR_c *= AR_LIKE_b
AR_c *= AR_LIKE_u
AR_c *= AR_LIKE_i
AR_c *= AR_LIKE_f
AR_c *= AR_LIKE_c
AR_m *= AR_LIKE_b
AR_m *= AR_LIKE_u
AR_m *= AR_LIKE_i
AR_m *= AR_LIKE_f
AR_O *= AR_LIKE_b
AR_O *= AR_LIKE_u
AR_O *= AR_LIKE_i
AR_O *= AR_LIKE_f
AR_O *= AR_LIKE_c
AR_O *= AR_LIKE_O
# Inplace power
AR_u **= AR_LIKE_b
AR_u **= AR_LIKE_u
AR_i **= AR_LIKE_b
AR_i **= AR_LIKE_u
AR_i **= AR_LIKE_i
AR_f **= AR_LIKE_b
AR_f **= AR_LIKE_u
AR_f **= AR_LIKE_i
AR_f **= AR_LIKE_f
AR_c **= AR_LIKE_b
AR_c **= AR_LIKE_u
AR_c **= AR_LIKE_i
AR_c **= AR_LIKE_f
AR_c **= AR_LIKE_c
AR_O **= AR_LIKE_b
AR_O **= AR_LIKE_u
AR_O **= AR_LIKE_i
AR_O **= AR_LIKE_f
AR_O **= AR_LIKE_c
AR_O **= AR_LIKE_O
# unary ops
-c16
-c8
-f8
-f4
-i8
-i4
-u8
-u4
-td
-AR_f
+c16
+c8
+f8
+f4
+i8
+i4
+u8
+u4
+td
+AR_f
abs(c16)
abs(c8)
abs(f8)
abs(f4)
abs(i8)
abs(i4)
abs(u8)
abs(u4)
abs(td)
abs(b_)
abs(AR_f)
# Time structures
dt + td
dt + i
dt + i4
dt + i8
dt - dt
dt - i
dt - i4
dt - i8
td + td
td + i
td + i4
td + i8
td - td
td - i
td - i4
td - i8
td / f
td / f4
td / f8
td / td
td // td
td % td
# boolean
b_ / b
b_ / b_
b_ / i
b_ / i8
b_ / i4
b_ / u8
b_ / u4
b_ / f
b_ / f8
b_ / f4
b_ / c
b_ / c16
b_ / c8
b / b_
b_ / b_
i / b_
i8 / b_
i4 / b_
u8 / b_
u4 / b_
f / b_
f8 / b_
f4 / b_
c / b_
c16 / b_
c8 / b_
# Complex
c16 + c16
c16 + f8
c16 + i8
c16 + c8
c16 + f4
c16 + i4
c16 + b_
c16 + b
c16 + c
c16 + f
c16 + i
c16 + AR_f
c16 + c16
f8 + c16
i8 + c16
c8 + c16
f4 + c16
i4 + c16
b_ + c16
b + c16
c + c16
f + c16
i + c16
AR_f + c16
c8 + c16
c8 + f8
c8 + i8
c8 + c8
c8 + f4
c8 + i4
c8 + b_
c8 + b
c8 + c
c8 + f
c8 + i
c8 + AR_f
c16 + c8
f8 + c8
i8 + c8
c8 + c8
f4 + c8
i4 + c8
b_ + c8
b + c8
c + c8
f + c8
i + c8
AR_f + c8
# Float
f8 + f8
f8 + i8
f8 + f4
f8 + i4
f8 + b_
f8 + b
f8 + c
f8 + f
f8 + i
f8 + AR_f
f8 + f8
i8 + f8
f4 + f8
i4 + f8
b_ + f8
b + f8
c + f8
f + f8
i + f8
AR_f + f8
f4 + f8
f4 + i8
f4 + f4
f4 + i4
f4 + b_
f4 + b
f4 + c
f4 + f
f4 + i
f4 + AR_f
f8 + f4
i8 + f4
f4 + f4
i4 + f4
b_ + f4
b + f4
c + f4
f + f4
i + f4
AR_f + f4
# Int
i8 + i8
i8 + u8
i8 + i4
i8 + u4
i8 + b_
i8 + b
i8 + c
i8 + f
i8 + i
i8 + AR_f
u8 + u8
u8 + i4
u8 + u4
u8 + b_
u8 + b
u8 + c
u8 + f
u8 + i
u8 + AR_f
i8 + i8
u8 + i8
i4 + i8
u4 + i8
b_ + i8
b + i8
c + i8
f + i8
i + i8
AR_f + i8
u8 + u8
i4 + u8
u4 + u8
b_ + u8
b + u8
c + u8
f + u8
i + u8
AR_f + u8
i4 + i8
i4 + i4
i4 + i
i4 + b_
i4 + b
i4 + AR_f
u4 + i8
u4 + i4
u4 + u8
u4 + u4
u4 + i
u4 + b_
u4 + b
u4 + AR_f
i8 + i4
i4 + i4
i + i4
b_ + i4
b + i4
AR_f + i4
i8 + u4
i4 + u4
u8 + u4
u4 + u4
b_ + u4
b + u4
i + u4
AR_f + u4
| 7,398 | Python | 11.477234 | 84 | 0.548256 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/scalars.py | import sys
import datetime as dt
import pytest
import numpy as np
b = np.bool_()
u8 = np.uint64()
i8 = np.int64()
f8 = np.float64()
c16 = np.complex128()
U = np.str_()
S = np.bytes_()
# Construction
class D:
def __index__(self) -> int:
return 0
class C:
def __complex__(self) -> complex:
return 3j
class B:
def __int__(self) -> int:
return 4
class A:
def __float__(self) -> float:
return 4.0
np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")
np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")
np.uint64(D())
np.float32(D())
np.complex64(D())
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
np.str_(b"hello", 'utf-8')
np.str_(b"hello", encoding='utf-8')
# Array-ish semantics
np.int8().real
np.int16().imag
np.int32().data
np.int64().flags
np.uint8().itemsize * 2
np.uint16().ndim + 1
np.uint32().strides
np.uint64().shape
# Time structures
np.datetime64()
np.datetime64(0, "D")
np.datetime64(0, b"D")
np.datetime64(0, ('ms', 3))
np.datetime64("2019")
np.datetime64(b"2019")
np.datetime64("2019", "D")
np.datetime64(np.datetime64())
np.datetime64(dt.datetime(2000, 5, 3))
np.datetime64(dt.date(2000, 5, 3))
np.datetime64(None)
np.datetime64(None, "D")
np.timedelta64()
np.timedelta64(0)
np.timedelta64(0, "D")
np.timedelta64(0, ('ms', 3))
np.timedelta64(0, b"D")
np.timedelta64("3")
np.timedelta64(b"5")
np.timedelta64(np.timedelta64(2))
np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
np.void(1)
np.void(np.int64(1))
np.void(True)
np.void(np.bool_(True))
np.void(b"test")
np.void(np.bytes_("test"))
# Protocols
i8 = np.int64()
u8 = np.uint64()
f8 = np.float64()
c16 = np.complex128()
b_ = np.bool_()
td = np.timedelta64()
U = np.str_("1")
S = np.bytes_("1")
AR = np.array(1, dtype=np.float64)
int(i8)
int(u8)
int(f8)
int(b_)
int(td)
int(U)
int(S)
int(AR)
with pytest.warns(np.ComplexWarning):
int(c16)
float(i8)
float(u8)
float(f8)
float(b_)
float(td)
float(U)
float(S)
float(AR)
with pytest.warns(np.ComplexWarning):
float(c16)
complex(i8)
complex(u8)
complex(f8)
complex(c16)
complex(b_)
complex(td)
complex(U)
complex(AR)
# Misc
c16.dtype
c16.real
c16.imag
c16.real.real
c16.real.imag
c16.ndim
c16.size
c16.itemsize
c16.shape
c16.strides
c16.squeeze()
c16.byteswap()
c16.transpose()
# Aliases
np.str0()
np.bool8()
np.bytes0()
np.string_()
np.object0()
np.void0(0)
np.byte()
np.short()
np.intc()
np.intp()
np.int0()
np.int_()
np.longlong()
np.ubyte()
np.ushort()
np.uintc()
np.uintp()
np.uint0()
np.uint()
np.ulonglong()
np.half()
np.single()
np.double()
np.float_()
np.longdouble()
np.longfloat()
np.csingle()
np.singlecomplex()
np.cdouble()
np.complex_()
np.cfloat()
np.clongdouble()
np.clongfloat()
np.longcomplex()
b.item()
i8.item()
u8.item()
f8.item()
c16.item()
U.item()
S.item()
b.tolist()
i8.tolist()
u8.tolist()
f8.tolist()
c16.tolist()
U.tolist()
S.tolist()
b.ravel()
i8.ravel()
u8.ravel()
f8.ravel()
c16.ravel()
U.ravel()
S.ravel()
b.flatten()
i8.flatten()
u8.flatten()
f8.flatten()
c16.flatten()
U.flatten()
S.flatten()
b.reshape(1)
i8.reshape(1)
u8.reshape(1)
f8.reshape(1)
c16.reshape(1)
U.reshape(1)
S.reshape(1)
| 3,464 | Python | 12.641732 | 38 | 0.657333 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/ufuncs.py | import numpy as np
np.sin(1)
np.sin([1, 2, 3])
np.sin(1, out=np.empty(1))
np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)])
np.sin(1, signature="D->D")
np.sin(1, extobj=[16, 1, lambda: None])
# NOTE: `np.generic` subclasses are not guaranteed to support addition;
# re-enable this once we can infer the exact return type of `np.sin(...)`.
#
# np.sin(1) + np.sin(1)
np.sin.types[0]
np.sin.__name__
np.sin.__doc__
np.abs(np.array([1]))
| 462 | Python | 24.722221 | 80 | 0.606061 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/mod.py | import numpy as np
f8 = np.float64(1)
i8 = np.int64(1)
u8 = np.uint64(1)
f4 = np.float32(1)
i4 = np.int32(1)
u4 = np.uint32(1)
td = np.timedelta64(1, "D")
b_ = np.bool_(1)
b = bool(1)
f = float(1)
i = int(1)
AR = np.array([1], dtype=np.bool_)
AR.setflags(write=False)
AR2 = np.array([1], dtype=np.timedelta64)
AR2.setflags(write=False)
# Time structures
td % td
td % AR2
AR2 % td
divmod(td, td)
divmod(td, AR2)
divmod(AR2, td)
# Bool
b_ % b
b_ % i
b_ % f
b_ % b_
b_ % i8
b_ % u8
b_ % f8
b_ % AR
divmod(b_, b)
divmod(b_, i)
divmod(b_, f)
divmod(b_, b_)
divmod(b_, i8)
divmod(b_, u8)
divmod(b_, f8)
divmod(b_, AR)
b % b_
i % b_
f % b_
b_ % b_
i8 % b_
u8 % b_
f8 % b_
AR % b_
divmod(b, b_)
divmod(i, b_)
divmod(f, b_)
divmod(b_, b_)
divmod(i8, b_)
divmod(u8, b_)
divmod(f8, b_)
divmod(AR, b_)
# int
i8 % b
i8 % i
i8 % f
i8 % i8
i8 % f8
i4 % i8
i4 % f8
i4 % i4
i4 % f4
i8 % AR
divmod(i8, b)
divmod(i8, i)
divmod(i8, f)
divmod(i8, i8)
divmod(i8, f8)
divmod(i8, i4)
divmod(i8, f4)
divmod(i4, i4)
divmod(i4, f4)
divmod(i8, AR)
b % i8
i % i8
f % i8
i8 % i8
f8 % i8
i8 % i4
f8 % i4
i4 % i4
f4 % i4
AR % i8
divmod(b, i8)
divmod(i, i8)
divmod(f, i8)
divmod(i8, i8)
divmod(f8, i8)
divmod(i4, i8)
divmod(f4, i8)
divmod(i4, i4)
divmod(f4, i4)
divmod(AR, i8)
# float
f8 % b
f8 % i
f8 % f
i8 % f4
f4 % f4
f8 % AR
divmod(f8, b)
divmod(f8, i)
divmod(f8, f)
divmod(f8, f8)
divmod(f8, f4)
divmod(f4, f4)
divmod(f8, AR)
b % f8
i % f8
f % f8
f8 % f8
f8 % f8
f4 % f4
AR % f8
divmod(b, f8)
divmod(i, f8)
divmod(f, f8)
divmod(f8, f8)
divmod(f4, f8)
divmod(f4, f4)
divmod(AR, f8)
| 1,578 | Python | 9.526667 | 41 | 0.577313 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/comparisons.py | from __future__ import annotations
from typing import Any
import numpy as np
c16 = np.complex128()
f8 = np.float64()
i8 = np.int64()
u8 = np.uint64()
c8 = np.complex64()
f4 = np.float32()
i4 = np.int32()
u4 = np.uint32()
dt = np.datetime64(0, "D")
td = np.timedelta64(0, "D")
b_ = np.bool_()
b = bool()
c = complex()
f = float()
i = int()
SEQ = (0, 1, 2, 3, 4)
AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True])
AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32)
AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1])
AR_f: np.ndarray[Any, np.dtype[np.float_]] = np.array([1.0])
AR_c: np.ndarray[Any, np.dtype[np.complex_]] = np.array([1.0j])
AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")])
AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")])
AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object)
# Arrays
AR_b > AR_b
AR_b > AR_u
AR_b > AR_i
AR_b > AR_f
AR_b > AR_c
AR_u > AR_b
AR_u > AR_u
AR_u > AR_i
AR_u > AR_f
AR_u > AR_c
AR_i > AR_b
AR_i > AR_u
AR_i > AR_i
AR_i > AR_f
AR_i > AR_c
AR_f > AR_b
AR_f > AR_u
AR_f > AR_i
AR_f > AR_f
AR_f > AR_c
AR_c > AR_b
AR_c > AR_u
AR_c > AR_i
AR_c > AR_f
AR_c > AR_c
AR_m > AR_b
AR_m > AR_u
AR_m > AR_i
AR_b > AR_m
AR_u > AR_m
AR_i > AR_m
AR_M > AR_M
AR_O > AR_O
1 > AR_O
AR_O > 1
# Time structures
dt > dt
td > td
td > i
td > i4
td > i8
td > AR_i
td > SEQ
# boolean
b_ > b
b_ > b_
b_ > i
b_ > i8
b_ > i4
b_ > u8
b_ > u4
b_ > f
b_ > f8
b_ > f4
b_ > c
b_ > c16
b_ > c8
b_ > AR_i
b_ > SEQ
# Complex
c16 > c16
c16 > f8
c16 > i8
c16 > c8
c16 > f4
c16 > i4
c16 > b_
c16 > b
c16 > c
c16 > f
c16 > i
c16 > AR_i
c16 > SEQ
c16 > c16
f8 > c16
i8 > c16
c8 > c16
f4 > c16
i4 > c16
b_ > c16
b > c16
c > c16
f > c16
i > c16
AR_i > c16
SEQ > c16
c8 > c16
c8 > f8
c8 > i8
c8 > c8
c8 > f4
c8 > i4
c8 > b_
c8 > b
c8 > c
c8 > f
c8 > i
c8 > AR_i
c8 > SEQ
c16 > c8
f8 > c8
i8 > c8
c8 > c8
f4 > c8
i4 > c8
b_ > c8
b > c8
c > c8
f > c8
i > c8
AR_i > c8
SEQ > c8
# Float
f8 > f8
f8 > i8
f8 > f4
f8 > i4
f8 > b_
f8 > b
f8 > c
f8 > f
f8 > i
f8 > AR_i
f8 > SEQ
f8 > f8
i8 > f8
f4 > f8
i4 > f8
b_ > f8
b > f8
c > f8
f > f8
i > f8
AR_i > f8
SEQ > f8
f4 > f8
f4 > i8
f4 > f4
f4 > i4
f4 > b_
f4 > b
f4 > c
f4 > f
f4 > i
f4 > AR_i
f4 > SEQ
f8 > f4
i8 > f4
f4 > f4
i4 > f4
b_ > f4
b > f4
c > f4
f > f4
i > f4
AR_i > f4
SEQ > f4
# Int
i8 > i8
i8 > u8
i8 > i4
i8 > u4
i8 > b_
i8 > b
i8 > c
i8 > f
i8 > i
i8 > AR_i
i8 > SEQ
u8 > u8
u8 > i4
u8 > u4
u8 > b_
u8 > b
u8 > c
u8 > f
u8 > i
u8 > AR_i
u8 > SEQ
i8 > i8
u8 > i8
i4 > i8
u4 > i8
b_ > i8
b > i8
c > i8
f > i8
i > i8
AR_i > i8
SEQ > i8
u8 > u8
i4 > u8
u4 > u8
b_ > u8
b > u8
c > u8
f > u8
i > u8
AR_i > u8
SEQ > u8
i4 > i8
i4 > i4
i4 > i
i4 > b_
i4 > b
i4 > AR_i
i4 > SEQ
u4 > i8
u4 > i4
u4 > u8
u4 > u4
u4 > i
u4 > b_
u4 > b
u4 > AR_i
u4 > SEQ
i8 > i4
i4 > i4
i > i4
b_ > i4
b > i4
AR_i > i4
SEQ > i4
i8 > u4
i4 > u4
u8 > u4
u4 > u4
b_ > u4
b > u4
i > u4
AR_i > u4
SEQ > u4
| 2,992 | Python | 8.910596 | 81 | 0.525735 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/ndarray_conversion.py | import os
import tempfile
import numpy as np
nd = np.array([[1, 2], [3, 4]])
scalar_array = np.array(1)
# item
scalar_array.item()
nd.item(1)
nd.item(0, 1)
nd.item((0, 1))
# tolist is pretty simple
# itemset
scalar_array.itemset(3)
nd.itemset(3, 0)
nd.itemset((0, 0), 3)
# tobytes
nd.tobytes()
nd.tobytes("C")
nd.tobytes(None)
# tofile
if os.name != "nt":
with tempfile.NamedTemporaryFile(suffix=".txt") as tmp:
nd.tofile(tmp.name)
nd.tofile(tmp.name, "")
nd.tofile(tmp.name, sep="")
nd.tofile(tmp.name, "", "%s")
nd.tofile(tmp.name, format="%s")
nd.tofile(tmp)
# dump is pretty simple
# dumps is pretty simple
# astype
nd.astype("float")
nd.astype(float)
nd.astype(float, "K")
nd.astype(float, order="K")
nd.astype(float, "K", "unsafe")
nd.astype(float, casting="unsafe")
nd.astype(float, "K", "unsafe", True)
nd.astype(float, subok=True)
nd.astype(float, "K", "unsafe", True, True)
nd.astype(float, copy=True)
# byteswap
nd.byteswap()
nd.byteswap(True)
# copy
nd.copy()
nd.copy("C")
# view
nd.view()
nd.view(np.int64)
nd.view(dtype=np.int64)
nd.view(np.int64, np.matrix)
nd.view(type=np.matrix)
# getfield
complex_array = np.array([[1 + 1j, 0], [0, 1 - 1j]], dtype=np.complex128)
complex_array.getfield("float")
complex_array.getfield(float)
complex_array.getfield("float", 8)
complex_array.getfield(float, offset=8)
# setflags
nd.setflags()
nd.setflags(True)
nd.setflags(write=True)
nd.setflags(True, True)
nd.setflags(write=True, align=True)
nd.setflags(True, True, False)
nd.setflags(write=True, align=True, uic=False)
# fill is pretty simple
| 1,626 | Python | 16.126316 | 73 | 0.665437 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/arrayterator.py |
from __future__ import annotations
from typing import Any
import numpy as np
AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10)
ar_iter = np.lib.Arrayterator(AR_i8)
ar_iter.var
ar_iter.buf_size
ar_iter.start
ar_iter.stop
ar_iter.step
ar_iter.shape
ar_iter.flat
ar_iter.__array__()
for i in ar_iter:
pass
ar_iter[0]
ar_iter[...]
ar_iter[:]
ar_iter[0, 0, 0]
ar_iter[..., 0, :]
| 393 | Python | 13.071428 | 57 | 0.666667 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/array_constructors.py | import sys
from typing import Any
import numpy as np
class Index:
def __index__(self) -> int:
return 0
class SubClass(np.ndarray):
pass
def func(i: int, j: int, **kwargs: Any) -> SubClass:
return B
i8 = np.int64(1)
A = np.array([1])
B = A.view(SubClass).copy()
B_stack = np.array([[1], [1]]).view(SubClass)
C = [1]
np.ndarray(Index())
np.ndarray([Index()])
np.array(1, dtype=float)
np.array(1, copy=False)
np.array(1, order='F')
np.array(1, order=None)
np.array(1, subok=True)
np.array(1, ndmin=3)
np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
np.asarray(A)
np.asarray(B)
np.asarray(C)
np.asanyarray(A)
np.asanyarray(B)
np.asanyarray(B, dtype=int)
np.asanyarray(C)
np.ascontiguousarray(A)
np.ascontiguousarray(B)
np.ascontiguousarray(C)
np.asfortranarray(A)
np.asfortranarray(B)
np.asfortranarray(C)
np.require(A)
np.require(B)
np.require(B, dtype=int)
np.require(B, requirements=None)
np.require(B, requirements="E")
np.require(B, requirements=["ENSUREARRAY"])
np.require(B, requirements={"F", "E"})
np.require(B, requirements=["C", "OWNDATA"])
np.require(B, requirements="W")
np.require(B, requirements="A")
np.require(C)
np.linspace(0, 2)
np.linspace(0.5, [0, 1, 2])
np.linspace([0, 1, 2], 3)
np.linspace(0j, 2)
np.linspace(0, 2, num=10)
np.linspace(0, 2, endpoint=True)
np.linspace(0, 2, retstep=True)
np.linspace(0j, 2j, retstep=True)
np.linspace(0, 2, dtype=bool)
np.linspace([0, 1], [2, 3], axis=Index())
np.logspace(0, 2, base=2)
np.logspace(0, 2, base=2)
np.logspace(0, 2, base=[1j, 2j], num=2)
np.geomspace(1, 2)
np.zeros_like(A)
np.zeros_like(C)
np.zeros_like(B)
np.zeros_like(B, dtype=np.int64)
np.ones_like(A)
np.ones_like(C)
np.ones_like(B)
np.ones_like(B, dtype=np.int64)
np.empty_like(A)
np.empty_like(C)
np.empty_like(B)
np.empty_like(B, dtype=np.int64)
np.full_like(A, i8)
np.full_like(C, i8)
np.full_like(B, i8)
np.full_like(B, i8, dtype=np.int64)
np.ones(1)
np.ones([1, 1, 1])
np.full(1, i8)
np.full([1, 1, 1], i8)
np.indices([1, 2, 3])
np.indices([1, 2, 3], sparse=True)
np.fromfunction(func, (3, 5))
np.identity(10)
np.atleast_1d(C)
np.atleast_1d(A)
np.atleast_1d(C, C)
np.atleast_1d(C, A)
np.atleast_1d(A, A)
np.atleast_2d(C)
np.atleast_3d(C)
np.vstack([C, C])
np.vstack([C, A])
np.vstack([A, A])
np.hstack([C, C])
np.stack([C, C])
np.stack([C, C], axis=0)
np.stack([C, C], out=B_stack)
np.block([[C, C], [C, C]])
np.block(A)
| 2,419 | Python | 16.536232 | 60 | 0.657296 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/random.py | from __future__ import annotations
from typing import Any
import numpy as np
SEED_NONE = None
SEED_INT = 4579435749574957634658964293569
SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64)
SEED_ARRLIKE: list[int] = [1, 2, 3, 4]
SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0)
SEED_MT19937: np.random.MT19937 = np.random.MT19937(0)
SEED_PCG64: np.random.PCG64 = np.random.PCG64(0)
SEED_PHILOX: np.random.Philox = np.random.Philox(0)
SEED_SFC64: np.random.SFC64 = np.random.SFC64(0)
# default rng
np.random.default_rng()
np.random.default_rng(SEED_NONE)
np.random.default_rng(SEED_INT)
np.random.default_rng(SEED_ARR)
np.random.default_rng(SEED_ARRLIKE)
np.random.default_rng(SEED_SEED_SEQ)
np.random.default_rng(SEED_MT19937)
np.random.default_rng(SEED_PCG64)
np.random.default_rng(SEED_PHILOX)
np.random.default_rng(SEED_SFC64)
# Seed Sequence
np.random.SeedSequence(SEED_NONE)
np.random.SeedSequence(SEED_INT)
np.random.SeedSequence(SEED_ARR)
np.random.SeedSequence(SEED_ARRLIKE)
# Bit Generators
np.random.MT19937(SEED_NONE)
np.random.MT19937(SEED_INT)
np.random.MT19937(SEED_ARR)
np.random.MT19937(SEED_ARRLIKE)
np.random.MT19937(SEED_SEED_SEQ)
np.random.PCG64(SEED_NONE)
np.random.PCG64(SEED_INT)
np.random.PCG64(SEED_ARR)
np.random.PCG64(SEED_ARRLIKE)
np.random.PCG64(SEED_SEED_SEQ)
np.random.Philox(SEED_NONE)
np.random.Philox(SEED_INT)
np.random.Philox(SEED_ARR)
np.random.Philox(SEED_ARRLIKE)
np.random.Philox(SEED_SEED_SEQ)
np.random.SFC64(SEED_NONE)
np.random.SFC64(SEED_INT)
np.random.SFC64(SEED_ARR)
np.random.SFC64(SEED_ARRLIKE)
np.random.SFC64(SEED_SEED_SEQ)
seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE)
seed_seq.spawn(10)
seed_seq.generate_state(3)
seed_seq.generate_state(3, "u4")
seed_seq.generate_state(3, "uint32")
seed_seq.generate_state(3, "u8")
seed_seq.generate_state(3, "uint64")
seed_seq.generate_state(3, np.uint32)
seed_seq.generate_state(3, np.uint64)
def_gen: np.random.Generator = np.random.default_rng()
D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1])
D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5])
D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9])
D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5])
I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_)
I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_)
D_arr_like_0p1: list[float] = [0.1]
D_arr_like_0p5: list[float] = [0.5]
D_arr_like_0p9: list[float] = [0.9]
D_arr_like_1p5: list[float] = [1.5]
I_arr_like_10: list[int] = [10]
I_arr_like_20: list[int] = [20]
D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]]
D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like)
S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32)
D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1)
def_gen.standard_normal()
def_gen.standard_normal(dtype=np.float32)
def_gen.standard_normal(dtype="float32")
def_gen.standard_normal(dtype="double")
def_gen.standard_normal(dtype=np.float64)
def_gen.standard_normal(size=None)
def_gen.standard_normal(size=1)
def_gen.standard_normal(size=1, dtype=np.float32)
def_gen.standard_normal(size=1, dtype="f4")
def_gen.standard_normal(size=1, dtype="float32", out=S_out)
def_gen.standard_normal(dtype=np.float32, out=S_out)
def_gen.standard_normal(size=1, dtype=np.float64)
def_gen.standard_normal(size=1, dtype="float64")
def_gen.standard_normal(size=1, dtype="f8")
def_gen.standard_normal(out=D_out)
def_gen.standard_normal(size=1, dtype="float64")
def_gen.standard_normal(size=1, dtype="float64", out=D_out)
def_gen.random()
def_gen.random(dtype=np.float32)
def_gen.random(dtype="float32")
def_gen.random(dtype="double")
def_gen.random(dtype=np.float64)
def_gen.random(size=None)
def_gen.random(size=1)
def_gen.random(size=1, dtype=np.float32)
def_gen.random(size=1, dtype="f4")
def_gen.random(size=1, dtype="float32", out=S_out)
def_gen.random(dtype=np.float32, out=S_out)
def_gen.random(size=1, dtype=np.float64)
def_gen.random(size=1, dtype="float64")
def_gen.random(size=1, dtype="f8")
def_gen.random(out=D_out)
def_gen.random(size=1, dtype="float64")
def_gen.random(size=1, dtype="float64", out=D_out)
def_gen.standard_cauchy()
def_gen.standard_cauchy(size=None)
def_gen.standard_cauchy(size=1)
def_gen.standard_exponential()
def_gen.standard_exponential(method="inv")
def_gen.standard_exponential(dtype=np.float32)
def_gen.standard_exponential(dtype="float32")
def_gen.standard_exponential(dtype="double")
def_gen.standard_exponential(dtype=np.float64)
def_gen.standard_exponential(size=None)
def_gen.standard_exponential(size=None, method="inv")
def_gen.standard_exponential(size=1, method="inv")
def_gen.standard_exponential(size=1, dtype=np.float32)
def_gen.standard_exponential(size=1, dtype="f4", method="inv")
def_gen.standard_exponential(size=1, dtype="float32", out=S_out)
def_gen.standard_exponential(dtype=np.float32, out=S_out)
def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")
def_gen.standard_exponential(size=1, dtype="float64")
def_gen.standard_exponential(size=1, dtype="f8")
def_gen.standard_exponential(out=D_out)
def_gen.standard_exponential(size=1, dtype="float64")
def_gen.standard_exponential(size=1, dtype="float64", out=D_out)
def_gen.zipf(1.5)
def_gen.zipf(1.5, size=None)
def_gen.zipf(1.5, size=1)
def_gen.zipf(D_arr_1p5)
def_gen.zipf(D_arr_1p5, size=1)
def_gen.zipf(D_arr_like_1p5)
def_gen.zipf(D_arr_like_1p5, size=1)
def_gen.weibull(0.5)
def_gen.weibull(0.5, size=None)
def_gen.weibull(0.5, size=1)
def_gen.weibull(D_arr_0p5)
def_gen.weibull(D_arr_0p5, size=1)
def_gen.weibull(D_arr_like_0p5)
def_gen.weibull(D_arr_like_0p5, size=1)
def_gen.standard_t(0.5)
def_gen.standard_t(0.5, size=None)
def_gen.standard_t(0.5, size=1)
def_gen.standard_t(D_arr_0p5)
def_gen.standard_t(D_arr_0p5, size=1)
def_gen.standard_t(D_arr_like_0p5)
def_gen.standard_t(D_arr_like_0p5, size=1)
def_gen.poisson(0.5)
def_gen.poisson(0.5, size=None)
def_gen.poisson(0.5, size=1)
def_gen.poisson(D_arr_0p5)
def_gen.poisson(D_arr_0p5, size=1)
def_gen.poisson(D_arr_like_0p5)
def_gen.poisson(D_arr_like_0p5, size=1)
def_gen.power(0.5)
def_gen.power(0.5, size=None)
def_gen.power(0.5, size=1)
def_gen.power(D_arr_0p5)
def_gen.power(D_arr_0p5, size=1)
def_gen.power(D_arr_like_0p5)
def_gen.power(D_arr_like_0p5, size=1)
def_gen.pareto(0.5)
def_gen.pareto(0.5, size=None)
def_gen.pareto(0.5, size=1)
def_gen.pareto(D_arr_0p5)
def_gen.pareto(D_arr_0p5, size=1)
def_gen.pareto(D_arr_like_0p5)
def_gen.pareto(D_arr_like_0p5, size=1)
def_gen.chisquare(0.5)
def_gen.chisquare(0.5, size=None)
def_gen.chisquare(0.5, size=1)
def_gen.chisquare(D_arr_0p5)
def_gen.chisquare(D_arr_0p5, size=1)
def_gen.chisquare(D_arr_like_0p5)
def_gen.chisquare(D_arr_like_0p5, size=1)
def_gen.exponential(0.5)
def_gen.exponential(0.5, size=None)
def_gen.exponential(0.5, size=1)
def_gen.exponential(D_arr_0p5)
def_gen.exponential(D_arr_0p5, size=1)
def_gen.exponential(D_arr_like_0p5)
def_gen.exponential(D_arr_like_0p5, size=1)
def_gen.geometric(0.5)
def_gen.geometric(0.5, size=None)
def_gen.geometric(0.5, size=1)
def_gen.geometric(D_arr_0p5)
def_gen.geometric(D_arr_0p5, size=1)
def_gen.geometric(D_arr_like_0p5)
def_gen.geometric(D_arr_like_0p5, size=1)
def_gen.logseries(0.5)
def_gen.logseries(0.5, size=None)
def_gen.logseries(0.5, size=1)
def_gen.logseries(D_arr_0p5)
def_gen.logseries(D_arr_0p5, size=1)
def_gen.logseries(D_arr_like_0p5)
def_gen.logseries(D_arr_like_0p5, size=1)
def_gen.rayleigh(0.5)
def_gen.rayleigh(0.5, size=None)
def_gen.rayleigh(0.5, size=1)
def_gen.rayleigh(D_arr_0p5)
def_gen.rayleigh(D_arr_0p5, size=1)
def_gen.rayleigh(D_arr_like_0p5)
def_gen.rayleigh(D_arr_like_0p5, size=1)
def_gen.standard_gamma(0.5)
def_gen.standard_gamma(0.5, size=None)
def_gen.standard_gamma(0.5, dtype="float32")
def_gen.standard_gamma(0.5, size=None, dtype="float32")
def_gen.standard_gamma(0.5, size=1)
def_gen.standard_gamma(D_arr_0p5)
def_gen.standard_gamma(D_arr_0p5, dtype="f4")
def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out)
def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out)
def_gen.standard_gamma(D_arr_0p5, size=1)
def_gen.standard_gamma(D_arr_like_0p5)
def_gen.standard_gamma(D_arr_like_0p5, size=1)
def_gen.standard_gamma(0.5, out=D_out)
def_gen.standard_gamma(D_arr_like_0p5, out=D_out)
def_gen.standard_gamma(D_arr_like_0p5, size=1)
def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64)
def_gen.vonmises(0.5, 0.5)
def_gen.vonmises(0.5, 0.5, size=None)
def_gen.vonmises(0.5, 0.5, size=1)
def_gen.vonmises(D_arr_0p5, 0.5)
def_gen.vonmises(0.5, D_arr_0p5)
def_gen.vonmises(D_arr_0p5, 0.5, size=1)
def_gen.vonmises(0.5, D_arr_0p5, size=1)
def_gen.vonmises(D_arr_like_0p5, 0.5)
def_gen.vonmises(0.5, D_arr_like_0p5)
def_gen.vonmises(D_arr_0p5, D_arr_0p5)
def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5)
def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1)
def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.wald(0.5, 0.5)
def_gen.wald(0.5, 0.5, size=None)
def_gen.wald(0.5, 0.5, size=1)
def_gen.wald(D_arr_0p5, 0.5)
def_gen.wald(0.5, D_arr_0p5)
def_gen.wald(D_arr_0p5, 0.5, size=1)
def_gen.wald(0.5, D_arr_0p5, size=1)
def_gen.wald(D_arr_like_0p5, 0.5)
def_gen.wald(0.5, D_arr_like_0p5)
def_gen.wald(D_arr_0p5, D_arr_0p5)
def_gen.wald(D_arr_like_0p5, D_arr_like_0p5)
def_gen.wald(D_arr_0p5, D_arr_0p5, size=1)
def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.uniform(0.5, 0.5)
def_gen.uniform(0.5, 0.5, size=None)
def_gen.uniform(0.5, 0.5, size=1)
def_gen.uniform(D_arr_0p5, 0.5)
def_gen.uniform(0.5, D_arr_0p5)
def_gen.uniform(D_arr_0p5, 0.5, size=1)
def_gen.uniform(0.5, D_arr_0p5, size=1)
def_gen.uniform(D_arr_like_0p5, 0.5)
def_gen.uniform(0.5, D_arr_like_0p5)
def_gen.uniform(D_arr_0p5, D_arr_0p5)
def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5)
def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1)
def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.beta(0.5, 0.5)
def_gen.beta(0.5, 0.5, size=None)
def_gen.beta(0.5, 0.5, size=1)
def_gen.beta(D_arr_0p5, 0.5)
def_gen.beta(0.5, D_arr_0p5)
def_gen.beta(D_arr_0p5, 0.5, size=1)
def_gen.beta(0.5, D_arr_0p5, size=1)
def_gen.beta(D_arr_like_0p5, 0.5)
def_gen.beta(0.5, D_arr_like_0p5)
def_gen.beta(D_arr_0p5, D_arr_0p5)
def_gen.beta(D_arr_like_0p5, D_arr_like_0p5)
def_gen.beta(D_arr_0p5, D_arr_0p5, size=1)
def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.f(0.5, 0.5)
def_gen.f(0.5, 0.5, size=None)
def_gen.f(0.5, 0.5, size=1)
def_gen.f(D_arr_0p5, 0.5)
def_gen.f(0.5, D_arr_0p5)
def_gen.f(D_arr_0p5, 0.5, size=1)
def_gen.f(0.5, D_arr_0p5, size=1)
def_gen.f(D_arr_like_0p5, 0.5)
def_gen.f(0.5, D_arr_like_0p5)
def_gen.f(D_arr_0p5, D_arr_0p5)
def_gen.f(D_arr_like_0p5, D_arr_like_0p5)
def_gen.f(D_arr_0p5, D_arr_0p5, size=1)
def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.gamma(0.5, 0.5)
def_gen.gamma(0.5, 0.5, size=None)
def_gen.gamma(0.5, 0.5, size=1)
def_gen.gamma(D_arr_0p5, 0.5)
def_gen.gamma(0.5, D_arr_0p5)
def_gen.gamma(D_arr_0p5, 0.5, size=1)
def_gen.gamma(0.5, D_arr_0p5, size=1)
def_gen.gamma(D_arr_like_0p5, 0.5)
def_gen.gamma(0.5, D_arr_like_0p5)
def_gen.gamma(D_arr_0p5, D_arr_0p5)
def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5)
def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1)
def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.gumbel(0.5, 0.5)
def_gen.gumbel(0.5, 0.5, size=None)
def_gen.gumbel(0.5, 0.5, size=1)
def_gen.gumbel(D_arr_0p5, 0.5)
def_gen.gumbel(0.5, D_arr_0p5)
def_gen.gumbel(D_arr_0p5, 0.5, size=1)
def_gen.gumbel(0.5, D_arr_0p5, size=1)
def_gen.gumbel(D_arr_like_0p5, 0.5)
def_gen.gumbel(0.5, D_arr_like_0p5)
def_gen.gumbel(D_arr_0p5, D_arr_0p5)
def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5)
def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1)
def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.laplace(0.5, 0.5)
def_gen.laplace(0.5, 0.5, size=None)
def_gen.laplace(0.5, 0.5, size=1)
def_gen.laplace(D_arr_0p5, 0.5)
def_gen.laplace(0.5, D_arr_0p5)
def_gen.laplace(D_arr_0p5, 0.5, size=1)
def_gen.laplace(0.5, D_arr_0p5, size=1)
def_gen.laplace(D_arr_like_0p5, 0.5)
def_gen.laplace(0.5, D_arr_like_0p5)
def_gen.laplace(D_arr_0p5, D_arr_0p5)
def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5)
def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1)
def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.logistic(0.5, 0.5)
def_gen.logistic(0.5, 0.5, size=None)
def_gen.logistic(0.5, 0.5, size=1)
def_gen.logistic(D_arr_0p5, 0.5)
def_gen.logistic(0.5, D_arr_0p5)
def_gen.logistic(D_arr_0p5, 0.5, size=1)
def_gen.logistic(0.5, D_arr_0p5, size=1)
def_gen.logistic(D_arr_like_0p5, 0.5)
def_gen.logistic(0.5, D_arr_like_0p5)
def_gen.logistic(D_arr_0p5, D_arr_0p5)
def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5)
def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1)
def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.lognormal(0.5, 0.5)
def_gen.lognormal(0.5, 0.5, size=None)
def_gen.lognormal(0.5, 0.5, size=1)
def_gen.lognormal(D_arr_0p5, 0.5)
def_gen.lognormal(0.5, D_arr_0p5)
def_gen.lognormal(D_arr_0p5, 0.5, size=1)
def_gen.lognormal(0.5, D_arr_0p5, size=1)
def_gen.lognormal(D_arr_like_0p5, 0.5)
def_gen.lognormal(0.5, D_arr_like_0p5)
def_gen.lognormal(D_arr_0p5, D_arr_0p5)
def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5)
def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1)
def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.noncentral_chisquare(0.5, 0.5)
def_gen.noncentral_chisquare(0.5, 0.5, size=None)
def_gen.noncentral_chisquare(0.5, 0.5, size=1)
def_gen.noncentral_chisquare(D_arr_0p5, 0.5)
def_gen.noncentral_chisquare(0.5, D_arr_0p5)
def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1)
def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1)
def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5)
def_gen.noncentral_chisquare(0.5, D_arr_like_0p5)
def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5)
def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)
def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)
def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.normal(0.5, 0.5)
def_gen.normal(0.5, 0.5, size=None)
def_gen.normal(0.5, 0.5, size=1)
def_gen.normal(D_arr_0p5, 0.5)
def_gen.normal(0.5, D_arr_0p5)
def_gen.normal(D_arr_0p5, 0.5, size=1)
def_gen.normal(0.5, D_arr_0p5, size=1)
def_gen.normal(D_arr_like_0p5, 0.5)
def_gen.normal(0.5, D_arr_like_0p5)
def_gen.normal(D_arr_0p5, D_arr_0p5)
def_gen.normal(D_arr_like_0p5, D_arr_like_0p5)
def_gen.normal(D_arr_0p5, D_arr_0p5, size=1)
def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.triangular(0.1, 0.5, 0.9)
def_gen.triangular(0.1, 0.5, 0.9, size=None)
def_gen.triangular(0.1, 0.5, 0.9, size=1)
def_gen.triangular(D_arr_0p1, 0.5, 0.9)
def_gen.triangular(0.1, D_arr_0p5, 0.9)
def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1)
def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)
def_gen.triangular(0.5, D_arr_like_0p5, 0.9)
def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9)
def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)
def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
def_gen.noncentral_f(0.1, 0.5, 0.9)
def_gen.noncentral_f(0.1, 0.5, 0.9, size=None)
def_gen.noncentral_f(0.1, 0.5, 0.9, size=1)
def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9)
def_gen.noncentral_f(0.1, D_arr_0p5, 0.9)
def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)
def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)
def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9)
def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)
def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)
def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
def_gen.binomial(10, 0.5)
def_gen.binomial(10, 0.5, size=None)
def_gen.binomial(10, 0.5, size=1)
def_gen.binomial(I_arr_10, 0.5)
def_gen.binomial(10, D_arr_0p5)
def_gen.binomial(I_arr_10, 0.5, size=1)
def_gen.binomial(10, D_arr_0p5, size=1)
def_gen.binomial(I_arr_like_10, 0.5)
def_gen.binomial(10, D_arr_like_0p5)
def_gen.binomial(I_arr_10, D_arr_0p5)
def_gen.binomial(I_arr_like_10, D_arr_like_0p5)
def_gen.binomial(I_arr_10, D_arr_0p5, size=1)
def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)
def_gen.negative_binomial(10, 0.5)
def_gen.negative_binomial(10, 0.5, size=None)
def_gen.negative_binomial(10, 0.5, size=1)
def_gen.negative_binomial(I_arr_10, 0.5)
def_gen.negative_binomial(10, D_arr_0p5)
def_gen.negative_binomial(I_arr_10, 0.5, size=1)
def_gen.negative_binomial(10, D_arr_0p5, size=1)
def_gen.negative_binomial(I_arr_like_10, 0.5)
def_gen.negative_binomial(10, D_arr_like_0p5)
def_gen.negative_binomial(I_arr_10, D_arr_0p5)
def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)
def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)
def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)
def_gen.hypergeometric(20, 20, 10)
def_gen.hypergeometric(20, 20, 10, size=None)
def_gen.hypergeometric(20, 20, 10, size=1)
def_gen.hypergeometric(I_arr_20, 20, 10)
def_gen.hypergeometric(20, I_arr_20, 10)
def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)
def_gen.hypergeometric(20, I_arr_20, 10, size=1)
def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)
def_gen.hypergeometric(20, I_arr_like_20, 10)
def_gen.hypergeometric(I_arr_20, I_arr_20, 10)
def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)
def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)
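# Generator.integers boundary checks per dtype: with the default endpoint=False, high is exclusive
# (so the one-past-max value is valid), while endpoint=True makes high inclusive.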
I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64)
def_gen.integers(0, 100)
def_gen.integers(100)
def_gen.integers([100])
def_gen.integers(0, [100])
I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_)
I_bool_low_like: list[int] = [0]
I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
def_gen.integers(2, dtype=bool)
def_gen.integers(0, 2, dtype=bool)
def_gen.integers(1, dtype=bool, endpoint=True)
def_gen.integers(0, 1, dtype=bool, endpoint=True)
def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)
def_gen.integers(I_bool_high_open, dtype=bool)
def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)
def_gen.integers(0, I_bool_high_open, dtype=bool)
def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)
def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)
def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)
def_gen.integers(2, dtype=np.bool_)
def_gen.integers(0, 2, dtype=np.bool_)
def_gen.integers(1, dtype=np.bool_, endpoint=True)
def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)
def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)
def_gen.integers(I_bool_high_open, dtype=np.bool_)
def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)
def_gen.integers(0, I_bool_high_open, dtype=np.bool_)
def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)
def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)
def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)
I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8)
I_u1_low_like: list[int] = [0]
I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
def_gen.integers(256, dtype="u1")
def_gen.integers(0, 256, dtype="u1")
def_gen.integers(255, dtype="u1", endpoint=True)
def_gen.integers(0, 255, dtype="u1", endpoint=True)
def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)
def_gen.integers(I_u1_high_open, dtype="u1")
def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")
def_gen.integers(0, I_u1_high_open, dtype="u1")
def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)
def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)
def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)
def_gen.integers(256, dtype="uint8")
def_gen.integers(0, 256, dtype="uint8")
def_gen.integers(255, dtype="uint8", endpoint=True)
def_gen.integers(0, 255, dtype="uint8", endpoint=True)
def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)
def_gen.integers(I_u1_high_open, dtype="uint8")
def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")
def_gen.integers(0, I_u1_high_open, dtype="uint8")
def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)
def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)
def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)
def_gen.integers(256, dtype=np.uint8)
def_gen.integers(0, 256, dtype=np.uint8)
def_gen.integers(255, dtype=np.uint8, endpoint=True)
def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)
def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)
def_gen.integers(I_u1_high_open, dtype=np.uint8)
def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)
def_gen.integers(0, I_u1_high_open, dtype=np.uint8)
def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)
def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)
def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)
I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16)
I_u2_low_like: list[int] = [0]
I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
def_gen.integers(65536, dtype="u2")
def_gen.integers(0, 65536, dtype="u2")
def_gen.integers(65535, dtype="u2", endpoint=True)
def_gen.integers(0, 65535, dtype="u2", endpoint=True)
def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)
def_gen.integers(I_u2_high_open, dtype="u2")
def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")
def_gen.integers(0, I_u2_high_open, dtype="u2")
def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)
def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)
def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)
def_gen.integers(65536, dtype="uint16")
def_gen.integers(0, 65536, dtype="uint16")
def_gen.integers(65535, dtype="uint16", endpoint=True)
def_gen.integers(0, 65535, dtype="uint16", endpoint=True)
def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)
def_gen.integers(I_u2_high_open, dtype="uint16")
def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")
def_gen.integers(0, I_u2_high_open, dtype="uint16")
def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)
def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)
def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)
def_gen.integers(65536, dtype=np.uint16)
def_gen.integers(0, 65536, dtype=np.uint16)
def_gen.integers(65535, dtype=np.uint16, endpoint=True)
def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)
def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)
def_gen.integers(I_u2_high_open, dtype=np.uint16)
def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)
def_gen.integers(0, I_u2_high_open, dtype=np.uint16)
def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)
def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)
def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)
I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32)
I_u4_low_like: list[int] = [0]
I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
def_gen.integers(4294967296, dtype="u4")
def_gen.integers(0, 4294967296, dtype="u4")
def_gen.integers(4294967295, dtype="u4", endpoint=True)
def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)
def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)
def_gen.integers(I_u4_high_open, dtype="u4")
def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")
def_gen.integers(0, I_u4_high_open, dtype="u4")
def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)
def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)
def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)
def_gen.integers(4294967296, dtype="uint32")
def_gen.integers(0, 4294967296, dtype="uint32")
def_gen.integers(4294967295, dtype="uint32", endpoint=True)
def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)
def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)
def_gen.integers(I_u4_high_open, dtype="uint32")
def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")
def_gen.integers(0, I_u4_high_open, dtype="uint32")
def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)
def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)
def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)
def_gen.integers(4294967296, dtype=np.uint32)
def_gen.integers(0, 4294967296, dtype=np.uint32)
def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)
def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)
def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)
def_gen.integers(I_u4_high_open, dtype=np.uint32)
def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)
def_gen.integers(0, I_u4_high_open, dtype=np.uint32)
def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)
def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)
def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)
I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64)
I_u8_low_like: list[int] = [0]
I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
def_gen.integers(18446744073709551616, dtype="u8")
def_gen.integers(0, 18446744073709551616, dtype="u8")
def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)
def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)
def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)
def_gen.integers(I_u8_high_open, dtype="u8")
def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")
def_gen.integers(0, I_u8_high_open, dtype="u8")
def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)
def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)
def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)
def_gen.integers(18446744073709551616, dtype="uint64")
def_gen.integers(0, 18446744073709551616, dtype="uint64")
def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True)
def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)
def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)
def_gen.integers(I_u8_high_open, dtype="uint64")
def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")
def_gen.integers(0, I_u8_high_open, dtype="uint64")
def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)
def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)
def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)
def_gen.integers(18446744073709551616, dtype=np.uint64)
def_gen.integers(0, 18446744073709551616, dtype=np.uint64)
def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)
def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)
def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)
def_gen.integers(I_u8_high_open, dtype=np.uint64)
def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)
def_gen.integers(0, I_u8_high_open, dtype=np.uint64)
def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)
def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)
def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)
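# Signed dtypes repeat the same pattern with bounds [-2**(n-1), 2**(n-1)).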
I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8)
I_i1_low_like: list[int] = [-128]
I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
def_gen.integers(128, dtype="i1")
def_gen.integers(-128, 128, dtype="i1")
def_gen.integers(127, dtype="i1", endpoint=True)
def_gen.integers(-128, 127, dtype="i1", endpoint=True)
def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)
def_gen.integers(I_i1_high_open, dtype="i1")
def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")
def_gen.integers(-128, I_i1_high_open, dtype="i1")
def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)
def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)
def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)
def_gen.integers(128, dtype="int8")
def_gen.integers(-128, 128, dtype="int8")
def_gen.integers(127, dtype="int8", endpoint=True)
def_gen.integers(-128, 127, dtype="int8", endpoint=True)
def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)
def_gen.integers(I_i1_high_open, dtype="int8")
def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")
def_gen.integers(-128, I_i1_high_open, dtype="int8")
def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)
def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)
def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)
def_gen.integers(128, dtype=np.int8)
def_gen.integers(-128, 128, dtype=np.int8)
def_gen.integers(127, dtype=np.int8, endpoint=True)
def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)
def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)
def_gen.integers(I_i1_high_open, dtype=np.int8)
def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)
def_gen.integers(-128, I_i1_high_open, dtype=np.int8)
def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)
def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)
def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)
I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16)
I_i2_low_like: list[int] = [-32768]
I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
def_gen.integers(32768, dtype="i2")
def_gen.integers(-32768, 32768, dtype="i2")
def_gen.integers(32767, dtype="i2", endpoint=True)
def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)
def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)
def_gen.integers(I_i2_high_open, dtype="i2")
def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")
def_gen.integers(-32768, I_i2_high_open, dtype="i2")
def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)
def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)
def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)
def_gen.integers(32768, dtype="int16")
def_gen.integers(-32768, 32768, dtype="int16")
def_gen.integers(32767, dtype="int16", endpoint=True)
def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)
def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)
def_gen.integers(I_i2_high_open, dtype="int16")
def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")
def_gen.integers(-32768, I_i2_high_open, dtype="int16")
def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)
def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)
def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)
def_gen.integers(32768, dtype=np.int16)
def_gen.integers(-32768, 32768, dtype=np.int16)
def_gen.integers(32767, dtype=np.int16, endpoint=True)
def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)
def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)
def_gen.integers(I_i2_high_open, dtype=np.int16)
def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)
def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)
def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)
def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)
def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)
I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32)
I_i4_low_like: list[int] = [-2147483648]
I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
def_gen.integers(2147483648, dtype="i4")
def_gen.integers(-2147483648, 2147483648, dtype="i4")
def_gen.integers(2147483647, dtype="i4", endpoint=True)
def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)
def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)
def_gen.integers(I_i4_high_open, dtype="i4")
def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")
def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")
def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)
def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)
def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)
def_gen.integers(2147483648, dtype="int32")
def_gen.integers(-2147483648, 2147483648, dtype="int32")
def_gen.integers(2147483647, dtype="int32", endpoint=True)
def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)
def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)
def_gen.integers(I_i4_high_open, dtype="int32")
def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")
def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")
def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)
def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)
def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)
def_gen.integers(2147483648, dtype=np.int32)
def_gen.integers(-2147483648, 2147483648, dtype=np.int32)
def_gen.integers(2147483647, dtype=np.int32, endpoint=True)
def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)
def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)
def_gen.integers(I_i4_high_open, dtype=np.int32)
def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)
def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)
def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)
def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)
def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)
I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64)
I_i8_low_like: list[int] = [-9223372036854775808]
I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
def_gen.integers(9223372036854775808, dtype="i8")
def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")
def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)
def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)
def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)
def_gen.integers(I_i8_high_open, dtype="i8")
def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")
def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")
def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)
def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)
def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)
def_gen.integers(9223372036854775808, dtype="int64")
def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")
def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)
def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)
def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)
def_gen.integers(I_i8_high_open, dtype="int64")
def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")
def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")
def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)
def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)
def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)
def_gen.integers(9223372036854775808, dtype=np.int64)
def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)
def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)
def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)
def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)
def_gen.integers(I_i8_high_open, dtype=np.int64)
def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)
def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)
def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)
def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)
def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)
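# Remaining Generator API: bit_generator access, bytes/choice/dirichlet/multinomial helpers,
# permutations, shuffling, and pickle round-trips.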
def_gen.bit_generator
def_gen.bytes(2)
def_gen.choice(5)
def_gen.choice(5, 3)
def_gen.choice(5, 3, replace=True)
def_gen.choice(5, 3, p=[1 / 5] * 5)
def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
def_gen.dirichlet([0.5, 0.5])
def_gen.dirichlet(np.array([0.5, 0.5]))
def_gen.dirichlet(np.array([0.5, 0.5]), size=3)
def_gen.multinomial(20, [1 / 6.0] * 6)
def_gen.multinomial(20, np.array([0.5, 0.5]))
def_gen.multinomial(20, [1 / 6.0] * 6, size=2)
def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))
def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))
def_gen.multivariate_hypergeometric([3, 5, 7], 2)
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))
def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")
def_gen.multivariate_normal([0.0], [[1.0]])
def_gen.multivariate_normal([0.0], np.array([[1.0]]))
def_gen.multivariate_normal(np.array([0.0]), [[1.0]])
def_gen.multivariate_normal([0.0], np.array([[1.0]]))
def_gen.permutation(10)
def_gen.permutation([1, 2, 3, 4])
def_gen.permutation(np.array([1, 2, 3, 4]))
def_gen.permutation(D_2D, axis=1)
def_gen.permuted(D_2D)
def_gen.permuted(D_2D_like)
def_gen.permuted(D_2D, axis=1)
def_gen.permuted(D_2D, out=D_2D)
def_gen.permuted(D_2D_like, out=D_2D)
def_gen.permuted(D_2D_like, out=D_2D)
def_gen.permuted(D_2D, axis=1, out=D_2D)
def_gen.shuffle(np.arange(10))
def_gen.shuffle([1, 2, 3, 4, 5])
def_gen.shuffle(D_2D, axis=1)
def_gen.__str__()
def_gen.__repr__()
def_gen_state: dict[str, Any]
def_gen_state = def_gen.__getstate__()
def_gen.__setstate__(def_gen_state)
# RandomState
random_st: np.random.RandomState = np.random.RandomState()
random_st.standard_normal()
random_st.standard_normal(size=None)
random_st.standard_normal(size=1)
random_st.random()
random_st.random(size=None)
random_st.random(size=1)
random_st.standard_cauchy()
random_st.standard_cauchy(size=None)
random_st.standard_cauchy(size=1)
random_st.standard_exponential()
random_st.standard_exponential(size=None)
random_st.standard_exponential(size=1)
random_st.zipf(1.5)
random_st.zipf(1.5, size=None)
random_st.zipf(1.5, size=1)
random_st.zipf(D_arr_1p5)
random_st.zipf(D_arr_1p5, size=1)
random_st.zipf(D_arr_like_1p5)
random_st.zipf(D_arr_like_1p5, size=1)
random_st.weibull(0.5)
random_st.weibull(0.5, size=None)
random_st.weibull(0.5, size=1)
random_st.weibull(D_arr_0p5)
random_st.weibull(D_arr_0p5, size=1)
random_st.weibull(D_arr_like_0p5)
random_st.weibull(D_arr_like_0p5, size=1)
random_st.standard_t(0.5)
random_st.standard_t(0.5, size=None)
random_st.standard_t(0.5, size=1)
random_st.standard_t(D_arr_0p5)
random_st.standard_t(D_arr_0p5, size=1)
random_st.standard_t(D_arr_like_0p5)
random_st.standard_t(D_arr_like_0p5, size=1)
random_st.poisson(0.5)
random_st.poisson(0.5, size=None)
random_st.poisson(0.5, size=1)
random_st.poisson(D_arr_0p5)
random_st.poisson(D_arr_0p5, size=1)
random_st.poisson(D_arr_like_0p5)
random_st.poisson(D_arr_like_0p5, size=1)
random_st.power(0.5)
random_st.power(0.5, size=None)
random_st.power(0.5, size=1)
random_st.power(D_arr_0p5)
random_st.power(D_arr_0p5, size=1)
random_st.power(D_arr_like_0p5)
random_st.power(D_arr_like_0p5, size=1)
random_st.pareto(0.5)
random_st.pareto(0.5, size=None)
random_st.pareto(0.5, size=1)
random_st.pareto(D_arr_0p5)
random_st.pareto(D_arr_0p5, size=1)
random_st.pareto(D_arr_like_0p5)
random_st.pareto(D_arr_like_0p5, size=1)
random_st.chisquare(0.5)
random_st.chisquare(0.5, size=None)
random_st.chisquare(0.5, size=1)
random_st.chisquare(D_arr_0p5)
random_st.chisquare(D_arr_0p5, size=1)
random_st.chisquare(D_arr_like_0p5)
random_st.chisquare(D_arr_like_0p5, size=1)
random_st.exponential(0.5)
random_st.exponential(0.5, size=None)
random_st.exponential(0.5, size=1)
random_st.exponential(D_arr_0p5)
random_st.exponential(D_arr_0p5, size=1)
random_st.exponential(D_arr_like_0p5)
random_st.exponential(D_arr_like_0p5, size=1)
random_st.geometric(0.5)
random_st.geometric(0.5, size=None)
random_st.geometric(0.5, size=1)
random_st.geometric(D_arr_0p5)
random_st.geometric(D_arr_0p5, size=1)
random_st.geometric(D_arr_like_0p5)
random_st.geometric(D_arr_like_0p5, size=1)
random_st.logseries(0.5)
random_st.logseries(0.5, size=None)
random_st.logseries(0.5, size=1)
random_st.logseries(D_arr_0p5)
random_st.logseries(D_arr_0p5, size=1)
random_st.logseries(D_arr_like_0p5)
random_st.logseries(D_arr_like_0p5, size=1)
random_st.rayleigh(0.5)
random_st.rayleigh(0.5, size=None)
random_st.rayleigh(0.5, size=1)
random_st.rayleigh(D_arr_0p5)
random_st.rayleigh(D_arr_0p5, size=1)
random_st.rayleigh(D_arr_like_0p5)
random_st.rayleigh(D_arr_like_0p5, size=1)
random_st.standard_gamma(0.5)
random_st.standard_gamma(0.5, size=None)
random_st.standard_gamma(0.5, size=1)
random_st.standard_gamma(D_arr_0p5)
random_st.standard_gamma(D_arr_0p5, size=1)
random_st.standard_gamma(D_arr_like_0p5)
random_st.standard_gamma(D_arr_like_0p5, size=1)
random_st.standard_gamma(D_arr_like_0p5, size=1)
random_st.vonmises(0.5, 0.5)
random_st.vonmises(0.5, 0.5, size=None)
random_st.vonmises(0.5, 0.5, size=1)
random_st.vonmises(D_arr_0p5, 0.5)
random_st.vonmises(0.5, D_arr_0p5)
random_st.vonmises(D_arr_0p5, 0.5, size=1)
random_st.vonmises(0.5, D_arr_0p5, size=1)
random_st.vonmises(D_arr_like_0p5, 0.5)
random_st.vonmises(0.5, D_arr_like_0p5)
random_st.vonmises(D_arr_0p5, D_arr_0p5)
random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)
random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)
random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.wald(0.5, 0.5)
random_st.wald(0.5, 0.5, size=None)
random_st.wald(0.5, 0.5, size=1)
random_st.wald(D_arr_0p5, 0.5)
random_st.wald(0.5, D_arr_0p5)
random_st.wald(D_arr_0p5, 0.5, size=1)
random_st.wald(0.5, D_arr_0p5, size=1)
random_st.wald(D_arr_like_0p5, 0.5)
random_st.wald(0.5, D_arr_like_0p5)
random_st.wald(D_arr_0p5, D_arr_0p5)
random_st.wald(D_arr_like_0p5, D_arr_like_0p5)
random_st.wald(D_arr_0p5, D_arr_0p5, size=1)
random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.uniform(0.5, 0.5)
random_st.uniform(0.5, 0.5, size=None)
random_st.uniform(0.5, 0.5, size=1)
random_st.uniform(D_arr_0p5, 0.5)
random_st.uniform(0.5, D_arr_0p5)
random_st.uniform(D_arr_0p5, 0.5, size=1)
random_st.uniform(0.5, D_arr_0p5, size=1)
random_st.uniform(D_arr_like_0p5, 0.5)
random_st.uniform(0.5, D_arr_like_0p5)
random_st.uniform(D_arr_0p5, D_arr_0p5)
random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)
random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)
random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.beta(0.5, 0.5)
random_st.beta(0.5, 0.5, size=None)
random_st.beta(0.5, 0.5, size=1)
random_st.beta(D_arr_0p5, 0.5)
random_st.beta(0.5, D_arr_0p5)
random_st.beta(D_arr_0p5, 0.5, size=1)
random_st.beta(0.5, D_arr_0p5, size=1)
random_st.beta(D_arr_like_0p5, 0.5)
random_st.beta(0.5, D_arr_like_0p5)
random_st.beta(D_arr_0p5, D_arr_0p5)
random_st.beta(D_arr_like_0p5, D_arr_like_0p5)
random_st.beta(D_arr_0p5, D_arr_0p5, size=1)
random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.f(0.5, 0.5)
random_st.f(0.5, 0.5, size=None)
random_st.f(0.5, 0.5, size=1)
random_st.f(D_arr_0p5, 0.5)
random_st.f(0.5, D_arr_0p5)
random_st.f(D_arr_0p5, 0.5, size=1)
random_st.f(0.5, D_arr_0p5, size=1)
random_st.f(D_arr_like_0p5, 0.5)
random_st.f(0.5, D_arr_like_0p5)
random_st.f(D_arr_0p5, D_arr_0p5)
random_st.f(D_arr_like_0p5, D_arr_like_0p5)
random_st.f(D_arr_0p5, D_arr_0p5, size=1)
random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.gamma(0.5, 0.5)
random_st.gamma(0.5, 0.5, size=None)
random_st.gamma(0.5, 0.5, size=1)
random_st.gamma(D_arr_0p5, 0.5)
random_st.gamma(0.5, D_arr_0p5)
random_st.gamma(D_arr_0p5, 0.5, size=1)
random_st.gamma(0.5, D_arr_0p5, size=1)
random_st.gamma(D_arr_like_0p5, 0.5)
random_st.gamma(0.5, D_arr_like_0p5)
random_st.gamma(D_arr_0p5, D_arr_0p5)
random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)
random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)
random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.gumbel(0.5, 0.5)
random_st.gumbel(0.5, 0.5, size=None)
random_st.gumbel(0.5, 0.5, size=1)
random_st.gumbel(D_arr_0p5, 0.5)
random_st.gumbel(0.5, D_arr_0p5)
random_st.gumbel(D_arr_0p5, 0.5, size=1)
random_st.gumbel(0.5, D_arr_0p5, size=1)
random_st.gumbel(D_arr_like_0p5, 0.5)
random_st.gumbel(0.5, D_arr_like_0p5)
random_st.gumbel(D_arr_0p5, D_arr_0p5)
random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)
random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)
random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.laplace(0.5, 0.5)
random_st.laplace(0.5, 0.5, size=None)
random_st.laplace(0.5, 0.5, size=1)
random_st.laplace(D_arr_0p5, 0.5)
random_st.laplace(0.5, D_arr_0p5)
random_st.laplace(D_arr_0p5, 0.5, size=1)
random_st.laplace(0.5, D_arr_0p5, size=1)
random_st.laplace(D_arr_like_0p5, 0.5)
random_st.laplace(0.5, D_arr_like_0p5)
random_st.laplace(D_arr_0p5, D_arr_0p5)
random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)
random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)
random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.logistic(0.5, 0.5)
random_st.logistic(0.5, 0.5, size=None)
random_st.logistic(0.5, 0.5, size=1)
random_st.logistic(D_arr_0p5, 0.5)
random_st.logistic(0.5, D_arr_0p5)
random_st.logistic(D_arr_0p5, 0.5, size=1)
random_st.logistic(0.5, D_arr_0p5, size=1)
random_st.logistic(D_arr_like_0p5, 0.5)
random_st.logistic(0.5, D_arr_like_0p5)
random_st.logistic(D_arr_0p5, D_arr_0p5)
random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)
random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)
random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.lognormal(0.5, 0.5)
random_st.lognormal(0.5, 0.5, size=None)
random_st.lognormal(0.5, 0.5, size=1)
random_st.lognormal(D_arr_0p5, 0.5)
random_st.lognormal(0.5, D_arr_0p5)
random_st.lognormal(D_arr_0p5, 0.5, size=1)
random_st.lognormal(0.5, D_arr_0p5, size=1)
random_st.lognormal(D_arr_like_0p5, 0.5)
random_st.lognormal(0.5, D_arr_like_0p5)
random_st.lognormal(D_arr_0p5, D_arr_0p5)
random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)
random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)
random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.noncentral_chisquare(0.5, 0.5)
random_st.noncentral_chisquare(0.5, 0.5, size=None)
random_st.noncentral_chisquare(0.5, 0.5, size=1)
random_st.noncentral_chisquare(D_arr_0p5, 0.5)
random_st.noncentral_chisquare(0.5, D_arr_0p5)
random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)
random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)
random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)
random_st.noncentral_chisquare(0.5, D_arr_like_0p5)
random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)
random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)
random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)
random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.normal(0.5, 0.5)
random_st.normal(0.5, 0.5, size=None)
random_st.normal(0.5, 0.5, size=1)
random_st.normal(D_arr_0p5, 0.5)
random_st.normal(0.5, D_arr_0p5)
random_st.normal(D_arr_0p5, 0.5, size=1)
random_st.normal(0.5, D_arr_0p5, size=1)
random_st.normal(D_arr_like_0p5, 0.5)
random_st.normal(0.5, D_arr_like_0p5)
random_st.normal(D_arr_0p5, D_arr_0p5)
random_st.normal(D_arr_like_0p5, D_arr_like_0p5)
random_st.normal(D_arr_0p5, D_arr_0p5, size=1)
random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.triangular(0.1, 0.5, 0.9)
random_st.triangular(0.1, 0.5, 0.9, size=None)
random_st.triangular(0.1, 0.5, 0.9, size=1)
random_st.triangular(D_arr_0p1, 0.5, 0.9)
random_st.triangular(0.1, D_arr_0p5, 0.9)
random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)
random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)
random_st.triangular(0.5, D_arr_like_0p5, 0.9)
random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)
random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)
random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
random_st.noncentral_f(0.1, 0.5, 0.9)
random_st.noncentral_f(0.1, 0.5, 0.9, size=None)
random_st.noncentral_f(0.1, 0.5, 0.9, size=1)
random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)
random_st.noncentral_f(0.1, D_arr_0p5, 0.9)
random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)
random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)
random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)
random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)
random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)
random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
random_st.binomial(10, 0.5)
random_st.binomial(10, 0.5, size=None)
random_st.binomial(10, 0.5, size=1)
random_st.binomial(I_arr_10, 0.5)
random_st.binomial(10, D_arr_0p5)
random_st.binomial(I_arr_10, 0.5, size=1)
random_st.binomial(10, D_arr_0p5, size=1)
random_st.binomial(I_arr_like_10, 0.5)
random_st.binomial(10, D_arr_like_0p5)
random_st.binomial(I_arr_10, D_arr_0p5)
random_st.binomial(I_arr_like_10, D_arr_like_0p5)
random_st.binomial(I_arr_10, D_arr_0p5, size=1)
random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)
random_st.negative_binomial(10, 0.5)
random_st.negative_binomial(10, 0.5, size=None)
random_st.negative_binomial(10, 0.5, size=1)
random_st.negative_binomial(I_arr_10, 0.5)
random_st.negative_binomial(10, D_arr_0p5)
random_st.negative_binomial(I_arr_10, 0.5, size=1)
random_st.negative_binomial(10, D_arr_0p5, size=1)
random_st.negative_binomial(I_arr_like_10, 0.5)
random_st.negative_binomial(10, D_arr_like_0p5)
random_st.negative_binomial(I_arr_10, D_arr_0p5)
random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)
random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)
random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)
random_st.hypergeometric(20, 20, 10)
random_st.hypergeometric(20, 20, 10, size=None)
random_st.hypergeometric(20, 20, 10, size=1)
random_st.hypergeometric(I_arr_20, 20, 10)
random_st.hypergeometric(20, I_arr_20, 10)
random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)
random_st.hypergeometric(20, I_arr_20, 10, size=1)
random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)
random_st.hypergeometric(20, I_arr_like_20, 10)
random_st.hypergeometric(I_arr_20, I_arr_20, 10)
random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)
random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)
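# RandomState.randint mirrors the integers checks above, but high is always exclusive
# (randint has no endpoint keyword).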
random_st.randint(0, 100)
random_st.randint(100)
random_st.randint([100])
random_st.randint(0, [100])
random_st.randint(2, dtype=bool)
random_st.randint(0, 2, dtype=bool)
random_st.randint(I_bool_high_open, dtype=bool)
random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)
random_st.randint(0, I_bool_high_open, dtype=bool)
random_st.randint(2, dtype=np.bool_)
random_st.randint(0, 2, dtype=np.bool_)
random_st.randint(I_bool_high_open, dtype=np.bool_)
random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)
random_st.randint(0, I_bool_high_open, dtype=np.bool_)
random_st.randint(256, dtype="u1")
random_st.randint(0, 256, dtype="u1")
random_st.randint(I_u1_high_open, dtype="u1")
random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")
random_st.randint(0, I_u1_high_open, dtype="u1")
random_st.randint(256, dtype="uint8")
random_st.randint(0, 256, dtype="uint8")
random_st.randint(I_u1_high_open, dtype="uint8")
random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")
random_st.randint(0, I_u1_high_open, dtype="uint8")
random_st.randint(256, dtype=np.uint8)
random_st.randint(0, 256, dtype=np.uint8)
random_st.randint(I_u1_high_open, dtype=np.uint8)
random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)
random_st.randint(0, I_u1_high_open, dtype=np.uint8)
random_st.randint(65536, dtype="u2")
random_st.randint(0, 65536, dtype="u2")
random_st.randint(I_u2_high_open, dtype="u2")
random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")
random_st.randint(0, I_u2_high_open, dtype="u2")
random_st.randint(65536, dtype="uint16")
random_st.randint(0, 65536, dtype="uint16")
random_st.randint(I_u2_high_open, dtype="uint16")
random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")
random_st.randint(0, I_u2_high_open, dtype="uint16")
random_st.randint(65536, dtype=np.uint16)
random_st.randint(0, 65536, dtype=np.uint16)
random_st.randint(I_u2_high_open, dtype=np.uint16)
random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)
random_st.randint(0, I_u2_high_open, dtype=np.uint16)
random_st.randint(4294967296, dtype="u4")
random_st.randint(0, 4294967296, dtype="u4")
random_st.randint(I_u4_high_open, dtype="u4")
random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")
random_st.randint(0, I_u4_high_open, dtype="u4")
random_st.randint(4294967296, dtype="uint32")
random_st.randint(0, 4294967296, dtype="uint32")
random_st.randint(I_u4_high_open, dtype="uint32")
random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")
random_st.randint(0, I_u4_high_open, dtype="uint32")
random_st.randint(4294967296, dtype=np.uint32)
random_st.randint(0, 4294967296, dtype=np.uint32)
random_st.randint(I_u4_high_open, dtype=np.uint32)
random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)
random_st.randint(0, I_u4_high_open, dtype=np.uint32)
random_st.randint(18446744073709551616, dtype="u8")
random_st.randint(0, 18446744073709551616, dtype="u8")
random_st.randint(I_u8_high_open, dtype="u8")
random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")
random_st.randint(0, I_u8_high_open, dtype="u8")
random_st.randint(18446744073709551616, dtype="uint64")
random_st.randint(0, 18446744073709551616, dtype="uint64")
random_st.randint(I_u8_high_open, dtype="uint64")
random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")
random_st.randint(0, I_u8_high_open, dtype="uint64")
random_st.randint(18446744073709551616, dtype=np.uint64)
random_st.randint(0, 18446744073709551616, dtype=np.uint64)
random_st.randint(I_u8_high_open, dtype=np.uint64)
random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)
random_st.randint(0, I_u8_high_open, dtype=np.uint64)
random_st.randint(128, dtype="i1")
random_st.randint(-128, 128, dtype="i1")
random_st.randint(I_i1_high_open, dtype="i1")
random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")
random_st.randint(-128, I_i1_high_open, dtype="i1")
random_st.randint(128, dtype="int8")
random_st.randint(-128, 128, dtype="int8")
random_st.randint(I_i1_high_open, dtype="int8")
random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")
random_st.randint(-128, I_i1_high_open, dtype="int8")
random_st.randint(128, dtype=np.int8)
random_st.randint(-128, 128, dtype=np.int8)
random_st.randint(I_i1_high_open, dtype=np.int8)
random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)
random_st.randint(-128, I_i1_high_open, dtype=np.int8)
random_st.randint(32768, dtype="i2")
random_st.randint(-32768, 32768, dtype="i2")
random_st.randint(I_i2_high_open, dtype="i2")
random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")
random_st.randint(-32768, I_i2_high_open, dtype="i2")
random_st.randint(32768, dtype="int16")
random_st.randint(-32768, 32768, dtype="int16")
random_st.randint(I_i2_high_open, dtype="int16")
random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")
random_st.randint(-32768, I_i2_high_open, dtype="int16")
random_st.randint(32768, dtype=np.int16)
random_st.randint(-32768, 32768, dtype=np.int16)
random_st.randint(I_i2_high_open, dtype=np.int16)
random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)
random_st.randint(-32768, I_i2_high_open, dtype=np.int16)
random_st.randint(2147483648, dtype="i4")
random_st.randint(-2147483648, 2147483648, dtype="i4")
random_st.randint(I_i4_high_open, dtype="i4")
random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")
random_st.randint(-2147483648, I_i4_high_open, dtype="i4")
random_st.randint(2147483648, dtype="int32")
random_st.randint(-2147483648, 2147483648, dtype="int32")
random_st.randint(I_i4_high_open, dtype="int32")
random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")
random_st.randint(-2147483648, I_i4_high_open, dtype="int32")
random_st.randint(2147483648, dtype=np.int32)
random_st.randint(-2147483648, 2147483648, dtype=np.int32)
random_st.randint(I_i4_high_open, dtype=np.int32)
random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)
random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)
random_st.randint(9223372036854775808, dtype="i8")
random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")
random_st.randint(I_i8_high_open, dtype="i8")
random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")
random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")
random_st.randint(9223372036854775808, dtype="int64")
random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")
random_st.randint(I_i8_high_open, dtype="int64")
random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")
random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")
random_st.randint(9223372036854775808, dtype=np.int64)
random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)
random_st.randint(I_i8_high_open, dtype=np.int64)
random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)
random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)
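# Legacy RandomState helpers: bit generator access, bytes, choice, dirichlet, multinomial,
# multivariate_normal, permutation, and shuffle.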
bg: np.random.BitGenerator = random_st._bit_generator
random_st.bytes(2)
random_st.choice(5)
random_st.choice(5, 3)
random_st.choice(5, 3, replace=True)
random_st.choice(5, 3, p=[1 / 5] * 5)
random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
random_st.dirichlet([0.5, 0.5])
random_st.dirichlet(np.array([0.5, 0.5]))
random_st.dirichlet(np.array([0.5, 0.5]), size=3)
random_st.multinomial(20, [1 / 6.0] * 6)
random_st.multinomial(20, np.array([0.5, 0.5]))
random_st.multinomial(20, [1 / 6.0] * 6, size=2)
random_st.multivariate_normal([0.0], [[1.0]])
random_st.multivariate_normal([0.0], np.array([[1.0]]))
random_st.multivariate_normal(np.array([0.0]), [[1.0]])
random_st.multivariate_normal([0.0], np.array([[1.0]]))
random_st.permutation(10)
random_st.permutation([1, 2, 3, 4])
random_st.permutation(np.array([1, 2, 3, 4]))
random_st.permutation(D_2D)
random_st.shuffle(np.arange(10))
random_st.shuffle([1, 2, 3, 4, 5])
random_st.shuffle(D_2D)
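# Construction, printing, pickling, and explicit seeding / state round-trips.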
np.random.RandomState(SEED_PCG64)
np.random.RandomState(0)
np.random.RandomState([0, 1, 2])
random_st.__str__()
random_st.__repr__()
random_st_state = random_st.__getstate__()
random_st.__setstate__(random_st_state)
random_st.seed()
random_st.seed(1)
random_st.seed([0, 1])
random_st_get_state = random_st.get_state()
random_st_get_state_legacy = random_st.get_state(legacy=True)
random_st.set_state(random_st_get_state)
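# Legacy samplers: rand/randn take the output shape as positional arguments,
# while random_sample and tomaxint take size.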
random_st.rand()
random_st.rand(1)
random_st.rand(1, 2)
random_st.randn()
random_st.randn(1)
random_st.randn(1, 2)
random_st.random_sample()
random_st.random_sample(1)
random_st.random_sample(size=(1, 2))
random_st.tomaxint()
random_st.tomaxint(1)
random_st.tomaxint((1,))
| 61,810 | Python | 40.289913 | 121 | 0.722569 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/simple.py | """Simple expression that should pass with mypy."""
import operator
import numpy as np
from collections.abc import Iterable
# Basic checks
array = np.array([1, 2])
def ndarray_func(x):
# type: (np.ndarray) -> np.ndarray
return x
ndarray_func(np.array([1, 2]))
array == 1
array.dtype == float
# Dtype construction
np.dtype(float)
np.dtype(np.float64)
np.dtype(None)
np.dtype("float64")
np.dtype(np.dtype(float))
np.dtype(("U", 10))
np.dtype((np.int32, (2, 2)))
# Define the arguments on the previous line to prevent bidirectional
# type inference in mypy from broadening the types.
two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")]
np.dtype(two_tuples_dtype)
three_tuples_dtype = [("R", "u1", 2)]
np.dtype(three_tuples_dtype)
mixed_tuples_dtype = [("R", "u1"), ("G", np.unicode_, 1)]
np.dtype(mixed_tuples_dtype)
shape_tuple_dtype = [("R", "u1", (2, 2))]
np.dtype(shape_tuple_dtype)
shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.unicode_, 1)]
np.dtype(shape_like_dtype)
object_dtype = [("field1", object)]
np.dtype(object_dtype)
np.dtype((np.int32, (np.int8, 4)))
# Dtype comparison
np.dtype(float) == float
np.dtype(float) != np.float64
np.dtype(float) < None
np.dtype(float) <= "float64"
np.dtype(float) > np.dtype(float)
np.dtype(float) >= np.dtype(("U", 10))
# Iteration and indexing
def iterable_func(x):
# type: (Iterable) -> Iterable
return x
iterable_func(array)
[element for element in array]
iter(array)
zip(array, array)
array[1]
array[:]
array[...]
array[:] = 0
array_2d = np.ones((3, 3))
array_2d[:2, :2]
array_2d[..., 0]
array_2d[:2, :2] = 0
# Other special methods
len(array)
str(array)
array_scalar = np.array(1)
int(array_scalar)
float(array_scalar)
# currently does not work due to https://github.com/python/typeshed/issues/1904
# complex(array_scalar)
bytes(array_scalar)
operator.index(array_scalar)
bool(array_scalar)
# comparisons
array < 1
array <= 1
array == 1
array != 1
array > 1
array >= 1
1 < array
1 <= array
1 == array
1 != array
1 > array
1 >= array
# binary arithmetic
array + 1
1 + array
array += 1
array - 1
1 - array
array -= 1
array * 1
1 * array
array *= 1
nonzero_array = np.array([1, 2])
array / 1
1 / nonzero_array
float_array = np.array([1.0, 2.0])
float_array /= 1
array // 1
1 // nonzero_array
array //= 1
array % 1
1 % nonzero_array
array %= 1
divmod(array, 1)
divmod(1, nonzero_array)
array ** 1
1 ** array
array **= 1
array << 1
1 << array
array <<= 1
array >> 1
1 >> array
array >>= 1
array & 1
1 & array
array &= 1
array ^ 1
1 ^ array
array ^= 1
array | 1
1 | array
array |= 1
# unary arithmetic
-array
+array
abs(array)
~array
# Other methods
np.array([1, 2]).transpose()
| 2,684 | Python | 15.174699 | 79 | 0.649404 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/bitwise_ops.py | import numpy as np
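# Typing checks for bitwise and shift operators across NumPy integer scalars,
# np.bool_, Python int/bool, and a read-only int32 array.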
i8 = np.int64(1)
u8 = np.uint64(1)
i4 = np.int32(1)
u4 = np.uint32(1)
b_ = np.bool_(1)
b = bool(1)
i = int(1)
AR = np.array([0, 1, 2], dtype=np.int32)
AR.setflags(write=False)
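# int64 scalar on the left: with itself, the array, narrower int32, Python int, and both bool types.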
i8 << i8
i8 >> i8
i8 | i8
i8 ^ i8
i8 & i8
i8 << AR
i8 >> AR
i8 | AR
i8 ^ AR
i8 & AR
i4 << i4
i4 >> i4
i4 | i4
i4 ^ i4
i4 & i4
i8 << i4
i8 >> i4
i8 | i4
i8 ^ i4
i8 & i4
i8 << i
i8 >> i
i8 | i
i8 ^ i
i8 & i
i8 << b_
i8 >> b_
i8 | b_
i8 ^ b_
i8 & b_
i8 << b
i8 >> b
i8 | b
i8 ^ b
i8 & b
u8 << u8
u8 >> u8
u8 | u8
u8 ^ u8
u8 & u8
u8 << AR
u8 >> AR
u8 | AR
u8 ^ AR
u8 & AR
u4 << u4
u4 >> u4
u4 | u4
u4 ^ u4
u4 & u4
u4 << i4
u4 >> i4
u4 | i4
u4 ^ i4
u4 & i4
u4 << i
u4 >> i
u4 | i
u4 ^ i
u4 & i
u8 << b_
u8 >> b_
u8 | b_
u8 ^ b_
u8 & b_
u8 << b
u8 >> b
u8 | b
u8 ^ b
u8 & b
b_ << b_
b_ >> b_
b_ | b_
b_ ^ b_
b_ & b_
b_ << AR
b_ >> AR
b_ | AR
b_ ^ AR
b_ & AR
b_ << b
b_ >> b
b_ | b
b_ ^ b
b_ & b
b_ << i
b_ >> i
b_ | i
b_ ^ i
b_ & i
~i8
~i4
~u8
~u4
~b_
~AR
| 970 | Python | 6.356061 | 40 | 0.439175 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/lib_version.py | from numpy.lib import NumpyVersion
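# Exercise NumpyVersion attributes and rich comparisons against strings and other versions.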
version = NumpyVersion("1.8.0")
version.vstring
version.version
version.major
version.minor
version.bugfix
version.pre_release
version.is_devversion
version == version
version != version
version < "1.8.0"
version <= version
version > version
version >= "1.8.0"
| 299 | Python | 14.789473 | 34 | 0.765886 |