ext | sha | content |
---|---|---|
py | 7dff31518f5735d3790d4eb73fd163eef67d9e5e | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tanh op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
tanh_op_info = TBERegOp("Tanh") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("tanh.so") \
.compute_cost(10) \
.kernel_name("tanh") \
.partial_flag(True) \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("formatAgnostic") \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(tanh_op_info)
def _tanh_tbe():
"""Tanh TBE register"""
return
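# A minimal usage sketch (an illustrative assumption, not part of the original registration
# file): the TBE kernel registered above is normally reached through the standard MindSpore
# Tanh primitive when running on an Ascend device.
if __name__ == "__main__":
    import numpy as np
    import mindspore.ops as ops
    from mindspore import Tensor, context

    # Assumes an Ascend environment, which is where TBE kernels apply.
    context.set_context(device_target="Ascend")
    tanh = ops.Tanh()
    # Applies the element-wise tanh kernel registered above (float16/float32 supported).
    print(tanh(Tensor(np.array([0.1, 0.5, 1.0], dtype=np.float32))))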
|
py | 7dff32f95782ec5608f56af86a5a38d601813ec6 | # flake8: noqa: F811, F401
import asyncio
import logging
from secrets import token_bytes
from typing import List, Optional
import pytest
from hddcoin.consensus.blockchain import ReceiveBlockResult
from hddcoin.consensus.multiprocess_validation import PreValidationResult
from hddcoin.consensus.pot_iterations import is_overflow_block
from hddcoin.full_node.full_node_store import FullNodeStore
from hddcoin.full_node.signage_point import SignagePoint
from hddcoin.protocols import timelord_protocol
from hddcoin.protocols.timelord_protocol import NewInfusionPointVDF
from hddcoin.types.blockchain_format.sized_bytes import bytes32
from hddcoin.types.unfinished_block import UnfinishedBlock
from hddcoin.util.block_cache import BlockCache
from tests.block_tools import get_signage_point, BlockTools
from hddcoin.util.hash import std_hash
from hddcoin.util.ints import uint8, uint32, uint64, uint128
from tests.core.fixtures import default_1000_blocks, create_blockchain # noqa: F401
from tests.setup_nodes import test_constants as test_constants_original
test_constants = test_constants_original.replace(**{"DISCRIMINANT_SIZE_BITS": 32, "SUB_SLOT_ITERS_STARTING": 2 ** 12})
bt = BlockTools(test_constants)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
log = logging.getLogger(__name__)
@pytest.fixture(scope="function")
async def empty_blockchain():
bc1, connection, db_path = await create_blockchain(test_constants)
yield bc1
await connection.close()
bc1.shut_down()
db_path.unlink()
@pytest.fixture(scope="function")
async def empty_blockchain_original():
bc1, connection, db_path = await create_blockchain(test_constants_original)
yield bc1
await connection.close()
bc1.shut_down()
db_path.unlink()
class TestFullNodeStore:
@pytest.mark.asyncio
async def test_basic_store(self, empty_blockchain, normalized_to_identity: bool = False):
blockchain = empty_blockchain
blocks = bt.get_consecutive_blocks(
10,
seed=b"1234",
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
store = FullNodeStore(test_constants)
unfinished_blocks = []
for block in blocks:
unfinished_blocks.append(
UnfinishedBlock(
block.finished_sub_slots,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
)
# Add/get candidate block
assert store.get_candidate_block(unfinished_blocks[0].get_hash()) is None
for height, unf_block in enumerate(unfinished_blocks):
store.add_candidate_block(unf_block.get_hash(), uint32(height), unf_block)
candidate = store.get_candidate_block(unfinished_blocks[4].get_hash())
assert candidate is not None
assert candidate[1] == unfinished_blocks[4]
store.clear_candidate_blocks_below(uint32(8))
assert store.get_candidate_block(unfinished_blocks[5].get_hash()) is None
assert store.get_candidate_block(unfinished_blocks[8].get_hash()) is not None
# Test seen unfinished blocks
h_hash_1 = bytes32(token_bytes(32))
assert not store.seen_unfinished_block(h_hash_1)
assert store.seen_unfinished_block(h_hash_1)
store.clear_seen_unfinished_blocks()
assert not store.seen_unfinished_block(h_hash_1)
# Add/get unfinished block
for height, unf_block in enumerate(unfinished_blocks):
assert store.get_unfinished_block(unf_block.partial_hash) is None
store.add_unfinished_block(uint32(height), unf_block, PreValidationResult(None, uint64(123532), None))
assert store.get_unfinished_block(unf_block.partial_hash) == unf_block
store.remove_unfinished_block(unf_block.partial_hash)
assert store.get_unfinished_block(unf_block.partial_hash) is None
blocks = bt.get_consecutive_blocks(
1,
skip_slots=5,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
)
sub_slots = blocks[0].finished_sub_slots
assert len(sub_slots) == 5
assert (
store.get_finished_sub_slots(
BlockCache({}),
None,
sub_slots[0].challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
)
== []
)
# Test adding non-connecting sub-slots genesis
assert store.get_sub_slot(test_constants.GENESIS_CHALLENGE) is None
assert store.get_sub_slot(sub_slots[0].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is None
assert store.new_finished_sub_slot(sub_slots[2], blockchain, None, None) is None
# Test adding sub-slots after genesis
assert store.new_finished_sub_slot(sub_slots[0], blockchain, None, None) is not None
sub_slot = store.get_sub_slot(sub_slots[0].challenge_chain.get_hash())
assert sub_slot is not None
assert sub_slot[0] == sub_slots[0]
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is not None
for i in range(len(sub_slots)):
assert store.new_finished_sub_slot(sub_slots[i], blockchain, None, None) is not None
slot_i = store.get_sub_slot(sub_slots[i].challenge_chain.get_hash())
assert slot_i is not None
assert slot_i[0] == sub_slots[i]
assert store.get_finished_sub_slots(BlockCache({}), None, sub_slots[-1].challenge_chain.get_hash()) == sub_slots
assert store.get_finished_sub_slots(BlockCache({}), None, std_hash(b"not a valid hash")) is None
assert (
store.get_finished_sub_slots(BlockCache({}), None, sub_slots[-2].challenge_chain.get_hash())
== sub_slots[:-1]
)
# Test adding genesis peak
await blockchain.receive_block(blocks[0])
peak = blockchain.get_peak()
peak_full_block = await blockchain.get_full_peak()
if peak.overflow:
store.new_peak(peak, peak_full_block, sub_slots[-2], sub_slots[-1], None, blockchain)
else:
store.new_peak(peak, peak_full_block, None, sub_slots[-1], None, blockchain)
assert store.get_sub_slot(sub_slots[0].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[2].challenge_chain.get_hash()) is None
if peak.overflow:
slot_3 = store.get_sub_slot(sub_slots[3].challenge_chain.get_hash())
assert slot_3 is not None
assert slot_3[0] == sub_slots[3]
else:
assert store.get_sub_slot(sub_slots[3].challenge_chain.get_hash()) is None
slot_4 = store.get_sub_slot(sub_slots[4].challenge_chain.get_hash())
assert slot_4 is not None
assert slot_4[0] == sub_slots[4]
assert (
store.get_finished_sub_slots(
blockchain,
peak,
sub_slots[-1].challenge_chain.get_hash(),
)
== []
)
# Test adding non genesis peak directly
blocks = bt.get_consecutive_blocks(
2,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for block in blocks:
await blockchain.receive_block(block)
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res[0] is None
# Add reorg blocks
blocks_reorg = bt.get_consecutive_blocks(
20,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for block in blocks_reorg:
res, _, fork_height = await blockchain.receive_block(block)
if res == ReceiveBlockResult.NEW_PEAK:
if fork_height is not None and fork_height != block.height - 1:
fork_block = blockchain.block_record(blockchain.height_to_hash(fork_height))
else:
fork_block = None
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, fork_block, blockchain)
assert res[0] is None
# Add slots to the end
blocks_2 = bt.get_consecutive_blocks(
1,
block_list_input=blocks_reorg,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_2[-1].finished_sub_slots:
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak())
assert store.get_sub_slot(sub_slots[3].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[4].challenge_chain.get_hash()) is None
# Test adding signage point
peak = blockchain.get_peak()
ss_start_iters = peak.ip_sub_slot_total_iters(test_constants)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
ss_start_iters,
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
blocks = blocks_reorg
while True:
blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
res, _, fork_height = await blockchain.receive_block(blocks[-1])
if res == ReceiveBlockResult.NEW_PEAK:
if fork_height is not None and fork_height != blocks[-1].height - 1:
fork_block = blockchain.block_record(blockchain.height_to_hash(fork_height))
else:
fork_block = None
sb = blockchain.block_record(blocks[-1].header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(blocks[-1].header_hash)
res = store.new_peak(sb, blocks[-1], sp_sub_slot, ip_sub_slot, fork_block, blockchain)
assert res[0] is None
if sb.overflow and sp_sub_slot is not None:
assert sp_sub_slot != ip_sub_slot
break
peak = blockchain.get_peak()
assert peak.overflow
# Overflow peak should result in 2 finished sub slots
assert len(store.finished_sub_slots) == 2
# Add slots to the end, except for the last one, which we will use to test invalid SP
blocks_2 = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
skip_slots=3,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_2[-1].finished_sub_slots[:-1]:
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak())
finished_sub_slots = blocks_2[-1].finished_sub_slots
assert len(store.finished_sub_slots) == 4
# Test adding signage points for overflow blocks (sp_sub_slot)
ss_start_iters = peak.sp_sub_slot_total_iters(test_constants)
# for i in range(peak.signage_point_index, test_constants.NUM_SPS_SUB_SLOT):
# if i < peak.signage_point_index:
# continue
# latest = peak
# while latest.total_iters > peak.sp_total_iters(test_constants):
# latest = blockchain.blocks[latest.prev_hash]
# sp = get_signage_point(
# test_constants,
# blockchain.blocks,
# latest,
# ss_start_iters,
# uint8(i),
# [],
# peak.sub_slot_iters,
# )
# assert store.new_signage_point(i, blockchain.blocks, peak, peak.sub_slot_iters, sp)
# Test adding signage points for overflow blocks (ip_sub_slot)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants),
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding future signage point, a few slots forward (good)
saved_sp_hash = None
for slot_offset in range(1, len(finished_sub_slots)):
for i in range(
1,
test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA,
):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants) + slot_offset * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[:slot_offset],
peak.sub_slot_iters,
)
assert sp.cc_vdf is not None
saved_sp_hash = sp.cc_vdf.output.get_hash()
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding future signage point (bad)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants) + len(finished_sub_slots) * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[: len(finished_sub_slots)],
peak.sub_slot_iters,
)
assert not store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding past signage point
sp = SignagePoint(
blocks[1].reward_chain_block.challenge_chain_sp_vdf,
blocks[1].challenge_chain_sp_proof,
blocks[1].reward_chain_block.reward_chain_sp_vdf,
blocks[1].reward_chain_sp_proof,
)
assert not store.new_signage_point(
blocks[1].reward_chain_block.signage_point_index,
blockchain,
peak,
blockchain.block_record(blocks[1].header_hash).sp_sub_slot_total_iters(test_constants),
sp,
)
# Get signage point by index
assert (
store.get_signage_point_by_index(
finished_sub_slots[0].challenge_chain.get_hash(),
uint8(4),
finished_sub_slots[0].reward_chain.get_hash(),
)
is not None
)
assert (
store.get_signage_point_by_index(finished_sub_slots[0].challenge_chain.get_hash(), uint8(4), std_hash(b"1"))
is None
)
# Get signage point by hash
assert store.get_signage_point(saved_sp_hash) is not None
assert store.get_signage_point(std_hash(b"2")) is None
# Test adding signage points before genesis
store.initialize_genesis_sub_slot()
assert len(store.finished_sub_slots) == 1
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
BlockCache({}, {}),
None,
uint128(0),
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp)
blocks_3 = bt.get_consecutive_blocks(
1,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_3[-1].finished_sub_slots:
store.new_finished_sub_slot(slot, blockchain, None, None)
assert len(store.finished_sub_slots) == 3
finished_sub_slots = blocks_3[-1].finished_sub_slots
for slot_offset in range(1, len(finished_sub_slots) + 1):
for i in range(
1,
test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA,
):
sp = get_signage_point(
test_constants,
BlockCache({}, {}),
None,
slot_offset * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[:slot_offset],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp)
# Test adding signage points after genesis
blocks_4 = bt.get_consecutive_blocks(
1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
blocks_5 = bt.get_consecutive_blocks(
1,
block_list_input=blocks_4,
skip_slots=1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
# If this is not the case, fix test to find a block that is
assert (
blocks_4[-1].reward_chain_block.signage_point_index
< test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA
)
await blockchain.receive_block(blocks_4[-1])
sb = blockchain.block_record(blocks_4[-1].header_hash)
store.new_peak(sb, blocks_4[-1], None, None, None, blockchain)
for i in range(
sb.signage_point_index + test_constants.NUM_SP_INTERVALS_EXTRA,
test_constants.NUM_SPS_SUB_SLOT,
):
if is_overflow_block(test_constants, uint8(i)):
finished_sub_slots = blocks_5[-1].finished_sub_slots
else:
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
sb,
uint128(0),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), empty_blockchain, sb, peak.sub_slot_iters, sp)
# Test future EOS cache
store.initialize_genesis_sub_slot()
blocks = bt.get_consecutive_blocks(
1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
await blockchain.receive_block(blocks[-1])
while True:
blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
await blockchain.receive_block(blocks[-1])
sb = blockchain.block_record(blocks[-1].header_hash)
if sb.first_in_sub_slot:
break
assert len(blocks) >= 2
dependant_sub_slots = blocks[-1].finished_sub_slots
peak = blockchain.get_peak()
peak_full_block = await blockchain.get_full_peak()
for block in blocks[:-2]:
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
peak = sb
peak_full_block = block
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res[0] is None
assert store.new_finished_sub_slot(dependant_sub_slots[0], blockchain, peak, peak_full_block) is None
block = blocks[-2]
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res[0] == dependant_sub_slots[0]
assert res[1] == res[2] == []
# Test future IP cache
store.initialize_genesis_sub_slot()
blocks = bt.get_consecutive_blocks(
60,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
)
for block in blocks[:5]:
await blockchain.receive_block(block)
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res[0] is None
case_0, case_1 = False, False
for i in range(5, len(blocks) - 1):
prev_block = blocks[i]
block = blocks[i + 1]
new_ip = NewInfusionPointVDF(
block.reward_chain_block.get_unfinished().get_hash(),
block.reward_chain_block.challenge_chain_ip_vdf,
block.challenge_chain_ip_proof,
block.reward_chain_block.reward_chain_ip_vdf,
block.reward_chain_ip_proof,
block.reward_chain_block.infused_challenge_chain_ip_vdf,
block.infused_challenge_chain_ip_proof,
)
store.add_to_future_ip(new_ip)
await blockchain.receive_block(prev_block)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(prev_block.header_hash)
sb = blockchain.block_record(prev_block.header_hash)
res = store.new_peak(sb, prev_block, sp_sub_slot, ip_sub_slot, None, blockchain)
if len(block.finished_sub_slots) == 0:
case_0 = True
assert res[2] == [new_ip]
else:
case_1 = True
assert res[2] == []
found_ips: List[timelord_protocol.NewInfusionPointVDF] = []
for ss in block.finished_sub_slots:
ipvdf = store.new_finished_sub_slot(ss, blockchain, sb, prev_block)
assert ipvdf is not None
found_ips += ipvdf
assert found_ips == [new_ip]
# If flaky, increase the number of blocks created
assert case_0 and case_1
# Try to get two blocks in the same slot, such that we have
# SP, B2 SP .... SP B1
# i2 ......... i1
# Then do a reorg up to B2, removing all signage points after B2, but not before
log.warning(f"Adding blocks up to {blocks[-1]}")
for block in blocks:
await blockchain.receive_block(block)
log.warning(f"Starting loop")
while True:
log.warning("Looping")
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
assert (await blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
store.new_peak(peak, blocks[-1], sub_slots[0], sub_slots[1], None, blockchain)
blocks = bt.get_consecutive_blocks(2, block_list_input=blocks, guarantee_transaction_block=True)
i3 = blocks[-3].reward_chain_block.signage_point_index
i2 = blocks[-2].reward_chain_block.signage_point_index
i1 = blocks[-1].reward_chain_block.signage_point_index
if (
len(blocks[-2].finished_sub_slots) == len(blocks[-1].finished_sub_slots) == 0
and not is_overflow_block(test_constants, signage_point_index=i2)
and not is_overflow_block(test_constants, signage_point_index=i1)
and i2 > i3 + 3
and i1 > (i2 + 3)
):
# We hit all the conditions that we want
all_sps: List[Optional[SignagePoint]] = [None] * test_constants.NUM_SPS_SUB_SLOT
def assert_sp_none(sp_index: int, is_none: bool):
sp_to_check: Optional[SignagePoint] = all_sps[sp_index]
assert sp_to_check is not None
assert sp_to_check.cc_vdf is not None
fetched = store.get_signage_point(sp_to_check.cc_vdf.output.get_hash())
assert (fetched is None) == is_none
if fetched is not None:
assert fetched == sp_to_check
for i in range(i3 + 1, test_constants.NUM_SPS_SUB_SLOT - 3):
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
peak,
uint128(peak.ip_sub_slot_total_iters(bt.constants)),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
all_sps[i] = sp
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Adding a new peak clears all SPs after that peak
assert (await blockchain.receive_block(blocks[-2]))[0] == ReceiveBlockResult.NEW_PEAK
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
store.new_peak(peak, blocks[-2], sub_slots[0], sub_slots[1], None, blockchain)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, True)
assert_sp_none(i1 + 1, True)
assert_sp_none(i1 + 4, True)
for i in range(i2, test_constants.NUM_SPS_SUB_SLOT):
if is_overflow_block(test_constants, uint8(i)):
blocks_alt = bt.get_consecutive_blocks(1, block_list_input=blocks[:-1], skip_slots=1)
finished_sub_slots = blocks_alt[-1].finished_sub_slots
else:
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
peak,
uint128(peak.ip_sub_slot_total_iters(bt.constants)),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
all_sps[i] = sp
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, False)
assert_sp_none(i1 + 1, False)
assert_sp_none(i1 + 4, False)
assert (await blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
# Do a reorg, which should remove everything after B2
store.new_peak(
peak,
blocks[-1],
sub_slots[0],
sub_slots[1],
(await blockchain.get_block_records_at([blocks[-2].height]))[0],
blockchain,
)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, True)
assert_sp_none(i1 + 1, True)
assert_sp_none(i1 + 4, True)
break
else:
for block in blocks[-2:]:
assert (await blockchain.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_basic_store_compact_blockchain(self, empty_blockchain):
await self.test_basic_store(empty_blockchain, True)
@pytest.mark.asyncio
async def test_long_chain_slots(self, empty_blockchain_original, default_1000_blocks):
blockchain = empty_blockchain_original
store = FullNodeStore(test_constants_original)
blocks = default_1000_blocks
peak = None
peak_full_block = None
for block in blocks:
for sub_slot in block.finished_sub_slots:
assert store.new_finished_sub_slot(sub_slot, blockchain, peak, peak_full_block) is not None
res, err, _ = await blockchain.receive_block(block)
assert res == ReceiveBlockResult.NEW_PEAK
peak = blockchain.get_peak()
peak_full_block = await blockchain.get_full_peak()
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
store.new_peak(peak, peak_full_block, sp_sub_slot, ip_sub_slot, None, blockchain)
|
py | 7dff35021d4db612e0458bb82c88e317702469d3 | from typing import Any
import numpy as np
import pytest
from jina import Document
from jina.drivers.search import VectorFillDriver
from jina.executors.indexers import BaseIndexer
@pytest.fixture(scope='function')
def num_docs():
return 10
@pytest.fixture(scope='function')
def docs_to_encode(num_docs):
docs = []
for idx in range(num_docs):
doc = Document(content=np.array([idx]))
docs.append(doc)
return docs
class MockIndexer(BaseIndexer):
def query_by_key(self, keys: Any, *args, **kwargs) -> 'np.ndarray':
# return a random embedding of size 5 for each queried key
return np.random.random([len(keys), 5])
class SimpleFillDriver(VectorFillDriver):
@property
def exec_fn(self):
return self._exec_fn
def test_index_driver(docs_to_encode, num_docs):
driver = SimpleFillDriver()
executor = MockIndexer()
driver.attach(executor=executor, runtime=None)
assert len(docs_to_encode) == num_docs
for doc in docs_to_encode:
assert doc.embedding is None
driver._apply_all(docs_to_encode)
assert len(docs_to_encode) == num_docs
for doc in docs_to_encode:
assert doc.embedding.shape == (5,)
|
py | 7dff3558be084658c995c9c524cd741d3778a667 | #!/usr/bin/env python
import StringIO
import unittest
from csvkit import CSVKitReader
from csvkit.utilities.csvgrep import CSVGrep
class TestCSVGrep(unittest.TestCase):
def test_match(self):
args = ['-c', '1', '-m', '1', 'examples/dummy.csv']
output_file = StringIO.StringIO()
utility = CSVGrep(args, output_file)
utility.main()
input_file = StringIO.StringIO(output_file.getvalue())
reader = CSVKitReader(input_file)
self.assertEqual(reader.next(), ['a', 'b', 'c'])
self.assertEqual(reader.next(), ['1', '2', '3'])
def test_no_match(self):
args = ['-c', '1', '-m', 'NO MATCH', 'examples/dummy.csv']
output_file = StringIO.StringIO()
utility = CSVGrep(args, output_file)
utility.main()
input_file = StringIO.StringIO(output_file.getvalue())
reader = CSVKitReader(input_file)
self.assertEqual(reader.next(), ['a', 'b', 'c'])
def test_invert_match(self):
args = ['-c', '1', '-i', '-m', 'NO MATCH', 'examples/dummy.csv']
output_file = StringIO.StringIO()
utility = CSVGrep(args, output_file)
utility.main()
input_file = StringIO.StringIO(output_file.getvalue())
reader = CSVKitReader(input_file)
self.assertEqual(reader.next(), ['a', 'b', 'c'])
self.assertEqual(reader.next(), ['1', '2', '3'])
def test_re_match(self):
args = ['-c', '3', '-r', '^(3|9)$', 'examples/dummy.csv']
output_file = StringIO.StringIO()
utility = CSVGrep(args, output_file)
utility.main()
input_file = StringIO.StringIO(output_file.getvalue())
reader = CSVKitReader(input_file)
self.assertEqual(reader.next(), ['a', 'b', 'c'])
self.assertEqual(reader.next(), ['1', '2', '3'])
def test_string_match(self):
args = ['-c', '1', '-m', 'ILLINOIS', 'examples/realdata/FY09_EDU_Recipients_by_State.csv']
output_file = StringIO.StringIO()
utility = CSVGrep(args, output_file)
utility.main()
input_file = StringIO.StringIO(output_file.getvalue())
reader = CSVKitReader(input_file)
self.assertEqual(reader.next(), ['State Name', 'State Abbreviate', 'Code', 'Montgomery GI Bill-Active Duty', 'Montgomery GI Bill- Selective Reserve', 'Dependents\' Educational Assistance', 'Reserve Educational Assistance Program', 'Post-Vietnam Era Veteran\'s Educational Assistance Program', 'TOTAL', ''])
self.assertEqual(reader.next(), ['ILLINOIS', 'IL', '17', '15,659', '2,491', '2,025', '1,770', '19', '21,964', ''])
|
py | 7dff35999480b969643c16cab7489efe053518b8 | # -*- coding: utf-8 -*-
"""
detection
~~~~~~~~~
Implements detection language/framework
:author: Feei <[email protected]>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import os
from prettytable import PrettyTable
import xml.etree.ElementTree as eT
from .rule import Rule
from .dependencies import Dependencies
from .log import logger
from pip.req import parse_requirements
from .config import rules_path
file_type = []
class Detection(object):
def __init__(self, target_directory, files):
"""
:param target_directory:
:param files:
"""
self.target_directory = target_directory
self.files = files
self.lang = None
self.requirements = None
self.frame_data = {}
self.language_data = {}
self.project_data = []
@property
def language(self):
"""Detection main language"""
languages = Rule().languages
tmp_language = None
for ext, ext_info in self.files:
logger.debug("[DETECTION] [LANGUAGE] {ext} {count}".format(ext=ext, count=ext_info['count']))
for language, language_info in languages.items():
if ext in language_info['extensions']:
if 'chiefly' in language_info and language_info['chiefly'].lower() == 'true':
logger.debug('[DETECTION] [LANGUAGE] found the chiefly language ({language}); it may also be the largest, continue...'.format(
language=language))
self.lang = language
else:
logger.debug('[DETECTION] [LANGUAGE] {language} is not the chiefly language, continue...'.format(language=language))
tmp_language = language
if self.lang is None:
logger.debug('[DETECTION] [LANGUAGE] chiefly language not found, using the largest language ({language}) instead'.format(
language=tmp_language))
self.lang = tmp_language
logger.debug('[DETECTION] [LANGUAGE] main language({main_language}), tmp language({tmp_language})'.format(tmp_language=tmp_language,
main_language=self.lang))
return self.lang
@property
def framework(self):
tree = self.rule()
root = tree.getroot()
frame_data, language_data = self.parse_xml(root, self.frame_data, self.language_data)
projects_data = self.project_information(self.target_directory, False)
frame_name = self.dependency_scan(root) # Based on the dependency analysis framework type
if frame_name is not None:
return frame_name
frames_name = frame_data.keys()
for frame_name in frames_name:
for rule_name in frame_data[frame_name]:
for project_data in projects_data:
if rule_name in project_data:
logger.debug("[DETECTION] [FRAMEWORK] Find the project's framework may be:" + frame_name)
return frame_name
logger.info('[DETECTION] [FRAMEWORK] Unknown Framework')
return 'Unknown Framework'
def dependency_scan(self, root):
"""
Identify the framework type used by the project based on its third-party dependencies
:param root:
:return:
"""
framework_infos = self.dependency_framework(root)
dependencies = Dependencies(self.target_directory)
dependencies_info = dependencies.get_framework
dependencies_info = list(set(dependencies_info))
for frame_name in framework_infos:
for rule in framework_infos[frame_name]['rule']:
for dependency in dependencies_info:
if rule in dependency:
logger.debug("Find the project's framework may be:" + frame_name)
return frame_name
return None
@staticmethod
def dependency_framework(root):
"""
:param root:
:return:
"""
framework_infos = {}
for framework in root:
rule_info = {
'rule': []
}
frame = framework.get('name')
for rule in framework:
if rule.tag == 'dependency':
rule_info['rule'].append(rule.get('value'))
if len(rule_info['rule']) != 0:
framework_infos[frame] = rule_info
return framework_infos
def _requirements(self):
requirements_txt = os.path.join(self.target_directory, 'requirements.txt')
logger.debug(requirements_txt)
if os.path.isfile(requirements_txt):
requirements = parse_requirements(requirements_txt, session=False)
self.requirements = [req.name.strip().lower() for req in requirements]
logger.debug('requirements modules count: {count} ({modules})'.format(count=len(self.requirements),
modules=','.join(self.requirements)))
else:
logger.debug('requirements.txt not found!')
self.requirements = []
def parse_xml(self, root, frame_data, language_data, frame_name=None):
language_name = ''
if len(root) != 0:
if root.tag != 'cobra':
frame_name = root.attrib['name']
language_name = root.attrib['language']
frame_data.setdefault(frame_name, [])
for child_of_root in root:
frame_data, language_data = self.parse_xml(child_of_root, frame_data, language_data, frame_name)
language_data.setdefault(language_name, {})
if frame_name is not None:
language_data[language_name].setdefault(frame_name, frame_data[frame_name])
return frame_data, language_data
else:
try:
frame_data[frame_name].append(root.attrib['value'])
return frame_data, language_data
except KeyError as e:
logger.warning(e)
@staticmethod
def rule():
framework_path = os.path.join(rules_path, 'frameworks.xml')
tree = eT.ElementTree(file=framework_path)
return tree
@staticmethod
def get_dict(extension, type_num):
for ext in extension:
type_num.setdefault(ext, {'files': 0, 'blank': 0, 'pound': 0, 'code': 0})
return type_num
@staticmethod
def project_information(absolute_path, extension, is_cloc=False):
allfiles = []
if os.path.isdir(absolute_path):
for root, dirs, filenames in os.walk(absolute_path):
for filename in filenames:
filepath = os.path.join(root, filename)
if is_cloc is True:
fileext = os.path.splitext(filepath)[1][1:]
if fileext in extension:
allfiles.append(filepath)
else:
allfiles.append(filepath)
if os.path.isfile(absolute_path):
absolute_path = os.path.abspath(absolute_path)
if is_cloc is True:
fileext = os.path.splitext(absolute_path)[1][1:]
if fileext in extension:
allfiles.append(absolute_path)
else:
allfiles.append(absolute_path)
return allfiles
# Function for counting Python line statistics
@staticmethod
def count_py_line(filename):
count = {'count_code': 0, 'count_blank': 0, 'count_pound': 0}
fi = open(filename, 'r')
file_line = fi.readline()
while fi.tell() != os.path.getsize(filename):
file_line = file_line.strip()
if len(file_line) == 0:
count['count_blank'] += 1
elif file_line.startswith('#'):
count['count_pound'] += 1
elif file_line.count('"""') == 2 or file_line.count("'''") == 2:
if file_line.startswith('"""') or file_line.startswith("'''"):
count['count_pound'] += 1
else:
count['count_code'] += 1
elif file_line.count('"""') == 1 or file_line.count("'''") == 1:
if file_line.startswith('"""') or file_line.startswith("'''"):
count['count_pound'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_pound'] += 1
if file_line.endswith('"""\n') or file_line.endswith("'''\n"):
break
else:
count['count_code'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_code'] += 1
if file_line.find('"""') or file_line.find("'''"):
break
else:
count['count_code'] += 1
file_line = fi.readline()
fi.close()
return count
# Function for counting PHP line statistics
@staticmethod
def count_php_line(filename):
count = {'count_code': 0, 'count_blank': 0, 'count_pound': 0}
fi = open(filename, 'r')
file_line = fi.readline()
while fi.tell() != os.path.getsize(filename):
file_line = file_line.lstrip()
if len(file_line) == 0:
count['count_blank'] += 1
elif file_line.startswith('//') or file_line.startswith('#'):
count['count_pound'] += 1
elif file_line.count('/*') == 1 and file_line.count('*/') == 1:
if file_line.startswith('/*'):
count['count_pound'] += 1
else:
count['count_code'] += 1
elif file_line.count('/*') == 1 and file_line.count('*/') == 0:
if file_line.startswith('/*'):
count['count_pound'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_pound'] += 1
if file_line.endswith('*/\n'):
break
else:
count['count_code'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_code'] += 1
if file_line.find('*/') != -1:
break
else:
count['count_code'] += 1
file_line = fi.readline()
fi.close()
return count
# Function for counting Java and JS line statistics
@staticmethod
def count_java_line(filename):
count = {'count_code': 0, 'count_blank': 0, 'count_pound': 0}
fi = open(filename, 'r')
file_line = fi.readline()
while fi.tell() != os.path.getsize(filename):
file_line = file_line.lstrip()
if len(file_line) == 0:
count['count_blank'] += 1
elif file_line.startswith('//'):
count['count_pound'] += 1
elif file_line.count('/*') == 1 and file_line.count('*/') == 1:
if file_line.startswith('/*'):
count['count_pound'] += 1
else:
count['count_code'] += 1
elif file_line.count('/*') == 1 and file_line.count('*/') == 0:
if file_line.startswith('/*'):
count['count_pound'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_pound'] += 1
if file_line.endswith('*/\n'):
break
else:
count['count_code'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_code'] += 1
if file_line.find('*/') != -1:
break
else:
count['count_code'] += 1
file_line = fi.readline()
fi.close()
return count
# Function for counting HTML and CSS line statistics
@staticmethod
def count_html_line(filename):
count = {'count_code': 0, 'count_blank': 0, 'count_pound': 0}
fi = open(filename, 'r')
file_line = fi.readline()
while fi.tell() != os.path.getsize(filename):
file_line = file_line.lstrip()
if len(file_line) == 0:
count['count_blank'] += 1
elif file_line.count('<!--') == 1 and file_line.count('-->') == 1:
if file_line.startswith('<!--'):
count['count_pound'] += 1
else:
count['count_code'] += 1
elif file_line.count('<!--') == 1 and file_line.count('-->') == 0:
if file_line.startswith('<!--'):
count['count_pound'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_pound'] += 1
if file_line.endswith('-->\n'):
break
else:
count['count_code'] += 1
while True:
file_line = fi.readline()
if len(file_line) == 0 or file_line == "\n":
count['count_blank'] += 1
else:
count['count_code'] += 1
if file_line.find('-->') != -1:
break
else:
count['count_code'] += 1
file_line = fi.readline()
fi.close()
return count
# Function for counting Markdown and XML line statistics
@staticmethod
def count_data_line(filename):
count = {'count_code': 0, 'count_blank': 0, 'count_pound': 0}
fi = open(filename, 'r')
file_line = fi.readline()
while fi.tell() != os.path.getsize(filename):
file_line = file_line.lstrip()
if len(file_line) == 0:
count['count_blank'] += 1
else:
count['count_code'] += 1
file_line = fi.readline()
fi.close()
return count
@staticmethod
def countnum(count, type_num, fileext):
type_num[fileext]['blank'] += count['count_blank']
type_num[fileext]['code'] += count['count_code']
type_num[fileext]['pound'] += count['count_pound']
type_num[fileext]['files'] += 1
return type_num
@staticmethod
def count_total_num(type_num, extension, total_file, total_blank_line, total_pound_line, total_code_line):
for lang in extension:
total_file += type_num[lang]['files']
total_blank_line += type_num[lang]['blank']
total_pound_line += type_num[lang]['pound']
total_code_line += type_num[lang]['code']
return total_file, total_blank_line, total_pound_line, total_code_line
"""
type_num = {'js':{'files':0, 'blank':0, 'pound':0, 'code':0},
'php':{'files':0, 'blank':0, 'pound':0, 'code':0}
}
For additional file types, add the file suffix to the extension list and to the if
statement that selects the corresponding comment counter, for example:
if fileext == 'py' or fileext == 'java' or fileext == 'xxx'
"""
def cloc(self):
extension = ['js', 'py', 'php', 'java', 'xml', 'css', 'html', 'md', 'm']
type_num = {}
total_code_line = 0
total_pound_line = 0
total_blank_line = 0
total_file = 0
type_num = self.get_dict(extension, type_num)
filelists = self.project_information(self.target_directory, extension, True)
for filelist in filelists:
try:
fileext = os.path.splitext(filelist)[1][1:]
if fileext not in file_type:
file_type.append(fileext)
if fileext == 'py':
count = self.count_py_line(filelist)
type_num = self.countnum(count, type_num, fileext)
if fileext == 'js' or fileext == 'java' or fileext == 'css' or fileext == 'm':
count = self.count_java_line(filelist)
type_num = self.countnum(count, type_num, fileext)
if fileext == 'php':
count = self.count_php_line(filelist)
type_num = self.countnum(count, type_num, fileext)
if fileext == 'md' or fileext == 'xml':
count = self.count_data_line(filelist)
type_num = self.countnum(count, type_num, fileext)
if fileext == 'html':
count = self.count_html_line(filelist)
type_num = self.countnum(count, type_num, fileext)
except:
logger.info('Part of the comment rules did not match; press CTRL + C to continue the program')
total_file, total_blank_line, total_pound_line, total_code_line = self.count_total_num(type_num, extension,
total_file,
total_blank_line,
total_pound_line,
total_code_line)
x = PrettyTable(["language", "files", "blank", "comment", "code"])
x.padding_width = 2
x.align = "l"
for lang in file_type:
try:
x.add_row([lang, type_num[lang]['files'], type_num[lang]['blank'], type_num[lang]['pound'],
type_num[lang]['code']])
except KeyError:
logger.warning('There is no such file type --> ' + lang + ', please add it to the whitelist')
x.add_row(["SUM", total_file, total_blank_line, total_pound_line, total_code_line])
logger.info('\n' + str(x))
return True
|
py | 7dff365aec39a60b13a7152c4580bb8b630c7607 | import datetime
import os
import shutil
import csv
import torch
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
def make_dir(dir_path):
"""
Makes a directory if already doesn't exist
:param dir_path: Directory path to be created
:return: Directory path (str)
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return dir_path
def set_seed(seed):
"""
Set model random seed. The model outputs are seed dependent.
:param seed: An int.
:return: No return
"""
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
def get_device(logger):
"""
Get device model will be run on (GPU or CPU)
:param logger: Logger object to note the device
:return: device type, num_of_gpus
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(device, n_gpu))
return device, n_gpu
def create_save_path(args, execution_file_path):
"""
1) Constructs a model save path: "master_dir/(optional folder)/train_data_name/model_size_date"
2) Creates a copy of the main code.
:param args: Model arguments object
:param execution_file_path: file path to the main code
:return: Training specific directory where everything will be saved to.
"""
now = datetime.datetime.now().strftime("%d-%m-%Y@%H'%M")
master_dir = os.path.dirname(execution_file_path)
# Extract dataset file name from the full path
dataset_name = os.path.basename(os.path.normpath(args.train_data_path)).split(".")[0]
if args.store_in_folder:
log_path = "{}/{}/{}/{}_{}".format(master_dir, args.store_in_folder, dataset_name, args.model_size, now)
else:
log_path = "{}/{}/{}_{}".format(master_dir, dataset_name, args.model_size, now)
make_dir(log_path)
# COPY OF THE MAIN CODE
shutil.copy2(execution_file_path, "{}/copy_of_code_that_run_this_experiment.py".format(log_path))
return log_path
def log_arguments(run_details_file_path, args, special_tokens):
"""
Saves training information to a file, like arguments and special tokens.
:param run_details_file_path: File to be written to
:param args: Model arguments object
:param special_tokens: Special tokens used in this training
:return: No return
"""
now = datetime.datetime.now().strftime("%d-%m-%Y@%H'%M")
# Open a file and appends to a file. If doesn't exists (+) means to create it.
d_file = open(run_details_file_path, "a+")
d_file.write("@" * 30 + " RUN INFO " + "@" * 30)
d_file.write("\n\nDATE: {}".format(now))
d_file.write("\n\nUSING THE FOLLOWING ARGS:\n{}".format(args))
d_file.write("\n\nSPECIAL TOKENS: {}".format(special_tokens))
d_file.close()
def save_dataset(path, input, append=True):
"""
Saves data to a file path.
:param path: Save file path
:param input: Data to be saved
:param append: Whether we should append data or write to clean file
:return: No return
"""
if append:
with open(path, 'a+', encoding='utf_8') as f:
writer = csv.writer(f)
writer.writerows(input)
else:
with open(path, 'w+', encoding='utf_8') as f:
writer = csv.writer(f)
writer.writerows(input)
f.close()
def load_dataset(dataset_path):
"""
Loads lyrics dataset of the following format (genre, artist, year, album, song_name, lyrics)
:param dataset_path: Dataset file path (type csv)
:return: List of tuples where each entry contains a song and its metadata
"""
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
for line in tqdm(f):
# Output (genre, artist, year, album, song_name, lyrics)
output.append((line[0], line[1], line[2], line[3], line[4], line[5]))
return output
def format_n_tokenize_data(raw_dataset, enc):
"""
Separates metadata with the respective special tokens and then tokenizes the formatted text
:param raw_dataset: Text to format and style
:param enc: Tokenizer object
:return: Formatted data in the form of tuples
"""
### TODO: make training examples where lyrics are used as the condition to predict the features
# Get the dict: special token -> token id
spe = enc.added_tokens_encoder
formated_data = []
for genre, artist, year, album, song_name, lyrics in raw_dataset:
ge = [spe["[s:genre]"]] + enc.encode(genre) + [spe["[e:genre]"]]
ar = [spe["[s:artist]"]] + enc.encode(artist) + [spe["[e:artist]"]]
ye = [spe["[s:year]"]] + enc.encode(year) + [spe["[e:year]"]]
al = [spe["[s:album]"]] + enc.encode(album) + [spe["[e:album]"]]
sn = [spe["[s:song_name]"]] + enc.encode(song_name) + [spe["[e:song_name]"]]
ly = [spe["[s:lyrics]"]] + enc.encode(lyrics) + [spe["[e:lyrics]"]]
formated_data.append((ge, ar, ye, al, sn, ly))
print("The exceeding in length inputs are removed from the dataset.")
return formated_data
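# A minimal sketch (an assumption, not part of this module) of how the tokenizer `enc`
# used above could be built with a transformers GPT2 tokenizer, so that
# `enc.added_tokens_encoder` contains the bracketed metadata markers as single tokens.
def build_lyrics_tokenizer():
    from transformers import GPT2Tokenizer

    special_tokens = []
    for field in ("genre", "artist", "year", "album", "song_name", "lyrics"):
        special_tokens += ["[s:{}]".format(field), "[e:{}]".format(field)]
    enc = GPT2Tokenizer.from_pretrained("gpt2")
    # Registers the markers as whole tokens instead of letting BPE split them.
    enc.add_special_tokens({"additional_special_tokens": special_tokens})
    return enc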
def construct_input(formated_data, device, max_input_len=1024):
"""
Given a tokenized dataset, this method constructs inputs required for the GPT2 model fine-tuning.
In particular, it creates token_type_ids & positional_ids, randomly drops lyrics' features, applies padding and
creates language modelling labels, as well as the attention masks.
Refer to - https://huggingface.co/transformers/model_doc/gpt2.html#gpt2lmheadmodel - for an indication of the inputs
:param formated_data: Tokenised dataset with special tokens inplace provided in the form of tuple(all song features)
:param device: Device that will run this code (GPU, CPU)
:param max_input_len: Max input length allowed by the model
:return: Tuple of tensors: (token_ids, token_type_ids, position_ids, attention_mask, lm_labels)
where each is of shape: (num_of_inputs * batch_size * sequence_length) -> (N, 1, 1024)
"""
sucessfull_candidates = []
for genre, artist, year, album, song_name, lyrics in formated_data:
# 1) Prepare input partitions, i.e., token type ids & position ids
# Token type ids, alternatively called segment ids
gen_seg = list([1] * len(genre))
art_seg = list([2] * len(artist))
yea_seg = list([3] * len(year))
alb_seg = list([4] * len(album))
son_seg = list([5] * len(song_name))
lyr_seg = list([6] * len(lyrics))
# 2) Randomly drop features for model to learn to handle subset of conditions
# 25% to drop all metadata but lyrics
if np.random.rand() <= 0.25:
# An integer sequence (0 -> input_len)
position_ids = list(np.arange(0, len(lyrics)))
curr_input = {
"tok_ids": lyrics,
"tok_type_ids": lyr_seg,
"pos_ids": position_ids
}
# Otherwise, drop each individual feature independently with 10% probability
else:
tokens_subset = []
segment_subset = []
if np.random.rand() > 0.1:
tokens_subset += genre
segment_subset += gen_seg
if np.random.rand() > 0.1:
tokens_subset += artist
segment_subset += art_seg
if np.random.rand() > 0.1:
tokens_subset += year
segment_subset += yea_seg
if np.random.rand() > 0.1:
tokens_subset += album
segment_subset += alb_seg
if np.random.rand() > 0.1:
tokens_subset += song_name
segment_subset += son_seg
# Add lyrics in all cases -> add lyrics
tokens_subset += lyrics
segment_subset += lyr_seg
position_ids = list(np.arange(0, len(tokens_subset)))
curr_input = {
"tok_ids": tokens_subset,
"tok_type_ids": segment_subset,
"pos_ids": position_ids
}
# Get rid of songs longer than allowed size, alternatively we could cut off the excess
if len(curr_input["tok_ids"]) > max_input_len:
continue
# 3) Add padding to make the input max_input_len
len_before_padding = len(curr_input["tok_ids"])
padding = max_input_len - len_before_padding
curr_input["tok_ids"] += list([0] * padding)
curr_input["tok_type_ids"] += list([0] * padding)
curr_input["pos_ids"] += list([0] * padding)
# 4) Language Modelling Labels -> this is input_copy with padding assigned to -1,
# the position shifting is done in the library code.
lm_labels = np.copy(curr_input["tok_ids"])
lm_labels[np.where(lm_labels == 0)] = -1
# 5) Attention Mask, 1 = unmasked, 0 = masked
attention_mask = list([1] * len_before_padding) + list([0] * padding)
sucessfull_candidates.append((
curr_input["tok_ids"], curr_input["tok_type_ids"], curr_input["pos_ids"], attention_mask, lm_labels
))
# We need the model inputs separate for the DataLoader
# From tuples of (N, 5, 1024) -> (N, 1024) x 5
# Note: inputs contains 5 lists
inputs = map(list, zip(*sucessfull_candidates))
# Transform each input into a tensor of shape:
# (num_inputs, batch_size, sequence_len) -> (N, 1, 1024)
dataset = [torch.tensor(t, device=torch.device(device)).unsqueeze(1) for t in inputs]
return dataset
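# A small consumption sketch (an assumption, not part of the original pipeline): the five
# tensors returned by construct_input can be zipped back together for batching with a
# standard PyTorch DataLoader.
def make_dataloader(dataset, batch_size=1, shuffle=True):
    from torch.utils.data import DataLoader, TensorDataset

    # Each tensor has shape (N, 1, 1024); TensorDataset yields aligned 5-tuples of
    # (token_ids, token_type_ids, position_ids, attention_mask, lm_labels).
    token_ids, token_type_ids, position_ids, attention_mask, lm_labels = dataset
    tensor_dataset = TensorDataset(token_ids, token_type_ids, position_ids, attention_mask, lm_labels)
    return DataLoader(tensor_dataset, batch_size=batch_size, shuffle=shuffle)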
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
"""
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering.
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
:param logits: Logits distribution shape (batch size x vocabulary size)
:param top_k: Keep only top k tokens with highest probability (top-k filtering).
:param top_p: Keep the top tokens with cumulative probability >= top_p (nucleus filtering).
:param filter_value: Value that will be ignored by in the softmax
:return: Filtered logits
"""
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
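# A hedged usage sketch (not part of the original file): sampling one next-token id
# from a row of logits using temperature plus the top-k/top-p filter defined above.
def sample_next_token(logits, temperature=1.0, top_k=50, top_p=0.95):
    # logits: tensor of shape (batch_size, vocab_size) for the last position.
    filtered = top_k_top_p_filtering(logits / temperature, top_k=top_k, top_p=top_p)
    probs = F.softmax(filtered, dim=-1)
    # Draw one token id per batch element from the filtered distribution.
    return torch.multinomial(probs, num_samples=1)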
|
py | 7dff3710b33e5c775834b5c5cbdcc05ee8897aa2 | """
This example uses Approximate Nearest Neighbor Search (ANN) with Hnswlib (https://github.com/nmslib/hnswlib/).
Searching a large corpus with Millions of embeddings can be time-consuming. To speed this up,
ANN can index the existent vectors. For a new query vector, this index can be used to find the nearest neighbors.
This nearest neighbor search is approximate, i.e., it might not find all of the true top-k nearest neighbors.
In this example, we use Hnswlib: It is a fast and easy to use library, with excellent results on common benchmarks.
Usually you can install Hnswlib by running:
pip install hnswlib
For more details, see https://github.com/nmslib/hnswlib/
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use 100k in this example):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
As the embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned across 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import pickle
import time
import hnswlib
model_name = 'quora-distilbert-multilingual'
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
embedding_cache_path = 'quora-embeddings-{}-size-{}.pkl'.format(model_name.replace('/', '_'), max_corpus_size)
embedding_size = 768 #Size of embeddings
top_k_hits = 10 #Output k hits
#Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row['question1'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row['question2'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_numpy=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data['sentences']
corpus_embeddings = cache_data['embeddings']
#Defining our hnswlib index
index_path = "./hnswlib.index"
#We use Inner Product (dot-product) as the index metric. We will normalize our vectors to unit length, so that Inner Product is equal to cosine similarity
index = hnswlib.Index(space = 'cosine', dim = embedding_size)
if os.path.exists(index_path):
print("Loading index...")
index.load_index(index_path)
else:
### Create the HNSWLIB index
print("Start creating HNSWLIB index")
index.init_index(max_elements = len(corpus_embeddings), ef_construction = 400, M = 64)
# Then we add all embeddings (with sequential ids) to the index
index.add_items(corpus_embeddings, list(range(len(corpus_embeddings))))
print("Saving index to:", index_path)
index.save_index(index_path)
# Controlling the recall by setting ef:
index.set_ef(50) # ef should always be > top_k_hits
######### Search in the index ###########
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question)
#We use hnswlib knn_query method to find the top_k_hits
corpus_ids, distances = index.knn_query(question_embedding, k=top_k_hits)
# We extract corpus ids and scores for the first query
hits = [{'corpus_id': id, 'score': 1-score} for id, score in zip(corpus_ids[0], distances[0])]
hits = sorted(hits, key=lambda x: x['score'], reverse=True)
end_time = time.time()
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time-start_time))
for hit in hits[0:top_k_hits]:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
# Approximate Nearest Neighbor (ANN) is not exact, it might miss entries with high cosine similarity
# Here, we compute the recall of ANN compared to the exact results
correct_hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k_hits)[0]
correct_hits_ids = {hit['corpus_id'] for hit in correct_hits}
ann_corpus_ids = {hit['corpus_id'] for hit in hits}
if len(ann_corpus_ids) != len(correct_hits_ids):
print("Approximate Nearest Neighbor returned a different number of results than expected")
recall = len(ann_corpus_ids.intersection(correct_hits_ids)) / len(correct_hits_ids)
print("\nApproximate Nearest Neighbor Recall@{}: {:.2f}".format(top_k_hits, recall * 100))
if recall < 1:
print("Missing results:")
for hit in correct_hits[0:top_k_hits]:
if hit['corpus_id'] not in ann_corpus_ids:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
print("\n\n========\n")
|
py | 7dff37a05df2ec02edf565e7b3ca41e27cdf1f1e | from nibabel import load
from nibabel.trackvis import empty_header, write
from dipy.tracking.integration import BoundryIntegrator, generate_streamlines
from dipy.tracking.utils import seeds_from_mask, target
from dipy.reconst.shm import SlowAdcOpdfModel, ClosestPeakSelector, \
normalize_data, ResidualBootstrapWrapper
from dipy.reconst.interpolate import NearestNeighborInterpolator
from dipy.core.triangle_subdivide import create_half_unit_sphere
from dipy.data import sample_hardi_data, sample_tracking_seedNtarget
def simple_tracking_function(data, fa, bval, bvec, seed_mask, start_steps,
voxel_size, density):
"""An example of a simple traking function using the tools in dipy
This tracking function uses the SlowAdcOpdfModel to fit diffusion data. By
using the ClosestPeakSelector, the function tracks along the peak of Opdf
closest to the incoming direction. It also uses the BoundryIntegrator to
integrate the streamlines and NearestNeighborInterpolator to interpolate
the data. The ResidualBootstrap means the tracks are probabilistic, not
deterministic.
"""
    #the interpolator allows us to index the dwi data in continuous space
data_mask = fa > .2
normalized_data = normalize_data(data, bval)
interpolator = NearestNeighborInterpolator(normalized_data, voxel_size,
data_mask)
#the model fits the dwi data, this model can resolve crossing fibers
#see documentation of SlowAdcOpdfModel for more info
model = SlowAdcOpdfModel(6, bval, bvec, .006)
vert, edges, faces = create_half_unit_sphere(4)
model.set_sampling_points(vert, edges)
#this residual bootstrap wrapper returns a sample from the bootstrap
    #distribution instead of returning the raw data
min_signal = normalized_data.min()
B = model.B
wrapped_interp = ResidualBootstrapWrapper(interpolator, B, min_signal)
#the peakselector returns the closest peak to the incoming direction when
#in voxels with multiple peaks
peak_finder = ClosestPeakSelector(model, wrapped_interp)
peak_finder.angle_limit = 60
seeds = seeds_from_mask(seed_mask, density, voxel_size)
#the propagator is used to integrate the streamlines
propogator = BoundryIntegrator(voxel_size)
tracks = generate_streamlines(peak_finder, propogator, seeds, start_steps)
return tracks
def main():
"""Track example dataset"""
data, fa, bvec, bval, voxel_size = sample_hardi_data()
seed_mask, target_mask = sample_tracking_seedNtarget()
density = [1, 1, 2]
start_step = [-0.3, -0.7, -0.7]
tracks = simple_tracking_function(data, fa, bval, bvec, seed_mask, start_step,
voxel_size, density)
tracks = list(tracks)
targeted_tracks = target(tracks, target_mask, voxel_size)
"""
Uncomment this to save tracks
trk_tracks = ((streamline, None, None) for streamline in tracks)
trgt_trk_tracks = ((streamline, None, None) for streamline in targeted_tracks)
trk_hdr = empty_header()
trk_hdr['voxel_order'] = 'LPI'
trk_hdr['voxel_size'] = voxel_size
trk_hdr['dim'] = fa.shape
write('example_tracks_before_target.trk', trk_tracks, trk_hdr)
write('example_tracks_after_target.trk', trgt_trk_tracks, trk_hdr)
"""
if __name__ == "__main__":
main()
|
py | 7dff39fe5f92e51a87e2ac41d42efa20ffd5a3e0 | import enum
from typing import Optional
from .client import Device
from .util import lookup_enum
class IcePlus(enum.Enum):
OFF = "@CP_OFF_EN_W"
ON = "@CP_ON_EN_W"
ICE_PLUS = "@RE_TERM_ICE_PLUS_W"
ICE_PLUS_FREEZE = "@RE_MAIN_SPEED_FREEZE_TERM_W"
ICE_PLUS_OFF = "@CP_TERM_OFF_KO_W"
class FreshAirFilter(enum.Enum):
OFF = "@CP_TERM_OFF_KO_W"
AUTO = "@RE_STATE_FRESH_AIR_FILTER_MODE_AUTO_W"
POWER = "@RE_STATE_FRESH_AIR_FILTER_MODE_POWER_W"
REPLACE_FILTER = "@RE_STATE_REPLACE_FILTER_W"
SMARTCARE_ON = "@RE_STATE_SMART_SMART_CARE_ON"
SMARTCARE_OFF = "@RE_STATE_SMART_SMART_CARE_OFF"
SMARTCARE_WAIT = "@RE_STATE_SMART_SMART_CARE_WAIT"
EMPTY = ""
class SmartSavingMode(enum.Enum):
OFF = "@CP_TERM_USE_NOT_W"
NIGHT = "@RE_SMARTSAVING_MODE_NIGHT_W"
CUSTOM = "@RE_SMARTSAVING_MODE_CUSTOM_W"
SMART_GRID_OFF = "@CP_OFF_EN_W"
SMART_GRID_DEMAND_RESPONSE = "@RE_TERM_DEMAND_RESPONSE_FUNCTIONALITY_W"
SMART_GRID_CUSTOM = "@RE_TERM_DELAY_DEFROST_CAPABILITY_W"
EMPTY = ""
class RefrigeratorDevice(Device):
"""A higher-level interface for a refrigerator."""
def set_temp_refrigerator_c(self, temp):
"""Set the refrigerator temperature in Celsius.
"""
value = self.model.enum_value('TempRefrigerator', str(temp))
self._set_control('RETM', value)
def set_temp_freezer_c(self, temp):
"""Set the freezer temperature in Celsius.
"""
value = self.model.enum_value('TempFreezer', str(temp))
self._set_control('REFT', value)
def poll(self) -> Optional['RefrigeratorStatus']:
"""Poll the device's current state.
Monitoring must be started first with `monitor_start`.
:returns: Either a `RefrigeratorStatus` instance or `None` if the
status is not yet available.
"""
# Abort if monitoring has not started yet.
if not hasattr(self, 'mon'):
return None
data = self.mon.poll()
if data:
res = self.model.decode_monitor(data)
return RefrigeratorStatus(self, res)
else:
return None
class RefrigeratorStatus(object):
"""Higher-level information about a refrigerator's current status.
:param refrigerator: The RefrigeratorDevice instance.
:param data: JSON data from the API.
"""
def __init__(self, refrigerator: RefrigeratorDevice, data: dict):
self.refrigerator = refrigerator
self.data = data
@property
def temp_refrigerator_c(self):
temp = lookup_enum('TempRefrigerator', self.data, self.refrigerator)
return int(temp)
@property
def temp_freezer_c(self):
temp = lookup_enum('TempFreezer', self.data, self.refrigerator)
return int(temp)
@property
def ice_plus_status(self):
status = lookup_enum('IcePlus', self.data, self.refrigerator)
return IcePlus(status)
@property
def fresh_air_filter_status(self):
status = lookup_enum('FreshAirFilter', self.data, self.refrigerator)
return FreshAirFilter(status)
@property
def energy_saving_mode(self):
mode = lookup_enum('SmartSavingMode', self.data, self.refrigerator)
return SmartSavingMode(mode)
@property
def door_opened(self):
state = lookup_enum('DoorOpenState', self.data, self.refrigerator)
return state == "OPEN"
@property
def temp_unit(self):
return lookup_enum('TempUnit', self.data, self.refrigerator)
@property
def energy_saving_enabled(self):
mode = lookup_enum(
'SmartSavingModeStatus', self.data, self.refrigerator
)
return mode == 'ON'
@property
def locked(self):
status = lookup_enum('LockingStatus', self.data, self.refrigerator)
return status == "LOCK"
@property
def active_saving_status(self):
return self.data['ActiveSavingStatus']
@property
def eco_enabled(self):
eco = lookup_enum('EcoFriendly', self.data, self.refrigerator)
return eco == "@CP_ON_EN_W"
@property
def water_filter_used_month(self):
return self.data['WaterFilterUsedMonth']
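# A rough polling sketch (not part of the original module). The constructor
# arguments and monitor_stop() are assumptions about the base Device class;
# only monitor_start is confirmed by the poll() docstring above.
#
#   fridge = RefrigeratorDevice(client, device_info)  # hypothetical arguments
#   fridge.monitor_start()
#   status = None
#   while status is None:
#       time.sleep(1)
#       status = fridge.poll()
#   print(status.temp_refrigerator_c, status.door_opened)
#   fridge.monitor_stop()  # assumed counterpart to monitor_start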
|
py | 7dff3a9969a24eb50ecb0bbe1dc0e985ee367626 | from functools import wraps
from flask import make_response, request
from userver.user.models import User
"""
Supports basic auth with either:
1. email and password
2. auth token (username=auth token, password='')
"""
class HTTPAuth:
def __init__(self):
# def default_get_password(username):
# return None
def default_auth_error():
return "Unauthorized Access"
self.realm = "Authentication Required"
# self.get_password(default_get_password)
self.error_handler(default_auth_error)
def error_handler(self, f):
@wraps(f)
def decorated(*args, **kwargs):
res = f(*args, **kwargs)
if type(res) == str:
res = make_response(res)
res.status_code = 401
if 'WWW-Authenticate' not in res.headers.keys():
res.headers['WWW-Authenticate'] = 'Basic realm="' + self.realm + '"'
return res
self.auth_error_callback = decorated
return decorated
@staticmethod
def verify_password(email_or_token, password):
# first try to authenticate by token
user = User.verify_auth_token(email_or_token)
if not user:
# try to authenticate with username/password
user = User.query.filter_by(email=email_or_token).first()
if not user or not user.verify_password(password):
return False
return user
def auth_required(self, f):
@wraps(f)
def decorated(*args, **kwargs):
user = User.query.get(1)
return f(user, *args, **kwargs)
return decorated
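# A minimal usage sketch (not part of the original module), assuming a Flask
# view function; `app` is a hypothetical Flask application object:
#
#   @app.route('/api/profile')
#   @auth.auth_required
#   def profile(user):
#       return {'email': user.email}
#
# Note that auth_required above currently passes User.query.get(1) instead of
# checking credentials via verify_password, so this only illustrates the
# intended call pattern: the decorated view receives the user as its first argument.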
auth = HTTPAuth() |
py | 7dff3b5f7197f8c181a4ec3b9840aad197529c88 | import numpy as np
import matplotlib.pyplot as plt
# time = 20 second in 40 steps
t = np.linspace(0., 20, 40)
# Initial values
L = 20.
w = 500.
Qb = 150.
Qd = 500.
b0 = 100.
alpha = 0.05*(1/Qb - 1/Qd)
Z = np.exp(-alpha*w*L)/Qd - 1/Qb
b = b0*(np.exp(-alpha*w*L)/Qd - np.exp(-alpha*w*t)/Qb)/Z
d = b0*(np.exp(-alpha*w*L)/Qd - np.exp(-alpha*w*t)/Qd)/Z
plt.plot(t, b, 'r', t, d, 'b')
plt.show()
beta = 0.05*((1/Qb) + (1/Qd))
d1 = b0*Qd*(1 - np.exp(-beta*w*t))/(Qb + Qd)
b1 = d1 + b0*np.exp(-beta*w*t)
plt.plot(t, b1, 'r', t, d1, 'b')
plt.show()
|
py | 7dff3b852c7b0689f51d7090059ead057de2751f | from ._testutils import run_until_complete, BaseTest
from asyncnsq.http.writer import NsqdHttpWriter
class NsqdHttpWriterTest(BaseTest):
@run_until_complete
async def test_http_publish(self):
http_writer = NsqdHttpWriter(
"127.0.0.1", 4151, loop=self.loop)
ok = await http_writer.pub('http_baz', 'producer msg')
self.assertEqual(ok, 'OK')
await http_writer.close()
@run_until_complete
async def test_http_mpublish(self):
http_writer = NsqdHttpWriter(
"127.0.0.1", 4151, loop=self.loop)
messages = ['baz:1', b'baz:2', 3.14, 42]
ok = await http_writer.mpub('http_baz', *messages)
self.assertEqual(ok, 'OK')
await http_writer.close()
|
py | 7dff3c97e1288de292694b1112098e9adbce1333 | import pymc3_ext as pm
import numpy as np
from numpy import random as nr
import numpy.testing as npt
import pytest
import theano.tensor as tt
import theano
from pymc3_ext.distributions.distribution import _draw_value, draw_values
from .helpers import SeededTest
def test_draw_value():
npt.assert_equal(_draw_value(np.array([5, 6])), [5, 6])
npt.assert_equal(_draw_value(np.array(5.)), 5)
npt.assert_equal(_draw_value(tt.constant([5., 6.])), [5, 6])
assert _draw_value(tt.constant(5)) == 5
npt.assert_equal(_draw_value(2 * tt.constant([5., 6.])), [10, 12])
val = theano.shared(np.array([5., 6.]))
npt.assert_equal(_draw_value(val), [5, 6])
npt.assert_equal(_draw_value(2 * val), [10, 12])
a = tt.scalar('a')
a.tag.test_value = 6
npt.assert_equal(_draw_value(2 * a, givens=[(a, 1)]), 2)
assert _draw_value(5) == 5
assert _draw_value(5.) == 5
assert isinstance(_draw_value(5.), type(5.))
assert isinstance(_draw_value(5), type(5))
with pm.Model():
mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
a = pm.Normal('a', mu=mu, sigma=5, shape=2)
val1 = _draw_value(a)
val2 = _draw_value(a)
assert np.all(val1 != val2)
with pytest.raises(ValueError) as err:
_draw_value([])
err.match('Unexpected type')
class TestDrawValues:
def test_empty(self):
assert draw_values([]) == []
def test_vals(self):
npt.assert_equal(draw_values([np.array([5, 6])])[0], [5, 6])
npt.assert_equal(draw_values([np.array(5.)])[0], 5)
npt.assert_equal(draw_values([tt.constant([5., 6.])])[0], [5, 6])
assert draw_values([tt.constant(5)])[0] == 5
npt.assert_equal(draw_values([2 * tt.constant([5., 6.])])[0], [10, 12])
val = theano.shared(np.array([5., 6.]))
npt.assert_equal(draw_values([val])[0], [5, 6])
npt.assert_equal(draw_values([2 * val])[0], [10, 12])
def test_simple_model(self):
with pm.Model():
mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
a = pm.Normal('a', mu=mu, sigma=5, shape=2)
val1 = draw_values([a])
val2 = draw_values([a])
assert np.all(val1[0] != val2[0])
point = {'a': np.array([3., 4.])}
npt.assert_equal(draw_values([a], point=point), [point['a']])
def test_dep_vars(self):
with pm.Model():
mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
sd = pm.HalfNormal('sd', shape=2)
tau = 1 / sd ** 2
a = pm.Normal('a', mu=mu, tau=tau, shape=2)
point = {'a': np.array([1., 2.])}
npt.assert_equal(draw_values([a], point=point), [point['a']])
val1 = draw_values([a])[0]
val2 = draw_values([a], point={'sd': np.array([2., 3.])})[0]
val3 = draw_values([a], point={'sd_log__': np.array([2., 3.])})[0]
val4 = draw_values([a], point={'sd_log__': np.array([2., 3.])})[0]
assert all([np.all(val1 != val2), np.all(val1 != val3),
np.all(val1 != val4), np.all(val2 != val3),
np.all(val2 != val4), np.all(val3 != val4)])
def test_gof_constant(self):
# Issue 3595 pointed out that slice(None) can introduce
# theano.gof.graph.Constant into the compute graph, which wasn't
# handled correctly by draw_values
n_d = 500
n_x = 2
n_y = 1
n_g = 10
g = np.random.randint(0, n_g, (n_d,)) # group
x = np.random.randint(0, n_x, (n_d,)) # x factor
with pm.Model():
multi_dim_rv = pm.Normal('multi_dim_rv', mu=0, sd=1, shape=(n_x, n_g, n_y))
indexed_rv = multi_dim_rv[x, g, :]
i = draw_values([indexed_rv])
assert i is not None
class TestJointDistributionDrawValues(SeededTest):
def test_joint_distribution(self):
with pm.Model() as model:
a = pm.Normal('a', mu=0, sigma=100)
b = pm.Normal('b', mu=a, sigma=1e-8)
c = pm.Normal('c', mu=a, sigma=1e-8)
d = pm.Deterministic('d', b + c)
# Expected RVs
N = 1000
norm = np.random.randn(3, N)
eA = norm[0] * 100
eB = eA + norm[1] * 1e-8
eC = eA + norm[2] * 1e-8
eD = eB + eC
# Drawn RVs
nr.seed(self.random_seed)
# A, B, C, D = list(zip(*[draw_values([a, b, c, d]) for i in range(N)]))
A, B, C, D = draw_values([a, b, c, d], size=N)
A = np.array(A).flatten()
B = np.array(B).flatten()
C = np.array(C).flatten()
D = np.array(D).flatten()
# Assert that the drawn samples match the expected values
assert np.allclose(eA, A)
assert np.allclose(eB, B)
assert np.allclose(eC, C)
assert np.allclose(eD, D)
# Assert that A, B and C have the expected difference
assert np.all(np.abs(A - B) < 1e-6)
assert np.all(np.abs(A - C) < 1e-6)
assert np.all(np.abs(B - C) < 1e-6)
# Marginal draws
mA = np.array([draw_values([a]) for i in range(N)]).flatten()
mB = np.array([draw_values([b]) for i in range(N)]).flatten()
mC = np.array([draw_values([c]) for i in range(N)]).flatten()
# Also test the with model context of draw_values
with model:
mD = np.array([draw_values([d]) for i in range(N)]).flatten()
# Assert that the marginal distributions have different sample values
assert not np.all(np.abs(B - mB) < 1e-2)
assert not np.all(np.abs(C - mC) < 1e-2)
assert not np.all(np.abs(D - mD) < 1e-2)
# Assert that the marginal distributions do not have high cross
# correlation
assert np.abs(np.corrcoef(mA, mB)[0, 1]) < 0.1
assert np.abs(np.corrcoef(mA, mC)[0, 1]) < 0.1
assert np.abs(np.corrcoef(mB, mC)[0, 1]) < 0.1
|
py | 7dff3d5595151d220070ee79e386f53bfc396c82 | """
Utility functions.
"""
import warnings
from collections import OrderedDict
from contextlib import contextmanager
# Python 2/3 independent dict iteration
iteritems = getattr(dict, 'iteritems', dict.items)
itervalues = getattr(dict, 'itervalues', dict.values)
class LRUCache:
# @param capacity, an integer
def __init__(self, capacity=None):
self.capacity = capacity
self.__cache = OrderedDict()
@property
def lru(self):
return list(self.__cache.keys())
@property
def length(self):
return len(self.__cache)
def clear(self):
self.__cache.clear()
def __len__(self):
return self.length
def __contains__(self, item):
return item in self.__cache
def __setitem__(self, key, value):
self.set(key, value)
def __delitem__(self, key):
del self.__cache[key]
def __getitem__(self, key):
return self.get(key)
def get(self, key, default=None):
value = self.__cache.get(key)
if value:
del self.__cache[key]
self.__cache[key] = value
return value
return default
def set(self, key, value):
if self.__cache.get(key):
del self.__cache[key]
self.__cache[key] = value
else:
self.__cache[key] = value
            # Check if the cache is full and we have to remove old items.
            # If capacity is None the cache is unbounded, the check below
            # never triggers and the cache is never cleared.
if self.capacity is not None and self.length > self.capacity:
self.__cache.popitem(last=False)
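# A brief usage sketch of the cache (illustrative values only):
#
#   cache = LRUCache(capacity=2)
#   cache["a"] = 1
#   cache["b"] = 2
#   cache["a"]        # refreshes "a", so "b" becomes least recently used
#   cache["c"] = 3    # exceeds the capacity of 2 and evicts "b"
#   cache.lru         # -> ['a', 'c']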
# Source: https://github.com/PythonCharmers/python-future/blob/466bfb2dfa36d865285dc31fe2b0c0a53ff0f181/future/utils/__init__.py#L102-L134
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class Metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return Metaclass('temporary_class', None, {})
@contextmanager
def catch_warning(warning_cls):
with warnings.catch_warnings():
warnings.filterwarnings('error', category=warning_cls)
yield
class FrozenDict(dict):
def __hash__(self):
return hash(tuple(sorted(self.items())))
def _immutable(self, *args, **kws):
raise TypeError('object is immutable')
__setitem__ = _immutable
__delitem__ = _immutable
clear = _immutable
update = _immutable
setdefault = _immutable
pop = _immutable
popitem = _immutable
def freeze(obj):
if isinstance(obj, dict):
return FrozenDict((k, freeze(v)) for k, v in obj.items())
elif isinstance(obj, list):
return tuple(freeze(el) for el in obj)
elif isinstance(obj, set):
return frozenset(obj)
else:
return obj
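# A short illustration of freeze (example values only):
#
#   frozen = freeze({"key": [1, 2], "tags": {"a", "b"}})
#   # lists become tuples, sets become frozensets, dicts become FrozenDict
#   hash(frozen)  # now possible, since every contained value is hashable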
|
py | 7dff3d69d6afa8c548e163369be7d7b0122ec55e | from django.db import models
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=30)
headImg = models.FileField(upload_to="./upload/")
def __unicode__(self):
return self.username
|
py | 7dff3dafd3f9cfb572c4046a814176803584e4c0 | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import unittest
import keras2onnx
import keras_contrib
import numpy as np
from keras2onnx import set_converter
from keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_onnx_runtime, convert_InstanceNormalizationLayer
Activation = keras.layers.Activation
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
Concatenate = keras.layers.Concatenate
Conv2D = keras.layers.Conv2D
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Input = keras.layers.Input
LeakyReLU = keras.layers.LeakyReLU
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
# From https://github.com/eriklindernoren/Keras-GAN/blob/master/pixelda/pixelda.py
class PixelDA():
def __init__(self):
# Input shape
self.img_rows = 32
self.img_cols = 32
self.channels = 3
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.num_classes = 10
# Loss weights
lambda_adv = 10
lambda_clf = 1
# Calculate output shape of D (PatchGAN)
patch = int(self.img_rows / 2**4)
self.disc_patch = (patch, patch, 1)
# Number of residual blocks in the generator
self.residual_blocks = 6
# Number of filters in first layer of discriminator and classifier
self.df = 64
self.cf = 64
# Build and compile the discriminators
self.discriminator = self.build_discriminator()
# Build the generator
self.generator = self.build_generator()
# Build the task (classification) network
self.clf = self.build_classifier()
# Input images from both domains
img_A = Input(shape=self.img_shape)
img_B = Input(shape=self.img_shape)
# Translate images from domain A to domain B
fake_B = self.generator(img_A)
# Classify the translated image
class_pred = self.clf(fake_B)
# For the combined model we will only train the generator and classifier
self.discriminator.trainable = False
# Discriminator determines validity of translated images
valid = self.discriminator(fake_B)
self.combined = Model(img_A, [valid, class_pred])
def build_generator(self):
"""Resnet Generator"""
def residual_block(layer_input):
"""Residual block described in paper"""
d = Conv2D(64, kernel_size=3, strides=1, padding='same')(layer_input)
d = BatchNormalization(momentum=0.8)(d)
d = Activation('relu')(d)
d = Conv2D(64, kernel_size=3, strides=1, padding='same')(d)
d = BatchNormalization(momentum=0.8)(d)
d = Add()([d, layer_input])
return d
# Image input
img = Input(shape=self.img_shape)
l1 = Conv2D(64, kernel_size=3, padding='same', activation='relu')(img)
        # Propagate signal through residual blocks
r = residual_block(l1)
for _ in range(self.residual_blocks - 1):
r = residual_block(r)
output_img = Conv2D(self.channels, kernel_size=3, padding='same', activation='tanh')(r)
return Model(img, output_img)
def build_discriminator(self):
def d_layer(layer_input, filters, f_size=4, normalization=True):
"""Discriminator layer"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if normalization:
d = InstanceNormalization()(d)
return d
img = Input(shape=self.img_shape)
d1 = d_layer(img, self.df, normalization=False)
d2 = d_layer(d1, self.df*2)
d3 = d_layer(d2, self.df*4)
d4 = d_layer(d3, self.df*8)
validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
return Model(img, validity)
def build_classifier(self):
def clf_layer(layer_input, filters, f_size=4, normalization=True):
"""Classifier layer"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if normalization:
d = InstanceNormalization()(d)
return d
img = Input(shape=self.img_shape)
c1 = clf_layer(img, self.cf, normalization=False)
c2 = clf_layer(c1, self.cf*2)
c3 = clf_layer(c2, self.cf*4)
c4 = clf_layer(c3, self.cf*8)
c5 = clf_layer(c4, self.cf*8)
class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(c5))
return Model(img, class_pred)
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
class TestPixelDA(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
def test_PixelDA(self):
keras_model = PixelDA().combined
x = np.random.rand(5, 32, 32, 3).astype(np.float32)
expected = keras_model.predict([x])
onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))
if __name__ == "__main__":
unittest.main()
|
py | 7dff3e37b24ddd9440d64bf5365db59f7939ed22 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="fontin",
version="0.0.10",
author="Laxya Pahuja",
author_email="[email protected]",
description="A better font extractor and installer for bulk fonts in one archive.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/laxyapahuja/font-in",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={'console_scripts': ['fontin=fontin.__main__:main']},
python_requires='>=3.6',
install_requires=[
'pyunpack',
'patool'
],
project_urls={
'Documentation': 'https://github.com/laxyapahuja/font-in/README.md',
'Source': 'https://github.com/laxyapahuja/font-in'
},
) |
py | 7dff3ea3b67f4f6c11b203e9c12381de18688a9c | # Author: Fayas (https://github.com/FayasNoushad) (@FayasNoushad)
from .admin import *
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
START_TEXT = """Hello {} 😌
I am a link shortener telegram bot.
>> `I can shorten any type of link`
Made by @FayasNoushad"""
HELP_TEXT = """**Hey, Follow these steps:**
➠ Just send a link for shortening.
➠ I will send the shortened links.
**Available Commands**
/start - Checking Bot Online
/help - For more help
/about - For more about me
/status - For bot status
/settings - For bot settings
/reset - For reset bot settings
Made by @FayasNoushad"""
ABOUT_TEXT = """--**About Me 😎**--
🤖 **Name :** [Link shortner](https://telegram.me/{})
👨💻 **Developer :** [Fayas](https://github.com/FayasNoushad)
📢 **Channel :** [Fayas Noushad](https://telegram.me/FayasNoushad)
👥 **Group :** [Developer Team](https://telegram.me/TheDeveloperTeam)
🌐 **Source :** [👉 Click here](https://github.com/FayasNoushad/URL-Shortner)
📝 **Language :** [Python3](https://python.org)
🧰 **Framework :** [Pyrogram](https://pyrogram.org)
📡 **Server :** [Heroku](https://heroku.com)"""
SETTINGS_TEXT = "**Settings**"
RESET_TEXT = "**Are you sure you want to reset?**"
START_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('⚙ Help', callback_data='help'),
InlineKeyboardButton('About 🔰', callback_data='about'),
InlineKeyboardButton('Close ⛔️', callback_data='close')
]]
)
HELP_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('About 🔰', callback_data='about')
],[
InlineKeyboardButton('⚒ Settings', callback_data='settings'),
InlineKeyboardButton('Close ⛔️', callback_data='close')
]]
)
ABOUT_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('Help ⚙', callback_data='help')
],[
InlineKeyboardButton('Close ⛔️', callback_data='close')
]]
)
SETTINGS_BUTTONS = [
[
InlineKeyboardButton('🏘 Home', callback_data='home'),
InlineKeyboardButton('Help ⚙', callback_data='help')
],
[
InlineKeyboardButton('🔄 Reset', callback_data='reset'),
InlineKeyboardButton('Close ⛔️', callback_data='close')
]
]
RESET_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton(text="Yes ✅", callback_data="confirm_reset"),
InlineKeyboardButton(text="No ❌", callback_data="cancel_reset")
]]
)
@Client.on_message(filters.private & filters.command(["start"]))
async def start(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=START_TEXT.format(update.from_user.mention),
reply_markup=START_BUTTONS,
disable_web_page_preview=True,
quote=True
)
@Client.on_message(filters.private & filters.command(["help"]))
async def help(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=HELP_TEXT,
disable_web_page_preview=True,
reply_markup=HELP_BUTTONS,
quote=True
)
@Client.on_message(filters.private & filters.command(["about"]))
async def about(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=ABOUT_TEXT.format((await bot.get_me()).username),
disable_web_page_preview=True,
reply_markup=ABOUT_BUTTONS,
quote=True
)
@Client.on_message(filters.private & filters.command(["reset"]))
async def reset(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await update.reply_text(
text=RESET_TEXT,
disable_web_page_preview=True,
reply_markup=RESET_BUTTONS,
quote=True
)
@Client.on_message(filters.private & filters.command(["status"]))
async def status(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
total_users = await db.total_users_count()
text = "**Bot Status**\n"
text += f"\n**Total Users:** `{total_users}`"
await update.reply_text(
text=text,
quote=True,
disable_web_page_preview=True
)
@Client.on_message(filters.private & filters.command(["settings"]))
async def settings(bot, update):
if not await db.is_user_exist(update.from_user.id):
await db.add_user(update.from_user.id)
await display_settings(bot, update, db)
async def display_settings(bot, update, db, cb=False):
chat_id = update.from_user.id
text = SETTINGS_TEXT
buttons = []
    # Build one toggle button per supported shortener domain, showing ✅ or ❌
    # depending on whether the domain is currently enabled for this chat.
    domains = [
        ("Gplinks.in", "gplinks.in"),
        ("Bit.ly", "bit.ly"),
        ("Chilp.it", "chilp.it"),
        ("Click.ru", "click.ru"),
        ("Cutt.ly", "cutt.ly"),
        ("Da.gd", "da.gd"),
        ("Git.io", "git.io"),
        ("Is.gd", "is.gd"),
        ("Osdb.link", "osdb.link"),
        ("Ow.ly", "ow.ly"),
        ("Po.st", "po.st"),
        ("Qps.ru", "qps.ru"),
        ("Short.cm", "short.cm"),
        ("Tinyurl.com", "tinyurl.com"),
        ("0x0.st", "0x0.st"),
        ("ttm.sh", "ttm.sh"),
    ]
    for display_name, domain in domains:
        mark = "✅" if await db.allow_domain(chat_id, domain=domain) else "❌"
        buttons.append(
            [InlineKeyboardButton(text=f"{display_name} {mark}", callback_data=f"set+{domain}")]
        )
keyboard = []
for line in buttons:
for button in line:
if len(keyboard) == 0 or len(keyboard[-1]) >= 2:
keyboard.append([button])
else:
keyboard[-1].append(button)
for setting_button in SETTINGS_BUTTONS:
keyboard.append(setting_button)
if cb:
await update.message.edit_text(
text=text,
reply_markup=InlineKeyboardMarkup(keyboard),
disable_web_page_preview=True
)
else:
await update.reply_text(
text=text,
reply_markup=InlineKeyboardMarkup(keyboard),
disable_web_page_preview=True,
quote=True
)
|
py | 7dff3fd1e752c9a95351da82464c6eefbf419124 | from django.http import HttpResponseBadRequest
from django.shortcuts import get_object_or_404, render
from django.views import View
from django.views.decorators.http import require_GET
from game.forms import QuestForm
from game.models import Quest
class QuestView(View):
http_method_names = ['get', 'post']
def get(self, request, id):
quest = get_object_or_404(Quest, id=id)
return render(request, 'question.html', {'quest': quest})
def post(self, request, id):
quest = get_object_or_404(Quest, id=id)
qf = QuestForm(request.POST)
if qf.is_valid():
user_answer = qf.cleaned_data['user_answer']
result = quest.answer.evaluate(user_answer)
return render(request, 'answer.html', {'quest': quest, 'result': result})
else:
return HttpResponseBadRequest('You broke it - you won it!')
@require_GET
def reward(request, id):
quest = get_object_or_404(Quest, id=id)
edition = quest.edition
return render(request, 'reward.html', {'name': edition.name, 'reward': edition.reward})
|
py | 7dff406ae63c248c849a07c08e6186a5045dfb44 | """
Miscellaneous python functions
.. autosummary::
:nosignatures:
module_available
environment
ensure_directory_exists
preserve_scalars
decorator_arguments
skipUnlessModule
import_class
classproperty
hybridmethod
estimate_computation_speed
hdf_write_attributes
.. codeauthor:: David Zwicker <[email protected]>
"""
import errno
import functools
import importlib
import json
import os
import sys
import unittest
from pathlib import Path
from typing import Any, Callable, Dict, List, Sequence, Union
import numpy as np
# import functions moved on 2020-07-27
# using this path for import is deprecated
from .output import display_progress, get_progress_bar_class # @UnusedImport
Number = Union[float, complex]
def module_available(module_name: str) -> bool:
"""check whether a python module is available
Args:
module_name (str): The name of the module
Returns:
`True` if the module can be imported and `False` otherwise
"""
try:
importlib.import_module(module_name)
except ImportError:
return False
else:
return True
def environment(dict_type=dict) -> Dict[str, Any]:
"""obtain information about the compute environment
Args:
dict_type: The type to create the returned dictionaries. The default is
`dict`, but :class:`collections.OrderedDict` is an alternative.
Returns:
dict: information about the python installation and packages
"""
from .. import __version__ as package_version
from .numba import numba_environment
def get_package_versions(packages: List[str]) -> Dict[str, str]:
""" tries to load certain python packages and returns their version """
versions: Dict[str, str] = dict_type()
for name in sorted(packages):
try:
module = importlib.import_module(name)
except ImportError:
versions[name] = "not available"
else:
versions[name] = module.__version__ # type: ignore
return versions
result: Dict[str, Any] = dict_type()
result["package version"] = package_version
result["python version"] = sys.version
result["mandatory packages"] = get_package_versions(
["matplotlib", "numba", "numpy", "scipy", "sympy"]
)
result["optional packages"] = get_package_versions(
["h5py", "pandas", "pyfftw", "tqdm"]
)
if module_available("numba"):
result["numba environment"] = numba_environment()
return result
def ensure_directory_exists(folder: Union[str, Path]):
"""creates a folder if it not already exists
Args:
folder (str): path of the new folder
"""
folder = str(folder)
if folder == "":
return
try:
os.makedirs(folder)
except OSError as err:
if err.errno != errno.EEXIST:
raise
def preserve_scalars(method: Callable) -> Callable:
"""decorator that makes vectorized methods work with scalars
    This decorator allows methods that are written to work on numpy
    arrays to also accept python scalars, like `int` and `float`. Essentially,
this wrapper turns them into an array and unboxes the result.
Args:
method: The method being decorated
Returns:
The decorated method
"""
@functools.wraps(method)
def wrapper(self, *args):
args = [number_array(arg, copy=False) for arg in args]
if args[0].ndim == 0:
args = [arg[None] for arg in args]
return method(self, *args)[0]
else:
return method(self, *args)
return wrapper
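# A minimal sketch of how the decorator is meant to be used (the class below is
# hypothetical and only for illustration):
#
#   class Doubler:
#       @preserve_scalars
#       def apply(self, x):
#           return 2 * x                      # written with arrays in mind
#
#   Doubler().apply(np.array([1.0, 2.0]))     # -> array([2., 4.])
#   Doubler().apply(3.0)                      # -> 6.0, scalar in, scalar out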
def decorator_arguments(decorator: Callable) -> Callable:
r"""make a decorator usable with and without arguments:
The resulting decorator can be used like `@decorator`
or `@decorator(\*args, \**kwargs)`
Inspired by https://stackoverflow.com/a/14412901/932593
Args:
decorator: the decorator that needs to be modified
Returns:
the decorated function
"""
@functools.wraps(decorator)
def new_decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# actual decorated function
return decorator(args[0])
else:
# decorator arguments
return lambda realf: decorator(realf, *args, **kwargs)
return new_decorator
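# A minimal illustration (the decorator `tag` below is hypothetical):
#
#   @decorator_arguments
#   def tag(func, label="default"):
#       func.label = label
#       return func
#
#   @tag                 # usable without arguments ...
#   def f(): ...
#
#   @tag(label="fast")   # ... and with keyword arguments
#   def g(): ...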
def skipUnlessModule(module_names: Union[Sequence[str], str]) -> Callable:
"""decorator that skips a test when a module is not available
Args:
module_names (str): The name of the required module(s)
Returns:
A function, so this can be used as a decorator
"""
if isinstance(module_names, str):
module_names = [module_names]
for module_name in module_names:
if not module_available(module_name):
# return decorator skipping test
return unittest.skip(f"requires {module_name}")
# return no-op decorator if all modules are available
def wrapper(f: Callable) -> Callable:
return f
return wrapper
def import_class(identifier: str):
"""import a class or module given an identifier
Args:
identifier (str):
The identifier can be a module or a class. For instance, calling the
function with the string `identifier == 'numpy.linalg.norm'` is
roughly equivalent to running `from numpy.linalg import norm` and
would return a reference to `norm`.
"""
module_path, _, class_name = identifier.rpartition(".")
if module_path:
module = importlib.import_module(module_path)
return getattr(module, class_name)
else:
# this happens when identifier does not contain a dot
return importlib.import_module(class_name)
class classproperty(property):
"""decorator that can be used to define read-only properties for classes.
This is inspired by the implementation of :mod:`astropy`, see
`astropy.org <http://astropy.org/>`_.
Example:
The decorator can be used much like the `property` decorator::
class Test():
item: str = 'World'
@classproperty
def message(cls):
return 'Hello ' + cls.item
print(Test.message)
"""
def __new__(cls, fget=None, doc=None):
if fget is None:
# use wrapper to support decorator without arguments
def wrapper(func):
return cls(func)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None):
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
return self.fget.__wrapped__(objtype)
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError("classproperty is read-only")
def deleter(self, fdel):
raise NotImplementedError("classproperty is read-only")
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
class hybridmethod:
"""
descriptor that can be used as a decorator to allow calling a method both
as a classmethod and an instance method
Adapted from https://stackoverflow.com/a/28238047
"""
def __init__(self, fclass, finstance=None, doc=None):
self.fclass = fclass
self.finstance = finstance
self.__doc__ = doc or fclass.__doc__
# support use on abstract base classes
self.__isabstractmethod__ = bool(getattr(fclass, "__isabstractmethod__", False))
def classmethod(self, fclass):
return type(self)(fclass, self.finstance, None)
def instancemethod(self, finstance):
return type(self)(self.fclass, finstance, self.__doc__)
def __get__(self, instance, cls):
if instance is None or self.finstance is None:
# either bound to the class, or no instance method available
return self.fclass.__get__(cls, None)
return self.finstance.__get__(instance, cls)
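# A short usage sketch (the class below is hypothetical):
#
#   class Example:
#       @hybridmethod
#       def info(cls):
#           return "called on the class"
#
#       @info.instancemethod
#       def info(self):
#           return "called on an instance"
#
#   Example.info()     # -> "called on the class"
#   Example().info()   # -> "called on an instance"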
def estimate_computation_speed(func: Callable, *args, **kwargs) -> float:
"""estimates the computation speed of a function
Args:
func (callable): The function to call
Returns:
float: the number of times the function can be calculated in one second.
The inverse is thus the runtime in seconds per function call
"""
import timeit
test_duration = kwargs.pop("test_duration", 1)
# prepare the function
if args or kwargs:
test_func = functools.partial(func, *args, **kwargs)
else:
test_func = func # type: ignore
# call function once to allow caches be filled
test_func()
# call the function until the total time is achieved
number, duration = 1, 0
while duration < 0.1 * test_duration:
number *= 10
duration = timeit.timeit(test_func, number=number) # type: ignore
return number / duration
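# Example (illustrative call only): measure how often the norm of a
# 1000-element vector can be evaluated per second.
#
#   speed = estimate_computation_speed(np.linalg.norm, np.ones(1000))
#   print(f"{speed:.0f} calls/s, i.e. {1 / speed:.2e} s per call")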
def hdf_write_attributes(
hdf_path, attributes: Dict[str, Any] = None, raise_serialization_error: bool = False
) -> None:
"""write (JSON-serialized) attributes to a hdf file
Args:
hdf_path:
Path to a group or dataset in an open HDF file
attributes (dict):
Dictionary with values written as attributes
raise_serialization_error (bool):
Flag indicating whether serialization errors are raised or silently
ignored
"""
if attributes is None:
return
for key, value in attributes.items():
try:
value_serialized = json.dumps(value)
except TypeError:
if raise_serialization_error:
raise
else:
hdf_path.attrs[key] = value_serialized
def number(value: Union[Number, str]) -> Number:
"""convert a value into a float or complex number
Args:
value (Number or str):
The value which needs to be converted
Result:
Number: A complex number or a float if the imaginary part vanishes
"""
result = complex(value)
return result.real if result.imag == 0 else result
def get_common_dtype(*args):
r"""returns a dtype in which all arguments can be represented
Args:
*args: All items (arrays, scalars, etc) to be checked
Returns: np.complex if any entry is complex, otherwise np.double
"""
for arg in args:
if np.iscomplexobj(arg):
return np.complex
return np.double
def number_array(data: np.ndarray, dtype=None, copy: bool = True) -> np.ndarray:
"""convert array dtype either to np.double or np.complex
Args:
data (:class:`numpy.ndarray`):
The data that needs to be converted to a float array. This can also be any
iterable of numbers.
dtype (numpy dtype):
The data type of the field. All the numpy dtypes are supported. If omitted,
it will be determined from `data` automatically.
copy (bool):
Whether the data must be copied (in which case the original array is left
untouched). Note that data will always be copied when changing the dtype.
Returns:
:class:`numpy.ndarray`: An array with the correct dtype
"""
if dtype is None:
# dtype needs to be determined automatically
try:
# convert the result to a numpy array with the given dtype
result = np.array(data, dtype=get_common_dtype(data), copy=copy)
except TypeError:
# Conversion can fail when `data` contains a complex sympy number, i.e.,
# sympy.I. In this case, we simply try to convert the expression using a
# complex dtype
result = np.array(data, dtype=np.complex, copy=copy)
else:
# a specific dtype is requested
result = np.array(data, dtype=np.dtype(dtype), copy=copy)
return result
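# Example behaviour (illustrative values):
#
#   number_array([1, 2, 3]).dtype       # -> float64, since no entry is complex
#   number_array([1, 2 + 1j]).dtype     # -> complex128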
|
py | 7dff40a30dabc4248d863addb898faf17aed84c8 | import abc
import sys
from typing import Union
import pytest
import meadowrun.docker_controller
from meadowrun import (
AllocCloudInstances,
ContainerAtDigest,
ContainerAtTag,
Deployment,
GitRepoBranch,
GitRepoCommit,
ServerAvailableInterpreter,
run_command,
run_function,
run_map,
)
from meadowrun.config import MEADOWRUN_INTERPRETER
from meadowrun.deployment import (
CodeDeployment,
VersionedCodeDeployment,
InterpreterDeployment,
VersionedInterpreterDeployment,
)
from meadowrun.meadowrun_pb2 import EnvironmentSpecInCode
from meadowrun.meadowrun_pb2 import ServerAvailableContainer, ProcessState
from meadowrun.run_job_core import (
CloudProviderType,
Host,
JobCompletion,
MeadowrunException,
)
class HostProvider(abc.ABC):
"""
The way we set up our tests is a little complicated. We have multiple "test suites",
like BasicsSuite, ErrorSuite, which are abstract classes. We also have multiple
"HostProviders" like AwsHostProvider, LocalHostProvider. So e.g. class
TestBasicsAws(AwsHostProvider, BasicsSuite), runs the "Basics" test suite on AWS
hosts.
"""
@abc.abstractmethod
def get_host(self) -> Host:
pass
@abc.abstractmethod
def get_test_repo_url(self) -> str:
pass
@abc.abstractmethod
async def get_log_file_text(self, job_completion: JobCompletion) -> str:
pass
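# A hypothetical concrete provider (not part of this file) would plug into the
# suites below roughly like this; the class names, host object, and URL are
# placeholders, not real APIs from this repository:
#
#   class MyHostProvider(HostProvider):
#       def get_host(self) -> Host:
#           return some_host                      # e.g. a locally running host
#       def get_test_repo_url(self) -> str:
#           return "https://example.com/test_repo.git"
#       async def get_log_file_text(self, job_completion: JobCompletion) -> str:
#           with open(job_completion.log_file_name) as f:  # assumed attribute
#               return f.read()
#
#   class TestBasicsMyHost(MyHostProvider, BasicsSuite):
#       pass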
class BasicsSuite(HostProvider, abc.ABC):
@pytest.mark.asyncio
async def test_meadowrun_git_repo_commit(self):
await self._test_meadowrun(
GitRepoCommit(
repo_url=self.get_test_repo_url(),
commit="cb277fa1d35bfb775ed1613b639e6f5a7d2f5bb6",
),
ServerAvailableInterpreter(interpreter_path=MEADOWRUN_INTERPRETER),
)
@pytest.mark.asyncio
async def test_meadowrun_git_repo_branch(self):
await self._test_meadowrun(
GitRepoBranch(repo_url=self.get_test_repo_url(), branch="main"),
ServerAvailableInterpreter(interpreter_path=MEADOWRUN_INTERPRETER),
)
@pytest.mark.asyncio
async def test_meadowrun_git_repo_commit_container(self):
# TODO first make sure the image we're looking for is NOT already cached on this
        # system, then run it again after it has been cached, as this exercises different
# code paths
await self._test_meadowrun(
GitRepoCommit(
repo_url=self.get_test_repo_url(),
commit="cb277fa1d35bfb775ed1613b639e6f5a7d2f5bb6",
),
ContainerAtTag(repository="python", tag="3.9.8-slim-buster"),
)
async def _test_meadowrun(
self,
code_deployment: Union[CodeDeployment, VersionedCodeDeployment],
interpreter_deployment: Union[
InterpreterDeployment, VersionedInterpreterDeployment
],
):
results: str = await run_function(
"example_package.example.example_runner",
self.get_host(),
Deployment(interpreter_deployment, code_deployment),
args=["foo"],
)
assert results == "hello foo"
job_completion = await run_command(
"pip --version",
self.get_host(),
Deployment(interpreter_deployment, code_deployment),
)
assert "pip" in await self.get_log_file_text(job_completion)
@pytest.mark.asyncio
async def test_meadowrun_path_in_git_repo(self):
"""Tests GitRepoCommit.path_to_source"""
results: str = await run_function(
"example.example_runner",
self.get_host(),
Deployment(
code=GitRepoCommit(
repo_url=self.get_test_repo_url(),
commit="cb277fa1d35bfb775ed1613b639e6f5a7d2f5bb6",
path_to_source="example_package",
)
),
args=["foo"],
)
assert results == "hello foo"
@pytest.mark.asyncio
async def test_meadowrun_containers(self):
"""
Basic test on running with containers, checks that different images behave as
expected
"""
for version in ["3.9.8", "3.8.12"]:
digest = await (
meadowrun.docker_controller.get_latest_digest_from_registry(
"python", f"{version}-slim-buster", None
)
)
result = await run_command(
"python --version",
self.get_host(),
Deployment(ContainerAtDigest(repository="python", digest=digest)),
)
assert (await self.get_log_file_text(result)).startswith(
f"Python" f" {version}"
)
# there's a cloudpickle issue that prevents lambdas serialized on 3.7 running on
# 3.8. Assuming here that this extends to all lambdas serialized on <=3.7 running on
# >=3.8
@pytest.mark.skipif("sys.version_info < (3, 8)")
@pytest.mark.asyncio
async def test_meadowrun_environment_in_spec(self):
def remote_function():
import importlib
# we could just do import requests, but that messes with mypy
pd = importlib.import_module("pandas") # from myenv.yml
requests = importlib.import_module("requests") # from myenv.yml
example = importlib.import_module("example") # from example_package
return requests.__version__, pd.__version__, example.join_strings("a", "b")
results = await run_function(
remote_function,
self.get_host(),
Deployment(
EnvironmentSpecInCode(
environment_type=EnvironmentSpecInCode.EnvironmentType.CONDA,
path_to_spec="myenv.yml",
),
GitRepoCommit(
repo_url=self.get_test_repo_url(),
commit="a249fc16",
path_to_source="example_package",
),
),
)
assert results == ("2.27.1", "1.4.1", "a, b")
class ErrorsSuite(HostProvider, abc.ABC):
@pytest.mark.asyncio
async def test_run_request_failed(self):
with pytest.raises(MeadowrunException) as exc_info:
await run_function(
lambda: "hello",
self.get_host(),
Deployment(ServerAvailableContainer(image_name="does-not-exist")),
)
assert (
exc_info.value.process_state.state
== ProcessState.ProcessStateEnum.RUN_REQUEST_FAILED
)
@pytest.mark.skipif("sys.version_info < (3, 8)")
@pytest.mark.asyncio
async def test_non_zero_return_code(self):
def exit_immediately():
sys.exit(101)
with pytest.raises(MeadowrunException) as exc_info:
await run_function(exit_immediately, self.get_host())
assert (
exc_info.value.process_state.state
== ProcessState.ProcessStateEnum.NON_ZERO_RETURN_CODE
)
assert exc_info.value.process_state.return_code == 101
class MapSuite(abc.ABC):
@abc.abstractmethod
def cloud_provider(self) -> CloudProviderType:
pass
@pytest.mark.skipif("sys.version_info < (3, 8)")
@pytest.mark.asyncio
async def test_run_map(self):
"""Runs a "real" run_map"""
results = await run_map(
lambda x: x**x,
[1, 2, 3, 4],
AllocCloudInstances(1, 1, 15, self.cloud_provider(), 3),
)
assert results == [1, 4, 27, 256]
|
py | 7dff40f2a835e73e34320079aa45a0dfdc59c570 | from PIL import Image #引入Image模块
from PIL import ImageEnhance #引入ImageEnhance模块
# 可以合并为from PIL import Image, ImageEnhance
img = Image.open("lena256.jpg") #读入图像文件lena256.jpg
img.show() #显示图像
box=(100,100,200,200)
region_img=img.crop(box)
region_img.save("img_region.jpg")
region_img.show()
new_img = img.resize((128,128),Image.BILINEAR)  # resize the image
new_img.save("img_new.jpg")  # save the resulting image
new_img.show()
rot_img = new_img.rotate(45)  # rotate the image 45 degrees counterclockwise around its center
rot_img.save("img_rot.jpg")
rot_img.show()
##rot_img.save("img_rot.bmp")  # convert the image format
rot_img.histogram()  # output histogram statistics of the image
# image brightness enhancement
brightness = ImageEnhance.Brightness(img)
bright_img = brightness.enhance(2.0)
bright_img.save("img_bright.jpg")
bright_img.show()
# image sharpening
sharpness = ImageEnhance.Sharpness(img)
sharp_img = sharpness.enhance(7.0)
sharp_img.save("img_sharp.jpg")
sharp_img.show()
# image contrast enhancement
contrast = ImageEnhance.Contrast(img)
contrast_img = contrast.enhance(2.0)
contrast_img.save("img_contrast.jpg")
|
py | 7dff41258635b6d1c6392fe3d854125f7f9db1e2 | # from .scene_abstract import SingleRobotEmptyScene
from .scene_stadium import SinglePlayerStadiumScene
from .env_bases import MJCFBaseBulletEnv
import numpy as np
from robot_manipulators import Reacher, Pusher, Striker, Thrower
class ReacherBulletEnv(MJCFBaseBulletEnv):
def __init__(self, renders=False,doneAlive=True,actionRepeat=1,randomExplor=True,distractor=False,random_target=True, target_pos=None,display_target=False, seed=None):
self.robot = Reacher(randomExplor=randomExplor,distractor=distractor,random_target=random_target, target_pos=target_pos,display_target=display_target)
MJCFBaseBulletEnv.__init__(self, self.robot, renders,doneAlive,actionRepeat,seed)
self.target_radius = 0.015
# def create_single_player_scene(self, bullet_client):
# return SingleRobotEmptyScene(bullet_client, gravity=0.0, timestep=0.0165, frame_skip=1)
def create_single_player_scene(self, bullet_client):
self.stadium_scene = SinglePlayerStadiumScene(bullet_client,gravity=9.8,timestep=0.0165,frame_skip=1)
return self.stadium_scene
def goal_distance(self, goal_a, goal_b):
assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a - goal_b, axis=-1)
def step(self, a):
assert (not self.scene.multiplayer)
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
# potential_old = self.potential
# self.potential = self.robot.calc_potential()
#
# electricity_cost = (
# -0.10 * (np.abs(a[0] * self.robot.theta_dot) + np.abs(a[1] * self.robot.gamma_dot)
# ) # work torque*angular_velocity
# - 0.01 * (np.abs(a[0]) + np.abs(a[1])) # stall torque require some energy
# )
# stuck_joint_cost = -0.1 if np.abs(np.abs(self.robot.gamma) - 1) < 0.01 else 0.0
# self.rewards = [
# float(self.potential - potential_old),
# float(electricity_cost),
# float(stuck_joint_cost)
# ]
# self.HUD(state, a, False)
reward = - (self.goal_distance(self.robot.calc_object(), self.robot.calc_target()) > self.target_radius).astype(np.float32)
return state, reward, False, {'not alive':False} # sum(self.rewards),
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
class PusherBulletEnv(MJCFBaseBulletEnv):
def __init__(self, renders=False):
self.robot = Pusher()
MJCFBaseBulletEnv.__init__(self, self.robot, renders)
# def create_single_player_scene(self, bullet_client):
# return SingleRobotEmptyScene(bullet_client, gravity=9.81, timestep=0.0020, frame_skip=5)
def create_single_player_scene(self, bullet_client):
self.stadium_scene = SinglePlayerStadiumScene(bullet_client,gravity=9.8,timestep=0.0165,frame_skip=1)
return self.stadium_scene
def step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
potential_old = self.potential
self.potential = self.robot.calc_potential()
joint_vel = np.array([
self.robot.shoulder_pan_joint.get_velocity(),
self.robot.shoulder_lift_joint.get_velocity(),
self.robot.upper_arm_roll_joint.get_velocity(),
self.robot.elbow_flex_joint.get_velocity(),
self.robot.upper_arm_roll_joint.get_velocity(),
self.robot.wrist_flex_joint.get_velocity(),
self.robot.wrist_roll_joint.get_velocity()
])
action_product = np.matmul(np.abs(a), np.abs(joint_vel))
action_sum = np.sum(a)
electricity_cost = (
-0.10 * action_product # work torque*angular_velocity
- 0.01 * action_sum # stall torque require some energy
)
stuck_joint_cost = 0
for j in self.robot.ordered_joints:
if np.abs(j.current_relative_position()[0]) - 1 < 0.01:
stuck_joint_cost += -0.1
self.rewards = [
float(self.potential - potential_old),
float(electricity_cost),
float(stuck_joint_cost)
]
self.HUD(state, a, False)
return state, sum(self.rewards), False, {}
def calc_potential(self):
return -100 * np.linalg.norm(self.to_target_vec)
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
class StrikerBulletEnv(MJCFBaseBulletEnv):
def __init__(self, renders=False):
self.robot = Striker()
MJCFBaseBulletEnv.__init__(self, self.robot, renders)
self._striked = False
self._min_strike_dist = np.inf
self.strike_threshold = 0.1
# def create_single_player_scene(self, bullet_client):
# return SingleRobotEmptyScene(bullet_client, gravity=9.81, timestep=0.0020, frame_skip=5)
def create_single_player_scene(self, bullet_client):
    self.stadium_scene = SinglePlayerStadiumScene(bullet_client, gravity=9.8, timestep=0.0165, frame_skip=1)
return self.stadium_scene
def step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
potential_old = self.potential
self.potential = self.robot.calc_potential()
joint_vel = np.array([
self.robot.shoulder_pan_joint.get_velocity(),
self.robot.shoulder_lift_joint.get_velocity(),
self.robot.upper_arm_roll_joint.get_velocity(),
self.robot.elbow_flex_joint.get_velocity(),
        self.robot.upper_arm_roll_joint.get_velocity(),  # NOTE: repeats upper_arm_roll; the forearm roll joint is presumably intended here
self.robot.wrist_flex_joint.get_velocity(),
self.robot.wrist_roll_joint.get_velocity()
])
action_product = np.matmul(np.abs(a), np.abs(joint_vel))
action_sum = np.sum(a)
electricity_cost = (
-0.10 * action_product # work torque*angular_velocity
- 0.01 * action_sum # stall torque require some energy
)
stuck_joint_cost = 0
for j in self.robot.ordered_joints:
      if np.abs(np.abs(j.current_relative_position()[0]) - 1) < 0.01:  # joint close to its limit (outer abs restored, per the commented-out Roboschool cost above)
stuck_joint_cost += -0.1
dist_object_finger = self.robot.object.pose().xyz() - self.robot.fingertip.pose().xyz()
    # TODO: Should the object and target really belong to the robot? Maybe split this off
    reward_dist_vec = self.robot.object.pose().xyz() - self.robot.target.pose().xyz()
self._min_strike_dist = min(self._min_strike_dist, np.linalg.norm(reward_dist_vec))
if np.linalg.norm(dist_object_finger) < self.strike_threshold:
self._striked = True
self._strike_pos = self.robot.fingertip.pose().xyz()
if self._striked:
reward_near_vec = self.robot.object.pose().xyz() - self._strike_pos
else:
reward_near_vec = self.robot.object.pose().xyz() - self.robot.fingertip.pose().xyz()
reward_near = -np.linalg.norm(reward_near_vec)
reward_dist = -np.linalg.norm(self._min_strike_dist)
reward_ctrl = -np.square(a).sum()
self.rewards = [
float(self.potential - potential_old),
float(electricity_cost),
float(stuck_joint_cost), 3 * reward_dist, 0.1 * reward_ctrl, 0.5 * reward_near
]
self.HUD(state, a, False)
return state, sum(self.rewards), False, {}
def calc_potential(self):
return -100 * np.linalg.norm(self.to_target_vec)
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
class ThrowerBulletEnv(MJCFBaseBulletEnv):
def __init__(self, renders=False):
self.robot = Thrower()
MJCFBaseBulletEnv.__init__(self, self.robot, renders)
# def create_single_player_scene(self, bullet_client):
# return SingleRobotEmptyScene(bullet_client, gravity=0.0, timestep=0.0020, frame_skip=5)
def create_single_player_scene(self, bullet_client):
    self.stadium_scene = SinglePlayerStadiumScene(bullet_client, gravity=9.8, timestep=0.0165, frame_skip=1)
return self.stadium_scene
def step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
potential_old = self.potential
self.potential = self.robot.calc_potential()
joint_vel = np.array([
self.robot.shoulder_pan_joint.get_velocity(),
self.robot.shoulder_lift_joint.get_velocity(),
self.robot.upper_arm_roll_joint.get_velocity(),
self.robot.elbow_flex_joint.get_velocity(),
        self.robot.upper_arm_roll_joint.get_velocity(),  # NOTE: repeats upper_arm_roll; the forearm roll joint is presumably intended here
self.robot.wrist_flex_joint.get_velocity(),
self.robot.wrist_roll_joint.get_velocity()
])
action_product = np.matmul(np.abs(a), np.abs(joint_vel))
action_sum = np.sum(a)
electricity_cost = (
-0.10 * action_product # work torque*angular_velocity
- 0.01 * action_sum # stall torque require some energy
)
stuck_joint_cost = 0
for j in self.robot.ordered_joints:
      if np.abs(np.abs(j.current_relative_position()[0]) - 1) < 0.01:  # joint close to its limit (outer abs restored, per the commented-out Roboschool cost above)
stuck_joint_cost += -0.1
object_xy = self.robot.object.pose().xyz()[:2]
target_xy = self.robot.target.pose().xyz()[:2]
    # TODO: Should the object and target really belong to the robot? Maybe split this off
    if not self.robot._object_hit_ground and self.robot.object.pose().xyz()[2] < -0.25:
self.robot._object_hit_ground = True
self.robot._object_hit_location = self.robot.object.pose().xyz()
if self.robot._object_hit_ground:
object_hit_xy = self.robot._object_hit_location[:2]
reward_dist = -np.linalg.norm(object_hit_xy - target_xy)
else:
reward_dist = -np.linalg.norm(object_xy - target_xy)
reward_ctrl = -np.square(a).sum()
self.rewards = [
float(self.potential - potential_old),
float(electricity_cost),
float(stuck_joint_cost), reward_dist, 0.002 * reward_ctrl
]
self.HUD(state, a, False)
return state, sum(self.rewards), False, {}
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
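# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): driving one of the environments above with
# random actions through the usual Gym-style API inherited from
# MJCFBaseBulletEnv. The names used below (reset, step, action_space) are
# assumptions about that base class, not guarantees made by this file.
#
#   env = PusherBulletEnv(renders=False)
#   obs = env.reset()
#   for _ in range(100):
#       obs, reward, done, info = env.step(env.action_space.sample())
#       if done:
#           obs = env.reset()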
|
py | 7dff41ccd18939f08fdbede2c2916d6f92d3e5cc | import os
from ehive.runnable.IGFBaseProcess import IGFBaseProcess
from igf_data.utils.config_genome_browser import Config_genome_browser
class BuildGenomeBrowserConfigForProject(IGFBaseProcess):
def param_defaults(self):
params_dict=super(BuildGenomeBrowserConfigForProject,self).param_defaults()
params_dict.update({
'ref_genome_type':'GENOME_TWOBIT_URI',
'collection_table':'experiment',
'use_ephemeral_space':0,
})
return params_dict
def run(self):
    '''
    Build a genome browser (Biodalliance) config file for the project's data
    and pass its path downstream via dataflow_params
    '''
try:
project_igf_id = self.param_required('project_igf_id')
experiment_igf_id = self.param_required('experiment_igf_id')
sample_igf_id = self.param_required('sample_igf_id')
igf_session_class = self.param_required('igf_session_class')
collection_type_list = self.param_required('collection_type_list')
ref_genome_type = self.param('ref_genome_type')
collection_table = self.param('collection_table')
pipeline_name = self.param_required('pipeline_name')
species_name = self.param_required('species_name')
base_work_dir = self.param_required('base_work_dir')
template_file = self.param_required('template_file')
use_ephemeral_space = self.param('use_ephemeral_space')
work_dir_prefix = \
os.path.join(
base_work_dir,
project_igf_id,
sample_igf_id,
experiment_igf_id)
work_dir = \
self.get_job_work_dir(work_dir=work_dir_prefix) # get a run work dir
output_file = \
os.path.join(
work_dir,
os.path.basename(template_file)) # get output file name
cg = \
Config_genome_browser(
dbsession_class=igf_session_class,
project_igf_id=project_igf_id,
collection_type_list=collection_type_list,
pipeline_name=pipeline_name,
collection_table=collection_table,
species_name=species_name,
use_ephemeral_space=use_ephemeral_space,
ref_genome_type=ref_genome_type)
cg.build_biodalliance_config(
template_file=template_file,
output_file=output_file)
if os.path.exists(output_file):
self.param('dataflow_params',{'genome_browser_config':output_file}) # populate dataflow if the output file found
else:
self.param('dataflow_params',{'genome_browser_config':''}) # send empty string to dataflow
message = \
'Generated genome browser config for {0}: {1}'.\
format(
project_igf_id,
sample_igf_id)
self.post_message_to_slack(message,reaction='pass') # send log to slack
self.post_message_to_ms_team(
message=message,
reaction='pass')
except Exception as e:
message = \
'project: {2}, sample:{3}, Error in {0}: {1}'.format(
self.__class__.__name__,
e,
project_igf_id,
sample_igf_id)
self.warning(message)
self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs
self.post_message_to_ms_team(
message=message,
reaction='fail')
raise |
py | 7dff41e4d81283874d870239538ea2b84514ce76 | # PIANO TILES BOT PYTHON SCRIPT.
# AKSHITH KANDIVANAM.
# importing the required modules.
import pyautogui
import time
import keyboard
'''
This project aimed to automate the Piano Tiles game using the PyAutoGUI module's functions.
The main idea is to simply recognize if any of the 4 pixels we took match the RGB colour code of the tile that must be clicked.
If any of the pixels match the desired tile's colour, we perform the click operation. To stop the clicking process, a while loop is set in place and is terminated upon pressing 'q'.
Step 1. I used the IDLE IDE for Python to scope out the X & Y coordinates for 4 pixels in each of the 4 columns. I also scoped out the RGB value of the tile that is expected to be clicked.
Step 2. I wrote this Python script and worked with the PyAutoGUI's 'click()' and 'pixelMatchesColor()' functions.
WHEN I SCOPED OUT THE COORDINATES THEY CAME OUT AS:
LEFTMOST COLUMN: X-761, Y-707.
LEFT COLUMN: X-887, Y-707.
RIGHT COLUMN: X-1007, Y-707.
RIGHTMOST COLUMN: X-1136, Y-707.
THE RGB CODE OF THE TILE TO CLICK WAS (17, 17, 17)
**Note: This program works with the Piano Tiles game from the website: http://tanksw.com/piano-tiles/. To play it on another website, adjust the coordinates and find the appropriate RGB value for the tile to click.
'''
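# A small optional helper (not called anywhere) sketching how the coordinates and RGB value above
# can be re-scoped for a different screen or website. It only uses pyautogui.position() and
# pyautogui.pixel(), which are standard PyAutoGUI calls; run it manually while hovering over a tile.
def scope_pixel():
    # report the current mouse position and the colour of the pixel underneath it.
    x, y = pyautogui.position()
    print("coordinates: ({0}, {1}), RGB: {2}".format(x, y, pyautogui.pixel(x, y)))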
# creating the main function for the click of the mouse.
# function takes in the parameters of a pixel's X & Y coordinates in each of the 4 columns.
def click_event(x, y):
# using the module's 'click' function to click on the pixel.
pyautogui.click(x, y)
time.sleep(0)
# creating a function to perform the clicking event on the leftmost column.
def leftmost_column():
# creating an if-statement to check if the pixel (761, 707) in the leftmost column matches the RGB code of the desired tile.
if pyautogui.pixelMatchesColor(761, 707, (17, 17, 17)):
# if the pixel does match the RGB code, we call the 'click_event' function and pass the appropriate coordinates of the pixel.
click_event(761, 707)
# creating a function to perform the clicking event on the left column.
def left_column():
# creating an if-statement to check if the pixel (887, 707) in the left column matches the RGB code of the desired tile.
if pyautogui.pixelMatchesColor(887, 707, (17, 17, 17)):
# if the pixel does match the RGB code, we call the 'click_event' function and pass the appropriate coordinates of the pixel.
click_event(887, 707)
# creating a function to perform the clicking event on the right column.
def right_column():
# creating an if-statement to check if the pixel (1007, 707) in the right column matches the RGB code of the desired tile.
if pyautogui.pixelMatchesColor(1007, 707, (17, 17, 17)):
# if the pixel does match the RGB code, we call the 'click_event' function and pass the appropriate coordinates of the pixel.
click_event(1007, 707)
# creating a function to perform the clicking event on the rightmost column.
def rightmost_column():
# creating an if-statement to check if the pixel (1136, 707) in the rightmost column matches the RGB code of the desired tile.
if pyautogui.pixelMatchesColor(1136, 707, (17, 17, 17)):
# if the pixel does match the RGB code, we call the 'click_event' function and pass the appropriate coordinates of the pixel.
click_event(1136, 707)
# creating a while-loop to iterate until the key 'q' is pressed to quit the program.
while not keyboard.is_pressed('q'):
# calling all the functions to find if the pixels in their respective columns represent the RGB code for the tile to click.
leftmost_column()
left_column()
right_column()
rightmost_column()
|
py | 7dff42daf71854c4fa287d85ba52f77cfeba992c | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package dependencies for TFX."""
def make_required_install_packages():
# Make sure to sync the versions of common dependencies (absl-py, numpy,
# six, and protobuf) with TF.
# TODO(b/130767399): add flask once the frontend is exposed externally.
return [
'absl-py>=0.1.6,<1',
'apache-beam[gcp]>=2.14,<3',
'click>=7.0,<8',
'google-api-python-client>=1.7.8,<2',
'jinja2>=2.7.3,<3',
'ml-metadata>=0.14,<0.15',
'protobuf>=3.7,<4',
'six>=1.10,<2',
'tensorflow-data-validation>=0.14.1,<0.15',
'tensorflow-model-analysis>=0.14,<0.15',
'tensorflow-transform>=0.14,<0.15',
'PyYAML',
]
def make_required_test_packages():
"""Prepare extra packages needed for 'python setup.py test'."""
return [
'apache-airflow>=1.10,<2',
'docker>=4.0.0,<5.0.0',
# LINT.IfChange
'kfp==0.1.31.2; python_version >= "3.0"',
# LINT.ThenChange(
# testing/github/common.sh,
# testing/github/ubuntu/image/image.sh,
# testing/kubeflow/common.sh
# )
'pytest>=5.0.0,<6.0.0',
'tensorflow>=1.14,<2',
'tzlocal>=1.5,<2.0',
]
def make_extra_packages_docker_image():
# Packages needed for tfx docker image.
return [
'python-snappy>=0.5,<0.6',
'tensorflow>=1.14,<2',
# TODO(b/138406006): Remove the narrower dependency for pyarrow
# and numpy after Beam 2.15 release.
'numpy>=1.16,<1.17',
'pyarrow>=0.14,<0.15',
]
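# Illustrative sketch only: these helpers are typically consumed from a setup.py roughly as
# follows. The exact wiring used by TFX's real setup.py is not shown here, and the module name
# 'dependencies' below is an assumption.
#
#   from setuptools import find_packages, setup
#   import dependencies
#
#   setup(
#       name='tfx',
#       packages=find_packages(),
#       install_requires=dependencies.make_required_install_packages(),
#       tests_require=dependencies.make_required_test_packages(),
#   )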
|
py | 7dff4354d436076cea8cbbd6857690661d089190 | from manim import *
import numpy as np
from numpy import array
from copy import deepcopy
import math
class CycleComp(Scene):
def construct(self):
l_string = '(12)(3)(45)'
circle = r'\circ'
r_string = '(153)(24)'
comp = MathTex(*l_string, circle, *r_string).scale(2)
comp2 = MathTex(*l_string, *r_string).scale(2)
self.play(
Write(comp)
)
self.wait(2)
self.play(
Unwrite(comp[11])
)
self.play(
ReplacementTransform(comp[:11], comp2[:11]),
ReplacementTransform(comp[12:], comp2[11:])
)
self.remove(comp)
self.wait(2)
comp2.generate_target()
comp2.target.move_to(UP*3)
self.play(
MoveToTarget(comp2)
)
c1 = MathTex(r'1', r'\rightarrow 5', r'\rightarrow 4').scale(2)
c2 = MathTex(r'2', r'\rightarrow 4', r'\rightarrow 5').scale(2)
c3 = MathTex(r'3', r'\rightarrow 1', r'\rightarrow 2').scale(2)
c4 = MathTex(r'4', r'\rightarrow 2', r'\rightarrow 1').scale(2)
c5 = MathTex(r'5', r'\rightarrow 3', r'\rightarrow 3').scale(2)
result = MathTex('=(', *'14)(253)').scale(2).shift(3*DOWN)
cs = [c1, c2, c3, c4, c5]
nums = [comp2[17:19], comp2[12:15], comp2[8:10], comp2[5:6], comp2[1:3]]
orders = [[1, 2], [0, 2], [1, 4], [0, 4], [1, 3]]
results = [result[0:3], result[3:4], result[4:7], result[7:8], result[8:9]]
ks = [0, 3, 1, 4, 2]
for k in range(5):
i = ks[k]
order = orders[i]
c = cs[i]
unders = [Underline(num).set_color(BLUE) for num in nums]
self.play(
Write(c[0])
)
for j in range(5):
num = nums[j]
under = unders[j]
self.play(
num.animate.set_color(BLUE),
Write(under)
)
if j in order:
self.play(
Write(
c[order.index(j) + 1]
)
)
self.play(
num.animate.set_color(WHITE),
Unwrite(under)
)
self.play(
Write(results[k])
)
self.play(
Unwrite(c)
)
self.wait(2)
self.play(
comp2.animate.shift(DOWN*2),
result.animate.shift(UP*2)
) |
py | 7dff4367e113cdb02c0b0b54eb71317211c2491f | """See quiz.__doc__."""
from random import randint as rand
from out_of_range_error import out_of_range_error
def quiz(location, score):
"""Give the user an interactive quiz based on their location.
Use the user's location find the correct set of quiz_questions and
quiz_answers to use. Assign these questions and answers to
current_questions and current_answers. Use the user's location to find and
set the appropriate score to 0. While there are more than 0 questions do
the following:
    1. Pick a random question in the range of questions left.
    2. Set the answer and answer text to answer and answer_text.
    3. Give the user a prompt to type their answer, thereby deleting the
    question from the question list. Convert this input to an integer.
    4. Check that the user input matches answer.
    4.1 If the answer was right, increment the user score and display
    'You got it right'.
    4.2 If not, display the correct answer using answer_text.
If there are 0 questions left in the current location, return the user
scores.
Args:
location:
The location of the user.
score:
The score of the user.
Returns:
The score of the user.
"""
# Initialize lists of answers and questions
quiz_questions = [
[
"""What is the correct formula to find the sum of the internal
angles of a polygon:""",
"""What is the correct formula to find the sum of the external
angles of a polygon:""",
"""Substiute u = 3 and t = 5 into the following equation:
d = ut + 3t²"""
],
[
"What part of speech is the word jump:",
"""What language feature is this:
            Go clean your room right now this instant you naughty little
devil child!""", """What type of poem is this:
Go clean your room right
            now this instant you naughty
little devil child!"""
],
[
"How many credits does a Level 1 student in 2020 need:",
"How many credits will a Level 2 student need next year:"
]
]
quiz_answers = [[["n - 2 * 180", "(n - 2)180", "n - 2 * 60", "360", 1],
["n * 60", "n + 3 * 180", "(n + 3)180", "360", 3],
["15", "30", "100", "90", 3]],
[["Noun", "Verb", "Adjective", "Adverb", 1],
["Hyperbole", "Rhetoric", "Imperative", "Sonnet", 2],
["Sonnet", "Haiku", "Limerick", "Free verse", 1]],
[["80", "60", "72", "70", 3], ["80", "60", "72", "52", 1]]]
# get the question answer, and score values to use based on the users
# location
current_questions = quiz_questions[location - 1]
current_answers = quiz_answers[location - 1]
score[location - 1] = 0
while len(current_questions) > 0:
# Run while there are still questions left
rand_choice = rand(0, len(current_questions) - 1)
# pick a random question and answer
answer = current_answers[rand_choice][
4] # get the integer that 'points' to the correct answer
answer_text = current_answers[rand_choice][
answer] # feed this integer back in to get the text of the answer
try:
user_input = int(
input(f"""
{current_questions.pop(rand_choice)}
1) {current_answers[rand_choice].pop(0)}
2) {current_answers[rand_choice].pop(0)}
3) {current_answers[rand_choice].pop(0)}
4) {current_answers[rand_choice].pop(0)}
[1-4]: """)) # give the user the randomly selected question and possible
# answers
except ValueError: # if the user doesn't put in an interger, skip the
# question and give them the error message
out_of_range_error(4)
user_input = None # set user_input so the program doesn't break
# delete the question from the master list, and take user input
        # remove the corresponding answers from the master list
        current_answers.pop(rand_choice)
if user_input in (1, 2, 3, 4): # check if the users input is valid
if user_input - 1 == answer:
input("""
You got it right""")
score[location - 1] += 1
else:
input(f"""
You got it wrong.
The answer was:
{answer_text}""")
return score
if __name__ == "__main__":
print(quiz(int(input()), [None, None, None]))
|
py | 7dff436fbe7673f55c1673d2d24f304d9da3dc3a | import tekore as tk
from .config import Config
class SpotifyUtils:
"""
Class for making requests to Spotify API
"""
def __init__(self, client_id=Config.client_id, client_secret=Config.client_secret,
redirect_uri=Config.redirect_uri):
# Credentials to access the Spotify Music Data
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.app_token = tk.request_client_token(self.client_id, self.client_secret)
self.spt = tk.Spotify(self.app_token)
def get_artist(self, artist_id: str):
"""
Get information about the artist
:param artist_id: identifier of artist
:return: information about the artist
"""
artist = self.spt.artist(artist_id)
return {
'id': artist.id,
'name': artist.name,
'popularity': artist.popularity,
'genres': [genre for genre in artist.genres]
}
def get_album_songs(self, album_id: str):
"""
Get songs of the album
:param album_id: identifier of album
:return: tracks in album and total number of tracks
"""
tracks = self.spt.album_tracks(album_id, limit=50)
return {
'tracks': tracks.items,
'total': tracks.total,
}
def get_song_meta(self, song_id: str) -> dict:
"""
Get meta-info about the song
:param song_id: identifier of song
:return: Meta-info about song
"""
meta_information = self.spt.track(song_id)
return {
'name': meta_information.name,
'album': meta_information.album.name,
'artists': str([artist.name for artist in meta_information.artists]),
'id': meta_information.id,
'release_date': meta_information.album.release_date,
'popularity': float(meta_information.popularity),
'length': float(meta_information.duration_ms),
}
def get_song_analise(self, song_id: str) -> dict:
"""
        Analyse the song
:param song_id: identifier of song
:return: info after analysing the song
"""
analise = self.spt.track_audio_analysis(song_id)
return {
'bars': analise.bars,
'beats': analise.beats,
'sections': analise.sections,
'segments': analise.segments,
'tatums': analise.tatums,
}
def get_song_features(self, song_id: str) -> dict:
"""
Get features of song
:param song_id: identifier of song
:return: song features
"""
features = self.spt.track_audio_features(song_id)
return {
'danceability': float(features.danceability),
'acousticness': float(features.acousticness),
'energy': float(features.energy),
'instrumentalness': float(features.instrumentalness),
'liveness': float(features.liveness),
'valence': float(features.valence),
'loudness': float(features.loudness),
'speechiness': float(features.speechiness),
'tempo': float(features.tempo),
'key': float(features.key),
'time_signature': float(features.time_signature),
}
def get_song(self, song_id: str) -> list:
"""
Get all information about song
:param song_id: identifier of song
:return: information about song
"""
meta = self.get_song_meta(song_id)
features = self.get_song_features(song_id)
return [*meta.items(), *features.items()]
# [('name', 'I’m Ready (with Demi Lovato)'), ('album', 'I’m Ready (with Demi Lovato)'),
# ('artists', "['Sam Smith', 'Demi Lovato']"), ('release_date', '2020-04-16'), ('length', 200838.0),
# ('popularity', 74.0), ('id', '1fipvP2zmef6vN2IwXfJhY'), ('acousticness', 0.00346), ('danceability', 0.501),
# ('energy', 0.674), ('instrumentalness', 3.56e-05), ('liveness', 0.282), ('valence', 0.152),
# ('loudness', -6.363), ('speechiness', 0.0408), ('tempo', 155.051), ('key', 5.0), ('time_signature', 4.0)]
# obj = SpotifyUtils()
# print(obj.get_song('4Km5HrUvYTaSUfiSGPJeQR'))
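# A slightly fuller usage sketch (illustrative; the IDs below are placeholders, not real
# Spotify identifiers):
#
#   utils = SpotifyUtils()
#   artist_info = utils.get_artist('<artist_id>')
#   track_features = utils.get_song_features('<track_id>')
#   print(artist_info['name'], track_features['tempo'])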
|
py | 7dff43bed4e31033719cc6017c065d344a3c8ba1 | from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from irekua_database.models import DeviceType
class MimeTypesInline(admin.TabularInline):
extra = 0
model = DeviceType.mime_types.through
autocomplete_fields = ('mimetype',)
verbose_name = _('Mime type')
verbose_name_plural = _('Mime types')
classes = ('collapse', )
class DeviceTypeAdmin(admin.ModelAdmin):
search_fields = ['name', 'mime_types__name']
list_display = ('id', 'name', 'created_on')
list_display_links = ('id', 'name')
fieldsets = (
(None, {
'fields': (
('name', 'icon'),
'description'
),
}),
)
inlines = [
MimeTypesInline
]
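# Registration note: this module only defines the ModelAdmin. Hooking it up to the admin site is
# assumed to happen elsewhere (e.g. another admin module or via a decorator); if it were done
# here, the standard call would be:
#
#   admin.site.register(DeviceType, DeviceTypeAdmin)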
|
py | 7dff440da52ec889becd7d5c1fe8d060707863e7 | """
PROBLEM
Consider the fraction, n/d, where n and d are positive integers. If n<d and HCF(n,d)=1, it is called a reduced proper
fraction.
If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that there are 21 elements in this set.
How many elements would be contained in the set of reduced proper fractions for d ≤ 1,000,000?
ANSWER:
303963552391
Solve time ~4 seconds
"""
import unittest
from util.utils import timeit, len_faray_seq
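# Background sketch (illustrative only): the answer equals the length of the Farey sequence of
# order n minus its two endpoints 0/1 and 1/1, i.e. the sum of Euler's totient phi(d) for
# d = 2..n. The helper below is a self-contained totient-sieve version of that count; it is NOT
# the len_faray_seq implementation from util.utils, which the solution actually uses.
def totient_sum_count(n):
    phi = list(range(n + 1))  # phi[k] will end up holding Euler's totient of k
    for p in range(2, n + 1):
        if phi[p] == p:  # p is prime: apply the factor (1 - 1/p) to every multiple of p
            for k in range(p, n + 1, p):
                phi[k] -= phi[k] // p
    return sum(phi[2:])  # count of reduced proper fractions with denominator <= n, e.g. 21 for n = 8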
class Problem72:
def __init__(self, n):
self.n = n
@timeit
def solve(self):
        return len_faray_seq(self.n) - 2  # subtract the 0/1 and 1/1 endpoints of the Farey sequence.
class Solution72(unittest.TestCase):
def setUp(self):
self.problem = Problem72(n=1000000)
def test_solution(self):
self.assertEqual(303963552391, self.problem.solve())
def test_small_solution(self):
self.assertEqual(21, Problem72(n=8).solve())
if __name__ == '__main__':
unittest.main()
|
py | 7dff4501cc99af756736f13cd95e306910f7ec5e | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import numpy as np
from google.protobuf import json_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
flatten([[v.indices, v.values, v.dense_shape]
if isinstance(v, sparse_tensor.SparseTensor) else [v]
for v in tensors_list]))
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
flat_output):
tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
tf_logging.info("Comparing key: %s", k)
if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor : indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
tester.assertAllEqual(expected_v[1], flat_output[i + 1])
tester.assertAllEqual(expected_v[2], flat_output[i + 2])
i += 3
else:
# One output for standard Tensor.
tester.assertAllEqual(expected_v, flat_output[i])
i += 1
class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.test_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
return
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_example(**kwargs)
result = flatten_values_tensors_or_sparse(out.values())
# Check values.
tf_result = sess.run(result)
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
serialized = kwargs["serialized"]
batch_size = (
serialized.eval().size if isinstance(serialized, ops.Tensor) else
np.asarray(serialized).size)
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape().as_list()), (batch_size,) + f.shape)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (2,))
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array(2 * [[a_default]]),
b_name: np.array(2 * [b_default]),
c_name: np.array(2 * [c_default]),
}
self._test({
"example_names": np.empty((0,), dtype=bytes),
"serialized": ops.convert_to_tensor(["", ""]),
"features": {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
}
}, expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature((2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
{
"example_names": ["in1"],
"serialized": [original.SerializeToString()],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
{
"example_names": ["in1", "in2"],
"serialized": ["", ""],
"features": input_features,
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
example(features=features({
"a": float_feature([-1, -1]),
}))
]
names = ["passing", "failing"]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
}
},
expected_err=(errors_impl.OpError,
"Name: failing, Key: a, Index: 1. Number of float val"))
def testDenseDefaultNoShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})),
]
serialized = [m.SerializeToString() for m in original]
self._test(
{
"example_names": ["failing"],
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature(None, dtypes.float32)
}
},
expected_err=(ValueError, "Missing shape for feature a"))
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(
features=features({
"st_c": float_feature([]), # empty float list
})),
example(
features=features({
"st_d": feature(), # feature with nothing in it
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_st_c = ( # indices, values, shape
np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32),
        np.array([4, 3], dtype=np.int64)) # batch == 4, max_elems = 3
expected_st_d = ( # indices, values, shape
np.array([[3, 0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
        np.array([4, 1], dtype=np.int64)) # batch == 4, max_elems = 1
expected_output = {
"st_c": expected_st_c,
"st_d": expected_st_d,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
}
}, expected_output)
def testSerializedContainingSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = ( # indices, values, shape
np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.float32, [13])
}
}, expected_output)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(
features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(
features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
serialized = [m.SerializeToString() for m in original]
expected_sp1 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32), np.array(
[2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
expected_sp2 = ( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32), np.array(
            [2, 7], dtype=np.int64)) # batch == 2, max_elems = 7
expected_output = {
"sp1": expected_sp1,
"sp2": expected_sp2,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx", "val2", dtypes.float32, size=7, already_sorted=True)
}
}, expected_output)
def testSerializedContaining3DSparseFeature(self):
original = [
example(
features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(
features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(
features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(
features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = (
# indices
np.array(
[[0, 5, 0], [0, 10, 2], [3, 0, 1], [3, 3, 2], [3, 9, 0]],
dtype=np.int64),
# values
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
# shape batch == 4, max_elems = 13
np.array([4, 13, 3], dtype=np.int64))
expected_output = {
"sp": expected_sp,
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
}
}, expected_output)
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
# This test is identical as the previous one except
# for the creation of 'serialized'.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),
),
]
serialized = [
m.SerializeToString() + n.SerializeToString() for (m, n) in original
]
expected_output = {
aname:
np.array([[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
}
}, expected_output)
def testSerializedContainingDenseScalar(self):
original = [
example(features=features({
"a": float_feature([1]),
})),
example(features=features({}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1], [-1]], dtype=np.float32) # 2x1 (column vector)
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1,), dtype=dtypes.float32, default_value=-1),
}
}, expected_output)
def testSerializedContainingDenseWithDefaults(self):
original = [
example(features=features({
"a": float_feature([1, 1]),
})),
example(features=features({
"b": bytes_feature([b"b1"]),
})),
example(features=features({
"b": feature()
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array([[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(
3, 1, 2, 1),
"b":
np.array(["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(
3, 1, 1, 1, 1),
}
self._test({
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a":
parsing_ops.FixedLenFeature(
(1, 2, 1), dtype=dtypes.float32, default_value=[3.0, -3.0]),
"b":
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string, default_value="tmp_str"),
}
}, expected_output)
def testSerializedContainingSparseAndSparseFeatureAndDenseWithNoDefault(self):
expected_st_a = ( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64),
np.array(["a", "b", "c"], dtype="|S"), np.array(
            [2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"c": float_feature([3, 4]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"c": float_feature([1, 2]),
"val": bytes_feature([b"c"]),
"idx": int64_feature([7])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": np.array(2 * [[a_default]]),
"b": np.array(2 * [b_default]),
"c": np.array([[3, 4], [1, 2]], dtype=np.float32),
}
self._test(
{
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_output)
def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
expected_idx = ( # indices, values, shape
np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
np.array([0, 3, 7, 1]),
        np.array([2, 2], dtype=np.int64)) # batch == 2, max_elems = 2
expected_sp = ( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64),
np.array(["a", "b", "d", "c"], dtype="|S"),
        np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(
features=features({
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})),
example(
features=features({
"val": bytes_feature([b"c", b"d"]),
"idx": int64_feature([7, 1])
}))
]
names = ["in1", "in2"]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"idx": expected_idx,
"sp": expected_sp,
}
self._test({
"example_names": names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
"idx":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
}
}, expected_output)
def _testSerializedContainingVarLenDenseLargerBatch(self, batch_size):
# During parsing, data read from the serialized proto is stored in buffers.
# For small batch sizes, a buffer will contain one minibatch entry.
# For larger batch sizes, a buffer may contain several minibatch
# entries. This test identified a bug where the code that copied
# data out of the buffers and into the output tensors assumed each
# buffer only contained one minibatch entry. The bug has since been fixed.
truth_int = [i for i in range(batch_size)]
truth_str = [[("foo%d" % i).encode(), ("bar%d" % i).encode()]
for i in range(batch_size)]
expected_str = copy.deepcopy(truth_str)
# Delete some intermediate entries
for i in range(batch_size):
col = 1
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry
expected_str[i][col] = b"default"
col -= 1
truth_str[i].pop()
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry (possibly again)
expected_str[i][col] = b"default"
truth_str[i].pop()
expected_output = {
# Batch size batch_size, 1 time step.
"a": np.array(truth_int, dtype=np.int64).reshape(batch_size, 1),
# Batch size batch_size, 2 time steps.
"b": np.array(expected_str, dtype="|S").reshape(batch_size, 2),
}
original = [
example(
features=features({
"a": int64_feature([truth_int[i]]),
"b": bytes_feature(truth_str[i])
})) for i in range(batch_size)
]
serialized = [m.SerializeToString() for m in original]
self._test({
"serialized": ops.convert_to_tensor(serialized, dtype=dtypes.string),
"features": {
"a":
parsing_ops.FixedLenSequenceFeature(
shape=(),
dtype=dtypes.int64,
allow_missing=True,
default_value=-1),
"b":
parsing_ops.FixedLenSequenceFeature(
shape=[],
dtype=dtypes.string,
allow_missing=True,
default_value="default"),
}
}, expected_output)
def testSerializedContainingVarLenDenseLargerBatch(self):
np.random.seed(3456)
for batch_size in (1, 10, 20, 100, 256):
self._testSerializedContainingVarLenDenseLargerBatch(batch_size)
def testSerializedContainingVarLenDense(self):
aname = "a"
bname = "b"
cname = "c"
dname = "d"
example_names = ["in1", "in2", "in3", "in4"]
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(
features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[
[0, 0, 0, 0],
[1, 1, 0, 0],
[-1, -1, 2, 2],
[0, 0, 0, 0],
],
dtype=np.float32).reshape(4, 2, 2, 1),
bname:
np.array(
[["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]],
dtype=bytes).reshape(4, 2, 1, 1, 1),
cname:
np.array([2, 0, 0, 3], dtype=np.int64).reshape(4, 1),
dname:
np.empty(shape=(4, 0), dtype=bytes),
}
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output)
# Test with padding values.
expected_output_custom_padding = dict(expected_output)
expected_output_custom_padding[aname] = np.array(
[
[-2, -2, -2, -2],
[1, 1, -2, -2],
[-1, -1, 2, 2],
[-2, -2, -2, -2],
],
dtype=np.float32).reshape(4, 2, 2, 1)
self._test({
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=-2.0),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
}, expected_output_custom_padding)
# Change number of required values so the inputs are not a
# multiple of this size.
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(
errors_impl.OpError, "Name: in3, Key: b, Index: 2. "
"Number of bytes values is not a multiple of stride length."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Cannot reshape a tensor with 0 elements to shape"))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenFeature(
(None, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"First dimension of shape for feature a unknown. "
"Consider using FixedLenSequenceFeature."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
cname:
parsing_ops.FixedLenFeature(
(1, None), dtype=dtypes.int64, default_value=[[1]]),
}
},
expected_err=(ValueError,
"All dimensions of shape for feature c need to be known "
r"but received \(1, None\)."))
self._test(
{
"example_names": example_names,
"serialized": ops.convert_to_tensor(serialized),
"features": {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=False),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}
},
expected_err=(ValueError,
"Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True."))
class ParseSingleExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.test_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Check values.
tf_result = sess.run(flatten_values_tensors_or_sparse(out.values()))
_compare_output_to_expected(self, out, expected_values, tf_result)
# Check shapes.
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape()), tensor_shape.as_shape(f.shape))
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
def testSingleExampleWithSparseAndSparseFeatureAndDense(self):
original = example(
features=features({
"c": float_feature([3, 4]),
"d": float_feature([0.0, 1.0]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
np.array([[0], [3]], dtype=np.int64), np.array(["a", "b"], dtype="|S"),
np.array([13], dtype=np.int64)) # max_values = 13
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": [a_default],
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
"d": np.array([0.0, 1.0], dtype=np.float32),
}
self._test(
{
"example_names": ops.convert_to_tensor("in1"),
"serialized": ops.convert_to_tensor(serialized),
"features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string,
[13]),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
[], dtypes.float32, allow_missing=True)
}
},
expected_output)
class ParseSequenceExampleTest(test.TestCase):
def testCreateSequenceExample(self):
value = sequence_example(
context=features({
"global_feature": float_feature([1, 2, 3]),
}),
feature_lists=feature_lists({
"repeated_feature_2_frames":
feature_list([
bytes_feature([b"a", b"b", b"c"]),
bytes_feature([b"a", b"d", b"e"])
]),
"repeated_feature_3_frames":
feature_list([
int64_feature([3, 4, 5, 6, 7]),
int64_feature([-1, 0, 0, 0, 0]),
int64_feature([1, 2, 3, 4, 5])
])
}))
value.SerializeToString() # Smoke test
def _test(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_length_values=None,
expected_err=None,
batch=False):
expected_context_values = expected_context_values or {}
expected_feat_list_values = expected_feat_list_values or {}
expected_length_values = expected_length_values or {}
with self.test_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
if batch:
c_out, fl_out, _ = parsing_ops.parse_sequence_example(**kwargs)
else:
c_out, fl_out = parsing_ops.parse_single_sequence_example(**kwargs)
if c_out:
sess.run(flatten_values_tensors_or_sparse(c_out.values()))
if fl_out:
sess.run(flatten_values_tensors_or_sparse(fl_out.values()))
else:
# Returns dicts w/ Tensors and SparseTensors.
if batch:
(context_out, feat_list_out,
lengths_out) = parsing_ops.parse_sequence_example(**kwargs)
else:
(context_out,
feat_list_out) = parsing_ops.parse_single_sequence_example(**kwargs)
lengths_out = {}
context_result = sess.run(
flatten_values_tensors_or_sparse(
context_out.values())) if context_out else []
feat_list_result = sess.run(
flatten_values_tensors_or_sparse(
feat_list_out.values())) if feat_list_out else []
lengths_result = sess.run(
flatten_values_tensors_or_sparse(
lengths_out.values())) if lengths_out else []
# Check values.
_compare_output_to_expected(self, context_out, expected_context_values,
context_result)
_compare_output_to_expected(self, feat_list_out,
expected_feat_list_values, feat_list_result)
_compare_output_to_expected(self, lengths_out, expected_length_values,
lengths_result)
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
if "context_features" in kwargs:
for k, f in kwargs["context_features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
if batch:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()[1:]), f.shape)
else:
self.assertEqual(
tuple(context_out[k].get_shape().as_list()), f.shape)
elif isinstance(f, parsing_ops.VarLenFeature) and batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 2))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (2,))
elif isinstance(f, parsing_ops.VarLenFeature) and not batch:
self.assertEqual(
tuple(context_out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(
tuple(context_out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(context_out[k].dense_shape.get_shape().as_list()), (1,))
def _testBoth(self,
kwargs,
expected_context_values=None,
expected_feat_list_values=None,
expected_err=None):
# Test using tf.parse_single_sequence_example
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_err=expected_err,
batch=False)
# Convert the input to a batch of size 1, and test using
# tf.parse_sequence_example.
# Some replacements are needed for the batch version.
kwargs["serialized"] = [kwargs.pop("serialized")]
kwargs["example_names"] = [kwargs.pop("example_name")
] if "example_name" in kwargs else None
# Disable error string matching; it's not consistent for batch mode.
if expected_err:
expected_err = (expected_err[0], "")
# Add a batch dimension to expected output
if expected_context_values:
new_values = {}
for k in expected_context_values:
v = expected_context_values[k]
if isinstance(kwargs["context_features"][k],
parsing_ops.FixedLenFeature):
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_context_values = new_values
expected_length_values = {}
if expected_feat_list_values:
new_values = {}
for k in expected_feat_list_values:
v = expected_feat_list_values[k]
if isinstance(kwargs["sequence_features"][k],
parsing_ops.FixedLenSequenceFeature):
expected_length_values[k] = [np.shape(v)[0]]
new_values[k] = np.expand_dims(v, axis=0)
else:
# Sparse tensor.
new_values[k] = (np.insert(v[0], 0, 0, axis=1), v[1],
np.insert(v[2], 0, 1))
expected_feat_list_values = new_values
self._test(
kwargs,
expected_context_values=expected_context_values,
expected_feat_list_values=expected_feat_list_values,
expected_length_values=expected_length_values,
expected_err=expected_err,
batch=True)
def testSequenceExampleWithSparseAndDenseContext(self):
original = sequence_example(
context=features({
"c": float_feature([3, 4]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: num_features = 2
a_default = [[1, 2, 3]]
b_default = np.random.rand(3, 3).astype(bytes)
expected_context_output = {
"st_a": expected_st_a,
"a": a_default,
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"context_features": {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
}
},
expected_context_values=expected_context_output)
def testSequenceExampleWithMultipleSizeFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
]),
"b":
feature_list([bytes_feature([b"r00", b"r01", b"r10", b"r11"])]),
"c":
feature_list([float_feature([3, 4]),
float_feature([-1, 2])]),
}))
serialized = original.SerializeToString()
expected_feature_list_output = {
"a":
np.array(
[ # outer dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
dtype=np.int64),
"b":
np.array(
[ # outer dimension is time, inside are 2x2 matrices
[[b"r00", b"r01"], [b"r10", b"r11"]]
],
dtype=bytes),
"c":
np.array(
[ # outer dimension is time, inside are 2-vectors
[3, 4], [-1, 2]
],
dtype=np.float32),
"d":
np.empty(shape=(0, 5), dtype=np.float32), # empty_allowed_missing
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"b":
parsing_ops.FixedLenSequenceFeature((2, 2), dtypes.string),
"c":
parsing_ops.FixedLenSequenceFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleWithoutDebugName(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleWithSparseAndDenseFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([3, 4]),
int64_feature([1, 0])]),
"st_a":
feature_list([
float_feature([3.0, 4.0]),
float_feature([5.0]),
float_feature([])
]),
"st_b":
feature_list([
bytes_feature([b"a"]),
bytes_feature([]),
bytes_feature([]),
bytes_feature([b"b", b"c"])
])
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [1, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_st_b = (
np.array([[0, 0], [3, 0], [3, 1]], dtype=np.int64), # indices
np.array(["a", "b", "c"], dtype="|S"), # values
np.array([4, 2], dtype=np.int64)) # shape: num_time = 4, max_feat = 2
expected_st_c = (
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # values
np.array([0, 0], dtype=np.int64)) # shape: num_time = 0, max_feat = 0
expected_feature_list_output = {
"a": np.array([[3, 4], [1, 0]], dtype=np.int64),
"st_a": expected_st_a,
"st_b": expected_st_b,
"st_c": expected_st_c,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
"st_b": parsing_ops.VarLenFeature(dtypes.string),
"st_c": parsing_ops.VarLenFeature(dtypes.int64),
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleWithEmptyFeatureInFeatureLists(self):
original = sequence_example(
feature_lists=feature_lists({
"st_a":
feature_list([
float_feature([3.0, 4.0]),
feature(),
float_feature([5.0]),
]),
}))
serialized = original.SerializeToString()
expected_st_a = (
np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64), # indices
np.array([3.0, 4.0, 5.0], dtype=np.float32), # values
np.array([3, 2], dtype=np.int64)) # shape: num_time = 3, max_feat = 2
expected_feature_list_output = {
"st_a": expected_st_a,
}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"st_a": parsing_ops.VarLenFeature(dtypes.float32),
}
},
expected_feat_list_values=expected_feature_list_output)
def testSequenceExampleListWithInconsistentDataFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([int64_feature([-1, 0]),
float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, "Feature list: a, Index: 1."
" Data types don't match. Expected type: int64"))
def testSequenceExampleListWithWrongDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a": feature_list([float_feature([2, 3])])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Feature list: a, Index: 0. Data types don't match."
" Expected type: int64"))
def testSequenceExampleListWithWrongSparseDataTypeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([3, 4]),
int64_feature([1, 2]),
float_feature([2.0, 3.0])
])
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError,
"Name: in1, Feature list: a, Index: 2."
" Data types don't match. Expected type: int64"
" Feature is: float_list"))
def testSequenceExampleListWithWrongShapeFails(self):
original = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([int64_feature([2, 3]),
int64_feature([2, 3, 4])]),
}))
serialized = original.SerializeToString()
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(errors_impl.OpError, r"Name: in1, Key: a, Index: 1."
r" Number of int64 values != expected."
r" values size: 3 but output shape: \[2\]"))
def testSequenceExampleWithMissingFeatureListFails(self):
original = sequence_example(feature_lists=feature_lists({}))
# Test fails because we didn't add:
# feature_list_dense_defaults = {"a": None}
self._testBoth(
{
"example_name": "in1",
"serialized": ops.convert_to_tensor(original.SerializeToString()),
"sequence_features": {
"a": parsing_ops.FixedLenSequenceFeature((2,), dtypes.int64)
}
},
expected_err=(
errors_impl.OpError,
"Name: in1, Feature list 'a' is required but could not be found."
" Did you mean to include it in"
" feature_list_dense_missing_assumed_empty or"
" feature_list_dense_defaults?"))
def testSequenceExampleBatch(self):
first = sequence_example(
feature_lists=feature_lists({
"a":
feature_list([
int64_feature([-1, 0, 1]),
int64_feature([2, 3, 4]),
int64_feature([5, 6, 7]),
int64_feature([8, 9, 10]),
])
}))
second = sequence_example(
feature_lists=feature_lists({
"a": feature_list([
int64_feature([21, 2, 11]),
])
}))
serialized = [first.SerializeToString(), second.SerializeToString()]
expected_feature_list_output = {
"a":
np.array(
[ # outermost dimension is example id
[ # middle dimension is time.
[[-1, 0, 1]], # inside are 1x3 matrices
[[2, 3, 4]],
[[5, 6, 7]],
[[8, 9, 10]]
],
[ # middle dimension is time.
[[21, 2, 11]], # inside are 1x3 matrices
[[0, 0, 0]], # additional entries are padded with 0
[[0, 0, 0]],
[[0, 0, 0]]
]
],
dtype=np.int64),
"d":
np.empty(shape=(2, 0, 5), dtype=np.float32), # allowed_missing
}
self._test(
{
"example_names": ops.convert_to_tensor(["in1", "in2"]),
"serialized": ops.convert_to_tensor(serialized),
"sequence_features": {
"a":
parsing_ops.FixedLenSequenceFeature((1, 3), dtypes.int64),
"d":
parsing_ops.FixedLenSequenceFeature(
(5,), dtypes.float32, allow_missing=True),
}
},
expected_feat_list_values=expected_feature_list_output,
expected_length_values={
"a": [4, 1],
"d": [0, 0]
},
batch=True)
class DecodeJSONExampleTest(test.TestCase):
def _testRoundTrip(self, examples):
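    # Round trip: serialize each Example proto to JSON text, decode it back to
    # a binary proto with decode_json_example, and check that it parses to the
    # original message.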
with self.test_session() as sess:
examples = np.array(examples, dtype=np.object)
json_tensor = constant_op.constant(
[json_format.MessageToJson(m) for m in examples.flatten()],
shape=examples.shape,
dtype=dtypes.string)
binary_tensor = parsing_ops.decode_json_example(json_tensor)
binary_val = sess.run(binary_tensor)
if examples.shape:
self.assertShapeEqual(binary_val, json_tensor)
for input_example, output_binary in zip(
np.array(examples).flatten(), binary_val.flatten()):
output_example = example_pb2.Example()
output_example.ParseFromString(output_binary)
self.assertProtoEquals(input_example, output_example)
else:
output_example = example_pb2.Example()
output_example.ParseFromString(binary_val)
self.assertProtoEquals(examples.item(), output_example)
def testEmptyTensor(self):
self._testRoundTrip([])
self._testRoundTrip([[], [], []])
def testEmptyExamples(self):
self._testRoundTrip([example(), example(), example()])
def testDenseFeaturesScalar(self):
self._testRoundTrip(
example(features=features({
"a": float_feature([1, 1, 3])
})))
def testDenseFeaturesVector(self):
self._testRoundTrip([
example(features=features({
"a": float_feature([1, 1, 3])
})),
example(features=features({
"a": float_feature([-1, -1, 2])
})),
])
def testDenseFeaturesMatrix(self):
self._testRoundTrip([
[example(features=features({
"a": float_feature([1, 1, 3])
}))],
[example(features=features({
"a": float_feature([-1, -1, 2])
}))],
])
def testSparseFeatures(self):
self._testRoundTrip([
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([])
})),
example(features=features({
"st_d": feature()
})),
example(
features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
})),
])
def testSerializedContainingBytes(self):
aname = "a"
bname = "b*has+a:tricky_name"
self._testRoundTrip([
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"])
})),
example(
features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"])
})),
])
def testInvalidSyntax(self):
with self.test_session() as sess:
json_tensor = constant_op.constant(["{]"])
binary_tensor = parsing_ops.decode_json_example(json_tensor)
with self.assertRaisesOpError("Error while parsing JSON"):
sess.run(binary_tensor)
class ParseTensorOpTest(test.TestCase):
def testToFloat32(self):
with self.test_session():
expected = np.random.rand(3, 4, 5).astype(np.float32)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.float32)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
def testToUint8(self):
with self.test_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint8)
result = tensor.eval(
feed_dict={serialized: tensor_proto.SerializeToString()})
self.assertAllEqual(expected, result)
def testTypeMismatch(self):
with self.test_session():
expected = np.random.rand(3, 4, 5).astype(np.uint8)
tensor_proto = tensor_util.make_tensor_proto(expected)
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
r"Type mismatch between parsed tensor \(uint8\) and dtype "
r"\(uint16\)"):
tensor.eval(feed_dict={serialized: tensor_proto.SerializeToString()})
def testInvalidInput(self):
with self.test_session():
serialized = array_ops.placeholder(dtypes.string)
tensor = parsing_ops.parse_tensor(serialized, dtypes.uint16)
with self.assertRaisesOpError(
"Could not parse `serialized` as TensorProto: 'bogus'"):
tensor.eval(feed_dict={serialized: "bogus"})
with self.assertRaisesOpError(
r"Expected `serialized` to be a scalar, got shape: \[1\]"):
tensor.eval(feed_dict={serialized: ["bogus"]})
if __name__ == "__main__":
test.main()
|
py | 7dff4553c6742c5a0892b739162a47b133091f8c | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import models
import models.backbone
import utils
class DeepLabv3plus(nn.Module):
def __init__(self, backbone: str, output_stride: int, num_classes: int):
super(DeepLabv3plus, self).__init__()
self.low_level_feature = []
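        # Filled by the forward hooks registered on the backbone below; the
        # decoder pops the captured low-level feature map for its skip connection.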
# Backbone
if backbone == 'ResNet101':
self.backbone = models.backbone.resnet101.ResNet101(output_stride)
self.backbone.layer1.register_forward_hook(utils.hooks.get_feature_maps(self.low_level_feature))
elif backbone == 'Xception':
self.backbone = models.backbone.xception.xception(output_stride, pretrained=True)
self.backbone.block2.sepconv2.register_forward_hook(utils.hooks.get_feature_maps(self.low_level_feature))
else:
raise NotImplementedError('Wrong backbone.')
# ASPP
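        # Atrous rates follow the DeepLabv3 convention: they are doubled at
        # output_stride 8 so the effective receptive field matches stride 16.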
if output_stride == 16:
atrous_rates = (6, 12, 18)
elif output_stride == 8:
atrous_rates = (12, 24, 36)
else:
raise NotImplementedError('Wrong output_stride.')
self.aspp = torchvision.models.segmentation.deeplabv3.ASPP(2048, atrous_rates, 256)
# Decoder
self.decoder = Decoder(num_classes)
self.upsample = nn.Upsample(mode='bilinear', align_corners=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.upsample.size = x.size()[-2:]
x = self.backbone(x)
x = self.aspp(x)
x = self.decoder(x, self.low_level_feature)
x = self.upsample(x)
return x
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
for param in m.parameters():
param.requires_grad = False
class Decoder(nn.Module):
def __init__(self, num_classes: int):
super(Decoder, self).__init__()
self.feature_refinement_module = self.make_feature_refinement_module(256, 48)
self.decoding_block = self.make_decoding_block(256 + 48, 256)
self.classifier = nn.Conv2d(256, num_classes, kernel_size=1)
def forward(self, x: torch.Tensor, low_level_feature: list[torch.Tensor]) -> torch.Tensor:
low_level_feature = self.feature_refinement_module(low_level_feature.pop())
x = F.interpolate(x, size=low_level_feature.size()[2:], mode='bilinear', align_corners=False)
x = torch.cat((x, low_level_feature), dim=1)
x = self.decoding_block(x)
x = self.classifier(x)
return x
def make_feature_refinement_module(self, in_channels: int, out_channels: int):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def make_decoding_block(self, in_channels: int, out_channels: int):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DeepLabv3plus('Xception', output_stride=16, num_classes=19).to(device)
models.test.test_model(model, (1, 3, 1024, 2048), '../runs')
|
py | 7dff45f0f3d9e4e26e10e0c0e12b3416ab707be6 | #!/usr/bin/env python
'''
Run this script inside of src/ and it will look for all the files
that were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The GreenCoin Core developers
it will change it to
// Copyright (c) 2009-2014 The GreenCoin Core developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
year = time.gmtime()[0]
last_year = year - 1
command = "perl -pi -e 's/%s The GreenCoin/%s The GreenCoin/' %s"
listFilesCommand = "find . | grep %s"
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
gitGetLastCommitDateCommand = "git log " + filePath +" | grep Date | head -n 1"
p = os.popen(gitGetLastCommitDateCommand)
result = ""
for l in p:
result = l
break
result = result.replace("\n","")
return result
n=1
for extension in extensions:
foundFiles = os.popen(listFilesCommand % extension)
for filePath in foundFiles:
filePath = filePath[1:-1]
if filePath.endswith(extension):
filePath = os.getcwd() + filePath
modifiedTime = getLastGitModifiedDate(filePath)
if len(modifiedTime) > 0 and str(year) in modifiedTime:
print n,"Last Git Modified: ", modifiedTime, " - ", filePath
os.popen(command % (last_year,year,filePath))
n = n + 1
|
py | 7dff4624e053c331bc0811dc8abb79cca2adf3ed | # This program uses recursion to print numbers
# from the Fibonacci series.
def main():
print('The first 10 numbers in the')
print('Fibonacci series are:')
for number in range(1, 11):
print(fib(number))
# The fib function returns the nth number
# in the Fibonacci series.
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n - 1) + fib(n - 2)
# Call the main function.
main()
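# The loop above prints fib(1) through fib(10):
# 1, 1, 2, 3, 5, 8, 13, 21, 34, 55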
|
py | 7dff4648353db4ba9cf328651050e9d65b2ab878 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# NOTES:
# 1. matplotlib MUST be in 3.1.0; 3.1.1 ruins the heatmap
# # Across-Site Statistics for Visit Date Disparity Rates
#
# ### NOTE: Aggregate info is weighted by the contribution of each site
import pandas as pd
import xlrd
import matplotlib.pyplot as plt
import seaborn as sns
from math import pi
# %matplotlib inline
# +
sheets = []
fn1 = 'visit_date_disparity_table_sheets_analytics_report.xlsx'
file_names = [fn1]
s1 = 'Observation'
s2 = 'Measurement'
s3 = 'Procedure Occurrence'
s4 = 'Drug Exposure'
s5 = 'Condition Occurrence'
sheet_names = [s1, s2, s3, s4, s5]
# +
table_sheets = []
for file in file_names:
for sheet in sheet_names:
s = pd.read_excel(file, sheet, index_col=0)
table_sheets.append(s)
date_cols = table_sheets[0].columns
date_cols = (list(date_cols))
hpo_id_cols = table_sheets[0].index
hpo_id_cols = (list(hpo_id_cols))
# -
# ### Converting the numbers as needed and putting into a dictionary
# +
new_table_sheets = {}
for name, sheet in zip(sheet_names, table_sheets):
sheet_cols = sheet.columns
sheet_cols = sheet_cols[0:]
new_df = pd.DataFrame(columns=sheet_cols)
for col in sheet_cols:
old_col = sheet[col]
new_col = pd.to_numeric(old_col, errors='coerce')
new_df[col] = new_col
new_table_sheets[name] = new_df
# -
# ### Creating the heatmaps for each table
# +
fig, ax = plt.subplots(figsize=(18, 12))
sns.heatmap(new_table_sheets['Condition Occurrence'], annot=True, annot_kws={"size": 10},
fmt='g', linewidths=.5, ax=ax, yticklabels=hpo_id_cols,
xticklabels=date_cols, cmap="YlGnBu", vmin=0, vmax=100)
ax.set_title("Condition Table Visit Date Disparity Rate", size=14)
ax.set_ylim(len(hpo_id_cols)-0.1, 0)
plt.show()
# plt.savefig("condition_table_visit_date_disparity.jpg")
# +
fig, ax = plt.subplots(figsize=(18, 12))
sns.heatmap(new_table_sheets['Drug Exposure'], annot=True, annot_kws={"size": 10},
fmt='g', linewidths=.5, ax=ax, yticklabels=hpo_id_cols,
xticklabels=date_cols, cmap="YlGnBu", vmin=0, vmax=100)
ax.set_ylim(len(hpo_id_cols)-0.1, 0)
ax.set_title("Drug Table Visit Date Disparity Rate", size=14)
# plt.savefig("drug_table_visit_date_disparity.jpg")
# +
fig, ax = plt.subplots(figsize=(18, 12))
sns.heatmap(new_table_sheets['Measurement'], annot=True, annot_kws={"size": 10},
fmt='g', linewidths=.5, ax=ax, yticklabels=hpo_id_cols,
xticklabels=date_cols, cmap="YlGnBu", vmin=0, vmax=100)
ax.set_ylim(len(hpo_id_cols)-0.1, 0)
ax.set_title("Measurement Table Visit Date Disparity Rate", size=14)
# plt.savefig("measurement_table_visit_date_disparity.jpg")
# +
fig, ax = plt.subplots(figsize=(18, 12))
sns.heatmap(new_table_sheets['Observation'], annot=True, annot_kws={"size": 10},
fmt='g', linewidths=.5, ax=ax, yticklabels=hpo_id_cols,
xticklabels=date_cols, cmap="YlGnBu", vmin=0, vmax=100)
ax.set_ylim(len(hpo_id_cols)-0.1, 0)
ax.set_title("Observation Table Visit Date Disparity Rate", size=14)
# plt.savefig("observation_table_visit_date_disparity.jpg")
# +
fig, ax = plt.subplots(figsize=(18, 12))
sns.heatmap(new_table_sheets['Procedure Occurrence'], annot=True, annot_kws={"size": 10},
fmt='g', linewidths=.5, ax=ax, yticklabels=hpo_id_cols,
xticklabels=date_cols, cmap="YlGnBu", vmin=0, vmax=100)
ax.set_ylim(len(hpo_id_cols)-0.1, 0)
ax.set_title("Procedure Table Visit Date Disparity Rate", size=14)
# plt.savefig("procedure_table_visit_date_disparity.jpg")
# -
# # Now let's look at the metrics for particular sites with respect to visit date disparity. This will allow us to send them the appropriate information.
fn1_hpo_sheets = 'visit_date_disparity_hpo_sheets_analytics_report.xlsx'
file_names_hpo_sheets = [fn1_hpo_sheets]
x1 = pd.ExcelFile(fn1_hpo_sheets)
site_name_list = x1.sheet_names
# +
num_hpo_sheets = len(site_name_list)
print(f"There are {num_hpo_sheets} HPO sheets.")
# -
# name_of_interest = 'aggregate_info'
#
# if name_of_interest not in site_name_list:
# raise ValueError("Name not found in the list of HPO site names.")
#
# for idx, site in enumerate(site_name_list):
# if site == name_of_interest:
# idx_of_interest = idx
# +
hpo_sheets = []
for file in file_names_hpo_sheets:
for sheet in site_name_list:
s = pd.read_excel(file, sheet, index_col=0)
hpo_sheets.append(s)
table_id_cols = list(hpo_sheets[0].index)
date_cols = table_sheets[0].columns
date_cols = (list(date_cols))
# +
new_hpo_sheets = []
for sheet in hpo_sheets:
sheet_cols = sheet.columns
new_df = pd.DataFrame(columns=sheet_cols)
for col in sheet_cols:
old_col = sheet[col]
new_col = pd.to_numeric(old_col, errors='coerce')
new_df[col] = new_col
new_hpo_sheets.append(new_df)
# -
# ### Showing for one particular site
for i in range(len(site_name_list)):
name_of_interest = site_name_list[i]
idx_of_interest = i
fig, ax = plt.subplots(figsize=(9, 6))
data = new_hpo_sheets[idx_of_interest]
mask = data.isnull()
g = sns.heatmap(data, annot=True, annot_kws={"size": 14},
fmt='g', linewidths=.5, ax=ax, yticklabels=table_id_cols,
xticklabels=date_cols, cmap="YlOrBr", vmin=0, vmax=100, mask=mask)
g.set_facecolor("lightgrey")
ax.set_title(f"Visit Date Disparity Rates for {name_of_interest}", size=14)
ax.set_ylim(len(table_id_cols)-0.1, 0)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
plt.tight_layout()
img_name = name_of_interest + "_visit_date_disparity.png"
plt.savefig(img_name)
|
py | 7dff46e81b9e32fc36c3a733f54a6a4ec818e8b3 | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['IPC-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
|
py | 7dff476f5b07538c175407ac6793f4c21aad8c8f | # Copyright (C) 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.backup import driver
from cinder.tests.unit.backup import fake_service
class FakeBackupServiceWithVerify(driver.BackupDriverWithVerify,
fake_service.FakeBackupService):
def verify(self, backup):
pass
|
py | 7dff47d5c1963836b413280a84e8853a2c06ee9c | from django.urls import path
from .views import StudentViewReg
urlpatterns = [
path('signup/', StudentViewReg.as_view(), name='sign_up'),
]
|
py | 7dff48a3cd8927c86f34285dc6c071a5e9fbf8b4 | import numpy
import torch
import torch.nn.functional as F
from babyai.rl.algos.base import BaseAlgo
class PPOAlgo(BaseAlgo):
"""The class for the Proximal Policy Optimization algorithm
    ([Schulman et al., 2017](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, acmodel, num_frames_per_proc=None, discount=0.99, lr=7e-4, beta1=0.9, beta2=0.999,
gae_lambda=0.95,
entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4,
adam_eps=1e-5, clip_eps=0.2, epochs=4, batch_size=256, preprocess_obss=None,
reshape_reward=None, aux_info=None):
num_frames_per_proc = num_frames_per_proc or 128
super().__init__(envs, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward,
aux_info)
self.clip_eps = clip_eps
self.epochs = epochs
self.batch_size = batch_size
assert self.batch_size % self.recurrence == 0
self.optimizer = torch.optim.Adam(self.acmodel.parameters(), lr, (beta1, beta2), eps=adam_eps)
self.batch_num = 0
def update_parameters(self):
# Collect experiences
exps, logs = self.collect_experiences()
'''
exps is a DictList with the following keys ['obs', 'memory', 'mask', 'action', 'value', 'reward',
'advantage', 'returnn', 'log_prob'] and ['collected_info', 'extra_predictions'] if we use aux_info
exps.obs is a DictList with the following keys ['image', 'instr']
exps.obj.image is a (n_procs * n_frames_per_proc) x image_size 4D tensor
exps.obs.instr is a (n_procs * n_frames_per_proc) x (max number of words in an instruction) 2D tensor
exps.memory is a (n_procs * n_frames_per_proc) x (memory_size = 2*image_embedding_size) 2D tensor
exps.mask is (n_procs * n_frames_per_proc) x 1 2D tensor
if we use aux_info: exps.collected_info and exps.extra_predictions are DictLists with keys
being the added information. They are either (n_procs * n_frames_per_proc) 1D tensors or
(n_procs * n_frames_per_proc) x k 2D tensors where k is the number of classes for multiclass classification
'''
for _ in range(self.epochs):
# Initialize log values
log_entropies = []
log_values = []
log_policy_losses = []
log_value_losses = []
log_grad_norms = []
log_losses = []
'''
For each epoch, we create int(total_frames / batch_size + 1) batches, each of size batch_size (except
            maybe the last one). Each batch is divided into sub-batches of size recurrence (frames are contiguous in
a sub-batch), but the position of each sub-batch in a batch and the position of each batch in the whole
list of frames is random thanks to self._get_batches_starting_indexes().
'''
for inds in self._get_batches_starting_indexes():
# inds is a numpy array of indices that correspond to the beginning of a sub-batch
# there are as many inds as there are batches
# Initialize batch values
batch_entropy = 0
batch_value = 0
batch_policy_loss = 0
batch_value_loss = 0
batch_loss = 0
# Initialize memory
memory = exps.memory[inds]
for i in range(self.recurrence):
# Create a sub-batch of experience
sb = exps[inds + i]
# Compute loss
model_results = self.acmodel(sb.obs, memory * sb.mask)
dist = model_results['dist']
value = model_results['value']
memory = model_results['memory']
extra_predictions = model_results['extra_predictions']
entropy = dist.entropy().mean()
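                    # PPO clipped surrogate objective: compare the new/old action
                    # probability ratio against its clipped version and keep the
                    # pessimistic (minimum) term, so the policy cannot move too far
                    # from the one that collected the experiences.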
ratio = torch.exp(dist.log_prob(sb.action) - sb.log_prob)
surr1 = ratio * sb.advantage
surr2 = torch.clamp(ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps) * sb.advantage
policy_loss = -torch.min(surr1, surr2).mean()
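                    # The value loss is clipped analogously: take the worse (max)
                    # of the clipped and unclipped squared errors.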
value_clipped = sb.value + torch.clamp(value - sb.value, -self.clip_eps, self.clip_eps)
surr1 = (value - sb.returnn).pow(2)
surr2 = (value_clipped - sb.returnn).pow(2)
value_loss = torch.max(surr1, surr2).mean()
loss = policy_loss - self.entropy_coef * entropy + self.value_loss_coef * value_loss
# Update batch values
batch_entropy += entropy.item()
batch_value += value.mean().item()
batch_policy_loss += policy_loss.item()
batch_value_loss += value_loss.item()
batch_loss += loss
# Update memories for next epoch
if i < self.recurrence - 1:
exps.memory[inds + i + 1] = memory.detach()
# Update batch values
batch_entropy /= self.recurrence
batch_value /= self.recurrence
batch_policy_loss /= self.recurrence
batch_value_loss /= self.recurrence
batch_loss /= self.recurrence
# Update actor-critic
self.optimizer.zero_grad()
batch_loss.backward()
grad_norm = sum(p.grad.data.norm(2) ** 2 for p in self.acmodel.parameters() if p.grad is not None) ** 0.5
torch.nn.utils.clip_grad_norm_(self.acmodel.parameters(), self.max_grad_norm)
self.optimizer.step()
# Update log values
log_entropies.append(batch_entropy)
log_values.append(batch_value)
log_policy_losses.append(batch_policy_loss)
log_value_losses.append(batch_value_loss)
log_grad_norms.append(grad_norm.item())
log_losses.append(batch_loss.item())
# Log some values
logs["entropy"] = numpy.mean(log_entropies)
logs["value"] = numpy.mean(log_values)
logs["policy_loss"] = numpy.mean(log_policy_losses)
logs["value_loss"] = numpy.mean(log_value_losses)
logs["grad_norm"] = numpy.mean(log_grad_norms)
logs["loss"] = numpy.mean(log_losses)
return logs
def _get_batches_starting_indexes(self):
"""Gives, for each batch, the indexes of the observations given to
the model and the experiences used to compute the loss at first.
Returns
-------
batches_starting_indexes : list of list of int
the indexes of the experiences to be used at first for each batch
"""
indexes = numpy.arange(0, self.num_frames, self.recurrence)
indexes = numpy.random.permutation(indexes)
num_indexes = self.batch_size // self.recurrence
batches_starting_indexes = [indexes[i:i + num_indexes] for i in range(0, len(indexes), num_indexes)]
return batches_starting_indexes
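        # Illustrative walk-through with hypothetical sizes: with num_frames = 2048
        # and recurrence = 4, `indexes` holds the 512 starting frames 0, 4, 8, ...
        # (shuffled); with batch_size = 256 each batch then receives 256 // 4 = 64
        # of these starting indexes, and each one expands into a contiguous
        # sub-batch of 4 frames inside update_parameters().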
|
py | 7dff48d1d5f63a84e03e805ec7d7f48804782cde | #!/usr/bin/python3
import numpy as np
class Softmax:
    # Converts arbitrary values from the previous layer to probabilities
def __init__(self, input_nodes, output_nodes):
self.weights = np.random.randn(input_nodes, output_nodes) / input_nodes
self.biases = np.zeros(output_nodes)
    def backprop(self, d_L_d_out):
        '''Performs the backprop stage of the Softmax layer and returns the
        loss gradient with respect to this layer's inputs.
        d_L_d_out is the loss gradient for this layer's outputs.'''
        # We only know 1 element of d_L_d_out will be nonzero
        for i, grad in enumerate(d_L_d_out):
            if grad == 0:
                continue
            # e^totals
            t_exp = np.exp(self.last_totals)
            # Sum of all e^totals
            S = np.sum(t_exp)
            # Grad of out[i] against totals
            d_out_d_t = -t_exp[i] * t_exp / (S ** 2)
            d_out_d_t[i] = t_exp[i] * (S - t_exp[i]) / (S ** 2)
            # Grad of loss against totals (chain rule with the scalar grad)
            d_L_d_t = grad * d_out_d_t
            # The gradient of totals w.r.t. the inputs is the weight matrix, so
            # the loss gradient for the flattened input is weights @ d_L_d_t
            d_L_d_inputs = self.weights @ d_L_d_t
            # Restore the original input shape saved in forward()
            return d_L_d_inputs.reshape(self.last_input_shape)
def forward(self, input):
self.last_input_shape = input.shape
input = input.flatten()
self.last_input = input
input_nodes, output_nodes = self.weights.shape
totals = np.dot(input, self.weights) + self.biases
self.last_totals = totals
exp = np.exp(totals)
return exp / np.sum(exp, axis=0)
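# Example usage (hypothetical shapes: an 8-channel 13x13 feature map classified
# into 10 classes):
# softmax = Softmax(13 * 13 * 8, 10)
# probs = softmax.forward(np.random.randn(13, 13, 8)) # 10 probabilities summing to 1
# d_input = softmax.backprop(loss_gradient) # same shape as the forward input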
|
py | 7dff49076c859e4bf11373a17a99f82f292846d2 | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from . import views
from django.urls import path
from django.contrib.auth.views import login, logout
urlpatterns = [
path('', views.home),
path('login/', login, {'template_name': 'accounts/login.html'}),
path('logout/', logout, {'template_name': 'accounts/logout.html'}),
path('register/', views.register, name='register'),
path('profile/', views.view_profile, name='view_profile'),
path('profile/edit/', views.edit_profile, name='edit_profile')
] |
py | 7dff4913c3a3ed12e2ca6c6eec65025f5ddf0c35 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import FilbitTestFramework
from test_framework.util import *
class SignRawTransactionsTest(FilbitTestFramework):
"""Tests transaction signing via RPC command "signrawtransaction"."""
def setup_chain(self):
print('Initializing test directory ' + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
def successful_signing_test(self):
"""Creates and signs a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
py | 7dff492c260c6ee7c8bf744599e65449cbc3ab10 | # tkinter app stuff
import os
import sys
from tkinter import Button, Event, Frame, Label, StringVar, Tk, ttk
from typing import Callable
from camera import set_selected_camera
from camera import get_cameras
from pipe import calibrate
def init_tkinter_app() -> Frame:
Logo = resource_path("favicon.ico")
root = Tk()
root.title('Airpose')
root.iconbitmap(Logo)
# Create a frame
app = Frame(root, bg="white")
app.grid()
init_calibrate_button(app, calibrate)
init_video_output(app)
init_camera_combobox(app)
return app
def init_calibrate_button(app: Frame, command: Callable):
# Create calibration button
calibration_button = Button(app, text="Calibrate", command=command, width = 50, height = 5, bg = 'green')
calibration_button.grid(row=2,column=0)
def init_video_output(app: Frame) -> Label:
# Create a label for video stream
video_label = Label(app)
video_label.grid(row=1,column=0,columnspan=1)
return video_label
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def init_camera_combobox(app: Frame):
cameras = get_cameras()
stringvar = StringVar()
cb = ttk.Combobox(app, textvariable=stringvar, state= "readonly")
cb['values'] = list(cameras.keys())
cb.current(0)
cb.grid(row=0, column=0, columnspan=1, rowspan=1)
cb.bind("<<ComboboxSelected>>", lambda _: (set_selected_camera(cb.get())))
|
py | 7dff494000fe12c14f3bee1f1cf9e9ed1f9c8ec7 | # Copyright (C) 2015 EMC Corporation.
# Copyright (C) 2016 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from cinder_tempest_plugin.api.volume import base
from cinder_tempest_plugin import cinder_clients
CONF = config.CONF
class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest):
@classmethod
def setup_clients(cls):
cls._api_version = 2
super(ConsistencyGroupsV2Test, cls).setup_clients()
cls.admin_volume_client = cls.os_admin.volumes_v2_client
manager = cinder_clients.Manager(cls.os_admin)
cls.consistencygroups_adm_client = manager.consistencygroups_adm_client
@classmethod
def skip_checks(cls):
super(ConsistencyGroupsV2Test, cls).skip_checks()
if not CONF.volume_feature_enabled.consistency_group:
raise cls.skipException("Cinder consistency group "
"feature disabled")
def _delete_consistencygroup(self, cg_id):
self.consistencygroups_adm_client.delete_consistencygroup(cg_id)
vols = self.admin_volume_client.list_volumes(detail=True)['volumes']
for vol in vols:
if vol['consistencygroup_id'] == cg_id:
self.admin_volume_client.wait_for_resource_deletion(vol['id'])
self.consistencygroups_adm_client.wait_for_consistencygroup_deletion(
cg_id)
def _delete_cgsnapshot(self, cgsnapshot_id, cg_id):
self.consistencygroups_adm_client.delete_cgsnapshot(cgsnapshot_id)
vols = self.admin_volume_client.list_volumes(detail=True)['volumes']
snapshots = self.os_admin.snapshots_v2_client.list_snapshots(
detail=True)['snapshots']
for vol in vols:
for snap in snapshots:
if (vol['consistencygroup_id'] == cg_id and
vol['id'] == snap['volume_id']):
(self.snapshots_client.
wait_for_resource_deletion(snap['id']))
self.consistencygroups_adm_client.wait_for_cgsnapshot_deletion(
cgsnapshot_id)
@decorators.idempotent_id('3fe776ba-ec1f-4e6c-8d78-4b14c3a7fc44')
def test_consistencygroup_create_delete(self):
# Create volume type
name = data_utils.rand_name("volume-type")
volume_type = self.os_admin.volume_types_v2_client.create_volume_type(
name=name)['volume_type']
# Create CG
cg_name = data_utils.rand_name('CG')
create_consistencygroup = (
self.consistencygroups_adm_client.create_consistencygroup)
cg = create_consistencygroup(volume_type['id'],
name=cg_name)['consistencygroup']
self.consistencygroups_adm_client.wait_for_consistencygroup_status(
cg['id'], 'available')
self.assertEqual(cg_name, cg['name'])
# Create volume
vol_name = data_utils.rand_name("volume")
params = {'name': vol_name,
'volume_type': volume_type['id'],
'consistencygroup_id': cg['id'],
'size': CONF.volume.volume_size}
volume = self.admin_volume_client.create_volume(**params)['volume']
waiters.wait_for_volume_resource_status(self.admin_volume_client,
volume['id'], 'available')
# Get a given CG
cg = self.consistencygroups_adm_client.show_consistencygroup(
cg['id'])['consistencygroup']
self.assertEqual(cg_name, cg['name'])
# Get all CGs with detail
cgs = self.consistencygroups_adm_client.list_consistencygroups(
detail=True)['consistencygroups']
self.assertIn((cg['name'], cg['id']),
[(m['name'], m['id']) for m in cgs])
# Clean up
self._delete_consistencygroup(cg['id'])
self.os_admin.volume_types_v2_client.delete_volume_type(
volume_type['id'])
@decorators.idempotent_id('2134dd52-f333-4456-bb05-6cb0f009a44f')
def test_consistencygroup_cgsnapshot_create_delete(self):
# Create volume type
name = data_utils.rand_name("volume-type")
volume_type = self.admin_volume_types_client.create_volume_type(
name=name)['volume_type']
# Create CG
cg_name = data_utils.rand_name('CG')
create_consistencygroup = (
self.consistencygroups_adm_client.create_consistencygroup)
cg = create_consistencygroup(volume_type['id'],
name=cg_name)['consistencygroup']
self.consistencygroups_adm_client.wait_for_consistencygroup_status(
cg['id'], 'available')
self.assertEqual(cg_name, cg['name'])
# Create volume
vol_name = data_utils.rand_name("volume")
params = {'name': vol_name,
'volume_type': volume_type['id'],
'consistencygroup_id': cg['id'],
'size': CONF.volume.volume_size}
volume = self.admin_volume_client.create_volume(**params)['volume']
waiters.wait_for_volume_resource_status(self.admin_volume_client,
volume['id'], 'available')
# Create cgsnapshot
cgsnapshot_name = data_utils.rand_name('cgsnapshot')
create_cgsnapshot = (
self.consistencygroups_adm_client.create_cgsnapshot)
cgsnapshot = create_cgsnapshot(cg['id'],
name=cgsnapshot_name)['cgsnapshot']
self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
cgsnapshot['id'], 'available')
self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
snapshots = self.os_admin.snapshots_v2_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if volume['id'] == snap['volume_id']:
waiters.wait_for_volume_resource_status(
self.os_admin.snapshots_v2_client,
snap['id'], 'available')
# Get a given CG snapshot
cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot(
cgsnapshot['id'])['cgsnapshot']
self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
# Get all CG snapshots with detail
cgsnapshots = self.consistencygroups_adm_client.list_cgsnapshots(
detail=True)['cgsnapshots']
self.assertIn((cgsnapshot['name'], cgsnapshot['id']),
[(m['name'], m['id']) for m in cgsnapshots])
# Clean up
self._delete_cgsnapshot(cgsnapshot['id'], cg['id'])
self._delete_consistencygroup(cg['id'])
self.admin_volume_types_client.delete_volume_type(volume_type['id'])
@decorators.idempotent_id('3a6a5525-25ca-4a6c-aac4-cac6fa8f5b43')
def test_create_consistencygroup_from_cgsnapshot(self):
# Create volume type
name = data_utils.rand_name("volume-type")
volume_type = self.admin_volume_types_client.create_volume_type(
name=name)['volume_type']
# Create CG
cg_name = data_utils.rand_name('CG')
create_consistencygroup = (
self.consistencygroups_adm_client.create_consistencygroup)
cg = create_consistencygroup(volume_type['id'],
name=cg_name)['consistencygroup']
self.consistencygroups_adm_client.wait_for_consistencygroup_status(
cg['id'], 'available')
self.assertEqual(cg_name, cg['name'])
# Create volume
vol_name = data_utils.rand_name("volume")
params = {'name': vol_name,
'volume_type': volume_type['id'],
'consistencygroup_id': cg['id'],
'size': CONF.volume.volume_size}
volume = self.admin_volume_client.create_volume(**params)['volume']
waiters.wait_for_volume_resource_status(self.admin_volume_client,
volume['id'], 'available')
# Create cgsnapshot
cgsnapshot_name = data_utils.rand_name('cgsnapshot')
create_cgsnapshot = (
self.consistencygroups_adm_client.create_cgsnapshot)
cgsnapshot = create_cgsnapshot(cg['id'],
name=cgsnapshot_name)['cgsnapshot']
self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
cgsnapshot['id'], 'available')
self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if volume['id'] == snap['volume_id']:
waiters.wait_for_volume_resource_status(
self.os_admin.snapshots_v2_client, snap['id'], 'available')
# Create CG from CG snapshot
cg_name2 = data_utils.rand_name('CG_from_snap')
create_consistencygroup2 = (
self.consistencygroups_adm_client.create_consistencygroup_from_src)
cg2 = create_consistencygroup2(cgsnapshot_id=cgsnapshot['id'],
name=cg_name2)['consistencygroup']
self.consistencygroups_adm_client.wait_for_consistencygroup_status(
cg2['id'], 'available')
self.assertEqual(cg_name2, cg2['name'])
vols = self.admin_volume_client.list_volumes(
detail=True)['volumes']
for vol in vols:
if vol['consistencygroup_id'] == cg2['id']:
waiters.wait_for_volume_resource_status(
self.admin_volume_client, vol['id'], 'available')
# Clean up
self._delete_consistencygroup(cg2['id'])
self._delete_cgsnapshot(cgsnapshot['id'], cg['id'])
self._delete_consistencygroup(cg['id'])
self.admin_volume_types_client.delete_volume_type(volume_type['id'])
@decorators.idempotent_id('556121ae-de9c-4342-9897-e54260447a19')
def test_create_consistencygroup_from_consistencygroup(self):
# Create volume type
name = data_utils.rand_name("volume-type")
volume_type = self.admin_volume_types_client.create_volume_type(
name=name)['volume_type']
# Create CG
cg_name = data_utils.rand_name('CG')
create_consistencygroup = (
self.consistencygroups_adm_client.create_consistencygroup)
cg = create_consistencygroup(volume_type['id'],
name=cg_name)['consistencygroup']
self.consistencygroups_adm_client.wait_for_consistencygroup_status(
cg['id'], 'available')
self.assertEqual(cg_name, cg['name'])
# Create volume
vol_name = data_utils.rand_name("volume")
params = {'name': vol_name,
'volume_type': volume_type['id'],
'consistencygroup_id': cg['id'],
'size': CONF.volume.volume_size}
volume = self.admin_volume_client.create_volume(**params)['volume']
waiters.wait_for_volume_resource_status(self.admin_volume_client,
volume['id'], 'available')
# Create CG from CG
cg_name2 = data_utils.rand_name('CG_from_cg')
create_consistencygroup2 = (
self.consistencygroups_adm_client.create_consistencygroup_from_src)
cg2 = create_consistencygroup2(source_cgid=cg['id'],
name=cg_name2)['consistencygroup']
self.consistencygroups_adm_client.wait_for_consistencygroup_status(
cg2['id'], 'available')
self.assertEqual(cg_name2, cg2['name'])
vols = self.admin_volume_client.list_volumes(
detail=True)['volumes']
for vol in vols:
if vol['consistencygroup_id'] == cg2['id']:
waiters.wait_for_volume_resource_status(
self.admin_volume_client, vol['id'], 'available')
# Clean up
self._delete_consistencygroup(cg2['id'])
self._delete_consistencygroup(cg['id'])
self.admin_volume_types_client.delete_volume_type(volume_type['id'])
|
py | 7dff4a494b6a78fdb215c5c9652788c849308e60 | #!/usr/bin/env python
import argparse
import glob
import os
import re
import shutil
import subprocess
import sys
import stat
if sys.platform == "win32":
import _winreg
from lib.config import BASE_URL, PLATFORM, enable_verbose_mode, \
get_target_arch, get_zip_name, build_env
from lib.util import scoped_cwd, rm_rf, get_electron_version, make_zip, \
execute, electron_gyp, electron_features
ELECTRON_VERSION = get_electron_version()
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'download',
'libchromiumcontent', 'static_library')
NATIVE_MKSNAPSHOT_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'native_mksnapshot')
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
PDF_VIEWER_ENABLED = electron_features()['enable_pdf_viewer%']
TARGET_BINARIES = {
'darwin': [
],
'win32': [
'{0}.exe'.format(PROJECT_NAME), # 'electron.exe'
'content_shell.pak',
'd3dcompiler_47.dll',
'icudtl.dat',
'libEGL.dll',
'libGLESv2.dll',
'ffmpeg.dll',
'node.dll',
'blink_image_resources_200_percent.pak',
'content_resources_200_percent.pak',
'ui_resources_200_percent.pak',
'views_resources_200_percent.pak',
'natives_blob.bin',
'snapshot_blob.bin',
],
'linux': [
PROJECT_NAME, # 'electron'
'content_shell.pak',
'icudtl.dat',
'libffmpeg.so',
'libnode.so',
'blink_image_resources_200_percent.pak',
'content_resources_200_percent.pak',
'ui_resources_200_percent.pak',
'views_resources_200_percent.pak',
'natives_blob.bin',
'snapshot_blob.bin',
],
}
TARGET_BINARIES_EXT = []
TARGET_DIRECTORIES = {
'darwin': [
'{0}.app'.format(PRODUCT_NAME),
],
'win32': [
'resources',
'locales',
],
'linux': [
'resources',
'locales',
],
}
def main():
args = parse_args()
if args.chromium_dir:
globals().update(CHROMIUM_DIR=args.chromium_dir)
if args.verbose:
enable_verbose_mode()
rm_rf(DIST_DIR)
os.makedirs(DIST_DIR)
force_build()
create_symbols()
copy_binaries()
copy_chrome_binary('chromedriver')
copy_chrome_binary('mksnapshot')
copy_license()
if PLATFORM == 'win32':
copy_vcruntime_binaries()
copy_ucrt_binaries()
if PLATFORM != 'win32' and not args.no_api_docs:
create_api_json_schema()
create_typescript_definitions()
if PLATFORM == 'linux':
strip_binaries()
create_version()
create_dist_zip()
create_chrome_binary_zip('chromedriver', ELECTRON_VERSION)
create_chrome_binary_zip('mksnapshot', ELECTRON_VERSION)
create_ffmpeg_zip()
create_symbols_zip()
def force_build():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
execute([sys.executable, build, '-c', 'Release'])
def copy_binaries():
for binary in TARGET_BINARIES[PLATFORM]:
shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)
if PLATFORM != 'darwin' and PDF_VIEWER_ENABLED:
shutil.copy2(os.path.join(OUT_DIR, 'pdf_viewer_resources.pak'),
DIST_DIR)
for directory in TARGET_DIRECTORIES[PLATFORM]:
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def copy_chrome_binary(binary):
if PLATFORM == 'win32':
binary += '.exe'
src = os.path.join(CHROMIUM_DIR, binary)
dest = os.path.join(DIST_DIR, binary)
# Copy file and keep the executable bit.
shutil.copyfile(src, dest)
os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC)
def copy_vcruntime_binaries():
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\Microsoft\VisualStudio\14.0\Setup\VC", 0,
_winreg.KEY_READ | _winreg.KEY_WOW64_32KEY) as key:
crt_dir = _winreg.QueryValueEx(key, "ProductDir")[0]
arch = get_target_arch()
if arch == "ia32":
arch = "x86"
crt_dir += r"redist\{0}\Microsoft.VC140.CRT\\".format(arch)
dlls = ["msvcp140.dll", "vcruntime140.dll"]
# Note: copyfile is used to remove the read-only flag
for dll in dlls:
shutil.copyfile(crt_dir + dll, os.path.join(DIST_DIR, dll))
TARGET_BINARIES_EXT.append(dll)
def copy_ucrt_binaries():
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\Microsoft\Windows Kits\Installed Roots"
) as key:
ucrt_dir = _winreg.QueryValueEx(key, "KitsRoot10")[0]
arch = get_target_arch()
if arch == "ia32":
arch = "x86"
ucrt_dir += r"Redist\ucrt\DLLs\{0}".format(arch)
dlls = glob.glob(os.path.join(ucrt_dir, '*.dll'))
if len(dlls) == 0:
raise Exception('UCRT files not found')
for dll in dlls:
shutil.copy2(dll, DIST_DIR)
TARGET_BINARIES_EXT.append(os.path.basename(dll))
def copy_license():
shutil.copy2(os.path.join(CHROMIUM_DIR, '..', 'LICENSES.chromium.html'),
DIST_DIR)
shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)
def create_api_json_schema():
node_bin_dir = os.path.join(SOURCE_ROOT, 'node_modules', '.bin')
env = os.environ.copy()
env['PATH'] = os.path.pathsep.join([node_bin_dir, env['PATH']])
outfile = os.path.relpath(os.path.join(DIST_DIR, 'electron-api.json'))
execute(['electron-docs-linter', 'docs', '--outfile={0}'.format(outfile),
'--version={}'.format(ELECTRON_VERSION.replace('v', ''))],
env=env)
def create_typescript_definitions():
node_bin_dir = os.path.join(SOURCE_ROOT, 'node_modules', '.bin')
env = os.environ.copy()
env['PATH'] = os.path.pathsep.join([node_bin_dir, env['PATH']])
infile = os.path.relpath(os.path.join(DIST_DIR, 'electron-api.json'))
outfile = os.path.relpath(os.path.join(DIST_DIR, 'electron.d.ts'))
execute(['electron-typescript-definitions', '--in={0}'.format(infile),
'--out={0}'.format(outfile)], env=env)
def strip_binaries():
for binary in TARGET_BINARIES[PLATFORM]:
if binary.endswith('.so') or '.' not in binary:
strip_binary(os.path.join(DIST_DIR, binary))
def strip_binary(binary_path):
if get_target_arch() == 'arm':
strip = 'arm-linux-gnueabihf-strip'
elif get_target_arch() == 'arm64':
strip = 'aarch64-linux-gnu-strip'
elif get_target_arch() == 'mips64el':
strip = 'mips64el-redhat-linux-strip'
else:
strip = 'strip'
execute([strip, binary_path], env=build_env())
def create_version():
version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
with open(version_path, 'w') as version_file:
version_file.write(ELECTRON_VERSION)
def create_symbols():
if get_target_arch() == 'mips64el':
return
destination = os.path.join(DIST_DIR, '{0}.breakpad.syms'.format(PROJECT_NAME))
dump_symbols = os.path.join(SOURCE_ROOT, 'script', 'dump-symbols.py')
execute([sys.executable, dump_symbols, destination])
if PLATFORM == 'darwin':
dsyms = glob.glob(os.path.join(OUT_DIR, '*.dSYM'))
for dsym in dsyms:
shutil.copytree(dsym, os.path.join(DIST_DIR, os.path.basename(dsym)))
elif PLATFORM == 'win32':
pdbs = glob.glob(os.path.join(OUT_DIR, '*.pdb'))
for pdb in pdbs:
shutil.copy2(pdb, DIST_DIR)
def create_dist_zip():
dist_name = get_zip_name(PROJECT_NAME, ELECTRON_VERSION)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = TARGET_BINARIES[PLATFORM] + TARGET_BINARIES_EXT + ['LICENSE',
'LICENSES.chromium.html', 'version']
if PLATFORM != 'darwin' and PDF_VIEWER_ENABLED:
files += ['pdf_viewer_resources.pak']
dirs = TARGET_DIRECTORIES[PLATFORM]
make_zip(zip_file, files, dirs)
def create_chrome_binary_zip(binary, version):
file_suffix = ''
create_native_mksnapshot = False
if binary == 'mksnapshot':
arch = get_target_arch()
if arch.startswith('arm'):
# if the arch is arm/arm64 the mksnapshot executable is an x64 binary,
# so name it as such.
file_suffix = 'x64'
create_native_mksnapshot = True
dist_name = get_zip_name(binary, version, file_suffix)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
files = ['LICENSE', 'LICENSES.chromium.html']
if PLATFORM == 'win32':
files += [binary + '.exe']
else:
files += [binary]
with scoped_cwd(DIST_DIR):
make_zip(zip_file, files, [])
if create_native_mksnapshot:
# Create a zip with the native version of the mksnapshot binary.
src = os.path.join(NATIVE_MKSNAPSHOT_DIR, binary)
dest = os.path.join(DIST_DIR, binary)
# Copy file and keep the executable bit.
shutil.copyfile(src, dest)
os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC)
dist_name = get_zip_name(binary, version)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
make_zip(zip_file, files, [])
def create_ffmpeg_zip():
dist_name = get_zip_name('ffmpeg', ELECTRON_VERSION)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
if PLATFORM == 'darwin':
ffmpeg_name = 'libffmpeg.dylib'
elif PLATFORM == 'linux':
ffmpeg_name = 'libffmpeg.so'
elif PLATFORM == 'win32':
ffmpeg_name = 'ffmpeg.dll'
shutil.copy2(os.path.join(CHROMIUM_DIR, '..', 'ffmpeg', ffmpeg_name),
DIST_DIR)
if PLATFORM == 'linux':
strip_binary(os.path.join(DIST_DIR, ffmpeg_name))
with scoped_cwd(DIST_DIR):
make_zip(zip_file, [ffmpeg_name, 'LICENSE', 'LICENSES.chromium.html'], [])
def create_symbols_zip():
if get_target_arch() == 'mips64el':
return
dist_name = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'symbols')
zip_file = os.path.join(DIST_DIR, dist_name)
licenses = ['LICENSE', 'LICENSES.chromium.html', 'version']
with scoped_cwd(DIST_DIR):
dirs = ['{0}.breakpad.syms'.format(PROJECT_NAME)]
make_zip(zip_file, licenses, dirs)
if PLATFORM == 'darwin':
dsym_name = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'dsym')
with scoped_cwd(DIST_DIR):
dsyms = glob.glob('*.dSYM')
make_zip(os.path.join(DIST_DIR, dsym_name), licenses, dsyms)
elif PLATFORM == 'win32':
pdb_name = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'pdb')
with scoped_cwd(DIST_DIR):
pdbs = glob.glob('*.pdb')
make_zip(os.path.join(DIST_DIR, pdb_name), pdbs + licenses, [])
def parse_args():
parser = argparse.ArgumentParser(description='Create Electron Distribution')
parser.add_argument('--no_api_docs',
action='store_true',
help='Skip generating the Electron API Documentation!')
parser.add_argument('--chromium_dir',
help='Specify a custom libchromiumcontent dist directory '
+ 'if manually compiled')
parser.add_argument('-v', '--verbose',
action='store_true',
help='Prints the output of the subprocesses')
return parser.parse_args()
if __name__ == '__main__':
sys.exit(main())
|
py | 7dff4bafc31f85e560dc928e8837b95067563b8b | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/interior_components/shared_droid_interface_interior.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","droid_interface_interior_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 7dff4c3241df27e35c14b57b0a8841587333f8bf | import getopt
import subprocess
import sys
import os
class Pdf_generator(object):
def __init__(self, kind=0, file_="", zip_file="", path=""):
super(Pdf_generator, self).__init__()
self.kind = kind
self.file_ = file_
self.path = path
self.zip_file = zip_file
def file_title_parse(self):
list_option = self.file_.split("_")
return {
"number" : list_option[1],
"PreProcessing": list_option[2],
"TypeClusterization": list_option[3],
"???": list_option[4].split(".")[0]
}
def build(self):
info = self.file_title_parse()
output = self.zip_file[:-4]+"_"+info["number"]+"_lines.pdf"
try:
print ("Create graphic...")
if self.kind == 1: # Grafico de linhas sem o modality
os.system("Rscript pdf/pdf_lines.R "+ self.zip_file+" "+self.file_+" "+output)
if self.kind == 2: # Grafico de pontos sem o modality
os.system("Rscript pdf/pdf_points.R "+ self.zip_file+" "+self.file_+" "+output)
if self.kind == 10: # Grafico de linhas com o modality
os.system("Rscript pdf/pdf_lines_modality.R "+ self.zip_file+" "+self.file_+" "+output)
print ("Ok")
except Exception:
print ("Problem :(")
return
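# Illustrative usage sketch (not part of the original module). The CSV and zip
# names below are hypothetical; real inputs depend on how the calling pipeline
# names its result files (Pdf_generator only splits the file name on "_").
if __name__ == '__main__':
    # kind=1 selects pdf/pdf_lines.R (line chart without modality)
    generator = Pdf_generator(kind=1,
                              file_="result_01_norm_kmeans_euclid.csv",
                              zip_file="experiment.zip")
    generator.build()  # writes experiment_01_lines.pdf via Rscript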
|
py | 7dff4de995b677855c6aef2e0aa5307e06979f88 | import argparse
import numpy as np
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional
|
py | 7dff4e8ce37b7cbe14326b924e40765cdfc1bd51 | import gramex.data
import numpy as np
import os
import pandas as pd
import pptx
from gramex.config import objectpath
from gramex.pptgen2 import pptgen, load_data, commands, commandline
from nose.tools import eq_, ok_, assert_raises
from orderedattrdict import AttrDict
from pandas.util.testing import assert_frame_equal as afe
from pptx import Presentation
from pptx.dml.color import _NoneColor
from pptx.enum.dml import MSO_THEME_COLOR, MSO_FILL
from pptx.enum.text import PP_ALIGN, MSO_VERTICAL_ANCHOR as MVA
from pptx.oxml.ns import _nsmap, qn
from testfixtures import LogCapture, OutputCapture
from unittest import TestCase
from . import folder, sales_file
units = ('inches', 'cm', 'mm', 'pt', 'emu', 'centipoints')
aeq_ = lambda a, b: ok_(abs(a - b) <= 1) # noqa
class TestPPTGen(TestCase):
# Test Case module for pptgen
@classmethod
def setUp(cls):
# Setup class method to initialize common variables.
np.random.seed(0)
cls.input = os.path.join(folder, 'input2.pptx')
cls.prs = Presentation(cls.input)
cls.output = os.path.join(folder, 'output.pptx')
cls.image = os.path.join(folder, 'small-image.jpg')
cls.data = pd.read_excel(sales_file, encoding='utf-8')
if os.path.exists(cls.output):
os.remove(cls.output)
@classmethod
def remove_output(cls):
if os.path.exists(cls.output):
os.remove(cls.output)
def get_shapes(self, shapes, name):
'''Return shapes with given name among shapes. Find shapes in groups using ">"'''
names = name.split('>')
if len(names) == 1:
return [shape for shape in shapes if shape.name == name]
else:
for name in names[:-1]:
shapes = sum((tuple(s.shapes) for s in self.get_shapes(shapes, name)), ())
return self.get_shapes(shapes, names[-1])
def get_shape(self, shapes, name):
try:
return self.get_shapes(shapes, name)[0]
except IndexError:
raise ValueError('Cannot find shape %s among %r' % (name, [s.name for s in shapes]))
def check_opacity(self, colorformat, opacity):
str_val = colorformat._xFill.xpath('.//a:alpha')[0].values()[0]
eq_(commands.ST_Percentage.convert_from_xml(str_val), opacity)
def test_data(self):
# Empty dict is returned as-is
eq_(load_data({}), {})
# [non-str, non-dict] datasets are loaded as-is
vals = [(eq_, None), (eq_, 1), (eq_, [1, 2]), (afe, self.data)]
for test_fn, val in vals:
test_fn(load_data(val)['data'], val)
for test_fn, val in vals:
test_fn(load_data({'key': val})['key'], val)
# Strings can be treated as functions with _default_key=function
eq_(load_data('[1, 2]', _default_key='function'), {'data': [1, 2]})
eq_(load_data({'key': '[1, 2]'}, _default_key='function'), {'key': [1, 2]})
# Strings can be treated as URLs with _default_key=url
sales_data = gramex.data.filter(sales_file)
afe(load_data(sales_file, _default_key='url')['data'], sales_data)
afe(load_data({'key': sales_file}, _default_key='url')['key'], sales_data)
# Strings raise an exception without _default_key
with assert_raises(Exception):
load_data('text')
with assert_raises(Exception):
load_data({'key': 'text'})
# Dicts with url: are processed via gramex.data.filter
afe(load_data({'url': sales_file})['data'], sales_data)
afe(load_data({'d': {'url': sales_file}})['d'], sales_data)
transform = 'data.set_index(["देश", "city", "product"])'
afe(load_data({'d': {'url': sales_file, 'transform': transform}})['d'],
sales_data.set_index(['देश', 'city', 'product']))
afe(load_data({'d': {'url': sales_file, 'args': {'product': 'Eggs'}}})['d'],
gramex.data.filter(sales_file, args={'product': 'Eggs'}))
# Dicts with function: are executed
afe(load_data({'function': 'gramex.cache.open(%r)' % sales_file})['data'], sales_data)
afe(load_data({'d': {'function': 'gramex.cache.open(%r)' % sales_file}})['d'], sales_data)
eq_(load_data({'d': {'function': 'str(handler)'}}, handler='abc')['d'], 'abc')
# Functions can chain data keys, and also accept kwargs
eq_(load_data({'x': 'a', 'y': 'x'}, a=1, _default_key='function')['y'], 1)
eq_(load_data({'x': 'a + 1', 'y': 'x + 1'}, a=1, _default_key='function')['y'], 3)
# kwargs are overridden by data unless they're None
eq_(load_data(2, data=1)['data'], 2)
eq_(load_data({'x': 2}, x=1)['x'], 2)
eq_(load_data(None, data=1)['data'], 1)
eq_(load_data({'x': None}, x=1)['x'], 1)
def test_expr(self):
# Test expr mode - when strings are expressions
t = lambda v: commands.expr(v, data={'_expr_mode': True, 'x': 1}) # noqa
eq_(t('x + 0'), 1) # Value is a variable
eq_(t('"x + 0"'), 'x + 0') # String is a literal
eq_(t('"{x + 0}"'), '{x + 0}') # String is a literal
eq_(t('f"x + 0"'), 'x + 0') # f-string is a template
eq_(t('f"{x + 0}"'), '1') # f-string is a template using data
for val in [None, True, 1, []]: # Non-string returns as-is
eq_(t(val), val)
afe(t(self.data), self.data)
eq_(t({'value': 'x + 0'}), 'x + 0') # value: is a template
# NOTE: literal cannot process x + 0. Just test with x
eq_(t({'value': '{x}'}), '1') # value: is a template using data
for val in [None, 1, [], {}]: # value: non-string returns as-is
eq_(t({'value': val}), val)
afe(t({'value': self.data}), self.data)
# Test literal mode - when strings are values
t = lambda v: commands.expr(v, data={'_expr_mode': False, 'x': 1}) # noqa
eq_(t('x + 0'), 'x + 0') # Value is a literal
eq_(t('"x + 0"'), '"x + 0"') # String value is a literal
# NOTE: literal cannot process expressions like x + 0. Just test with x
eq_(t('{x}'), '1') # String template is formatted
eq_(t('f"x + 0"'), 'f"x + 0"') # f-string value is a literal
for val in [None, True, 1, []]: # Non-string returns as-is
eq_(t(val), val)
afe(t(self.data), self.data)
eq_(t({'expr': 'x + 0'}), 1) # expr: is a variable
eq_(t({'expr': '"{x + 0}"'}), '{x + 0}') # expr: quoted string becomes string literal
eq_(t({'expr': 'f"{x + 0}"'}), '1') # expr: f-string is a template using data
for val in [None, 1, [], {}]: # expr: non-string returns as-is
eq_(t({'expr': val}), val)
def test_length(self):
length = commands.length
eq_(length(3.2), pptx.util.Inches(3.2))
for unit in ('', '"', 'in', 'inch'):
eq_(length('3.2' + unit), pptx.util.Inches(3.2))
eq_(length('3.2 ' + unit), pptx.util.Inches(3.2))
for unit in ('cp', 'centipoint'):
eq_(length('3.2' + unit), pptx.util.Centipoints(3.2))
eq_(length('3.2 ' + unit), pptx.util.Centipoints(3.2))
for unit in units:
eq_(length('3.2' + unit), getattr(pptx.util, unit.title())(3.2))
eq_(length('3.2 ' + unit), getattr(pptx.util, unit.title())(3.2))
with assert_raises(ValueError):
length('3.4 nonunits')
with assert_raises(ValueError):
length('-3.4')
with assert_raises(ValueError):
length(None)
length_class = commands.length_class
for unit in ('"', 'in', 'inch', 'inches', 'IN', 'Inch', 'INCHes', ''):
eq_(length_class(unit), pptx.util.Inches)
for unit in ('emu', 'Emu', 'EMU'):
eq_(length_class(unit), pptx.util.Emu)
for unit in ('cp', 'CentiPoint', 'CENTIPoints'):
eq_(length_class(unit), pptx.util.Centipoints)
with assert_raises(ValueError):
eq_(length_class('nonunits'))
def test_unit(self):
rule = {'Title 1': {'width': 10}}
for unit in units:
prs = pptgen(source=self.input, only=1, rules=[rule], unit=unit)
eq_(commands.length_unit.__name__, unit.title())
eq_(prs.slides[0].shapes[0].width, commands.length_unit(10))
def test_register(self, slides=3):
# register= must be a dict
with assert_raises(AssertionError):
pptgen(source=self.input, only=1, register='dummy')
# register= compiles the functions into commands.cmdlist
prs = pptgen(source=self.input, only=slides, register={
'cmd1': '(shape, spec, data)',
'cmd2': 'shape.get(spec, data)',
'rename': 'setattr(shape, "name", spec)',
'rotate': 'setattr(shape, "rotation", spec)',
}, rules=[
{'Rectangle 1': {'rotate': 45, 'rename': 'abc'}}
])
ok_('cmd1' in commands.cmdlist)
eq_(commands.cmdlist['cmd1'](shape=1, spec={}), (1, {}, None))
ok_('cmd2' in commands.cmdlist)
eq_(commands.cmdlist['cmd2'](shape={}, spec='x', data='y'), 'y')
shape = self.get_shape(prs.slides[0].shapes, 'abc')
eq_(shape.rotation, 45)
def test_only(self, slides=[2, 4]):
# Delete slides except those specified in ``only``
with assert_raises(AssertionError):
pptgen(source=self.input, only={})
with assert_raises(AssertionError):
pptgen(source=self.input, only='4')
# Test single only= value
only = slides[0]
prs = pptgen(source=self.input, only=only)
eq_(len(prs.slides), 1)
eq_(prs.slides[0].shapes.title.text, self.prs.slides[only - 1].shapes.title.text)
# Test multiple only= value
only = slides
prs = pptgen(source=self.input, only=only)
eq_(len(prs.slides), len(only))
for i, slide in enumerate(only):
eq_(prs.slides[i].shapes.title.text, self.prs.slides[slide - 1].shapes.title.text)
def test_target(self, slides=1):
# pptgen returns target presentation
prs = pptgen(source=self.input, only=slides)
eq_(len(prs.slides), 1)
eq_(prs.slides[0].shapes.title.text, self.prs.slides[0].shapes.title.text)
# pptgen ALSO saves at target= if it is specified
prs = pptgen(source=self.input, target=self.output, only=slides)
eq_(len(prs.slides), 1)
eq_(prs.slides[0].shapes.title.text, self.prs.slides[0].shapes.title.text)
prs = Presentation(self.output)
eq_(len(prs.slides), 1)
eq_(prs.slides[0].shapes.title.text, self.prs.slides[0].shapes.title.text)
def test_incorrect(self, slides=1):
with LogCapture() as logs:
pptgen(source=self.input, only=slides, rules=[
{'No-Shape': {'left': 0}},
{'Title 1': {'no-command': 0}}
])
logs.check_present(
('gramex', 'WARNING', 'pptgen2: No shape matches pattern: No-Shape'),
('gramex', 'WARNING', 'pptgen2: Unknown command: no-command on shape: Title 1')
)
def test_slide_filter(self, slides=[1, 2, 3]):
# Rules are specified as rule-name={shape: {rule}, ...}
data = {'x': [2, 3]}
rule1 = {'slide-number': 1, 'Title 1': {'width': 10}}
rule2 = {'slide-number': {'expr': 'x'},
'Title 1': {'width': 20},
'Rectangle 1': {'width': 20}}
prs = pptgen(source=self.input, only=slides, data=data, rules=[rule1, rule2])
eq_(self.get_shape(prs.slides[0].shapes, 'Title 1').width, pptx.util.Inches(10))
eq_(self.get_shape(prs.slides[1].shapes, 'Title 1').width, pptx.util.Inches(20))
eq_(self.get_shape(prs.slides[2].shapes, 'Title 1').width, pptx.util.Inches(20))
eq_(self.get_shape(prs.slides[2].shapes, 'Rectangle 1').width, pptx.util.Inches(20))
rule1 = {'slide-title': '*pptx*', 'Title 1': {'width': 10}}
rule2 = {'slide-title': ['*pos*', '*pptx*'], 'Rectangle 1': {'width': 20}}
prs = pptgen(source=self.input, only=slides, rules=[rule1, rule2])
eq_(self.get_shape(prs.slides[0].shapes, 'Title 1').width, pptx.util.Inches(10))
eq_(self.get_shape(prs.slides[2].shapes, 'Rectangle 1').width, pptx.util.Inches(20))
with LogCapture() as logs:
pptgen(source=self.input, only=slides, rules=[{'slide-number': 5}])
logs.check_present(
('gramex', 'WARNING', 'pptgen2: No slide with slide-number: 5, slide-title: None'),
)
def test_transition(self, slides=[1, 2, 3]):
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'slide-number': 1, 'transition': 'glitter'},
{'slide-number': 2, 'transition': {'type': 'morph', 'duration': 1}},
{'slide-number': 3, 'transition': {'type': 'wind left', 'advance': 2}},
])
node = 'mc:AlternateContent/mc:Choice/p:transition'
tr = prs.slides[0].element.find(node, _nsmap)
eq_(tr.attrib['{%s}dur' % _nsmap['p14']], '300')
el = tr.find('p14:glitter', _nsmap)
eq_(el.attrib, {'pattern': 'hexagon', 'dir': 'l'})
tr = prs.slides[1].element.find(node, _nsmap)
eq_(tr.attrib['{%s}dur' % _nsmap['p14']], '1000')
el = tr.find('p159:morph', _nsmap)
eq_(el.attrib, {'option': 'byObject'})
el = prs.slides[1].element.find(node, _nsmap)
tr = prs.slides[2].element.find(node, _nsmap)
eq_(tr.attrib['advTm'], '2000')
el = tr.find('p15:prstTrans', _nsmap)
eq_(el.attrib, {'prst': 'wind', 'invX': '1'})
prs = pptgen(source=prs, target=self.output, rules=[{'transition': 'none'}])
for slide in prs.slides:
eq_(slide.element.find(node, _nsmap), None)
def test_normalize_group(self, slides=3):
def coords(grp):
c = grp.element.find(qn('p:grpSpPr')).find(qn('a:xfrm'))
return AttrDict(off=c.find(qn('a:off')),
ext=c.find(qn('a:ext')),
choff=c.find(qn('a:chOff')),
chext=c.find(qn('a:chExt')))
grp = self.get_shape(self.prs.slides[slides - 1].shapes, 'Group 2')
subgrps = self.get_shapes(grp.shapes, 'SubGroup')
for g in [grp] + subgrps:
c = coords(g)
assert c.off.x != c.choff.x, 'x offset is initially different'
assert c.off.y != c.choff.y, 'y offset is initially different'
assert c.ext.cx != c.chext.cx, 'width is initially different'
assert c.ext.cy != c.chext.cy, 'height is initially different'
# Just opening via pptgen normalizes the groups
prs = pptgen(source=self.input, target=self.output)
grp = self.get_shape(prs.slides[slides - 1].shapes, 'Group 2')
subgrps = self.get_shapes(grp.shapes, 'SubGroup')
for g in [grp] + subgrps:
c = coords(g)
assert c.off.x == c.choff.x, 'x offset is same after normalization'
assert c.off.y == c.choff.y, 'y offset is same after normalization'
assert c.ext.cx == c.chext.cx, 'width is same after normalization'
assert c.ext.cy == c.chext.cy, 'height is same after normalization'
def test_shape_names(self, slides=3):
prs = pptgen(source=self.input, only=slides, rules=[
{'group 1': {'left': 99}}, # Case-sensitive match is ignored
{'Group ?': {'left': 1}}, # Group 1, Group 2
{'?extBox ?': {'left': 2}}, # TextBox 1
{'*le 1': {'left': 3}}, # Title 1, Rectangle 1
{'*form*': {'left': 4}}, # Freeform 1
{'[BC]har[tu] 1': {'left': 5}}, # Chart 1
])
eq_(self.get_shape(prs.slides[0].shapes, 'Group 1').left, pptx.util.Inches(1))
eq_(self.get_shape(prs.slides[0].shapes, 'Group 2').left, pptx.util.Inches(1))
eq_(self.get_shape(prs.slides[0].shapes, 'TextBox 1').left, pptx.util.Inches(2))
eq_(self.get_shape(prs.slides[0].shapes, 'Title 1').left, pptx.util.Inches(3))
eq_(self.get_shape(prs.slides[0].shapes, 'Rectangle 1').left, pptx.util.Inches(3))
eq_(self.get_shape(prs.slides[0].shapes, 'Freeform 1').left, pptx.util.Inches(4))
eq_(self.get_shape(prs.slides[0].shapes, 'Chart 1').left, pptx.util.Inches(5))
def test_name_position(self, slides=3):
pos = {'width': 4, 'height': 3, 'top': 2, 'left': 1, 'rotation': 30,
'name': {'expr': 'shape.name + " X"'}}
add = {'add-width': -0.1, 'add-height': 0.05, 'add-top': 0.3, 'add-left': -0.1,
'add-rotation': 30, 'name': {'expr': 'shape.name + " X"'}}
for name in ['Rectangle 1', 'TextBox 1', 'Picture 1', 'Chart 1', 'Group 1', 'Table 1',
'Diagram 1', 'Audio 1', 'Freeform 1', 'Word Art 1']:
# 'Zoom 1', 'Equation 1' are not supported by python-pptx
prs = pptgen(source=self.input, only=slides, rules=[{name: pos}])
shp = self.get_shape(prs.slides[0].shapes, name + ' X')
for attr, val in pos.items():
if attr != 'name':
convert = float if attr == 'rotation' else commands.length_unit
eq_(getattr(shp, attr), convert(val))
prs = pptgen(source=self.input, only=slides, rules=[{name: add}])
shp = self.get_shape(prs.slides[0].shapes, name + ' X')
src = self.get_shape(self.prs.slides[slides - 1].shapes, name)
for cmd, val in add.items():
if cmd != 'name':
attr = cmd.split('-')[-1]
convert = float if cmd == 'add-rotation' else commands.length_unit
eq_(getattr(shp, attr), convert(val) + getattr(src, attr))
for zoom in (0.6, 1.2):
prs = pptgen(source=self.input, only=slides, rules=[{name: {'zoom': zoom}}])
shp = self.get_shape(prs.slides[0].shapes, name)
src = self.get_shape(self.prs.slides[slides - 1].shapes, name)
aeq_(shp.left, int(src.left - (zoom - 1) * src.width / 2))
aeq_(shp.top, int(src.top - (zoom - 1) * src.height / 2))
aeq_(shp.width, int(src.width * zoom))
aeq_(shp.height, int(src.height * zoom))
# Adjust position within group and subgroups
text_pos = {'left': 1, 'top': 1, 'width': 2, 'height': 0.5}
img_pos = {'left': 0, 'top': 1, 'width': 0.5, 'height': 0.5}
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'Group 2': {
'left': 1,
'Subgroup': {'left': 1, 'Text': text_pos, 'Picture': img_pos}
}}
])
eq_(self.get_shape(prs.slides[0].shapes, 'Group 2>Subgroup').left, pptx.util.Inches(1))
shape = self.get_shape(prs.slides[0].shapes, 'Group 2>Subgroup>Text')
for attr, val in text_pos.items():
eq_(getattr(shape, attr), pptx.util.Inches(val))
shape = self.get_shape(prs.slides[0].shapes, 'Group 2>Subgroup>Picture')
for attr, val in img_pos.items():
eq_(getattr(shape, attr), pptx.util.Inches(val))
def test_image(self, slides=3):
shape = self.get_shape(self.prs.slides[slides - 1].shapes, 'Picture 1')
width = shape.width
for img, aspect in (('small-image.jpg', 1), ('small-image.png', 2)):
path = os.path.join(folder, img)
prs = pptgen(source=self.input, only=slides,
rules=[{'Picture 1': {'image': path}}])
shape = self.get_shape(prs.slides[0].shapes, 'Picture 1')
rid = shape._pic.blipFill.blip.rEmbed
part = shape.part.related_parts[rid]
with open(path, 'rb') as handle:
eq_(part.blob, handle.read())
eq_(shape.width, width)
self.assertAlmostEqual(shape.width / shape.height, aspect, places=5)
def test_image_width_height(self, slides=3):
shape = self.get_shape(self.prs.slides[slides - 1].shapes, 'Picture 1')
aspect = shape.width / shape.height
for size in (3, '3 inches', '7.62 cm', '76.2 mm', '216 pt', '2743200 emu', '21600 cp'):
# image-width preserves aspect ratio
prs = pptgen(source=self.input, only=slides,
rules=[{'Picture 1': {'image-width': size}}])
shape = self.get_shape(prs.slides[0].shapes, 'Picture 1')
eq_(shape.width, pptx.util.Inches(3))
self.assertAlmostEqual(shape.width / shape.height, aspect, places=5)
# image-height preserves aspect ratio
prs = pptgen(source=self.input, only=slides,
rules=[{'Picture 1': {'image-height': size}}])
shape = self.get_shape(prs.slides[0].shapes, 'Picture 1')
eq_(shape.height, pptx.util.Inches(3))
self.assertAlmostEqual(shape.width / shape.height, aspect, places=5)
def test_fill_stroke(self, slides=3):
colors = (
('red', {'rgb': (255, 0, 0)}),
('#f00', {'rgb': (255, 0, 0)}),
('#ff0000', {'rgb': (255, 0, 0)}),
('rgb(255, 0, 0)', {'rgb': (255, 0, 0)}),
((255, 0, 0), {'rgb': (255, 0, 0)}),
([255, 0, 0], {'rgb': (255, 0, 0)}),
((1.0, 0.5, 0), {'rgb': (255, 128, 0)}),
([1.0, 0.5, 0], {'rgb': (255, 128, 0)}),
('ACCENT_1', {'theme_color': MSO_THEME_COLOR.ACCENT_1, 'brightness': 0}),
('ACCENT_2+40', {'theme_color': MSO_THEME_COLOR.ACCENT_2, 'brightness': 0.4}),
('ACCENT_3-20', {'theme_color': MSO_THEME_COLOR.ACCENT_3, 'brightness': -0.2}),
)
for color, result in colors:
for name in ['TextBox 1', 'Rectangle 1', 'Word Art 1', 'Freeform 1']:
# Doesn't work for 'Group 1', 'Table 1', 'Audio 1', 'Chart 1', 'Diagram 1'
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{name: {
'fill': color,
'stroke': color,
'fill-opacity': 0.5,
'stroke-opacity': 0.4,
'stroke-width': '1 pt',
}}
])
shape = self.get_shape(prs.slides[0].shapes, name)
for key in (('fill.fore_color.', 'line.fill.fore_color.')):
for attr, val in result.items():
if attr == 'brightness':
self.assertAlmostEqual(objectpath(shape, key + attr), val, places=5)
else:
eq_(objectpath(shape, key + attr), val)
self.check_opacity(shape.fill.fore_color, 0.5)
self.check_opacity(shape.line.fill.fore_color, 0.4)
eq_(shape.line.width, pptx.util.Pt(1))
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'Rectangle 1': {'fill': 'none', 'stroke': 'none'}}
])
shape = self.get_shape(prs.slides[0].shapes, 'Rectangle 1')
eq_(shape.fill.type, MSO_FILL.BACKGROUND)
eq_(shape.line.fill.type, MSO_FILL.BACKGROUND)
def test_clone_shape(self, slides=3):
data = {'a': -0.5, 'b': 1, 'c': 2.5}
clone = {'clone-shape': data, 'top': 1, 'add-top': {'expr': 'clone.val'}}
# Clone shapes
for name in ['TextBox 1', 'Group 1', 'Table 1', 'Audio 1', 'Freeform 1', 'Word Art 1']:
# TODO: 'Chart 1', 'Diagram 1' don't work yet
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[{name: clone}])
shapes = [shape for shape in prs.slides[0].shapes if shape.name == name]
eq_(shapes[0].top, pptx.util.Inches(0.5))
eq_(shapes[1].top, pptx.util.Inches(2.0))
eq_(shapes[2].top, pptx.util.Inches(3.5))
# Clone groups
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'Group 1': {
'clone-shape': data,
'data': {'myclone': 'clone'},
'top': {'expr': '1 + myclone.val'},
'Picture': {
'clone-shape': data,
'data': {'subclone': 'clone'},
'left': {'expr': '1 + subclone.val / 2'},
'image-width': 0.2,
},
'Caption': {
'clone-shape': data,
'data': {'subclone2': 'clone'},
'left': {'expr': '1 + subclone2.val / 2'},
'text': '{clone.pos}, {clone.key}, {clone.val}, {clone.shape.text}, ' +
'{clone.parent.key}, {clone.parent.val}',
'fill': 'red',
}
}}
])
groups = self.get_shapes(prs.slides[0].shapes, 'Group 1')
picture = self.get_shapes(prs.slides[0].shapes, 'Group 1>Picture')
caption = self.get_shapes(prs.slides[0].shapes, 'Group 1>Caption')
n = len(data)
for i, (ik, iv) in enumerate(data.items()):
eq_(groups[i].top, pptx.util.Inches(1 + iv))
for j, (jk, jv) in enumerate(data.items()):
eq_(picture[i * n + j].left, pptx.util.Inches(1 + jv / 2))
eq_(picture[i * n + j].width, pptx.util.Inches(0.2))
eq_(caption[i * n + j].left, pptx.util.Inches(1 + jv / 2))
eq_(caption[i * n + j].fill.fore_color.rgb, (255, 0, 0))
eq_(caption[i * n + j].text, f'{j}, {jk}, {jv}, Grouped image, {ik}, {iv}')
def test_text(self, slides=4):
# Non-strings are converted to str
for val in (1, ['x'], ):
prs = pptgen(source=self.input, only=slides, rules=[
{'TextBox 1': {'text': str(val)}}])
shape = self.get_shape(prs.slides[0].shapes, 'TextBox 1')
eq_(shape.text, str(val))
# Unicode characters work
text = '高σ高λس►'
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'TextBox 1': {'text': text}}])
shape = self.get_shape(prs.slides[0].shapes, 'TextBox 1')
eq_(shape.text, text)
# Para and run formatting works
text = '''P0R0 <a>P0R1</a> P0R2 <a>P0R3</a> P0R4
<p align="left" bold="y" color="#ff0000" font-name="Arial" font-size="8 pt" italic="y"
level="0" line-spacing="3 pt" space-before="16 pt" space-after="20 pt"
underline="y"
>
P1R0
<a baseline="superscript" bold="n" color="#00ff00" font-name="Calibri"
font-size="18 pt" italic="n" strike="double" underline="n"> P1R1 </a>
P1R2
</p>
<p>P2R0
<a bold="y" baseline="-35%" strike="single"> P2R1 </a>
<a baseline="subscript" strike="none"> P2R2 </a>
P2R3</p>
P3R0 <a color="#00f">P3R1</a> P3R2
'''
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'TextBox 1': {'text': text}}])
shape = self.get_shape(prs.slides[0].shapes, 'TextBox 1')
eq_(shape.text.split(),
'P0R0 P0R1 P0R2 P0R3 P0R4 P1R0 P1R1 P1R2 P2R0 P2R1 P2R2 P2R3 P3R0 P3R1 P3R2'.split())
paras = shape.text_frame.paragraphs
# Para 0 has the right attributes and text
eq_(paras[0].text, 'P0R0 P0R1 P0R2 P0R3 P0R4 ')
eq_(paras[0].runs[0].text, 'P0R0 ')
# Para 0 attributes are preserved
eq_(paras[0].level, 0)
eq_(paras[0].alignment, PP_ALIGN.CENTER)
eq_(paras[0].runs[0].font.size, pptx.util.Pt(28))
eq_(paras[0].runs[0].font.name, 'Consolas')
for attr in ('line_spacing', 'space_after', 'space_before'):
eq_(getattr(paras[0], attr), None)
# Para 1 has the right attributes and text
eq_(paras[1].text.split(), 'P1R0 P1R1 P1R2'.split())
eq_(paras[1].alignment, PP_ALIGN.LEFT)
eq_(paras[1].font.bold, True)
eq_(paras[1].font.color.rgb, (255, 0, 0))
eq_(paras[1].font.name, 'Arial')
eq_(paras[1].font.size, pptx.util.Pt(8))
eq_(paras[1].font.italic, True)
eq_(paras[1].level, 0)
eq_(paras[1].line_spacing, pptx.util.Pt(3))
eq_(paras[1].space_before, pptx.util.Pt(16))
eq_(paras[1].space_after, pptx.util.Pt(20))
eq_(paras[1].font.underline, True)
eq_(paras[1].runs[0].text, ' P1R0 ')
# Para 1 run 2 has the specified attributes
eq_(paras[1].runs[1].text, ' P1R1 ')
eq_(paras[1].runs[1].font.bold, False)
eq_(paras[1].runs[1].font.color.rgb, (0, 255, 0))
eq_(paras[1].runs[1].font.name, 'Calibri')
eq_(paras[1].runs[1].font.size, pptx.util.Pt(18))
eq_(paras[1].runs[1].font.italic, False)
eq_(paras[1].runs[1].font.underline, False)
eq_(paras[1].runs[1].font._rPr.get('baseline'), '30000')
eq_(paras[1].runs[1].font._rPr.get('strike'), 'dblStrike')
# Auto-created runs have no attributes
for attr in ('bold', 'name', 'italic', 'underline'):
eq_(getattr(paras[1].runs[0].font, attr), None)
eq_(getattr(paras[1].runs[2].font, attr), None)
eq_(getattr(paras[2].runs[0].font, attr), None)
eq_(getattr(paras[2].runs[2].font, attr), None)
# ... except font size, which is taken from the first run (set in the PPTX)
eq_(paras[1].runs[0].font.size, pptx.util.Pt(28))
eq_(paras[1].runs[2].font.size, pptx.util.Pt(28))
eq_(paras[2].runs[0].font.size, pptx.util.Pt(28))
eq_(paras[2].runs[2].font.size, pptx.util.Pt(28))
# Para 2 runs have the right attrs
eq_(paras[2].runs[1].text, ' P2R1 ')
eq_(paras[2].runs[1].font.bold, True)
eq_(paras[2].runs[1].font._rPr.get('baseline'), '-35000')
eq_(paras[2].runs[1].font._rPr.get('strike'), 'sngStrike')
eq_(paras[2].runs[2].text, ' P2R2 ')
eq_(paras[2].runs[2].font._rPr.get('baseline'), '-25000')
eq_(paras[2].runs[2].font._rPr.get('strike'), 'noStrike')
# Para 3: runs are auto-wrapped into paras
eq_(paras[3].runs[0].text, ' P3R0 ')
eq_(paras[3].runs[1].text, 'P3R1')
eq_(paras[3].runs[1].font.color.rgb, (0, 0, 255))
eq_(paras[3].runs[2].text, ' P3R2 ')
def test_text_style(self, slides=4):
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'TextBox 1': {
'bold': 0,
'italic': 1,
'underline': 0,
'color': 'blue',
'font-name': 'Calibri',
'font-size': '10 pt',
}}
])
shape = self.get_shape(prs.slides[0].shapes, 'TextBox 1')
for para in shape.text_frame.paragraphs:
eq_(para.font.bold, False)
eq_(para.font.fill.fore_color.rgb, (0, 0, 255))
eq_(para.font.italic, True)
eq_(para.font.name, 'Calibri')
eq_(para.font.size, pptx.util.Pt(10))
eq_(para.font.underline, False)
for run in para.runs:
eq_(run.font.bold, None)
# PPT needs colors on runs too, not only paras
eq_(run.font.fill.fore_color.rgb, (0, 0, 255))
eq_(run.font.italic, None)
eq_(run.font.name, None)
eq_(run.font.size, None)
eq_(run.font.underline, None)
def test_replace(self, slides=4):
with assert_raises(ValueError):
pptgen(source=self.input, only=slides, rules=[{'TextBox 1': {'replace': 'text'}}])
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'TextBox 1': {
'replace': {
'[Oo]ld': 'Old1',
'(Title|italic)': '<a underline="y" bold="n">New</a>',
'title': 'ignored',
'der': 'd<a bold="y" underline="n">E<a color="green">R</a>',
'c.l.r': '<a font-size="18 pt" font-name="Arial">COLOR</a>',
}
}}
])
defaults = {'font-name': 'Consolas', 'font-size': '28 pt'}
expected_runs = [
{'text': 'Old1', **defaults},
{'text': ' ', **defaults},
{'text': 'New', 'underline': True, **defaults},
{'text': ' ', **defaults},
{'text': 'un', 'underline': True, **defaults},
{'text': 'd', 'underline': True, **defaults},
{'text': 'E', 'bold': True, **defaults},
{'text': 'R', 'color': (0, 128, 0), 'underline': True, **defaults},
{'text': 'line', 'underline': True, **defaults},
{'text': ' ', **defaults},
{'text': 'New', 'italic': True, 'underline': True, **defaults},
{'text': ' ', **defaults},
{'text': 'COLOR', 'color': (255, 0, 0), 'font-size': '18 pt', 'font-name': 'Arial'},
]
shape = self.get_shape(prs.slides[0].shapes, 'TextBox 1')
for expected, actual in zip(expected_runs, shape.text_frame.paragraphs[0].runs):
eq_(expected['text'], actual.text)
eq_(expected.get('bold', False), bool(actual.font.bold))
eq_(expected.get('italic', False), bool(actual.font.italic))
eq_(expected.get('underline', False), bool(actual.font.underline))
if 'color' in expected:
eq_(expected['color'], actual.font.color.rgb)
else:
ok_(isinstance(actual.font.color._color, _NoneColor))
eq_(expected.get('font-name', None), actual.font.name)
if 'font-size' in expected:
eq_(commands.length(expected['font-size']), actual.font.size)
else:
eq_(actual.font.size, None)
def test_link_hover_tooltip(self, slides=[1, 2, 3, 4, 5, 6, 7], main_slide=5):
prefixes = {'Link ': 'link', 'Hover ': 'hover', 'Has ': 'link', 'Tooltip ': 'tooltip'}
vals = {
'first': {'target': '', 'action': 'ppaction://hlinkshowjump?jump=firstslide'},
'last': {'target': '', 'action': 'ppaction://hlinkshowjump?jump=lastslide'},
'next': {'target': '', 'action': 'ppaction://hlinkshowjump?jump=nextslide'},
'previous': {'target': '', 'action': 'ppaction://hlinkshowjump?jump=previousslide'},
'prev': {'target': '', 'action': 'ppaction://hlinkshowjump?jump=previousslide'},
# 'end': {'target': '', 'action': 'ppaction://hlinkshowjump?jump=endshow'},
'back': {'target': '', 'action': 'ppaction://hlinkshowjump?jump=lastslideviewed'},
'noaction': {'target': '', 'action': 'ppaction://noaction'},
'1': {'target': self.prs.slides[1 - 1].shapes.title.text,
'action': 'ppaction://hlinksldjump'},
'2': {'target': self.prs.slides[2 - 1].shapes.title.text,
'action': 'ppaction://hlinksldjump'},
'4': {'target': self.prs.slides[4 - 1].shapes.title.text,
'action': 'ppaction://hlinksldjump'},
'https://t.co/': {'target': 'https://t.co/', 'action': None},
'file.pptx': {'target': 'file.pptx',
'action': 'ppaction://hlinkpres?slideindex=1&slidetitle='},
'file.xlsx': {'target': 'file.xlsx', 'action': 'ppaction://hlinkfile'},
}
shape_rule = {prefix + val: {key: val}
for val in vals for prefix, key in prefixes.items()}
text_rule = {prefix + 'Text': {
'replace': {val + '$': f'<a {key}="{val}">{val}</a>' for val in vals}
} for prefix, key in prefixes.items()}
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
shape_rule, text_rule])
slide = prs.slides[main_slide - 1]
shapes = slide.shapes
for prefix, key in prefixes.items():
for val, attr in vals.items():
# Shape rules
shape = self.get_shape(shapes, prefix + val)
tag = 'a:hlinkClick' if key in {'link', 'tooltip'} else 'a:hlinkHover'
self.check_link(slide, shape._element, tag, key, val, attr)
# Text rules
shape = self.get_shape(shapes, prefix + 'Text')
tag = 'a:hlinkClick' if key in {'link', 'tooltip'} else 'a:hlinkMouseOver'
for para in shape.text_frame.paragraphs:
for run in para.runs:
if run.text == val:
self.check_link(slide, run._r, tag, key, val, attr)
def check_link(self, slide, el, tag, key, val, attr):
link = el.find('.//' + tag, _nsmap)
# Tooltip converts 'noaction' into 'next'. Handle that
if key == 'tooltip' and val == 'noaction':
action = 'ppaction://hlinkshowjump?jump=nextslide'
else:
action = attr['action']
eq_(link.get('action'), action)
rid = link.get(qn('r:id'))
target = slide.part.rels[rid]._target if rid else ''
if isinstance(target, pptx.parts.slide.SlidePart):
target = target.slide.shapes.title.text
eq_(target, attr['target'])
def test_table(self, slides=9):
data = self.data.head(10) # The 10th row has NaNs. Ensure the row is included
headers = ['<a color="red">देश</a>', 'city', '<p>prod</p><p>uct</p>', 'Sales']
prs = pptgen(source=self.input, target=self.output, only=slides, mode='expr',
data={'data': data},
rules=[
{'Table 1': {'table': {'data': 'data', 'header-row': headers}}},
{'Table 2': {'table': {'data': 'data', 'width': 2}}},
])
for row_offset, shape_name in ((1, 'Table 1'), (0, 'Table 2')):
table = self.get_shape(prs.slides[0].shapes, shape_name).table
for i, (index, row) in enumerate(data.iterrows()):
for j, (column, val) in enumerate(row.iteritems()):
cell = table.rows[i + row_offset].cells[j]
eq_(cell.text, '{}'.format(val))
# Test table header
header = self.get_shape(prs.slides[0].shapes, 'Table 1').table.rows[0].cells
eq_(header[0].text, 'देश')
eq_(header[0].text_frame.paragraphs[0].runs[0].font.color.rgb, (255, 0, 0))
eq_(header[2].text_frame.paragraphs[0].text, 'prod')
eq_(header[2].text_frame.paragraphs[1].text, 'uct')
eq_(header[4].text, 'Table 1') # Inherited from the template
# Test column widths
gridcols = self.get_shape(prs.slides[0].shapes, 'Table 2').table._tbl.tblGrid.gridCol_lst
ok_(all(int(v.get('w')) == pptx.util.Inches(2) for v in gridcols))
# If there's no table data, text is copied from source
prs = pptgen(source=self.input, target=self.output, only=slides, mode='expr', rules=[
{'Table 2': {'table': {
'header-row': True,
'fill': '"red" if "Val" in cell.val else "yellow"',
}}}
])
table = self.get_shape(prs.slides[0].shapes, 'Table 2').table
eq_(table.rows[1].cells[0].fill.fore_color.rgb, (255, 0, 0))
eq_(table.rows[1].cells[1].fill.fore_color.rgb, (255, 255, 0))
prs = pptgen(source=self.input, target=self.output, only=slides, mode='expr', rules=[
{'Table 2': {'table': {
'fill': '"red" if "Table" in cell.val else "yellow"',
}}}
])
table = self.get_shape(prs.slides[0].shapes, 'Table 2').table
eq_(table.rows[0].cells[0].fill.fore_color.rgb, (255, 0, 0))
eq_(table.rows[0].cells[1].fill.fore_color.rgb, (255, 255, 0))
# Test all table commands comprehensively
cmds = {'table': {
'data': data,
'header-row': False,
'total-row': True,
'first-column': True,
'last-column': True,
'width': {
'देश': '1 in',
'city': {'expr': '"2 in" if cell.column == "city" else "1 in"'},
'product': {'expr': '"2 in" if cell.column == "city" else "1.5 in"'},
},
'align': {'expr': '"left" if cell.pos.row % 2 else "right"'},
'bold': {'expr': 'cell.pos.row % 2'},
'color': {'expr': '"red" if cell.pos.row % 3 else "green"'},
'fill': {'expr': '"#eee" if cell.pos.row % 2 else "#ccc"'},
'fill-opacity': 0.4,
'font-name': 'Arial',
'font-size': {'expr': '"10 pt" if cell.column == "देश" else "8 pt"'},
'italic': {
'देश': True,
'city': {'expr': 'cell.pos.row % 2'},
},
'margin-left': {
'देश': '0.05 in',
'city': {'expr': '0 if cell.pos.column % 2 else "0.1 in"'},
},
'margin-right': '1 pt',
'margin-top': {'expr': '0 if cell.pos.column % 2 else "0.1 in"'},
'margin-bottom': 0,
'underline': {'expr': 'cell.pos.column % 2'},
'vertical-align': {
'देश': 'middle',
'city': {'expr': '"top" if cell.pos.row % 2 else "bottom"'},
},
# Add text: at the end to verify that it over-rides bold:, italic:, etc
'text': '{cell.pos.row} {cell.pos.column} <a italic="y">{cell.index}</a> ' +
'{cell.column} {cell.val} {cell.row.size} {cell.data.size}',
}}
prs = pptgen(source=self.input, target=self.output, only=slides, rules=[
{'Table 1': cmds}, {'Table 2': cmds}
])
for shape_name in ('Table 1', 'Table 2'):
src_table = self.get_shape(self.prs.slides[slides - 1].shapes, shape_name).table
table = self.get_shape(prs.slides[0].shapes, shape_name).table
# Table shape is extended or contracted
eq_(len(table.rows), len(data))
eq_(len(table.columns), len(data.columns))
# Special rows / columns are set
eq_(table.first_row, False)
eq_(table.last_row, True)
eq_(table.first_col, True)
eq_(table.last_col, True)
# Check column widths for changed columns
gridcols = table._tbl.tblGrid.gridCol_lst
eq_(int(gridcols[0].get('w')), pptx.util.Inches(1))
eq_(int(gridcols[1].get('w')), pptx.util.Inches(2))
eq_(int(gridcols[2].get('w')), pptx.util.Inches(1.5))
# Check cell contents
maxrow, maxcol = len(src_table.rows) - 1, len(src_table.columns) - 1
for i, (index, row) in enumerate(data.iterrows()):
# Row height is the same as in the source table (or its last row)
eq_(table.rows[i].height, src_table.rows[min(i, maxrow)].height)
for j, (column, val) in enumerate(row.iteritems()):
# Unspecified col width is the same as in the source table (or its last col)
if column in {'sales', 'growth'}:
eq_(table.columns[j].width, src_table.columns[min(j, maxcol)].width)
# Text matches, and all cell.* attributes are correct
cell = table.rows[i].cells[j]
paras = cell.text_frame.paragraphs
eq_(cell.text, f'{i} {j} {index} {column} {val} {row.size} {data.size}')
eq_(paras[0].font.bold, bool(i % 2))
# Check para font color, but not run font color. Run is overwritten because
# text: command is given AFTER color: command
eq_(paras[0].font.color.rgb, (255, 0, 0) if i % 3 else (0, 128, 0))
eq_(cell.fill.fore_color.rgb, (238, 238, 238) if i % 2 else (204, 204, 204))
self.check_opacity(cell.fill.fore_color, 0.4)
eq_(paras[0].font.size,
pptx.util.Pt(10) if column == 'देश' else pptx.util.Pt(8))
eq_(paras[0].font.name, 'Arial')
eq_(paras[0].font.italic,
True if column == 'देश' else
bool(i % 2) if column == 'city' else
None)
eq_(paras[0].runs[1].font.italic, True)
eq_(paras[0].font.underline, bool(j % 2))
eq_(paras[0].alignment, PP_ALIGN.LEFT if i % 2 else PP_ALIGN.RIGHT)
eq_(cell.vertical_anchor,
MVA.MIDDLE if column == 'देश' else
(MVA.TOP if i % 2 else MVA.BOTTOM) if column == 'city' else
None)
eq_(cell.margin_left,
pptx.util.Inches(0.05) if column == 'देश' else
pptx.util.Inches(0 if j % 2 else 0.1) if column == 'city' else
pptx.util.Inches(0.1))
eq_(cell.margin_right, pptx.util.Pt(1))
eq_(cell.margin_top, pptx.util.Inches(0 if j % 2 else 0.1))
eq_(cell.margin_bottom, 0)
# table: can only apply to a table element, not text
with assert_raises(ValueError):
pptgen(source=self.input, only=slides, rules=[{'Title 1': {'table': {}}}])
# table.data: must be a DataFrame
with assert_raises(ValueError):
pptgen(source=self.input, only=slides, rules=[{'Table 1': {'table': {'data': []}}}])
# Invalid column names raise a warning
with LogCapture() as logs:
pptgen(source=self.input, only=slides, rules=[{'Table 1': {'table': {
'data': self.data.head(3),
'width': {'NA1': 1},
'text': {'NA2': 0},
}}}])
logs.check_present(
('gramex', 'WARNING', 'pptgen2: No column: NA1 in table: Table 1'),
('gramex', 'WARNING', 'pptgen2: No column: NA2 in table: Table 1'),
)
# TODO: if we delete slide 6 and use slides=[6, 7], this causes an error
def test_copy_slide(self, slides=[7, 8]):
data = [1, 1.5, 2]
prs = pptgen(source=self.input, target=self.output, only=slides, mode='expr', rules=[
{
'slide-numbers': [1, 2],
'copy-slide': data,
'data': {'mycopy': 'copy'},
'Title 1': {'text': 'f"{copy.pos}: {copy.key} - {copy.val}: {len(copy.slides)}"'},
'TL': {'top': 'copy.val', 'left': 'mycopy.val'},
'TC': {'top': 'copy.val', 'left': 'mycopy.val * 2'},
'TR': {'top': 'copy.val', 'left': 'mycopy.val * 3'},
'CL': {'top': 'copy.val * 2', 'left': 'mycopy.val'},
'CC': {'top': 'copy.val * 2', 'left': 'mycopy.val * 2'},
'CR': {'top': 'copy.val * 2', 'left': 'mycopy.val * 3'},
'BL': {'top': 'copy.val * 3', 'left': 'mycopy.val'},
'BC': {'top': 'copy.val * 3', 'left': 'mycopy.val * 2'},
'BR': {'top': 'copy.val * 3', 'left': 'mycopy.val * 3'},
}
])
# All shapes are copied into 3 slides?
eq_(len(prs.slides), len(slides) * len(data))
names = [[shape.name for shape in slide.shapes] for slide in prs.slides]
eq_(names[0], names[2])
eq_(names[0], names[4])
eq_(names[1], names[3])
eq_(names[1], names[5])
# Titles are copied?
eq_(prs.slides[0].shapes.title.text, '0: 0 - 1: 2')
eq_(prs.slides[2].shapes.title.text, '1: 1 - 1.5: 2')
eq_(prs.slides[4].shapes.title.text, '2: 2 - 2: 2')
# Position commands are applied?
for val, slide in zip(data, (1, 3, 5)):
eq_(self.get_shape(prs.slides[slide].shapes, 'TL').left, pptx.util.Inches(val))
eq_(self.get_shape(prs.slides[slide].shapes, 'TL').top, pptx.util.Inches(val))
eq_(self.get_shape(prs.slides[slide].shapes, 'CC').left, pptx.util.Inches(2 * val))
eq_(self.get_shape(prs.slides[slide].shapes, 'CC').top, pptx.util.Inches(2 * val))
eq_(self.get_shape(prs.slides[slide].shapes, 'BR').left, pptx.util.Inches(3 * val))
eq_(self.get_shape(prs.slides[slide].shapes, 'BR').top, pptx.util.Inches(3 * val))
# Background is copied?
for n in (1, 3, 5):
eq_(prs.slides[n].background.fill.fore_color.theme_color, MSO_THEME_COLOR.ACCENT_3)
self.assertAlmostEqual(prs.slides[n].background.fill.fore_color.brightness, 0.8)
# Links are copied?
for n in (0, 2, 4):
shape = self.get_shape(prs.slides[n].shapes, 'Freeform 1')
eq_(shape.click_action.hyperlink.address, 'https://t.co/')
para = self.get_shape(prs.slides[n].shapes, 'TextBox 1').text_frame.paragraphs[0]
eq_(para.runs[1]._r.find('.//' + qn('a:hlinkClick')).get('action'),
'ppaction://hlinkshowjump?jump=firstslide')
def test_commandline(self):
# "slidesense" prints usage
with OutputCapture() as logs:
commandline([])
ok_(logs.captured.startswith('usage: slidesense'))
# "slidesense nonexistent.yaml" prints an error
with LogCapture() as logs:
commandline(['nonexistent.yaml'])
logs.check_present(
('gramex', 'ERROR', 'No rules found in file: nonexistent.yaml')
)
# "slidesense gramex.yaml nonexistent-url" prints an error
with LogCapture() as logs:
path = os.path.join(folder, 'slidesense-gramex.yaml')
commandline([path, 'nonexistent-url'])
logs.check_present(
('gramex', 'ERROR', 'No PPTXHandler matched in file: ' + path)
)
target = os.path.join(folder, 'output.pptx')
non_target = os.path.join(folder, 'nonexistent.pptx')
for args in (
('slidesense-config.yaml', ),
('slidesense-gramex.yaml', ),
('slidesense-gramex.yaml', 'slidesense-test'),
):
self.remove_output()
commandline([os.path.join(folder, args[0]), *args[1:],
f'--target={target}', '--no-open'])
ok_(os.path.exists(target))
ok_(not os.path.exists(non_target))
@classmethod
def tearDown(cls):
cls.remove_output()
|
py | 7dff4f4f2fdd5465e9261ac28e3de35525dceca0 | # pylint: disable=no-self-use,misplaced-comparison-constant
class TestLatest:
def test_get_latest(self, client):
response = client.get("/latest")
assert 200 == response.status_code
assert 'text/html' == response.mimetype
assert 'src="/latest1.jpg"' in response.get_data(as_text=True)
|
py | 7dff4f9c151aee1e42aee79bc14fabc3c4371772 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListQueuesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'include_deadletter': 'bool'
}
attribute_map = {
'include_deadletter': 'include_deadletter'
}
def __init__(self, include_deadletter=None):
"""ListQueuesRequest - a model defined in huaweicloud sdk"""
self._include_deadletter = None
self.discriminator = None
if include_deadletter is not None:
self.include_deadletter = include_deadletter
@property
def include_deadletter(self):
"""Gets the include_deadletter of this ListQueuesRequest.
Whether to include dead letter messages. Supported values: - true: include dead letter messages. - false: do not include dead letter messages. Default: false. Kafka queues have no dead letter feature, so this parameter has no effect for Kafka queues.
:return: The include_deadletter of this ListQueuesRequest.
:rtype: bool
"""
return self._include_deadletter
@include_deadletter.setter
def include_deadletter(self, include_deadletter):
"""Sets the include_deadletter of this ListQueuesRequest.
Whether to include dead letter messages. Supported values: - true: include dead letter messages. - false: do not include dead letter messages. Default: false. Kafka queues have no dead letter feature, so this parameter has no effect for Kafka queues.
:param include_deadletter: The include_deadletter of this ListQueuesRequest.
:type: bool
"""
self._include_deadletter = include_deadletter
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListQueuesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
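# Minimal usage sketch (not part of the generated SDK file): construct the
# request and inspect its serialized form; the client call that would send it
# is omitted here.
if __name__ == '__main__':
    request = ListQueuesRequest(include_deadletter=True)
    print(request.to_dict())  # {'include_deadletter': True}
    print(request)            # JSON string via to_str()/__repr__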
|
py | 7dff4f9e30bdca804b258578e0666b954209c716 | import ocflib.account.search as search
import ocflib.infra.ldap as ldap
from ocflib.infra.ldap import UCB_LDAP_PEOPLE
def get_calnet_names(uid):
"""Returns CalNet LDAP entries relating to names"""
attrs = search.user_attrs_ucb(uid)
if attrs:
return {key: attrs[key]
for key in ('givenName', 'sn', 'displayName') if key in attrs}
def name_by_calnet_uid(uid):
"""Returns the name of CalNet person, searched by CalNet UID.
Returns None on failure.
"""
names = get_calnet_names(uid)
if not names:
return None
# the name we want to input into our system is "givenName sn"
# displayName is not necessarily equal to what's printed on Cal 1 Cards
def get_longest_string(strs):
return max(strs, key=len)
if 'givenName' in names and 'sn' in names:
given_name = get_longest_string(names['givenName'])
sn = get_longest_string(names['sn'])
if given_name and sn:
return '{} {}'.format(given_name, sn)
else:
return names.get('displayName')
def calnet_uids_by_name(name):
"""Searches for people by name and returns any CalNet UIDs found.
>>> calnet_uids_by_name("Dara Adib")
[872544]
"""
conds = ''.join(['(cn=*{}*)'.format(n) for n in name.split()])
ldap_filter = '(&{})'.format(conds)
with ldap.ldap_ucb() as c:
c.search(UCB_LDAP_PEOPLE, ldap_filter, attributes=('uid',))
return [int(entry['attributes']['uid'][0]) for entry in c.response]
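# Hedged example (not part of the original module) showing how these helpers
# compose; it needs network access to the UCB CalNet LDAP directory, so it is
# guarded rather than written as a test.
if __name__ == '__main__':
    for uid in calnet_uids_by_name('Dara Adib'):
        print(uid, name_by_calnet_uid(uid))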
|
py | 7dff4fb6220a0dc642d301804e9c456b0dbcb1c7 | from hp4controller.virtualdevice.virtualdevice import VirtualDevice
from hp4controller.virtualdevice.p4rule import P4Rule
from hp4controller.virtualdevice.interpret import Interpretation
from hp4controller.p4command import P4Command
from hp4controller.errors import AddRuleError, LoadError, VirtnetError
import copy
import code
from inspect import currentframe, getframeinfo
def debug():
""" Break and enter interactive method after printing location info """
# written before I knew about the pdb module
caller = currentframe().f_back
method_name = caller.f_code.co_name
line_no = getframeinfo(caller).lineno
print(method_name + ": line " + str(line_no))
code.interact(local=dict(globals(), **caller.f_locals))
FILTERED = 1
UNFILTERED = 0
filteredlookup = {'filtered': FILTERED, 'unfiltered': UNFILTERED}
class Lease(object):
def __init__(self, dev_name, dev, entry_limit, ports):
self.dev_name = dev_name
self.device = dev
self.entry_limit = entry_limit
self.entry_usage = 0
self.ports = ports
self.vdevs = {} # {vdev_name (string): vdev (VirtualDevice)}
self.assignments = {} # {pport : vdev_ID}
self.assignment_handles = {} # {pport : tset_context rule handle}
vegress_val = 1
self.egress_map = {} # {vegress_spec (int): egress_spec (int)}
self.ingress_map = {} # {pport (int): virt_ingress_port (int)}
self.egress_map[0] = 0
# note, following assumes port id == egress_spec
for port in ports:
self.egress_map[vegress_val] = port
self.ingress_map[port] = vegress_val
vegress_val += 1
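# Illustrative (hypothetical port numbers): with ports=[5, 6] the loop above
# produces egress_map == {0: 0, 1: 5, 2: 6} and ingress_map == {5: 1, 6: 2}.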
# get mcast_grp_id from device
self.mcast_grp_id = self.device.assign_mcast_grp_id()
# create/associate mcast_grp, mcast_node
self.mcast_node_handle = self.device.mcast_setup(self.mcast_grp_id, self.ports)
self.mcast_egress_specs = {} # {vegress_spec (int): FILTERED|UNFILTERED (int)}
def revoke(self):
for vdev_name in self.vdevs.keys():
vdev = self.vdevs[vdev_name]
vdev.dev_name = 'none'
self.lease_remove([vdev_name], vdev)
# delete rules for tset_context
for port in self.assignments.keys():
table = 'tset_context'
handle = self.assignment_handles[port]
rule_identifier = table + ' ' + str(handle)
self.device.do_table_delete(rule_identifier)
self.assignments = {}
self.assignment_handles = {}
# delete mcast group and node
self.device.mcast_teardown(self.mcast_grp_id, self.mcast_node_handle)
for port in self.ports:
self.device.phys_ports_remaining.append(port)
self.device.reserved_entries -= self.entry_limit
def debug_lvd(self, vdev):
ret = ''
keyst2_hp4rules = []
keyst2_hp4_code_and_rules = []
keys21_hp4rules = []
keys21_hp4_code_and_rules = []
nrules_encrypt = []
for key in vdev.hp4rules.keys():
if key[0] == 't2_extracted_ternary':
keyst2_hp4rules.append(key)
elif key[0] == 't_bit_xor_21':
keys21_hp4rules.append(key)
for key in vdev.hp4_code_and_rules.keys():
if key[0] == 't2_extracted_ternary':
keyst2_hp4_code_and_rules.append(key)
elif key[0] == 't_bit_xor_21':
keys21_hp4_code_and_rules.append(key)
for key in vdev.nrules.keys():
if key[0] == 'encrypt':
nrules_encrypt.append(key)
keyst2_hp4rules.sort()
keyst2_hp4_code_and_rules.sort()
keys21_hp4rules.sort()
keys21_hp4_code_and_rules.sort()
nrules_encrypt.sort()
ret += 'vdev.hp4rules:\n'
start = str(keyst2_hp4rules[0][1])
stop = str(keyst2_hp4rules[-1][1])
ret += '\tt2_extracted_ternary: ' + start + ' - ' + stop + '\n'
start = str(keys21_hp4rules[0][1])
stop = str(keys21_hp4rules[-1][1])
ret += '\tt_bit_xor_21: ' + start + ' - ' + stop + '\n'
ret += 'vdev.hp4_code_and_rules: ' + str(len(vdev.hp4_code_and_rules)) + ' entries\n'
if keyst2_hp4_code_and_rules:
start = str(keyst2_hp4_code_and_rules[0][1])
stop = str(keyst2_hp4_code_and_rules[-1][1])
ret += '\tt2_extracted_ternary: ' + start + ' - ' + stop + '\n'
if keys21_hp4_code_and_rules:
start = str(keys21_hp4_code_and_rules[0][1])
stop = str(keys21_hp4_code_and_rules[-1][1])
ret += '\tt_bit_xor_21: ' + start + ' - ' + stop + '\n'
ret += 'vdev.nrules:\n'
start = str(nrules_encrypt[0][1])
stop = str(nrules_encrypt[-1][1])
ret += '\tencrypt: ' + start + ' - ' + stop + '\n'
ret += 'vdev.nrules[(encrypt, 1)].hp4_rule_keys:\n'
interp = vdev.nrules[('encrypt', 1)]
t2val = 9999
tbx21val = 9999
for key in interp.hp4_rule_keys:
if key[0] == 't2_extracted_ternary':
t2val = key[2]
elif key[0] == 't_bit_xor_21':
tbx21val = key[2]
ret += '\tt2_extracted_ternary: ' + str(t2val) + '\n'
ret += '\tt_bit_xor_21: ' + str(tbx21val)
debug()
return ret
def load_virtual_device(self, vdev_name, vdev, egress_mode):
# validate request
# - validate vdev_name
if vdev_name in self.vdevs:
debug()
raise LoadError(vdev_name + ' already present')
# - validate lease has sufficient entries
entries_available = self.entry_limit - self.entry_usage
if (len(vdev.hp4_code_and_rules) > entries_available):
debug()
raise LoadError('request(' + str(len(vdev.hp4_code_and_rules)) +
') exceeds entries available(' + str(entries_available) + ')')
# - validate virtual device not already somewhere else
if vdev.dev_name != 'none':
debug()
raise LoadError('first remove ' + vdev_name + ' from ' + vdev.dev_name)
#if vdev_name == 's1_vib_enc':
# debug()
vdev.hp4_code_and_rules = {}
def func(rule):
table = rule.table
command_type = 'table_add'
action = rule.action
aparams = list(rule.aparams)
if egress_mode == 'efalse':
if ('t_mod_' in table) and ('mod_stdmeta_egressspec' in rule.action):
action = '_no_op'
aparams = aparams[-1]
elif egress_mode == 'econd':
if (table == 'tset_pipeline_config'):
aparams[2] = '1'
elif egress_mode != 'etrue':
debug()
raise LoadError('Invalid egress handling mode: ' + egress_mode)
if action == 'mod_intmeta_mcast_grp_const':
aparams[0] = str(self.mcast_grp_id)
attribs = {'table': table,
'action': action,
'mparams': rule.mparams,
'aparams': aparams}
handle = self.send_command(P4Command(command_type, attribs))
return table, handle
def addRuleErrorHandler(e):
# remove all entries already added
for table, handle in list(vdev.hp4_code_and_rules.keys()):  # copy keys; entries are deleted below
rule_identifier = table + ' ' + str(handle)
self.device.do_table_delete(rule_identifier)
del vdev.hp4_code_and_rules[(table, handle)]
debug()
raise LoadError('Lease::insert: ' + str(e))
for rule in vdev.hp4code:
try:
table, handle = func(rule)
vdev.hp4_code_and_rules[(table, handle)] = rule
except AddRuleError as e:
addRuleErrorHandler(e)
new_hp4rules = {}
for nrule in vdev.nrules:
interp = vdev.nrules[nrule]
new_hp4_rule_keys = []
for key in interp.hp4_rule_keys:
# this case likely corresponds to default rule
if (key[0], key[2]) not in vdev.hp4rules:
continue
rule = vdev.hp4rules[(key[0], key[2])]
try:
table, handle = func(rule)
vdev.hp4_code_and_rules[(table, handle)] = rule
new_hp4rules[(table, handle)] = rule
except AddRuleError as e:
addRuleErrorHandler(e)
new_hp4_rule_keys.append((key[0], key[1], handle))
interp.hp4_rule_keys = new_hp4_rule_keys
vdev.hp4rules = new_hp4rules
self.entry_usage = len(vdev.hp4code) + len(vdev.hp4rules)
#if vdev_name == 's1_vib_enc':
# debug()
def send_command(self, p4cmd):
"Send command to associated device, return handle"
return self.device.send_command(self.device.command_to_string(p4cmd))
def lease_remove(self, parameters, vdev):
"Remove virtual device from Lease (does not destroy virtual device)"
vdev_name = parameters[0]
num_entries = len(vdev.hp4_code_and_rules)
# pull all virtual device related rules from device
for table, handle in vdev.hp4_code_and_rules.keys():
rule_identifier = table + ' ' + str(handle)
self.device.do_table_delete(rule_identifier)
vdev.hp4_code_and_rules = {}
self.entry_usage -= num_entries
vdev.dev_name = 'none'
# make lease forget about it (Lease's owning Slice still has it)
del self.vdevs[vdev_name]
def lease_config_egress(self, parameters):
egress_spec = int(parameters[0])
command_type = parameters[1]
self.mcast_egress_specs[egress_spec] = filteredlookup[parameters[2]]
return 'Egress ' + str(egress_spec) + ' configured'
def lease_dump(self):
ret = ''
for vdev in self.vdevs:
ret += self.vdevs[vdev].info()
return ret[:-1]
def print_vdevs(self):
ret = ''
for vdev in self.vdevs:
ret += ' ' + vdev + '\n'
return ret
def __str__(self):
ret = 'entry usage/limit: ' + str(self.entry_usage) + '/' \
+ str(self.entry_limit) + '\n'
ret += 'ports:' + str(self.ports) + '\n'
ret += 'virtual devices:\n'
ret += self.print_vdevs()
#ret += 'composition: ' + str(self.composition)
return ret
class Chain(Lease):
def __init__(self, dev_name, dev, entry_limit, ports):
super(Chain, self).__init__(dev_name, dev, entry_limit, ports)
self.vdev_chain = [] # vdev_names (strings)
def handle_request(self, parameters, *args):
return super(Chain, self).handle_request(parameters, args)
def print_vdevs(self):
ret = ''
for vdev in self.vdev_chain:
ret += ' ' + vdev + '\n'
return ret
def p2vdev(self, vdev):
"Connect physical interfaces to virtual device"
vdev_ID = vdev.virtual_device_ID
if len(self.assignments) > 0:
# table_modify
for port in self.assignments:
handle = self.assignment_handles[port]
command_type = 'table_modify'
attribs = {'table': 'tset_context',
'action': 'a_set_context',
'handle': str(handle),
'aparams': [str(vdev_ID), str(self.ingress_map[port])]}
command = P4Command(command_type, attribs)
self.assignments[port] = vdev_ID
# self.assignments_handles[port] = <- handle doesn't change
self.send_command(command)
else:
# table_add
for port in self.ports:
command_type = 'table_add'
attribs = {'table': 'tset_context',
'action': 'a_set_context',
'mparams': [str(port)],
'aparams': [str(vdev_ID), str(self.ingress_map[port])]}
command = P4Command(command_type, attribs)
self.assignments[port] = vdev_ID
self.assignment_handles[port] = self.send_command(command)
def install_mcast_rules(self, vdev, vegress):
vdev_ID = vdev.virtual_device_ID
command_type = 'table_add'
filtered = self.mcast_egress_specs[vegress]
attribs = self.device.get_mcast_attribs(vdev_ID,
vegress,
self.mcast_grp_id,
filtered)
handle = self.send_command(P4Command(command_type, attribs))
vdev.t_virtnet_handles[vegress] = handle
def vdev2p(self, vdev):
"Connect virtual device to physical interfaces"
vdev_ID = vdev.virtual_device_ID
# t_virtnet
# t_egr_virtnet
if len(vdev.t_virtnet_handles) > 0:
# table_delete
# self.t_virtnet_handles = {} # KEY: vegress_spec (int)
# VALUE: hp4-facing handle (int)
for vegress in list(vdev.t_virtnet_handles.keys()):  # copy keys; entries are deleted below
attribs = {'table': 't_virtnet',
'handle': str(vdev.t_virtnet_handles[vegress])}
self.send_command(P4Command('table_delete', attribs))
del vdev.t_virtnet_handles[vegress]
if len(vdev.t_egr_virtnet_handles) > 0:
# eliminate
for vegress in list(vdev.t_egr_virtnet_handles.keys()):  # copy keys; entries are deleted below
attribs = {'table': 't_egr_virtnet',
'handle': str(vdev.t_egr_virtnet_handles[vegress])}
self.send_command(P4Command('table_delete', attribs))
del vdev.t_egr_virtnet_handles[vegress]
else:
if len(vdev.t_egr_virtnet_handles) > 0:
debug()
raise VirtnetError('vdev2p: t_egr_virtnet has entries when t_virtnet doesn\'t')
for vegress in self.mcast_egress_specs:
self.install_mcast_rules(vdev, vegress)
for vegress in self.egress_map:
command_type = 'table_add'
attribs = {'table': 't_virtnet',
'action': 'do_phys_fwd_only',
'mparams': [str(vdev_ID), str(vegress)],
'aparams': [str(self.egress_map[vegress]), str(UNFILTERED)]}
handle = self.send_command(P4Command(command_type, attribs))
vdev.t_virtnet_handles[vegress] = handle
def vdev2vdev(self, src_vdev, dest_vdev):
"Connect source virtual device to destination virtual device"
src_vdev_ID = src_vdev.virtual_device_ID
dest_vdev_ID = dest_vdev.virtual_device_ID
vingress = str(len(self.ports) + dest_vdev_ID)
# t_virtnet src -> dest
# t_egr_virtnet src -> dest
if len(src_vdev.t_virtnet_handles) > 0:
# table_modify
if len(src_vdev.t_egr_virtnet_handles) > 0:
# table_modify
for vegress in src_vdev.t_virtnet_handles:
command_type = 'table_modify'
attribs = {'table': 't_egr_virtnet',
'action': 'vfwd',
'handle': str(src_vdev.t_egr_virtnet_handles[vegress]),
'aparams': [str(dest_vdev_ID), vingress]}
self.send_command(P4Command(command_type, attribs))
else:
# table_add
for vegress in src_vdev.t_virtnet_handles:
command_type = 'table_add'
attribs = {'table': 't_egr_virtnet',
'action': 'vfwd',
'mparams': [str(src_vdev_ID), str(vegress)],
'aparams': [str(dest_vdev_ID), vingress]}
handle = self.send_command(P4Command(command_type, attribs))
src_vdev.t_egr_virtnet_handles[vegress] = handle
print("vdev2vdev: ln 407")
for vegress in src_vdev.t_virtnet_handles:
# self.t_virtnet_handles = {} # KEY: vegress_spec (int)
# VALUE: hp4-facing handle (int)
command_type = 'table_modify'
attribs = {'table': 't_virtnet',
'action': 'do_virt_fwd',
'handle': str(src_vdev.t_virtnet_handles[vegress]),
'aparams': []}
self.send_command(P4Command(command_type, attribs))
else:
# table_add
if len(src_vdev.t_egr_virtnet_handles) > 0:
debug()
raise VirtnetError('vdev2vdev: t_egr_virtnet has entries when t_virtnet doesn\'t')
command_type = 'table_add'
for vegress in self.egress_map:
attribs = {'table': 't_egr_virtnet',
'action': 'vfwd',
'mparams': [str(src_vdev_ID), str(vegress)],
'aparams': [str(dest_vdev_ID), vingress]}
handle = self.send_command(P4Command(command_type, attribs))
src_vdev.t_egr_virtnet_handles[vegress] = handle
for vegress in self.egress_map:
attribs = {'table': 't_virtnet',
'action': 'do_virt_fwd',
'mparams': [str(src_vdev_ID), str(vegress)],
'aparams': []}
handle = self.send_command(P4Command(command_type, attribs))
src_vdev.t_virtnet_handles[vegress] = handle
print("vdev2vdev: end of function")
def lease_replace(self, parameters, vdev, new_vdev):
# parameters:
# <old virtual device name> <new virtual device name> <egress mode>
vdev_name = parameters[0]
new_vdev_name = parameters[1]
egress_mode = parameters[2]
try:
self.load_virtual_device(new_vdev_name, new_vdev, egress_mode)
except LoadError as e:
if 'already present' in str(e):
pass
else:
return 'Error - could not load ' + new_vdev_name + '; ' + str(e)
chain = self.vdev_chain
position = chain.index(vdev_name)
if position >= len(chain) - 1:
self.vdev2p(new_vdev)
if (len(chain) > 0) and (position < len(chain) - 1):
rightvdev_name = chain[position + 1]
rightvdev = self.vdevs[rightvdev_name]
self.vdev2vdev(new_vdev, rightvdev)
if len(chain) > 0 and position > 0:
leftvdev_name = chain[position - 1]
leftvdev = self.vdevs[leftvdev_name]
self.vdev2vdev(leftvdev, new_vdev)
if position == 0:
self.p2vdev(new_vdev)
# update vdev_chain
chain.remove(vdev_name)
chain.insert(position, new_vdev_name)
return 'Virtual device ' + vdev_name + ' replaced with ' + new_vdev_name
def lease_insert(self, parameters, vdev):
# parameters:
# <virtual device name> <position> <egress handling mode>
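# Illustrative call (a sketch; the chain and vdev names are assumptions, and
# the egress handling mode must be one of 'etrue', 'efalse' or 'econd'):
#   chain.lease_insert(['vib_enc', '0', 'etrue'], vdev)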
vdev_name = parameters[0]
position = int(parameters[1])
egress_mode = parameters[2]
vdev_ID = vdev.virtual_device_ID
try:
self.load_virtual_device(vdev_name, vdev, egress_mode)
except LoadError as e:
if 'already present' in str(e):
pass
else:
return 'Error - could not load ' + vdev_name + '; ' + str(e)
chain = self.vdev_chain
#debug()
if position >= len(chain):
print('STARTING vdev2p: ' + vdev_name)
self.vdev2p(vdev)
print('COMPLETED vdev2p: ' + vdev_name)
if (len(chain) > 0) and (position < len(chain)):
rightvdev_name = chain[position]
rightvdev = self.vdevs[rightvdev_name]
print('STARTING vdev2vdev: ' + rightvdev_name + ' -> ' + vdev_name)
self.vdev2vdev(vdev, rightvdev)
print('COMPLETED vdev2vdev: ' + rightvdev_name + ' -> ' + vdev_name)
if len(chain) > 0 and position > 0:
leftvdev_name = chain[position - 1]
leftvdev = self.vdevs[leftvdev_name]
print('STARTING vdev2vdev: ' + leftvdev_name + ' -> ' + vdev_name)
self.vdev2vdev(leftvdev, vdev)
print('COMPLETED vdev2vdev: ' + leftvdev_name + ' -> ' + vdev_name)
if position == 0:
self.p2vdev(vdev)
print('p2vdev: ' + vdev_name)
chain.insert(position, vdev_name)
vdev.dev_name = self.dev_name
self.vdevs[vdev_name] = vdev
vdev.mcast_grp_id = self.mcast_grp_id
return 'Virtual Device ' + vdev_name + ' inserted at position ' + str(position)
def lease_append(self, parameters, vdev):
# parameters:
# <virtual device name> <egress handling mode>
parameters.insert(1, len(self.vdev_chain))
return self.lease_insert(parameters, vdev)
def lease_remove(self, parameters, vdev):
vdev_name = parameters[0]
chain = self.vdev_chain
position = chain.index(vdev_name)
if position == 0:
if len(chain) > 1:
# rightvdev exists; modify tset_context rules
rightvdev_name = chain[1]
rightvdev = self.vdevs[rightvdev_name]
self.p2vdev(rightvdev)
else:
# no rightvdev; delete tset_context rules
command_type = 'table_delete'
for port in list(self.assignments.keys()):  # copy keys; entries are deleted below
attribs = {'table': 'tset_context',
'handle': str(self.assignment_handles[port])}
self.send_command(P4Command(command_type, attribs))
del self.assignments[port]
del self.assignment_handles[port]
else:
leftvdev_name = chain[position - 1]
leftvdev = self.vdevs[leftvdev_name]
if position < (len(chain) - 1):
rightvdev_name = chain[position + 1]
rightvdev = self.vdevs[rightvdev_name]
self.vdev2vdev(leftvdev, rightvdev)
else:
self.vdev2p(leftvdev)
# delete vdev's t_virtnet/t_egr_virtnet entries
for vegress in list(vdev.t_virtnet_handles.keys()):  # copy keys; entries are deleted below
handle = vdev.t_virtnet_handles[vegress]
attribs = {'table': 't_virtnet',
'handle': str(handle)}
self.send_command(P4Command('table_delete', attribs))
del vdev.t_virtnet_handles[vegress]
for vegress in list(vdev.t_egr_virtnet_handles.keys()):  # copy keys; entries are deleted below
handle = vdev.t_egr_virtnet_handles[vegress]
attribs = {'table': 't_egr_virtnet',
'handle': str(handle)}
self.send_command(P4Command('table_delete', attribs))
del vdev.t_egr_virtnet_handles[vegress]
super(Chain, self).lease_remove(parameters, vdev)
chain.remove(vdev_name)
return 'Virtual device ' + vdev_name + ' removed'
def lease_config_egress(self, parameters):
super(Chain, self).lease_config_egress(parameters)
vegress = int(parameters[0])
if len(self.vdev_chain) > 0:
end_vdev_name = self.vdev_chain[-1]
end_vdev = self.vdevs[end_vdev_name]
self.install_mcast_rules(end_vdev, vegress)
return 'Chain: Egress ' + str(vegress) + ' configured'
def __str__(self):
ret = super(Chain, self).__str__()
ret += 'Chain: \n'
for i in range(len(self.vdev_chain)):
ret += ' -> ' + self.vdev_chain[i]
return ret
class DAG(Lease):
pass
class VirtualNetwork(Lease):
pass
|
py | 7dff511cf7cd50c0c30b862cf1979a4b23762373 | from pylsl import StreamInlet
from PyQt5 import QtCore, QtWidgets
import pyqtgraph as pg
import pylslhandler
timestamp_arr, TP9_arr, AF7_arr, AF8_arr, TP10_arr, AUX_arr = ([] for i in range(6))
tickInterval = 1 #milliseconds
yRange = 1700 #microVolts
xRange = 500 #milliseconds of readings
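# Sketch of the raw-to-microvolt conversion used in pullData() below, assuming
# the values noted there (relative range 1000, 3.3 V reference, AFE gain 1961).
# Shown only for clarity; the class applies the same expression inline.
def raw_to_microvolts(raw_sample):
    """Convert a relative Muse reading to an electrical potential in uV."""
    return (raw_sample / 1000) * 3.3 * (1 / 1961) * 1000000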
class LiveEEGViewer(pg.GraphicsWindow):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.mainLayout = QtWidgets.QVBoxLayout()
self.setLayout(self.mainLayout)
self.timer = QtCore.QTimer(self)
self.timer.setInterval(tickInterval)
self.timer.start()
self.timer.timeout.connect(self.pullData)
self.plt = self.addPlot(title="Muse Raw EEG Stream (red: TP9, green: AF7, blue: AF8, pink: TP10)")
self.plt.setLabel("left", "Potential (uV)", color="grey")
self.plt.setYRange(-yRange, yRange)
self.curve_TP9 = self.plt.plot(pen=pg.mkPen(color=(255, 0, 0)))
self.curve_AF7 = self.plt.plot(pen=pg.mkPen(color=(0, 255, 0)))
self.curve_AF8 = self.plt.plot(pen=pg.mkPen(color=(0, 0, 255)))
self.curve_TP10 = self.plt.plot(pen=pg.mkPen(color=(255, 0, 255)))
#self.curve_AUX = self.plt.plot(pen=pg.mkPen(color=(0, 255, 255)))
def setData(self, x, yTP9, yAF7, yAF8, yTP10, yAUX):
self.curve_TP9.setData(x, yTP9)
self.curve_AF7.setData(x, yAF7)
self.curve_AF8.setData(x, yAF8)
self.curve_TP10.setData(x, yTP10)
#self.curve_AUX.setData(x, yAUX)
def pullData(self):
sample, timestamp = inlet.pull_sample()
if len(TP9_arr) >= xRange:
TP9_arr.pop(0)
AF7_arr.pop(0)
AF8_arr.pop(0)
TP10_arr.pop(0)
#AUX_arr.pop(0)
timestamp_arr.pop(0)
#convert relative values to electrical potential (uV)
#range=1000, voltage=3.3, gain of AFE=1961
TP9_arr.append((sample[0]/1000)*3.3*(1/1961)*1000000)
AF7_arr.append((sample[1]/1000)*3.3*(1/1961)*1000000)
AF8_arr.append((sample[2]/1000)*3.3*(1/1961)*1000000)
TP10_arr.append((sample[3]/1000)*3.3*(1/1961)*1000000)
#AUX_arr.append((sample[4]/1000)*3.3*(1/1961)*1000000)
timestamp_arr.append(timestamp)
self.setData(timestamp_arr, TP9_arr, AF7_arr, AF8_arr, TP10_arr, AUX_arr)
def main():
app = QtWidgets.QApplication([])
window = LiveEEGViewer()
window.show()
window.resize(800,600)
window.setWindowTitle('Muse Raw EEG Stream')
window.raise_()
app.exec_()
if __name__ == "__main__":
#first resolve an EEG stream on the lab network
streams = pylslhandler.resolve_conn()
print("Connection established")
#create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
main() |
py | 7dff51ea3a0ee0107029f2f297e4406d6d94f330 | from functools import partial
from typing import Callable, List
from pyglet.window import mouse
from engine.models.ship import ShipModel
from engine.views.ship_parts.factories import ConfigViewFactory
from .base import BaseMenu, BaseButton
from .drydock import ControlConfiguration
class ControlConfigMenu(BaseMenu):
def __init__(self, heading: str, buttons, x, y, control_config: ControlConfiguration):
super().__init__(heading, buttons, x, y)
self.control_config = control_config
self.components: List[ControlConfiguration] = [control_config]
@classmethod
def manufacture_for_ship_model(cls, ship_model: ShipModel, close_menu_function: Callable, x, y,
font_size=36, screen_width=1280, screen_height=720):
left = 0
right = screen_width
bottom = 0
top = screen_height
control_config = ControlConfiguration(left, right, bottom, top, ship=ship_model,
view_factory=ConfigViewFactory())
heading = "Configure controls"
callables = [("<- Back", close_menu_function),
("Keyboard", partial(control_config.set_mode, "keyboard")),
("Gamepad", partial(control_config.set_mode, "gamepad")),
("Reset", control_config.reset),
("Save", control_config.save_all)]
height = int(font_size * 1.6)
width = int(height * 6)
height_spacing = int(height * 1.1)
buttons = []
for i, (name, func) in enumerate(callables):
i += 1
button = BaseButton.labeled_button(name, font_size=font_size, left=x, right=x + width,
bottom=y - height_spacing * i, top=y - height_spacing * i + height,
func=func)
buttons.append(button)
return cls(heading, buttons, x, y, control_config)
def _component_at(self, x, y):
for component in self.components:
if component.in_area(x, y):
return component
def draw(self):
super(ControlConfigMenu, self).draw()
self.control_config.draw()
def on_mouse_motion(self, x, y, dx, dy):
super(ControlConfigMenu, self).on_mouse_motion(x, y, dx, dy)
self.control_config.highlight_at(x, y)
def on_mouse_press(self, x, y, button, modifiers):
super(ControlConfigMenu, self).on_mouse_press(x, y, button, modifiers)
self.control_config.on_mouse_press(x, y, button, modifiers)
def on_mouse_release(self, x, y, button, modifiers):
self.control_config.on_mouse_release(x, y, button, modifiers)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
component = self._component_at(x, y)
if component:
if buttons & mouse.RIGHT:
component.translate(dx, dy)
if buttons & mouse.LEFT:
self.control_config.on_mouse_drag(x, y, dx, dy, buttons, modifiers)
def on_key_press(self, symbol, modifiers):
self.control_config.on_key_press(symbol, modifiers)
def on_joybutton_press(self, joystick, button):
self.control_config.on_joybutton_press(joystick, button)
def on_joyaxis_motion(self, joystick, axis, value):
if abs(value) > 0.9:
self.control_config.on_joyaxis_motion(joystick, axis, value)
|
py | 7dff5202ce62a570309c501264846d233fb57f78 | """myshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.i18n import i18n_patterns
urlpatterns = i18n_patterns(
path("admin/", admin.site.urls),
path("cart/", include("cart.urls", namespace="cart")),
path("orders/", include("orders.urls", namespace="orders")),
path("coupons/", include("coupons.urls", namespace="coupons")),
path("rosetta/", include("rosetta.urls")),
path("", include("shopp.urls", namespace="shopp")),
)
# Let the Django development server serve user-uploaded (media) files
from django.conf import settings
from django.conf.urls.static import static
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
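# The two settings used above are standard Django settings; the values below
# only illustrate what this project's settings.py is assumed to define:
#   MEDIA_URL = '/media/'
#   MEDIA_ROOT = os.path.join(BASE_DIR, 'media')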
|
py | 7dff5214d728283f4de69d2adda9d77d77fad387 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
class PSNR(Metric):
r"""
Computes `Computes Peak Signal-to-Noise Ratio`_ (PSNR):
.. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right)
Where :math:`\text{MSE}` denotes the `mean-squared-error`_ function.
Args:
data_range:
the range of the data. If None, it is determined from the data (max - min).
The ``data_range`` must be given when ``dim`` is not None.
base: a base of a logarithm to use (default: 10)
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
dim:
Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
None meaning scores will be reduced across all dimensions and all batches.
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Raises:
ValueError:
If ``dim`` is not ``None`` and ``data_range`` is not given.
Example:
>>> from torchmetrics import PSNR
>>> psnr = PSNR()
>>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> psnr(preds, target)
tensor(2.5527)
.. note::
Half precision is only support on GPU for this metric
"""
min_target: Tensor
max_target: Tensor
def __init__(
self,
data_range: Optional[float] = None,
base: float = 10.0,
reduction: str = "elementwise_mean",
dim: Optional[Union[int, Tuple[int, ...]]] = None,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
if dim is None and reduction != "elementwise_mean":
rank_zero_warn(f"The `reduction={reduction}` will not have any effect when `dim` is None.")
if dim is None:
self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
else:
self.add_state("sum_squared_error", default=[])
self.add_state("total", default=[])
if data_range is None:
if dim is not None:
# Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to
# calculate `data_range` in the future.
raise ValueError("The `data_range` must be given when `dim` is not None.")
self.data_range = None
self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=torch.min)
self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=torch.max)
else:
self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx="mean")
self.base = base
self.reduction = reduction
self.dim = tuple(dim) if isinstance(dim, Sequence) else dim
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
"""Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)
if self.dim is None:
if self.data_range is None:
# keep track of min and max target values
self.min_target = min(target.min(), self.min_target)
self.max_target = max(target.max(), self.max_target)
self.sum_squared_error += sum_squared_error
self.total += n_obs
else:
self.sum_squared_error.append(sum_squared_error)
self.total.append(n_obs)
def compute(self) -> Tensor:
"""Compute peak signal-to-noise ratio over state."""
if self.data_range is not None:
data_range = self.data_range
else:
data_range = self.max_target - self.min_target
if self.dim is None:
sum_squared_error = self.sum_squared_error
total = self.total
else:
sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])
total = torch.cat([values.flatten() for values in self.total])
return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)
|
py | 7dff537939cf9980601d15ab868f208c648b599c | import unittest
import pickle
import os
import torch
import torchani
path = os.path.dirname(os.path.realpath(__file__))
N = 10
class TestEnsemble(unittest.TestCase):
def setUp(self):
self.tol = 1e-5
self.conformations = 20
def _test_molecule(self, coordinates, species):
builtins = torchani.neurochem.Builtins()
coordinates.requires_grad_(True)
aev = builtins.aev_computer
ensemble = builtins.models
models = [torch.nn.Sequential(aev, m) for m in ensemble]
ensemble = torch.nn.Sequential(aev, ensemble)
_, energy1 = ensemble((species, coordinates))
force1 = torch.autograd.grad(energy1.sum(), coordinates)[0]
energy2 = [m((species, coordinates))[1] for m in models]
energy2 = sum(energy2) / len(models)
force2 = torch.autograd.grad(energy2.sum(), coordinates)[0]
energy_diff = (energy1 - energy2).abs().max().item()
force_diff = (force1 - force2).abs().max().item()
self.assertLess(energy_diff, self.tol)
self.assertLess(force_diff, self.tol)
def testGDB(self):
for i in range(N):
datafile = os.path.join(path, 'test_data/ANI1_subset/{}'.format(i))
with open(datafile, 'rb') as f:
coordinates, species, _, _, _, _ = pickle.load(f)
coordinates = torch.from_numpy(coordinates)
species = torch.from_numpy(species)
self._test_molecule(coordinates, species)
if __name__ == '__main__':
unittest.main()
|
py | 7dff53bffa484525623eee17a8cf0ec6577b227d | #!/usr/bin/env python3
#
# Copyright (c) Siemens AG, 2020
# [email protected]
#
# SPDX-License-Identifier: MIT
#
# This file implements analysis of stderr
#
import results
import json
import yaml
import sys
import re
injectFileName = "inject.yaml"
def call_analyse(identifier = None, fname = "func"):
# Note: findings shall not repeat themselves, if found more than once
findings = {}
# function that adds a potential new finding to the findings database
def addFinding(finding, tag=""):
if not(finding in findings):
findings[finding] = tag
def loadYamlFile(inFile):
_inDict = {}
try:
with open(inFile) as f:
_inDict = yaml.load(f, Loader=yaml.FullLoader)
except Exception as e:
#print("WARNING: "+str(e))
pass
return _inDict
nLinesInjected = {}
I = loadYamlFile(injectFileName)
for fName in I:
nLinesInjected[fName] = len(I[fName])
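# inject.yaml is assumed to map each challenge source file to the list of
# lines prepended to the user's code, e.g. (illustrative content):
#   func.c:
#     - '#include "redirect.h"'
# Only the number of injected lines matters here: it is subtracted from the
# line numbers reported by the compiler and sanitizers further below.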
chalConfig = loadYamlFile("config.yaml")
##########################################################################
# Main #
##########################################################################
#fNameR = r"func(?:_\d+)?\.cp?p?"
challengeFiles = chalConfig["files"]
if type(challengeFiles) is str:
challengeFiles = [challengeFiles]
for fname in challengeFiles:
# NOTE: we need to split this for loop - it is now very big...!
fNameR = fname
# +------------------------------------------+
# | Process compilaton errors |
# +------------------------------------------+
print("Search file:", fNameR)
try:
with open("compile.txt") as f:
errLines = f.read().split("\n")
lineNum = 0
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
if re.search("redirected_",errLine): errLine = re.sub("redirected_","",errLine)
# search for messages related only to the user file
# Errors that can be bypassed
if re.match(r"^collect2: error: ld returned 1 exit status$",errLine):
finding = "Linking failed!"
addFinding(finding)
#if re.search(r"\.o: No such file or directory$",errLine):
# finding = "File or directory missing!"
# addFinding(finding)
# deal with redirected functions
# redirect.h:17:14: error: too few arguments to function ‘redirected_time’
m = re.match(r"^"+fNameR+r":(\d+):(\d+): error: (.*)",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
errMsg = m.group(3)
m = re.search("redirected_",errMsg)
if m:
errMsg = re.sub("redirected_","",errMsg)
# continue searching for the line number
# func_7453459449.c:21:19: note: in expansion of macro ‘time’
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
lineNum = lineNum + 1
m = re.search(fNameR+ r":(\d+):(\d+): note: in expansion of macro (.*)",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errMsg)
addFinding(finding)
break
#
# TODO: capture name of file and compare to our files that we are searching for...
# if not in our dictionary, then this is a "Yikes" error
#
#### # Compiler error in a file not func*
#### # This needs to be improved here...
#### m = re.match(r"^((?!" + fNameR + r")).*error:.*",errLine)
#### if m:
#### print("Yikes: ("+str(m.groups()))+")",errLine)
#### finding = "ERROR in project! Help, Hurry ... call someone!?!?! Yikes!"
#### addFinding(finding)
# Compiler error in func.c or func.cpp - Type 1 (line number + column number)
# func.c:12:5: error: implicit declaration of function ‘xstrcpy’; did you mean ‘strcpy’? [-Werror=implicit-function-declaration]
m = re.search(fNameR+ r":(\d+):(\d+): error: (.*)",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
errorMsg = m.group(3)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errorMsg)
addFinding(finding)
# Compiler error in func.c or func.cpp - Type 2 (only line number)
m = re.search(fNameR+ r":(\d+): error: (.*)",errLine)
if m:
#print("BBB",errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
errorMsg = m.group(3)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errorMsg)
addFinding(finding)
# Compiler error in func.c or func.cpp - Type 3 (fatal error + line number + column number)
m = re.search( fNameR + r":(\d+):(\d+): fatal error: (.*)",errLine)
if m:
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
errorMsg = m.group(3)
finding = "ERROR ({fileName},{lineNumber}): {errMsg}".format(fileName=fNameR,lineNumber=lineNumber,errMsg=errorMsg)
addFinding(finding)
# Usage of deprecated functions
m = re.search( fNameR+ r":(\d+):(\d+): warning: ‘(.*)’ * is deprecated \[-Wdeprecated-declarations\]", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
funcName = m.group(3)
finding = "WARNING ({fileName},{lineNumber}): Function '{funcName}' is deprecated".format(fileName=fNameR,lineNumber=lineNumber,funcName=funcName)
addFinding(finding)
# func.c:28:9: warning: format not a string literal and no format arguments [-Wformat-security]
if 'format not a string literal and no format arguments [-Wformat-security]' in errLine:
# func.c:22:14: runtime error: signed integer overflow: 244140625 * 25 cannot be represented in type 'int'
m = re.search( fNameR + r":(\d+):(\d+):", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "WARNING ({fileName},{lineNumber}): A format string attack is possible".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
lineNum = lineNum + 1
except Exception as e:
print("Exception: "+str(e))
# +------------------------------------------+
# | Process findings from stderr |
# +------------------------------------------+
try:
with open("stderr.txt") as f:
errLines = f.read().split("\n")
lineNum = 0
found_asan = False
added_asan_finding = False
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
if "runtime error: signed integer overflow" in errLine:
# func.c:22:14: runtime error: signed integer overflow: 244140625 * 25 cannot be represented in type 'int'
m = re.search(fNameR+ r":(\d+):(\d+): runtime error: signed integer overflow", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "ERROR ({fileName},{lineNumber}): There is a signed integer overflow vulnerability".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
if "runtime error: division by zero" in errLine:
# func.c:25:17: runtime error: division by zero
m = re.search(fNameR+ r":(\d+):(\d+): runtime error: division by zero", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "ERROR ({fileNamer},{lineNumber}): There is a division-by-zero vulnerability".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
if "runtime error: reference binding to null pointer of type" in errLine:
m = re.search(fNameR+ r":(\d+):(\d+): runtime error: member call on null pointer of type \.*", errLine)
if m:
lineNumber = int(m.group(1)) - nLinesInjected.get(fNameR,0)
colNumber = int(m.group(2))
finding = "ERROR ({fileName},{lineNumber}): Null pointer access".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
# findings by AddressSanitizer and LeakSanitizer
# ==============================================
if re.search(r"AddressSanitizer: ",errLine):
found_asan = True
# search for AddressSanitizer: buffer overflow
if re.search(r"AddressSanitizer: stack-buffer-overflow on address",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
m = re.search(r"'(.*?)'.*<== Memory access at offset \d+ overflows this variable",errLine)
if m:
varName = m.group(1)
finding = "Stack overflow on variable '{varName}'".format(varName=varName)
addFinding(finding)
added_asan_finding = True
break
m = re.search(r"'(.*?)'.*<== Memory access at offset \d+ underflows this variable",errLine)
if m:
varName = m.group(1)
finding = "Stack underflow on variable '{varName}'".format(varName=varName)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
# search for AddressSanitizer: buffer overflow
elif re.search(r"^\*\*\* stack smashing detected \*\*\*",errLine):
finding = "Possible stack smashing was detected"
addFinding(finding)
# Example: ==4==ERROR: AddressSanitizer: SEGV on unknown address 0x5566f04f9933 (pc 0x5566f04db4d6 bp 0x7ffe1f0c2eb0 sp 0x7ffe1f0c2df0 T0)
elif re.search(r"^==\d+==ERROR: AddressSanitizer: SEGV on unknown address",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #0 0x557a3f99c4d5 in func /home/gasiba/Git/sifu/upload/edbd33d4-6ece-4cec-9da9-4b66084db79e/func.c:13
m = re.search(r"^.*in (.*) .*\/" + fNameR + r":(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): Segmentation fault".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
# search for AddressSanitizer: heap-buffer-overflow
# Example: ==2==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x602000000015 at pc 0x55b9ad0e93fd bp 0x7ffce65329b0 sp 0x7ffce65329a0
elif re.search(r"^==\d+==ERROR: AddressSanitizer: heap-buffer-overflow",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #0 0x55b9ad0e93fc in func /home/gasiba/Git/sifu/upload/51b30a8b-acde-4bf3-8c64-8d2f88fd932c/func.c:14
m = re.search(r"^in (.*) .*\/" + fNameR + r".cp?p?:(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): Heap Buffer Overflow/Underflow".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
# search for memory leaks
# Example: ==2==ERROR: LeakSanitizer: detected memory leaks
elif re.search(r"==\d+==ERROR: LeakSanitizer: detected memory leaks",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #1 0x5602ed79db34 in get_filled_buffer /home/gasiba/Git/sifu/Challenges/test/chal_0007/func.c:25
m = re.search(r"^.*in (.*) .*\/"+fNameR+":(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): Memory leak".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
break
# SUMMARY: AddressSanitizer: 120 byte(s) leaked in 1 allocation(s).
m = re.search(r"SUMMARY: AddressSanitizer: \d+ byte\(s\) leaked in \d+ allocation\(s\).$",errLine)
if m:
addFinding("Detected memory leak")
break
lineNum = lineNum + 1
# search for free memory that was not malloc'ed
# Example: AddressSanitizer: attempting free on address which was not malloc()-ed: 0x7ffffa10fcd0 in thread T0
# #0 0x560d6e9f491f in __interceptor_free (/home/gasiba/Git/sifu/Challenges/test/chal_0007/main+0x10591f)
# #1 0x560d6ea42783 in get_y_no /home/gasiba/Git/sifu/Challenges/test/chal_0007/func.c:17
# #2 0x560d6ea4191b in Test_Main /home/gasiba/Git/sifu/Challenges/test/chal_0007/main.c:49
# #3 0x560d6ea41ae1 in main /home/gasiba/Git/sifu/Challenges/test/chal_0007/main.c:73
# #4 0x7fe368b1cb96 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x21b96)
# #5 0x560d6e90b449 in _start (/home/gasiba/Git/sifu/Challenges/test/chal_0007/main+0x1c449)
elif re.search(r"AddressSanitizer: attempting free on address which was not malloc",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #1 0x560d6ea42783 in get_y_no /home/gasiba/Git/sifu/Challenges/test/chal_0007/func.c:17
m = re.search(r"^.*in (.*) .*\/"+fNameR+":(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): Trying to free memory that was not malloc'ed".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
# ==2==ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000 (pc 0x55daab485ac3 bp 0x7ffd89bea910 sp 0x7ffd89bea860 T0)
# ==2==The signal is caused by a READ memory access.
# ==2==Hint: address points to the zero page.
# #0 0x55daab485ac2 in get_y_no /home/gasiba/Git/sifu/upload/a4438855-2f36-4c53-9e1f-52e6a46f3b24/func.c:36
# #1 0x55daab484b9e in Test_Main /home/gasiba/Git/sifu/upload/a4438855-2f36-4c53-9e1f-52e6a46f3b24/main.c:49
# #2 0x55daab484d96 in main /home/gasiba/Git/sifu/upload/a4438855-2f36-4c53-9e1f-52e6a46f3b24/main.c:74
# #3 0x7f60b21b6b96 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x21b96)
# #4 0x55daab34e6c9 in _start (/chal/main+0x1c6c9)
elif re.search(r"AddressSanitizer: SEGV on unknown address",errLine):
lineNum = lineNum + 1
while (lineNum<len(errLines)):
errLine = errLines[lineNum]
# #0 0x55daab485ac2 in get_y_no /home/gasiba/Git/sifu/upload/a4438855-2f36-4c53-9e1f-52e6a46f3b24/func.c:36
m = re.search(r"^.*in (.*) .*\/"+fNameR+":(.*?)$",errLine)
if m:
functionName = m.group(1)
lineNumber = int(m.group(2)) - nLinesInjected.get(fNameR,0)
finding = "ERROR ({fileName},{lineNumber}): segmentation fault".format(fileName=fNameR,lineNumber=lineNumber)
addFinding(finding)
added_asan_finding = True
break
lineNum = lineNum + 1
lineNum = lineNum + 1
if found_asan and not added_asan_finding:
addFinding("There is a security vulnerability with your code.")
#TODO: report to ourselves
except Exception as e:
print("Exception: "+str(e))
# Dump the results to the Sifu backend
r = results.loadResults()
for finding in findings:
tag = findings[finding]
r = results.addResult(r,0,"TEST_100",finding,"","",tag, identifier)
# in the end, if we have no results, something wrong or very bad has happen!
if len(r)==0:
r = results.addResult(r,0,"TEST_0","This challenge seems to be broken!","","","NO_TAG")
results.saveResults(r)
# Done.
if __name__ == '__main__':
call_analyse()
|
py | 7dff553e118d7b1e1d418c614de3945113b2d034 | # -*- coding: utf-8 -*-
""" Update an Attendify schedule XLSX file with the currently accepted
talks.
Usage: manage.py attendify_schedule_xlsx ep2016 schedule.xlsx
Note that for Attendify you have to download the schedule before
running this script, since they add meta data to the downloaded
file which has to be kept around when uploading it again.
The script updates schedule.xlsx in place. Unfortunately, Attendify
currently has a bug in that it doesn't accept the file format
generated by openpyxl. Opening the file in LibreOffice and saving
it (without changes) fixes this as a work-around.
Attendify Worksheet "Schedule" format
-------------------------------------
Row A4: Session Title, Date (MM/DD/YYYY), Start Time (HH:MM), End
Time (HH:MM), Description (Optional), Location (Optional), Track
Title (Optional), UID (do not delete)
Row A6: Start of data
"""
from django.core.management.base import BaseCommand, CommandError
from django.core import urlresolvers
from django.utils.html import strip_tags
from conference import models
import datetime
import markdown2
import openpyxl
### Globals
# Debug output ?
_debug = 0
# These must match the talk .type or .admin_type
from accepted_talks import TYPE_NAMES
# Special handling of poster sessions
if 0:
# Poster sessions don't have events associated with them, so use
# these defaults
ADJUST_POSTER_SESSIONS = True
POSTER_START = datetime.datetime(2016,7,19,15,15) # TBD
POSTER_DURATION = datetime.timedelta(minutes=90)
POSTER_ROOM = u'Exhibition Hall'
else:
ADJUST_POSTER_SESSIONS = False
# Plenary sessions will have 2-3 tracks assigned. We use the
# plenary room in this case.
PLENARY_ROOM = 'Smarkets'
# Breaks have more than 3 tracks assigned. Since this changes between
# the days, we don't set the room name.
BREAK_ROOM = ''
### Helpers
def profile_url(user):
return urlresolvers.reverse('conference-profile',
args=[user.attendeeprofile.slug])
def speaker_listing(talk, filter_special_entries=True):
l = []
for speaker in talk.get_all_speakers():
full_name = u'%s %s' % (
speaker.user.first_name.title(),
speaker.user.last_name.title())
if filter_special_entries:
if full_name in (u'To Be Announced', u'Tobey Announced'):
continue
l.append(full_name)
return u', '.join(l)
def format_text(text, remove_tags=False, output_html=True):
# Remove whitespace
text = text.strip()
if not text:
return text
# Remove links, tags, etc.
if remove_tags:
text = strip_tags(text)
# Remove quotes
if text[0] == '"' and text[-1] == '"':
text = text[1:-1]
# Convert markdown markup to HTML
if output_html:
text = markdown2.markdown(text)
return text
def talk_title(talk):
title = format_text(talk.title, remove_tags=True, output_html=False)
if not title:
return title
return title
def talk_abstract(talk):
abstract = talk.getAbstract()
if abstract:
text = format_text(talk.getAbstract().body)
else:
text = ''
return '<p>By <i>%s</i></p>\n\n%s' % (
speaker_listing(talk),
text)
def event_title(event):
title = format_text(event.custom, remove_tags=True, output_html=False)
if not title:
return title
return title
def event_abstract(event):
return format_text(event.abstract)
def add_event(data, talk=None, event=None, session_type='', talk_events=None):
# Determine title and abstract
title = ''
abstract = ''
if talk is None:
if event is None:
raise TypeError('need either talk or event given')
title = event_title(event)
abstract = event_abstract(event)
speakers = u''
else:
title = talk_title(talk)
abstract = talk_abstract(talk)
speakers = speaker_listing(talk)
if event is None:
event = talk.get_event()
# Determine time_range and room
if event is None:
if talk.type and talk.type[:1] == 'p' and ADJUST_POSTER_SESSIONS:
# Poster session
time_range = (POSTER_START,
POSTER_START + POSTER_DURATION)
room = POSTER_ROOM
else:
print ('Talk %r (type %r) does not have an event '
'associated with it; skipping' %
(title, talk.type))
return
else:
time_range = event.get_time_range()
tracks = event.tracks.all()
if len(tracks) > 3:
# Must be a break
room = BREAK_ROOM
elif len(tracks) > 1:
# Must be a plenary session
room = PLENARY_ROOM
elif tracks:
room = tracks[0].title
else:
room = u''
if talk_events is not None:
talk_events[event.pk] = event
# Don't add entries for events without title
if not title:
return
# Format time entries
date = time_range[0].strftime('%m/%d/%Y')
start_time = time_range[0].strftime('%H:%M')
stop_time = time_range[1].strftime('%H:%M')
# UID
uid = u''
data.append((
title,
date,
start_time,
stop_time,
abstract,
room,
session_type,
speakers,
uid,
))
# Start row of data in spreadsheet (Python 0-based index)
SCHEDULE_WS_START_DATA = 5
# Column number of UID columns (Python 0-based index)
SCHEDULE_UID_COLUMN = 8
# Number of columns to make row unique (title, date, start, end)
SCHEDULE_UNIQUE_COLS = 4
def update_schedule(schedule_xlsx, new_data, updated_xlsx=None):
# Load workbook
wb = openpyxl.load_workbook(schedule_xlsx)
assert wb.sheetnames == [u'Instructions', u'Schedule', u'System']
ws = wb['Schedule']
# Extract data values
ws_data = list(ws.values)[SCHEDULE_WS_START_DATA:]
print ('read %i data lines' % len(ws_data))
print ('first line: %r' % ws_data[:1])
print ('last line: %r' % ws_data[-1:])
# Reconcile UIDs / talks
uids = {}
for line in ws_data:
uid = line[SCHEDULE_UID_COLUMN]
if not uid:
continue
uids[tuple(line[:SCHEDULE_UNIQUE_COLS])] = uid
# Add UID to new data
new_schedule = []
for line in new_data:
key = tuple(line[:SCHEDULE_UNIQUE_COLS])
if key not in uids:
print ('New or rescheduled talk %s found' % (key,))
uid = u''
else:
uid = uids[key]
line = tuple(line[:SCHEDULE_UID_COLUMN]) + (uid,)
new_schedule.append(line)
new_data = new_schedule
# Replace old data with new data
old_data_rows = len(ws_data)
new_data_rows = len(new_data)
print ('new data: %i data lines' % new_data_rows)
offset = SCHEDULE_WS_START_DATA + 1
print ('new_data = %i rows' % len(new_data))
for j, row in enumerate(ws[offset: offset + new_data_rows - 1]):
new_row = new_data[j]
if _debug:
print ('updating row %i with %r' % (j, new_row))
if len(row) > len(new_row):
row = row[:len(new_row)]
for i, cell in enumerate(row):
cell.value = new_row[i]
# Overwrite unused cells with None
if new_data_rows < old_data_rows:
for j, row in enumerate(ws[offset + new_data_rows + 1:
offset + old_data_rows + 1]):
if _debug:
print ('clearing row %i' % (j,))
for i, cell in enumerate(row):
cell.value = None
# Write updated data
if updated_xlsx is None:
updated_xlsx = schedule_xlsx
wb.save(updated_xlsx)
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
args = '<conference> <xlsx-file>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
try:
schedule_xlsx = args[1]
except IndexError:
raise CommandError('XLSX file not specified')
talks = (models.Talk.objects
.filter(conference=conference,
status='accepted'))
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if admin_type == 'm':
type = 'm'
elif admin_type == 'k':
type = 'k'
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
# Build data for updating the spreadsheet
data = []
talk_events = {}
for type, type_name, description in TYPE_NAMES:
# Get bag with talks
bag = talk_types.get(type, [])
if not bag:
continue
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
# Add talks from bag to data
for talk in bag:
for event in talk.get_event_list():
# A talk may have multiple events associated with it
add_event(data,
talk=talk,
event=event,
talk_events=talk_events,
session_type=type_name)
# Add events which are not talks
for schedule in models.Schedule.objects.filter(conference=conference):
for event in models.Event.objects.filter(schedule=schedule):
if event.pk in talk_events:
continue
add_event(data, event=event)
# Update spreadsheet with new data
update_schedule(schedule_xlsx, data)
|
py | 7dff556aabdb4fca026046ae37b64eb701d90643 | # pylint: disable=too-many-function-args
"""This file provides NeedsDisplay, a class to collect display tooling"""
from ctypes import c_char_p, POINTER
from wotw_xlib.xlib import Display, XCloseDisplay, XOpenDisplay
class NeedsDisplay(object):
"""This class provides the vehicle to manage a Display connection"""
def __init__(self, unknown_display=None):
"""Ctor initializes the display state"""
self.opened_display = False
self.display = self.parse_unknown_display(unknown_display)
def parse_unknown_display(self, unknown_display=None):
"""
This method checks for an existing display and viable display strings
"""
if isinstance(unknown_display, (Display, POINTER(Display))):
return unknown_display
if not isinstance(unknown_display, (basestring, c_char_p)):
unknown_display = None
return self.open_display(unknown_display)
def open_display(self, display_to_open):
"""Sets an internal flag and returns the display"""
self.opened_display = True
return XOpenDisplay(display_to_open)
def close_display(self):
"""Checks the internal flag and only closes displays it opened"""
if self.opened_display:
XCloseDisplay(self.display)
def __enter__(self):
"""Sends itself off"""
return self
def __exit__(self, exception_type, exception_value, traceback):
"""Explicitly closes the display"""
self.close_display()
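# Minimal usage sketch (assumes an X server is reachable, e.g. via $DISPLAY):
#   with NeedsDisplay() as needs:
#       pass  # needs.display can be handed to other Xlib wrappers here
# The display is closed on exit only if this object opened it itself.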
|
py | 7dff5580a7a07cc379c5dd574517883baaecbe2c | from django.template import RequestContext
from django.shortcuts import render_to_response
def home(request):
return render_to_response('home/index.html', RequestContext(request))
|
py | 7dff55e1b760819b63fde0848ebe7df2e6806a68 | from __future__ import absolute_import
import os
import mimetypes
import posixpath
from tempfile import SpooledTemporaryFile
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import Storage
from django.utils import timezone
from django.utils.encoding import force_bytes, smart_str, force_text
from google.cloud.storage.client import Client
from google.cloud.storage.blob import Blob
from google.cloud.storage.bucket import Bucket
from google.cloud.exceptions import NotFound
from google.auth.exceptions import TransportError, RefreshError
from google.resumable_media.common import DataCorruption
from requests.exceptions import RequestException
from sentry.utils import metrics
from sentry.net.http import TimeoutAdapter
# how many times do we want to try if stuff goes wrong
GCS_RETRIES = 5
# how long are we willing to wait?
GCS_TIMEOUT = 6.0
# _client cache is a 3-tuple of project_id, credentials, Client
# this is so if any information changes under it, it invalidates
# the cache. This scenario is possible since `options` are dynamic
_client = None, None, None
def try_repeated(func):
"""
Runs a function a few times ignoring errors we see from GCS
due to what appears to be network issues. This is a temporary workaround
until we can find the root cause.
"""
if hasattr(func, '__name__'):
func_name = func.__name__
elif hasattr(func, 'func'):
# Partials
func_name = getattr(func.func, '__name__', '__unknown__')
else:
func_name = '__unknown__'
metrics_key = 'filestore.gcs.retry'
metrics_tags = {'function': func_name}
idx = 0
while True:
try:
result = func()
metrics_tags.update({'success': '1'})
metrics.timing(metrics_key, idx, tags=metrics_tags)
return result
except (DataCorruption, TransportError, RefreshError, RequestException) as e:
if idx >= GCS_RETRIES:
metrics_tags.update({'success': '0', 'exception_class': e.__class__.__name__})
metrics.timing(metrics_key, idx, tags=metrics_tags)
raise
idx += 1
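# Usage sketch (hypothetical zero-argument callable; any no-arg function or
# functools.partial works):
#   data = try_repeated(lambda: blob.download_as_string())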
def get_client(project_id, credentials):
global _client
if _client[2] is None or (project_id, credentials) != (_client[0], _client[1]):
client = Client(project=project_id, credentials=credentials)
session = client._http
adapter = TimeoutAdapter(timeout=GCS_TIMEOUT)
session.mount('http://', adapter)
session.mount('https://', adapter)
_client = (project_id, credentials, client)
return _client[2]
def clean_name(name):
"""
Cleans the name so that Windows style paths work
"""
# Normalize Windows style paths
clean_name = posixpath.normpath(name).replace('\\', '/')
# os.path.normpath() can strip trailing slashes so we implement
# a workaround here.
if name.endswith('/') and not clean_name.endswith('/'):
# Add a trailing slash as it was stripped.
clean_name = clean_name + '/'
# Given an empty string, os.path.normpath() will return ., which we don't want
if clean_name == '.':
clean_name = ''
return clean_name
def safe_join(base, *paths):
"""
A version of django.utils._os.safe_join for GCS object paths.
Joins one or more path components to the base path component
intelligently. Returns a normalized version of the final path.
The final path must be located inside of the base path component
(otherwise a ValueError is raised).
Paths outside the base path indicate a possible security
sensitive operation.
"""
base_path = force_text(base)
base_path = base_path.rstrip('/')
paths = [force_text(p) for p in paths]
final_path = base_path + '/'
for path in paths:
_final_path = posixpath.normpath(posixpath.join(final_path, path))
# posixpath.normpath() strips the trailing /. Add it back.
if path.endswith('/') or _final_path + '/' == final_path:
_final_path += '/'
final_path = _final_path
if final_path == base_path:
final_path += '/'
# Ensure final_path starts with base_path and that the next character after
# the base path is /.
base_path_len = len(base_path)
if (not final_path.startswith(base_path) or final_path[base_path_len] != '/'):
raise ValueError('the joined path is located outside of the base path'
' component')
return final_path.lstrip('/')
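# Behavior sketch with illustrative arguments:
#   safe_join('', 'path/to/file.txt')   -> 'path/to/file.txt'
#   safe_join('media', 'a/../b.txt')    -> 'media/b.txt'
#   safe_join('media', '../escape.txt') -> raises ValueError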
class FancyBlob(Blob):
def __init__(self, download_url, *args, **kwargs):
self.download_url = download_url
super(FancyBlob, self).__init__(*args, **kwargs)
def _get_download_url(self):
if self.media_link is None:
download_url = u'{download_url}/download/storage/v1{path}?alt=media'.format(
download_url=self.download_url,
path=self.path,
)
if self.generation is not None:
download_url += u'&generation={:d}'.format(self.generation)
return download_url
else:
return self.media_link
class GoogleCloudFile(File):
def __init__(self, name, mode, storage):
self.name = name
self.mime_type = mimetypes.guess_type(name)[0]
self._mode = mode
self._storage = storage
# NOTE(mattrobenolt): This is the same change in behavior as in
# the s3 backend. We're opting not to load the file
# or metadata at this step. This means we won't actually
# know a file doesn't exist until we try to read it.
self.blob = FancyBlob(storage.download_url, self.name, storage.bucket)
self._file = None
self._is_dirty = False
@property
def size(self):
return self.blob.size
def _get_file(self):
def _try_download():
self.blob.download_to_file(self._file)
self._file.seek(0)
if self._file is None:
with metrics.timer('filestore.read', instance='gcs'):
self._file = SpooledTemporaryFile(
max_size=self._storage.max_memory_size,
suffix=".GSStorageFile",
dir=None,
)
if 'r' in self._mode:
self._is_dirty = False
try_repeated(_try_download)
return self._file
def _set_file(self, value):
self._file = value
file = property(_get_file, _set_file)
def read(self, num_bytes=None):
if 'r' not in self._mode:
raise AttributeError("File was not opened in read mode.")
if num_bytes is None:
num_bytes = -1
return super(GoogleCloudFile, self).read(num_bytes)
def write(self, content):
if 'w' not in self._mode:
raise AttributeError("File was not opened in write mode.")
self._is_dirty = True
return super(GoogleCloudFile, self).write(force_bytes(content))
def close(self):
def _try_upload():
self.file.seek(0)
self.blob.upload_from_file(self.file, content_type=self.mime_type)
if self._file is not None:
if self._is_dirty:
try_repeated(_try_upload)
self._file.close()
self._file = None
class GoogleCloudStorage(Storage):
project_id = None
credentials = None
bucket_name = None
file_name_charset = 'utf-8'
file_overwrite = True
download_url = 'https://www.googleapis.com'
# The max amount of memory a returned file can take up before being
# rolled over into a temporary file on disk. Default is 0: Do not roll over.
max_memory_size = 0
def __init__(self, **settings):
# check if some of the settings we've provided as class attributes
# need to be overwritten with values passed in here
for name, value in settings.items():
if hasattr(self, name):
setattr(self, name, value)
self._bucket = None
self._client = None
@property
def client(self):
if self._client is None:
self._client = get_client(
self.project_id,
self.credentials,
)
return self._client
@property
def bucket(self):
if self._bucket is None:
self._bucket = Bucket(self.client, name=self.bucket_name)
return self._bucket
def _normalize_name(self, name):
"""
Normalizes the name so that paths like /path/to/ignored/../something.txt
and ./file.txt work. Note that clean_name adds ./ to some paths so
they need to be fixed here.
"""
return safe_join('', name)
def _encode_name(self, name):
return smart_str(name, encoding=self.file_name_charset)
def _open(self, name, mode='rb'):
name = self._normalize_name(clean_name(name))
return GoogleCloudFile(name, mode, self)
def _save(self, name, content):
def _try_upload():
content.seek(0, os.SEEK_SET)
file.blob.upload_from_file(content, size=content.size,
content_type=file.mime_type)
with metrics.timer('filestore.save', instance='gcs'):
cleaned_name = clean_name(name)
name = self._normalize_name(cleaned_name)
content.name = cleaned_name
encoded_name = self._encode_name(name)
file = GoogleCloudFile(encoded_name, 'w', self)
try_repeated(_try_upload)
return cleaned_name
def delete(self, name):
def _try_delete():
normalized_name = self._normalize_name(clean_name(name))
self.bucket.delete_blob(self._encode_name(normalized_name))
try:
try_repeated(_try_delete)
except NotFound:
pass
def exists(self, name):
if not name: # root element aka the bucket
try:
self.bucket
return True
except ImproperlyConfigured:
return False
name = self._normalize_name(clean_name(name))
return bool(self.bucket.get_blob(self._encode_name(name)))
def listdir(self, name):
name = self._normalize_name(clean_name(name))
        # For bucket.list_blobs and the logic below, name needs to end in /
# But for the root path "" we leave it as an empty string
if name and not name.endswith('/'):
name += '/'
files_list = list(self.bucket.list_blobs(prefix=self._encode_name(name)))
files = []
dirs = set()
base_parts = name.split("/")[:-1]
for item in files_list:
parts = item.name.split("/")
parts = parts[len(base_parts):]
if len(parts) == 1 and parts[0]:
# File
files.append(parts[0])
elif len(parts) > 1 and parts[0]:
# Directory
dirs.add(parts[0])
return list(dirs), files
def _get_blob(self, name):
# Wrap google.cloud.storage's blob to raise if the file doesn't exist
blob = self.bucket.get_blob(name)
if blob is None:
raise NotFound(u'File does not exist: {}'.format(name))
return blob
def size(self, name):
name = self._normalize_name(clean_name(name))
blob = self._get_blob(self._encode_name(name))
return blob.size
def modified_time(self, name):
name = self._normalize_name(clean_name(name))
blob = self._get_blob(self._encode_name(name))
return timezone.make_naive(blob.updated)
def get_modified_time(self, name):
name = self._normalize_name(clean_name(name))
blob = self._get_blob(self._encode_name(name))
updated = blob.updated
return updated if settings.USE_TZ else timezone.make_naive(updated)
def url(self, name):
# Preserve the trailing slash after normalizing the path.
name = self._normalize_name(clean_name(name))
blob = self._get_blob(self._encode_name(name))
return blob.public_url
def get_available_name(self, name, max_length=None):
if self.file_overwrite:
name = clean_name(name)
return name
return super(GoogleCloudStorage, self).get_available_name(name, max_length)
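# --- Usage sketch (added for illustration; not part of the original backend).
# The project id, credentials and bucket name below are hypothetical
# placeholders; in a real deployment they would come from settings or a
# secret store, and the calls would hit a live GCS bucket.
def _example_usage():
    from django.core.files.base import ContentFile

    storage = GoogleCloudStorage(
        project_id='example-project',   # hypothetical
        credentials=None,               # e.g. a google.auth credentials object
        bucket_name='example-bucket',   # hypothetical
    )
    # Save a small file, then read it back through the same backend.
    name = storage.save('examples/hello.txt', ContentFile(b'hello world'))
    with storage.open(name, 'rb') as fh:
        return fh.read()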
|
py | 7dff5605d1322e6fd60be0c770faa425b9c4cf84 | """Unit test for basic histogram creation, subtraction and fitting with errors."""
import numpy as np
from panter.config.evalFitSettings import pol0
from panter.data.dataHistPerkeo import HistPerkeo
from panter.eval.evalFit import DoFit
from tests.unittestroot import UnitTestRoot
class HistTestFit(UnitTestRoot):
"""Unit test class for basic histogram creation, subtraction and fitting.
Inherited from base class UnitTestRoot.
Parameters
----------
txtfile: str
Name of sample txt file with data to fill the histogram with.
params: [int, float, float, float, float]
List of parameters to be used for histograms and fit:
[BinCounts, HistLowLim, HistUpLim, FitRangeLow, FitRangeUp]
"""
def __init__(self, txtfile: str, params: list):
self._txtfile = txtfile
self.hist_par = params[0:3]
self.fit_par = params[3:]
self._root_macro = "basicfit.cpp"
super().__init__(
test_label="HistTestFit", params=params, root_macro=self._root_macro
)
def _do_root(self):
"""Do ROOT evaluation part for the unit test."""
return super()._do_root([self._txtfile], self._params)
def _do_panter(self):
"""Do panter evaluation part for the unit test."""
        with open(self._txtfile) as data_file:
            data_raw = data_file.read().split()
data_raw = list(map(float, data_raw))
hpanter1 = HistPerkeo(*[data_raw, *self.hist_par])
hpanter2 = HistPerkeo(*[np.array(data_raw) + 2, *self.hist_par])
hpanter1.addhist(hpanter2, -0.5)
fitclass = DoFit(hpanter1.hist)
fitclass.setup(pol0)
fitclass.limit_range(self.fit_par)
fitres = fitclass.fit()
panter_fitres = [
fitres.params["c0"].value,
fitres.params["c0"].stderr,
fitclass.ret_gof()[0],
]
return np.asarray(panter_fitres)
def do_histtestfit() -> bool:
"""Run this unit test with hard coded, default parameters."""
file = "sample.txt"
par = [5, 0, 15, 0, 15]
test = HistTestFit(txtfile=file, params=par)
return test.test(brel_dev=False, bprint=True)
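# Illustrative entry point (added as a sketch); it assumes the default
# "sample.txt" referenced above exists in the working directory.
if __name__ == "__main__":
    print(do_histtestfit())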
|
py | 7dff56ad1ec1c9e9d2d669b56d687f10df17834c | """
A CoffeeScript Object Notation (CSON) parser for Python 2 and Python 3.
See documentation at https://github.com/avaka/pycson
"""
from .parser import load, loads
from .writer import dump, dumps
from speg import ParseError
|
py | 7dff56b2010913870010d452b37e4f226e235287 | """The park level model."""
from .park_level import ParkLevel # noqa: F401
|
py | 7dff571f9c8939769b45742b8206d298f56a7891 | import connexion
import json
from flask import Response
from connexion.exceptions import OAuthProblem
TOKEN_DB = {"asdf1234567890": {"uid": 100}}
def apikey_auth(token, required_scopes):
info = TOKEN_DB.get(token, None)
if not info:
raise OAuthProblem("Invalid token")
return info
def get_invalid_response():
validation_message = {
"code": "OPGDATA-API-INVALIDREQUEST",
"message": "Invalid Request",
}
response = Response(
json.dumps(validation_message),
status=400,
mimetype="application/vnd.opg-data.v1+json",
)
return response
def rewrite_bad_request(response):
if response.status_code == 400:
validation_message = {
"errors": [
{"code": "OPGDATA-API-INVALIDREQUEST", "message": "Invalid Request"},
]
}
response = Response(
json.dumps(validation_message),
status=400,
mimetype="application/vnd.opg-data.v1+json",
)
return response
mock = connexion.FlaskApp(__name__, specification_dir="../../../openapi/")
mock.app.after_request(rewrite_bad_request)
mock.add_api("template-openapi.yml", strict_validation="true")
mock.add_api("state-openapi.yml")
mock.run(port=4343)
|
py | 7dff588a8da3517eb8a4def4dfecd5b56952c58e | import datetime as dt
def makedate(line):
if len([d for d in line if d in '0123456789']) == 8:
return dt.date(int(line[:4]), int(line[4:6]), int(line[6:]))
def maketime(line):
if len([d for d in line if d in ':0123456789']) == 5:
return dt.time(int(line[:2]), int(line[3::]))
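# Worked examples (added for illustration):
#   makedate("20200131") -> dt.date(2020, 1, 31)
#   maketime("08:30")    -> dt.time(8, 30)
# Inputs that do not match the expected digit counts fall through and return None.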
class TagsFilters(object):
mdjpp_skip_notag_content = False
mdjpp_skip_global_tag_content = False
mdjpp_date_from = ""
mdjpp_date_to = ""
mdjpp_date_skip_notags = False
mdjpp_date_skip_empty = False
mdjpp_skip_tag = ""
mdjpp_only_tag = ""
mdjpp_only_global_tag = ""
class Render(object):
mdjpp_html = False
mdjpp_null = True
mdjpp_md = False
class Time(object):
mdjpp_time_propagate = True # propagate time tags into document
mdjpp_time_annotate = True # adds additional raw time tags if possible
class mdJPP(TagsFilters, Render, Time):
def reparse_options(self):
# date
if self.mdjpp_date_from:
self.mdjpp_date_from = makedate(self.mdjpp_date_from)
if self.mdjpp_date_to:
self.mdjpp_date_to = makedate(self.mdjpp_date_to)
# render
if not (self.mdjpp_html or self.mdjpp_md):
self.mdjpp_null = True
default_options = mdJPP()
|
py | 7dff5bf6504cdf3e59f09028d03372fc402aae81 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-09-01 17:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('instaapp', '0018_auto_20190901_1722'),
]
operations = [
migrations.AlterField(
model_name='image',
name='profile',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
py | 7dff5c46a5f3dc6316f0eb26e245cf703281deb0 | #!/usr/bin/env python
#coding=utf-8
import copy
from btform import attrget
from btform import storage
from btform import AttributeList
class Form(object):
def __init__(self, *inputs, **kw):
self.inputs = inputs
self.valid = True
self.note = None
self.validators = kw.pop('validators', [])
def __call__(self, x=None):
o = copy.deepcopy(self)
if x: o.validates(x)
return o
@property
def errors(self):
return ",".join([u"%s error,%s" % (i.description, i.note) for i in self.inputs if i.note])
def validates(self, source=None, _validate=True, **kw):
source = source or kw
out = True
for i in self.inputs:
v = attrget(source, i.name)
if _validate:
out = i.validate(v) and out
else:
i.set_value(v)
if _validate:
out = out and self._validate(source)
self.valid = out
return out
def _validate(self, value):
self.value = value
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def fill(self, source=None, **kw):
return self.validates(source, _validate=False, **kw)
def __getitem__(self, i):
for x in self.inputs:
if x.name == i: return x
        raise KeyError(i)
def __getattr__(self, name):
# don't interfere with deepcopy
inputs = self.__dict__.get('inputs') or []
for x in inputs:
if x.name == name: return x
        raise AttributeError(name)
def get(self, i, default=None):
try:
return self[i]
except KeyError:
return default
def _get_d(self): #@@ should really be form.attr, no?
return storage([(i.name, i.get_value()) for i in self.inputs])
d = property(_get_d)
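# Usage sketch (illustration only). ``notnull`` stands in for a hypothetical
# validator object exposing ``valid(value)`` and ``msg``, which is the
# interface Form and Item expect:
#
#   login = Form(Item('user', notnull, description=u'user name'),
#                Item('password', notnull, description=u'password'))
#   form = login()
#   if not form.validates({'user': 'bob', 'password': ''}):
#       print(form.errors)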
class Item(object):
def __init__(self, name, *validators, **attrs):
self.name = name
self.validators = validators
self.attrs = attrs = AttributeList(attrs)
self.description = attrs.pop('description', name)
self.value = attrs.pop('value', None)
self.note = None
self.id = attrs.setdefault('id', self.get_default_id())
def get_default_id(self):
return self.name
def validate(self, value):
self.set_value(value)
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def addatts(self):
# add leading space for backward-compatibility
return " " + str(self.attrs) |
py | 7dff5c722cd8a5f865dc03b7c43a6df95871806e | import logging
from .interfaces import (
IProcedure, ProcState, Token, IProcessorAPI, CallResult)
class Procedure(IProcedure):
def __init__(self, name: str, state: ProcState = ProcState.OPEN):
self._name = name
self._state = state
@property
def name(self) -> str:
return self._name
@property
def state(self) -> ProcState:
return self._state
@state.setter
def state(self, state: ProcState) -> None:
self._state = state
def call(self, token: Token, processor: IProcessorAPI) -> CallResult:
return CallResult()
def __str__(self) -> str:
return '{} ({})'.format(self.name, self.state) |
py | 7dff5ccf1db3720e542510efe5a2c84680a6e26f | from unittest import TestCase
from gcsa.reminders import EmailReminder, PopupReminder
from gcsa.serializers.reminder_serializer import ReminderSerializer
class TestReminder(TestCase):
def test_email_reminder(self):
reminder = EmailReminder(34)
self.assertEqual(reminder.method, 'email')
self.assertEqual(reminder.minutes_before_start, 34)
def test_popup_reminder(self):
reminder = PopupReminder(51)
self.assertEqual(reminder.method, 'popup')
self.assertEqual(reminder.minutes_before_start, 51)
class TestReminderSerializer(TestCase):
def test_to_json(self):
reminder_json = {
'method': 'email',
'minutes': 55
}
reminder = EmailReminder(55)
self.assertDictEqual(ReminderSerializer.to_json(reminder), reminder_json)
reminder_json = {
'method': 'popup',
'minutes': 13
}
reminder = PopupReminder(13)
self.assertDictEqual(ReminderSerializer.to_json(reminder), reminder_json)
serializer = ReminderSerializer(reminder)
self.assertDictEqual(serializer.get_json(), reminder_json)
def test_to_object(self):
reminder_json = {
'method': 'email',
'minutes': 55
}
reminder = ReminderSerializer.to_object(reminder_json)
self.assertIsInstance(reminder, EmailReminder)
self.assertEqual(reminder.minutes_before_start, 55)
reminder_json = {
'method': 'popup',
'minutes': 33
}
reminder = ReminderSerializer.to_object(reminder_json)
self.assertIsInstance(reminder, PopupReminder)
self.assertEqual(reminder.minutes_before_start, 33)
with self.assertRaises(ValueError):
reminder_json = {
'method': 'telegram',
'minutes': 33
}
ReminderSerializer.to_object(reminder_json)
|
py | 7dff5dfe68a68520ab826afbadcbfa0c3a2571d7 | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
class ReadOnlyFirst(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'bar': (str,), # noqa: E501
'baz': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'bar': 'bar', # noqa: E501
'baz': 'baz', # noqa: E501
}
read_only_vars = {
'bar', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ReadOnlyFirst - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bar (str): [optional] # noqa: E501
baz (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ReadOnlyFirst - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bar (str): [optional] # noqa: E501
baz (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 7dff5e0b06e16a1c2aa523894804c8d653642000 | from typing import Dict, List, Optional, Union, Any
from pydantic import BaseModel, Field
class QueryRequest(BaseModel):
query: str
params: Optional[dict] = None
class FilterRequest(BaseModel):
filters: Optional[Dict[str, Optional[Union[str, List[str]]]]] = None
class QueryAnswer(BaseModel):
answer: Optional[str]
question: Optional[str]
score: Optional[float] = None
probability: Optional[float] = None
context: Optional[str]
offset_start: Optional[int]
offset_end: Optional[int]
offset_start_in_doc: Optional[int]
offset_end_in_doc: Optional[int]
document_id: Optional[str] = None
meta: Optional[Dict[str, Any]]
class QueryResponse(BaseModel):
query: str
answers: List[QueryAnswer]
class ExtractiveQAFeedback(BaseModel):
question: str = Field(..., description="The question input by the user, i.e., the query.")
is_correct_answer: bool = Field(..., description="Whether the answer is correct or not.")
document_id: str = Field(..., description="The document in the query result for which feedback is given.")
model_id: Optional[int] = Field(None, description="The model used for the query.")
is_correct_document: bool = Field(
...,
description="In case of negative feedback, there could be two cases; incorrect answer but correct "
"document & incorrect document. This flag denotes if the returned document was correct.",
)
answer: str = Field(..., description="The answer string.")
offset_start_in_doc: int = Field(
..., description="The answer start offset in the original doc. Only required for doc-qa feedback."
)
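# Illustrative payloads (added as a sketch; the "top_k" key inside ``params``
# is an assumed example, not something these models define):
#
#   request = QueryRequest(query="Who wrote Dune?", params={"top_k": 3})
#   response = QueryResponse(query=request.query,
#                            answers=[QueryAnswer(answer="Frank Herbert",
#                                                 question=request.query)])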
|
py | 7dff5e8274ecb833f4e4372fcd00e178eaed0526 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('./CNN')
sys.path.append('./utility')
sys.path.append('./network')
sys.path.append('./dataset')
import tensorflow as tf
import optuna
from cnn import CNN
from lenet import LeNet
from resnet import ResNet, ResNeXt, SENet
from dense_net import DenseNet
from load import Load
from trainer import OptunaTrain
from search import Optuna
from train import set_model
from functools import partial
def objective(trial):
tf.reset_default_graph()
param = {
'opt' : trial.suggest_categorical('opt', ['SGD','Momentum','Adadelta','Adagrad','Adam','RMSProp']),
'lr' : trial.suggest_loguniform('lr', 8e-5, 8e-2),
'batch_size' : trial.suggest_categorical('batch_size', [64, 96 ,128]),
'aug': trial.suggest_categorical('aug', ['None','shift','mirror','rotate','shift_rotate','cutout']),
'l2': trial.suggest_categorical('l2', ['True','False'])
}
FLAGS.aug = param['aug']
FLAGS.l2_norm = param['l2']
FLAGS.batch_size = param['batch_size']
# prepare training
## load dataset
data = Load(FLAGS.data)
## setting models
model_set = set_model(data.output_dim)
model = eval(FLAGS.network)(model=model_set, name=FLAGS.network, out_dim=data.output_dim, lr=param['lr'], opt=param['opt'], trainable=True)
#training
trainer = OptunaTrain(FLAGS=FLAGS, message=None, data=data, model=model, name='tuning')
test_accuracy = trainer.train()
return -test_accuracy
def main(argv):
op = Optuna('example-study')
op.search(objective, FLAGS.n_trials)
return
if __name__ == '__main__':
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('network', 'CNN', 'Choice the training data name -> [CNN,LeNet,ResNet,ResNeXt,SENet,DenseNet]')
flags.DEFINE_string('data', 'mnist', 'Choice the training data name -> ["mnist","cifar10","cifar100","kuzushiji"]')
flags.DEFINE_integer('n_epoch', '3000', 'Input max epoch')
flags.DEFINE_integer('n_trials', '1000', 'Input trial epoch')
flags.DEFINE_integer('batch_size', '32', 'Input batch size')
flags.DEFINE_string('aug','None','Choice the Augmentation -> ["shift","mirror","rotate","shift_rotate","cutout"]')
    flags.DEFINE_bool('l2_norm', 'False', 'Whether to apply L2 regularization')
flags.DEFINE_string('init_model', 'None', 'Choice the checkpoint directpry(ex. ./results/181225_193106/model)')
flags.DEFINE_integer('checkpoints_to_keep', 5,'checkpoint keep count')
flags.DEFINE_integer('keep_checkpoint_every_n_hours', 1, 'checkpoint create ')
flags.DEFINE_integer('save_checkpoint_steps', 100,'save checkpoint step')
tf.app.run() |
py | 7dff5e986707dc6af709b368977e383d33806c85 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.l2.extensions import qos_linux as qos
from neutron.common import constants
from neutron.services.qos.drivers.openvswitch import driver
from neutron.services.qos import qos_consts
LOG = logging.getLogger(__name__)
class QosOVSAgentDriver(qos.QosLinuxAgentDriver):
SUPPORTED_RULES = driver.SUPPORTED_RULES
def __init__(self):
super(QosOVSAgentDriver, self).__init__()
self.br_int_name = cfg.CONF.OVS.integration_bridge
self.br_int = None
self.agent_api = None
self.ports = collections.defaultdict(dict)
def consume_api(self, agent_api):
self.agent_api = agent_api
def initialize(self):
self.br_int = self.agent_api.request_int_br()
self.cookie = self.br_int.default_cookie
def create_bandwidth_limit(self, port, rule):
self.update_bandwidth_limit(port, rule)
def update_bandwidth_limit(self, port, rule):
vif_port = port.get('vif_port')
if not vif_port:
port_id = port.get('port_id')
LOG.debug("update_bandwidth_limit was received for port %s but "
"vif_port was not found. It seems that port is already "
"deleted", port_id)
return
if rule.direction == constants.INGRESS_DIRECTION:
self._update_ingress_bandwidth_limit(vif_port, rule)
else:
self._update_egress_bandwidth_limit(vif_port, rule)
def delete_bandwidth_limit(self, port):
vif_port = port.get('vif_port')
if not vif_port:
port_id = port.get('port_id')
LOG.debug("delete_bandwidth_limit was received for port %s but "
"vif_port was not found. It seems that port is already "
"deleted", port_id)
return
self.br_int.delete_egress_bw_limit_for_port(vif_port.port_name)
def delete_bandwidth_limit_ingress(self, port):
vif_port = port.get('vif_port')
if not vif_port:
port_id = port.get('port_id')
LOG.debug("delete_bandwidth_limit_ingress was received "
"for port %s but vif_port was not found. "
"It seems that port is already deleted", port_id)
return
self.br_int.delete_ingress_bw_limit_for_port(vif_port.port_name)
def create_dscp_marking(self, port, rule):
self.update_dscp_marking(port, rule)
def update_dscp_marking(self, port, rule):
self.ports[port['port_id']][qos_consts.RULE_TYPE_DSCP_MARKING] = port
vif_port = port.get('vif_port')
if not vif_port:
port_id = port.get('port_id')
LOG.debug("update_dscp_marking was received for port %s but "
"vif_port was not found. It seems that port is already "
"deleted", port_id)
return
port_name = vif_port.port_name
port = self.br_int.get_port_ofport(port_name)
mark = rule.dscp_mark
#mark needs to be bit shifted 2 left to not overwrite the
#lower 2 bits of type of service packet header.
#source: man ovs-ofctl (/mod_nw_tos)
mark = str(mark << 2)
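        # Example (illustrative): a DSCP mark of 46 (Expedited Forwarding)
        # becomes 46 << 2 = 184 once shifted into the upper six ToS bits.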
# reg2 is a metadata field that does not alter packets.
# By loading a value into this field and checking if the value is
# altered it allows the packet to be resubmitted and go through
# the flow table again to be identified by other flows.
flows = self.br_int.dump_flows_for(cookie=self.cookie, table=0,
in_port=port, reg2=0)
if not flows:
actions = ("mod_nw_tos:" + mark + ",load:55->NXM_NX_REG2[0..5]," +
"resubmit(,0)")
self.br_int.add_flow(in_port=port, table=0, priority=65535,
reg2=0, actions=actions)
else:
for flow in flows:
actions = str(flow).partition("actions=")[2]
acts = actions.split(',')
# mod_nw_tos = modify type of service header
# This is the second byte of the IPv4 packet header.
# DSCP makes up the upper 6 bits of this header field.
actions = "mod_nw_tos:" + mark + ","
actions += ','.join([act for act in acts
if "mod_nw_tos:" not in act])
self.br_int.mod_flow(reg2=0, in_port=port, table=0,
actions=actions)
def delete_dscp_marking(self, port):
dscp_port = self.ports[port['port_id']].pop(qos_consts.
RULE_TYPE_DSCP_MARKING, 0)
if dscp_port:
port_num = dscp_port['vif_port'].ofport
self.br_int.uninstall_flows(in_port=port_num, table_id=0, reg2=0)
else:
LOG.debug("delete_dscp_marking was received for port %s but "
"no port information was stored to be deleted",
port['port_id'])
def _update_egress_bandwidth_limit(self, vif_port, rule):
max_kbps = rule.max_kbps
# NOTE(slaweq): According to ovs docs:
# http://openvswitch.org/support/dist-docs/ovs-vswitchd.conf.db.5.html
# ovs accepts only integer values of burst:
max_burst_kbps = int(self._get_egress_burst_value(rule))
self.br_int.create_egress_bw_limit_for_port(vif_port.port_name,
max_kbps,
max_burst_kbps)
def _update_ingress_bandwidth_limit(self, vif_port, rule):
port_name = vif_port.port_name
max_kbps = rule.max_kbps or 0
max_burst_kbps = rule.max_burst_kbps or 0
self.br_int.update_ingress_bw_limit_for_port(
port_name,
max_kbps,
max_burst_kbps
)
|
py | 7dff5ea03f3d7b686bf3fc56574bed45222b5231 | """
molecool
A Python package for analyzing and visualizing xyz files.
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='molecool',
author='Haimeng Wang',
author_email='[email protected]',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='MIT',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
|
py | 7dff604711daf0ba1fce3e41ed744b4ff3ab3869 | from django.db import models
from i18nfield.fields import I18nCharField
from pretalx.common.mixins import LogMixin
class Track(LogMixin, models.Model):
event = models.ForeignKey(
to='event.Event',
on_delete=models.PROTECT,
related_name='tracks',
)
name = I18nCharField(
max_length=200,
)
color = models.CharField(
max_length=7,
)
def __str__(self) -> str:
return f'Track(event={self.event.slug}, name={self.name})'
|
py | 7dff60780f455e2a4a378026b63a982eefe1ac22 | import os
from dpa.app.entity import Entity, EntityRegistry, EntityError
# -----------------------------------------------------------------------------
class GeomEntity(Entity):
category = 'geom'
CHANNEL_CONFIG = 'config/mari/geom/channels.cfg'
# -------------------------------------------------------------------------
@classmethod
def import_product_representation(cls, session, representation, *args,
**kwargs):
if session.mari.projects.current():
raise EntityError("Cannot have a project open when importing.")
channel_config = session.ptask_area.config(cls.CHANNEL_CONFIG,
composite_ancestors=True)
force_ptex = kwargs.get('force_ptex', False)
if not channel_config or not hasattr(channel_config, 'channels'):
raise EntityError(
"Unable to find channel config for {cat} import.".format(
cat=cls.category))
product_name = representation.product_version.product.name
channels = []
# create the channels
for (channel_name, channel_options) in channel_config.channels.iteritems():
# prepend the product name to the channel
channel_name = product_name + '_' + channel_name
# retrieve the channel options
color_values = channel_options.get('color', [0.5, 0.5, 0.5, 1.0])
color = session.mari.Color(*color_values[0:3])
use_alpha = channel_options.get('alpha', True)
depth = channel_options.get('depth', 16)
channel = session.mari.ChannelInfo(channel_name,
use_alpha=use_alpha, fill_color=color)
channel.setDepth(depth)
channels.append(channel)
mari_dir = session.ptask_area.dir(dir_name='mari')
# get a path to the geom product via the import directory
geom_file = cls.get_import_file(session, product_name, cls.category,
representation)
# create the project
if force_ptex:
#session.mari.utils.message("Using Ptex!")
EmptyChannels = []
project_meta_options = dict()
project_meta_options["MappingScheme"] = session.mari.projects.FORCE_PTEX
project_meta_options["MultipleGeometries"] = session.mari.projects.MERGE_GEOMETRIES
project_meta_options["PtexFaceSizeScheme"] = session.mari.projects.PTEX_WORLD_SPACE_DENSITY_SIZE
project_meta_options["PtexFaceSize"] = 16
project_meta_options["PtexImageFormat"] = session.mari.projects.PTEXFORMAT_BYTE
project_meta_options["PtexFaceColor"] = session.mari.Color(0.5, 0.5, 0.5, 1)
project_meta_options["MergeType"] = session.mari.geo.MERGETYPE_SINGLE_MESH
project_meta_options["CreateSelectionSets"] = session.mari.geo.SELECTION_GROUPS_CREATE_FROM_FACE_GROUPS
session.mari.projects.create(product_name, geom_file, EmptyChannels, EmptyChannels, project_meta_options)
else:
session.mari.projects.create(product_name, geom_file, channels)
# now account for adjustment layers, etc.
for (channel_name, channel_options) in channel_config.channels.iteritems():
# prepend the product name to the channel
channel_name = product_name + '_' + channel_name
# layers
if 'layers' in channel_options:
for (layer_type, layer_options) in \
channel_options.layers.iteritems():
# adjustment layer
if layer_type == 'adjustment':
for (layer_name, adjustment_key) in \
layer_options.iteritems():
geo = session.mari.geo.current()
geo_channel = geo.channel(channel_name)
adjustment_layer = \
geo_channel.createAdjustmentLayer(
layer_name, adjustment_key)
adjustment_layer.setVisibility(False)
# other layer types...
# close and archive the new project
project = session.mari.projects.current()
uuid = project.uuid()
project.save(force_save=True)
project.close(confirm_if_modified=False)
# archive
mari_file = os.path.join(mari_dir, product_name + '.mra')
session.mari.projects.archive(uuid, mari_file)
        os.chmod(mari_file, 0o770)
session.mari.projects.open(uuid)
# -------------------------------------------------------------------------
def export(self, *args, **kwargs):
"""Export this entity to a product."""
raise EntityError("Mari geom export not supported.")
# -----------------------------------------------------------------------------
EntityRegistry().register('mari', GeomEntity)
|
py | 7dff6101c3f987a3ad91bb73db914c387cd3360b | # Author: Mingyu Ding
# Time: 6/1/2020 8:29 PM
# Copyright 2019. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import glob
for item in glob.glob('output/*/*'):
if 'latest.pth' not in os.listdir(item):
print('clear dir ' + item)
os.system('rm -rf ' + item)
|
py | 7dff61cc4f63e9742fdb686de0400939783ab92e | from django.contrib import admin
from controller.models import Usertable
class UsertableAdmin(admin.ModelAdmin):
list_display = ('name', 'address')
admin.site.register(Usertable,UsertableAdmin)
|
py | 7dff61d0a65a9931241a362b30baccb7be8ebab6 | """
Test module for context broker models
"""
import unittest
from typing import List
from pydantic import ValidationError
from filip.models.base import DataType
from filip.clients.ngsi_v2 import IoTAClient, ContextBrokerClient
from filip.models.ngsi_v2.iot import Device, TransportProtocol, DeviceCommand
from filip.models import FiwareHeader
from filip.utils.cleanup import clear_all
from tests.config import settings
from filip.models.ngsi_v2.base import Metadata, NamedMetadata
from filip.models.ngsi_v2.context import \
ActionType, \
Command, \
ContextAttribute, \
ContextEntity, \
create_context_entity_model, \
Update, \
NamedContextAttribute, \
ContextEntityKeyValues, \
NamedCommand, \
PropertyFormat
class TestContextModels(unittest.TestCase):
"""
Test class for context broker models
"""
def setUp(self) -> None:
"""
Setup test data
Returns:
None
"""
self.attr = {'temperature': {'value': 20,
'type': 'Number'}}
self.relation = {'relation': {'value': 'OtherEntity',
'type': 'Relationship'}}
self.entity_data = {'id': 'MyId',
'type': 'MyType'}
self.entity_data.update(self.attr)
self.entity_data.update(self.relation)
def test_cb_attribute(self) -> None:
"""
Test context attribute models
Returns:
None
"""
attr = ContextAttribute(**{'value': 20, 'type': 'Text'})
self.assertIsInstance(attr.value, str)
attr = ContextAttribute(**{'value': 20, 'type': 'Number'})
self.assertIsInstance(attr.value, float)
attr = ContextAttribute(**{'value': [20, 20], 'type': 'Float'})
self.assertIsInstance(attr.value, list)
attr = ContextAttribute(**{'value': [20.0, 20.0], 'type': 'Integer'})
self.assertIsInstance(attr.value, list)
attr = ContextAttribute(**{'value': [20, 20], 'type': 'Array'})
self.assertIsInstance(attr.value, list)
def test_cb_metadata(self) -> None:
"""
Test context metadata model
Returns:
None
"""
md1 = Metadata(type='Text', value='test')
md2 = NamedMetadata(name='info', type='Text', value='test')
md3 = [NamedMetadata(name='info', type='Text', value='test')]
attr1 = ContextAttribute(value=20,
type='Integer',
metadata={'info': md1})
attr2 = ContextAttribute(**attr1.dict(exclude={'metadata'}),
metadata=md2)
attr3 = ContextAttribute(**attr1.dict(exclude={'metadata'}),
metadata=md3)
self.assertEqual(attr1, attr2)
self.assertEqual(attr1, attr3)
def test_cb_entity(self) -> None:
"""
Test context entity models
Returns:
None
"""
entity = ContextEntity(**self.entity_data)
self.assertEqual(self.entity_data, entity.dict(exclude_unset=True))
entity = ContextEntity.parse_obj(self.entity_data)
self.assertEqual(self.entity_data, entity.dict(exclude_unset=True))
properties = entity.get_properties(response_format='list')
self.assertEqual(self.attr, {properties[0].name: properties[0].dict(
exclude={'name', 'metadata'}, exclude_unset=True)})
properties = entity.get_properties(response_format='dict')
self.assertEqual(self.attr['temperature'],
properties['temperature'].dict(exclude={'metadata'},
exclude_unset=True))
relations = entity.get_relationships()
self.assertEqual(self.relation, {relations[0].name: relations[0].dict(
exclude={'name', 'metadata'}, exclude_unset=True)})
new_attr = {'new_attr': ContextAttribute(type='Number', value=25)}
entity.add_attributes(new_attr)
generated_model = create_context_entity_model(data=self.entity_data)
entity = generated_model(**self.entity_data)
self.assertEqual(self.entity_data, entity.dict(exclude_unset=True))
entity = generated_model.parse_obj(self.entity_data)
self.assertEqual(self.entity_data, entity.dict(exclude_unset=True))
def test_command(self):
"""
Test command model
Returns:
"""
cmd_data = {"type": "command",
"value": [5]}
Command(**cmd_data)
Command(value=[0])
with self.assertRaises(ValidationError):
class NotSerializableObject:
test: "test"
Command(value=NotSerializableObject())
Command(type="cmd", value=5)
def test_update_model(self):
"""
Test model for bulk updates
Returns:
None
"""
entities = [ContextEntity(id='1', type='myType')]
action_type = ActionType.APPEND
Update(actionType=action_type, entities=entities)
with self.assertRaises(ValueError):
Update(actionType='test', entities=entities)
def test_fiware_safe_fields(self):
"""
Tests all fields of models/ngsi_v2/context.py that have a regex to
be FIWARE safe
Returns:
None
"""
from pydantic.error_wrappers import ValidationError
valid_strings: List[str] = ["name", "test123", "3_:strange-Name!"]
invalid_strings: List[str] = ["my name", "Test?", "#False", "/notvalid"]
special_strings: List[str] = ["id", "type", "geo:location"]
        # Test that all relevant fields detect all invalid strings
for string in invalid_strings:
self.assertRaises(ValidationError,
Metadata, type=string)
self.assertRaises(ValidationError,
NamedMetadata, name=string)
self.assertRaises(ValidationError,
ContextAttribute, type=string)
self.assertRaises(ValidationError,
NamedContextAttribute, name=string)
self.assertRaises(ValidationError,
ContextEntityKeyValues, id=string, type="name")
self.assertRaises(ValidationError,
ContextEntityKeyValues, id="name", type=string)
self.assertRaises(ValidationError,
NamedCommand, name=string)
        # Test that all relevant fields accept valid strings without throwing errors
for string in valid_strings:
Metadata(type=string)
NamedMetadata(name=string)
ContextAttribute(type=string)
NamedContextAttribute(name=string)
ContextEntityKeyValues(id=string, type=string)
NamedCommand(name=string, value=string)
# Test for the special-string protected field if all strings are blocked
for string in special_strings:
self.assertRaises(ValidationError, ContextAttribute, type=string)
self.assertRaises(ValidationError,
NamedContextAttribute, name=string)
self.assertRaises(ValidationError,
NamedCommand, name=string)
# Test for the normal protected field if all strings are allowed
for string in special_strings:
Metadata(type=string)
NamedMetadata(name=string)
ContextEntityKeyValues(id=string, type=string)
def test_entity_delete_attributes(self):
"""
        Test the delete_attributes method.
        Also tests the get_attribute_names method.
"""
attr = ContextAttribute(**{'value': 20, 'type': 'Text'})
named_attr = NamedContextAttribute(**{'name': 'test2', 'value': 20,
'type': 'Text'})
attr3 = ContextAttribute(**{'value': 20, 'type': 'Text'})
entity = ContextEntity(id="12", type="Test")
entity.add_attributes({"test1": attr, "test3": attr3})
entity.add_attributes([named_attr])
entity.delete_attributes({"test1": attr})
self.assertEqual(entity.get_attribute_names(), {"test2", "test3"})
entity.delete_attributes([named_attr])
self.assertEqual(entity.get_attribute_names(), {"test3"})
entity.delete_attributes(["test3"])
self.assertEqual(entity.get_attribute_names(), set())
def test_entity_get_command_methods(self):
"""
Tests the two methods:
get_commands and get_command_triple
"""
# test the manual creation of an entity with Command
entity = ContextEntity(id="test", type="Tester")
entity.add_attributes([NamedCommand(name="myCommand", value=".")])
self.assertEqual(len(entity.get_commands()), 0)
with self.assertRaises(KeyError):
entity.get_command_triple("myCommand")
with self.assertRaises(KeyError):
entity.get_command_triple("--")
# test the automated command creation via Fiware and DeviceModel
device = Device(device_id="id",
service=settings.FIWARE_SERVICE,
service_path=settings.FIWARE_SERVICEPATH,
entity_name="name",
entity_type="type",
transport=TransportProtocol.HTTP,
endpoint="http://localhost:1234")
device.add_command(DeviceCommand(name="myCommand"))
device.add_command(DeviceCommand(name="myCommand2", type=DataType.TEXT))
with IoTAClient(
url=settings.IOTA_JSON_URL,
fiware_header=FiwareHeader(
service=settings.FIWARE_SERVICE,
service_path=settings.FIWARE_SERVICEPATH)) as client:
client.post_device(device=device)
with ContextBrokerClient(
url=settings.CB_URL,
fiware_header=FiwareHeader(
service=settings.FIWARE_SERVICE,
service_path=settings.FIWARE_SERVICEPATH)) as client:
entity = client.get_entity(entity_id="name", entity_type="type")
(command, c_status, c_info) = entity.get_command_triple("myCommand")
self.assertEqual(command.type, DataType.COMMAND)
self.assertEqual(c_status.type, DataType.COMMAND_STATUS)
self.assertEqual(c_info.type, DataType.COMMAND_RESULT)
(command, c_status, c_info) = entity.get_command_triple(
"myCommand2")
self.assertEqual(command.type, DataType.TEXT)
self.assertEqual(c_status.type, DataType.COMMAND_STATUS)
self.assertEqual(c_info.type, DataType.COMMAND_RESULT)
self.assertEqual(
entity.get_commands(response_format=PropertyFormat.DICT).keys(),
{"myCommand", "myCommand2"})
def tearDown(self) -> None:
"""
Cleanup test server
"""
clear_all(fiware_header=FiwareHeader(
service=settings.FIWARE_SERVICE,
service_path=settings.FIWARE_SERVICEPATH),
cb_url=settings.CB_URL,
iota_url=settings.IOTA_JSON_URL)
|
py | 7dff6201538140d42112d5e252f2570e07d7f0a3 | from setuptools import setup, find_packages
from codecs import open
import os.path as osp
working_dir = osp.abspath(osp.dirname(__file__))
ROOT = osp.abspath(osp.dirname(__file__))
# READ the README
with open(osp.join(ROOT, 'README.md'), encoding='utf-8') as f:
README = f.read()
with open(osp.join(ROOT, 'requirements.txt'), encoding='utf-8') as f:
REQ = f.read().splitlines()
setup(name='fedsimul',
version='0.1',
description='Simulation of Asynchronous Federated Learning',
      long_description=README,
packages=find_packages(exclude=['out*']),
install_requires=REQ,
)
|
py | 7dff6202781cd49d442b5a024524e8cd1e4bc6e7 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import pytest
from six.moves import mock
def test_request():
from sasctl.core import request, RestObj
with mock.patch('sasctl.core.Session') as mock_sess:
mock_sess.request.return_value.status_code = 200
mock_sess.request.return_value.json.return_value = dict()
resp = request('GET', 'example.com', session=mock_sess)
assert mock_sess.request.call_count == 1
assert isinstance(resp, RestObj)
assert hasattr(resp, '_headers')
def test_crud_function_doc():
from sasctl.core import _build_crud_funcs
for func in _build_crud_funcs('/widgets'):
assert ' widgets ' in func.__doc__
assert '{item}' not in func.__doc__
def test_list_items():
from sasctl.core import _build_crud_funcs, RestObj
list_items, _, _, _ = _build_crud_funcs('/items')
with mock.patch('sasctl.core.request') as request:
request.return_value = RestObj()
resp = list_items()
assert request.call_count == 1
assert [RestObj()] == resp
def test_get_item_by_dict():
from sasctl.core import _build_crud_funcs
_, get_item, _, _ = _build_crud_funcs('/widget')
# No REST call needed if complete dictionary is passed
target = {'name': 'Test Widget', 'id': 12345}
with mock.patch('sasctl.core.request') as request:
resp = get_item(target)
assert target == resp
assert request.call_count == 0
def test_get_item_by_name():
from sasctl.core import _build_crud_funcs
_, get_item, _, _ = _build_crud_funcs('/widget')
target = {'name': 'Test Widget', 'id': 12345}
with mock.patch('sasctl.core.request') as request:
with mock.patch('sasctl.core.is_uuid') as is_uuid:
is_uuid.return_value = False
request.return_value = target
resp = get_item(target['name'])
assert target == resp
def test_get_item_by_id():
from sasctl.core import _build_crud_funcs
_, get_item, _, _ = _build_crud_funcs('/widget')
target = {'name': 'Test Widget', 'id': 12345}
with mock.patch('sasctl.core.request') as request:
with mock.patch('sasctl.core.is_uuid') as is_uuid:
is_uuid.return_value = True
request.return_value = target
resp = get_item(12345)
assert is_uuid.call_count == 1
assert request.call_count == 1
assert target == resp
def test_update_item():
from sasctl.core import _build_crud_funcs, RestObj
_, _, update_item, _ = _build_crud_funcs('/widget')
target = RestObj({'name': 'Test Widget', 'id': 12345})
with mock.patch('sasctl.core.request') as request:
request.return_value = target
# ETag should be required
with pytest.raises(ValueError):
resp = update_item(target)
target._headers = {'etag': 'abcd'}
resp = update_item(target)
assert request.call_count == 1
assert ('put', '/widget/12345') == request.call_args[0]
assert target == resp
def test_put_restobj():
from sasctl.core import put, RestObj
url = "/jobDefinitions/definitions/717331fa-f650-4e31-b9e2-6e6d49f66bf9"
obj = RestObj({
'_headers': {'etag': 123, 'content-type': 'spam'}
})
# Base case
with mock.patch('sasctl.core.request') as req:
put(url, obj)
assert req.called
args = req.call_args[0]
kwargs = req.call_args[1]
assert args == ('put', url)
assert kwargs['json'] == obj
assert kwargs['headers']['If-Match'] == 123
assert kwargs['headers']['Content-Type'] == 'spam'
# Should merge with explicit headers
with mock.patch('sasctl.core.request') as req:
put(url, obj, headers={'encoding': 'spammy'})
assert req.called
args = req.call_args[0]
kwargs = req.call_args[1]
assert args == ('put', url)
assert kwargs['json'] == obj
assert kwargs['headers']['If-Match'] == 123
assert kwargs['headers']['Content-Type'] == 'spam'
assert kwargs['headers']['encoding'] == 'spammy'
# Should not overwrite explicit headers
with mock.patch('sasctl.core.request') as req:
put(url, obj, headers={'Content-Type': 'notspam',
'encoding': 'spammy'})
assert req.called
args = req.call_args[0]
kwargs = req.call_args[1]
assert args == ('put', url)
assert kwargs['json'] == obj
assert kwargs['headers']['If-Match'] == 123
assert kwargs['headers']['Content-Type'] == 'notspam'
assert kwargs['headers']['encoding'] == 'spammy'
def test_request_formats():
from requests import Response
import sasctl
from sasctl.core import request, RestObj
response = Response()
response.status_code = 200
response._content = '{"name": "test"}'.encode('utf-8')
with mock.patch('sasctl.core.Session') as mock_sess:
mock_sess.request.return_value = response
resp = request('GET', 'example.com', session=mock_sess, format='response')
assert mock_sess.request.call_count == 1
assert isinstance(resp, Response)
with pytest.warns(DeprecationWarning):
resp = request('GET', 'example.com', session=mock_sess, raw=True)
# Make sure old param is eventually cleaned up
if sasctl.__version__.startswith('1.6'):
pytest.fail("Deprecated 'raw' parameter should be removed.")
assert isinstance(resp, Response)
resp = request('GET', 'example.com', session=mock_sess, format='json')
assert isinstance(resp, dict)
assert resp['name'] == 'test'
resp = request('GET', 'example.com', session=mock_sess, format='text')
assert resp == '{"name": "test"}'
resp = request('GET', 'example.com', session=mock_sess, format='content')
assert resp == response._content
resp = request('GET', 'example.com', session=mock_sess, format=None)
assert isinstance(resp, RestObj)
assert resp.name == 'test'
def test_platform_version():
from sasctl import platform_version
with mock.patch('sasctl.services.model_repository.info') as mock_info:
mock_info.return_value = {'build': {'buildVersion': '3.7.231'}}
version = platform_version()
assert version == '3.5'
with mock.patch('sasctl.services.model_repository.info') as mock_info:
mock_info.return_value = {'build': {'buildVersion': '3.12.77'}}
version = platform_version()
assert version == '4.0' |
py | 7dff621ae54b6a13f09c9a0182f5f747745cb3cc | import gdown
from zipfile import ZipFile
import os
# Download samples from Google Drive
url = "https://drive.google.com/uc?id={}".format("1EKsTdFjtyaEgVbTVjDF5hodo8eej8bFF")
output = 'vids.zip'
gdown.download(url, output, quiet=False)
with ZipFile(output, 'r') as zipObj:
# Extract all the contents of zip file to ./vids/ directory
zipObj.extractall(path = "./vids/")
os.remove("vids.zip") |
py | 7dff62bd8926e3d61ade884856a638c55e9de5ef | import matplotlib.pyplot as plt
import pickle
def plotter(train_name, validation_name):
with open(train_name, 'rb') as fp:
train_data = pickle.load(fp)
with open(validation_name, 'rb') as fp:
validation_data = pickle.load(fp)
X = [i for i in range(0, len(train_data))]
train_loss = [i[0] for i in train_data]
train_acc = [i[1] for i in train_data]
val_loss = [i[0] for i in validation_data]
val_acc = [i[1] for i in validation_data]
plt.scatter(X, train_loss, s=12, label='Training Loss', marker='x')
plt.scatter(X, val_loss, s=6, label='Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
# fig, ax = plt.subplots()
# ax.ticklabel_format(useOffset=False)
# ax.scatter(X, train_loss, s=12, label='Training Loss', marker='x')
# ax.scatter(X, val_loss, s=6, label='Validation loss')
# plt.xlabel('Epoch')
# plt.ylabel('Loss')
# plt.legend()
# plt.show()
plt.scatter(X, train_acc, s=12, label='Training Accuracy', marker='x')
plt.scatter(X, val_acc, s=6, label='Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
print(train_loss)
print(val_loss)
if __name__ == '__main__':
train_file = 'data/test_name_training'
validation_file = 'data/test_name_validation'
plotter(train_file, validation_file)
|
py | 7dff63d9317f4a5ee8c5eedb9535eb47227ad8ad | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 5, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 0); |
py | 7dff644284dbe1043fcad0521942c79b9df2ca5a | import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='',
routing_key='task_queue',
body=message,
properties=pika.BasicProperties(
delivery_mode = 2, # make message persistent
))
print(f" [x] Sent {message}")
connection.close() |
py | 7dff6463df6e89cd8c8ff14afb814a9b896e33ca | #test.py
|
py | 7dff646faa6b4248bea35be55cd917172bcdd2d7 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File        :encryption_utils.py
@Description :
@Time        :2020/07/14 10:18:01
@Author      :Riven
@Version     :1.0.0
'''
import base64
import hashlib
import sys
sys.path.append('.')
from app_server.src.utils.apr1 import hash_apr1
def md5_apr1(salt, text):
return hash_apr1(salt, text)
def sha1(text):
result = hashlib.sha1(text.encode('utf-8'))
return base64.b64encode(result.digest()).decode('utf-8')
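# Example (illustrative): sha1("password") returns the 28-character base64
# digest "W6ph5Mm5Pz8GgiULbPgzG37mj9g=" (base64 of the raw 20-byte SHA-1 hash).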
if __name__ == '__main__':
print(__file__) |
py | 7dff6536084edc8a09c5c9d15c3e4bfd25863316 | #!/usr/bin/env jython
import os
import logging
import shutil
import ConfigParser
from utils.singleton import singleton
log = logging.getLogger('kahuna')
log.setLevel(logging.INFO)
class ConfigLoader:
""" Loads configuration files from a given location """
def __init__(self, basedir="kahuna"):
""" Initializes the ConfigLoader """
self.user_dir = os.environ['HOME'] + "/." + basedir
self.sys_dir = "/etc/" + basedir
def load(self, file_name, default=None):
""" Loads the given configuration file ftom the default locations """
user_config = self.user_dir + "/" + file_name
sys_config = self.sys_dir + "/" + file_name
config_found = user_config
# User config has precedence, then system
files = [user_config, sys_config]
for file in files:
if os.path.exists(file):
log.debug("Config found in %s" % file)
config_found = file
break
if not os.path.exists(config_found):
# Fail if config file is not found and do not
# want to create the default one
if not default:
raise IOError("Configuration file not found. " +
"Please, make sure that %s or %s exist" % (user_config,
sys_config))
            # Create the default config file if necessary
log.warn(("Kahuna config file not found. "
"Creating the default one to %s") % user_config)
if not os.path.isdir(self.user_dir):
os.makedirs(self.user_dir)
shutil.copy(default, user_config)
config_found = user_config
config = ConfigParser.SafeConfigParser()
config.read(config_found)
return config
@singleton
class Config:
""" Main configuration """
def __init__(self):
config = ConfigLoader().load("kahuna.conf", "config/kahuna.conf")
# Connection
self.address = config.get("connection", "address")
self.user = config.get("connection", "user")
self.password = config.get("connection", "pass")
# Logging
try:
self.loglevel = config.get("logging", "level")
level = logging._levelNames[self.loglevel.upper()]
log.setLevel(level)
except ConfigParser.NoOptionError:
# Ignore errors if no logging level has been defined
pass
# Client
self.client_config = []
if config.has_section("client"):
self.client_config.extend(config.items("client"))
for (name, value) in self.client_config:
log.debug("Set %s to %s" % (name, value))
|
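For reference, a minimal sketch of the kahuna.conf file the Config class above reads; the section and option names come from the config.get(...) calls in the code, while every value is a placeholder:

[connection]
address = http://localhost/api
user = admin
pass = secret

[logging]
level = info

[client]
# optional key/value pairs, passed through to the client as-is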
py | 7dff653fde7f7397781fb01bae28b16834e4f6ef | # -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.violations.consistency import (
ConsecutiveYieldsViolation,
)
from wemake_python_styleguide.visitors.ast.keywords import (
GeneratorKeywordsVisitor,
)
# Correct:
simple_yield = """
def some():
yield 1
"""
conditional_yield1 = """
def some():
yield 1
if some:
yield 2
"""
conditional_yield2 = """
def some():
if some:
yield 1
yield 2
"""
separated_yield1 = """
def some():
yield 1
print('---')
yield 2
"""
separated_yield2 = """
def some():
yield 1
print('---')
yield 2
print('---')
yield 3
"""
yield_with_yield_from1 = """
def some():
yield 1
yield from (2, 3)
"""
yield_with_yield_from2 = """
def some():
yield from (1, 2)
yield 3
"""
# Wrong:
wrong_yield1 = """
def some():
yield 1
yield 2
"""
wrong_yield2 = """
def some():
yield 1
yield 2
yield 3
"""
wrong_yield3 = """
def some():
if some:
yield 1
yield 2
yield 3
"""
wrong_yield4 = """
def some():
if some:
yield 1
yield 2
yield 3
"""
wrong_yield5 = """
def some():
if some:
yield 1
yield 2
yield 3
"""
@pytest.mark.parametrize('code', [
simple_yield,
conditional_yield1,
conditional_yield2,
    separated_yield1,
    separated_yield2,
])
def test_yield_correct(
assert_errors,
parse_ast_tree,
code,
mode,
default_options,
):
"""Ensure that `yield` can be used correctly."""
tree = parse_ast_tree(mode(code))
visitor = GeneratorKeywordsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
yield_with_yield_from1,
yield_with_yield_from2,
])
def test_yield_correct_sync(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Ensure that `yield` can be used correctly."""
tree = parse_ast_tree(code)
visitor = GeneratorKeywordsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
wrong_yield1,
wrong_yield2,
wrong_yield3,
wrong_yield4,
wrong_yield5,
])
def test_yield_incorrect(
assert_errors,
parse_ast_tree,
code,
mode,
default_options,
):
"""Ensure that `yield` cannot follow the same node."""
tree = parse_ast_tree(mode(code))
visitor = GeneratorKeywordsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ConsecutiveYieldsViolation])
|
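As a companion to the fixtures above, a small sketch of the refactor that ConsecutiveYieldsViolation nudges towards, mirroring the yield_with_yield_from cases the tests accept: back-to-back yields collapse into a single yield from over a tuple.

def reported():  # consecutive yields, as in wrong_yield2
    yield 1
    yield 2
    yield 3

def accepted():  # equivalent generator without consecutive yields
    yield from (1, 2, 3)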
py | 7dff669d85fb41e30886e23a1205566f2f8fe6e1 | __version__ = '2.9.0' |
py | 7dff67565f33a6d028387f77b8122477bb320110 | import collections.abc as cabc
from functools import singledispatch
from types import MappingProxyType
from typing import Any, Union, Optional, Iterable, Dict, Mapping
import pandas as pd
from anndata import AnnData
from ..get import rank_genes_groups_df
from .._utils import _doc_params
_doc_org = """\
org
Organism to query. Must be an organism in ensembl biomart. "hsapiens",
"mmusculus", "drerio", etc.\
"""
_doc_host = """\
host
A valid BioMart host URL. Alternative values include archive urls (like
"grch37.ensembl.org") or regional mirrors (like "useast.ensembl.org").\
"""
_doc_use_cache = """\
use_cache
Whether pybiomart should use a cache for requests. Will create a
`.pybiomart.sqlite` file in current directory if used.\
"""
@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
def simple_query(
org: str,
attrs: Union[Iterable[str], str],
*,
filters: Optional[Dict[str, Any]] = None,
host: str = "www.ensembl.org",
use_cache: bool = False,
) -> pd.DataFrame:
"""\
A simple interface to biomart.
Params
------
{doc_org}
attrs
What you want returned.
filters
What you want to pick out.
{doc_host}
{doc_use_cache}
"""
if isinstance(attrs, str):
attrs = [attrs]
elif isinstance(attrs, cabc.Iterable):
attrs = list(attrs)
else:
raise TypeError(f"attrs must be of type list or str, was {type(attrs)}.")
try:
from pybiomart import Server
except ImportError:
raise ImportError(
"This method requires the `pybiomart` module to be installed."
)
server = Server(host, use_cache=use_cache)
dataset = server.marts["ENSEMBL_MART_ENSEMBL"].datasets[
"{}_gene_ensembl".format(org)
]
res = dataset.query(attributes=attrs, filters=filters, use_attr_names=True)
return res
@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
def biomart_annotations(
org: str,
attrs: Iterable[str],
*,
host: str = "www.ensembl.org",
use_cache: bool = False,
) -> pd.DataFrame:
"""\
Retrieve gene annotations from ensembl biomart.
Parameters
----------
{doc_org}
attrs
Attributes to query biomart for.
{doc_host}
{doc_use_cache}
Returns
-------
Dataframe containing annotations.
Examples
--------
Retrieve genes coordinates and chromosomes
>>> import scanpy as sc
>>> annot = sc.queries.biomart_annotations(
"hsapiens",
["ensembl_gene_id", "start_position", "end_position", "chromosome_name"],
).set_index("ensembl_gene_id")
>>> adata.var[annot.columns] = annot
"""
return simple_query(org=org, attrs=attrs, host=host, use_cache=use_cache)
@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
def gene_coordinates(
org: str,
gene_name: str,
*,
gene_attr: str = "external_gene_name",
chr_exclude: Iterable[str] = (),
host: str = "www.ensembl.org",
use_cache: bool = False,
) -> pd.DataFrame:
"""\
Retrieve gene coordinates for specific organism through BioMart.
Parameters
----------
{doc_org}
gene_name
The gene symbol (e.g. "hgnc_symbol" for human) for which to retrieve
coordinates.
gene_attr
The biomart attribute the gene symbol should show up for.
chr_exclude
A list of chromosomes to exclude from query.
{doc_host}
{doc_use_cache}
Returns
-------
Dataframe containing gene coordinates for the specified gene symbol.
Examples
--------
>>> import scanpy as sc
>>> sc.queries.gene_coordinates("hsapiens", "MT-TF")
"""
res = simple_query(
org=org,
attrs=["chromosome_name", "start_position", "end_position"],
filters={gene_attr: gene_name},
host=host,
use_cache=use_cache,
)
return res[~res["chromosome_name"].isin(chr_exclude)]
@_doc_params(doc_org=_doc_org, doc_host=_doc_host, doc_use_cache=_doc_use_cache)
def mitochondrial_genes(
org: str,
*,
attrname: str = "external_gene_name",
host: str = "www.ensembl.org",
use_cache: bool = False,
chromosome: str = "MT",
) -> pd.DataFrame:
"""\
Mitochondrial gene symbols for specific organism through BioMart.
Parameters
----------
{doc_org}
attrname
Biomart attribute field to return. Possible values include
"external_gene_name", "ensembl_gene_id", "hgnc_symbol", "mgi_symbol",
and "zfin_id_symbol".
{doc_host}
{doc_use_cache}
chromosome
        Mitochondrial chromosome name used in BioMart for the organism.
Returns
-------
Dataframe containing identifiers for mitochondrial genes.
Examples
--------
>>> import scanpy as sc
>>> mito_gene_names = sc.queries.mitochondrial_genes("hsapiens")
>>> mito_ensembl_ids = sc.queries.mitochondrial_genes("hsapiens", attrname="ensembl_gene_id")
>>> mito_gene_names_fly = sc.queries.mitochondrial_genes("dmelanogaster", chromosome="mitochondrion_genome")
"""
return simple_query(
org,
attrs=[attrname],
filters={"chromosome_name": [chromosome]},
host=host,
use_cache=use_cache,
)
@singledispatch
@_doc_params(doc_org=_doc_org)
def enrich(
container: Union[Iterable[str], Mapping[str, Iterable[str]]],
*,
org: str = "hsapiens",
gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
) -> pd.DataFrame:
"""\
Get enrichment for DE results.
This is a thin convenience wrapper around the very useful gprofiler_.
This method dispatches on the first argument, leading to the following two
signatures::
enrich(container, ...)
enrich(adata: AnnData, group, key: str, ...)
Where::
enrich(adata, group, key, ...) = enrich(adata.uns[key]["names"][group], ...)
.. _gprofiler: https://pypi.org/project/gprofiler-official/#description
Parameters
----------
container
Contains list of genes you'd like to search. If container is a `dict` all
enrichment queries are made at once.
adata
AnnData object whose group will be looked for.
group
The group whose genes should be used for enrichment.
key
Key in `uns` to find group under.
{doc_org}
gprofiler_kwargs
Keyword arguments to pass to `GProfiler.profile`, see gprofiler_. Some
useful options are `no_evidences=False` which reports gene intersections,
`sources=['GO:BP']` which limits gene sets to only GO biological processes and
`all_results=True` which returns all results including the non-significant ones.
**kwargs
All other keyword arguments are passed to `sc.get.rank_genes_groups_df`. E.g.
pval_cutoff, log2fc_min.
Returns
-------
Dataframe of enrichment results.
Examples
--------
Using `sc.queries.enrich` on a list of genes:
>>> import scanpy as sc
>>> sc.queries.enrich(['KLF4', 'PAX5', 'SOX2', 'NANOG'], org="hsapiens")
>>> sc.queries.enrich({{'set1':['KLF4', 'PAX5'], 'set2':['SOX2', 'NANOG']}}, org="hsapiens")
Using `sc.queries.enrich` on an :class:`anndata.AnnData` object:
>>> pbmcs = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(pbmcs, "bulk_labels")
>>> sc.queries.enrich(pbmcs, "CD34+")
"""
try:
from gprofiler import GProfiler
except ImportError:
raise ImportError(
"This method requires the `gprofiler-official` module to be installed."
)
gprofiler = GProfiler(user_agent="scanpy", return_dataframe=True)
gprofiler_kwargs = dict(gprofiler_kwargs)
for k in ["organism"]:
if gprofiler_kwargs.get(k) is not None:
raise ValueError(
f"Argument `{k}` should be passed directly through `enrich`, "
"not through `gprofiler_kwargs`"
)
return gprofiler.profile(container, organism=org, **gprofiler_kwargs)
@enrich.register(AnnData)
def _enrich_anndata(
adata: AnnData,
group: str,
*,
org: Optional[str] = "hsapiens",
key: str = "rank_genes_groups",
pval_cutoff: float = 0.05,
log2fc_min: Optional[float] = None,
log2fc_max: Optional[float] = None,
gene_symbols: Optional[str] = None,
gprofiler_kwargs: Mapping[str, Any] = MappingProxyType({}),
) -> pd.DataFrame:
de = rank_genes_groups_df(
adata,
group=group,
key=key,
pval_cutoff=pval_cutoff,
log2fc_min=log2fc_min,
log2fc_max=log2fc_max,
gene_symbols=gene_symbols,
)
if gene_symbols is not None:
gene_list = list(de[gene_symbols].dropna())
else:
gene_list = list(de["names"].dropna())
return enrich(gene_list, org=org, gprofiler_kwargs=gprofiler_kwargs)
|
py | 7dff67b112d3bc2354e1df39ba2e31bd8f73d6f6 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2015, Lars Asplund [email protected]
import unittest
from os.path import join, dirname
from vunit.ui import VUnit
from common import has_modelsim, check_report
from fnmatch import fnmatch
@unittest.skipUnless(has_modelsim(), "Requires modelsim")
class TestVunitEndToEnd(unittest.TestCase):
def setUp(self):
# Spaces in path intentional to verify that it is supported
self.output_path = join(dirname(__file__), "end to end out")
self.report_file = join(self.output_path, "xunit.xml")
def test_artificial_with_persistent(self):
self._test_artificial(persistent_sim=True)
def test_artificial(self):
self._test_artificial(persistent_sim=False)
def _test_artificial(self, persistent_sim):
self.run_ui_main(persistent_sim=persistent_sim)
check_report(self.report_file, [
("passed", "lib.tb_pass"),
("failed", "lib.tb_fail"),
("failed", "lib.tb_no_finished_signal"),
("passed", "lib.tb_infinite_events"),
("failed", "lib.tb_fail_on_warning"),
("passed", "lib.tb_no_fail_on_warning"),
("passed", "lib.tb_two_architectures.pass"),
("failed", "lib.tb_two_architectures.fail"),
("passed", "lib.tb_with_vhdl_runner.pass"),
("passed", "lib.tb_with_vhdl_runner.Test with spaces"),
("failed", "lib.tb_with_vhdl_runner.fail"),
("failed", "lib.tb_with_vhdl_runner.Test that timeouts"),
("passed", "lib.tb_magic_paths"),
# @TODO verify that these are actually run in separate simulations
("passed", "lib.tb_same_sim_all_pass.Test 1"),
("passed", "lib.tb_same_sim_all_pass.Test 2"),
("passed", "lib.tb_same_sim_all_pass.Test 3"),
("passed", "lib.tb_same_sim_some_fail.Test 1"),
("failed", "lib.tb_same_sim_some_fail.Test 2"),
("skipped", "lib.tb_same_sim_some_fail.Test 3"),
("passed", "lib.tb_with_checks.Test passing check"),
("failed", "lib.tb_with_checks.Test failing check"),
("failed", "lib.tb_with_checks.Test non-stopping failing check")])
def test_run_selected_tests_in_same_sim_test_bench(self):
self.run_ui_main(["*same_sim_some_fail*Test 1*"])
check_report(self.report_file, [
("passed", "lib.tb_same_sim_some_fail.Test 1")])
self.run_ui_main(["*same_sim_some_fail*Test 2*"])
check_report(self.report_file, [
("failed", "lib.tb_same_sim_some_fail.Test 2")])
self.run_ui_main(["*same_sim_some_fail*Test 3*"])
check_report(self.report_file, [
("passed", "lib.tb_same_sim_some_fail.Test 3")])
self.run_ui_main(["*same_sim_some_fail*Test 2*",
"*same_sim_some_fail*Test 3*"])
check_report(self.report_file, [
("failed", "lib.tb_same_sim_some_fail.Test 2"),
("skipped", "lib.tb_same_sim_some_fail.Test 3")])
def test_compile_verilog(self):
verilog_path = join(dirname(__file__), "verilog")
ui = VUnit(verbose=True,
clean=True,
output_path=self.output_path,
xunit_xml=self.report_file,
compile_only=True)
ui.add_library("lib")
ui.add_source_files(join(verilog_path, "*.v"), "lib")
ui.add_source_files(join(verilog_path, "*.sv"), "lib")
try:
ui.main()
except SystemExit as e:
self.assertEqual(e.code, 0)
def create_ui(self, test_patterns=None, persistent_sim=True):
vhdl_path = join(dirname(__file__), "vhdl")
ui = VUnit(verbose=True,
clean=True,
test_filter=make_test_filter(test_patterns),
output_path=self.output_path,
xunit_xml=self.report_file,
persistent_sim=persistent_sim)
ui.add_library("lib")
ui.add_source_files(join(vhdl_path, "*.vhd"), "lib")
return ui
def run_ui_main(self, test_patterns=None, persistent_sim=True):
ui = self.create_ui(test_patterns, persistent_sim)
try:
ui.main()
except SystemExit:
del ui
def make_test_filter(patterns):
def test_filter(name):
        if patterns is None:
return True
return any(fnmatch(name, pattern) for pattern in patterns)
return test_filter
|
py | 7dff67e0b4b405dc353df08866e75f4cd5d4a651 | """
jlab_ext_example setup
"""
import json
from pathlib import Path
from jupyter_packaging import (
create_cmdclass,
install_npm,
ensure_targets,
combine_commands,
skip_if_exists,
)
import setuptools
HERE = Path(__file__).parent.resolve()
# The name of the project
name = "jlab_ext_example"
lab_path = HERE / name / "labextension"
# Representative files that should exist after a successful build
jstargets = [
str(lab_path / "package.json"),
]
package_data_spec = {name: ["*"]}
labext_name = "@jupyterlab-examples/server-extension"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, str(lab_path), "**"),
("share/jupyter/labextensions/%s" % labext_name, str(HERE), "install.json"),
("etc/jupyter/jupyter_notebook_config.d", "jupyter-config/jupyter_notebook_config.d", "jlab_ext_example.json"),
("etc/jupyter/jupyter_server_config.d", "jupyter-config/jupyter_server_config.d", "jlab_ext_example.json"),
]
cmdclass = create_cmdclass(
"jsdeps", package_data_spec=package_data_spec, data_files_spec=data_files_spec
)
js_command = combine_commands(
install_npm(HERE, build_cmd="build:prod", npm=["jlpm"]),
ensure_targets(jstargets),
)
is_repo = (HERE / ".git").exists()
if is_repo:
cmdclass["jsdeps"] = js_command
else:
cmdclass["jsdeps"] = skip_if_exists(jstargets, js_command)
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((HERE / "package.json").read_bytes())
setup_args = dict(
name=name,
version=pkg_json["version"],
url=pkg_json["homepage"],
author=pkg_json["author"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
cmdclass=cmdclass,
packages=setuptools.find_packages(),
install_requires=[
"jupyterlab~=3.0",
],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
|
py | 7dff682cc7aac1a8f193c871f81a3938a193fd73 | import command
import hook
import sys
import utils
import xcbq
class Key:
"""
Defines a keybinding.
"""
def __init__(self, modifiers, key, *commands):
"""
- modifiers: A list of modifier specifications. Modifier
specifications are one of: "shift", "lock", "control", "mod1",
"mod2", "mod3", "mod4", "mod5".
- key: A key specification, e.g. "a", "Tab", "Return", "space".
- *commands: A list of lazy command objects generated with the
command.lazy helper. If multiple Call objects are specified, they
are run in sequence.
"""
self.modifiers = modifiers
self.key = key
self.commands = commands
if key not in xcbq.keysyms:
raise utils.QtileError("Unknown key: %s" % key)
self.keysym = xcbq.keysyms[key]
try:
self.modmask = utils.translateMasks(self.modifiers)
except KeyError, v:
raise utils.QtileError(v)
def __repr__(self):
return "Key(%s, %s)" % (self.modifiers, self.key)
class Drag(object):
"""
Defines binding of a mouse to some dragging action
On each motion event command is executed
with two extra parameters added
x and y offset from previous move
It focuses clicked window by default
If you want to prevent it pass focus=None as an argument
"""
def __init__(self, modifiers, button, *commands, **kwargs):
self.start = kwargs.get("start", None)
self.focus = kwargs.get("focus", "before")
self.modifiers = modifiers
self.button = button
self.commands = commands
try:
self.button_code = int(self.button.replace('Button', ''))
self.modmask = utils.translateMasks(self.modifiers)
except KeyError, v:
raise utils.QtileError(v)
def __repr__(self):
return "Drag(%s, %s)" % (self.modifiers, self.button)
class Click(object):
"""
Defines binding of a mouse click
It focuses clicked window by default
If you want to prevent it pass focus=None as an argument
"""
def __init__(self, modifiers, button, *commands, **kwargs):
self.focus = kwargs.get("focus", "before")
self.modifiers = modifiers
self.button = button
self.commands = commands
try:
self.button_code = int(self.button.replace('Button', ''))
self.modmask = utils.translateMasks(self.modifiers)
except KeyError, v:
raise utils.QtileError(v)
def __repr__(self):
return "Click(%s, %s)" % (self.modifiers, self.button)
class ScreenRect(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
def __repr__(self):
return '<%s %d,%d %d,%d>' % (
self.__class__.__name__,
self.x, self.y,
self.width, self.height
)
def hsplit(self, columnwidth):
assert columnwidth > 0
assert columnwidth < self.width
return (
self.__class__(self.x, self.y, columnwidth, self.height),
self.__class__(
self.x + columnwidth, self.y,
self.width - columnwidth, self.height
)
)
def vsplit(self, rowheight):
assert rowheight > 0
assert rowheight < self.height
return (
self.__class__(self.x, self.y, self.width, rowheight),
self.__class__(
self.x, self.y + rowheight,
self.width, self.height - rowheight
)
)
class Screen(command.CommandObject):
"""
A physical screen, and its associated paraphernalia.
"""
def __init__(self, top=None, bottom=None, left=None, right=None,
x=None, y=None, width=None, height=None):
"""
- top, bottom, left, right: Instances of bar objects, or None.
Note that bar.Bar objects can only be placed at the top or the
bottom of the screen (bar.Gap objects can be placed anywhere).
x,y,width and height aren't specified usually unless you are
using 'fake screens'.
"""
self.group = None
self.previous_group = None
self.top = top
self.bottom = bottom
self.left = left
self.right = right
self.qtile = None
self.index = None
# x position of upper left corner can be > 0
# if one screen is "right" of the other
self.x = x
self.y = y
self.width = width
self.height = height
def _configure(self, qtile, index, x, y, width, height, group):
self.qtile = qtile
self.index = index
self.x = x
self.y = y
self.width = width
self.height = height
self.setGroup(group)
for i in self.gaps:
i._configure(qtile, self)
@property
def gaps(self):
lst = []
lst.extend([
i for i in [self.top, self.bottom, self.left, self.right] if i
])
return lst
@property
def dx(self):
return self.x + self.left.size if self.left else self.x
@property
def dy(self):
return self.y + self.top.size if self.top else self.y
@property
def dwidth(self):
val = self.width
if self.left:
val -= self.left.size
if self.right:
val -= self.right.size
return val
@property
def dheight(self):
val = self.height
if self.top:
val -= self.top.size
if self.bottom:
val -= self.bottom.size
return val
def get_rect(self):
return ScreenRect(self.dx, self.dy, self.dwidth, self.dheight)
def setGroup(self, new_group):
"""
Put group on this screen
"""
if new_group.screen == self:
return
self.previous_group = self.group
if new_group is None:
return
if new_group.screen:
# g1 <-> s1 (self)
# g2 (new_group) <-> s2 to
# g1 <-> s2
# g2 <-> s1
g1 = self.group
s1 = self
g2 = new_group
s2 = new_group.screen
s2.group = g1
g1._setScreen(s2)
s1.group = g2
g2._setScreen(s1)
else:
old_group = self.group
self.group = new_group
# display clients of the new group and then hide from old group
# to remove the screen flickering
new_group._setScreen(self)
if old_group is not None:
old_group._setScreen(None)
hook.fire("setgroup")
hook.fire("focus_change")
hook.fire(
"layout_change",
self.group.layouts[self.group.currentLayout],
self.group
)
def _items(self, name):
if name == "layout":
return (True, range(len(self.group.layouts)))
elif name == "window":
return (True, [i.window.wid for i in self.group.windows])
elif name == "bar":
return (False, [x.position for x in self.gaps])
def _select(self, name, sel):
if name == "layout":
if sel is None:
return self.group.layout
else:
return utils.lget(self.group.layouts, sel)
elif name == "window":
if sel is None:
return self.group.currentWindow
else:
for i in self.group.windows:
if i.window.wid == sel:
return i
elif name == "bar":
return getattr(self, sel)
def resize(self, x=None, y=None, w=None, h=None):
x = x or self.x
y = y or self.y
w = w or self.width
h = h or self.height
self._configure(self.qtile, self.index, x, y, w, h, self.group)
for bar in [self.top, self.bottom, self.left, self.right]:
if bar:
bar.draw()
self.group.layoutAll()
def cmd_info(self):
"""
Returns a dictionary of info for this screen.
"""
return dict(
index=self.index,
width=self.width,
height=self.height,
x=self.x,
y=self.y
)
def cmd_resize(self, x=None, y=None, w=None, h=None):
"""
Resize the screen.
"""
self.resize(x, y, w, h)
def cmd_nextgroup(self, skip_empty=False, skip_managed=False):
"""
Switch to the next group.
"""
n = self.group.nextGroup(skip_empty, skip_managed)
self.setGroup(n)
return n.name
def cmd_prevgroup(self, skip_empty=False, skip_managed=False):
"""
Switch to the previous group.
"""
n = self.group.prevGroup(skip_empty, skip_managed)
self.setGroup(n)
return n.name
def cmd_togglegroup(self, groupName=None):
"""
Switch to the selected group or to the previously active one.
"""
group = self.qtile.groupMap.get(groupName)
if group in (self.group, None):
group = self.previous_group
self.setGroup(group)
class Group(object):
"""
Represents a "dynamic" group. These groups can spawn apps, only allow
certain Matched windows to be on them, hide when they're not in use, etc.
"""
def __init__(self, name, matches=None, exclusive=False,
spawn=None, layout=None, layouts=None, persist=True, init=True,
layout_opts=None, screen_affinity=None, position=sys.maxint):
"""
:param name: the name of this group
:type name: string
:param matches: list of ``Match`` objects whose windows will be assigned to this group
:type matches: default ``None``
:param exclusive: when other apps are started in this group, should we allow them here or not?
:type exclusive: boolean
:param spawn: this will be ``exec()`` d when the group is created
:type spawn: string
:param layout: the default layout for this group (e.g. 'max' or 'stack')
:type layout: string
:param layouts: the group layouts list overriding global layouts
:type layouts: list
:param persist: should this group stay alive with no member windows?
:type persist: boolean
:param init: is this group alive when qtile starts?
:type init: boolean
:param position: group position
:type position: int
"""
self.name = name
self.exclusive = exclusive
self.spawn = spawn
self.layout = layout
self.layouts = layouts or []
self.persist = persist
self.init = init
self.matches = matches or []
self.layout_opts = layout_opts or {}
self.screen_affinity = screen_affinity
self.position = position
class Match(object):
"""
Match for dynamic groups
It can match by title, class or role.
"""
def __init__(self, title=None, wm_class=None, role=None, wm_type=None,
wm_instance_class=None, net_wm_pid=None):
"""
``Match`` supports both regular expression objects (i.e. the result of
``re.compile()``) or strings (match as a "include" match). If a window
matches any of the things in any of the lists, it is considered a
match.
:param title: things to match against the title (WM_NAME)
:param wm_class: things to match against the second string in
WM_CLASS atom
:param role: things to match against the WM_ROLE atom
:param wm_type: things to match against the WM_TYPE atom
:param wm_instance_class: things to match against the first string in
WM_CLASS atom
:param net_wm_pid: things to match against the _NET_WM_PID atom
(only int allowed in this rule)
"""
if not title:
title = []
if not wm_class:
wm_class = []
if not role:
role = []
if not wm_type:
wm_type = []
if not wm_instance_class:
wm_instance_class = []
if not net_wm_pid:
net_wm_pid = []
try:
net_wm_pid = map(int, net_wm_pid)
except ValueError:
error = 'Invalid rule for net_wm_pid: "%s" '\
'only ints allowed' % str(net_wm_pid)
raise utils.QtileError(error)
self._rules = [('title', t) for t in title]
self._rules += [('wm_class', w) for w in wm_class]
self._rules += [('role', r) for r in role]
self._rules += [('wm_type', r) for r in wm_type]
self._rules += [('wm_instance_class', w) for w in wm_instance_class]
self._rules += [('net_wm_pid', w) for w in net_wm_pid]
def compare(self, client):
for _type, rule in self._rules:
if _type == "net_wm_pid":
match_func = lambda value: rule == value
else:
match_func = getattr(rule, 'match', None) or \
getattr(rule, 'count')
if _type == 'title':
value = client.name
elif _type == 'wm_class':
value = None
_value = client.window.get_wm_class()
if _value and len(_value) > 1:
value = _value[1]
elif _type == 'wm_instance_class':
value = client.window.get_wm_class()
if value:
value = value[0]
elif _type == 'wm_type':
value = client.window.get_wm_type()
elif _type == 'net_wm_pid':
value = client.window.get_net_wm_pid()
else:
value = client.window.get_wm_window_role()
if value and match_func(value):
return True
return False
def map(self, callback, clients):
""" Apply callback to each client that matches this Match """
for c in clients:
if self.compare(c):
callback(c)
class Rule(object):
"""
A Rule contains a Match object, and a specification about what to do
when that object is matched.
"""
def __init__(self, match, group=None, float=False, intrusive=False,
break_on_match=True):
"""
:param match: ``Match`` object associated with this ``Rule``
:param float: auto float this window?
:param intrusive: override the group's exclusive setting?
:param break_on_match: Should we stop applying rules if this rule is
matched?
"""
self.match = match
self.group = group
self.float = float
self.intrusive = intrusive
self.break_on_match = break_on_match
def matches(self, w):
return self.match.compare(w)
|
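A hedged sketch of how these classes are typically wired together in a user's config file; the lazy import path and the specific lazy commands are assumptions based on qtile's usual API of this era rather than anything defined above:

from libqtile.command import lazy
from libqtile.config import Click, Drag, Group, Key, Match, Screen

mod = "mod4"
keys = [
    Key([mod], "Return", lazy.spawn("xterm")),
    Key([mod, "shift"], "q", lazy.shutdown()),
]
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Click([mod], "Button2", lazy.window.bring_to_front()),
]
groups = [
    Group("www", matches=[Match(wm_class=["Firefox"])]),
    Group("term"),
]
screens = [Screen()]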
py | 7dff68324a3e14ffb0c8b435c43999e32eb4ab2d | import discord
import pytz
from discord import TextChannel
from discord.ext import commands
from pytz import UnknownTimeZoneError
from Cogs.BaseCog import BaseCog
from Util import Configuration, Permissioncheckers, Emoji, Translator, Features, Utils, Confirmation, Pages, \
MessageUtils, Selfroles
from Util.Converters import LoggingChannel, ListMode
class ServerHolder(object):
sid = None
name = None
def __init__(self, sid):
self.id = sid
self.name = sid
async def add_item(ctx, item, item_type, list_name="roles", config_section="PERMISSIONS"):
target = f"{item_type}_{list_name}".upper()
roles = Configuration.get_var(ctx.guild.id, config_section, target)
sname = list_name[:-1] if list_name[-1:] == "s" else list_name
if item == ctx.guild.default_role:
return await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate(f'default_role_forbidden', ctx)}")
if item.id in roles:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate(f'already_{item_type}_{sname}', ctx, item=item.name)}")
else:
roles.append(item.id)
Configuration.save(ctx.guild.id)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate(f'{item_type}_{sname}_added', ctx, item=item.name)}")
async def remove_item(ctx, item, item_type, list_name="roles", config_section="PERMISSIONS"):
target = f"{item_type}_{list_name}".upper()
roles = Configuration.get_var(ctx.guild.id, config_section, target)
sname = list_name[:-1] if list_name[-1:] == "s" else list_name
if item.id not in roles:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate(f'was_no_{item_type}_{sname}', ctx, item=item.name)}")
else:
roles.remove(item.id)
Configuration.save(ctx.guild.id)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate(f'{item_type}_{sname}_removed', ctx, item=item.name)}")
async def list_list(ctx, item_type, list_name="roles", wrapper="<@&{item}>", config_section="PERMISSIONS"):
target = f"{item_type}_{list_name}".upper()
items = Configuration.get_var(ctx.guild.id, config_section, target)
if len(items) == 0:
desc = Translator.translate(f"no_{item_type}_{list_name}", ctx)
else:
desc = "\n".join(wrapper.format(item=item) for item in items)
embed = discord.Embed(title=Translator.translate(f"current_{item_type}_{list_name}", ctx), description=desc)
await ctx.send(embed=embed)
def gen_override_strings(ctx, perm_dict, prefix = ""):
output = ""
for command, d in perm_dict["commands"].items():
lvl = d["required"]
if lvl > -1:
output += f"{prefix} {command}: {lvl} ({Translator.translate(f'perm_lvl_{lvl}', ctx)})\n"
if len(d["commands"].keys()) > 0:
output += gen_override_strings(ctx, d, f"{prefix} {command}")
return output
class ServerAdmin(BaseCog):
LOGGING_TYPES = [
"RAID_LOGS",
"CENSORED_MESSAGES",
"MOD_ACTIONS",
"CHANNEL_CHANGES",
"ROLE_CHANGES",
"MISC",
"TRAVEL_LOGS",
"NAME_CHANGES",
"MESSAGE_LOGS",
"VOICE_CHANGES_DETAILED",
"VOICE_CHANGES",
"SPAM_VIOLATION",
"CONFIG_CHANGES",
"FUTURE_LOGS"
]
def __init__(self, bot):
super().__init__(bot)
bot.to_cache = []
Pages.register("censor_list", self._censorlist_init, self._censorklist_update)
Pages.register("word_censor_list", self._word_censorlist_init, self._word_censor_list_update)
@commands.guild_only()
@commands.group(aliases = ["config", "cfg"])
async def configure(self, ctx:commands.Context):
"""configure_help"""
if ctx.subcommand_passed is None:
await ctx.send("See the subcommands (!help configure) for configurations.")
@configure.command()
async def prefix(self, ctx:commands.Context, *, new_prefix:str = None):
"""configure_prefix_help"""
if new_prefix is None:
await ctx.send(f"{Translator.translate('current_server_prefix', ctx, prefix=Configuration.get_var(ctx.guild.id, 'GENERAL', 'PREFIX'))}")
elif len(new_prefix) > 25:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('prefix_too_long', ctx)}")
else:
Configuration.set_var(ctx.guild.id, "GENERAL", "PREFIX", new_prefix)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('prefix_set', ctx, new_prefix=new_prefix)}")
@configure.group(aliases=["adminroles"])
async def admin_roles(self, ctx: commands.Context):
"""configure_admin_roles_help"""
if ctx.invoked_subcommand is None:
await list_list(ctx, 'admin')
@admin_roles.command(name="add")
async def add_admin_role(self, ctx, *, role:discord.Role):
await add_item(ctx, role, 'admin')
@admin_roles.command(name="remove")
async def remove_admin_role(self, ctx, *, role: discord.Role):
await remove_item(ctx, role, 'admin')
@configure.group(aliases=["modroles"])
async def mod_roles(self, ctx: commands.Context):
"""configure_mod_roles_help"""
if ctx.invoked_subcommand is None:
await list_list(ctx, 'mod')
@mod_roles.command(name="add")
async def add_mod_role(self, ctx, *, role: discord.Role):
await add_item(ctx, role, 'mod')
@mod_roles.command(name="remove")
async def remove_mod_role(self, ctx, *, role: discord.Role):
await remove_item(ctx, role, 'mod')
@configure.group(aliases=["trustedroles"])
async def trusted_roles(self, ctx: commands.Context):
"""configure_trusted_roles_help"""
if ctx.invoked_subcommand is None:
await list_list(ctx, 'trusted')
@trusted_roles.command(name="add")
async def add_trusted_role(self, ctx, *, role: discord.Role):
await add_item(ctx, role, 'trusted')
@trusted_roles.command(name="remove")
async def remove_trusted_role(self, ctx, *, role: discord.Role):
await remove_item(ctx, role, 'trusted')
@configure.command(aliases=["muterole"])
async def mute_role(self, ctx:commands.Context, role:discord.Role):
"""configure_mute_help"""
if role == ctx.guild.default_role:
return await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate(f'default_role_forbidden', ctx)}")
guild:discord.Guild = ctx.guild
perms = guild.me.guild_permissions
if not perms.manage_roles:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('mute_missing_perm', ctx)}")
return
if not guild.me.top_role > role:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('mute_missing_perm', ctx, role=role.mention)}")
return
Configuration.set_var(ctx.guild.id, "ROLES", "MUTE_ROLE", int(role.id))
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('mute_role_confirmation', ctx, role=role.mention)}")
failed = []
for channel in guild.text_channels:
try:
await channel.set_permissions(role, reason=Translator.translate('mute_setup', ctx), send_messages=False, add_reactions=False)
except discord.Forbidden as ex:
failed.append(channel.mention)
for channel in guild.voice_channels:
try:
await channel.set_permissions(role, reason=Translator.translate('mute_setup', ctx), speak=False, connect=False)
except discord.Forbidden as ex:
failed.append(Translator.translate('voice_channel', ctx, channel=channel.name))
if len(failed) > 0:
message = f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('mute_setup_failures', ctx, role=role.mention)}\n"
for fail in failed:
if len(message) + len(fail) > 2000:
await ctx.send(message)
message = ""
message = message + fail
if len(message) > 0:
await ctx.send(message)
else:
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('mute_setup_complete', ctx)}")
@configure.group(aliases=["selfrole", "self_role"])
async def self_roles(self, ctx:commands.Context):
"""configure_self_roles_help"""
if ctx.invoked_subcommand is None:
await list_list(ctx, 'self', config_section="ROLES")
@self_roles.command()
async def add(self, ctx:commands.Context, *, role:discord.Role):
await add_item(ctx, role, 'self', config_section="ROLES")
Selfroles.validate_self_roles(self.bot, ctx.guild)
self.bot.dispatch("self_roles_update", ctx.guild.id)
@self_roles.command()
async def remove(self, ctx:commands.Context, *, role:discord.Role):
await remove_item(ctx, role, 'self', config_section="ROLES")
Selfroles.validate_self_roles(self.bot, ctx.guild)
self.bot.dispatch("self_roles_update", ctx.guild.id)
@configure.group()
async def allowed_invites(self, ctx: commands.Context):
"""configure_allowed_invite_list_help"""
if ctx.invoked_subcommand is None:
await list_list(ctx, "allowed", list_name="invite_list", wrapper="{item}", config_section="CENSORING")
@allowed_invites.command(name="add")
async def add_to_allowed_list(self, ctx: commands.Context, server:int):
await add_item(ctx, ServerHolder(server), "allowed", list_name="invite_list", config_section="CENSORING")
@allowed_invites.command(name="remove")
async def remove_from_allowed_list(self, ctx: commands.Context, server:int):
await remove_item(ctx, ServerHolder(server), "allowed", list_name="invite_list", config_section="CENSORING")
@configure.command(name="censortrustedbypass")
async def enable_trusted_bypass(self, ctx: commands.Context, enabled_status: bool):
config_status = Configuration.get_var(ctx.guild.id, "CENSORING", "ALLOW_TRUSTED_BYPASS")
enabled_string = "enabled" if enabled_status else "disabled"
enabled_string = Translator.translate(enabled_string, ctx.guild.id)
message = MessageUtils.assemble(ctx, "YES", "censor_trusted_bypass", status=enabled_string)
if enabled_status == config_status:
message = MessageUtils.assemble(ctx, "NO", f"censor_trusted_bypass_unchanged", status=enabled_string)
else:
Configuration.set_var(ctx.guild.id, "CENSORING", "ALLOW_TRUSTED_BYPASS", enabled_status)
await ctx.send(message)
@configure.group(aliases=["ignoredUsers"])
async def ignored_users(self, ctx):
"""configure_ignored_users_help"""
if ctx.invoked_subcommand is None:
await list_list(ctx, "ignored", "users", "<@{item}>", config_section="MESSAGE_LOGS")
@ignored_users.command(name="add")
async def addIgnoredUser(self, ctx:commands.Context, user:discord.Member):
await add_item(ctx, user, "ignored", "users", config_section="MESSAGE_LOGS")
@ignored_users.command(name="remove")
async def removeIgnoredUser(self, ctx:commands.Context, user:discord.User):
await remove_item(ctx, user, "ignored", list_name="users", config_section="MESSAGE_LOGS")
@configure.group("cog_overrides")
async def configure_cog_overrides(self, ctx):
"""cog_overrides_help"""
if ctx.invoked_subcommand is None:
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
desc = ""
for k, v in overrides.items():
lvl = v["required"]
if lvl >= 0:
desc += f"{k}: {lvl} ({Translator.translate(f'perm_lvl_{lvl}', ctx)})\n"
if desc == "":
desc = Translator.translate('no_overrides', ctx)
embed = discord.Embed(color=6008770, title=Translator.translate('cog_overrides', ctx), description=desc)
await ctx.send(embed=embed)
@configure_cog_overrides.command(name="add")
async def add_cog_override(self, ctx, cog:str, perm_lvl:int):
cog = cog
if cog in ctx.bot.cogs.keys():
cogo = ctx.bot.cogs[cog]
if cogo.permissions is None:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('core_cog_no_override', ctx, cog=cog)}")
elif perm_lvl in range(7):
min_lvl = cogo.permissions["min"]
max_lvl = cogo.permissions["max"]
if perm_lvl < min_lvl:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('cog_min_perm_violation', ctx, cog=cog, min_lvl=min_lvl, min_lvl_name=Translator.translate(f'perm_lvl_{min_lvl}', ctx))}")
elif perm_lvl > max_lvl:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate('cog_max_perm_violation', ctx, cog=cog, max_lvl=max_lvl, max_lvl_name=Translator.translate(f'perm_lvl_{max_lvl}', ctx))}")
else:
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
if cog not in overrides:
overrides[cog] = {
"required": perm_lvl,
"commands": {},
"people": []
}
else:
overrides[cog]["required"] = perm_lvl
Configuration.save(ctx.guild.id)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('cog_override_applied', ctx, cog=cog, perm_lvl=perm_lvl, perm_lvl_name=Translator.translate(f'perm_lvl_{perm_lvl}', ctx))}")
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('invalid_override_lvl', ctx)}")
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('cog_not_found', ctx)}")
@configure_cog_overrides.command(name="remove")
async def remove_cog_override(self, ctx, cog: str):
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
if cog in overrides:
overrides[cog]["required"] = -1
Configuration.save(ctx.guild.id)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('cog_override_removed', ctx, cog=cog)}")
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('cog_override_not_found', ctx, cog=cog)}")
@configure.group()
async def command_overrides(self, ctx):
"""command_overrides_help"""
if ctx.invoked_subcommand is None:
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
embed = discord.Embed(color=6008770, title=Translator.translate('command_overrides', ctx))
has_overrides = False
for cog in self.bot.cogs:
if cog in overrides:
out = gen_override_strings(ctx, overrides[cog])
if out != "":
has_overrides = True
embed.add_field(name=cog, value=out)
if not has_overrides:
embed.description = Translator.translate('no_overrides', ctx)
await ctx.send(embed=embed)
@command_overrides.command(name="set", aliases=["add"])
async def add_command_override(self, ctx, command:str, perm_lvl:int):
command = command.lower()
command_object = self.bot.get_command(command)
if command_object is not None:
cog = command_object.cog
cog_name = command_object.cog_name
if cog.permissions is None:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('command_core_cog_no_override', ctx, command=command, cog_name=cog_name)}")
elif perm_lvl in range(7):
perm_dict = Permissioncheckers.get_perm_dict(command_object.qualified_name.split(" "), cog.permissions)
if perm_lvl < perm_dict["min"]:
lvl = perm_dict["min"]
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('command_min_perm_violation', ctx, command=command, min_lvl=lvl, min_lvl_name=Translator.translate(f'perm_lvl_{lvl}', ctx))}")
elif perm_lvl > perm_dict["max"]:
lvl = cog.permissions['max']
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('command_max_perm_violation', ctx, command=command, max_lvl=lvl, max_lvl_name=Translator.translate(f'perm_lvl_{lvl}', ctx))}")
else:
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
if cog_name not in overrides:
overrides[cog_name] = {
"required": -1,
"commands": {},
"people": []
}
override = overrides[cog_name]
parts = command_object.qualified_name.split(" ")
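                    # Walk the nested override tree one (sub)command part at a time,
                    # creating empty levels as needed, then set the requirement on the leaf.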
while len(parts) > 0:
part = parts.pop(0)
if not part in override["commands"]:
override["commands"][part] = override = {
"required": -1,
"commands": {},
"people": []
}
else:
override = override["commands"][part]
override["required"] = perm_lvl
Configuration.save(ctx.guild.id)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('command_override_confirmation', ctx, command=command, perm_lvl=perm_lvl, perm_lvl_name=Translator.translate(f'perm_lvl_{perm_lvl}', ctx))}")
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('invalid_override_lvl', ctx)}")
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('command_not_found', ctx)}")
@command_overrides.command(name="remove")
async def remove_command_override(self, ctx, command:str):
command = command.lower()
command_object = self.bot.get_command(command)
if command_object is not None:
cog_name = command_object.cog_name
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
found = False
if cog_name in overrides:
override = Permissioncheckers.get_perm_dict(command_object.qualified_name.split(" "), overrides[cog_name], True)
if override is not None:
found = True
override["required"] = -1
Configuration.save(ctx.guild.id)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('command_override_removed', ctx, command=command)}")
if not found:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('command_override_not_found', ctx, command=command)}")
@configure.command()
async def perm_denied_message(self, ctx, value:bool):
"""perm_denied_message_help"""
Configuration.set_var(ctx.guild.id, "GENERAL", "PERM_DENIED_MESSAGE", value)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('configure_perm_msg_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")
@configure.command()
async def language(self, ctx, lang_code:str = None):
"""language_help"""
if lang_code is None:
await ctx.send(f"See https://crowdin.com/project/gearbot for all available languages and their translation statuses")
else:
code = None
lang_code = lang_code.lower().replace("_", "-")
for name, lcode in Translator.LANG_CODES.items():
if lang_code == lcode.lower() or lang_code == name.lower():
code = lcode
break
if code is None:
for name, lcode in Translator.LANG_CODES.items():
if lang_code == lcode.lower()[:2]:
code = lcode
break
if code is not None:
Configuration.set_var(ctx.guild.id, "GENERAL", "LANG", code)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('lang_changed', ctx.guild.id, lang=code, lang_name=Translator.LANG_NAMES[code])}")
else:
await ctx.send(f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('lang_unknown', ctx.guild.id)}")
@configure.group()
async def lvl4(self, ctx):
"""lvl4_help"""
pass
@lvl4.command(name="add")
async def add_lvl4(self, ctx, command:str, person:discord.Member):
command_object = self.bot.get_command(command)
if command_object is not None:
cog_name = command_object.cog_name
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
if cog_name not in overrides:
overrides[cog_name] = {
"required": -1,
"commands": {},
"people": []
}
override = overrides[cog_name]
parts = command.split(" ")
while len(parts) > 0:
part = parts.pop(0)
if not part in override["commands"]:
override["commands"][part] = override = {
"required": -1,
"commands": {},
"people": []
}
else:
override = override["commands"][part]
if person.id not in override["people"]:
override["people"].append(person.id)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('lvl4_added', ctx, member=person, command=command)}")
else:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate('already_had_lvl4', ctx, member=person, command=command)}")
Configuration.save(ctx.guild.id)
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('command_not_found', ctx)}")
@lvl4.command(name="remove")
async def remove_lvl4(self, ctx, command: str, person: discord.Member):
command_object = self.bot.get_command(command)
if command_object is not None:
cog_name = command_object.cog_name
overrides = Configuration.get_var(ctx.guild.id, "PERM_OVERRIDES")
found = False
if cog_name in overrides:
lvl4_list = Permissioncheckers.get_perm_dict(command.split(" "), overrides[cog_name], strict=True)
if lvl4_list is not None and person.id in lvl4_list["people"]:
found = True
if found:
lvl4_list["people"].remove(person.id)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('lvl4_removed', ctx, member=person, command=command)}")
Configuration.save(ctx.guild.id)
else:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate('did_not_have_lvl4', ctx, member=person, command=command)}")
@configure.group()
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def logging(self, ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(color=6008770, title=Translator.translate('log_channels', ctx))
channels = Configuration.get_var(ctx.guild.id, "LOG_CHANNELS")
if len(channels) > 0:
for cid, info in channels.items():
embed.add_field(name=cid, value=self.get_channel_properties(ctx, cid, info["CATEGORIES"]))
await ctx.send(embed=embed)
@staticmethod
def get_channel_properties(ctx, cid, info):
value = ""
channel = ctx.bot.get_channel(int(cid))
if channel is None:
value += f"{Translator.translate('channel_removed', ctx)}\n"
else:
value += f"**{Translator.translate('channel', ctx)}**{channel.mention}\n\n"
perms = ["send_messages", "embed_links", "attach_files"]
permissions = channel.permissions_for(channel.guild.me)
missing = [p for p in perms if not getattr(permissions, p)]
value += f"**{Translator.translate('channel_perms', ctx)}** \n"
if len(missing) == 0:
value += f"{Emoji.get_chat_emoji('YES')} {Translator.translate('full_channel_perms', ctx)}\n\n"
else:
value += f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('missing_channel_perms', ctx, perms = ', '.join(missing))}\n\n"
value += f"**{Translator.translate('to_be_logged', ctx)}** \n{', '.join(info)}\n\n"
return value
@logging.command(name="add")
async def add_logging(self, ctx, channel:discord.TextChannel, *, types):
cid = str(channel.id)
channels = Configuration.get_var(ctx.guild.id, "LOG_CHANNELS")
if cid not in channels:
channels[cid] = {
"CATEGORIES": [],
"DISABLED_KEYS": []
}
info = channels[cid]["CATEGORIES"]
added = []
ignored = []
message = ""
known, unknown = self.extract_types(types)
for t in known:
if t in info:
ignored.append(t)
else:
info.append(t)
added.append(t)
if len(added) > 0:
message += f"{Emoji.get_chat_emoji('YES')} {Translator.translate('logs_added', ctx)}{', '.join(added)}"
if len(ignored) > 0:
message += f"\n{Emoji.get_chat_emoji('WARNING')}{Translator.translate('logs_ignored', ctx)}{', '.join(ignored)}"
if len(unknown) > 0:
message += f"\n {Emoji.get_chat_emoji('NO')}{Translator.translate('logs_unknown', ctx)}{', '.join(unknown)}"
embed = discord.Embed(color=6008770)
embed.add_field(name=channel.id, value=self.get_channel_properties(ctx, channel.id, channels[cid]["CATEGORIES"]))
await ctx.send(message, embed=embed)
Configuration.save(ctx.guild.id)
features = []
for a in added:
feature = Utils.find_key(Features.requires_logging, a)
if feature is not None and not Configuration.get_var(ctx.guild.id, feature):
features.append(feature)
if len(features) > 0:
async def yes():
await ctx.invoke(self.enable_feature, ", ".join(features))
await Confirmation.confirm(ctx, MessageUtils.assemble(ctx.guild.id, 'WHAT', 'confirmation_enable_features', count=len(features)) + ', '.join(features), on_yes=yes)
@logging.command(name="remove")
async def remove_logging(self, ctx, cid: LoggingChannel, *, types):
channel = self.bot.get_channel(int(cid))
channels = Configuration.get_var(ctx.guild.id, "LOG_CHANNELS")
if cid not in channels:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('no_log_channel', ctx, channel=f'<{cid}>')}")
else:
info = channels[cid]["CATEGORIES"]
removed = []
ignored = []
unable = []
known, unknown = self.extract_types(types)
message = ""
for t in known:
if t in info:
removed.append(t)
info.remove(t)
else:
ignored.append(t)
if len(removed) > 0:
message += f"{Emoji.get_chat_emoji('YES')} {Translator.translate('logs_disabled_channel', ctx, channel=channel.mention if channel is not None else cid)}{', '.join(removed)}"
if len(ignored) > 0:
message += f"\n{Emoji.get_chat_emoji('WARNING')}{Translator.translate('logs_already_disabled_channel', ctx, channel=channel.mention if channel is not None else cid)}{', '.join(ignored)}"
if len(unable) > 0:
message += f"\n {Emoji.get_chat_emoji('NO')}{Translator.translate('logs_unable', ctx)} {', '.join(unable)}"
if len(unknown) > 0:
message += f"\n {Emoji.get_chat_emoji('NO')}{Translator.translate('logs_unknown', ctx)}{', '.join(unknown)}"
if len(info) > 0:
embed = discord.Embed(color=6008770)
embed.add_field(name=cid, value=self.get_channel_properties(ctx, cid, channels[cid]["CATEGORIES"]))
else:
embed=None
await ctx.send(message, embed=embed)
empty = []
for cid, info in channels.items():
                if len(info) == 0:
empty.append(cid)
for e in empty:
del channels[e]
Configuration.save(ctx.guild.id)
@logging.command()
async def dash(self, ctx):
await ctx.send(embed=self.get_logging_status(ctx))
def get_logging_status(self, ctx):
enabled = f"{Emoji.get_chat_emoji('YES')} {Translator.translate('enabled', ctx)}"
disabled = f"{Emoji.get_chat_emoji('NO')} {Translator.translate('disabled', ctx)}"
embed = discord.Embed(color=6008770, title=Translator.translate('log_types', ctx))
for t in self.LOGGING_TYPES:
e = Features.is_logged(ctx.guild.id, t)
embed.add_field(name=t, value=enabled if e else disabled)
return embed
@configure.group()
@commands.guild_only()
async def features(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send(embed=self.get_features_status(ctx))
@features.command(name="enable")
async def enable_feature(self, ctx, types):
types = types.upper()
enabled = []
ignored = []
known = []
unknown = []
for t2 in types.split(","):
for t in t2.split():
t = t.strip(",").strip()
if t != "":
if t in Features.requires_logging:
known.append(t)
else:
unknown.append(t)
message = ""
for t in known:
if Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS" if t == "EDIT_LOGS" else "CENSORING", "ENABLED"):
ignored.append(t)
else:
enabled.append(t)
Configuration.set_var(ctx.guild.id, "MESSAGE_LOGS" if t == "EDIT_LOGS" else "CENSORING", "ENABLED", True)
if t == "EDIT_LOGS":
await ctx.send(Translator.translate('minor_log_caching_start', ctx))
self.bot.to_cache.append(ctx)
if len(enabled) > 0:
message += MessageUtils.assemble(ctx.guild.id, 'YES', 'features_enabled', count=len(enabled)) + ', '.join(enabled)
if len(ignored) > 0:
message += MessageUtils.assemble(ctx.guild.id, 'WARNING', 'feature_already_enabled', count=len(ignored)) + ', '.join(ignored)
if len(unknown) > 0:
message += MessageUtils.assemble(ctx.guild.id, 'NO', 'logs_unknown', count=len(unknown)) + ', '.join(unknown)
await ctx.send(message, embed=self.get_features_status(ctx))
@staticmethod
def get_features_status(ctx):
enabled = f"{Emoji.get_chat_emoji('YES')} {Translator.translate('enabled', ctx)}"
disabled = f"{Emoji.get_chat_emoji('NO')} {Translator.translate('disabled', ctx)}"
embed = discord.Embed(color=6008770, title=Translator.translate('features', ctx))
for f, t in Features.requires_logging.items():
e = Configuration.get_var(ctx.guild.id, t, "ENABLED", f)
embed.add_field(name=f, value=enabled if e else disabled)
return embed
def can_remove(self, guild, logging):
counts = dict()
for cid, info in Configuration.get_var(guild, "LOG_CHANNELS").items():
for i in info:
if i not in counts:
counts[i] = 1
else:
counts[i] +=1
        return logging not in Features.requires_logging.values() or (logging in counts and counts[logging] > 1) or Configuration.get_var(guild, "MESSAGE_LOGS" if logging == "EDIT_LOGS" else "CENSORING", "ENABLED", False)
@features.command(name="disable")
async def feature_disable(self, ctx, types:str):
types = types.upper()
disabled= []
ignored = []
known = []
unknown = []
for t2 in types.split(","):
for t in t2.split():
t = t.strip(",").strip()
if t != "":
if t in Features.requires_logging:
known.append(t)
else:
unknown.append(t)
message = ""
for t in known:
if not Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS" if t == "EDIT_LOGS" else "CENSORING", "ENABLED"):
ignored.append(t)
else:
disabled.append(t)
Configuration.set_var(ctx.guild.id, "MESSAGE_LOGS" if t == "EDIT_LOGS" else "CENSORING", "ENABLED", False)
if len(disabled) > 0:
message += MessageUtils.assemble(ctx.guild.id, 'YES', 'features_disabled', count=len(disabled)) + ', '.join(disabled)
if len(ignored) > 0:
message += MessageUtils.assemble(ctx.guild.id, 'WARNING', 'feature_already_disabled', count=len(ignored)) + ', '.join(ignored)
if len(unknown) > 0:
message += MessageUtils.assemble(ctx.guild.id, 'NO', 'features_unknown', count=len(unknown)) + ', '.join(unknown)
await ctx.send(message, embed=self.get_features_status(ctx))
def extract_types(self, raw_types):
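        # Accepts comma- and/or whitespace-separated type names, e.g. "MOD_ACTIONS, MISC";
        # the special value "EVERYTHING" selects every known logging type.
        # Returns a (known_types, unknown_tokens) pair.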
raw_types = raw_types.upper()
if "EVERYTHING" in raw_types:
return self.LOGGING_TYPES, []
types = []
unknown = []
for t2 in raw_types.split(","):
for t in t2.split():
t = t.strip(",").strip()
if t != "":
if t in self.LOGGING_TYPES:
types.append(t)
else:
unknown.append(t)
return types, unknown
@configure.group()
@commands.guild_only()
async def ignored_channels(self, ctx):
"""ignored_channels_help"""
if ctx.invoked_subcommand == self.ignored_channels:
await ctx.invoke(self.bot.get_command("help"), query="configure ignored_channels")
@ignored_channels.group("changes")
@commands.guild_only()
async def ignored_channels_changes(self, ctx):
"""ignored_channels_changes_help"""
if ctx.invoked_subcommand == self.ignored_channels_changes:
await ctx.invoke(self.bot.get_command("help"), query="configure ignored_channels changes")
@ignored_channels_changes.command("add")
async def ignored_channels_changes_add(self, ctx, channel:TextChannel):
"""ignored_channels_add_help"""
channels = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", 'IGNORED_CHANNELS_CHANGES')
if channel.id in channels:
await MessageUtils.send_to(ctx, 'NO', 'ignored_channels_already_on_list')
else:
channels.append(channel.id)
await MessageUtils.send_to(ctx, 'YES', 'ignored_channels_changes_added', channel=channel.mention)
Configuration.save(ctx.guild.id)
@ignored_channels_changes.command("remove")
async def ignored_channels_changes_remove(self, ctx, channel: TextChannel):
"""ignored_channels_remove_help"""
channels = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", 'IGNORED_CHANNELS_CHANGES')
if not channel.id in channels:
await MessageUtils.send_to(ctx, 'NO', 'ignored_channels_not_on_list', channel=channel.mention)
else:
channels.remove(channel.id)
await MessageUtils.send_to(ctx, 'YES', 'ignored_channels_changes_removed', channel=channel.mention)
Configuration.save(ctx.guild.id)
@ignored_channels_changes.command("list")
async def ignored_channels_changes_list(self, ctx):
"""ignored_channels_list_help"""
await self.list_channels(ctx, "changes")
@staticmethod
async def list_channels(ctx, type):
channel_list = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", f'IGNORED_CHANNELS_{type.upper()}')
if len(channel_list) > 0:
channels = "\n".join(ctx.guild.get_channel(c).mention for c in channel_list)
else:
channels = Translator.translate('no_ignored_channels', ctx)
embed = discord.Embed(color=ctx.guild.roles[-1].color, description=channels)
embed.set_author(name=Translator.translate(f'ignored_channels_list_{type}', ctx, guild=ctx.guild.name), icon_url=ctx.guild.icon_url)
await ctx.send(embed=embed)
@ignored_channels.group("edits", aliases=["edit"])
@commands.guild_only()
async def ignored_channels_edits(self, ctx):
"""ignored_channels_edits_help"""
if ctx.invoked_subcommand == self.ignored_channels_edits:
await ctx.invoke(self.bot.get_command("help"), query="configure ignored_channels other")
@ignored_channels_edits.command("add")
async def ignored_channels_edits_add(self, ctx, channel: TextChannel):
"""ignored_channels_add_help"""
channels = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", 'IGNORED_CHANNELS_OTHER')
if channel.id in channels:
await MessageUtils.send_to(ctx, 'NO', 'ignored_channels_already_on_list', channel=channel.mention)
else:
channels.append(channel.id)
await MessageUtils.send_to(ctx, 'YES', 'ignored_channels_edits_added', channel=channel.mention)
Configuration.save(ctx.guild.id)
@ignored_channels_edits.command("remove")
async def ignored_channels_edits_remove(self, ctx, channel: TextChannel):
"""ignored_channels_remove_help"""
channels = Configuration.get_var(ctx.guild.id, "MESSAGE_LOGS", 'IGNORED_CHANNELS_OTHER')
if channel.id not in channels:
await MessageUtils.send_to(ctx, 'NO', 'ignored_channels_not_on_list')
else:
channels.remove(channel.id)
await MessageUtils.send_to(ctx, 'YES', 'ignored_channels_edits_removed', channel=channel.mention)
Configuration.save(ctx.guild.id)
@ignored_channels_edits.command("list")
async def ignored_channels_edits_list(self, ctx):
"""ignored_channels_list_help"""
await self.list_channels(ctx, "other")
    @commands.group()
    @commands.guild_only()
    async def disable(self, ctx: commands.Context):
        """disable_help"""
        pass

    @disable.command()
    async def mute(self, ctx: commands.Context):
        """disable_mute_help"""
        role = ctx.guild.get_role(Configuration.get_var(ctx.guild.id, "ROLES", "MUTE_ROLE"))
        if role is not None:
            for member in role.members:
                await member.remove_roles(role, reason="Mute feature has been disabled")
        Configuration.set_var(ctx.guild.id, "ROLES", "MUTE_ROLE", 0)
        await ctx.send("Mute feature has been disabled, all people muted have been unmuted and the role can now be removed.")

    @configure.command()
    async def dm_on_warn(self, ctx, value: bool):
        """dm_on_warn_help"""
        Configuration.set_var(ctx.guild.id, "INFRACTIONS", "DM_ON_WARN", value)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('dm_on_warn_msg_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")

    @configure.command()
    async def dm_on_kick(self, ctx, value: bool):
        """dm_on_kick_help"""
        Configuration.set_var(ctx.guild.id, "INFRACTIONS", "DM_ON_KICK", value)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('dm_on_kick_msg_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")

    @configure.command()
    async def dm_on_ban(self, ctx, value: bool):
        """dm_on_ban_help"""
        Configuration.set_var(ctx.guild.id, "INFRACTIONS", "DM_ON_BAN", value)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('dm_on_ban_msg_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")

    @configure.command()
    async def dm_on_tempban(self, ctx, value: bool):
        """dm_on_tempban_help"""
        Configuration.set_var(ctx.guild.id, "INFRACTIONS", "DM_ON_TEMPBAN", value)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('dm_on_tempban_msg_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")

    @configure.command()
    async def dm_on_mute(self, ctx, value: bool):
        """dm_on_mute_help"""
        Configuration.set_var(ctx.guild.id, "INFRACTIONS", "DM_ON_MUTE", value)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('dm_on_mute_msg_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")

    @configure.command()
    async def dm_on_unmute(self, ctx, value: bool):
        """dm_on_unmute_help"""
        Configuration.set_var(ctx.guild.id, "INFRACTIONS", "DM_ON_UNMUTE", value)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('dm_on_unmute_msg_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")

    @configure.command()
    async def log_embeds(self, ctx, value: bool):
        Configuration.set_var(ctx.guild.id, "MESSAGE_LOGS", "EMBED", value)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('embed_log_' + ('enabled' if value else 'disabled'), ctx.guild.id)}")

@configure.group(aliases=["censorlist"])
async def censor_list(self, ctx):
if ctx.invoked_subcommand is None:
await Pages.create_new(self.bot, "censor_list", ctx)
@staticmethod
async def _censorlist_init(ctx):
pages = Pages.paginate("\n".join(Configuration.get_var(ctx.guild.id, "CENSORING", "TOKEN_CENSORLIST")))
return f"**{Translator.translate(f'censor_list', ctx, server=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{pages[0]}```", None, len(pages) > 1
@staticmethod
async def _censorklist_update(ctx, message, page_num, action, data):
pages = Pages.paginate("\n".join(Configuration.get_var(message.channel.guild.id, "CENSORING", "TOKEN_CENSORLIST")))
page, page_num = Pages.basic_pages(pages, page_num, action)
data["page"] = page_num
return f"**{Translator.translate(f'censor_list', message.channel.guild.id, server=message.channel.guild.name, page_num=page_num + 1, pages=len(pages))}**```\n{page}```", None, data
@censor_list.command("add")
async def censor_list_add(self, ctx, *, word: str):
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "TOKEN_CENSORLIST")
if word.lower() in censor_list:
await MessageUtils.send_to(ctx, "NO", "already_censored", word=word)
else:
censor_list.append(word.lower())
await MessageUtils.send_to(ctx, "YES", "entry_added", entry=word)
Configuration.save(ctx.guild.id)
@censor_list.command("remove")
async def censor_list_remove(self, ctx, *, word: str):
censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "TOKEN_CENSORLIST")
if word not in censor_list:
await MessageUtils.send_to(ctx, "NO", "not_censored", word=word)
else:
censor_list.remove(word)
await MessageUtils.send_to(ctx, "YES", "entry_removed", entry=word)
Configuration.save(ctx.guild.id)
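    # Word censor list management; changes invalidate the Censor cog's cached regex for this guild.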
    @configure.group()
    async def word_censor_list(self, ctx):
        if ctx.invoked_subcommand is None:
            await Pages.create_new(self.bot, "word_censor_list", ctx)

    @staticmethod
    async def _word_censorlist_init(ctx):
        pages = Pages.paginate("\n".join(Configuration.get_var(ctx.guild.id, "CENSORING", "WORD_CENSORLIST")))
        return f"**{Translator.translate('censor_list', ctx, server=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{pages[0]}```", None, len(pages) > 1

    @staticmethod
    async def _word_censor_list_update(ctx, message, page_num, action, data):
        pages = Pages.paginate("\n".join(Configuration.get_var(message.channel.guild.id, "CENSORING", "WORD_CENSORLIST")))
        page, page_num = Pages.basic_pages(pages, page_num, action)
        data["page"] = page_num
        return f"**{Translator.translate('censor_list', message.channel.guild.id, server=message.channel.guild.name, page_num=page_num + 1, pages=len(pages))}**```\n{page}```", None, data

    @word_censor_list.command("add")
    async def word_censor_list_add(self, ctx, *, word: str):
        censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "WORD_CENSORLIST")
        if word.lower() in censor_list:
            await MessageUtils.send_to(ctx, "NO", "already_censored", word=word)
        else:
            censor_list.append(word.lower())
            await MessageUtils.send_to(ctx, "YES", "entry_added", entry=word)
            Configuration.save(ctx.guild.id)
            if ctx.guild.id in self.bot.get_cog("Censor").regexes:
                del self.bot.get_cog("Censor").regexes[ctx.guild.id]

    @word_censor_list.command("remove")
    async def word_censor_list_remove(self, ctx, *, word: str):
        censor_list = Configuration.get_var(ctx.guild.id, "CENSORING", "WORD_CENSORLIST")
        if word not in censor_list:
            await MessageUtils.send_to(ctx, "NO", "not_censored", word=word)
        else:
            censor_list.remove(word)
            await MessageUtils.send_to(ctx, "YES", "entry_removed", entry=word)
            Configuration.save(ctx.guild.id)
            if ctx.guild.id in self.bot.get_cog("Censor").regexes:
                del self.bot.get_cog("Censor").regexes[ctx.guild.id]

    @configure.group()
    @commands.guild_only()
    @commands.bot_has_permissions(embed_links=True)
    async def role_list(self, ctx):
        """configure_role_list_help"""
        if ctx.invoked_subcommand is None:
            items = Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST")
            mode = "allow" if Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST_MODE") else "block"
            if len(items) == 0:
                desc = Translator.translate(f"no_role_{mode}", ctx)
            else:
                desc = "\n".join(f"<@&{item}>" for item in items)
            embed = discord.Embed(title=Translator.translate(f"current_role_{mode}_list", ctx), description=desc)
            await ctx.send(embed=embed)

    @role_list.command("add")
    async def role_list_add(self, ctx, *, role: discord.Role):
        """configure_role_list_add"""
        roles = Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST")
        mode = "allow" if Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST_MODE") else "block"
        if role == ctx.guild.default_role:
            await MessageUtils.send_to(ctx, "NO", "default_role_forbidden")
        elif role.id in roles:
            await MessageUtils.send_to(ctx, "NO", "role_list_add_fail", role=Utils.escape_markdown(role.name))
        else:
            roles.append(role.id)
            Configuration.save(ctx.guild.id)
            await MessageUtils.send_to(ctx, "YES", f"role_list_add_confirmation_{mode}", role=Utils.escape_markdown(role.name))

    @role_list.command("remove", aliases=["rmv"])
    async def role_list_remove(self, ctx, *, role: discord.Role):
        """configure_role_list_remove"""
        roles = Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST")
        mode = "allow" if Configuration.get_var(ctx.guild.id, "ROLES", "ROLE_LIST_MODE") else "block"
        if role.id not in roles:
            await MessageUtils.send_to(ctx, "NO", f"role_list_rmv_fail_{mode}", role=Utils.escape_markdown(role.name))
        else:
            roles.remove(role.id)
            Configuration.save(ctx.guild.id)
            await MessageUtils.send_to(ctx, "YES", f"role_list_rmv_confirmation_{mode}", role=Utils.escape_markdown(role.name))

    @role_list.command("mode")
    async def role_list_mode(self, ctx, mode: ListMode):
        """configure_role_list_mode"""
        Configuration.set_var(ctx.guild.id, "ROLES", "ROLE_LIST_MODE", mode)
        mode = "allowed" if mode else "blocked"
        await MessageUtils.send_to(ctx, "YES", f"role_list_mode_{mode}")

    @configure.group()
    @commands.guild_only()
    @commands.bot_has_permissions(embed_links=True)
    async def domain_list(self, ctx):
        """configure_domain_list_help"""
        if ctx.invoked_subcommand is None:
            items = Configuration.get_var(ctx.guild.id, "CENSORING", "DOMAIN_LIST")
            mode = "allowed" if Configuration.get_var(ctx.guild.id, "CENSORING", "DOMAIN_LIST_ALLOWED") else "blocked"
            if len(items) == 0:
                desc = Translator.translate("empty_domain_list", ctx)
            else:
                desc = "\n".join(f"{item}" for item in items)
            embed = discord.Embed(title=Translator.translate(f"current_domain_list_{mode}", ctx), description=desc)
            await ctx.send(embed=embed)

    @domain_list.command("add")
    async def domain_list_add(self, ctx, *, domain):
        """configure_domain_list_add"""
        domain = domain.lower()
        domains = Configuration.get_var(ctx.guild.id, "CENSORING", "DOMAIN_LIST")
        mode = "allow" if Configuration.get_var(ctx.guild.id, "CENSORING", "DOMAIN_LIST_ALLOWED") else "block"
        if domain in domains:
            await MessageUtils.send_to(ctx, "NO", f"domain_list_add_fail_{mode}", domain=domain)
        else:
            domains.append(domain)
            Configuration.save(ctx.guild.id)
            await MessageUtils.send_to(ctx, "YES", f"domain_list_add_confirmation_{mode}", domain=domain)

    @domain_list.command("remove", aliases=["rmv"])
    async def domain_list_remove(self, ctx, *, domain):
        """configure_domain_list_remove"""
        domains = Configuration.get_var(ctx.guild.id, "CENSORING", "DOMAIN_LIST")
        mode = "allow" if Configuration.get_var(ctx.guild.id, "CENSORING", "DOMAIN_LIST_ALLOWED") else "block"
        if domain not in domains:
            await MessageUtils.send_to(ctx, "NO", f"domain_list_rmv_fail_{mode}", domain=domain)
        else:
            domains.remove(domain)
            Configuration.save(ctx.guild.id)
            await MessageUtils.send_to(ctx, "YES", f"domain_list_rmv_confirmation_{mode}", domain=domain)

    @domain_list.command("mode")
    async def domain_list_mode(self, ctx, mode: ListMode):
        """configure_domain_list_mode"""
        Configuration.set_var(ctx.guild.id, "CENSORING", "DOMAIN_LIST_ALLOWED", mode)
        mode = "allow" if mode else "block"
        await MessageUtils.send_to(ctx, "YES", f"domain_list_mode_{mode}")

    @configure.command()
    @commands.guild_only()
    async def timezone(self, ctx, new_zone=None):
        """timezone_help"""
        current_zone = Configuration.get_var(ctx.guild.id, "GENERAL", "TIMEZONE")
        if new_zone is None:
            # no new zone, spit out the current one
            await MessageUtils.send_to(ctx, "CLOCK", "current_timezone", timezone=current_zone)
        else:
            try:
                zone = str(pytz.timezone(new_zone))
            except UnknownTimeZoneError:
                await MessageUtils.send_to(ctx, "NO", "invalid_timezone")
            else:
                if current_zone == new_zone:
                    await MessageUtils.send_to(ctx, "WHAT", "same_timezone", timezone=current_zone)
                else:
                    Configuration.set_var(ctx.guild.id, "GENERAL", "TIMEZONE", zone)
                    await MessageUtils.send_to(ctx, "YES", "timezone_set", timezone=zone)

    @commands.Cog.listener()
    async def on_guild_channel_delete(self, channel):
        changed = False
        for name in ["IGNORED_CHANNELS_CHANGES", "IGNORED_CHANNELS_OTHER"]:
            channels = Configuration.get_var(channel.guild.id, "MESSAGE_LOGS", name)
            if channel.id in channels:
                channels.remove(channel.id)
                changed = True
        if changed:
            Configuration.save(channel.guild.id)

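# Standard discord.py extension entry point.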
def setup(bot):
    bot.add_cog(ServerAdmin(bot))
|
py | 7dff684929721699bb3df90a5a4ce52f0552848b | # encoding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
)


class ComCarCoffIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?comediansincarsgettingcoffee\.com/(?P<id>[a-z0-9\-]*)'
    _TESTS = [{
        'url': 'http://comediansincarsgettingcoffee.com/miranda-sings-happy-thanksgiving-miranda/',
        'info_dict': {
            'id': '2494164',
            'ext': 'mp4',
            'upload_date': '20141127',
            'timestamp': 1417107600,
            'duration': 1232,
            'title': 'Happy Thanksgiving Miranda',
            'description': 'Jerry Seinfeld and his special guest Miranda Sings cruise around town in search of coffee, complaining and apologizing along the way.',
        },
        'params': {
            'skip_download': 'requires ffmpeg',
        }
    }]

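    # All metadata is embedded in the page's window.app JSON; playback is delegated to the Crackle
    # extractor through a url_transparent result.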
    def _real_extract(self, url):
        display_id = self._match_id(url)
        if not display_id:
            display_id = 'comediansincarsgettingcoffee.com'

        webpage = self._download_webpage(url, display_id)

        full_data = self._parse_json(
            self._search_regex(
                r'window\.app\s*=\s*({.+?});\n', webpage, 'full data json'),
            display_id)['videoData']
        display_id = full_data['activeVideo']['video']
        video_data = full_data.get('videos', {}).get(display_id) or full_data['singleshots'][display_id]

        video_id = compat_str(video_data['mediaId'])
        thumbnails = [{
            'url': video_data['images']['thumb'],
        }, {
            'url': video_data['images']['poster'],
        }]

        timestamp = int_or_none(video_data.get('pubDateTime')) or parse_iso8601(
            video_data.get('pubDate'))
        duration = int_or_none(video_data.get('durationSeconds')) or parse_duration(
            video_data.get('duration'))

        return {
            '_type': 'url_transparent',
            'url': 'crackle:%s' % video_id,
            'id': video_id,
            'display_id': display_id,
            'title': video_data['title'],
            'description': video_data.get('description'),
            'timestamp': timestamp,
            'duration': duration,
            'thumbnails': thumbnails,
            'season_number': int_or_none(video_data.get('season')),
            'episode_number': int_or_none(video_data.get('episode')),
            'webpage_url': 'http://comediansincarsgettingcoffee.com/%s' % (video_data.get('urlSlug', video_data.get('slug'))),
        }
|