max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
jdb2/__init__.py | spdir/jsonDB2 | 1 | 12795151 | # -*- coding: utf-8 -*-
__author = 'Musker.Chao'
__version = '0.2.2'
from .jdb import NoSql
| 1.070313 | 1 |
SchemaCollaboration/core/migrations/0007_add_defaults.py | Swiss-Polar-Institute/schema-collaboration-arctic-century | 15 | 12795152 | # Generated by Django 3.1.4 on 2020-12-01 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_suggestions_from_django_doctor'),
]
operations = [
migrations.AlterField(
model_name='datapackage',
name='name',
field=models.CharField(blank=True, default='', max_length=500),
),
]
| 1.515625 | 2 |
src/exts/automod.py | vcokltfre/hbot-rewrite | 2 | 12795153 | <gh_stars>1-10
from os import environ
from disnake import AllowedMentions, Message, TextChannel
from disnake.ext.commands import Cog
from disnake.http import Route
from src.impl.bot import Bot
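# Channels policed by this cog and the log channel are configured through environment
# variables (assumed to be set before the bot starts): CHANNELS is a ";"-separated list
# of channel IDs, LOGS is the ID of the channel that receives deletion notices.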
CHANNELS = [int(c) for c in environ["CHANNELS"].split(";")]
LOGS = int(environ["LOGS"])
class AutoMod(Cog):
def __init__(self, bot: Bot) -> None:
self.bot = bot
@Cog.listener()
async def on_message(self, message: Message) -> None:
if message.channel.id not in CHANNELS:
return
if not isinstance(message.channel, TextChannel):
return
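        # Only a bare text message whose content exactly matches the channel name is
        # allowed through; stickers, attachments, embeds, components, or any other
        # content gets the message deleted and reported to the log channel.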
if (
message.stickers
or message.attachments
or message.embeds
or message.components
or message.content != message.channel.name
):
await message.delete()
lc: TextChannel = self.bot.get_channel(LOGS) # type: ignore
if lc:
await lc.send(
f"Message from {message.author.mention} has been automatically deleted in {message.channel.mention}:\n\n{message.content[:1800]}",
allowed_mentions=AllowedMentions(users=False, roles=False, everyone=False),
)
@Cog.listener()
async def on_raw_message_edit(self, payload: dict) -> None:
if payload["channel_id"] not in CHANNELS:
return
message_id = payload["message_id"]
channel_id = payload["channel_id"]
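        # Any edit in a policed channel is treated as a violation, so the edited message
        # is deleted directly through the raw REST route instead of resolving a full
        # Message object first.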
await self.bot.http.request(
Route(
"DELETE",
"/channels/{channel_id}/messages/{message_id}",
channel_id=channel_id,
message_id=message_id,
)
)
def setup(bot: Bot) -> None:
bot.add_cog(AutoMod(bot))
| 2.3125 | 2 |
tenet/simulation.py | ferrouswheel/tenet | 7 | 12795154 | import random
import logging
import networkx as nx
from tenet.message import (
Message,
DictTransport, MessageSerializer, MessageTypes
)
from tenet.peer import Peer, Friend
from tenet.utils import weighted_choice
log = logging.getLogger(__name__)
class SimulatedPeer(object):
def __init__(self, peer):
self.peer = peer
self.connected = True
def simulate(self, transport, env):
actions = [('friend_post', 4), ('send_msg', 4)]
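        # Candidate actions with relative weights for weighted_choice: while connected a
        # peer is twice as likely to post or message as it is to disconnect; while offline
        # it mostly idles and occasionally reconnects.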
while True:
available_actions = list(actions)
if self.peer.connected:
available_actions.append(('disconnect', 2))
else:
# NOTE: maybe simulate offline posts
# so that connection behaviour and a sudden egress of messages
# doesn't mess things up
available_actions = [('connect', 1), ('none', 3)]
a = weighted_choice(available_actions)
if a == 'send_msg':
log.debug("{} will send a message.".format(self.peer))
self.random_message(transport)
elif a == 'friend_post':
log.debug("{} will make a post.".format(self.peer))
self.random_post(transport)
elif a == 'disconnect':
log.info("{} disconnecting".format(self.peer))
self.peer.connected = False
elif a == 'connect':
log.info("{} reconnecting".format(self.peer))
self.peer.connected = True
self.peer.on_connect(transport)
wait_duration = random.randint(1,4)
yield env.timeout(wait_duration)
def random_post(self, transport):
sender = self.peer
recipients = set()
if not sender.friends:
log.debug("{} has no friends :-(".format(sender))
return
num_recipients = random.randint(1, len(list(sender.friends.values())))
while len(recipients) < num_recipients:
r = random.choice(list(sender.friends.values()))
recipients.add(r)
msg = Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text="This is a general post to mah friends!")
sender.send(msg, transport)
def random_message(self, transport):
sender = self.peer
recipient = None
if not sender.friends:
log.debug("{} has no friends :-(".format(sender))
return
while recipient is None or recipient == sender:
recipient = random.choice(list(sender.friends.values()))
msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text="Hello {}!".format(recipient))
sender.send(msg, transport)
def random_address(i):
names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana']
hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com']
return random.choice(names) + '_' + str(i) + '@' + random.choice(hosts)
def generate_random_peers(number=100):
for i in range(0, number):
p = Peer(random_address(i))
yield SimulatedPeer(p)
def random_friendships(peers, G=None, density=0.1):
x = len(peers)
links = int(x*x*density)
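    # Number of friendship links to create, taken as a fraction of the x*x ordered peer
    # pairs; self-pairs are re-rolled below, repeated picks simply re-add an existing edge.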
for i in range(0, links):
p1 = random.choice(peers)
p2 = None
while p2 is None or p1 == p2:
p2 = random.choice(peers)
G.add_edge(p1.address, p2.address)
# TODO exchange keys too
p1.friends[p2.address] = Friend(p2.address, p2.key)
p2.friends[p1.address] = Friend(p1.address, p1.key)
#log.debug('{} and {} are now friends'.format(p1, p2))
def gen_social_graph_1(num_people=10):
G=nx.Graph()
peers = [x for x in generate_random_peers(num_people)]
[log.debug(x) for x in peers]
for p in peers:
G.add_node(p.address)
random_friendships([p.peer for p in peers], G)
return (peers, G)
def gen_social_graph_2(num_people=10):
G=nx.random_geometric_graph(num_people,0.325)
peer_by_id = {}
for n in G.nodes():
peer_by_id[n] = SimulatedPeer(Peer(random_address(n)))
for e in G.edges():
p1 = peer_by_id[e[0]]
p2 = peer_by_id[e[1]]
p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key)
p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key)
return peer_by_id.values(), G
def draw_graph(G):
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8,8))
# layout graphs with positions using graphviz neato
#pos=nx.graphviz_layout(G, prog="neato")
pos=nx.get_node_attributes(G,'pos')
nx.draw_networkx_edges(G,pos,alpha=0.4)
nx.draw_networkx_nodes(G,pos,
node_size=80,
cmap=plt.cm.Reds_r)
#nx.draw(G,
#pos,
#node_size=40,
##node_color=c,
#vmin=0.0,
#vmax=1.0,
#with_labels=False
#)
plt.savefig("tenet.png",dpi=75)
| 2.828125 | 3 |
protostar/commands/test/test_collector.py | software-mansion/protostar | 11 | 12795155 | <filename>protostar/commands/test/test_collector.py
# pylint: disable=no-self-use
import re
from collections import defaultdict
from dataclasses import dataclass
from fnmatch import fnmatch
from glob import glob
from logging import Logger
from pathlib import Path
from time import time
from typing import Dict, List, Optional, Set
from starkware.cairo.lang.compiler.preprocessor.preprocessor_error import (
PreprocessorError,
)
from starkware.starknet.compiler.starknet_preprocessor import (
StarknetPreprocessedProgram,
)
from protostar.commands.test.test_suite import TestSuite
from protostar.protostar_exception import ProtostarException
from protostar.utils.starknet_compilation import StarknetCompiler
TestSuiteGlob = str
TestSuitePath = Path
TestCaseGlob = str
Target = str
"""e.g. `tests/**/::test_*`"""
TestCaseGlobsDict = Dict[TestSuitePath, Set[TestCaseGlob]]
@dataclass(frozen=True)
class ParsedTarget:
test_suite_glob: TestSuiteGlob
test_case_glob: TestCaseGlob
@classmethod
def from_target(
cls, target: Target, default_test_suite_glob: Optional[TestSuiteGlob]
):
test_suite_glob: Optional[TestSuiteGlob] = target
test_case_glob: Optional[TestCaseGlob] = None
if "::" in target:
(test_suite_glob, test_case_glob) = target.split("::")
test_suite_glob = test_suite_glob or default_test_suite_glob or "."
if not test_case_glob:
test_case_glob = "*"
return cls(test_suite_glob, test_case_glob)
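        # Illustrative parses (hypothetical inputs, not from the original source):
        #   "tests/**/test_x.cairo::test_fuzz*" -> ("tests/**/test_x.cairo", "test_fuzz*")
        #   "::test_a"                          -> (default_test_suite_glob or ".", "test_a")
        #   "tests"                             -> ("tests", "*")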
@dataclass
class TestSuiteInfo:
path: Path
test_case_globs: Set[TestCaseGlob]
ignored_test_case_globs: Set[TestCaseGlob]
def match_test_case_names(self, test_case_names: List[str]) -> List[str]:
matches = self._find_matching_any_test_case_glob(test_case_names)
result = self._filter_out_matching_any_ignored_test_case_glob(matches)
return list(result)
def _find_matching_any_test_case_glob(self, test_case_names: List[str]) -> Set[str]:
result: Set[str] = set()
for test_case_name in test_case_names:
for test_case_glob in self.test_case_globs:
if fnmatch(test_case_name, test_case_glob):
result.add(test_case_name)
return result
def _filter_out_matching_any_ignored_test_case_glob(
self, test_case_names: Set[str]
) -> Set[str]:
result = (
test_case_names.copy()
) # copy prevents changing lengths of this collection during loop execution
for test_case_name in test_case_names:
for ignored_test_case_glob in self.ignored_test_case_globs:
if fnmatch(test_case_name, ignored_test_case_glob):
result.remove(test_case_name)
break
return result
TestSuiteInfoDict = Dict[TestSuitePath, TestSuiteInfo]
class TestCollectingException(ProtostarException):
pass
@dataclass
class TestCollector:
class Result:
def __init__(self, test_suites: List[TestSuite], duration: float = 0.0) -> None:
self.test_suites = test_suites
self.test_cases_count = sum(
[len(test_suite.test_case_names) for test_suite in test_suites]
)
self.duration = duration
def log(self, logger: Logger):
if self.test_cases_count:
result: List[str] = ["Collected"]
suites_count = len(self.test_suites)
if suites_count == 1:
result.append("1 suite,")
else:
result.append(f"{suites_count} suites,")
result.append("and")
if self.test_cases_count == 1:
result.append("1 test case")
else:
result.append(f"{self.test_cases_count} test cases")
result.append(f"({self.duration:.3f} s)")
logger.info(" ".join(result))
else:
logger.warning("No cases found")
def __init__(
self,
starknet_compiler: StarknetCompiler,
) -> None:
self._starknet_compiler = starknet_compiler
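    # Per the regexes below, a file is collected as a test suite when its name matches
    # test_*.cairo or *_test.cairo.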
supported_test_suite_filename_patterns = [
re.compile(r"^test_.*\.cairo"),
re.compile(r"^.*_test.cairo"),
]
@classmethod
def is_test_suite(cls, filename: str) -> bool:
return any(
test_re.match(filename)
for test_re in cls.supported_test_suite_filename_patterns
)
def collect(
self,
targets: List[Target],
ignored_targets: Optional[List[Target]] = None,
default_test_suite_glob: Optional[str] = None,
) -> "TestCollector.Result":
start_time = time()
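        # Collection pipeline: parse positive and ignored targets, expand them into
        # per-suite test-case globs, drop suites that are ignored wholesale, then build
        # TestSuite objects and keep only the ones that still contain test cases.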
parsed_targets = self.parse_targets(set(targets), default_test_suite_glob)
ignored_parsed_targets = self.parse_targets(
set(ignored_targets or []), default_test_suite_glob
)
test_case_globs_dict = self.build_test_case_globs_dict(parsed_targets)
ignored_test_case_globs_dict = self.build_test_case_globs_dict(
ignored_parsed_targets
)
filtered_test_case_globs_dict = self.filter_out_ignored_test_suites(
test_case_globs_dict,
ignored_test_case_globs_dict,
)
test_suite_info_dict = self.build_test_suite_info_dict(
filtered_test_case_globs_dict,
ignored_test_case_globs_dict,
)
test_suites = self._build_test_suites_from_test_suite_info_dict(
test_suite_info_dict
)
non_empty_test_suites = list(
filter(lambda test_file: (test_file.test_case_names) != [], test_suites)
)
end_time = time()
return TestCollector.Result(
test_suites=non_empty_test_suites, duration=end_time - start_time
)
def build_test_case_globs_dict(
self,
parsed_targets: Set[ParsedTarget],
) -> TestCaseGlobsDict:
results: TestCaseGlobsDict = defaultdict(set)
for parsed_target in parsed_targets:
test_suite_paths = self._find_test_suite_paths_from_glob(
parsed_target.test_suite_glob
)
for test_suite_path in test_suite_paths:
results[test_suite_path].add(parsed_target.test_case_glob)
return results
def parse_targets(
self, targets: Set[Target], default_test_suite_glob: Optional[str] = None
) -> Set[ParsedTarget]:
return {
ParsedTarget.from_target(target, default_test_suite_glob)
for target in targets
}
def filter_out_ignored_test_suites(
self,
test_case_globs_dict: TestCaseGlobsDict,
ignored_test_case_globs_dict: TestCaseGlobsDict,
) -> TestCaseGlobsDict:
result = test_case_globs_dict.copy()
for ignored_target_path in ignored_test_case_globs_dict:
if (
"*" in ignored_test_case_globs_dict[ignored_target_path]
and ignored_target_path in result
):
del result[ignored_target_path]
return result
def build_test_suite_info_dict(
self,
test_case_globs_dict: TestCaseGlobsDict,
ignored_test_case_globs_dict: TestCaseGlobsDict,
) -> TestSuiteInfoDict:
result: TestSuiteInfoDict = {}
for test_suite_path in test_case_globs_dict:
test_suite_info = result.setdefault(
test_suite_path,
TestSuiteInfo(
test_case_globs=set(),
ignored_test_case_globs=set(),
path=test_suite_path,
),
)
test_suite_info.test_case_globs = test_case_globs_dict[test_suite_path]
if test_suite_path in ignored_test_case_globs_dict:
test_suite_info.ignored_test_case_globs = ignored_test_case_globs_dict[
test_suite_path
]
return result
def _find_test_suite_paths_from_glob(
self, test_suite_glob: str
) -> Set[TestSuitePath]:
results: Set[Path] = set()
matches = glob(test_suite_glob, recursive=True)
for match in matches:
path = Path(match)
if path.is_dir():
results.update(self._find_test_suite_paths_in_dir(path))
elif path.is_file() and TestCollector.is_test_suite(path.name):
results.add(path)
return results
def _find_test_suite_paths_in_dir(self, path: Path) -> Set[TestSuitePath]:
filepaths = set(glob(f"{path}/**/*.cairo", recursive=True))
results: Set[Path] = set()
for filepath in filepaths:
path = Path(filepath)
if TestCollector.is_test_suite(path.name):
results.add(path)
return results
def _build_test_suites_from_test_suite_info_dict(
self,
test_suite_info_dict: TestSuiteInfoDict,
) -> List[TestSuite]:
return [
self._build_test_suite_from_test_suite_info(
test_suite_info,
)
for test_suite_info in test_suite_info_dict.values()
]
def _build_test_suite_from_test_suite_info(
self,
test_suite_info: TestSuiteInfo,
) -> TestSuite:
preprocessed = self._preprocess_contract(test_suite_info.path)
collected_test_case_names = self._collect_test_case_names(preprocessed)
matching_test_case_names = test_suite_info.match_test_case_names(
collected_test_case_names
)
return TestSuite(
test_path=test_suite_info.path,
test_case_names=matching_test_case_names,
preprocessed_contract=preprocessed,
setup_fn_name=self._find_setup_hook_name(preprocessed),
)
def _collect_test_case_names(
self, preprocessed: StarknetPreprocessedProgram
) -> List[str]:
return self._starknet_compiler.get_function_names(
preprocessed, predicate=lambda fn_name: fn_name.startswith("test_")
)
def _find_setup_hook_name(
self, preprocessed: StarknetPreprocessedProgram
) -> Optional[str]:
function_names = self._starknet_compiler.get_function_names(
preprocessed, predicate=lambda fn_name: fn_name == "__setup__"
)
return function_names[0] if len(function_names) > 0 else None
def _preprocess_contract(self, file_path: Path) -> StarknetPreprocessedProgram:
try:
return self._starknet_compiler.preprocess_contract(file_path)
except PreprocessorError as p_err:
print(p_err)
raise TestCollectingException("Failed to collect test cases") from p_err
| 1.851563 | 2 |
loopchain/container/tx_service.py | extendjh/loopchain | 2 | 12795156 | <reponame>extendjh/loopchain
# Copyright 2017 theloop, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Send tx to leader. Store tx temporary while leader is broken"""
import logging
import json
import pickle
import queue
from enum import Enum
from loopchain.baseservice import ObjectManager, StubManager
from loopchain.container import Container
from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code
from loopchain import configure as conf
class PeerProcessStatus(Enum):
normal = 1
leader_complained = 2
class TxService(Container, loopchain_pb2_grpc.ContainerServicer):
def __init__(self, port):
Container.__init__(self, port)
self.__handler_map = {
message_code.Request.status: self.__handler_status,
message_code.Request.stop: self.__handler_stop,
message_code.Request.tx_create: self.__handler_create_tx,
message_code.Request.tx_connect_to_leader: self.__handler_connect_to_leader,
message_code.Request.tx_connect_to_inner_peer: self.__handler_connect_to_inner_peer
}
self.__peer_id = None
self.__stub_to_peer_service = None
# ObjectManager().tx_service = self
self.__stub_to_leader = None
self.__stub_to_inner_peer = None
self.__peer_status = PeerProcessStatus.normal
self.__stored_tx = queue.Queue()
self.start()
def __create_tx_continue(self):
        # Send any transactions that were stored while the leader was unavailable.
while not self.__stored_tx.empty():
stored_tx_item = self.__stored_tx.get()
result_add_tx = self.__stub_to_leader.call_in_times(
"AddTx", loopchain_pb2.TxSend(tx=stored_tx_item), is_stub_reuse=True)
            if result_add_tx is None or result_add_tx.response_code != message_code.Response.success:
                self.__stored_tx.put(stored_tx_item)
                raise Exception(result_add_tx.message if result_add_tx is not None else "AddTx returned no response")
def __handler_status(self, request, context):
"""Service Status
"""
status = dict()
status['status'] = message_code.Response.success
status_json = json.dumps(status)
logging.debug("TxService __handler_status %s : %s", request.message, status_json)
return loopchain_pb2.Message(code=message_code.Response.success, meta=status_json)
def __handler_stop(self, request, context):
logging.debug("TxService handler stop...")
self.stop()
return loopchain_pb2.Message(code=message_code.Response.success)
def __handler_create_tx(self, request, context):
# logging.debug("TxService handler create tx")
tx = request.object
tx_object = pickle.loads(tx)
# logging.debug(f"TxService got tx({tx_object.get_tx_hash()})")
try:
if self.__peer_status == PeerProcessStatus.leader_complained:
self.__stored_tx.put(tx)
logging.warning("Leader is complained your tx just stored in queue by temporally: "
+ str(self.__stored_tx.qsize()))
else:
self.__create_tx_continue()
result_add_tx = self.__stub_to_leader.call(
"AddTx", loopchain_pb2.TxSend(tx=tx), is_stub_reuse=True
)
if result_add_tx.response_code != message_code.Response.success:
raise Exception(result_add_tx.message)
except Exception as e:
logging.warning(f"in tx service create_tx target({self.__stub_to_leader.target}) Exception: " + str(e))
self.__stored_tx.put(tx)
self.__peer_status = PeerProcessStatus.leader_complained
            # TODO: the leader-complain mechanism is being reworked; the trigger below is disabled for now.
# stub_to_self_peer.call_in_time(
# "NotifyLeaderBroken",
# loopchain_pb2.CommonRequest(request="Fail Add Tx to Leader")
# )
return loopchain_pb2.Message(code=message_code.Response.success)
def __handler_connect_to_leader(self, request, context):
logging.debug(f"TxService handler connect to leader({request.message})")
leader_target = request.message
self.__stub_to_leader = StubManager.get_stub_manager_to_server(
leader_target, loopchain_pb2_grpc.PeerServiceStub,
time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
is_allow_null_stub=True
)
self.__peer_status = PeerProcessStatus.normal
        # TODO: check the failure condition for connecting to the block generator
if self.__stub_to_leader is None:
return loopchain_pb2.Message(code=message_code.Response.fail_connect_to_leader)
else:
try:
self.__create_tx_continue()
except Exception as e:
logging.warning("in tx service create tx continue() Exception: " + str(e))
self.__peer_status = PeerProcessStatus.leader_complained
return loopchain_pb2.Message(code=message_code.Response.fail_add_tx_to_leader)
return loopchain_pb2.Message(code=message_code.Response.success)
def __handler_connect_to_inner_peer(self, request, context):
logging.debug(f"TxService handler connect to inner peer({request.message})")
inner_peer_target = request.message
        # Build a stub for connecting to the parent peer that created this process.
        # Returning results over a pipe can raise errors when sends and receives fall out of sync,
        # so for a reliable connection the parent process is also reached through a gRPC stub.
self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server(
inner_peer_target, loopchain_pb2_grpc.InnerServiceStub,
time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
is_allow_null_stub=True
)
logging.debug("try connect to inner peer: " + str(inner_peer_target))
return loopchain_pb2.Message(code=message_code.Response.success)
def Request(self, request, context):
# logging.debug("TxService got request: " + str(request))
if request.code in self.__handler_map.keys():
return self.__handler_map[request.code](request, context)
return loopchain_pb2.Message(code=message_code.Response.not_treat_message_code)
| 2.109375 | 2 |
openGaussBase/testcase/SQL/DCL/Set_Session_Authorization/Opengauss_Function_Set_Session_Authorization_Case0007.py | opengauss-mirror/Yat | 0 | 12795157 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 功能测试
Case Name : set AUTHORIZATION DEFAULT
Description :
1.使用初始用户连gsql,创建用户,给用户赋登录权限,期望:创建赋权成功
2.使用role7_001连接gsql,查看当前会话用户,当前用户。期望:SESSION_USER, CURRENT_USER均为role7_001
3.使用role7_001连接gsql,执行set role语句设置为group用户role7_002 期望:设置成功,查询SESSION_USER和CURRENT_USER为role7_002
4.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询SESSION_USER和CURRENT_USER为role7_001
5.使用初始用户连gsql,删除表 期望:删除成功
6.使用role7_001连接gsql,执行SET SESSION AUTHORIZATION DEFAULT 期望:resset成功,查询table_set_role7_001属主为role7_002 table_set_role7_002属主为role7_001,查
7.使用初始用户连gsql,清理环境。期望:删除用户成功
Expect :
History :
"""
import sys
import unittest
from yat.test import macro
from yat.test import Node
sys.path.append(sys.path[0]+"/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
class Privategrant(unittest.TestCase):
def setUp(self):
        logger.info('------------------------Opengauss_Function_Set_Session_Authorization_Case0007 starts-----------------------------')
self.userNode = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.Constant = Constant()
self.commonsh = CommonSH('dbuser')
def test_common_user_permission(self):
        logger.info('------------------------Create the users and grant them sysadmin privileges. Expected: creation and grants succeed-----------------------------')
sql_cmd = self.commonsh.execut_db_sql(f'''
drop table if exists table_set_role7_001 cascade;
drop table if exists table_set_role7_002 cascade;
drop role if exists role7_001;
drop role if exists role7_002;
create role role7_001 password '{<PASSWORD>}';
create role role7_002 password '{<PASSWORD>}';
grant all privileges to role7_002;
alter role role7_001 with login;
alter role role7_002 with login;
grant all privileges to role7_001;
''')
logger.info(sql_cmd)
self.assertIn(self.Constant.DROP_ROLE_SUCCESS_MSG, sql_cmd)
self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd)
self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd)
        logger.info('-----------Connect to gsql as role7_001 and check the session user and the current user. Expected: SESSION_USER and CURRENT_USER are both role7_001-------------')
sql_cmd = ('''
SELECT SESSION_USER, CURRENT_USER;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertIn("role7_001", msg)
        logger.info('----------Connect to gsql as role7_001 and run set role to switch to the group user role7_002. Expected: the switch succeeds and SESSION_USER and CURRENT_USER report role7_002---------')
sql_cmd = (f'''
SET SESSION AUTHORIZATION role7_002 password '{macro.COMMON_PASSWD}';
SELECT SESSION_USER, CURRENT_USER;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertIn("SET", msg)
self.assertNotIn("role7_001", msg)
self.assertIn("role7_002", msg)
        logger.info('----------Connect to gsql as role7_001 and run SET SESSION AUTHORIZATION DEFAULT. Expected: the reset succeeds and SESSION_USER and CURRENT_USER report role7_001---------')
sql_cmd = ('''
SET SESSION AUTHORIZATION DEFAULT;
SELECT SESSION_USER, CURRENT_USER;
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertIn("SET", msg)
self.assertNotIn("role7_002", msg)
self.assertIn("role7_001", msg)
        logger.info('------------------------Drop the tables. Expected: the drop succeeds-----------------------------')
sql_cmd = self.commonsh.execut_db_sql('''
drop table if exists table_set_role7_001 cascade;
drop table if exists table_set_role7_002 cascade;
''')
logger.info(sql_cmd)
self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd)
        logger.info('----------Connect to gsql as role7_001 and run SET SESSION AUTHORIZATION DEFAULT---------')
        logger.info('----------Expected: the reset succeeds, table_set_role7_002 is owned by role7_001, and SESSION_USER and CURRENT_USER report role7_001---------')
sql_cmd = (f'''
begin
SET local session AUTHORIZATION role7_002 password '{<PASSWORD>}';
create table table_set_role7_001(id int);
SET SESSION AUTHORIZATION DEFAULT;
create table table_set_role7_002(id int);
end;
SELECT SESSION_USER, CURRENT_USER;
select tableowner from pg_tables where tablename ='table_set_role7_002';
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn("role7_002", msg)
self.assertIn("role7_001", msg)
        logger.info('-------------------Connect to gsql as role7_001 and verify that table_set_role7_001 is owned by role7_002---------------')
sql_cmd = ('''
select tableowner from pg_tables where tablename ='table_set_role7_001';
''')
excute_cmd = f'''
source {self.DB_ENV_PATH};
gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U role7_001 -W {macro.COMMON_PASSWD} -c "{sql_cmd}"
'''
logger.info(excute_cmd)
msg = self.userNode.sh(excute_cmd).result()
logger.info(msg)
self.assertNotIn("role7_001", msg)
self.assertIn("role7_002", msg)
def tearDown(self):
        logger.info('---------------------------------Clean up the environment. Expected: the users are dropped successfully-----------------------------------')
sql_cmd = self.commonsh.execut_db_sql("drop table "
"if exists table_set_role7_001 cascade;"
"drop table if exists table_set_role7_002 cascade;"
"drop role if exists role7_001, role7_002;"
"drop group if exists group7;")
logger.info(sql_cmd)
        logger.info('-------------------------Opengauss_Function_Set_Session_Authorization_Case0007 finished---------------------------')
| 1.632813 | 2 |
Software/RMSDReducer.py | hrch3n/cNMA | 3 | 12795158 | <gh_stars>1-10
'''
Created on Jan 24, 2014
@author: oliwa
'''
from prody.measure.measure import calcDeformVector
import numpy as np
from prody.dynamics.compare import calcOverlap
from prody.dynamics.mode import Vector
from prody.measure.transform import calcRMSD
from scipy.sparse.linalg import cg
from timeout import timeout
from timeout import TimeoutError
from collections import OrderedDict
class RMSDReducer(object):
'''
    The RMSDReducer contains methods to reduce the RMSD between proteins.
'''
def __init__(self, utils):
'''
Constructor
'''
self.utils = utils
def setupMTMforBetas(self, anm):
""" Calculate and return the dot product of all ANM modes transposed times
all ANM modes."""
M = anm.getArray()
Mtrans = M.T
MTM = np.dot(Mtrans, M)
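        # MTM is the Gram matrix of the mode vectors; slices of it form the left-hand
        # side of the normal equations solved for the betas in obtainLstSqBetas.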
return MTM
def calcRMSDReductions(self, anm_slc, ref_chain, mob_chain, defvec):
""" Calculate a list of RMSD reductions based increasing number of modes, that are
combined in a linear combination with betas.
Args:
anm_slc: The sliced ANM, with the corresponding entries of the eigenvectors
towards the matched atoms
ref_chain: The overall matched chain atoms from the unbound structure
mob_chain: The overall matched chain atoms from the bound structure
defvec: the deformation vector
Returns:
RMSDReductions: The reduction list of obtained RMSD values
"""
RMSDReductions = []
overlap = []
MTM = self.setupMTMforBetas(anm_slc[0])
betasListWhole = []
stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes())
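        # The reduction is evaluated at a growing set of mode counts rather than after
        # every single mode, and the guard counter switches to the preconceived fitting
        # path once enough consecutive steps fail to lower the RMSD.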
guard = 0
for i in stepPointsReduction:
if self.utils.config.stopRMSDReductionAt:
if i > self.utils.config.stopRMSDReductionAt:
# temporary, to speedup other calculations
continue
# elif RMSDReductions and (RMSDReductions[-1] == 1):
# # we already reached a RMSD Rreduction of 1.0
# betasListWhole.append(betasListWhole[-1])
# RMSDReductions.append(RMSDReductions[-1])
# overlap.append(overlap[-1])
# print "already reached RMSD = 1 at i:", i
# raw_input()
# continue
if guard < self.utils.config.guard:
# calculate betas
try:
betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc)
except TimeoutError:
print "RMSD timeout at modes", i,"using previous betas"
# with open("RMSDtimeoutMAX"+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
# myfile.write(referenceName+" RMSD timeout at modes " +str(i)+" using previous betas\n ")
betas = self.getInitialGuess(betasListWhole, i)
Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the unbound structure and get the reduced RMSD
ref_chain_copy = ref_chain.copy()
ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain)
if RMSDReductions:
if RMSD_after_Tapprox < RMSDReductions[-1]:
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
guard = 0
else:
print "previous RMSD lower at ", i
# else the previous RMSD was actually lower, the beta calculation was not successful
guard += 1
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
else:
# else it is the first RMSD reduction run, no need to compare against previous RMSD
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else guard is >= self.utils.config.guard, and the RMSD reduction should go preconceived
# calculate betas
try:
betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True)
except TimeoutError:
print "RMSD timeout at modes", i, "using previous betas"
# with open("RMSDtimeoutMAX"+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
# myfile.write(referenceName+" RMSD timeout at modes " +str(i)+" using previous betas\n ")
betas = self.getInitialGuess(betasListWhole, i)
Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the unbound structure and get the reduced RMSD
ref_chain_copy = ref_chain.copy()
ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain)
if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]):
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else the previous RMSD was actually lower, the beta calculation was not successful
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
# cast objects
overlap = np.array(overlap, dtype=np.float64)
RMSDReductions = np.array(RMSDReductions, dtype=np.float64)
return RMSDReductions, overlap, stepPointsReduction
def calcRMSDReductionsReverse(self, anm_slc, ref_chain, mob_chain, defvec, referenceName, filePrefix):
""" Calculate a list of RMSD reductions based increasing number of modes, that are
combined in a linear combination with betas. RMSD change from mob_chain to ref_chain
Args:
anm_slc: The sliced ANM, with the corresponding entries of the eigenvectors
towards the matched atoms
ref_chain: The overall matched chain atoms from the unbound structure
mob_chain: The overall matched chain atoms from the bound structure
defvec: the deformation vector
referenceName: the name of the reference
Returns:
RMSDReductions: The reduction list of obtained RMSD values
"""
print "anm_slc[0].getArray(): ", anm_slc[0][0:2].getArray().shape
RMSDReductions = []
overlap = []
MTM = self.setupMTMforBetas(anm_slc[0])
betasListWhole = []
stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes())
guard = 0
for i in stepPointsReduction:
if self.utils.config.stopRMSDReductionAt:
if i > self.utils.config.stopRMSDReductionAt:
# temporary, to speedup other calculations
continue
# elif RMSDReductions and (RMSDReductions[-1] == 1):
# # we already reached a RMSD Rreduction of 1.0
# betasListWhole.append(betasListWhole[-1])
# RMSDReductions.append(RMSDReductions[-1])
# overlap.append(overlap[-1])
# print "already reached RMSD = 1 at i:", i
# raw_input()
# continue
if guard < self.utils.config.guard:
# calculate betas
try:
betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc)
except TimeoutError:
print "RMSD timeout at modes", i,"using previous betas"
with open("RMSDtimeout"+filePrefix+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
myfile.write(referenceName+" RMSD timeout at modes " +str(i)+" using previous betas\n ")
betas = self.getInitialGuess(betasListWhole, i)
Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the bound structure and get the reduced RMSD
mob_chain_copy = mob_chain.copy()
mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain)
if RMSDReductions:
if RMSD_after_Tapprox < RMSDReductions[-1]:
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
guard = 0
else:
print "previous RMSD lower at ", i
# else the previous RMSD was actually lower, the beta calculation was not successful
guard += 1
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
else:
# else it is the first RMSD reduction run, no need to compare against previous RMSD
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else guard is >= self.utils.config.guard, and the RMSD reduction should go preconceived
# calculate betas
try:
betas = self.obtainLstSqBetas(anm_slc[0][0:i+1], defvec, MTM, i, betasListWhole, anm_slc, preconceived=True)
except TimeoutError:
print "RMSD timeout at modes", i, "using previous betas"
with open("RMSDtimeout"+filePrefix+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
myfile.write(referenceName+" RMSD timeout at modes " +str(i)+" using previous betas\n ")
betas = self.getInitialGuess(betasListWhole, i)
Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the bound structure and get the reduced RMSD
mob_chain_copy = mob_chain.copy()
mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain)
if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]):
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else the previous RMSD was actually lower, the beta calculation was not successful
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
# cast objects
overlap = np.array(overlap, dtype=np.float64)
RMSDReductions = np.array(RMSDReductions, dtype=np.float64)
return RMSDReductions, overlap, stepPointsReduction
def calcRMSDReductionsReverseGeneral(self, Marray, ref_chain, mob_chain, defvec, referenceName, filePrefix):
""" Calculate a list of RMSD reductions based increasing number of modes, that are
combined in a linear combination with betas. RMSD change from mob_chain to ref_chain
Args:
Marray: Array of normal modes, same shape as getArray from an ANM object
ref_chain: The overall matched chain atoms from the unbound structure
mob_chain: The overall matched chain atoms from the bound structure
defvec: the deformation vector
referenceName: the name of the reference
Returns:
RMSDReductions: The reduction list of obtained RMSD values
"""
#print "Marray: ", Marray[0:2]
RMSDReductions = []
overlap = []
numModes = Marray.shape[1]
#MTM = self.setupMTMforBetas(anm_slc[0])
Mtrans = Marray.T
MTM = np.dot(Mtrans, Marray)
betasListWhole = []
stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, numModes, initialStep=1)
print "stepPointsReduction: ", stepPointsReduction
guard = 0
for i in stepPointsReduction:
if self.utils.config.stopRMSDReductionAt:
if i > self.utils.config.stopRMSDReductionAt:
# temporary, to speedup other calculations
continue
# elif RMSDReductions and (RMSDReductions[-1] == 1):
# # we already reached a RMSD Rreduction of 1.0
# betasListWhole.append(betasListWhole[-1])
# RMSDReductions.append(RMSDReductions[-1])
# overlap.append(overlap[-1])
# print "already reached RMSD = 1 at i:", i
# raw_input()
# continue
if guard < self.utils.config.guard:
# calculate betas
try:
betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes)
except TimeoutError:
print "RMSD timeout at modes", i,"using previous betas"
with open("RMSDtimeoutgeneral"+filePrefix+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
myfile.write(referenceName+" RMSD timeout at modes " +str(i)+" using previous betas\n ")
betas = self.getInitialGuess(betasListWhole, i)
Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1])
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the bound structure and get the reduced RMSD
mob_chain_copy = mob_chain.copy()
mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain)
if RMSDReductions:
if RMSD_after_Tapprox < RMSDReductions[-1]:
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
guard = 0
else:
print "previous RMSD lower at ", i
# else the previous RMSD was actually lower, the beta calculation was not successful
guard += 1
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
else:
# else it is the first RMSD reduction run, no need to compare against previous RMSD
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else guard is >= self.utils.config.guard, and the RMSD reduction should go preconceived
# calculate betas
try:
betas = self.obtainLstSqBetasGeneral(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes, preconceived=True)
except TimeoutError:
print "RMSD timeout at modes", i, "using previous betas"
with open("RMSDtimeoutgeneral"+filePrefix+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
myfile.write(referenceName+" RMSD timeout at modes " +str(i)+" using previous betas\n ")
betas = self.getInitialGuess(betasListWhole, i)
Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1])
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the bound structure and get the reduced RMSD
mob_chain_copy = mob_chain.copy()
mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain)
if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]):
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else the previous RMSD was actually lower, the beta calculation was not successful
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
# cast objects
overlap = np.array(overlap, dtype=np.float64)
RMSDReductions = np.array(RMSDReductions, dtype=np.float64)
return RMSDReductions, overlap, stepPointsReduction
def calcRMSDReductionsExpandingSet(self, Marray, ref_chain, mob_chain, defvec, stepPointsReduction, referenceName, filePrefix):
""" Calculate a list of RMSD reductions based increasing number of modes, that are
combined in a linear combination with betas. RMSD change from mob_chain to ref_chain
Args:
Marray: Array of normal modes, same shape as getArray from an ANM object
ref_chain: The overall matched chain atoms from the unbound structure
mob_chain: The overall matched chain atoms from the bound structure
defvec: the deformation vector
stepPointsReduction: list of number of modes to successively calculate the RMSD reductions on
referenceName: the name of the reference, for output debugging purposes
filePrefix: file prefix, for output debugging purposes
Returns:
RMSDReductions: The reduction list of obtained RMSD values
"""
RMSDReductions = []
L_RMSReductions = []
overlap = []
numModes = Marray.shape[1]
Mtrans = Marray.T
MTM = np.dot(Mtrans, Marray)
stepPointsReduction = stepPointsReduction - 1 # reduce every value by one to have the index match the range 0 to n-1
print stepPointsReduction
betasListWhole = [[0] * stepPointsReduction[0]]
deformationSnapshots = OrderedDict()
deformationSnapshots["proteinFrom"] = mob_chain.copy()
for i in stepPointsReduction:
if self.utils.config.stopRMSDReductionAt:
if i > self.utils.config.stopRMSDReductionAt or i > numModes:
# temporary, to speedup other calculations
continue
# calculate betas
try:
betas = self.obtainLstSqBetasGeneralizedExpanding(Marray.T[0:i+1].T, defvec, MTM, i, betasListWhole, numModes)
except TimeoutError:
print "RMSD timeout at modes", i,"using previous betas"
with open("RMSDtimeoutgeneral"+filePrefix+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
myfile.write(referenceName+" RMSD timeout at modes " +str(i)+" using previous betas\n ")
betas = self.getInitialGuessExpanding(betasListWhole, i, numModes)
Tapprox = np.dot(betas[0:i+1], Marray.T[0:i+1])
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the bound structure and get the reduced RMSD
mob_chain_copy = mob_chain.copy()
mob_chain_copy.setCoords(mob_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(mob_chain_copy, ref_chain)
L_RMSD_after_Tapprox = self.getL_RMS(mob_chain_copy, ref_chain, self.utils.config.investigationsOn)
deformationSnapshots[i] = mob_chain_copy.copy()
if RMSDReductions:
if RMSD_after_Tapprox < RMSDReductions[-1]:
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
else:
print "previous RMSD lower at ", i
# else the previous RMSD was actually lower, the beta calculation was not successful
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
else:
# else it is the first RMSD reduction run, store betas and RMSD reduction results
initial_RMSD = calcRMSD(mob_chain, ref_chain)
if RMSD_after_Tapprox < initial_RMSD:
RMSDReductions.append(RMSD_after_Tapprox)
else:
RMSDReductions.append(initial_RMSD)
print "first mode did not lower RMSD"
betasListWhole.append(betas)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
currentOverlap = 0
overlap.append(currentOverlap)
if L_RMSReductions:
if L_RMSD_after_Tapprox < L_RMSReductions[-1]:
L_RMSReductions.append(L_RMSD_after_Tapprox)
else:
print "previous L_RMS lower at ", i
# else the previous LRMS was actually lower, the beta calculation was not successful
L_RMSReductions.append(L_RMSReductions[-1])
else:
# else it is the first L_RMSD reduction run, store L_RMS reduction results
initial_L_RMS = self.getL_RMS(mob_chain, ref_chain, self.utils.config.investigationsOn)
if L_RMSD_after_Tapprox < initial_L_RMS:
L_RMSReductions.append(L_RMSD_after_Tapprox)
else:
L_RMSReductions.append(initial_L_RMS)
print "first mode did not lower L_RMS"
# cast objects
overlap = np.array(overlap, dtype=np.float64)
RMSDReductions = np.array(RMSDReductions, dtype=np.float64)
L_RMSReductions = np.array(L_RMSReductions, dtype=np.float64)
deformationSnapshots["proteinTo"] = ref_chain.copy()
return RMSDReductions, overlap, stepPointsReduction, L_RMSReductions, deformationSnapshots
def getL_RMS(self, proteinFrom, proteinTo, investigationsOn):
""" Get the L_RMS of proteinFrom and proteinTo (they need to be chain matched).
Args:
proteinFrom: Deformed protein
proteinTo: Target protein (target of the deformation vector)
investigationsON: "Complex" or "Individual"
Returns:
L_RMS of proteinFrom and proteinTo
"""
if investigationsOn == "Complex":
proteinFromL = proteinFrom.select('segment \"L.\"')
proteinToL = proteinTo.select('segment \"L.\"')
return calcRMSD(proteinFromL, proteinToL)
else:
# else it is an investigation on individual proteins, L_RMS does not apply,
# return RMSD of individual proteins instead
return calcRMSD(proteinFrom, proteinTo)
def calcRMSDReductionFromTo(self, Marray, proteinFrom, proteinTo, defvec, previousBetas, previousOverlap, previousRMSD, referenceName, filePrefix):
""" Calculate a list of RMSD reductions based increasing number of modes, that are
combined in a linear combination with betas. RMSD change from mob_chain to ref_chain
Args:
Marray: Array of normal modes, same shape as getArray from an ANM object
proteinFrom: The overall matched chains of the protein to deform towards proteinTo
proteinTo: The overall matched chains of the protein which is being deformed towards
previousBetas: The previous betas, serves as part of the initial guess for the fitter
previousOverlap: The previous overlap
previousRMSD: The previous reduced RMSD
defvec: the deformation vector from proteinFrom to proteinTo
referenceName: the name of the reference, for output debugging if the RMSD fitter timeouts
filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts
Returns:
RMSDReduction, overlap, betas
"""
Mtrans = Marray.T
MTM = np.dot(Mtrans, Marray)
if len(previousBetas) == 0:
previousBetas = [0]
else:
previousBetas = previousBetas[-1]
if len(previousOverlap) == 0:
previousOverlap = 0
else:
previousOverlap = previousOverlap[-1]
if len(previousRMSD) == 0:
previousRMSD = calcRMSD(proteinFrom, proteinTo)
else:
previousRMSD = previousRMSD[-1]
try:
betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM)
except TimeoutError:
print "RMSD timeout at modes", Marray.shape[1]," using previous betas"
with open("RMSDtimeoutgeneral"+filePrefix+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
myfile.write(referenceName+" RMSD timeout at modes " +str(Marray.shape[1])+" using previous betas\n ")
betas = self.getInitialGuess(previousBetas, Marray.shape[1])
Tapprox = np.dot(betas, Marray.T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of proteinFrom and get the RMSD towards proteinTo
proteinFrom_copy = proteinFrom.copy()
proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo)
# RMSD comparison
if previousRMSD:
if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox) or previousRMSD < RMSD_after_Tapprox:
print "RMSD_after_Tapprox has a numerical problem, maybe the two structures are already too close or the mode vectors are problematic"
RMSD_after_Tapprox = previousRMSD
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem, maybe the two structures are already too close or the mode vectors are problematic"
if previousOverlap:
currentOverlap = previousOverlap
else:
currentOverlap = 0
return RMSD_after_Tapprox, currentOverlap, betas
def RMSDReductionFixedset(self, Marray, proteinFrom, proteinTo, defvec, referenceName, filePrefix):
""" One shot calculation for the RMSD reduction.
Args:
Marray: Array of normal modes, same shape as getArray from an ANM object
proteinFrom: The overall matched chains of the protein to deform towards proteinTo
proteinTo: The overall matched chains of the protein which is being deformed towards
defvec: the deformation vector from proteinFrom to proteinTo
referenceName: the name of the reference, for output debugging if the RMSD fitter timeouts
filePrefix: filePrefix, for output debugging if the RMSD fitter timeouts
Returns:
RMSDReduction
"""
Mtrans = Marray.T
MTM = np.dot(Mtrans, Marray)
try:
betas = self.obtainLstSqBetasGeneralized2(Marray, defvec, MTM)
except TimeoutError:
print "RMSD timeout at modes", Marray.shape[1]," using previous betas"
with open("RMSDtimeoutgeneral"+filePrefix+self.utils.config.whatAtomsToMatch+".txt", "a") as myfile:
myfile.write(referenceName+" RMSD timeout at modes " +str(Marray.shape[1])+" using previous betas\n ")
betas = self.getInitialGuess([0], Marray.shape[1])
Tapprox = np.dot(betas, Marray.T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of proteinFrom and get the RMSD towards proteinTo
proteinFrom_copy = proteinFrom.copy()
proteinFrom_copy.setCoords(proteinFrom_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(proteinFrom_copy, proteinTo)
# RMSD comparison
if np.isnan(RMSD_after_Tapprox) or np.isinf(RMSD_after_Tapprox):
print "RMSD_after_Tapprox has a numerical problem, maybe the two structures are already too close or the mode vectors are problematic, returning original RMSD"
RMSD_after_Tapprox = calcRMSD(proteinFrom, proteinTo)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem, maybe the two structures are already too close or the mode vectors are problematic, returning overlap 0"
currentOverlap = 0
return RMSD_after_Tapprox, currentOverlap, betas
@timeout()
def obtainLstSqBetas(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False):
""" Obtain betas by a scipy optimizer fitting, the formula is given in :
Moal, <NAME>., and <NAME>. "SwarmDock and the Use of Normal
Modes in Protein-Protein Docking." International Journal of
Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648.
doi:10.3390/ijms11103623.
Args:
anm: the ANM with modes
defvec: the deformationvector
MTMfull: dot product of the full ANM matrix inverse times
the ANM matrix
modesToConsider: up to how many modes the betas should be calculated
listofPreviousBetas: the list of previously calculated betas
anmTuple: anm tuple as generated by Prody
preconceived: has guard from config been reached or not
Returns:
            the beta coefficients
"""
M = anm.getArray()
#print "first M original: ", M
Tdefvec = defvec.getArray()
#print "shape(Tdefvec): ", np.shape(Tdefvec)
#print "shape(M): ", np.shape(M)
if len(M) != len(Tdefvec):
raise ValueError("Cannot calculate betas, len(M) != len(Tdefvec)")
Mtrans = M.T
MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM
maximalIter = self.utils.config.maxIterBetas
if modesToConsider < 1:
#print "original MTM, np.dot(Mtrans, Tdefvec) ", MTM, np.dot(Mtrans, Tdefvec)
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]
print "modesToConsider, status: ", modesToConsider, status
elif not preconceived:
initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2]
print "modesToConsider, status: ", modesToConsider, status
else:
# how many modes could be calculated on this structure
nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6
initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)
if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):
if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0:
print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "det(MTM) == 0, skipped"
return initialGuess
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2]
if status != 0:
print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "status == ", status, " skipped"
return initialGuess
print "modesToConsider, status: ", modesToConsider, status
return betas
@timeout()
def obtainLstSqBetasGeneral(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False):
""" Obtain betas by a scipy optimizer fitting, the formula is given in :
Moal, <NAME>., and <NAME>. "SwarmDock and the Use of Normal
Modes in Protein-Protein Docking." International Journal of
Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648.
doi:10.3390/ijms11103623.
Args:
anm: the ANM with modes
defvec: the deformationvector
MTMfull: dot product of the full ANM matrix inverse times
the ANM matrix
modesToConsider: up to how many modes the betas should be calculated
listofPreviousBetas: the list of previously calculated betas
maxModes: the number of modes
preconceived: has guard from config been reached or not
Returns:
            the beta coefficients
"""
M = anm
Tdefvec = defvec.getArray()
#print "shape(Tdefvec): ", np.shape(Tdefvec)
#print "shape(M): ", np.shape(M)
if len(M) != len(Tdefvec):
print "len(M): ", M.shape
print "len(Tdefvec): ", len(Tdefvec)
raise ValueError("Cannot calculate betas, len(M) != len(Tdefvec)")
Mtrans = M.T
MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM
maximalIter = self.utils.config.maxIterBetas
if modesToConsider < 1:
#print "original MTM, np.dot(Mtrans, Tdefvec) ", MTM, np.dot(Mtrans, Tdefvec)
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]
print "modesToConsider, status: ", modesToConsider, status
elif not preconceived:
initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2]
print "modesToConsider, status: ", modesToConsider, status
else:
# how many modes could be calculated on this structure
nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6
initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)
if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):
if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0:
print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "det(MTM) == 0, skipped"
return initialGuess
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2]
if status != 0:
print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "status == ", status, " skipped"
return initialGuess
print "modesToConsider, status: ", modesToConsider, status
return betas
@timeout()
def obtainLstSqBetasGeneralizedExpanding(self, anm, defvec, MTMfull, modesToConsider, listofPreviousBetas, maxModes, preconceived=False):
""" Obtain betas by a scipy optimizer fitting, the formula is given in :
Moal, <NAME>., and <NAME>. "SwarmDock and the Use of Normal
Modes in Protein-Protein Docking." International Journal of
Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648.
doi:10.3390/ijms11103623.
Args:
anm: the ANM with modes
            defvec: the deformation vector
            MTMfull: dot product of the transposed full ANM mode matrix with
                the ANM mode matrix (i.e. M^T M)
modesToConsider: up to how many modes the betas should be calculated, starting from 0 to n-1
listofPreviousBetas: the list of previously calculated betas
maxModes: the number of modes
preconceived: has guard from config been reached or not
Returns:
            the beta coefficients
"""
M = anm
Tdefvec = defvec.getArray()
if len(M) != len(Tdefvec):
print "len(M): ", M.shape
print "len(Tdefvec): ", len(Tdefvec)
raise ValueError("Cannot calculate betas, len(M) != len(Tdefvec)")
Mtrans = M.T
MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM
maximalIter = self.utils.config.maxIterBetas
if modesToConsider < 1:
#print "original MTM, np.dot(Mtrans, Tdefvec) ", MTM, np.dot(Mtrans, Tdefvec)
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]
print "modesToConsider, status: ", modesToConsider, status
elif not preconceived:
initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes)
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2]
print "modesToConsider, status: ", modesToConsider, status
else:
# how many modes could be calculated on this structure
nonTrivialModes = maxModes #(maxModes[1].select('calpha').numAtoms()*3) - 6
initialGuess = self.getInitialGuessExpanding(listofPreviousBetas, modesToConsider, maxModes)
if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):
if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0:
print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "det(MTM) == 0, skipped"
return initialGuess
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# betas, status = lsmr(MTM, np.dot(Mtrans, Tdefvec), atol=self.utils.config.precisionBetaFitting, btol=self.utils.config.precisionBetaFitting, conlim=1000000000.0, maxiter=maximalIter)[0:2]
if status != 0:
print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "status == ", status, " skipped"
return initialGuess
print "modesToConsider, status: ", modesToConsider, status
return betas
@timeout()
def obtainLstSqBetasGeneralized2(self, M, defvec, MTM, previousBetas=None):
""" Obtain betas by a scipy optimizer fitting, the formula is given in :
Moal, <NAME>., and <NAME>. "SwarmDock and the Use of Normal
Modes in Protein-Protein Docking." International Journal of
Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648.
doi:10.3390/ijms11103623.
Args:
M: the modes array
defvec: the deformation vector
            MTM: dot product of the transposed modes array with the modes array (M^T M)
previousBetas: previously calculated betas
Returns:
            the beta coefficients
"""
Tdefvec = defvec.getArray()
if len(M) != len(Tdefvec):
print "len(M): ", M.shape
print "len(Tdefvec): ", len(Tdefvec)
raise ValueError("Cannot calculate betas, len(M) != len(Tdefvec)")
Mtrans = M.T
# the default maxiter is too low, increase the number
maximalIter = self.utils.config.maxIterBetas
if M.shape[1] == 1:
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]
else:
if previousBetas is not None:
initialGuess = self.expandInitialGuess(previousBetas, M.shape[1])
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
else:
betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
print "modesToConsider, status: ", M.shape[1], status
return betas
# def obtainLstSqBetasByCollectivity(self, M, defvec, MTMfull, modesToConsider, listofPreviousBetas, anmTuple, preconceived=False):
# """ Obtain betas by a scipy optimizer fitting, the formula is given in :
#
# Moal, <NAME>., and <NAME>. "SwarmDock and the Use of Normal
# Modes in Protein-Protein Docking." International Journal of
# Molecular Sciences 11, no. 10 (September 28, 2010): 3623-3648.
# doi:10.3390/ijms11103623.
#
# Args:
# anm: the ANM with modes
# defvec: the deformationvector
# MTMfull: dot product of the full ANM matrix inverse times
# the ANM matrix
# modesToConsider: up to how many modes the betas should be calculated
#
# Returns:
# the beta coefficents
# """
# ### old
# ### M = anm.getArray()
#
# Tdefvec = defvec.getArray()
# #print "shape(Tdefvec): ", np.shape(Tdefvec)
# #print "shape(M): ", np.shape(M)
# if len(M) != len(Tdefvec):
# raise ValueError("Cannot calculate betas, len(M) != len(Tdefvec)")
# Mtrans = M.T
# MTM = MTMfull[:modesToConsider+1,:modesToConsider+1] # use pre-calculated MTM
# maximalIter = self.utils.config.maxIterBetas
#
# if modesToConsider < 1:
# print "using one column"
# betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), maxiter=maximalIter)[0:2]
# elif not preconceived:
# initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)
# betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# print "modesToConsider, status: ", modesToConsider, status
# else:
# # how many modes could be calculated on this structure
# nonTrivialModes = (anmTuple[1].select('calpha').numAtoms()*3) - 6
# initialGuess = self.getInitialGuess(listofPreviousBetas, modesToConsider)
# if modesToConsider > (nonTrivialModes+self.utils.config.goOverdetermined):
# if np.linalg.det(MTM) == 0.0 or np.linalg.det(MTM) == -0.0:
# print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "det(MTM) == 0, skipped"
# return initialGuess
# betas, status = cg(MTM, np.dot(Mtrans, Tdefvec), x0=initialGuess, maxiter=maximalIter, tol=self.utils.config.precisionBetaFitting)[0:2]
# if status != 0:
# print "modesToConsider, nonTrivialModes, status: ", modesToConsider, nonTrivialModes, "status == ", status, " skipped"
# return initialGuess
# print "modesToConsider, status: ", modesToConsider, status
# return betas
def getInitialGuessExpanding(self, listofPreviousBetas, modesToConsider, maxModesOverall):
""" Create an initial guess vector, padded with 0.0 values to the correct length.
Args:
listofPreviousBetas: the list of previously calculated Betas
            modesToConsider: up to how many modes are given to get the betas
            maxModesOverall: upper bound on the length of the returned initial guess
Returns:
The initial guess vector for the betas, padded with 0.0 to reach the
correct length
"""
initialGuess = listofPreviousBetas[-1]
initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)])
if len(initialGuess) > maxModesOverall:
initialGuess = initialGuess[:maxModesOverall]
return initialGuess
def getInitialGuess(self, listofPreviousBetas, modesToConsider):
""" Create an initial guess vector, padded with 0.0 values to the correct length.
Args:
listofPreviousBetas: the list of previously calculated Betas
modesToConsider: up to how many modes are given to get the betas
Returns:
The initial guess vector for the betas, padded with 0.0 to reach the
correct length
"""
initialGuess = listofPreviousBetas[-1]
initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider+1)])
return initialGuess
def expandInitialGuess(self, listofPreviousBetas, modesToConsider):
""" Create an initial guess vector, padded with 0.0 values to the correct length.
Args:
listofPreviousBetas: the list of previously calculated Betas
modesToConsider: up to how many modes are given to get the betas
Returns:
The initial guess vector for the betas, padded with 0.0 to reach the
correct length
"""
initialGuess = listofPreviousBetas
initialGuess = np.append(initialGuess, [x*0.0 for x in range(len(initialGuess), modesToConsider)])
return initialGuess
def calcRMSDReductionsAidedByCollectivity(self, collectivity, highestN, excludeFirstK, anm_slc, ref_chain, mob_chain):
indicesOfHighest = self.utils.getIndiciesofHighestN(np.abs(collectivity), highestN, excludeFirstK)
M = self.getModeArrayBasedOnIndices(anm_slc[0], excludeFirstK, indicesOfHighest)
defvec = calcDeformVector(ref_chain, mob_chain)
RMSDReductions = []
overlap = []
Mtrans = M.T
MTM = np.dot(Mtrans, M)
betasListWhole = []
stepPointsReduction = self.utils.getRMSDReductionStepPoints(10, 10, anm_slc[0].numModes())
guard = 0
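        # For a growing number of retained modes, fit the betas, build the approximate
        # deformation Tapprox = sum_i beta_i * m_i, apply it to a copy of the unbound
        # chain, and keep the step only if the RMSD towards the bound chain decreases;
        # `guard` counts consecutive non-improving steps before the loop switches to
        # the preconceived (initial-guess seeded) branch.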
for i in stepPointsReduction:
if self.utils.config.stopRMSDReductionAt:
if i > self.utils.config.stopRMSDReductionAt:
# temporary, to speedup other calculations
continue
if guard < self.utils.config.guard:
# calculate betas
## new Mmode instead of anm_slc and then [][]
Mmode = self.getModeArrayKeepingFirstK(M, i)
print "Mmode: ", np.shape(Mmode)
betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc)
Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the unbound structure and get the reduced RMSD
ref_chain_copy = ref_chain.copy()
ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain)
if RMSDReductions:
if RMSD_after_Tapprox < RMSDReductions[-1]:
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
guard = 0
else:
print "previous RMSD lower at ", i
# else the previous RMSD was actually lower, the beta calculation was not successful
guard += 1
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
else:
# else it is the first RMSD reduction run, no need to compare against previous RMSD
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else guard is >= self.utils.config.guard, and the RMSD reduction should go preconceived
# calculate betas
Mmode = self.getModeArrayKeepingFirstK(M, i)
betas = self.obtainLstSqBetasByCollectivity(Mmode, defvec, MTM, i, betasListWhole, anm_slc, preconceived=True)
Tapprox = np.dot(betas[0:i+1], anm_slc[0][0:i+1].getArray().T)
TapproxVector = Vector(Tapprox, "Tapprox")
# apply Tapprox to a copy of the unbound structure and get the reduced RMSD
ref_chain_copy = ref_chain.copy()
ref_chain_copy.setCoords(ref_chain_copy.getCoords() + TapproxVector.getArrayNx3())
RMSD_after_Tapprox = calcRMSD(ref_chain_copy, mob_chain)
if self.utils.isLessOrEqualThen(RMSD_after_Tapprox, RMSDReductions[-1]):
# store betas and RMSD reduction results
betasListWhole.append(betas)
RMSDReductions.append(RMSD_after_Tapprox)
# calc overlap
currentOverlap = calcOverlap(TapproxVector, defvec)
if np.isnan(currentOverlap) or np.isinf(currentOverlap):
print "overlap has a numerical problem"
if overlap:
overlap.append(overlap[-1])
else:
currentOverlap = 0
overlap.append(currentOverlap)
else:
# else the previous RMSD was actually lower, the beta calculation was not successful
betasListWhole.append(betasListWhole[-1])
RMSDReductions.append(RMSDReductions[-1])
overlap.append(overlap[-1])
# cast objects
overlap = np.array(overlap, dtype=np.float64)
RMSDReductions = np.array(RMSDReductions, dtype=np.float64)
return RMSDReductions, overlap, stepPointsReduction
def getModeArrayBasedOnIndices(self, anm_slc, excludeFirstK, indicesOfHighest):
""" Create an array of np.arrays with the modes specified by the indices in excludeFirstK,
and the following modes as given by the indices in indicesOfHighest"""
excludeFirstK = range(0, excludeFirstK)
M = anm_slc[excludeFirstK[0]].getArray()
#print "initial M: ", M
for i in range(1, len(excludeFirstK)):
M = np.dstack((M, anm_slc[excludeFirstK[i]].getArray()))
# print "first ",i," M: ", M
for j in range(0, len(indicesOfHighest)):
M = np.dstack((M, anm_slc[indicesOfHighest[j]].getArray()))
# print "highe ",j," M: ", M
return M[0]
def getModeArrayKeepingFirstK(self, arr, k):
k += 1
k = range(0, k)
arrCopy = arr.copy()
if len(k) == 1:
Mbefore = np.array(np.dstack(arrCopy)[0][0])
M = np.zeros((len(Mbefore), 1))
#print "M: ", M
for i in range(0, len(Mbefore)):
M[i] = Mbefore[i]
return M
elif len(arr[0]) == len(k):
return arr
else:
M = np.dstack(arrCopy)[0][0]
#print "first M in keep first k: ", M
for i in range(1, len(k)):
M = np.dstack((M, np.dstack(arrCopy)[0][i]))
#print "M in keep first "+str(i)+": ", M
return M[0] | 1.820313 | 2 |
migrations/versions/002fd410a290_002_owner_id_type.py | LandRegistry/digital-street-title-api | 0 | 12795159 | <reponame>LandRegistry/digital-street-title-api<filename>migrations/versions/002fd410a290_002_owner_id_type.py<gh_stars>0
"""empty message
Revision ID: 002fd410a290
Revises: <KEY>
Create Date: 2019-02-05 13:40:59.112652
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '002fd410a290'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
op.execute('ALTER TABLE "owner" DROP CONSTRAINT "owner_pkey" CASCADE')
op.alter_column('owner', 'identity', existing_type=sa.Integer(), type_=sa.String())
op.alter_column('title', 'owner_identity', existing_type=sa.Integer(), type_=sa.String())
op.create_primary_key('owner_pkey', 'owner', ['identity'])
def downgrade():
op.execute('ALTER TABLE "owner" DROP CONSTRAINT "owner_pkey" CASCADE')
op.alter_column('owner', 'identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using="identity::integer", autoincrement=True)
op.alter_column('title', 'owner_identity', existing_type=sa.String(), type_=sa.Integer(), postgresql_using="identity::integer", autoincrement=True)
op.create_primary_key('owner_pkey', 'owner', ['identity'])
| 1.3125 | 1 |
skelpy/makers/license.py | Steap/skelpy | 0 | 12795160 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module defines :class:`LicenseMaker` class."""
from __future__ import absolute_import, print_function
import os
import datetime
from . import settings
from .base import BaseMaker
#: Supported licenses, corresponding template file names, and descriptions
_LICENSES = {
"APACHE": ["license_apache", "Apache License"],
"CC0": ["license_cc0_1.0", "Creative Commons License for public domain"],
"GPL2": ["license_gpl_2.0", "GNU General Public License v2.0"],
"GPL3": ["license_gpl_3.0", "GNU General Public License v3.0"],
"LGPL2": ["license_lgpl_2.1", "GNU Lesser General Public License v2.1"],
"LGPL3": ["license_lgpl_3.0", "GNU Lesser General Public License v3.0"],
"MIT": ["license_mit", "MIT License, Default"],
"MOZILLA": ["license_mozilla", "Mozilla Public License v2.0"],
"NEW-BSD": ["license_new_bsd", "New BSD(Berkeley Software Distribution) License"],
"SIMPLE-BSD": ["license_simplified_bsd", "Simplified BSD(Berkeley Software Distribution) License"],
"PROPRIETARY": ["license_proprietary", "Proprietary License"],
}
class LicenseMaker(BaseMaker):
"""*Maker* class to create ``LICENSE`` file in the project directory
    ``LicenseMaker`` basically chooses the license specified in the setup.cfg file.
    But if it cannot retrieve a license from the file--for
    example, when the user did not specify a license in setup.cfg--it creates
    the default license, which is the `MIT license <https://opensource.org/licenses/MIT>`_.
Args:
projectDir (str): absolute path of project directory to create
force (bool): option for overwriting if the file exists.
license (str): license to create.
Attributes:
default_license (str): default license(class variable)
"""
default_license = 'MIT'
def __init__(self, projectDir, force, license, **kwargs):
self.projectDir = projectDir
self.force = force
self.license = license
self._update_settings()
def _update_settings(self):
"""update :attr:`maker.settings` dictionary"""
info = {
'today': datetime.date.today().isoformat(),
'year': str(datetime.date.today().year),
}
settings.update(info)
@staticmethod
def is_supported_license(license):
"""check to see if the license given is supported by *skelpy* or not
license name is case-insensitive.
.. Note::
Currently supported licenses are::
            * APACHE: Apache License
* CC0: Creative Commons License for public domain
* GPL2: GNU General Public License v2.0
* GPL3: GNU General Public License v3.0
            * LGPL2: GNU Lesser General Public License v2.1
* LGPL3: GNU Lesser General Public License v3.0
* MIT: MIT License, **Default**
* MOZILLA: Mozilla Public License v2.0
* NEW-BSD: New BSD(Berkeley Software Distribution) License
* SIMPLE-BSD: Simplified BSD License
* PROPRIETARY: Proprietary License
Args:
license (str): license name
Returns:
bool: True if the license given is supported, False otherwise
"""
return bool(_LICENSES.get(license.upper()))
@staticmethod
def print_licenses():
"""print supported licenses
Returns:
None
"""
print('Supported licenses are as follows:')
indent = " " * 4
for k, v in _LICENSES.items():
print('{0}{1}: {2}'.format(indent, k, v[1]))
def generate(self):
"""Worker method of :class:`LicenseMaker`
Returns:
bool: True if successful, False otherwise
"""
licFile = os.path.join(self.projectDir, 'LICENSE')
ret = self.write_file(_LICENSES[self.license][0], licFile)
if not ret:
self.logger.info(
"* You can change the license with 'license' sub-command.\n"
"For help, see 'skelpy license -h or --help'.")
return bool(ret)
| 2.46875 | 2 |
tests/test_cpu_increment_instructions.py | Hexadorsimal/pynes | 1 | 12795161 | import unittest
from nes.processors.cpu import Cpu
from nes.bus import Bus
from nes.bus.devices.memory import Ram
class CpuIncrementInstructionsTestCase(unittest.TestCase):
def setUp(self):
bus = Bus()
bus.attach_device('RAM', Ram(256), 0, 256)
self.cpu = Cpu(bus)
def test_inc(self):
self.cpu.write(0x0000, 0x00)
instruction = self.cpu.decode(0xEE)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.read(0x0000), 0x01)
self.assertFalse(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_inx(self):
self.cpu.x.value = 0x00
instruction = self.cpu.decode(0xE8)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.x.value, 0x01)
self.assertFalse(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_iny(self):
self.cpu.y.value = 0x00
instruction = self.cpu.decode(0xC8)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.y.value, 0x01)
self.assertFalse(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
if __name__ == '__main__':
unittest.main()
| 3.046875 | 3 |
apps/account/tests/tests_custom_user_account.py | nfeslim/dashboard_nfe | 0 | 12795162 | from django.test import TestCase
from apps.account.models import CustomUser
class TestsCustomUserModel(TestCase):
def setUp(self):
self.data_insert = {
"email": "<EMAIL>",
"first_name": "Teste01",
"last_name": "Last",
"is_staff": False,
"is_active": True,
"notes": "Teste de Notes",
}
def test_insert_custom_user(self):
user = CustomUser.objects.create(
email=self.data_insert.get("email"),
first_name=self.data_insert.get("first_name"),
last_name=self.data_insert.get("last_name"),
is_staff=self.data_insert.get("is_staff"),
is_active=self.data_insert.get("is_active"),
notes=self.data_insert.get("notes"),
)
self.assertIsNotNone(user.pk)
| 2.734375 | 3 |
thelper/infer/impl.py | beaulima/thelper | 17 | 12795163 | """Explicit Tester definitions from existing Trainers."""
import thelper.concepts
from thelper.infer.base import Tester
from thelper.train.classif import ImageClassifTrainer
from thelper.train.detect import ObjDetectTrainer
from thelper.train.regr import RegressionTrainer
from thelper.train.segm import ImageSegmTrainer
@thelper.concepts.classification
class ImageClassifTester(ImageClassifTrainer, Tester):
"""Session runner specialized for testing of image classification task with safeguard against model training.
.. seealso::
| :class:`thelper.train.base.Tester`
| :class:`thelper.train.base.Trainer`
| :class:`thelper.train.classif.ImageClassifTrainer`
"""
@thelper.concepts.detection
class ObjDetectTester(ObjDetectTrainer, Tester):
"""Session runner specialized for testing of object detection task with safeguard against model training.
.. seealso::
| :class:`thelper.train.base.Tester`
| :class:`thelper.train.base.Trainer`
| :class:`thelper.train.detect.ObjDetectTrainer`
"""
@thelper.concepts.regression
class RegressionTester(RegressionTrainer, Tester):
"""Session runner specialized for testing of regression task with safeguard against model training.
.. seealso::
| :class:`thelper.train.base.Tester`
| :class:`thelper.train.base.Trainer`
| :class:`thelper.train.regr.RegressionTrainer`
"""
@thelper.concepts.segmentation
class ImageSegmTester(ImageSegmTrainer, Tester):
"""Session runner specialized for testing of image segmentation task with safeguard against model training.
.. seealso::
| :class:`thelper.train.base.Tester`
| :class:`thelper.train.base.Trainer`
| :class:`thelper.train.segm.ImageSegmTrainer`
"""
| 2.09375 | 2 |
pipeline/tests/test_model.py | pkgpkr/Packge-Picker | 2 | 12795164 | """
Tests for the pipeline model
"""
import os
import unittest
import psycopg2
from model.database import update_bounded_similarity_scores, \
update_popularity_scores, update_trending_scores, \
package_table_postprocessing, write_similarity_scores
class TestModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
USER = os.environ['DB_USER']
PASSWORD = <PASSWORD>['<PASSWORD>']
DATABASE = os.environ['DB_DATABASE']
REAL_TOKEN = os.environ['GH_TOKEN']
HOST = os.environ['DB_HOST']
PORT = os.environ['DB_PORT']
connection = None
result = None
cls.connection = psycopg2.connect(user=USER,
password=PASSWORD,
host=HOST,
port=PORT,
database=DATABASE)
f = open("tests/provision_db.sql", "r")
cls.connection.cursor().execute(f.read())
cls.connection.commit()
f.close()
"""
Tests for the ML pipeline model
"""
def setUp(self):
# Application data
app_data = [
1,
2
]
# Package data
package_data = [
{
'id': 1,
'name': '\'pkg:npm/countup.js@2\'',
'monthly_downloads_last_month': 400451,
'monthly_downloads_a_year_ago': 0,
'categories': 'null',
'modified': '\'2019-03-14 12:42:34.846-07\'',
'retrieved': '\'2020-04-25 19:03:37.409069-07\'',
'app_ids': app_data[-1:]
},
{
'id': 2,
'name': '\'pkg:npm/d3@5\'',
'monthly_downloads_last_month': 5306004,
'monthly_downloads_a_year_ago': 2966818,
'categories': '\'{dom,visualization,svg,animation,canvas}\'',
'modified': '\'2020-04-20 10:59:10.332-07\'',
'retrieved': '\'2020-04-25 19:03:37.421523-07\'',
'app_ids': app_data
},
{
'id': 4,
'name': '\'pkg:npm/globe.gl@2\'',
'monthly_downloads_last_month': 2221,
'monthly_downloads_a_year_ago': 771,
'categories': '\'{webgl,three,globe,geo,spherical,projection,orthographic}\'',
'modified': '\'2020-04-10 14:13:59.518-07\'',
'retrieved': '\'2020-04-25 19:03:37.426579-07\'',
'app_ids': app_data
},
{
'id': 5,
'name': '\'pkg:npm/react-resize-detector@4\'',
'monthly_downloads_last_month': 0,
'monthly_downloads_a_year_ago': 1957316,
'categories': '\'{react,resize,detector}\'',
'modified': 'null',
'retrieved': '\'2020-04-25 19:03:37.429703-07\'',
'app_ids': app_data
},
{
'id': 8,
'name': '\'pkg:npm/@reach/router@1\'',
'monthly_downloads_last_month': 0,
'monthly_downloads_a_year_ago': 0,
'categories': '\'{react,"react router"}\'',
'modified': '\'2020-02-27 12:14:25.729-08\'',
'retrieved': '\'2020-04-25 19:03:37.434285-07\'',
'app_ids': app_data[:1]
}
]
# Connect to the database
user = os.environ.get('DB_USER')
password = <PASSWORD>('DB_PASSWORD')
host = os.environ.get('DB_HOST')
database = os.environ.get('DB_DATABASE')
port = os.environ.get('DB_PORT')
connection_string = f"host={host} user={user} password={password} dbname={database} port={port}"
self.database = psycopg2.connect(connection_string)
self.cursor = self.database.cursor()
# Populate with package data
for p in package_data:
self.cursor.execute(f"""
INSERT INTO packages (id, name, monthly_downloads_last_month, monthly_downloads_a_year_ago, categories, modified, retrieved )
VALUES ({p['id']}, {p['name']}, {p['monthly_downloads_last_month']}, {p['monthly_downloads_a_year_ago']}, {p['categories']}, {p['modified']}, {p['retrieved']});
""")
# Populate with similarity data
for p1 in package_data:
for p2 in package_data:
if p1['id'] == p2['id']:
continue
# Determine how much overlap the two packages have
similarity = len(set(p1['app_ids']) & set(p2['app_ids'])) / len(set(p1['app_ids']) | set(p2['app_ids']))
if similarity == 0:
continue
# Insert similarity score into database
self.cursor.execute(f"""
INSERT INTO similarity (package_a, package_b, similarity)
VALUES ({p1['id']}, {p2['id']}, {similarity});
""")
def tearDown(self):
# Clean out all data
self.cursor.execute("DELETE FROM dependencies; DELETE FROM similarity; DELETE FROM applications; DELETE FROM packages;")
self.database.commit()
self.cursor.close()
self.database.close()
def test_update_bounded_similarity_scores(self):
update_bounded_similarity_scores(self.cursor)
self.cursor.execute("SELECT bounded_similarity FROM similarity ORDER BY package_a, package_b;")
scores = self.cursor.fetchall()
self.assertListEqual(scores, [
(5,), (5, ), (5, ), # Package 1
(5,), (10,), (10,), (5,), # Package 2
(5,), (10,), (10,), (5,), # Package 4
(5,), (10,), (10,), (5,), # Package 5
(5, ), (5, ), (5,)]) # Package 8
def test_update_popularity_scores(self):
update_popularity_scores(self.cursor)
self.cursor.execute("SELECT bounded_popularity FROM packages ORDER BY id;")
scores = self.cursor.fetchall()
self.assertListEqual(scores, [(8,), (10,), (10,), (10,), (8,)])
def test_update_trending_scores(self):
update_trending_scores(self.cursor)
# Check absolute trend
self.cursor.execute("SELECT absolute_trend FROM packages ORDER by id;")
scores = self.cursor.fetchall()
self.assertListEqual(scores, [(10,), (5,), (6,), (1,), (5,)])
# Check relative trend
self.cursor.execute("SELECT relative_trend FROM packages ORDER BY id;")
scores = self.cursor.fetchall()
self.assertListEqual(scores, [(10,), (2,), (2,), (1,), (1,)])
def test_package_table_postprocessing(self):
package_table_postprocessing(self.cursor)
self.cursor.execute("SELECT short_name, url, display_date FROM packages ORDER BY id;")
metadata = self.cursor.fetchall()
self.assertEqual(metadata[0], ('countup.js@2', 'https://npmjs.com/package/countup.js', '2019-03-14'))
self.assertEqual(metadata[1], ('d3@5', 'https://npmjs.com/package/d3', '2020-04-20'))
self.assertEqual(metadata[2], ('globe.gl@2', 'https://npmjs.com/package/globe.gl', '2020-04-10'))
self.assertEqual(metadata[3], ('react-resize-detector@4', 'https://npmjs.com/package/react-resize-detector', None))
self.assertEqual(metadata[4], ('@reach/router@1', 'https://npmjs.com/package/@reach/router', '2020-02-27'))
@classmethod
def tearDownClass(cls):
#closing and cleaning up the test database
if cls.connection:
f = open("tests/deprovision_db.sql", "r")
cls.connection.cursor().execute(f.read())
cls.connection.commit()
cls.connection.close()
print("PostgreSQL connection is closed succesfully")
f.close()
| 2.828125 | 3 |
executor.py | jina-ai/executor-tagshasher | 1 | 12795165 | import hashlib
import json
import numpy as np
from jina import Executor, DocumentArray, requests
class TagsHasher(Executor):
"""Convert an arbitrary set of tags into a fixed-dimensional matrix using the hashing trick.
    Unlike FeatureHasher, you should only use Jaccard/Hamming distance when searching documents
    embedded with TagsHasher. This is because the closeness of the value of each feature is meaningless;
    it is basically the result of a hash function. Hence, only the identity of the value matters.
More info: https://en.wikipedia.org/wiki/Feature_hashing
"""
def __init__(self, n_dim: int = 256, max_val: int = 65536, sparse: bool = False, **kwargs):
"""
:param n_dim: the dimensionality of each document in the output embedding.
Small numbers of features are likely to cause hash collisions,
but large numbers will cause larger overall parameter dimensions.
:param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.
Note that this feature requires ``scipy``
        :param max_val: the modulus applied to hashed tag values before they are added to the embedding.
:param kwargs:
"""
super().__init__(**kwargs)
self.n_dim = n_dim
self.max_val = max_val
self.hash = hashlib.md5
self.sparse = sparse
def _any_hash(self, v):
try:
return int(v) # parse int parameter
except ValueError:
try:
return float(v) # parse float parameter
except ValueError:
if not v:
# ignore it when the parameter is empty
return 0
if isinstance(v, str):
v = v.strip()
if v.lower() in {'true', 'yes'}: # parse boolean parameter
return 1
if v.lower() in {'false', 'no'}:
return 0
if isinstance(v, (tuple, dict, list)):
v = json.dumps(v, sort_keys=True)
return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16)
@requests
def encode(self, docs: DocumentArray, **kwargs):
if self.sparse:
from scipy.sparse import csr_matrix
for idx, doc in enumerate(docs):
if doc.tags:
idxs, data = [], [] # sparse
table = np.zeros(self.n_dim) # dense
for k, v in doc.tags.items():
h = self._any_hash(k)
sign_h = np.sign(h)
col = h % self.n_dim
val = self._any_hash(v)
sign_v = np.sign(val)
val = val % self.max_val
idxs.append((0, col))
val = sign_h * sign_v * val
data.append(val)
table[col] += val
if self.sparse:
doc.embedding = csr_matrix(
(data, zip(*idxs)), shape=(1, self.n_dim)
)
else:
doc.embedding = table
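

# Minimal self-contained sketch of the hashing trick used above (illustration
# only, runs without a Flow): each tag key is hashed to pick one of ``n_dim``
# columns, each tag value is hashed to a magnitude modulo ``max_val``, and
# colliding keys simply accumulate in the same bucket.
if __name__ == '__main__':
    n_dim, max_val = 8, 65536
    table = np.zeros(n_dim)
    for k, v in {'brand': 'jina', 'year': '2021'}.items():
        col = int(hashlib.md5(k.encode('utf-8')).hexdigest(), base=16) % n_dim
        val = int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16) % max_val
        table[col] += val  # same bucket-accumulation idea as in encode()
    print(table)  # a fixed-length vector regardless of how many tags there are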
| 3.25 | 3 |
web/addons/gamification/__openerp__.py | diogocs1/comps | 1 | 12795166 | <reponame>diogocs1/comps
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'Human Resources',
'website' : 'https://www.odoo.com/page/gamification',
'depends': ['mail', 'email_template', 'web_kanban_gauge'],
'description': """
Gamification process
====================
The Gamification module provides ways to evaluate and motivate the users of OpenERP.
The users can be evaluated using goals and numerical objectives to reach.
**Goals** are assigned through **challenges** to evaluate and compare members of a team with each other and over time.
For non-numerical achievements, **badges** can be granted to users. From a simple "thank you" to an exceptional achievement, a badge is an easy way to express gratitude to a user for their good work.
Both goals and badges are flexible and can be adapted to a wide range of modules and actions. When installed, this module creates easy goals to help new users to discover OpenERP and configure their user profile.
""",
'data': [
'wizard/update_goal.xml',
'wizard/grant_badge.xml',
'views/badge.xml',
'views/challenge.xml',
'views/goal.xml',
'data/cron.xml',
'security/gamification_security.xml',
'security/ir.model.access.csv',
'data/goal_base.xml',
'data/badge.xml',
'views/gamification.xml',
],
'application': True,
'auto_install': False,
'qweb': ['static/src/xml/gamification.xml'],
}
| 1.585938 | 2 |
utils/test/rknn.py | stephanballer/deepedgebench | 1 | 12795167 | <reponame>stephanballer/deepedgebench<filename>utils/test/rknn.py<gh_stars>1-10
from rknn.api import RKNN
from time import time
from utils.img_utils import load_preproc_images
def inference_rknn(model_path, datatype, input_dims, dataset, batch_size, repeat):
timestamps, results = list(), list()
init_time_start = time()
rknn = RKNN()
rknn.load_rknn(path=model_path)
rknn.init_runtime()
init_time_end = time()
timestamps.append(("init_rknn_start", init_time_start))
timestamps.append(("init_rknn_end", init_time_end))
print('Initialization took %f seconds' % (init_time_end - init_time_start))
# Inference loop
total_time, img_cnt = 0.0, 0
while img_cnt + batch_size <= len(dataset):
img_paths = dataset[img_cnt:img_cnt + batch_size]
img_cnt += batch_size
img_dims, input_tensor = load_preproc_images(img_paths, datatype, input_dims)
input_tensor = [x for x in input_tensor]
# Inference and append output to results
inf_start_time = time()
for i in range(repeat):
out = rknn.inference(inputs=input_tensor)
inf_end_time = time()
timestamps.append(("inf_start_batch_%d" % (img_cnt), inf_start_time))
timestamps.append(("inf_end_batch_%d" % (img_cnt), inf_end_time))
duration = inf_end_time-inf_start_time
total_time += duration
print('Inference took %f seconds' % (duration))
#print(list(filter(lambda x: x >= 0.01, out[0][0])), len(out), len(out[0]), len(out[0][0]))
results.append((out, img_dims))
rknn.release()
print('Inferenced %d images in %f seconds' % (img_cnt, total_time))
return results, timestamps
| 2.09375 | 2 |
p006.py | ChubsB/ProjectEuler | 0 | 12795168 | # Find the absolute difference between the sum of the squares of the first natural numbers and the square of the sum
N = 3
def test(N):
SquareSum = 0
SumSquare = 0
for i in range(1,N+1):
SquareSum += i*i
SumSquare += i
SumSquare = SumSquare * SumSquare
return SumSquare - SquareSum
print(test(N))
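# The same quantity in closed form (a sketch using the standard identities
# 1 + 2 + ... + N = N*(N+1)/2 and 1^2 + ... + N^2 = N*(N+1)*(2N+1)/6):
def closed_form(N):
    square_of_sum = (N * (N + 1) // 2) ** 2
    sum_of_squares = N * (N + 1) * (2 * N + 1) // 6
    return square_of_sum - sum_of_squares

print(closed_form(N))  # should match test(N) above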
| 3.921875 | 4 |
pdfbuilder/sampletemplates.py | citizenengagementlab/django-pdfbuilder | 1 | 12795169 | from pdfbuilder import registry
from pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate
from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate
from pdfbuilder.basetemplates import PDFTemplate
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
import random
class ThreeColumnDown(PDFTemplate):
doctemplatefactory = BaseDocTemplate
def get_stylesheet(self):
style = getSampleStyleSheet()['Normal']
style.spaceAfter = style.fontSize
return style
class ThreeColumnAcross(PDFTemplate):
def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket):
try:
row = bucket[-1]
except IndexError: # it's an empty list
row = []
bucket.append(row)
if len(row) == 3:
# If the row is full (has 3 elements already) we make a new row
row = []
bucket.append(row)
data = "%s%s" % (entry_prefix, str(entry))
row.append(Paragraph(data, stylesheet))
def post_generate_flowables(self, flowables_buckets):
style = TableStyle([
("VALIGN", (0,0), (-1,-1), "TOP"),
("LINEBELOW", (0,0), (-1,-1), 1, colors.gray),
("LINEABOVE", (0,0), (-1,0), 1, colors.gray),
])
tables = {}
for key, rows in flowables_buckets.items():
t = Table(rows)
t.setStyle(style)
tables[key] = [t]
return tables
class OneColumn(PDFTemplate):
doctemplatefactory = OneColumnDocTemplate
def get_stylesheet(self):
styles = getSampleStyleSheet()
styles['Heading1'].spaceAfter = 12
styles['Heading1'].fontName = "Helvetica"
return styles['Heading1']
registry.register_template(ThreeColumnDown, "threecolumn_down",
"Three column layout, flowing down the page (newspaper style)")
registry.register_template(ThreeColumnAcross, "threecolumn_across",
"Three column layout, filling data across in rows with lines separating the rows")
registry.register_template(OneColumn, "onecolumn_withcomments",
"One column layout")
| 2.421875 | 2 |
python/example/p_chapter05_04.py | groovallstar/test2 | 0 | 12795170 | # Chapter05-04
# Advanced Python
# Decorators
# Advantages
# 1. Removes duplication, keeps code concise, encourages shared/common helper functions
# 2. Logging, frameworks, validation checks..... -> cross-cutting functionality
# 3. Easy to combine and reuse
# Disadvantages
# 1. Possibly reduced readability
# 2. For logic tied to one specific feature -> a plain single function is often better
# 3. Harder to debug
# Decorator practice
import time
def perf_clock(func):
def perf_clocked(*args):
        # function start time
st = time.perf_counter()
result = func(*args)
        # compute elapsed time at function end
et = time.perf_counter() - st
        # name of the executed function
name = func.__name__
        # function arguments
arg_str = ', '.join(repr(arg) for arg in args)
        # print the result
print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str, result))
return result
return perf_clocked
@perf_clock
def time_func(seconds):
time.sleep(seconds)
@perf_clock
def sum_func(*numbers):
return sum(numbers)
# Without decorator syntax (manual wrapping)
none_deco1 = perf_clock(time_func)
none_deco2 = perf_clock(sum_func)
print(none_deco1, none_deco1.__code__.co_freevars)
print(none_deco2, none_deco2.__code__.co_freevars)
print('-' * 40, 'Called None Decorator -> time_func')
print()
none_deco1(1.5)
print('-' * 40, 'Called None Decorator -> sum_func')
print()
none_deco2(100, 150, 250, 300, 350)
print()
print()
# Using decorator syntax
print('*' * 40, 'Called Decorator -> time_func')
print()
time_func(1.5)
print('*' * 40, 'Called Decorator -> sum_func')
print()
sum_func(100, 150, 250, 300, 350)
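# One way to soften the readability/debugging downsides listed above (a sketch):
# functools.wraps copies the wrapped function's metadata onto the wrapper, so
# tracebacks and introspection still report the original function name.
import functools

def perf_clock_wrapped(func):
    @functools.wraps(func)
    def perf_clocked(*args):
        st = time.perf_counter()
        result = func(*args)
        print('[%0.5fs] %s%r -> %r' % (time.perf_counter() - st, func.__name__, args, result))
        return result
    return perf_clocked

@perf_clock_wrapped
def mul_func(a, b):
    return a * b

mul_func(3, 4)
print(mul_func.__name__)  # 'mul_func' instead of 'perf_clocked'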
print() | 2.8125 | 3 |
Notebooks/Visualization/DataReader.py | keuntaeklee/pytorch-PPUU | 159 | 12795171 | """A class with static methods which can be used to access the data about
experiments.
This includes reading logs to parse success cases, reading images, costs
and speed.
"""
import numpy as np
from glob import glob
import torch
import pandas
import re
import json
from functools import lru_cache
import imageio
EPISODES = 561
class DataReader:
"""Container class for the static data access methods"""
EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json'
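    # Maps an experiment name to a [results base path, model file prefix] pair;
    # the lookups below use it to build paths such as
    # `<base path>/planning_results/<model prefix>-seed=...`.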
@staticmethod
@lru_cache(maxsize=1)
def get_experiments_mapping():
"""Reads the experiments mapping from a json file
EXPERIMENTS_MAPPING_FILE
"""
with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f:
x = json.load(f)
return x
@staticmethod
def get_images(experiment, seed, checkpoint, episode):
"""Get simulator images for a given model evaluation on a
given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png'
images = []
for image_path in sorted(glob(image_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_gradients(experiment, seed, checkpoint, episode):
"""Get gradients for a given model evaluation on a given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = []
for image_path in sorted(glob(gradient_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_last_gradient(experiment, seed, checkpoint, episode):
"""Get the last gradient for the model and episode
Returns:
(value, x, y) - tuple, where value is the max value of the
gradient, x, y are the location of this max
value in the gradient image.
"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = sorted(glob(gradient_paths))
if len(images) == 0:
return (0, 0, 0)
image_path = sorted(glob(gradient_paths))[-1]
image = imageio.imread(image_path)
mx_index = np.argmax(image)
value = image.flatten()[mx_index]
middle_x = image.shape[0] / 2
middle_y = image.shape[1] / 2
x = mx_index // image.shape[1]
x -= middle_x
y = mx_index % image.shape[1]
y -= middle_y
if value == 0:
return (0, 0, 0)
else:
return (value, x, y)
@staticmethod
def get_evaluation_log_file(experiment, seed, step):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{step}' + '.model.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
def get_training_log_file(experiment, seed):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'policy_networks/' + path[1] + \
f'-seed={seed}-novalue' + '.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
@lru_cache(maxsize=100)
def find_option_values(option,
experiment=None,
seed=None,
checkpoint=None):
"""Returns possible values for selected option.
Depending on option, returns:
if option == 'seed' - returns all seeds for given experiment.
experiment has to passed.
if option == 'checkpoint' - returns all checkpoints for given
experiment and seed.
experiment and seed have to be
passed.
if option == 'episode' - returns all episodes for given
model
experiment, seed, and checkpoint have
to be passed.
"""
if option == 'seed':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' + path[1] + '*.log')
regexp = r"seed=(\d+)-"
elif option == 'checkpoint':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' +
path[1] + f'-seed={seed}' + '*.model.log')
regexp = r'-novaluestep(\d+)\.'
elif option == 'episode':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] +
'planning_results/videos_simulator/' +
path[1] +
f'-seed={seed}-novaluestep{checkpoint}.model/ep*')
regexp = r'model/ep(\d+)'
values = []
for log in logs:
m = re.search(regexp, log)
if m:
result = m.group(1)
values.append(int(result))
else:
print(f'{log} doesn\'t contain {option}')
# log files for each step are generated for seeds
values = list(set(values))
values.sort()
return values
@staticmethod
def get_success_rate(experiment, seed, step):
"""get the success rate for a given model"""
log_file = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(log_file, 'r') as f:
last_line = f.readlines()[-1]
last_colon = last_line.rfind(':')
success_rate = float(last_line[(last_colon + 2):])
return success_rate
@staticmethod
def get_success_rates_for_experiment(experiment):
"""get success rate arrays for each seed for the given experiment
across all checkpoints.
The resulting shape of the np array is
(seeds, checkpoints), where seeds is the number of seeds,
        and checkpoints is the number of checkpoints.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
for seed in seeds:
result[seed] = []
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
if len(steps) < len(checkpoints):
steps = checkpoints
for checkpoint in checkpoints:
success = DataReader.get_success_rate(
experiment, seed, checkpoint)
result[seed].append(success)
min_length = min(min_length, len(result[seed]))
max_length = max(max_length, len(result[seed]))
if len(result) > 0:
result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge')
for seed in result])
steps = np.array(steps)
return steps, result
else:
return None, None
@staticmethod
def get_learning_curves_for_seed(experiment, seed):
"""Gets the training and validation total losses for a given experiment
and seed.
"""
path = DataReader.get_training_log_file(experiment, seed)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*step\s(\d+).*\s\[.*\π\:\s(.*)\].*\[.*\π\:\s(.*)\]")
steps = []
train_losses = []
validation_losses = []
for line in lines:
match = regex.match(line)
if match:
steps.append(int(match.group(1)))
train_losses.append(float(match.group(2)))
validation_losses.append(float(match.group(3)))
result = dict(
steps=steps,
train_losses=train_losses,
validation_losses=validation_losses,
)
return result
@staticmethod
def get_learning_curves_for_experiment(experiment):
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
train = {}
validation = {}
for seed in seeds:
result[seed] = []
curves = DataReader.get_learning_curves_for_seed(experiment, seed)
for i, step in enumerate(curves['steps']):
train.setdefault(step, []).append(curves['train_losses'][i])
validation.setdefault(step, []).append(curves['validation_losses'][i])
train_means = []
train_stds = []
validation_means = []
validation_stds = []
for key in train:
train_means.append(float(np.mean(train[key])))
train_stds.append(float(np.std(train[key])))
validation_means.append(float(np.mean(validation[key])))
validation_stds.append(float(np.std(validation[key])))
result = dict(
steps=list(train.keys()),
train=(train_means, train_stds),
validation=(validation_means, validation_stds),
)
return result
@staticmethod
def get_episodes_with_outcome(experiment, seed, step, outcome):
"""Gets episodes with given outcome for a given model.
If outcome == 1, returns successful episodes,
if outcome == 0, returns failing episodes.
"""
path = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*ep:\s+(\d+).*\|\ssuccess:\s+(\d).*")
result = []
for line in lines:
match = regex.match(line)
if match:
if int(match.group(2)) == outcome:
result.append(int(match.group(1)))
return result
@staticmethod
def get_episode_success_map(experiment, seed, step):
"""Gets a 0-1 array of shape (episodes) where episodes is
the number of episodes.
Ith value in the result is 0 if the ith episode failed,
and 1 otherwise.
"""
successes = DataReader.get_episodes_with_outcome(experiment,
seed,
step,
1)
successes = np.array(successes) - 1
result = np.zeros(EPISODES)
result[successes] = 1
return result
@staticmethod
def get_episodes_success_counts(experiment):
"""For a given experiment, for all episodes checks performance of all
the models with all possible seeds and checkpoints, and returns
an array of shape (episodes) where episodes is the number of episodes,
where Ith value is the number of models in this experiment that
succeeded in this episode.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = np.zeros(EPISODES)
for seed in seeds:
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
for checkpoint in checkpoints:
success = DataReader.get_episodes_with_outcome(experiment,
seed,
checkpoint,
1)
success = np.array(success)
success = success - 1
one_hot = np.zeros((len(success), EPISODES))
one_hot[np.arange(len(success)), success] = 1
one_hot = np.sum(one_hot, axis=0),
one_hot = np.squeeze(one_hot)
result += one_hot
return result
@staticmethod
def get_episode_speeds(experiment, seed, checkpoint, episode):
""" Returns an array of speeds for given model and given episode"""
return DataReader.get_model_speeds(experiment,
seed,
checkpoint)[episode - 1]
@staticmethod
def get_episode_costs(experiment, seed, checkpoint, episode):
""" Returns an array of data frames with all the costs for
given evaluation """
costs = DataReader.get_model_costs(experiment,
seed,
checkpoint)
if costs is not None:
return costs[episode - 1]
else:
return None
@staticmethod
@lru_cache(maxsize=10)
def get_model_costs(experiment, seed, checkpoint):
""" Returns an array of costs for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs'
costs_paths = glob(regex)
if len(costs_paths) == 0:
print(
                f'costs_paths for {regex} is {costs_paths} and its length is not 1')
return None
else:
raw_costs = torch.load(costs_paths[0])
# list of DataFrame, one per episode
costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in raw_costs]
return costs
@staticmethod
@lru_cache(maxsize=10)
def get_model_speeds(experiment, seed, checkpoint):
""" Returns an array of speeds for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
            f'states_paths for {regex} is {states_paths} and its length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states[:, 2:].norm(dim=1)) # is it correct
return result
@staticmethod
@lru_cache(maxsize=10)
def get_model_states(experiment, seed, checkpoint):
""" Returns an array of states for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
            f'states_paths for {regex} is {states_paths} and its length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states)
return result
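

# Minimal usage sketch (assumes experiments_mapping.json and the referenced log
# files exist; the experiment name below is a placeholder, not a real key):
if __name__ == "__main__":
    experiment = "example-experiment"
    steps, success = DataReader.get_success_rates_for_experiment(experiment)
    if steps is not None:
        # success has shape (seeds, checkpoints); average over seeds per checkpoint
        print(list(zip(steps.tolist(), success.mean(axis=0).tolist())))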
| 2.96875 | 3 |
src/genie/libs/parser/iosxe/tests/ShowLldpNeighborsDetail/cli/equal/golden_output_3_expected.py | balmasea/genieparser | 204 | 12795172 | expected_output = {
"interfaces": {
"GigabitEthernet1/0/32": {
"if_name": "GigabitEthernet1/0/32",
"port_id": {
"222": {
"neighbors": {
"not advertised": {
"neighbor_id": "not advertised",
"chassis_id": "FE80::EC22:9A75:BBC7:71AF",
"port_id": "222",
"port_description": "Description",
"system_name": "not advertised",
"system_description": '{"SN":"SN-NR","Owner":"OWNER"}',
"time_remaining": 92,
"management_address": "0000:0000:0000:0000:0000:ffff:7f00:0001",
"auto_negotiation": "not supported",
}
}
}
},
}
},
"total_entries": 1,
}
| 1.359375 | 1 |
tests/test_ksvd_simple.py | manvhah/pyksvd | 70 | 12795173 | <filename>tests/test_ksvd_simple.py
from ksvd import KSVD
import numpy.random as rn
from numpy import array, zeros, dot
if __name__ == "__main__":
factor = 2
dict_size = 5
target_sparsity = 3
n_examples = 10
dimension = 4
rs = rn.RandomState(0)
D = rs.normal(size = (dict_size, dimension) )
M = zeros( (n_examples, dict_size + 1) )
M[:, :target_sparsity] = rs.normal(size = (n_examples, target_sparsity) )
M = M.ravel()[:n_examples*dict_size].reshape(n_examples, dict_size)
X = dot(M, D)
KSVD(X, dict_size, target_sparsity, 1000)
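    # Note (assumption about the pyksvd API, not verified here): KSVD is commonly
    # called as `D_learned, Gamma = KSVD(X, dict_size, target_sparsity, 1000)`,
    # after which dot(Gamma, D_learned) should approximately reconstruct X.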
| 2.453125 | 2 |
two_additon.py | ykf173/coding_everyday | 0 | 12795174 | class ListNode():
def __init__(self, val):
if isinstance(val, int):
self.val = val
self.next = None
elif isinstance(val, list):
self.val = val[0]
self.next = None
cur = self
for i in val[1:]:
cur.next = ListNode(i)
cur = cur.next
def gatherAttrs(self):
return ", ".join("{}: {}".format(k, getattr(self, k)) for k in self.__dict__.keys())
def __str__(self):
return self.__class__.__name__ + " {" + "{}".format(self.gatherAttrs()) + "}"
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
if isinstance(l1, list):
l1 = ListNode(l1)
l2 = ListNode(l2)
if isinstance(l1, int):
l1 = ListNode(l1)
l2 = ListNode(l2)
carry = 0
restemper = ListNode(0)
res = restemper
while l1 or l2:
x = l1.val if l1 else 0
y = l2.val if l2 else 0
s = x + y + carry
carry = s // 10
restemper.next = ListNode(s % 10)
restemper = restemper.next
if l1:
l1 = l1.next
if l2:
l2 = l2.next
if carry > 0:
            restemper.next = ListNode(carry)
return res.next
# @lc code=end
if __name__ == "__main__":
test = Solution()
print(test.addTwoNumbers([5,6], [5,7]))
| 3.40625 | 3 |
src/exams/models.py | GiomarOsorio/another-e-learning-platform | 0 | 12795175 | <filename>src/exams/models.py
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from courses.models import Content
from django.utils import timezone
from django.urls import reverse
from operator import itemgetter
from users.models import User
from django.db import models
import ast
class Exam(models.Model):
user = models.ForeignKey(User, default=None, on_delete=models.CASCADE)
content = models.OneToOneField(
Content, blank=True, null=True, default=None, on_delete=models.SET_DEFAULT
)
name = models.CharField(
("Exam name"),
max_length=150,
unique=False,
blank=False,
help_text=("Required. 150 characters or fewer."),
)
description = models.TextField()
approved = models.DecimalField(
("minimun points to approved"),
default=None,
max_digits=5,
decimal_places=2,
validators=[MinValueValidator(0), MaxValueValidator(100)],
blank=False,
null=False,
help_text=(
"Put here the minimum points necessary to pass this exam. Max 5 digits: 3 for en integer part and 2 for decimal part"
),
error_messages={
"blank": ("you must provied the minimun points."),
"null": ("you must provied the minimun points."),
},
)
create_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "Quiz: " + self.name
def save(self, *args, **kwargs):
questionlistForms = kwargs.pop("questionlist", None)
if kwargs.pop("update", None):
Question.objects.filter(exam=self).delete()
super(Exam, self).save(*args, **kwargs)
if questionlistForms:
for questionForm in questionlistForms:
question_instance = questionForm.save(commit=False)
question_instance.exam = self
question_instance.save(
answerlist=questionForm.answersForms(),
familyInstance={"Exam": self},
)
def get_owner(self):
return self.user.get_full_name()
def get_questions_instances(self):
questions = Question.objects.filter(exam=self)
numbers_of_questions = [index + 1 for index in range(questions.count())]
return zip(numbers_of_questions, questions)
def get_take_url(self):
# return reverse("courses:course-home-week-content", kwargs={'id':self.course.id, 'idModule':self.module.id, 'idContent':self.id})
# return reverse("exams:exam-take", kwargs={'id':self.content.belongs_to_the_course().id, 'idModule':self.content.belongs_to_the_module().id, 'idContent':self.content.id, 'idExam':self.id})
return reverse("exams:exam-take", kwargs={"idExam": self.id})
def get_detail_url(self):
return reverse("exams:exam-detail", kwargs={"id": self.id})
def get_edit_url(self):
return reverse("exams:exam-update", kwargs={"id": self.id})
def get_delete_url(self):
return reverse("exams:exam-delete", kwargs={"id": self.id})
def get_evaluated_url(self):
return reverse("exams:exam-evaluated", kwargs={"idExam": self.id})
def evaluate(self, questions):
if not questions:
raise "they must provide the questions with their answers to evaluate"
for question in questions.values():
question["Answers"] = sorted(question["Answers"], key=itemgetter(0))
questions_instances = [
question for _, question in self.get_questions_instances()
]
if len(questions_instances) != len(questions):
raise "must provide all questions associated with this exam"
validation = {}
for question_instance, question in zip(questions_instances, questions.values()):
validation["Questionid"] = question_instance.id
answers_instances = question_instance.get_answers_instances()
if len(answers_instances) != len(question["Answers"]):
raise "must provide all answers associated with this exam"
question["validate"] = True
for answer_instance, answer in zip(answers_instances, question["Answers"]):
if answer_instance.correct_answer != answer[1]:
question["validate"] = False
continue
del question["Answers"]
question["points"] = (
float(question_instance.question_value)
if question["validate"]
else float(0)
)
questions["approved"] = float(self.approved)
return questions
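# Illustrative shape of the ``questions`` mapping accepted by Exam.evaluate()
# (a sketch only -- the keys, ids and answer ids below are made up; each
# "Answers" entry is [answer_id, answer_marked_correct_by_the_user]):
#
#   {
#       "question_1": {"id": 10, "Answers": [[31, True], [32, False]]},
#       "question_2": {"id": 11, "Answers": [[41, False], [42, True]]},
#   }
#
# evaluate() replaces each "Answers" list with "validate"/"points" entries and
# adds an "approved" key holding the minimum points required to pass.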
class Question(models.Model):
exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE)
question = models.CharField(
("question"),
max_length=600,
blank=False,
unique=False,
help_text=("Required. 600 characters or fewer."),
error_messages={"unique": ("A question with that name already exists."),},
)
correct_answers = models.IntegerField(
("number of correct answers for this question"),
help_text=("number of correct answers for this question"),
default=0,
)
question_value = models.DecimalField(
("points"),
default=None,
max_digits=5,
decimal_places=2,
validators=[MinValueValidator(0), MaxValueValidator(100)],
        help_text=('Max 5 digits: same as "minimum points to pass"'),
        # help_text=('point value of the question. Max 5 digits: 3 for the integer part and 2 for the decimal part'),
blank=False,
null=False,
error_messages={
"blank": ("you must provied the point value."),
"null": ("you must provied the point value."),
},
)
def __str__(self):
return "Question: " + self.question
def save(self, *args, **kwargs):
answerlistForms = kwargs.pop("answerlist", None)
familyInstance = kwargs.pop("familyInstance", None)
super(Question, self).save(*args, **kwargs)
if answerlistForms:
for answerForm in answerlistForms:
answer_instance = answerForm.save(commit=False)
answer_instance.exam = familyInstance["Exam"]
answer_instance.question = self
answer_instance.save()
def get_answers_instances(self):
return Answer.objects.filter(question=self)
def get_altAnswers_instances(self):
return Answer.objects.filter(question=self).order_by("?")
class Answer(models.Model):
exam = models.ForeignKey(Exam, default=None, on_delete=models.CASCADE)
question = models.ForeignKey(Question, default=None, on_delete=models.CASCADE)
answer = models.CharField(("answer"), max_length=600, blank=False,)
correct_answer = models.BooleanField(
default=False, help_text=("the answer is correct?."),
)
def __str__(self):
return "answer for question, " + str(self.question_id)
class ExamUserRelations(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
exam = models.ForeignKey(Exam, on_delete=models.CASCADE)
user_answer = models.TextField(default=None, blank=True, null=True)
points = models.DecimalField(
default=0,
max_digits=5,
decimal_places=2,
validators=[MinValueValidator(0), MaxValueValidator(100)],
)
number_of_try = models.IntegerField(
default=0, validators=[MinValueValidator(0), MaxValueValidator(3)]
)
last_try = models.DateTimeField(default=None, blank=True, null=True)
def __str__(self):
return self.exam.name + ": " + self.user.get_full_name()
def can_take_exam(self):
if self.number_of_try >= 3:
if self.time().days > 0 or self.time().seconds > (8 * 60 * 60):
self.number_of_try = 0
self.save()
return True
return False
return True
def get_user_answer(self):
user_answer = ast.literal_eval(self.user_answer)
del user_answer["approved"]
for question in user_answer.values():
question["id"] = int(question["id"])
return user_answer
def time(self):
return timezone.now() - self.last_try
def time_until_take(self):
time = (
timezone.timedelta(days=0, seconds=8 * 60 * 60, microseconds=0)
- self.time()
)
return "%s hour(s) and %s minut(s)" % (
time.seconds // 3600,
(time.seconds // 60) % 60,
)
def try_exam(self, userQuestions):
self.number_of_try += 1
self.last_try = timezone.now()
self.user_answer = userQuestions
new_points = 0
for key, question in userQuestions.items():
if key != "approved":
new_points += question["points"]
if new_points > self.points:
self.points = new_points
self.save()
| 2.234375 | 2 |
benchmark/bm_mutable_complicated_params.py | ryanlevy/pennylane | 1 | 12795176 | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mutable QNode, complicated primary parameters benchmark.
"""
# pylint: disable=invalid-name
import numpy as np
import pennylane as qml
import benchmark_utils as bu
def circuit(p, *, aux=0):
"""A very simple, lightweight mutable quantum circuit."""
qml.RX(p[aux][2], wires=[0])
return qml.expval(qml.PauliZ(0))
class Benchmark(bu.BaseBenchmark):
"""
This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for
mutable QNodes, using an extreme case where the QNode has lots of primary parameters with
a complicated nested structure, but relatively few auxiliary parameters, and only a few
of the primary parameters are actually used in the circuit.
When the QNode is constructed, a VariableRef is built for each primary parameter,
and the qfunc re-evaluated. In this test this is meant to be time-consuming, but it is only
strictly necessary if the auxiliary parameters change.
The main reasons why there are significant differences in the execution speed of this test
between different PL commits:
* :meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary params
have changed.
* Most of the primary params are not used in the circuit, hence
:meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives
wrt. them are always zero.
"""
name = "mutable qnode, complicated primary params"
min_wires = 1
n_vals = range(6, 13, 1)
def __init__(self, device=None, verbose=False):
super().__init__(device, verbose)
self.qnode = None
def setup(self):
self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None)
def benchmark(self, n=8):
# n is the number of levels in the primary parameter tree.
# Hence the number of primary parameters depends exponentially on n.
def create_params(n):
"""Recursively builds a tree structure with n levels."""
if n <= 0:
# the leaves are arrays
return np.random.randn(2)
# the other nodes have two branches and a scalar
return [create_params(n - 1), create_params(n - 1), np.random.randn()]
p = create_params(n)
def evaluate(aux):
"""Evaluates the qnode using the given auxiliary params."""
res = self.qnode(p, aux=aux)
# check the result
assert np.allclose(res, np.cos(p[aux][2]))
# first evaluation and construction
evaluate(0)
# evaluate the node several times more with a different auxiliary argument
# (it does not matter if p changes or not, the VariableRefs handle it)
for _ in range(1, 10):
# If we had evaluate(i % 2) here instead the auxiliary arguments would change
# every time, which would negate most possible speedups.
evaluate(1)
return True
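# Minimal usage sketch (not part of the suite; the device construction below is
# an assumption -- any PennyLane device with at least `min_wires` wires should
# work with BaseBenchmark):
#
#   import pennylane as qml
#   dev = qml.device("default.qubit", wires=1)
#   bm = Benchmark(device=dev, verbose=True)
#   bm.setup()
#   bm.benchmark(n=8)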
| 1.953125 | 2 |
netbox/utilities/querysets.py | BrnoPCmaniak/netbox | 6 | 12795177 | class DummyQuerySet:
"""
A fake QuerySet that can be used to cache relationships to objects that have been deleted.
"""
def __init__(self, queryset):
self._cache = [obj for obj in queryset.all()]
def all(self):
return self._cache
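# Minimal usage sketch (illustrative only -- ``Device`` and the filter are
# placeholders for any model/queryset):
#
#   cached = DummyQuerySet(Device.objects.filter(site=site))
#   Device.objects.filter(site=site).delete()
#   for obj in cached.all():   # cached objects remain iterable after the delete
#       ...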
| 2.65625 | 3 |
demo/mock_view.py | shuai93/drf-demo | 2 | 12795178 | from rest_framework.decorators import (
api_view,
permission_classes,
authentication_classes,
renderer_classes,
)
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.authentication import BaseAuthentication
from rest_framework.renderers import JSONRenderer
from django.conf.urls import url
class AnyAuthentication(BaseAuthentication):
def authenticate(self, request):
return
class JSONPRenderer(JSONRenderer):
"""
jsonp render
"""
media_type = "application/javascript"
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
request = renderer_context.get("request", None)
callback = request.query_params.get("callback", "callback")
json = super(JSONPRenderer, self).render(
data, accepted_media_type, renderer_context
)
return callback.encode("utf-8") + b"(" + json + b");"
@api_view(["GET"])
@authentication_classes((AnyAuthentication,))
@permission_classes((AllowAny,))
@renderer_classes(
(JSONPRenderer,),
)
def jsonp(request):
token = request.COOKIES.get("auth", "")
cookies = {
"token": token,
"host": request.get_host(),
}
response = Response(cookies)
return response
@api_view(["POST"])
@authentication_classes((AnyAuthentication,))
@permission_classes((AllowAny,))
def login(request):
token = request.COOKIES.get("auth", "auth")
password = request.data.get("password", "")
username = request.data.get("username", "")
# user center check username password
response = Response({"user": "user_info", "token": token})
response.set_cookie("auth", token, domain="0.0.0.0", expires=30 * 24 * 60 * 60)
return response
@api_view(["GET"])
@authentication_classes((AnyAuthentication,))
@permission_classes((AllowAny,))
def check_token(request, token):
token = request.COOKIES.get("auth")
# user center check token ...
data = {"user_info": {"username": "admin", "user_id": 1}, "token": token}
return Response(data)
mock_urls = [
url("^jsonp/", jsonp),
url("^login/", login),
url(r"^check_token/(?P<token>[A-Za-z0-9]+)/$", check_token),
]
| 2.125 | 2 |
images/views.py | Cyci25/Gallery | 0 | 12795179 | <gh_stars>0
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
import datetime as dt
# the Image model is assumed to live in this app's models module
from .models import Image
# Create your views here.
def welcome(request):
return render(request, 'image.html')
def image(request, id):
try:
image = Image.objects.get(pk = id)
    except Image.DoesNotExist:
raise Http404()
return render(request, 'images.html', {"image": image})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
        searched_images = Image.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"picture": searched_images})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
| 2.28125 | 2 |
Unidad 2/packages/extra/ugly/omega.py | angelxehg/utzac-ppy | 0 | 12795180 | def funO():
pass
| 0.949219 | 1 |
snippet/snippet.py | ARMmbed/snippet | 4 | 12795181 | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Text snippet extractor."""
from typing import List, Optional
from snippet import exceptions
from snippet.config import Config
class Example:
"""An example."""
def __init__(self, path: str, line_num: int, example_name: str, line: str) -> None:
"""Initialiser."""
self._key = (path, line_num, example_name)
self._strip = len(line) - len(line.lstrip())
self._text: List[str] = list()
self._cloaking = False
def add_line(self, line: str) -> None:
"""Adds a line."""
if self._cloaking:
return
self._text.append(line)
def cloak(self, line_num: int) -> None:
"""Starts cloaking."""
if self._cloaking:
raise exceptions.CloakMismatch(f"Already cloaked at {self.debug_id} ({line_num})")
self._cloaking = True
def uncloak(self, line_num: int) -> None:
"""Stops cloaking."""
if not self._cloaking:
raise exceptions.CloakMismatch(f"Already uncloaked at {self.debug_id} ({line_num})")
self._cloaking = False
@property
def is_cloaking(self) -> bool:
"""States whether it's in cloaking mode."""
return self._cloaking
@property
def is_empty(self) -> bool:
"""States whether the example is empty or not."""
return len(self._text) == 0
@property
def text(self) -> List[str]:
"""Gets example text."""
return self._text
@property
def strip_number(self) -> int:
"""Gets the example strip number."""
return self._strip
@property
def key(self) -> tuple:
"""Gets the example key."""
return self._key
@property
def debug_id(self) -> str:
"""Gets some debug information about the example."""
return str(self.key)
class Examples:
"""All the examples in a file."""
def __init__(self) -> None:
"""Initialiser."""
self._examples: List[Example] = list()
self._current_example: Optional[Example] = None
def set_current(self, example: Example, line_num: int) -> None:
"""Sets current example."""
if self._current_example:
raise exceptions.StartEndMismatch(f"Already capturing at {self._current_example.debug_id} ({line_num})")
self._current_example = example
def store_current(self, line_num: int) -> None:
"""Stores current example."""
if not self._current_example:
raise exceptions.StartEndMismatch(f"Not yet capturing at {line_num}")
if self._current_example.is_cloaking:
raise exceptions.CloakMismatch(
f"End of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})"
)
if not self._current_example.is_empty:
self._examples.append(self._current_example)
self._current_example = None
def cloak(self, line_num: int) -> None:
"""Start cloaking."""
if self._current_example:
self._current_example.cloak(line_num)
def uncloak(self, line_num: int) -> None:
"""Stops cloaking."""
if self._current_example:
self._current_example.uncloak(line_num)
def end(self, line_num: int) -> None:
"""Ends."""
if self._current_example:
raise exceptions.StartEndMismatch(
f"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})"
)
def add_line(self, line: str) -> None:
"""Adds a line."""
if self._current_example:
self._current_example.add_line(line)
def validate_dedent(self, line: str, line_num: int) -> None:
"""Validates dedent."""
if not self._current_example:
return
if any(line[: self._current_example.strip_number].lstrip()):
raise exceptions.ValidationFailure(
f"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})"
)
def validate_line(self, fail_on_contains: List[str], line: str, line_num: int) -> None:
"""Validates line."""
for trigger in fail_on_contains:
if trigger in line:
debug_info = self._current_example.debug_id if self._current_example else ""
raise exceptions.ValidationFailure(f"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})")
def clean_line(self, line: str) -> str:
"""Cleans a line."""
if not self._current_example:
return line
start = self._current_example.strip_number
return line[start:].rstrip()
@property
def all(self) -> list:
"""Gets all the examples."""
return self._examples
def extract_snippets_from_text(config: Config, lines: list, path: str) -> dict:
"""Finds snippets in lines of text."""
examples = Examples()
line_index = 0
for line_num, line in enumerate(lines):
line_index = line_num
if config.start_flag in line:
# start capturing code from the next line
examples.set_current(
Example(path=path, line_num=line_num, example_name=line.rsplit(":")[-1].strip(), line=line), line_num
)
continue
if config.end_flag in line:
# stop capturing, and discard empty blocks
examples.store_current(line_num)
continue
if config.uncloak_flag in line:
examples.uncloak(line_num)
continue
if config.cloak_flag in line:
examples.cloak(line_num)
continue
# whilst capturing, append code lines to the current block
if config.fail_on_dedent:
examples.validate_dedent(line, line_num)
clean_line = examples.clean_line(line)
if any(match in clean_line for match in config.drop_lines):
continue
for r_before, r_after in config.replacements.items():
clean_line = clean_line.replace(r_before, r_after)
examples.validate_line(config.fail_on_contains, clean_line, line_num)
# add this line of code to the example block
examples.add_line(clean_line)
examples.end(line_index)
return {example.key: example.text for example in examples.all}
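# Minimal usage sketch (the flag strings are assumptions -- the real values are
# whatever ``snippet.config.Config`` defines for a given project):
#
#   config = Config(...)  # e.g. start_flag="# an example:", end_flag="# end of example"
#   lines = open("README.md").readlines()
#   snippets = extract_snippets_from_text(config, lines, path="README.md")
#   for (path, line_num, name), text in snippets.items():
#       print(name, "\n".join(text))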
| 2.671875 | 3 |
permissions/file_permissions.py | mohitbaviskar/http_server | 0 | 12795182 |
# Access-control data for the HTTP server.
# dir_per:  directories that may be served
# file_per: files that may be served
# temporary / permanent: source-to-target mappings for temporarily / permanently moved files
# authorized / auth_pass: files that require authorization and the accepted credentials
# prox / proxy_user: proxied files and the users allowed to access them
dir_per = {
'/',
'/assets',
'/proxy',
'/samplefortemp'
}
prox = ['./documentroot/proxy/proxyfile.html']
proxy_user = ['ram:123']
temporary = {'./documentroot/temp.html':'samplefortemp/temp.html'}
permanent = {'./documentroot/perm.html':'samplefortemp/perm.html'}
authorized = ['./documentroot/auth_file.html']
auth_pass = ['<PASSWORD>']
file_per = {
'auth_file.html',
'dashboard.html',
'favicon.icon',
'form.html',
'index.html',
'menu.html',
'new-user.html',
'orders.html',
'purchases.html',
'staff.html',
'suppliers.html',
'transactions.html',
'/samplefortemp/perm.html',
'/samplefortemp/temp.html',
'/proxy/proxyfile.html'
}
| 1.984375 | 2 |
src/WF_WaitForFile.py | SynerClust/SynerClust | 7 | 12795183 | #!/usr/bin/env python
import sys, os, time
def usage():
print "WF_WaitForFile.py [file's dir] [file] [frequency - default=60s]"
sys.exit(1)
def main(argv):
fileDir = argv[0]
file = argv[1]
freq = 60
if len(argv) > 2:
freq = int(argv[2])
file_found = 0
while not file_found:
dirFiles = os.listdir(fileDir)
if file in dirFiles:
file_found = 1
else:
time.sleep(freq)
sys.exit(0)
if __name__ == "__main__":
if len(sys.argv) == 1:
usage()
else:
main(sys.argv[1:]) | 3.0625 | 3 |
tests/test_space.py | letmaik/exhaust | 1 | 12795184 | import exhaust
def test_double_iteration():
def gen(state: exhaust.State):
return state.maybe()
space = exhaust.space(gen)
assert len(set(space)) == 2
assert len(set(space)) == 2
| 2.875 | 3 |
ispmanccp/lib/ispman_helpers.py | UfSoft/python-perl | 0 | 12795185 | # -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $
# =============================================================================
# $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $
# $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $
# $Rev: 84 $
# $LastChangedBy: s0undt3ch $
# =============================================================================
# Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
from string import join
from formencode.variabledecode import variable_decode
from pylons import request, g, cache
from pylons.decorators.cache import beaker_cache
from ispmanccp.lib.helpers import to_unicode, asbool
from ispmanccp.lib.decorators import perlexcept
APP_CONF = g.pylons_config.app_conf
ispman_cache = cache.get_cache('ispman')
allowed_user_attributes = (
'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber',
'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp',
'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort',
'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress',
'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress',
'userPassword', 'mailForwardingAddress', 'givenName')
updatable_attributes = (
'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword',
'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain',
'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess',
'radiusProfileDN'
)
def get_cache(domain):
return cache.get_cache(domain)
def get_domain_users(domain, attr_list): #attributes_to_retrieve):
"""Function to get the `attr_list` from all users on `domain`"""
if attr_list.count('ispmanUserId') < 1:
attr_list.append('ispmanUserId')
userlist = to_unicode(g.ispman.getUsers(domain, attr_list))
decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()]
decorated.sort()
result = [dict_ for (key, dict_) in decorated]
return result
def address_exists_on_domain(domain, address):
users = get_domain_users(
domain,
[
"ispmanUserId",
"mailAlias",
"mailLocalAddress",
#"mailForwardingAddress"
]
)
for user in users:
for key, val, in user.iteritems():
if isinstance(val, list):
for n in range(len(val)):
if val[n] == address:
return user["ispmanUserId"]
elif val == address:
return user["ispmanUserId"]
return None
def get_users_list(domain, letter, sortby=None, sort_ascending=True):
domain_users = get_domain_users(
domain, [
"dn",
"givenName",
"sn",
"cn",
"ispmanCreateTimestamp",
"ispmanUserId",
"mailLocalAddress",
"mailForwardingAddress",
"userPassword",
"mailQuota",
"mailAlias",
"FTPQuotaMBytes",
"FTPStatus"
]
)
userlist = []
lengths = {}
for user in domain_users:
user_id = user['ispmanUserId']
lengths[user_id] = {}
            # Apparently Genshi converts what it can to strings,
# we have to make these lists
if 'mailAlias' in user:
lengths[user_id]['aliases'] = len(user['mailAlias'])
if 'mailForwardingAddress' in user:
lengths[user_id]['forwards'] = len(user['mailForwardingAddress'])
if letter == 'All' or user_id.upper().startswith(letter):
userlist.append(user)
# let's save some time and return right away if we don't need any sorting
if len(userlist) <= 1:
return lengths, userlist
decorated = [(dict_[sortby], dict_) for dict_ in userlist]
decorated.sort()
if not sort_ascending:
decorated.reverse()
result = [dict_ for (key, dict_) in decorated]
return lengths, result
def get_user_info(uid, domain):
user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain))
lengths = {}
lengths[uid] = {}
if 'mailAlias' in user_info:
lengths[uid]['aliases'] = len(user_info['mailAlias'])
if 'mailForwardingAddress' in user_info:
lengths[uid]['forwards'] = len(user_info['mailForwardingAddress'])
user_info['mailQuota'] = int(user_info['mailQuota'])/1024
return lengths, user_info
def get_perl_cgi(params_dict):
attrib_tpl = """ '%(key)s' => ['%(val)s'], """
params_dict = variable_decode(params_dict)
cgi_params = "$q = new CGI({"
for key, val in params_dict.iteritems():
if key in updatable_attributes:
if isinstance(val, list):
cgi_params += attrib_tpl % ( {'key': key, 'val': join(val)} )
else:
cgi_params += attrib_tpl % ( {'key': key, 'val': val} )
cgi_params += """}) or die "$@";"""
cgi = g.perl.eval(cgi_params)
g.perl.eval('$q->header(-charset => "UTF-8");')
return cgi
@perlexcept
def update_user_info(attrib_dict):
cgi = get_perl_cgi(attrib_dict)
return asbool(g.ispman.update_user(cgi))
def get_user_attribute_values(id, domain, attribute):
return to_unicode(
g.ispman.getUserAttributeValues(id, domain, attribute)
)
@perlexcept
def delete_user(post_dict):
cgi = get_perl_cgi(post_dict)
return asbool(g.ispman.deleteUser(cgi))
def user_exists(user_id):
uid = user_id + '@' + request.POST['ispmanDomain']
return bool(int(g.ispman.userExists(uid)))
# cache it for 5 minutes
@beaker_cache(expire=300, query_args=True)
def get_domain_info(domain):
return to_unicode(dict(
g.ispman.getDomainInfo(domain, 2))
)
def get_domain_vhost_count(domain):
return to_unicode(g.ispman.getVhostCount(domain))
def get_domain_user_count(domain):
return to_unicode(g.ispman.getUserCount(domain))
# cache it for 1 hour
@beaker_cache(expire=3600, query_args=True)
def get_default_acount_vars():
defaults = {}
defaults['defaultUserFtpQuota'] = to_unicode(
g.ispman.getConf('defaultUserFtpQuota')
)
defaults['defaultUserMailQuota'] = to_unicode(
g.ispman.getConf('defaultUserMailQuota')
)
return defaults
@perlexcept
def add_user(attrib_dict):
cgi = get_perl_cgi(attrib_dict)
return g.ispman.addUser(cgi)
def ldap_search(ldap_filter="objectClass=*",
attrs=None,
scope="sub",
sort='ispmanUserId',
ascending=True):
base = APP_CONF['ispman_ldap_base_dn']
if attrs is not None:
results = to_unicode(
g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope)
)
else:
results = to_unicode(
g.ispman.getEntriesAsHashRef(base, ldap_filter)
)
entries = []
if not results:
return None
for dn in results:
vals = results[dn]
vals['dn'] = dn
entries.append(vals)
if len(entries) <= 1:
return entries
decorated = [(dict_[sort], dict_) for dict_ in entries]
decorated.sort()
if not ascending:
decorated.reverse()
result = [dict_ for (key, dict_) in decorated]
return result
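# Example call (illustrative: the filter string is an assumption, the attribute
# names come from allowed_user_attributes above):
#   users = ldap_search(ldap_filter="(&(objectClass=ispmanUser)(ispmanDomain=example.com))",
#                       attrs=["ispmanUserId", "mailAlias"],
#                       sort="ispmanUserId")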
| 1.523438 | 2 |
Securinets/2021/Quals/web/Warmup/app.py | mystickev/ctf-archives | 1 | 12795186 | from itsdangerous import Signer, base64_encode, base64_decode
from flask import Flask, request, render_template, make_response, g, Response
from flask.views import MethodView
import urlparse
import shutil
import utils
import os
import mimetypes
app = Flask(__name__.split('.')[0])
app.config.from_object(__name__)
BUFFER_SIZE = 128000
URI_BEGINNING_PATH = {
'authorization': '/login/',
'weeb': '/weeb/wtf/',
}
def generate_key():
app.secret_key = os.urandom(24)
def generate_cookie_info(origin=None):
if not origin:
origin = request.headers.get('Origin')
useragent = request.headers.get('User-Agent')
return '%s %s' % (str(origin), str(useragent))
def verify_cookie(cookey):
is_correct = False
cookie_value = request.cookies.get(cookey)
if cookie_value:
s = Signer(app.secret_key)
expected_cookie_content = \
generate_cookie_info(base64_decode(cookey))
expected_cookie_content = s.get_signature(expected_cookie_content)
if expected_cookie_content == cookie_value:
is_correct = True
return is_correct
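# Illustrative note: for an Origin of "https://example.com" the cookie key is
# base64_encode("https://example.com") and its value is
# Signer(app.secret_key).get_signature("https://example.com <user agent>"),
# i.e. the signature of generate_cookie_info() for that origin.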
def is_authorized():
origin = request.headers.get('Origin')
if origin is None:
return True
return verify_cookie(base64_encode(origin))
@app.before_request
def before_request():
headers = {}
headers['Access-Control-Max-Age'] = '3600'
headers['Access-Control-Allow-Credentials'] = 'true'
headers['Access-Control-Allow-Headers'] = \
'Origin, Accept, Accept-Encoding, Content-Length, ' + \
'Content-Type, Authorization, Depth, If-Modified-Since, '+ \
'If-None-Match'
headers['Access-Control-Expose-Headers'] = \
'Content-Type, Last-Modified, WWW-Authenticate'
origin = request.headers.get('Origin')
headers['Access-Control-Allow-Origin'] = origin
specific_header = request.headers.get('Access-Control-Request-Headers')
if is_authorized():
status_code = 200
elif request.method == 'OPTIONS' and specific_header:
headers['Access-Control-Request-Headers'] = specific_header
headers['Access-Control-Allow-Methods'] = ', '.join(['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS'])
response = make_response('', 200, headers)
return response
else:
s = Signer(app.secret_key)
headers['WWW-Authenticate'] = 'Nayookie login_url=' + \
urlparse.urljoin(request.url_root,
URI_BEGINNING_PATH['authorization']) + '?sig=' + \
s.get_signature(origin) + '{&back_url,origin}'
response = make_response('', 401, headers)
return response
g.status = status_code
g.headers = headers
class weeb(MethodView):
methods = ['GET', 'PUT', 'PROPFIND', 'DELETE','COPY', 'MOVE', 'OPTIONS']
def __init__(self):
self.baseuri = URI_BEGINNING_PATH['weeb']
def get_body(self):
request_data = request.data
try:
length = int(request.headers.get('Content-length'))
except ValueError:
length = 0
if not request_data and length:
try:
request_data = request.form.items()[0][0]
except IndexError:
request_data = None
return request_data
def get(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def put(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def propfind(self, pathname):
status = g.status
headers = g.headers
pf = utils.PropfindProcessor(
URI_BEGINNING_PATH['weeb'] + pathname,
app.fs_handler,
request.headers.get('Depth', 'infinity'),
self.get_body())
try:
response = make_response(pf.create_response() + '\n', status, headers)
except IOError, e:
response = make_response('Not found', 404, headers)
return response
def delete(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def copy(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def move(self, pathname):
status = g.status
headers = g.headers
status = 501
return make_response('', status, headers)
def options(self, pathname):
return make_response('', g.status, g.headers)
weeb_view = weeb.as_view('dav')
app.add_url_rule(
'/weeb/wtf/',
defaults={'pathname': ''},
view_func=weeb_view
)
app.add_url_rule(
URI_BEGINNING_PATH['weeb'] + '<path:pathname>',
view_func=weeb_view
)
@app.route(URI_BEGINNING_PATH['authorization'], methods=['GET', 'POST'])
def authorize():
origin = request.args.get('origin')
if request.method == 'POST':
response = make_response()
if request.form.get('continue') != 'true':
generate_key()
s = Signer(app.secret_key)
if s.get_signature(origin) == request.args.get('sig'):
key = base64_encode(str(origin))
back = request.args.get('back_url')
info = generate_cookie_info(origin=origin)
response.set_cookie(key, value=s.get_signature(info), max_age=None,
expires=None, path='/', domain=None, secure=True, httponly=True)
else:
return 'Something went wrong...'
response.status = '301' #
response.headers['Location'] = '/' if not back else back
else:
response = make_response(render_template('authorization_page.html',
cookie_list=[ base64_decode(cookey)
for cookey in
request.cookies.keys()
if verify_cookie(cookey) ],
origin=request.args.get('origin'),
back_url=request.args.get('back_url')))
return response
if __name__ == '__main__':
app.fs_path = '/app/'
app.fs_handler = utils.FilesystemHandler(app.fs_path,
URI_BEGINNING_PATH['weeb'])
generate_key()
app.run(host="0.0.0.0")
| 2.109375 | 2 |
osmchadjango/roulette_integration/utils.py | jbronn/osmcha-django | 27 | 12795187 | import json
from os.path import join
import requests
from django.conf import settings
def remove_unneeded_properties(feature):
keys_to_remove = [
key for key in feature['properties'].keys()
if key.startswith('osm:') or key.startswith('result:')
]
for key in keys_to_remove:
feature['properties'].pop(key)
if feature['properties'].get('oldVersion'):
feature['properties'].pop('oldVersion')
if feature['properties'].get('suspicions'):
feature['properties'].pop('suspicions')
return feature
def format_challenge_task_payload(feature, challenge_id, name, reasons=[]):
if len(reasons):
feature['properties']['osmcha_reasons'] = ", ".join([i for i in reasons])
payload = {
"parent": challenge_id,
"name": "{}".format(name),
"geometries": {"features": [remove_unneeded_properties(feature)]}
}
return json.dumps(payload)
def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]):
if (settings.MAP_ROULETTE_API_KEY is not None and
settings.MAP_ROULETTE_API_URL is not None):
payload = format_challenge_task_payload(
feature, challenge_id, name, reasons
)
headers = {
"Content-Type": "application/json",
"apiKey": settings.MAP_ROULETTE_API_KEY
}
return requests.post(
join(settings.MAP_ROULETTE_API_URL, 'task'),
headers=headers,
data=payload
)
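# Minimal usage sketch (the feature is a made-up GeoJSON Feature; the challenge
# id and name are placeholders):
#
#   feature = {
#       "type": "Feature",
#       "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
#       "properties": {"osm:id": 1, "oldVersion": 2, "note": "kept"},
#   }
#   payload = format_challenge_task_payload(feature, challenge_id=1234,
#                                           name="task-1", reasons=["new mapper"])
#   # the payload keeps only the "note" property and adds "osmcha_reasons"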
| 2.140625 | 2 |
python/main_zmq.py | fjctp/find_prime_numbers | 0 | 12795188 | #!/bin/env python3
import argparse
import zmq
import threading
import json
import time
from libs.mylib import is_prime
def parse_args():
    parser = argparse.ArgumentParser(description='Find all prime numbers in a range (from 2).')
parser.add_argument('max', type=int, default=1000,
help='from 2 to MAX')
return parser.parse_args()
def worker_routine(worker_url, control_url, context=None):
"""Worker routine"""
print('thread started')
context = context or zmq.Context.instance()
w_socket = context.socket(zmq.REP)
w_socket.connect(worker_url)
c_sub = context.socket(zmq.SUB)
c_sub.connect(control_url)
c_sub.setsockopt(zmq.SUBSCRIBE, b"S")
while True:
try:
[address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK)
print('==> %s, %s'%(address, stop_bit))
if int(stop_bit) == 1:
break
except zmq.Again as e:
pass
try:
string = w_socket.recv(flags=zmq.NOBLOCK)
data = json.loads(string)
value = data['value']
known_primes = data['known_primes']
isPrime = is_prime(value, known_primes)
#print('%d: %d', value, isPrime)
#send reply back to client
w_socket.send(b"%d"%isPrime)
#w_socket.send(b'%d'%True)
except zmq.Again as e:
pass
print('thread terminated')
    # close the worker's sockets so that context.term() in main() can return
    w_socket.close()
    c_sub.close()
def main(num_threads=2, num_ceil=10, known_primes=[2, ]):
worker_url = "inproc://workers"
control_url = "inproc://control"
context = zmq.Context.instance()
w_socket = context.socket(zmq.REQ)
w_socket.bind(worker_url)
c_pub = context.socket(zmq.PUB)
c_pub.bind(control_url)
print('Start threads')
for i in range(num_threads):
thread = threading.Thread(target=worker_routine,
args=(worker_url, control_url, ))
thread.start()
print('Find primes')
for i in range(3, num_ceil+1):
data = {'value': i, 'known_primes':known_primes}
str_data = json.dumps(data)
b_data = str_data.encode('ascii');
w_socket.send(b_data)
y_n = w_socket.recv()
if int(y_n) == 1:
known_primes.append(i)
print('Done finding')
c_pub.send_multipart([b'S', b'1'])
time.sleep(1)
w_socket.close()
c_pub.close()
context.term()
return known_primes
if __name__ == '__main__':
args = parse_args()
known_primes = main(2, args.max)
print(known_primes) | 2.625 | 3 |
tests/test_prone.py | 612twilight/CogDL-TensorFlow | 85 | 12795189 | <gh_stars>10-100
import os
import sys
sys.path.append('../')
def test_prone():
os.system("python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model prone --seed 0 1 2 3 4 --hidden-size 2")
pass
if __name__ == "__main__":
test_prone() | 1.632813 | 2 |
testflows/_core/cli/arg/handlers/report/coverage.py | testflows/TestFlows-Core | 3 | 12795190 | # Copyright 2019 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import time
import base64
import threading
import importlib.util
from datetime import datetime
from functools import partial
import testflows.settings as settings
import testflows._core.cli.arg.type as argtype
from testflows._core import __version__
from testflows._core.flags import Flags, SKIP
from testflows._core.testtype import TestType
from testflows._core.cli.arg.common import epilog
from testflows._core.cli.arg.common import HelpFormatter
from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase
from testflows._core.cli.arg.handlers.report.copyright import copyright
from testflows._core.transform.log.pipeline import ResultsLogPipeline
from testflows._core.transform.log.short import format_test, format_result
from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta
from testflows._core.utils.string import title as make_title
from testflows._core.transform.log.report.totals import Counts
from testflows._core.objects import Requirement
logo = '<img class="logo" src="data:image/png;base64,%(data)s" alt="logo"/>'
testflows = '<span class="testflows-logo"></span> [<span class="logo-test">Test</span><span class="logo-flows">Flows</span>]'
testflows_em = testflows.replace("[", "").replace("]", "")
FailResults = ["Fail", "Error", "Null"]
XoutResults = ["XOK", "XFail", "XError", "XNull"]
template = f"""
<section class="clearfix">%(logo)s%(confidential)s%(copyright)s</section>
---
# Requirements Coverage Report%(title)s
%(body)s
---
Generated by {testflows} Open-Source Test Framework
[<span class="logo-test">Test</span><span class="logo-flows">Flows</span>]: https://testflows.com
[ClickHouse]: https://clickhouse.yandex
<script>
%(script)s
</script>
"""
script = """
window.onload = function(){
// Toggle requirement description on click
document.querySelectorAll('.requirement').forEach(
function(item){
item.addEventListener('click', function(){
item.nextElementSibling.classList.toggle('show');
item.children[0].classList.toggle('active');
});
});
// Toggle test procedure on click
document.querySelectorAll('.test').forEach(
function(item){
item.addEventListener('click', function(){
item.nextElementSibling.classList.toggle('show');
item.classList.toggle('active');
});
});
}
"""
class Formatter:
utf_icons = {
"satisfied": "\u2714",
"unsatisfied": "\u2718",
"untested": "\u270E"
}
icon_colors = {
"satisfied": "color-ok",
"unsatisfied": "color-fail",
"untested": "color-error"
}
def format_logo(self, data):
if not data["company"].get("logo"):
return ""
data = base64.b64encode(data["company"]["logo"]).decode("utf-8")
return '\n<p>' + logo % {"data": data} + "</p>\n"
def format_confidential(self, data):
if not data["company"].get("confidential"):
return ""
return f'\n<p class="confidential">Document status - Confidential</p>\n'
def format_copyright(self, data):
if not data["company"].get("name"):
return ""
return (f'\n<p class="copyright">\n'
f'{copyright(data["company"]["name"])}\n'
"</p>\n")
def format_metadata(self, data):
metadata = data["metadata"]
s = (
"\n\n"
f"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\n"
f'||**Framework**||'
f'{testflows} {metadata["version"]}||\n'
)
return s + "\n"
def format_summary(self, data):
counts = data["counts"]
def template(value, title, color):
return (
f'<div class="c100 p{value} {color} smaller-title">'
f'<span>{value}%</span>'
f'<span class="title">{title}</span>'
'<div class="slice">'
'<div class="bar"></div>'
'<div class="fill"></div>'
'</div>'
'</div>\n')
s = "\n## Summary\n"
if counts.units <= 0:
s += "No tests"
else:
s += '<div class="chart">'
if counts.satisfied > 0:
s += template(f"{counts.satisfied / float(counts.units) * 100:.0f}", "Satisfied", "green")
if counts.unsatisfied > 0:
s += template(f"{counts.unsatisfied / float(counts.units) * 100:.0f}", "Unsatisfied", "red")
if counts.untested > 0:
s += template(f"{counts.untested / float(counts.units) * 100:.0f}", "Untested", "orange")
s += '</div>\n'
return s
def format_statistics(self, data):
counts = data["counts"]
result_map = {
"OK": "Satisfied",
"Fail": "Unsatisfied",
"Error": "Untested"
}
s = "\n\n## Statistics\n"
s += "||" + "||".join(
["<span></span>", "Units"]
+ [f'<span class="result result-{k.lower()}">{v}</span>' for k, v in result_map.items()]
) + "||\n"
s += "||" + "||".join([f"<center>{i}</center>" for i in ["**Requirements**",
str(counts.units), str(counts.satisfied),
str(counts.unsatisfied), str(counts.untested)]]) + "||\n"
return s + "\n"
def format_table(self, data):
reqs = data["requirements"]
s = "\n\n## Coverage\n"
for r in reqs.values():
s += f'\n<section class="requirement"><span class="requirement-inline"><i class="utf-icon {self.icon_colors[r["status"]]}">{self.utf_icons[r["status"]]}</i>{r["requirement"].name}</span></section>'
description = r["requirement"].description.replace("\\n","\n")
if description:
s += f'\n<div markdown="1" class="requirement-description hidden">\n{description}\n</div>'
for test in r["tests"]:
result = test["result"]
cls = result["result_type"].lower()
s += f'\n<div class="test"><span class="result result-inline result-{cls}">{result["result_type"]}</span><span class="time time-inline">{strftimedelta(result["message_rtime"])}</span>{test["test"]["test_name"]}</div>'
s += f'\n<div class="test-procedure hidden">\n```testflows\n{test["messages"]}\n```\n</div>'
if not r["tests"]:
s += f'\n<div class="no-tests">\n<span class="result-inline">\u270E</span>\nNo tests\n</div>'
s += "\n"
return s + "\n"
def format_title(self, data):
if data["title"]:
return "<br>" + make_title(data["title"])
return ""
def format(self, data):
body = ""
body += self.format_metadata(data)
body += self.format_summary(data)
body += self.format_statistics(data)
body += self.format_table(data)
return template.strip() % {
"logo": self.format_logo(data),
"confidential": self.format_confidential(data),
"copyright": self.format_copyright(data),
"body": body,
"script": script,
"title": self.format_title(data)
}
class Counts(object):
def __init__(self, name, units, satisfied, unsatisfied, untested):
self.name = name
self.units = units
self.satisfied = satisfied
self.unsatisfied = unsatisfied
self.untested = untested
def __bool__(self):
return self.units > 0
class Handler(HandlerBase):
@classmethod
def add_command(cls, commands):
parser = commands.add_parser("coverage", help="requirements coverage report", epilog=epilog(),
description="Generate requirements coverage report.",
formatter_class=HelpFormatter)
parser.add_argument("requirements", metavar="requirements", type=partial(argtype.path, special=["-"]),
help="requirements source file, default: '-' (from input log)", nargs="?", default="-")
parser.add_argument("input", metavar="input", type=argtype.logfile("r", bufsize=1, encoding="utf-8"),
nargs="?", help="input log, default: stdin", default="-")
parser.add_argument("output", metavar="output", type=argtype.file("w", bufsize=1, encoding="utf-8"),
nargs="?", help='output file, default: stdout', default="-")
parser.add_argument("--show", metavar="status", type=str, nargs="+", help="verification status. Choices: 'satisfied', 'unsatisfied', 'untested'",
choices=["satisfied", "unsatisfied", "untested"],
default=["satisfied", "unsatisfied", "untested"])
parser.add_argument("--input-link", metavar="attribute",
help="attribute that is used as a link to the input log, default: job.url",
type=str, default="job.url")
parser.add_argument("--format", metavar="type", type=str,
help="output format, default: md (Markdown)", choices=["md"], default="md")
parser.add_argument("--copyright", metavar="name", help="add copyright notice", type=str)
parser.add_argument("--confidential", help="mark as confidential", action="store_true")
parser.add_argument("--logo", metavar="path", type=argtype.file("rb"),
help='use logo image (.png)')
parser.add_argument("--title", metavar="name", help="custom title", type=str)
parser.add_argument("--only", metavar="name", type=str, default=[], nargs="+",
help=("name of one or more specifications for which to generate coverage report"
", default: include all specifications. Only a unique part of the name can be specified."
))
parser.set_defaults(func=cls())
def get_attribute(self, result, name, default=None):
tests = list(result["tests"].values())
if not tests:
return default
test = tests[0]["test"]
for attr in test["attributes"]:
if attr["attribute_name"] == name:
return attr["attribute_value"]
return default
def table(self, results):
table = {
"header": ["Requirement", "Tests"],
"rows": [],
}
return table
def metadata(self, results):
return {
"date": time.time(),
"version": __version__,
}
def requirements(self, spec_names, path, results):
_requirements = {}
_specs = []
if path == "-":
for spec in results["specifications"]:
if spec_names:
matched = False
for name in spec_names:
if name in spec["specification_name"]:
matched = True
break
if not matched:
continue
_specs.append(spec)
for req in spec["specification_requirements"]:
_requirements[req["name"]] = {"requirement": Requirement(**req), "tests": []}
else:
spec = importlib.util.spec_from_file_location("requirements", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
for name, value in vars(module).items():
if not isinstance(value, Requirement):
continue
_requirements[value.name] = {"requirement": value, "tests": []}
return (_specs, _requirements)
def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id):
started = test["test"]["message_time"]
ended = test["result"]["message_time"]
messages = [format_test(test["test"], "", tests_by_parent, tests_by_id, no_colors=True)]
if getattr(TestType, test["test"]["test_type"]) > TestType.Test:
for t in tests[idx + 1:]:
flags = Flags(t["test"]["test_flags"])
if flags & SKIP and settings.show_skipped is False:
continue
if t["test"]["message_time"] > ended:
break
if getattr(TestType, t["test"]["test_type"]) >= TestType.Test \
and t["test"]["test_id"].startswith(test["test"]["test_id"]):
messages.append(format_test(t["test"], "", tests_by_parent, tests_by_id, no_colors=True))
messages.append(format_result(t["result"], no_colors=True))
else:
for t in tests[idx + 1:]:
flags = Flags(t["test"]["test_flags"])
if flags & SKIP and settings.show_skipped is False:
continue
if t["test"]["message_time"] > ended:
break
if t["test"]["test_id"].startswith(test["test"]["test_id"]):
messages.append(format_test(t["test"], "", tests_by_parent, tests_by_id, no_colors=True))
messages.append(format_result(t["result"], no_colors=True))
messages.append(format_result(test["result"], no_colors=True))
test["messages"] = "".join(messages)
return test
def add_tests(self, requirements, results):
tests = list(results["tests"].values())
for i, test in enumerate(tests):
flags = Flags(test["test"]["test_flags"])
if flags & SKIP and settings.show_skipped is False:
continue
result = test["result"]
for requirement in test["test"]["requirements"]:
if requirement["requirement_name"] in requirements:
requirements[requirement["requirement_name"]]["tests"].append(self.add_test_messages(test, i, tests, results["tests_by_parent"], results["tests_by_id"]))
return requirements
def counts(self, requirements):
counts = Counts("requirements", *([0] * 4))
for req in requirements.values():
counts.units += 1
tests = req["tests"]
if not tests:
counts.untested += 1
req["status"] = "untested"
else:
satisfied = True
for test in tests:
result = test["result"]
if result["result_type"] != "OK":
satisfied = False
if satisfied:
counts.satisfied += 1
req["status"] = "satisfied"
else:
counts.unsatisfied += 1
req["status"] = "unsatisfied"
return counts
def company(self, args):
d = {}
if args.copyright:
d["name"] = args.copyright
if args.confidential:
d["confidential"] = True
if args.logo:
d["logo"] = args.logo.read()
return d
def data(self, source, results, args):
d = dict()
specs, requirements = self.requirements(args.only, source, results)
# if custom title was not specified generate a title
# that include all specification names
title = args.title
if title is None and specs:
title = "<br>".join([spec["specification_name"] for spec in specs])
d["title"] = title
d["requirements"] = self.add_tests(requirements, results)
d["metadata"] = self.metadata(results)
d["counts"] = self.counts(d["requirements"])
d["company"] = self.company(args)
counts = d["counts"]
return d
def generate(self, formatter, results, args):
output = args.output
output.write(
formatter.format(self.data(args.requirements, results, args))
)
output.write("\n")
def handle(self, args):
results = {}
formatter = Formatter()
ResultsLogPipeline(args.input, results).run()
self.generate(formatter, results, args)
| 1.539063 | 2 |
lorentz_embeddings/lorentz.py | lambdaofgod/lorentz-embeddings | 0 | 12795191 | <gh_stars>0
import os
import sys
import torch
import random
import numpy as np
from torch import nn
from torch import optim
from tqdm import trange, tqdm
from collections import Counter
from datetime import datetime
from tensorboardX import SummaryWriter
from torch.utils.data import Dataset, DataLoader
import datasets
import pickle
import matplotlib
matplotlib.use("Agg") # this needs to come before other matplotlib imports
import matplotlib.pyplot as plt
plt.style.use("ggplot")
def arcosh(x):
return torch.log(x + torch.sqrt(x ** 2 - 1))
def lorentz_scalar_product(x, y):
# BD, BD -> B
m = x * y
result = m[:, 1:].sum(dim=1) - m[:, 0]
return result
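# (lorentz_scalar_product above is the Lorentz/Minkowski inner product
#  <x, y>_L = -x_0 * y_0 + sum_{i>=1} x_i * y_i, applied row-wise to a batch.)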
def tangent_norm(x):
# BD -> B
return torch.sqrt(lorentz_scalar_product(x, x))
def exp_map(x, v):
# BD, BD -> BD
tn = tangent_norm(v).unsqueeze(dim=1)
tn_expand = tn.repeat(1, x.size()[-1])
result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn)
result = torch.where(tn_expand > 0, result, x) # only update if tangent norm is > 0
return result
def set_dim0(x):
x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode
# NOTE: the paper does not mention the square part of the equation but if
# you try to derive it you get a square term in the equation
dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1))
x[:, 0] = dim0
return x
# ========================= models
class RSGD(optim.Optimizer):
def __init__(self, params, learning_rate=None):
learning_rate = learning_rate if learning_rate is not None else 0.01
defaults = {"learning_rate": learning_rate}
super().__init__(params, defaults=defaults)
def step(self):
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
B, D = p.size()
gl = torch.eye(D, device=p.device, dtype=p.dtype)
gl[0, 0] = -1
grad_norm = torch.norm(p.grad.data)
grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0).to(p.device))
# only normalize if global grad_norm is more than 1
h = (p.grad.data / grad_norm) @ gl
proj = (
h
- (
lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p)
).unsqueeze(1)
* p
)
# print(p, lorentz_scalar_product(p, p))
update = exp_map(p, -group["learning_rate"] * proj)
is_nan_inf = torch.isnan(update) | torch.isinf(update)
update = torch.where(is_nan_inf, p, update)
update[0, :] = p[0, :] # no ❤️ for embedding
update = set_dim0(update)
p.data.copy_(update)
class Lorentz(nn.Module):
"""
This will embed `n_items` in a `dim` dimensional lorentz space.
"""
def __init__(self, n_items, dim, init_range=0.001):
super().__init__()
self.n_items = n_items
self.dim = dim
self.table = nn.Embedding(n_items + 1, dim, padding_idx=0)
nn.init.uniform_(self.table.weight, -init_range, init_range)
# equation 6
with torch.no_grad():
self.table.weight[0] = 5 # padding idx push it to corner
set_dim0(self.table.weight)
def forward(self, I, Ks):
"""
Using the pairwise similarity matrix, generate the following inputs and
        provide them to this function.
Inputs:
- I : - long tensor
- size (B,)
- This denotes the `i` used in all equations.
- Ks : - long tensor
- size (B, N)
                - This denotes at most `N` documents which come from the
                  nearest neighbor sample.
                - The `j` document must be the first of the `N` indices;
                  it is used to calculate the loss.
        Return:
            - size (B,)
            - Ranking loss of the `j` document (the first entry of `Ks`)
              relative to the given `i` document.
"""
n_ks = Ks.size()[1]
ui = torch.stack([self.table(I)] * n_ks, dim=1)
uks = self.table(Ks)
# ---------- reshape for calculation
B, N, D = ui.size()
ui = ui.reshape(B * N, D)
uks = uks.reshape(B * N, D)
dists = -lorentz_scalar_product(ui, uks)
dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists)
        # Sometimes two embeddings come very close together in R^D; the Lorentz
        # inner product can then drift slightly above -1 (e.g. -0.99), which
        # would make arcosh return nan, hence the clamp above.
dists = -arcosh(dists)
# print(dists)
# ---------- turn back to per-sample shape
dists = dists.reshape(B, N)
loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6))
return loss
def lorentz_to_poincare(self):
table = self.table.weight.data.cpu().numpy()
return table[:, 1:] / (
table[:, :1] + 1
) # diffeomorphism transform to poincare ball
def get_lorentz_table(self):
return self.table.weight.data.cpu().numpy()
def _test_table(self):
x = self.table.weight.data
check = lorentz_scalar_product(x, x) + 1.0
return check.cpu().numpy().sum()
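# Minimal usage sketch (sizes are illustrative; in training, Ks[:, 0] holds the
# positive `j` item for each anchor `i`, padded with 0 as in Graph below):
#
#   model = Lorentz(n_items=100, dim=5)
#   I = torch.randint(1, 101, (8,))        # batch of anchor items
#   Ks = torch.randint(1, 101, (8, 10))    # first column is the positive sample
#   loss = model(I, Ks).mean()
#   loss.backward()
#   RSGD(model.parameters(), learning_rate=0.1).step()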
class Graph(Dataset):
def __init__(self, pairwise_matrix, batch_size, sample_size=10):
self.pairwise_matrix = pairwise_matrix
self.n_items = pairwise_matrix.shape[0]
self.sample_size = sample_size
self.arange = np.arange(0, self.n_items)
self.cnter = 0
self.batch_size = batch_size
def __len__(self):
return self.n_items
def __getitem__(self, i):
self.cnter = (self.cnter + 1) % self.batch_size
I = torch.Tensor([i + 1]).squeeze().long()
has_child = (self.pairwise_matrix[i] > 0).sum()
has_parent = (self.pairwise_matrix[:, i] > 0).sum()
if self.cnter == 0:
arange = np.random.permutation(self.arange)
else:
arange = self.arange
if has_parent: # if no child go for parent
valid_idxs = arange[self.pairwise_matrix[arange, i].nonzero()[0]]
j = valid_idxs[0]
min = self.pairwise_matrix[j,i]
elif has_child:
valid_idxs = arange[self.pairwise_matrix[i, arange].nonzero()[1]]
j = valid_idxs[0]
min = self.pairwise_matrix[i,j]
else:
raise Exception(f"Node {i} has no parent and no child")
indices = arange
indices = indices[indices != i]
if has_child:
indices = indices[(self.pairwise_matrix[i,indices] < min).nonzero()[0]]
else:
indices = indices[(self.pairwise_matrix[indices, i] < min).nonzero()[1]]
indices = indices[: self.sample_size]
#print(indices)
#raise NotImplementedError()
Ks = np.concatenate([[j], indices, np.zeros(self.sample_size)])[
: self.sample_size
]
# print(I, Ks)
return I, torch.Tensor(Ks).long()
def recon(table, pair_mat):
"Reconstruction accuracy"
count = 0
table = torch.tensor(table[1:])
n = pair_mat.shape[0]
for i in range(1, n): # 0 padding, 1 root, we leave those two
x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D
mask = torch.tensor([0.0] * len(table))
mask[i] = 1
mask = mask * -10000.0
dists = lorentz_scalar_product(x, table) + mask
dists = (
dists.cpu().numpy()
) # arccosh is monotonically increasing, so no need of that here
        # and no -dist either: the distance is arcosh(-l(x,y)) with arcosh
        # monotonic, so maximizing l(x,y) picks the nearest neighbour directly
# print(dists)
predicted_parent = np.argmax(dists)
actual_parent = np.argmax(pair_mat[:, i])
# print(predicted_parent, actual_parent, i, end="\n\n")
count += actual_parent == predicted_parent
count = count / (pair_mat.shape[0] - 1) * 100
return count
| 2.265625 | 2 |
tests/conftest.py | sanger/lighthouse | 1 | 12795192 | import copy
import os
from http import HTTPStatus
from unittest.mock import MagicMock, patch
import pytest
import responses
from lighthouse import create_app
from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED
from lighthouse.constants.fields import (
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER,
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME,
FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER,
FIELD_CHERRYTRACK_USER_ID,
FIELD_SAMPLE_ID,
)
from lighthouse.db.dart import load_sql_server_script
from lighthouse.helpers.dart import create_dart_connection
from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table
from lighthouse.messages.message import Message
from lighthouse.types import EventMessage
from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response
from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response
from tests.fixtures.data.centres import CENTRES
from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES
from tests.fixtures.data.event_wh import EVENT_WH_DATA
from tests.fixtures.data.mlwh import (
COG_UK_IDS,
MLWH_LH_SAMPLES,
MLWH_LH_SAMPLES_MULTIPLE,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE,
MLWH_SAMPLE_STOCK_RESOURCE,
SAMPLES_FOR_MLWH_UPDATE,
cherrytrack_mlwh_example,
)
from tests.fixtures.data.plate_events import PLATE_EVENTS
from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES
from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES
from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack
from tests.fixtures.data.source_plates import SOURCE_PLATES
@pytest.fixture
def app():
# set the 'EVE_SETTINGS' env variable to easily switch to the testing environment when creating an app
os.environ["EVE_SETTINGS"] = "test.py"
app = create_app()
yield app
@pytest.fixture
def client(app):
return app.test_client()
@pytest.fixture
def biosero_auth_headers(app):
with app.app_context():
return {"Authorization": app.config.get("API_TOKENS_EVENTS").get("biosero_read_write")}
@pytest.fixture
def lighthouse_ui_auth_headers(app):
with app.app_context():
return {"Authorization": app.config.get("API_TOKENS_EVENTS").get("lighthouse_ui_read_write")}
@pytest.fixture
def centres(app):
with app.app_context():
centres_collection = app.data.driver.db.centres
_ = centres_collection.insert_many(CENTRES)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(CENTRES)
# clear up after the fixture is used
with app.app_context():
centres_collection.delete_many({})
@pytest.fixture
def samples(app):
with app.app_context():
samples_collection = app.data.driver.db.samples
inserted_samples = samples_collection.insert_many(SAMPLES)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(SAMPLES), inserted_samples
# clear up after the fixture is used
with app.app_context():
samples_collection.delete_many({})
@pytest.fixture
def clear_events(app):
try:
yield
finally:
with app.app_context():
events_collection = app.data.driver.db.events
events_collection.delete_many({})
@pytest.fixture
def priority_samples(app, samples):
_, samples = samples
# create a copy so that the test can change it however it needs
priority_samples = copy.deepcopy(PRIORITY_SAMPLES)
    # update the priority samples with the _id of the samples inserted into mongo; this currently only uses the
    # number of priority samples, therefore PRIORITY_SAMPLES needs to be <= SAMPLES
for count, priority_sample in enumerate(priority_samples):
priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count]
with app.app_context():
priority_samples_collection = app.data.driver.db.priority_samples
_ = priority_samples_collection.insert_many(priority_samples)
yield priority_samples
# clear up after the fixture is used
with app.app_context():
priority_samples_collection.delete_many({})
@pytest.fixture
def source_plates(app):
with app.app_context():
source_plates_collection = app.data.driver.db.source_plates
_ = source_plates_collection.insert_many(SOURCE_PLATES)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(SOURCE_PLATES)
# clear up after the fixture is used
with app.app_context():
source_plates_collection.delete_many({})
@pytest.fixture
def plate_events(app):
with app.app_context():
events_collection = app.data.driver.db.events
inserted_events = events_collection.insert_many(PLATE_EVENTS)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(PLATE_EVENTS), inserted_events
# clear up after the fixture is used
with app.app_context():
events_collection.delete_many({})
@pytest.fixture
def mocked_responses():
"""Easily mock responses from HTTP calls.
https://github.com/getsentry/responses#responses-as-a-pytest-fixture"""
with responses.RequestsMock() as rsps:
yield rsps
@pytest.fixture
def labwhere_samples_simple(app, mocked_responses):
labwhere_url = f"{app.config['LABWHERE_URL']}/api/labwares_by_barcode"
body = [
{
"barcode": "plate_123",
"location_barcode": "location_123",
}
]
mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK)
@pytest.fixture
def samples_for_mlwh_update():
return SAMPLES_FOR_MLWH_UPDATE
@pytest.fixture
def cog_uk_ids():
return COG_UK_IDS
# ********************** WAREHOUSE DATA ************************** #
@pytest.fixture
def mlwh_lh_samples(app, mlwh_sql_engine):
insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
@pytest.fixture
def mlwh_lh_samples_multiple(app, mlwh_sql_engine):
insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
@pytest.fixture
def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STOCK_RESOURCES_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STUDY_TABLE"])
try:
delete_data()
# inserts
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["sample"],
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["study"],
mlwh_sql_engine,
app.config["MLWH_STUDY_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["stock_resource"],
mlwh_sql_engine,
app.config["MLWH_STOCK_RESOURCES_TABLE"],
)
yield
finally:
delete_data()
@pytest.fixture
def mlwh_beckman_cherrypicked(app, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
try:
delete_data()
# inserts
insert_into_mlwh(
app,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["lighthouse_sample"],
mlwh_sql_engine,
app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["sample"],
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
yield
finally:
delete_data()
@pytest.fixture
def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STOCK_RESOURCES_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STUDY_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
try:
delete_data()
# inserts
insert_into_mlwh(
app,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["lighthouse_sample"],
mlwh_sql_engine,
app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["sample"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["sample"], # type: ignore
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["study"],
mlwh_sql_engine,
app.config["MLWH_STUDY_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["stock_resource"],
mlwh_sql_engine,
app.config["MLWH_STOCK_RESOURCES_TABLE"],
)
yield
finally:
delete_data()
def insert_into_mlwh(app, data, mlwh_sql_engine, table_name):
table = get_table(mlwh_sql_engine, table_name)
with mlwh_sql_engine.begin() as connection:
connection.execute(table.delete()) # delete all rows from table first
print("Inserting MLWH test data")
connection.execute(table.insert(), data)
def delete_from_mlwh(app, mlwh_sql_engine, table_name):
table = get_table(mlwh_sql_engine, table_name)
with mlwh_sql_engine.begin() as connection:
print("Deleting MLWH test data")
connection.execute(table.delete())
@pytest.fixture
def event_wh_data(app, event_wh_sql_engine):
try:
subjects_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_SUBJECTS_TABLE"])
roles_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_ROLES_TABLE"])
events_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_EVENTS_TABLE"])
event_types_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_EVENT_TYPES_TABLE"])
subject_types_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_SUBJECT_TYPES_TABLE"])
role_types_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_ROLE_TYPES_TABLE"])
def delete_event_warehouse_data():
with event_wh_sql_engine.begin() as connection:
connection.execute(roles_table.delete())
connection.execute(subjects_table.delete())
connection.execute(events_table.delete())
connection.execute(event_types_table.delete())
connection.execute(subject_types_table.delete())
connection.execute(role_types_table.delete())
delete_event_warehouse_data()
with event_wh_sql_engine.begin() as connection:
print("Inserting Events Warehouse test data")
connection.execute(role_types_table.insert(), EVENT_WH_DATA["role_types"])
connection.execute(event_types_table.insert(), EVENT_WH_DATA["event_types"])
connection.execute(subject_types_table.insert(), EVENT_WH_DATA["subject_types"])
connection.execute(subjects_table.insert(), EVENT_WH_DATA["subjects"])
connection.execute(events_table.insert(), EVENT_WH_DATA["events"])
connection.execute(roles_table.insert(), EVENT_WH_DATA["roles"])
yield
finally:
delete_event_warehouse_data()
@pytest.fixture
def mlwh_sql_engine(app):
return create_mysql_connection_engine(app.config["WAREHOUSES_RW_CONN_STRING"], app.config["MLWH_DB"])
@pytest.fixture
def dart_connection(app):
return create_dart_connection()
@pytest.fixture
def dart_schema_create(app):
with app.app_context():
load_sql_server_script("tests/data/dart/schema.sql")
@pytest.fixture
def dart_samples(app, dart_schema_create):
with app.app_context():
load_sql_server_script("tests/data/dart/seed.sql")
@pytest.fixture
def dart_mongo_merged_samples():
return DART_MONGO_MERGED_SAMPLES
@pytest.fixture
def event_wh_sql_engine(app):
return create_mysql_connection_engine(app.config["WAREHOUSES_RW_CONN_STRING"], app.config["EVENTS_WH_DB"])
@pytest.fixture
def message_unknown():
message_content: EventMessage = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": "no_callbacks",
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def message_source_complete():
message_content: EventMessage = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": PE_BECKMAN_SOURCE_COMPLETED,
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [
{
"role_type": "sample",
"subject_type": "sample",
"friendly_name": "friendly_name",
"uuid": "00000000-1111-2222-3333-555555555555",
},
{
"role_type": "cherrypicking_source_labware",
"subject_type": "plate",
"friendly_name": "plate-barcode",
"uuid": "00000000-1111-2222-3333-555555555556",
},
{
"role_type": "robot",
"subject_type": "robot",
"friendly_name": "robot-serial",
"uuid": "00000000-1111-2222-3333-555555555557",
},
],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def message_source_all_negative():
message_content: EventMessage = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": PE_BECKMAN_SOURCE_ALL_NEGATIVES,
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [
{
"role_type": "cherrypicking_source_labware",
"subject_type": "plate",
"friendly_name": "plate-barcode",
"uuid": "00000000-1111-2222-3333-555555555556",
},
{
"role_type": "robot",
"subject_type": "robot",
"friendly_name": "robot-serial",
"uuid": "00000000-1111-2222-3333-555555555557",
},
],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def plates_lookup_with_samples(samples, priority_samples):
return PLATES_LOOKUP_WITH_SAMPLES
@pytest.fixture
def plates_lookup_without_samples(samples, priority_samples):
return PLATES_LOOKUP_WITHOUT_SAMPLES
@pytest.fixture
def mocked_rabbit_channel(app):
with app.app_context():
mocked_broker = MagicMock()
with patch("lighthouse.classes.services.warehouse.Broker", return_value=mocked_broker):
mocked_channel = MagicMock()
mocked_broker.__enter__.return_value = mocked_channel
yield mocked_channel
@pytest.fixture
def cherrytrack_mock_run_info(
app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status
):
run_url = f"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}"
mocked_responses.add(
responses.GET,
run_url,
json=cherrytrack_run_info_response,
status=cherrytrack_mock_run_info_status,
)
yield
@pytest.fixture
def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status):
for centre_prefix in baracoda_mock_responses.keys():
if baracoda_mock_responses[centre_prefix] is not None:
num_samples = len(baracoda_mock_responses[centre_prefix]["barcodes_group"]["barcodes"])
baracoda_url = (
f"http://{app.config['BARACODA_URL']}" f"/barcodes_group/{centre_prefix}/new?count={num_samples}"
)
mocked_responses.add(
responses.POST,
baracoda_url,
json=baracoda_mock_responses[centre_prefix],
status=baracoda_mock_status,
)
yield
@pytest.fixture
def cherrytrack_mock_source_plates_status():
return HTTPStatus.OK
@pytest.fixture
def cherrytrack_mock_run_info_status():
return HTTPStatus.OK
@pytest.fixture
def cherrytrack_mock_destination_plate_status():
return HTTPStatus.OK
@pytest.fixture
def baracoda_mock_status():
return HTTPStatus.CREATED
@pytest.fixture
def cherrytrack_mock_source_plates(
app,
mocked_responses,
source_barcode,
destination_barcode,
cherrytrack_source_plates_response,
cherrytrack_mock_source_plates_status,
):
source_plates_url = f"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}"
mocked_responses.add(
responses.GET,
source_plates_url,
json=cherrytrack_source_plates_response,
status=cherrytrack_mock_source_plates_status,
)
yield
@pytest.fixture
def cherrytrack_mock_destination_plate(
app,
mocked_responses,
destination_barcode,
cherrytrack_destination_plate_response,
cherrytrack_mock_destination_plate_status,
):
destination_plate_url = f"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}"
mocked_responses.add(
responses.GET,
destination_plate_url,
json=cherrytrack_destination_plate_response,
status=cherrytrack_mock_destination_plate_status,
)
yield
@pytest.fixture
def cherrytrack_run_info_response(run_id):
return {
"data": {
"id": run_id,
FIELD_CHERRYTRACK_USER_ID: "user1",
FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: "aLiquidHandlerSerialNumber",
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: "biosero",
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: "CPA",
}
}
@pytest.fixture
def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id):
return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id)
def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response):
cherrytrack_destination_plate_response["wells"][0]["destination_coordinate"] = "H12"
return cherrytrack_destination_plate_response
@pytest.fixture
def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode):
return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode)
@pytest.fixture
def samples_from_cherrytrack_into_mongo(app, source_barcode):
try:
samples = rows_for_samples_in_cherrytrack(source_barcode)
with app.app_context():
samples_collection = app.data.driver.db.samples
inserted_samples = samples_collection.insert_many(samples)
            # yield a copy so that the test can change it however it wants
yield copy.deepcopy(samples), inserted_samples
# clear up after the fixture is used
finally:
samples_collection.delete_many({})
@pytest.fixture
def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
try:
delete_data()
example = cherrytrack_mlwh_example(source_barcode)
# inserts
insert_into_mlwh(
app,
example["lighthouse_sample"],
mlwh_sql_engine,
app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
example["sample"],
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
yield
finally:
delete_data()
| 1.8125 | 2 |
FoxhoundApp/FoxhoundApp/TrafficApp/urls.py | Anton250/MoscowCityHack2021_FoxoundTeam | 0 | 12795193 | <reponame>Anton250/MoscowCityHack2021_FoxoundTeam
from django.conf.urls import url, include
from rest_auth.views import (
LoginView,
LogoutView,
UserDetailsView
)
from rest_framework import routers
from FoxhoundApp.TrafficApp.views import ItemsView, HeatMapView
rest_auth_urls = [
url(r'^login/$', LoginView.as_view(), name='rest_login'),
url(r'^logout/$', LogoutView.as_view(), name='rest_logout'),
url(r'^user/$', UserDetailsView.as_view(), name='user'),
]
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^auth/', include((rest_auth_urls, 'auth'), namespace='auth')),
url(r'^items/', ItemsView.as_view(), name='Items'),
url(r'^heatmap/', HeatMapView.as_view(), name='HeatMap'),
]
| 1.96875 | 2 |
djnic/cambios/migrations/0003_auto_20201013_2210.py | avdata99/nic | 8 | 12795194 | <reponame>avdata99/nic
# Generated by Django 3.1.2 on 2020-10-14 01:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cambios', '0002_campocambio_uid_anterior'),
]
operations = [
migrations.AlterField(
model_name='campocambio',
name='campo',
field=models.CharField(db_index=True, max_length=240, null=True),
),
]
| 1.515625 | 2 |
tests/utils/test_throttle.py | binderjoe/sdu-commons | 0 | 12795195 | from botocore.exceptions import ClientError
from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource
class Counter:
def __init__(self):
self.counter = 0
def count(self):
self.counter += 1
def test_throttle_exception():
class BogusException(Exception):
pass
max_retries = 1
retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries)
@retry_on_bogus
def bogus_function(counter):
counter.count()
if counter.counter <= max_retries:
raise BogusException
counter = Counter()
bogus_function(counter)
assert counter.counter == max_retries + 1
def test_throttled_boto_resource():
max_retries = 1
class BogusResource:
def __init__(self, counter, max_retries):
self.counter = counter
self._max_retries = max_retries
def bogus_function(self):
self.counter.count()
if self.counter.counter <= self._max_retries:
raise ClientError(
error_response={
'Error': {
'Code': 'ThrottlingException'
}
},
operation_name='bogus'
)
counter = Counter()
bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries))
bogus_resource.bogus_function()
assert counter.counter == max_retries + 1
| 2.546875 | 3 |
qt/CurrencyListModel.py | BradleyCSO/university-thesis | 0 | 12795196 | <filename>qt/CurrencyListModel.py
from PyQt5.QtCore import QModelIndex
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from core import AppCore
class CurrencyListModel(QStandardItemModel):
"""
Model which fetches all known symbols
from the DB and displays them in a list
"""
core = AppCore()
items = {}
def __init__(self, parent=None):
"""
Initialise with optional parent
:param parent: Parent object
"""
super().__init__(parent)
# Get currencies from DB
self.core.get_data_store().cursor.execute("SELECT symbol from currency_symbols")
results = self.core.get_data_store().cursor.fetchall()
# Insert list into view
for currency in results:
symbol = currency[0]
item = QStandardItem()
item.setText(symbol)
item.setEditable(False)
self.items[symbol] = item
self.appendRow(item)
def item_for(self, symbol):
"""
Get the item for a provided symbol
:param symbol: Symbol
:return: Item for symbol (invalid if not found)
"""
        try:
            return self.items[symbol]
        except KeyError:
            # symbol not present in the model: return an empty (invalid) item
            return QStandardItem()
def index_for(self, symbol):
"""
Get the index for a provided symbol
:param symbol: Symbol
:return: Index for symbol (invalid if not found)
"""
        try:
            return self.items[symbol].index()
        except KeyError:
            # symbol not present in the model: return an invalid index
            return QModelIndex()
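# Minimal usage sketch (assumed surroundings, not part of this module): attach the
# model to a Qt view; 'USD' below is a hypothetical symbol from the currency_symbols table.
#
#   from PyQt5.QtWidgets import QListView
#
#   view = QListView()
#   model = CurrencyListModel(view)
#   view.setModel(model)
#   view.setCurrentIndex(model.index_for('USD'))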
| 2.875 | 3 |
odata/navproperty.py | altedra/ODAlchemy | 0 | 12795197 | # -*- coding: utf-8 -*-
"""
Navigation properties
---------------------
The entity can define properties that link to other entities. These are known
as navigation properties and are supported in this library.
.. code-block:: python
>>> order = Service.query(Order).first()
>>> order.Shipper
<Entity(Shipper:3)>
>>> order.Shipper.CompanyName
'Federal Shipping'
When creating new instances, relationships can be assigned via navigation
properties:
.. code-block:: python
# query a shipper instance, just for this example
Shipper = Service.entities['Shipper']
my_shipper = Service.query(Shipper).first()
# assign for the new Order
order.Shipper = my_shipper
Service.save(order)
"""
try:
# noinspection PyUnresolvedReferences
from urllib.parse import urljoin
except ImportError:
# noinspection PyUnresolvedReferences
from urlparse import urljoin
class NavigationProperty(object):
"""
A Property-like object for marking relationships between entities, but does
not inherit from PropertyBase.
"""
def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False):
from odata.property import PropertyBase
self.name = name
self.entitycls = entitycls
self.is_collection = collection
self.is_containment = containment
if isinstance(foreign_key, PropertyBase):
self.foreign_key = foreign_key.name
else:
self.foreign_key = foreign_key
def __repr__(self):
return u'<NavigationProperty to {0}>'.format(self.entitycls)
def instances_from_data(self, raw_data, connection):
if self.is_collection:
return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else []
else:
return self.instance_from_data(raw_data, connection) if raw_data else None
    def instance_from_data(self, raw_data, connection):  # mwa: this needs to be separated from navproperty
entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type'))
e = entitycls.__new__(entitycls, from_data=raw_data)
es = e.__odata__
es.connection = connection
return e
def _getClass_by_response_type(self, matched_class, odata_type):
        if not odata_type:
            return matched_class
        for subclass in matched_class.__subclasses__():
            if subclass.__odata_type__ == odata_type[1:]:
                return self._getClass_by_response_type(subclass, odata_type)
return matched_class
def _get_parent_cache(self, instance):
es = instance.__odata__
ic = es.nav_cache
if self.name not in ic:
cache = {}
ic[self.name] = cache
else:
cache = ic[self.name]
return cache
def _get_instances_from_server(self, instance):
es = instance.__odata__
connection = es.connection
parent_url = es.instance_url
parent_url += '/'
url = urljoin(parent_url, self.name)
raw_data = connection.execute_get(url)
instances = self.instances_from_data(raw_data, connection)
while '@odata.nextLink' in raw_data:
url = raw_data.get('@odata.nextLink')
raw_data = connection.execute_get(url)
instances.extend(self.instances_from_data(raw_data, connection))
return instances
def __set__(self, instance, value):
"""
:type instance: odata.entity.EntityBase
"""
cache = self._get_parent_cache(instance)
if self.is_collection:
cache['collection'] = value
else:
cache['single'] = value
instance.__odata__.set_property_dirty(self)
def __get__(self, instance, owner):
"""
:type instance: odata.entity.EntityBase
"""
if instance is None:
return self
es = instance.__odata__
cache = self._get_parent_cache(instance)
if es.instance_url is None:
if self.is_collection:
return cache.get('collection', [])
return cache.get('single', None)
cache_type = 'collection' if self.is_collection else 'single'
try:
return cache[cache_type]
except KeyError:
cache[cache_type] = self._get_instances_from_server(instance)
return cache[cache_type]
| 3.34375 | 3 |
billingyard/cli.py | MartinVondrak/billing-yard | 0 | 12795198 | <reponame>MartinVondrak/billing-yard<gh_stars>0
import click
from .billingyard import BillingYard
from .models import Invoice
@click.group('billingyard')
@click.option('-s', '--sender', type=str, default='sender.json')
@click.option('-t', '--template', type=str)
@click.pass_context
def cli(ctx, sender: str, template: str):
ctx.obj = BillingYard(sender, template)
pass_billing_yard = click.make_pass_decorator(BillingYard)
@cli.command()
@click.option('-r', '--receiver', type=str)
@click.option('-i', '--invoice', type=str)
@click.option('--vat', is_flag=True)
@pass_billing_yard
def issue_invoice(billing_yard: BillingYard, receiver: str, invoice: str, vat: bool):
if vat:
billing_yard.set_vat_invoice_processor()
invoice: Invoice = billing_yard.create_invoice(invoice, receiver)
billing_yard.print_invoice(invoice)
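# Example invocation (assumed console entry point and file names; the exact subcommand
# spelling depends on the Click version -- recent Click exposes it as `issue-invoice`):
#
#   billingyard -s sender.json -t invoice.html issue-invoice -r receiver.json -i invoice.json --vat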
| 2.125 | 2 |
python/pynamodb/pynamodb-test/blog.py | kskumgk63/trace-examples | 75 | 12795199 | <gh_stars>10-100
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute
class Blog(Model):
class Meta:
table_name='Blog'
region = 'us-west-1'
write_capacity_units = 1
read_capacity_units = 1
host = "http://dynamodb:8000"
title = UnicodeAttribute(hash_key=True)
content = UnicodeAttribute(range_key=True)
memo = UnicodeAttribute() | 2.421875 | 2 |
examples/vn_trader/data_download.py | ZJMXX/vnpy | 2 | 12795200 | <gh_stars>1-10
import time
from datetime import datetime
from backtest_entrance.setting import Info
from vnpy.app.data_manager import DataManagerApp
from vnpy.app.data_manager.engine import ManagerEngine
from vnpy.event import EventEngine
from vnpy.gateway.binance import BinanceGateway
from vnpy.gateway.binances import BinancesGateway
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.engine import MainEngine
binance_setting = {
"key": Info.key.value,
"secret": Info.secret.value,
"session_number": 3,
"proxy_host": "127.0.0.1",
"proxy_port": 10809,
}
binances_setting = {
"key": Info.key.value,
"secret": Info.secret.value,
"会话数": 3,
"服务器": "REAL",
"合约模式": "正向",
"代理地址": "127.0.0.1",
"代理端口": 10809,
}
symbol_exchange_dict = {
'btcusdt': Exchange.BINANCE,
'ethusdt': Exchange.BINANCE,
'bnbusdt': Exchange.BINANCE,
'adausdt': Exchange.BINANCE,
'dogeusdt': Exchange.BINANCE,
'xrpusdt': Exchange.BINANCE,
'bchusdt': Exchange.BINANCE,
'linkusdt': Exchange.BINANCE,
'ltcusdt': Exchange.BINANCE,
'xlmusdt': Exchange.BINANCE,
'etcusdt': Exchange.BINANCE,
'cocosusdt': Exchange.BINANCE,
'thetausdt': Exchange.BINANCE,
'vetusdt': Exchange.BINANCE,
'eosusdt': Exchange.BINANCE,
'maticusdt': Exchange.BINANCE,
'trxusdt': Exchange.BINANCE,
'xmrusdt': Exchange.BINANCE,
'neousdt': Exchange.BINANCE,
'fttusdt': Exchange.BINANCE,
}
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
main_engine.add_gateway(BinanceGateway)
main_engine.add_gateway(BinancesGateway)
main_engine.add_app(DataManagerApp)
main_engine.connect(binance_setting, "BINANCE")
main_engine.connect(binances_setting, "BINANCES")
manager_engine = ManagerEngine(main_engine=main_engine, event_engine=event_engine)
time.sleep(5)
for i in symbol_exchange_dict.items():
    manager_engine.download_bar_data(symbol=i[0], exchange=i[1], interval=Interval.DAILY,
start=datetime(2000, 1, 1, 0, 0, 0))
    print(f'Data download finished: {i[0]}')
| 1.710938 | 2 |
examples/FigureCanvas.py | Ellis0817/Introduction-to-Programming-Using-Python | 0 | 12795201 | from tkinter import * # Import tkinter
class FigureCanvas(Canvas):
def __init__(self, container, figureType, filled = False,
width = 100, height = 100):
super().__init__(container,
width = width, height = height)
self.__figureType = figureType
self.__filled = filled
self.drawFigure()
def getFigureType(self):
return self.__figureType
def getFilled(self):
return self.__filled
def setFigureType(self, figureType):
self.__figureType = figureType
self.drawFigure()
def setFilled(self, filled):
self.__filled = filled
self.drawFigure()
def drawFigure(self):
if self.__figureType == "line":
self.line()
elif self.__figureType == "rectangle":
self.rectangle()
elif self.__figureType == "oval":
self.oval()
elif self.__figureType == "arc":
self.arc()
def line(self):
width = int(self["width"])
height = int(self["height"])
self.create_line(10, 10, width - 10, height - 10)
self.create_line(width - 10, 10, 10, height - 10)
def rectangle(self):
width = int(self["width"])
height = int(self["height"])
if self.__filled:
self.create_rectangle(10, 10, width - 10, height - 10,
fill = "red")
else:
self.create_rectangle(10, 10, width - 10, height - 10)
def oval(self):
width = int(self["width"])
height = int(self["height"])
if self.__filled:
self.create_oval(10, 10, width - 10, height - 10,
fill = "red")
else:
self.create_oval(10, 10, width - 10, height - 10)
def arc(self):
width = int(self["width"])
height = int(self["height"])
if self.__filled:
self.create_arc(10, 10, width - 10, height - 10,
start = 0, extent = 145, fill = "red")
else:
self.create_arc(10, 10, width - 10, height - 10,
start = 0, extent = 145)
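# Minimal usage sketch (assumed, not part of the original example): place one canvas
# of each figure type in a Tk window.
#
#   window = Tk()
#   for i, figure in enumerate(["line", "rectangle", "oval", "arc"]):
#       FigureCanvas(window, figure, filled = True).grid(row = 1, column = i + 1)
#   window.mainloop()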
| 3.3125 | 3 |
file_handlers.py | EpocDotFr/microsoft-sticky-notes-kanboard-sync | 0 | 12795202 | from watchdog.events import PatternMatchingEventHandler
from utils import debug
from urllib.parse import unquote
from rtf.Rtf2Markdown import getMarkdown
import watchdog.events
import olefile
import sqlite3
import configparser
import codecs
import threading
class FileHandlerInterface(PatternMatchingEventHandler):
"""Base class for all the Sticky Notes file handlers."""
sync_engine = None
idle_timeout = None
def __init__(self, sync_engine, patterns=None):
self.sync_engine = sync_engine
super().__init__(ignore_directories=True, patterns=patterns)
def is_valid_event(self, event):
"""Check if event is a valid event to be proceesed by the file handler."""
if self.sync_engine.sticky_notes_file_path != event.src_path:
return False
if event.event_type == watchdog.events.EVENT_TYPE_MODIFIED:
return True
elif event.event_type == watchdog.events.EVENT_TYPE_DELETED:
debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted', err=True, terminate=True)
elif event.event_type == watchdog.events.EVENT_TYPE_MOVED:
debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly moved to ' + event.dest_path, err=True,
terminate=True)
else:
debug('Unhandled event type: ' + event.event_type, err=True)
return False
def on_any_event(self, event):
        if not self.is_valid_event(event):
            return
# Restart the idle timeout
if self.idle_timeout:
self.idle_timeout.cancel()
self.idle_timeout = threading.Timer(5.0, self.sync_engine.sync_notes, args=[self.get_notes()])
self.idle_timeout.start()
def get_notes(self):
"""Must be overridden to return a list of notes regarding the filetype we are watching."""
raise Exception('get_notes must be overridden')
class SNTFileHandler(FileHandlerInterface):
"""StickyNotes.snt file handler"""
snt_file = None
def __init__(self, sync_engine):
if not olefile.isOleFile(sync_engine.sticky_notes_file_path):
debug(sync_engine.sticky_notes_file_path + ' isn\'t a valid Sticky Notes file', err=True, terminate=True)
super().__init__(patterns=['*.snt'], sync_engine=sync_engine)
def get_notes(self):
notes = []
self.snt_file = olefile.OleFileIO(self.sync_engine.sticky_notes_file_path)
for storage in self.snt_file.listdir(storages=True, streams=False):
note_id = storage[0] # UUID-like string representing the note ID
note_text_rtf_file = '0' # RTF content of the note
with self.snt_file.openstream([note_id, note_text_rtf_file]) as note_content:
note_text_rtf = note_content.read().decode('unicode')
notes.append({'text': getMarkdown(note_text_rtf), 'color': None})
self.snt_file.close()
return notes
class SQLiteFileHandler(FileHandlerInterface):
"""plum.sqlite file handler"""
colors_map = {
'Yellow': 'yellow',
'Green': 'green',
'Blue': 'blue',
'Purple': 'purple',
'Pink': 'pink'
}
database = None
def __init__(self, sync_engine):
super().__init__(patterns=['*.sqlite'], sync_engine=sync_engine)
def get_notes(self):
self.database = sqlite3.connect('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True)
self.database.row_factory = sqlite3.Row
notes_in_db = self.database.execute('SELECT Text, Theme FROM Note')
notes = [{'text': getMarkdown(note['Text']), 'color': self.get_note_color(note['Theme'])} for note in notes_in_db]
self.database.close()
return notes
    def get_note_color(self, theme):
        return self.colors_map[theme] if theme in self.colors_map else None
class INIFileHandler(FileHandlerInterface):
"""Settings.ini file handler"""
sidebar_config = None
def __init__(self, sync_engine):
super().__init__(patterns=['*.ini'], sync_engine=sync_engine)
def get_notes(self):
notes = []
# This masquerade to decode the ugly file content from UTF-16 (UCS-2) LE with BOM to unicode
with open(self.sync_engine.sticky_notes_file_path, 'rb') as sidebar_config_file:
sidebar_config_file_content = sidebar_config_file.read()
sidebar_config_file_content = sidebar_config_file_content[len(codecs.BOM_UTF16_LE):] # Remove the BOM
self.sidebar_config = configparser.ConfigParser(delimiters=('='), interpolation=None)
self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le'))
notes_color = None
for section in self.sidebar_config.sections():
if not section.startswith('Section '):
continue
if 'NoteCount' not in self.sidebar_config[section]:
continue
notes_color = self.sidebar_config[section]['ColorSaved'].strip('"') if 'ColorSaved' in self.sidebar_config[
section] and notes_color is None else None
for key in self.sidebar_config[section]:
if key.isdigit():
notes.append({'text': unquote(self.sidebar_config[section][key].strip('"')), 'color': notes_color})
break
return notes
| 2.375 | 2 |
src/backend/django_messages_drf/utils.py | jeremyd4500/senior-project-2020 | 0 | 12795203 | <gh_stars>0
"""
All Utils used on this package module live here
"""
from django.db import models
from functools import wraps
def cached_attribute(func):
cache_name = f"_{func.__name__}"
@wraps(func)
def inner(self, *args, **kwargs):
if hasattr(self, cache_name):
return getattr(self, cache_name)
val = func(self, *args, **kwargs)
setattr(self, cache_name, val)
return val
    return inner
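# Example usage (illustrative; `Thread` and its `messages` relation are hypothetical):
#
#   class Thread(models.Model):
#       @cached_attribute
#       def first_message(self):
#           return self.messages.order_by("created_at").first()
#
# The first call per instance evaluates the queryset; later calls reuse the value
# cached on the `_first_message` attribute.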
class AuditModel(models.Model):
"""A common audit model for tracking"""
created_at = models.DateTimeField(null=False, blank=False, auto_now_add=True)
modified_at = models.DateTimeField(null=False, blank=False, auto_now=True)
| 2.453125 | 2 |
crawler/RISJbot/spiders/newsspecifiedspider.py | ausnews/ausnews-search | 10 | 12795204 | # -*- coding: utf-8 -*-
import logging
from scrapy.spiders import Spider
from scrapy.http import Request
logger = logging.getLogger(__name__)
# This spider is a base for those attempting to crawl and parse a specified
# list of URLs rather than using an RSS feed or a sitemap. It needs the
# SPECIFIED_URLS_FILE setting set up to point to a file with a list of URLs.
class NewsSpecifiedSpider(Spider):
start_urls = []
def start_requests(self):
if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True:
logger.warning('RefetchControl is incompatible with '
'NewsSpecifiedSpider and will give spurious '
'warnings. Try setting REFETCHCONTROL_ENABLED to '
'False in settings.py.')
startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE')
if not startfn:
logger.critical("SPECIFIED_URLS_FILE must be configured (e.g. in "
"settings.py) to point to a file containing a "
"list of URLs.")
return
for url in self.start_urls:
yield Request(url, dont_filter=True)
with open(startfn, 'r') as f:
urls = [u.strip() for u in f.readlines()]
logger.debug(f"URLs read from SPECIFIED_URL_FILE: {urls}")
for url in urls:
if url != '':
yield Request(url, dont_filter=True)
def parse(self, response):
return self.parse_page(response)
def parse_page(self, response):
raise NotImplementedError
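# Minimal subclass sketch (assumed names; SPECIFIED_URLS_FILE must point at a
# newline-separated list of article URLs in the project settings):
#
#   class MyNewsSpider(NewsSpecifiedSpider):
#       name = 'mynewsspider'
#
#       def parse_page(self, response):
#           yield {'url': response.url, 'title': response.css('h1::text').get()}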
| 2.90625 | 3 |
unit5/problem_sets/1_1/solution/u5_ps1_1_p_6_7.py | ga-at-socs/pa-2021-code | 0 | 12795205 | # -*- coding: utf-8 -*-
"""
Practical Algorithms
Problem set: Unit 5, 1.1
Problem statement:
4. Modify your binary search algorithm (from #3) to work with words rather
than integers. Test it on a small list of words, e.g.,
["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"].
The search should be case in-sensitive.
7. Now, test your binary search algorithm from #6 on a list of all
English words in the dictionary. See this for a tip on how to get a list of
all dictionary words. Note the time taken to search for a word.
Compare it with your timing result from #5, and comment on your findings.
https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/
"""
#%% binary search
def binary_search_word(list1, word):
"""
Carry out a binary search of the given sorted list for a given word
Parameters
----------
list1: input list, sorted
word: the value to be searched
Returns
-------
True/False
"""
size = len(list1)
mid = size // 2
#debug message, remove when not debugging
#if(size):
# print ("While searching for word: ", word, ", binary search called on this list starting at : ", list1[0], " of size ", size)
#base case
if size == 0:
return False
#item found
if(list1[mid].lower()==word.lower()):
return True
#recursive call
if(list1[mid].lower() < word.lower()):
return binary_search_word(list1[mid+1:size], word)
else:
return binary_search_word(list1[0:mid], word)
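# Note: each recursive call slices the list (list1[mid+1:size], list1[0:mid]), which
# copies O(n) elements; passing low/high indices instead would keep the whole search
# at O(log n) comparisons without the copying overhead.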
#%% test binary search
mylist = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
assert (binary_search_word(mylist, "monDay")==True)
assert (binary_search_word(mylist, "Funday")==False)
print("Binary search test passed")
#%% testing binary search on a list of dictionary words
from nltk.corpus import words
word_list = words.words()
# prints 236736
print (len(word_list))
from timeit import default_timer as timer
list_of_words_to_search = ["yesterday", "omuamua", "waqar", "different", "obtuse", "zoo", "aardvark", "style", "zaazoozum", "aaaaaaaa"]
n = len(list_of_words_to_search)
results = {}
cumulative_time = 0
for word in list_of_words_to_search:
start = timer()
found = binary_search_word(word_list, word)
end = timer()
time_taken = end-start
results[word] = (round(time_taken,5), found)
cumulative_time += time_taken
print("\n** Binary Search of word list**")
print("Search for these words: ", list_of_words_to_search)
print("\nTime taken to search various words and the result:")
for k,v in results.items():
print(k, v)
print("\nTotal time to carry out search of ", n, " words = ", round(cumulative_time,5), " seconds")
print("Average search time per word = ", round(cumulative_time/n,5), " seconds")
| 4.21875 | 4 |
index_2/np_dot.py | specbug/nnfs | 0 | 12795206 | import numpy as np
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
dot_product = np.dot(a, b)
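# 1*4 + 2*5 + 3*6 = 32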
print(dot_product) | 3.34375 | 3 |
Text.py | Joseph-Barker/cst205-group16-project | 0 | 12795207 | """
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>
CST 205 Project: Group 16
Text.py
5/8/2020
This class is responsible for handling text extracted from an image.
"""
class Text:
"""A simple class to define text extracted from an image"""
def __init__( self, text, app_window ):
# instance variables unique to each instance
self.text = text
self.app_window = app_window
# handles overlaying of text output
replace = True
# update text edit with newly extracted text
self.app_window.text_output(text, replace)
| 3.046875 | 3 |
calculate.py | cu-swe4s-fall-2020/version-control-maclyne | 1 | 12795208 | #!/usr/bin/env python
# File: calculate.py
# Initial date: 4 Sept 2020
# Author: <NAME>
# School assignment: For Assignment #0 of class MCDB6440: Software Engineering for Scientists
# Description: Intro to using GitHub Classroom. Practice with creating and uploading files to github etc.
# This file imports funcitons div and add from math_lib.py
import sys
import math_lib as ml
## --- input parameters ---(from command line or shell script)
cmd = sys.argv[1] #command (can be 'add' or 'div')
a = float(sys.argv[2]) #numerical
b = float(sys.argv[3]) #numerical
## print statements:
print('input values are '+ str(a) + ' and ' + str(b))
## ---
if cmd == 'add':
foo = ml.add(a,b)
print('the sum is: '+ str(foo))
if cmd == 'div':
foo = ml.div(a,b)
print('first/second is: '+ str(foo))
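## Example invocation (assuming math_lib.add/div behave as their names suggest):
##   python calculate.py add 2 3   ->  the sum is: 5.0
##   python calculate.py div 6 3   ->  first/second is: 2.0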
| 3.78125 | 4 |
tests/test_httpbroker.py | scieloorg/scieloapi.py | 1 | 12795209 | <filename>tests/test_httpbroker.py
import unittest
import mocker
from scieloapi import httpbroker, exceptions
import doubles
class CheckHttpStatusTests(unittest.TestCase):
def test_400_raises_BadRequest(self):
response = doubles.RequestsResponseStub()
response.status_code = 400
self.assertRaises(exceptions.BadRequest,
lambda: httpbroker.check_http_status(response))
def test_401_raises_Unauthorized(self):
response = doubles.RequestsResponseStub()
response.status_code = 401
self.assertRaises(exceptions.Unauthorized,
lambda: httpbroker.check_http_status(response))
def test_403_raises_Forbidden(self):
response = doubles.RequestsResponseStub()
response.status_code = 403
self.assertRaises(exceptions.Forbidden,
lambda: httpbroker.check_http_status(response))
def test_404_raises_NotFound(self):
response = doubles.RequestsResponseStub()
response.status_code = 404
self.assertRaises(exceptions.NotFound,
lambda: httpbroker.check_http_status(response))
def test_405_raises_NotFound(self):
response = doubles.RequestsResponseStub()
response.status_code = 405
self.assertRaises(exceptions.MethodNotAllowed,
lambda: httpbroker.check_http_status(response))
def test_406_raises_NotAcceptable(self):
response = doubles.RequestsResponseStub()
response.status_code = 406
self.assertRaises(exceptions.NotAcceptable,
lambda: httpbroker.check_http_status(response))
def test_500_raises_InternalServerError(self):
response = doubles.RequestsResponseStub()
response.status_code = 500
self.assertRaises(exceptions.InternalServerError,
lambda: httpbroker.check_http_status(response))
def test_502_raises_BadGateway(self):
response = doubles.RequestsResponseStub()
response.status_code = 502
self.assertRaises(exceptions.BadGateway,
lambda: httpbroker.check_http_status(response))
def test_503_raises_ServiceUnavailable(self):
response = doubles.RequestsResponseStub()
response.status_code = 503
self.assertRaises(exceptions.ServiceUnavailable,
lambda: httpbroker.check_http_status(response))
def test_200_returns_None(self):
response = doubles.RequestsResponseStub()
response.status_code = 200
self.assertIsNone(httpbroker.check_http_status(response))
class TranslateExceptionsTests(unittest.TestCase):
def test_from_ConnectionError_to_ConnectionError(self):
"""
from requests.exceptions.ConnectionError
to scieloapi.exceptions.ConnectionError
"""
import requests
@httpbroker.translate_exceptions
def foo():
raise requests.exceptions.ConnectionError()
self.assertRaises(exceptions.ConnectionError,
lambda: foo())
def test_from_HTTPError_to_HTTPError(self):
"""
from requests.exceptions.HTTPError
to scieloapi.exceptions.HTTPError
"""
import requests
@httpbroker.translate_exceptions
def foo():
raise requests.exceptions.HTTPError()
self.assertRaises(exceptions.HTTPError,
lambda: foo())
def test_from_Timeout_to_Timeout(self):
"""
from requests.exceptions.Timeout
to scieloapi.exceptions.Timeout
"""
import requests
@httpbroker.translate_exceptions
def foo():
raise requests.exceptions.Timeout()
self.assertRaises(exceptions.Timeout,
lambda: foo())
def test_from_TooManyRedirects_to_HTTPError(self):
"""
from requests.exceptions.TooManyRedirects
to scieloapi.exceptions.HTTPError
"""
import requests
@httpbroker.translate_exceptions
def foo():
raise requests.exceptions.TooManyRedirects()
self.assertRaises(exceptions.HTTPError,
lambda: foo())
def test_from_RequestException_to_HTTPError(self):
"""
from requests.exceptions.RequestException
to scieloapi.exceptions.HTTPError
"""
import requests
@httpbroker.translate_exceptions
def foo():
raise requests.exceptions.RequestException()
self.assertRaises(exceptions.HTTPError,
lambda: foo())
class PrepareParamsFunctionTests(unittest.TestCase):
def test_sort_dict_by_key(self):
params = {'username': 1, 'api_key': 2, 'c': 3}
self.assertEqual(httpbroker.prepare_params(params),
[('api_key', 2), ('c', 3), ('username', 1)])
def test_sort_list_of_tuples(self):
params = [('username', 1), ('api_key', 2), ('c', 3)]
self.assertEqual(httpbroker.prepare_params(params),
[('api_key', 2), ('c', 3), ('username', 1)])
def test_None_returns_None(self):
params = None
self.assertIsNone(httpbroker.prepare_params(params))
class GetFunctionTests(mocker.MockerTestCase):
def test_user_agent_is_properly_set(self):
"""
By properly I mean: scieloapi/:version, e.g.
scieloapi/0.4
"""
import requests
mock_response = self.mocker.mock(requests.Response)
mock_response.json()
self.mocker.result({'title': 'foo'})
mock_response.status_code
self.mocker.result(200)
mock_requests_get = self.mocker.mock()
mock_requests_get('http://manager.scielo.org/api/v1/journals/70/',
headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')),
params=None)
self.mocker.result(mock_response)
mock_requests = self.mocker.replace('requests')
mock_requests.get
self.mocker.result(mock_requests_get)
self.mocker.replay()
self.assertEqual(
httpbroker.get('http://manager.scielo.org/api/v1/',
endpoint='journals', resource_id='70'),
{'title': 'foo'}
)
def test_resource_id_makes_endpoint_mandatory(self):
self.assertRaises(
ValueError,
lambda: httpbroker.get('http://manager.scielo.org/api/v1/', resource_id='70')
)
def test_https_turns_off_ca_cert_verification(self):
import requests
mock_response = self.mocker.mock(requests.Response)
mock_response.json()
self.mocker.result({'title': 'foo'})
mock_response.status_code
self.mocker.result(200)
mock_requests_get = self.mocker.mock()
mock_requests_get('https://manager.scielo.org/api/v1/journals/70/',
headers=mocker.ANY,
params=None,
verify=False)
self.mocker.result(mock_response)
mock_requests = self.mocker.replace('requests')
mock_requests.get
self.mocker.result(mock_requests_get)
self.mocker.replay()
self.assertEqual(
httpbroker.get('https://manager.scielo.org/api/v1/',
endpoint='journals', resource_id='70'),
{'title': 'foo'}
)
class PostFunctionTests(mocker.MockerTestCase):
def test_user_agent_is_properly_set(self):
"""
By properly I mean: scieloapi/:version, e.g.
scieloapi/0.4
"""
import requests
mock_response = self.mocker.mock(requests.Response)
mock_response.headers
self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'})
self.mocker.count(2)
mock_response.status_code
self.mocker.result(201)
self.mocker.count(2)
mock_requests_post = self.mocker.mock()
mock_requests_post(url='http://manager.scielo.org/api/v1/journals/',
headers=mocker.MATCH(lambda x: x['User-Agent'].startswith('scieloapi/')),
data='{"title": "foo"}')
self.mocker.result(mock_response)
mock_requests = self.mocker.replace('requests')
mock_requests.post
self.mocker.result(mock_requests_post)
self.mocker.replay()
self.assertEqual(
httpbroker.post('http://manager.scielo.org/api/v1/',
endpoint='journals', data='{"title": "foo"}'),
'http://manager.scielo.org/api/v1/journals/4/'
)
def test_content_type_is_properly_set(self):
"""
Content-Type header must be application/json
"""
import requests
mock_response = self.mocker.mock(requests.Response)
mock_response.headers
self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'})
self.mocker.count(2)
mock_response.status_code
self.mocker.result(201)
self.mocker.count(2)
mock_requests_post = self.mocker.mock()
mock_requests_post(url='http://manager.scielo.org/api/v1/journals/',
headers=mocker.MATCH(lambda x: x['Content-Type'] == 'application/json'),
data='{"title": "foo"}')
self.mocker.result(mock_response)
mock_requests = self.mocker.replace('requests')
mock_requests.post
self.mocker.result(mock_requests_post)
self.mocker.replay()
self.assertEqual(
httpbroker.post('http://manager.scielo.org/api/v1/',
endpoint='journals', data='{"title": "foo"}'),
'http://manager.scielo.org/api/v1/journals/4/'
)
def test_unexpected_status_code_raises_APIError(self):
import requests
mock_response = self.mocker.mock(requests.Response)
mock_response.status_code
self.mocker.result(410)
self.mocker.count(3)
mock_requests_post = self.mocker.mock()
mock_requests_post(url='http://manager.scielo.org/api/v1/journals/',
headers=mocker.ANY,
data='{"title": "foo"}')
self.mocker.result(mock_response)
mock_requests = self.mocker.replace('requests')
mock_requests.post
self.mocker.result(mock_requests_post)
self.mocker.replay()
self.assertRaises(exceptions.APIError,
lambda: httpbroker.post('http://manager.scielo.org/api/v1/',
endpoint='journals',
data='{"title": "foo"}')
)
def test_location_header_is_returned(self):
import requests
mock_response = self.mocker.mock(requests.Response)
mock_response.headers
self.mocker.result({'location': 'http://manager.scielo.org/api/v1/journals/4/'})
self.mocker.count(2)
mock_response.status_code
self.mocker.result(201)
self.mocker.count(2)
mock_requests_post = self.mocker.mock()
mock_requests_post(url='http://manager.scielo.org/api/v1/journals/',
headers=mocker.ANY,
data='{"title": "foo"}')
self.mocker.result(mock_response)
mock_requests = self.mocker.replace('requests')
mock_requests.post
self.mocker.result(mock_requests_post)
self.mocker.replay()
self.assertEqual(
httpbroker.post('http://manager.scielo.org/api/v1/',
endpoint='journals', data='{"title": "foo"}'),
'http://manager.scielo.org/api/v1/journals/4/'
)
class MakeFullUrlFunctionTests(unittest.TestCase):
def test_missing_trailing_slash(self):
path_segments = ['http://manager.scielo.org', 'api', 'v1', 'journals']
self.assertEqual(httpbroker._make_full_url(*path_segments),
'http://manager.scielo.org/api/v1/journals/')
def test_missing_scheme(self):
path_segments = ['manager.scielo.org', 'api', 'v1', 'journals']
self.assertEqual(httpbroker._make_full_url(*path_segments),
'http://manager.scielo.org/api/v1/journals/')
def test_https(self):
path_segments = ['https://manager.scielo.org', 'api', 'v1', 'journals']
self.assertEqual(httpbroker._make_full_url(*path_segments),
'https://manager.scielo.org/api/v1/journals/')
| 2.53125 | 3 |
maui/backend/serial/partition.py | cstatz/maui | 0 | 12795210 | <reponame>cstatz/maui<filename>maui/backend/serial/partition.py
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
__author__ = 'christoph.statz <at> tu-dresden.de'
from numpy import ndindex
from maui.backend.helper import calc_local_indices, calculate_adjacency, \
create_mask_from_indices, do_create_domain, modify_halos_and_indices
from maui.backend.domain import Domain
from maui.mesh.helper import intersect_bounds
# TODO: Partition prototype is needed
class Partition(object):
def __init__(self, mesh, partitions, stencil, bounds=None):
""" Mesh partitioning.
:param mesh: Mesh, the mesh instance that is to be partitioned.
:param partitions: Tuple, number of partitions in each dimension of the mesh.
:param stencil: 2-Tuple of Tuple, stencil/footprint of the communication/halos for a multi-domain Field.
:param bounds: 2-Tuple of Tuple, bounds of the partition inside the mesh.
"""
# todo: implement properties as properties
self.parent = self
self.mesh = mesh
self.partitions = partitions
self.stencil = stencil
# TODO: @property protect
self.domains = dict()
self.domain_numbers = dict()
self.masks_without_halos = dict()
p = [idx for idx in ndindex(partitions)]
for k in range(len(p)):
self.domain_numbers[p[k]] = k
if bounds is not None:
self.bounds = intersect_bounds(mesh.bounds, bounds)
else:
self.bounds = self.mesh.bounds
for idx in ndindex(self.partitions):
indices = calc_local_indices(mesh.shape, self.partitions, idx)
halos = calculate_adjacency(self.partitions, idx, stencil)
if do_create_domain(mesh, create_mask_from_indices(indices), bounds):
if bounds is not None:
halos, indices = modify_halos_and_indices(mesh, indices, halos, bounds)
self.masks_without_halos[idx] = create_mask_from_indices(indices)
mask = create_mask_from_indices(indices, halos)
self.domains[idx] = Domain(idx, mesh, mask, halos)
@property
def meta_data(self):
return {key: self.domains[key].mask for key in self.domains}
def copy(self, bounds=None, stencil=None, shift=None):
if stencil is None:
stencil = self.stencil
if bounds is None:
bounds = self.bounds
else:
bounds = intersect_bounds(self.bounds, bounds)
mesh = self.mesh.copy()
if shift is not None:
mesh.shift(shift)
p = Partition(mesh, self.partitions, stencil, bounds)
p.parent = self
return p
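# Rough usage sketch (assumed; the Mesh constructor and the exact stencil layout are
# not defined in this module, so the values below are placeholders only):
#
#   mesh = Mesh(...)                                    # an existing maui mesh
#   part = Partition(mesh, partitions=(2, 2), stencil=((1, 1), (1, 1)))
#   for idx, domain in part.domains.items():
#       print(idx, domain.mask)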
| 2.328125 | 2 |
shift_detector/utils/ucb_list.py | hpi-bp1819-naumann/shift-detector | 3 | 12795211 | <reponame>hpi-bp1819-naumann/shift-detector
# from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python
def block(character):
""" Return the Unicode block name for character, or None if character has no block.
from https://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python
:param character"""
assert isinstance(character, str) and len(character) == 1, repr(character)
cp = ord(character)
for start, end, name in blocks:
if start <= cp <= end:
return name
blocks = [(0, 127, 'Basic Latin'),
(128, 255, 'Latin-1 Supplement'),
(256, 383, 'Latin Extended-A'),
(384, 591, 'Latin Extended-B'),
(592, 687, 'IPA Extensions'),
(688, 767, 'Spacing Modifier Letters'),
(768, 879, 'Combining Diacritical Marks'),
(880, 1023, 'Greek and Coptic'),
(1024, 1279, 'Cyrillic'),
(1280, 1327, 'Cyrillic Supplement'),
(1328, 1423, 'Armenian'),
(1424, 1535, 'Hebrew'),
(1536, 1791, 'Arabic'),
(1792, 1871, 'Syriac'),
(1872, 1919, 'Arabic Supplement'),
(1920, 1983, 'Thaana'),
(1984, 2047, 'NKo'),
(2048, 2111, 'Samaritan'),
(2112, 2143, 'Mandaic'),
(2144, 2159, 'Syriac Supplement'),
(2208, 2303, 'Arabic Extended-A'),
(2304, 2431, 'Devanagari'),
(2432, 2559, 'Bengali'),
(2560, 2687, 'Gurmukhi'),
(2688, 2815, 'Gujarati'),
(2816, 2943, 'Oriya'),
(2944, 3071, 'Tamil'),
(3072, 3199, 'Telugu'),
(3200, 3327, 'Kannada'),
(3328, 3455, 'Malayalam'),
(3456, 3583, 'Sinhala'),
(3584, 3711, 'Thai'),
(3712, 3839, 'Lao'),
(3840, 4095, 'Tibetan'),
(4096, 4255, 'Myanmar'),
(4256, 4351, 'Georgian'),
(4352, 4607, 'Hang<NAME>'),
(4608, 4991, 'Ethiopic'),
(4992, 5023, 'Ethiopic Supplement'),
(5024, 5119, 'Cherokee'),
(5120, 5759, 'Unified Canadian Aboriginal Syllabics'),
(5760, 5791, 'Ogham'),
(5792, 5887, 'Runic'),
(5888, 5919, 'Tagalog'),
(5920, 5951, 'Hanunoo'),
(5952, 5983, 'Buhid'),
(5984, 6015, 'Tagbanwa'),
(6016, 6143, 'Khmer'),
(6144, 6319, 'Mongolian'),
(6320, 6399, 'Unified Canadian Aboriginal Syllabics Extended'),
(6400, 6479, 'Limbu'),
(6480, 6527, 'Tai Le'),
(6528, 6623, 'New Tai Lue'),
(6624, 6655, 'Khmer Symbols'),
(6656, 6687, 'Buginese'),
(6688, 6831, 'Tai Tham'),
(6832, 6911, 'Combining Diacritical Marks Extended'),
(6912, 7039, 'Balinese'),
(7040, 7103, 'Sundanese'),
(7104, 7167, 'Batak'),
(7168, 7247, 'Lepcha'),
(7248, 7295, 'Ol Chiki'),
(7296, 7311, 'Cyrillic Extended-C'),
(7312, 7359, 'Georgian Extended'),
(7360, 7375, 'Sundanese Supplement'),
(7376, 7423, 'Vedic Extensions'),
(7424, 7551, 'Phonetic Extensions'),
(7552, 7615, 'Phonetic Extensions Supplement'),
(7616, 7679, 'Combining Diacritical Marks Supplement'),
(7680, 7935, 'Latin Extended Additional'),
(7936, 8191, 'Greek Extended'),
(8192, 8303, 'General Punctuation'),
(8304, 8351, 'Superscripts and Subscripts'),
(8352, 8399, 'Currency Symbols'),
(8400, 8447, 'Combining Diacritical Marks for Symbols'),
(8448, 8527, 'Letterlike Symbols'),
(8528, 8591, 'Number Forms'),
(8592, 8703, 'Arrows'),
(8704, 8959, 'Mathematical Operators'),
(8960, 9215, 'Miscellaneous Technical'),
(9216, 9279, 'Control Pictures'),
(9280, 9311, 'Optical Character Recognition'),
(9312, 9471, 'Enclosed Alphanumerics'),
(9472, 9599, 'Box Drawing'),
(9600, 9631, 'Block Elements'),
(9632, 9727, 'Geometric Shapes'),
(9728, 9983, 'Miscellaneous Symbols'),
(9984, 10175, 'Dingbats'),
(10176, 10223, 'Miscellaneous Mathematical Symbols-A'),
(10224, 10239, 'Supplemental Arrows-A'),
(10240, 10495, 'Braille Patterns'),
(10496, 10623, 'Supplemental Arrows-B'),
(10624, 10751, 'Miscellaneous Mathematical Symbols-B'),
(10752, 11007, 'Supplemental Mathematical Operators'),
(11008, 11263, 'Miscellaneous Symbols and Arrows'),
(11264, 11359, 'Glagolitic'),
(11360, 11391, 'Latin Extended-C'),
(11392, 11519, 'Coptic'),
(11520, 11567, 'Georgian Supplement'),
(11568, 11647, 'Tifinagh'),
(11648, 11743, 'Ethiopic Extended'),
(11744, 11775, 'Cyrillic Extended-A'),
(11776, 11903, 'Supplemental Punctuation'),
(11904, 12031, 'CJK Radicals Supplement'),
(12032, 12255, 'Kangxi Radicals'),
(12272, 12287, 'Ideographic Description Characters'),
(12288, 12351, 'CJK Symbols and Punctuation'),
(12352, 12447, 'Hiragana'),
(12448, 12543, 'Katakana'),
(12544, 12591, 'Bopomofo'),
(12592, 12687, 'Hangul Compatibility Jamo'),
(12688, 12703, 'Kanbun'),
(12704, 12735, 'Bopomofo Extended'),
(12736, 12783, 'CJK Strokes'),
(12784, 12799, 'Katakana Phonetic Extensions'),
(12800, 13055, 'Enclosed CJK Letters and Months'),
(13056, 13311, 'CJK Compatibility'),
(13312, 19903, 'CJK Unified Ideographs Extension A'),
(19904, 19967, 'Yijing Hexagram Symbols'),
(19968, 40959, 'CJK Unified Ideographs'),
(40960, 42127, 'Yi Syllables'),
(42128, 42191, 'Yi Radicals'),
(42192, 42239, 'Lisu'),
(42240, 42559, 'Vai'),
(42560, 42655, 'Cyrillic Extended-B'),
(42656, 42751, 'Bamum'),
(42752, 42783, 'Modifier Tone Letters'),
(42784, 43007, 'Latin Extended-D'),
(43008, 43055, 'Syloti Nagri'),
(43056, 43071, 'Common Indic Number Forms'),
(43072, 43135, 'Phags-pa'),
(43136, 43231, 'Saurashtra'),
(43232, 43263, 'Devanagari Extended'),
(43264, 43311, '<NAME>'),
(43312, 43359, 'Rejang'),
(43360, 43391, 'Hangul Jamo Extended-A'),
(43392, 43487, 'Javanese'),
(43488, 43519, 'Myanmar Extended-B'),
(43520, 43615, 'Cham'),
(43616, 43647, 'Myanmar Extended-A'),
(43648, 43743, 'Tai Viet'),
(43744, 43775, 'Meetei Mayek Extensions'),
(43776, 43823, 'Ethiopic Extended-A'),
(43824, 43887, 'Latin Extended-E'),
(43888, 43967, 'Cherokee Supplement'),
(43968, 44031, '<NAME>'),
(44032, 55215, 'Hangul Syllables'),
(55216, 55295, 'Hangul Jamo Extended-B'),
(55296, 56191, 'High Surrogates'),
(56192, 56319, 'High Private Use Surrogates'),
(56320, 57343, 'Low Surrogates'),
(57344, 63743, 'Private Use Area'),
(63744, 64255, 'CJK Compatibility Ideographs'),
(64256, 64335, 'Alphabetic Presentation Forms'),
(64336, 65023, 'Arabic Presentation Forms-A'),
(65024, 65039, 'Variation Selectors'),
(65040, 65055, 'Vertical Forms'),
(65056, 65071, 'Combining Half Marks'),
(65072, 65103, 'CJK Compatibility Forms'),
(65104, 65135, 'Small Form Variants'),
(65136, 65279, 'Arabic Presentation Forms-B'),
(65280, 65519, 'Halfwidth and Fullwidth Forms'),
(65520, 65535, 'Specials'),
(65536, 65663, 'Linear B Syllabary'),
(65664, 65791, 'Linear B Ideograms'),
(65792, 65855, 'Aegean Numbers'),
(65856, 65935, 'Ancient Greek Numbers'),
(65936, 65999, 'Ancient Symbols'),
(66000, 66047, 'Phaistos Disc'),
(66176, 66207, 'Lycian'),
(66208, 66271, 'Carian'),
(66272, 66303, 'Coptic Epact Numbers'),
(66304, 66351, 'Old Italic'),
(66352, 66383, 'Gothic'),
(66384, 66431, 'Old Permic'),
(66432, 66463, 'Ugaritic'),
(66464, 66527, 'Old Persian'),
(66560, 66639, 'Deseret'),
(66640, 66687, 'Shavian'),
(66688, 66735, 'Osmanya'),
(66736, 66815, 'Osage'),
(66816, 66863, 'Elbasan'),
(66864, 66927, 'Caucasian Albanian'),
(67072, 67455, 'Linear A'),
(67584, 67647, 'Cypriot Syllabary'),
(67648, 67679, 'Imperial Aramaic'),
(67680, 67711, 'Palmyrene'),
(67712, 67759, 'Nabataean'),
(67808, 67839, 'Hatran'),
(67840, 67871, 'Phoenician'),
(67872, 67903, 'Lydian'),
(67968, 67999, 'Meroitic Hieroglyphs'),
(68000, 68095, 'Meroitic Cursive'),
(68096, 68191, 'Kharoshthi'),
(68192, 68223, 'Old South Arabian'),
(68224, 68255, 'Old North Arabian'),
(68288, 68351, 'Manichaean'),
(68352, 68415, 'Avestan'),
(68416, 68447, 'Inscriptional Parthian'),
(68448, 68479, 'Inscriptional Pahlavi'),
(68480, 68527, '<NAME>'),
(68608, 68687, 'Old Turkic'),
(68736, 68863, 'Old Hungarian'),
(68864, 68927, '<NAME>'),
(69216, 69247, 'Rumi Numeral Symbols'),
(69376, 69423, 'Old Sogdian'),
(69424, 69487, 'Sogdian'),
(69600, 69631, 'Elymaic'),
(69632, 69759, 'Brahmi'),
(69760, 69839, 'Kaithi'),
(69840, 69887, '<NAME>'),
(69888, 69967, 'Chakma'),
(69968, 70015, 'Mahajani'),
(70016, 70111, 'Sharada'),
(70112, 70143, 'Sinhala Archaic Numbers'),
(70144, 70223, 'Khojki'),
(70272, 70319, 'Multani'),
(70320, 70399, 'Khudawadi'),
(70400, 70527, 'Grantha'),
(70656, 70783, 'Newa'),
(70784, 70879, 'Tirhuta'),
(71040, 71167, 'Siddham'),
(71168, 71263, 'Modi'),
(71264, 71295, 'Mongolian Supplement'),
(71296, 71375, 'Takri'),
(71424, 71487, 'Ahom'),
(71680, 71759, 'Dogra'),
(71840, 71935, '<NAME>'),
(72096, 72191, 'Nandinagari'),
(72192, 72271, '<NAME>'),
(72272, 72367, 'Soyombo'),
(72384, 72447, '<NAME>'),
(72704, 72815, 'Bhaiksuki'),
(72816, 72895, 'Marchen'),
(72960, 73055, '<NAME>'),
(73056, 73135, '<NAME>'),
(73440, 73471, 'Makasar'),
(73664, 73727, 'Tamil Supplement'),
(73728, 74751, 'Cuneiform'),
(74752, 74879, 'Cuneiform Numbers and Punctuation'),
(74880, 75087, 'Early Dynastic Cuneiform'),
(77824, 78895, 'Egyptian Hieroglyphs'),
(78896, 78911, 'Egyptian Hieroglyph Format Controls'),
(82944, 83583, 'Anatolian Hieroglyphs'),
(92160, 92735, 'Bamum Supplement'),
(92736, 92783, 'Mro'),
(92880, 92927, '<NAME>'),
(92928, 93071, '<NAME>'),
(93760, 93855, 'Medefaidrin'),
(93952, 94111, 'Miao'),
(94176, 94207, 'Ideographic Symbols and Punctuation'),
(94208, 100351, 'Tangut'),
(100352, 101119, 'Tangut Components'),
(110592, 110847, 'Kana Supplement'),
(110848, 110895, 'Kana Extended-A'),
(110896, 110959, 'Small Kana Extension'),
(110960, 111359, 'Nushu'),
(113664, 113823, 'Duployan'),
(113824, 113839, 'Shorthand Format Controls'),
(118784, 119039, 'Byzantine Musical Symbols'),
(119040, 119295, 'Musical Symbols'),
(119296, 119375, 'Ancient Greek Musical Notation'),
(119520, 119551, 'Mayan Numerals'),
(119552, 119647, 'Tai Xuan Jing Symbols'),
(119648, 119679, 'Counting Rod Numerals'),
(119808, 120831, 'Mathematical Alphanumeric Symbols'),
(120832, 121519, 'Sutton SignWriting'),
(122880, 122927, 'Glagolitic Supplement'),
(123136, 123215, '<NAME>'),
(123584, 123647, 'Wancho'),
(124928, 125151, '<NAME>'),
(125184, 125279, 'Adlam'),
(126064, 126143, 'Indic Siyaq Numbers'),
(126208, 126287, 'Ottoman Siyaq Numbers'),
(126464, 126719, 'Arabic Mathematical Alphabetic Symbols'),
(126976, 127023, 'Mahjong Tiles'),
(127024, 127135, 'Domino Tiles'),
(127136, 127231, 'Playing Cards'),
(127232, 127487, 'Enclosed Alphanumeric Supplement'),
(127488, 127743, 'Enclosed Ideographic Supplement'),
(127744, 128511, 'Miscellaneous Symbols and Pictographs'),
(128512, 128591, 'Emoticons'),
(128592, 128639, 'Ornamental Dingbats'),
(128640, 128767, 'Transport and Map Symbols'),
(128768, 128895, 'Alchemical Symbols'),
(128896, 129023, 'Geometric Shapes Extended'),
(129024, 129279, 'Supplemental Arrows-C'),
(129280, 129535, 'Supplemental Symbols and Pictographs'),
(129536, 129647, 'Chess Symbols'),
(129648, 129791, 'Symbols and Pictographs Extended-A'),
(131072, 173791, 'CJK Unified Ideographs Extension B'),
(173824, 177983, 'CJK Unified Ideographs Extension C'),
(177984, 178207, 'CJK Unified Ideographs Extension D'),
(178208, 183983, 'CJK Unified Ideographs Extension E'),
(183984, 191471, 'CJK Unified Ideographs Extension F'),
(194560, 195103, 'CJK Compatibility Ideographs Supplement'),
(195104, 917503, 'No Unicode Block'),
(917504, 917631, 'Tags'),
(917760, 917999, 'Variation Selectors Supplement'),
(983040, 1048575, 'Supplementary Private Use Area-A'),
(1048576, 1114111, 'Supplementary Private Use Area-B')]
| 3.25 | 3 |
subsequence_tree.py | luvalenz/time-series-variability-tree | 1 | 12795212 | <filename>subsequence_tree.py
import numpy as np
from sklearn.cluster import AffinityPropagation
#import pydotplus as pydot
from collections import Counter
from distance_utils import time_series_twed
import pandas as pd
class SubsequenceTree:
def __init__(self, max_level, prototype_subsequences_list,
affinities, db_time_series,
time_window, time_step,
clustering_threshold=1, weighted=True):
self.time_window = time_window
self.time_step = time_step
self.max_level = max_level
#self.graph = pydot.Dot(graph_type='graph')
self.query_ts = None
self.query_score_chart = None
self.node_shortcuts = None
self.weights = None
self.d_data_frame = None
self._original_time_series_ids = None
self._query_vector = None
self.n_nodes = 0
self._weighted = weighted
prototype_subsequences = np.array(prototype_subsequences_list)
self._build_tree(affinities, prototype_subsequences, clustering_threshold)
self._populate_tree(db_time_series)
self._build_node_shorcuts()
self._build_weights_vector()
self._build_d_data_frame()
@property
def n_subsequences(self):
return len(self.db_subsequences_dict)
@property
def original_time_series_ids(self):
if self._original_time_series_ids is None:
self._original_time_series_ids = list(self.root.inverted_file)
return self._original_time_series_ids
@property
def n_original_time_series(self):
return len(self.original_time_series_ids)
@property
def query_vector(self):
if self._query_vector is None:
q_vector = np.array([node.q for node in self.node_shortcuts])
q_norm = np.linalg.norm(q_vector)
self._query_vector = q_vector / q_norm
return self._query_vector
@property
def _queried_time_series_ids(self):
return list(set().union(*self._queried_time_series_ids_iterator()))
def prune(self):
self._build_node_shorcuts(True)
self._build_weights_vector()
self._build_d_data_frame()
def _queried_time_series_ids_iterator(self):
for node in self.node_shortcuts:
if node.is_leaf and node.n_query_subsequences > 0:
yield node.inverted_file.keys()
def get_next_subsequence_id(self):
id_ = self.next_subsequence_id
self.next_subsequence_id += 1
return id_
def make_query(self, time_series, timer=None):
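        # Slide a window over the query series, push each subsequence down the tree to
        # build a weighted query vector, then rank database time series by the dot
        # product of that vector with their normalised d-vectors (most similar first).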
if timer is not None:
timer.start()
subsequences = time_series.run_sliding_window(self.time_window, self.time_step)
if timer is not None:
timer.stop()
timer.start()
for node in self.node_shortcuts:
node.n_query_subsequences = 0
if timer is not None:
timer.stop()
timer.start()
self._query_vector = None
for subsequence in subsequences:
self.root.add_query_subsequence(subsequence)
if timer is not None:
timer.stop()
timer.start()
not_zero_node_ids = np.where(self.query_vector != 0)[0]
not_zero_query_vector = self.query_vector[not_zero_node_ids]
not_zero_ts_ids = self._queried_time_series_ids
not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids]
if timer is not None:
timer.stop()
timer.start()
score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1)
#score = 2-2*score
if timer is not None:
timer.stop()
timer.start()
order = np.argsort(score)
result = not_zero_d_dataframe.index.values[order]
if timer is not None:
timer.stop()
return result
def get_db_subsequences_dict(self):
def _get_db_subsequences_dict():
return self.db_subsequences_dict
return _get_db_subsequences_dict
def get_next_node_id(self):
def _get_next_node_id():
n_nodes = self.n_nodes
self.n_nodes += 1
return n_nodes
return _get_next_node_id
def get_original_time_series_ids(self):
def _get_original_time_series_ids():
return self.original_time_series_ids
return _get_original_time_series_ids
# def save_graph(self):
# self.generate_graph()
# self.graph.write_png('graph.png')
#
# def generate_graph(self):
# self.root.add_to_graph(None, self.graph)
def _build_tree(self, affinities, prototypes,
clustering_threshold):
self.root = Node(0, self.max_level, prototypes, affinities, None,
None, self.get_next_node_id(),
self.get_original_time_series_ids(),
clustering_threshold, weighted=self._weighted)
def _populate_tree(self, db_time_series):
print("populating tree")
print('time window')
print(self.time_window)
print('time step')
print(self.time_step)
print(type(db_time_series))
print(db_time_series)
for i, ts in enumerate(db_time_series):
print(ts)
for subsequence in ts.run_sliding_window(self.time_window, self.time_step):
#print(subsequence)
self._add_subsequence(subsequence)
print("{0} time series added".format(i))
def _build_node_shorcuts(self, just_leaves=False):
shortcut_dict = {}
self.root.add_shortcut_to_dict(shortcut_dict)
shortcut_list = [v for v in shortcut_dict.values()
if not just_leaves or v.is_leaf]
self.node_shortcuts = shortcut_list
def _build_weights_vector(self):
weights_list = [node.weight for node in self.node_shortcuts]
self.weights = np.array(weights_list)
def _build_d_data_frame(self, just_leaves=False):
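        # Stack each node's d-vector into a (time series x nodes) matrix and L2-normalise
        # every row so that query scoring reduces to a dot product per time series.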
d_list = [node.d_vector for node in self.node_shortcuts]
d_matrix = np.column_stack(d_list)
d_norm = np.linalg.norm(d_matrix, axis=1)
d_matrix = (d_matrix.T / d_norm).T
d_matrix[d_matrix == np.inf] = 0
self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix),
index=self.original_time_series_ids)
def _add_subsequence(self, subsequence):
self.root.add_db_subsequence(subsequence)
def calculate_inverted_files(self):
return self.root.inverted_file
class Node:
def __init__(self, level, max_level, prototypes, affinities, center,
parent, next_node_id_getter, original_time_series_ids_getter,
clustering_threshold, weighted=True):
self.level = level
self._weighted = weighted
self.max_level = max_level
self.center = center
self.parent = parent
self.get_original_time_series_ids_in_tree = original_time_series_ids_getter
self._id = next_node_id_getter()
parent_id = parent._id if parent is not None else None
print("-- NODE {0} --".format(self._id))
print("parent = {0}".format(parent_id))
print("level {0}".format(level))
print("prototypes length = {0}".format(len(prototypes)))
shape = affinities.shape if affinities is not None else None
print("affinities shape = {0}".format(shape))
print("")
self.n_query_subsequences = 0
self.children = None
self._inverted_file = None
if clustering_threshold is None or clustering_threshold <= 1:
clustering_threshold = 1
if level + 1 == max_level or len(prototypes) <= clustering_threshold:
self._generate_inverted_file()
else:
self._generate_children(affinities, next_node_id_getter, prototypes,
clustering_threshold)
@property
def is_leaf(self):
return self.children is None
@property
def inverted_file(self):
if self._inverted_file is None:
inverted_file = Counter()
for child in self.children:
inverted_file += child.inverted_file
self._inverted_file = inverted_file
return self._inverted_file
@property
def n_original_time_series_in_node(self):
return len(self.inverted_file)
@property
def n_original_time_series_in_tree(self):
return len(self.get_original_time_series_ids_in_tree())
@property
def weight(self):
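        # Inverse-document-frequency style weight: log(time series in the tree /
        # time series reaching this node); 0 for empty nodes, forced to 1 when unweighted.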
w = 0
if self.n_original_time_series_in_node != 0:
w = np.log(self.n_original_time_series_in_tree/
self.n_original_time_series_in_node)
try:
if not self._weighted:
w = 1
except AttributeError:
print("Attribute Error caught")
print("weight = {0}".format(w))
return w
@property
def m_vector(self):
m = np.zeros(self.n_original_time_series_in_tree)
ids = self.get_original_time_series_ids_in_tree()
for key, value in self.inverted_file.items():
index = ids.index(key)
m[index] = value
return m
@property
def q(self):
if self.n_query_subsequences is None:
return None
return self.n_query_subsequences*self.weight
@property
def d_vector(self):
return self.weight*self.m_vector
def add_shortcut_to_dict(self, shortcut_dict):
shortcut_dict[self._id] = self
if not self.is_leaf:
for child in self.children:
child.add_shortcut_to_dict(shortcut_dict)
@staticmethod
def run_affinity_propagation(affinities):
smin = np.min(affinities)
smax = np.max(affinities)
candidate_preferences = np.linspace(smin, smax, 10)
ap = AffinityPropagation(affinity='precomputed')
for preference in candidate_preferences:
ap.preference = preference
ap.fit(affinities)
indices = ap.cluster_centers_indices_
if indices is not None and len(indices) > 1:
break
return ap
def _generate_children(self, affinities,
next_node_id_getter, prototypes, clustering_threshold):
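        # Cluster the prototypes with affinity propagation; each exemplar becomes the
        # centre of a child node, which recursively receives its cluster's prototypes.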
ap = self.run_affinity_propagation(affinities)
indices = ap.cluster_centers_indices_
n_clusters = len(ap.cluster_centers_indices_) if indices is not None else None
print("n_clusters = {0}".format(n_clusters))
if n_clusters is None or n_clusters == 1:
cluster_centers = prototypes
self._generate_children_border_case(next_node_id_getter,
cluster_centers, clustering_threshold)
return
cluster_centers = prototypes[ap.cluster_centers_indices_]
labels = ap.labels_
children = []
for cluster_label, center in zip(range(n_clusters),
cluster_centers):
indices = np.where(labels==cluster_label)[0]
child_prototypes = prototypes[indices]
child_affinities = affinities[indices][:, indices]
            child = Node(self.level + 1, self.max_level, child_prototypes,
                         child_affinities, center,
                         self, next_node_id_getter,
                         self.get_original_time_series_ids_in_tree,
                         clustering_threshold, weighted=self._weighted)
children.append(child)
self.children = children
def _generate_children_border_case(self, next_node_id_getter,
cluster_centers, clustering_threshold):
children = []
for center in cluster_centers:
child_prototypes = [center]
child_affinities = None
            child = Node(self.level + 1, self.max_level, child_prototypes,
                         child_affinities, center,
                         self, next_node_id_getter,
                         self.get_original_time_series_ids_in_tree,
                         clustering_threshold, weighted=self._weighted)
children.append(child)
self.children = children
def add_query_subsequence(self, subsequence):
self.n_query_subsequences += 1
if not self.is_leaf:
distances = [time_series_twed(subsequence, node.center)
for node in self.children]
nearest_child = self.children[np.argmin(distances)]
nearest_child.add_query_subsequence(subsequence)
def add_db_subsequence(self, subsequence):
if self.is_leaf:
counter = Counter({subsequence.original_id: 1})
self._inverted_file += counter
else:
distances = [time_series_twed(subsequence, node.center)
for node in self.children]
nearest_child = self.children[np.argmin(distances)]
nearest_child.add_db_subsequence(subsequence)
def _generate_inverted_file(self):
# original_time_series_id = (subsequence.original_id
# for subsequence in prototypes)
# self._inverted_file = Counter(original_time_series_id)
self._inverted_file = Counter()
# def add_to_graph(self, parent_graph_node, graph):
# graph_node = pydot.Node(str(self))
# graph.add_node(graph_node)
# if parent_graph_node is not None:
# graph.add_edge(pydot.Edge(parent_graph_node,
# graph_node))
# if self.children is not None:
# for child in self.children:
# child.add_to_graph(graph_node, graph) | 2.546875 | 3 |
tests/contrib/backends/hbase/test_domain_cache.py | buildfail/frontera | 1,267 | 12795213 | <filename>tests/contrib/backends/hbase/test_domain_cache.py<gh_stars>1000+
# -*- coding: utf-8 -*-
from frontera.contrib.backends.hbase.domaincache import DomainCache
from happybase import Connection
import logging
import unittest
class TestDomainCache(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.conn = Connection(host="hbase-docker")
if b'domain_metadata' not in self.conn.tables():
self.conn.create_table('domain_metadata', {
'm': {'max_versions': 1, 'block_cache_enabled': 1,}
})
t = self.conn.table('domain_metadata')
t.delete('d1')
t.delete('d2')
t.delete('d3')
t.delete('d4')
def test_domain_cache_both_generations(self):
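        # Inserting more entries than the first generation holds should evict earlier
        # keys into the second generation; all four keys must still be readable.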
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
# eviction should happen
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc['d1'] == {'domain': 1}
assert dc['d2'] == {'domain': 2}
assert dc['d3'] == {'domain': [3, 2, 1]}
assert dc['d4'] == {'domain': 4}
def test_domain_cache_get_with_default(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc.get('d1', {}) == {'domain': 1}
assert dc.get('d3', {}) == {'domain': [3, 2, 1]}
def test_domain_cache_setdefault(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc.setdefault('d1', {}) == {'domain': 1}
assert dc.setdefault('d5', {'domain': 6}) == {'domain': 6}
dc.flush()
assert dc.setdefault('d3', {}) == {'domain': [3, 2, 1]}
def test_domain_cache_setdefault_with_second_gen_flush(self):
dc = DomainCache(2, self.conn, 'domain_metadata', batch_size=3)
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
dc.setdefault('d1', {})['domain'] += 1
assert dc.setdefault('d1', {}) == {'domain': 2}
def test_empty_key(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
with self.assertRaises(KeyError):
dc[''] = {'test':1}
def test_deletion(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
with self.assertRaises(KeyError):
del dc['d1']
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
del dc['d1'] # second gen
del dc['d3'] # first gen
dc.flush()
del dc['d4'] # hbase
def test_contains(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert 'd1' in dc # second gen
assert 'd3' in dc # first gen
dc.flush()
assert 'd4' in dc
def test_pop(self):
dc = DomainCache(2, self.conn, 'domain_metadata')
dc['d1'] = {'domain': 1}
dc['d2'] = {'domain': 2}
dc['d3'] = {'domain': [3, 2, 1]}
dc['d4'] = {'domain': 4}
assert dc.pop('d1') == {'domain': 1}
assert 'd1' not in dc
assert dc.pop('d3') == {'domain': [3, 2, 1]}
assert 'd3' not in dc
dc.flush()
assert dc.pop('d4') == {'domain': 4}
assert 'd4' not in dc | 2.140625 | 2 |
opnsense_cli/callbacks/click.py | jan-win1993/opn-cli | 13 | 12795214 | import yaml
import os
from opnsense_cli.facades.commands.base import CommandFacade
from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory
from opnsense_cli.formats.base import Format
"""
Click callback methods
See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications
"""
def defaults_from_configfile(ctx, param, filename):
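    # Load option defaults from a YAML file into ctx.default_map.
    # Hypothetical wiring (not part of this module), e.g.:
    #   @click.option('-c', '--config', callback=defaults_from_configfile,
    #                 is_eager=True, expose_value=False)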
def dict_from_yaml(path):
with open(path, 'r') as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.SafeLoader)
return data
options = dict_from_yaml(os.path.expanduser(filename))
ctx.default_map = options
def expand_path(ctx, param, filename):
return os.path.expanduser(filename)
def available_formats():
return CliOutputFormatFactory._keymap.keys()
def formatter_from_formatter_name(ctx, param, format_name) -> Format:
factory = CliOutputFormatFactory(format_name)
return factory.get_class()
def bool_as_string(ctx, param, value):
if type(value) == bool:
return str(int(value))
return value
def tuple_to_csv(ctx, param, value):
if param.multiple and not value:
return None
if type(value) == tuple:
return ",".join(value)
return value
def comma_to_newline(ctx, param, value):
if type(value) == str and "," in value:
return value.replace(",", "\n")
return value
def int_as_string(ctx, param, value):
if type(value) == int:
return str(value)
return value
def resolve_linked_names_to_uuids(ctx, param, value):
option_name = param.opts[0].replace("--", "")
resolve_map = ctx.obj.uuid_resolver_map[option_name]
if value and isinstance(ctx.obj, CommandFacade):
return ctx.obj.resolve_linked_uuids(resolve_map, value)
return value
| 2.3125 | 2 |
ceda_intake/database_handler.py | cedadev/ceda-intake | 0 | 12795215 | import psycopg2
import os
class DBHandler:
_max_id_length = 255
_max_record_length = 255
def __init__(self, table_name="intake_records", error_table_name="scan_errors"):
"""
:param table_name: (str) Optional string name of the main db table.
:param error_table_name: (str) Optional string name for the errors db table.
"""
self.connection_info = os.environ.get("CEDA_INTAKE_DB_SETTINGS")
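        # Expected format (example values only):
        # CEDA_INTAKE_DB_SETTINGS="dbname=intake user=ceda host=localhost password=***"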
if not self.connection_info:
            raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS '
                           'in the format "dbname=<db_name> user=<user_name> '
                           'host=<host_name> password=<password>"')
self._test_connection()
self.table_name = table_name
self.error_table_name = error_table_name
self._create_tables()
def _test_connection(self):
try:
conn = psycopg2.connect(self.connection_info)
except psycopg2.Error as err:
print(err)
            raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should be '
                             'in the format "dbname=<db_name> user=<user_name> '
                             'host=<host_name> password=<password>"')
conn.close()
def _create_tables(self):
"""
Creates tables if they don't already exist.
"""
# Create main table
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"CREATE TABLE IF NOT EXISTS {self.table_name} ("
f" id varchar({self._max_id_length}) PRIMARY KEY, "
f" record varchar({self._max_record_length}) NOT NULL"
f");")
conn.commit()
cur.execute(f"CREATE TABLE IF NOT EXISTS {self.error_table_name} ("
f" id varchar({self._max_id_length}) PRIMARY KEY, "
f" record varchar({self._max_record_length}) NOT NULL"
f");")
conn.commit()
def _delete_tables(self):
"""
Drops the database tables
"""
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"DROP TABLE {self.table_name};")
conn.commit()
cur.execute(f"DROP TABLE {self.error_table_name};")
conn.commit()
def get_record(self, identifier):
"""
Selects the record of the job with the identifier parsed
and returns it
:param identifier: (str) Identifier of the job record
:return: (str) Record of job
"""
query = f"SELECT record FROM {self.table_name} " \
f"WHERE id='{identifier}';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
if cur.rowcount > 0:
return cur.fetchone()[0]
return None
def get_all_records(self):
"""
:return: (dict) Dictionary of all job identifiers mapped to their respective records
"""
query = f"SELECT * FROM {self.table_name}"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
record_dict = {}
for (name, record) in cur:
record_dict[name] = record
return record_dict
def get_successful_runs(self):
"""
:return: (str list) Returns a list of the identifiers of all successful runs
"""
query = f"SELECT id FROM {self.table_name} " \
"WHERE record='success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
return [name[0] for name in cur]
def get_failed_runs(self):
"""
        :return: (dict) Dictionary of error types mapped to lists of job identifiers that failed with that error
"""
query = f"SELECT id, record FROM {self.table_name} " \
"WHERE record<>'success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
failures = {}
for (name, record) in cur:
failures.setdefault(record, [])
failures[record].append(name)
return failures
def delete_record(self, identifier):
"""
Deletes entry specified by the given identifier
from the database
:param identifier: (str) Identifier of the job
"""
query = f"DELETE FROM {self.table_name} " \
f"WHERE id='{identifier}';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
def delete_all_records(self):
"""
Deletes all entries from the table
"""
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"DELETE FROM {self.table_name};")
conn.commit()
def ran_successfully(self, identifier):
"""
Returns true / false on whether the record with this
identifier is successful
:param identifier: (str) Identifier of the job record
:return: (bool) Boolean on if job ran successfully
"""
query = f"SELECT record FROM {self.table_name} " \
f"WHERE id='{identifier}';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
record = cur.fetchone()
if record is not None:
return record[0] == 'success'
return False
def count_records(self):
"""
:return: (int) Number of records in the table
"""
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"SELECT COUNT(*) FROM {self.table_name};")
return cur.fetchone()[0]
def count_successes(self):
"""
        :return: (int) Number of successful records in the table
"""
query = f"SELECT COUNT(*) FROM {self.table_name} " \
"WHERE record='success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchone()[0]
def count_failures(self):
"""
:return: (int) Number of failed records in the table
"""
query = f"SELECT COUNT(*) FROM {self.table_name} " \
"WHERE record<>'success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchone()[0]
    def batch_insert(self, records):
        """
        Batch inserts (identifier, record) pairs into the table using
        psycopg2's execute_values, e.g.
        batch_insert([('job-1', 'success'), ('job-2', 'failure')])
        """
        from psycopg2.extras import execute_values
        query = f"INSERT INTO {self.table_name} VALUES %s;"
        with psycopg2.connect(self.connection_info) as conn:
            with conn.cursor() as cur:
                execute_values(cur, query, records)
                conn.commit()
def insert_success(self, identifier):
"""
Inserts an entry into the table with a given identifier
and the record 'success'
:param identifier: (str) Identifier of the job
"""
if self.get_record(identifier):
self.delete_record(identifier)
query = f"INSERT INTO {self.table_name} " \
f"VALUES ('{identifier}', 'success');"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
def insert_failure(self, identifier, error_type='failure'):
"""
Inserts an entry into the table with a given identifier
and erroneous record
:param identifier: (str) Identifier of the job
:param error_type: (str) Record of the job
"""
if self.get_record(identifier):
self.delete_record(identifier)
error_type = error_type[:self._max_record_length]
query = f"INSERT INTO {self.table_name} " \
f"VALUES ('{identifier}', '{error_type}');"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
| 3.3125 | 3 |
cogs/util/file_handling.py | dd0lynx/Toast-Bot | 0 | 12795216 | <filename>cogs/util/file_handling.py<gh_stars>0
import os, os.path
import errno
import json
import sys
import traceback
def mkdir_p(path):
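    # Create the directory tree, ignoring "already exists" errors
    # (roughly equivalent to os.makedirs(path, exist_ok=True) on Python 3.2+).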
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def load_json(path):
data = {}
try:
with open(path) as f:
data = json.load(f)
print(f'{path} successfully loaded')
except:
print('Could not load json')
return data
def save_json(path, data):
mkdir_p(os.path.dirname(path))
with open(path, 'w') as f:
json.dump(data, f)
def load_cog(bot, cog):
try:
bot.load_extension(f'cogs.{cog}')
print(f'Loaded extension {cog}.')
return True
except Exception as e:
print(f'Failed to load extension {cog}.', file=sys.stderr)
traceback.print_exc()
return False
def unload_cog(bot, cog):
try:
bot.unload_extension(f'cogs.{cog}')
print(f'Unloaded extension {cog}.')
return True
except Exception as e:
print(f'Failed to unload extension {cog}.', file=sys.stderr)
traceback.print_exc()
return False
| 2.671875 | 3 |
tests/cv/utils/test_iou.py | SkalskiP/onemetric | 48 | 12795217 | from contextlib import ExitStack as DoesNotRaise
from typing import Tuple, Optional
import numpy as np
import pytest
from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch
@pytest.mark.parametrize(
"box_true, box_detection, expected_result, exception",
[
(None, None, None, pytest.raises(ValueError)),
((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)),
((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)),
([0., 0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)),
((0., 0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()),
((0, 1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()),
((0., 0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()),
((1., 0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()),
((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()),
((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()),
((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()),
((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()),
((0., 0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()),
((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()),
((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise())
]
)
def test_box_iou(
box_true: Tuple[float, float, float, float],
box_detection: Tuple[float, float, float, float],
expected_result: Optional[float],
exception: Exception
) -> None:
with exception:
result = box_iou(box_true=box_true, box_detection=box_detection)
assert result == expected_result
@pytest.mark.parametrize(
"boxes_true, boxes_detection, expected_result, exception",
[
(
None,
np.array([
[0., 0.25, 1., 1.25]
]),
None,
pytest.raises(ValueError)
),
(
np.array([
[0., 0.25, 1., 1.25]
]),
None,
None,
pytest.raises(ValueError)
),
(
np.array([
[0., 0., 1., 1.],
[2., 2., 2.5, 2.5]
]),
np.array([
[0., 0., 1., 1.],
[2., 2., 2.5, 2.5]
]),
np.array([
[1., 0.],
[0., 1.]
]),
DoesNotRaise()
),
(
np.array([
[0., 0., 1., 1.],
[0., 0.75, 1., 1.75]
]),
np.array([
[0., 0.25, 1., 1.25]
]),
np.array([
[0.6],
[1/3]
]),
DoesNotRaise()
),
(
np.array([
[0., 0., 1., 1.],
[0., 0.75, 1., 1.75]
]),
np.array([
[0., 0.25, 1., 1.25],
[0., 0.75, 1., 1.75],
[1., 1., 2., 2.]
]),
np.array([
[0.6, 1/7, 0],
[1/3, 1., 0]
]),
DoesNotRaise()
)
]
)
def test_box_iou_batch(
boxes_true: np.ndarray,
boxes_detection: np.ndarray,
expected_result: Optional[float],
exception: Exception
) -> None:
with exception:
result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection)
np.testing.assert_array_equal(result, expected_result)
QUARTER_MASK = np.zeros((10, 10)).astype('uint8')
QUARTER_MASK[0:5, 0:5] = 1
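# QUARTER_MASK covers only the top-left 5x5 quadrant of a 10x10 mask, so its IoU with an
# all-ones mask is 25/100 = 0.25 (the final parametrized case below).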
@pytest.mark.parametrize(
"mask_true, mask_detection, expected_result, exception",
[
(None, None, None, pytest.raises(ValueError)),
(np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)),
(np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)),
(np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)),
(np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)),
(np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()),
(np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()),
(np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()),
(np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()),
(np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise())
]
)
def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None:
with exception:
result = mask_iou(mask_true=mask_true, mask_detection=mask_detection)
assert result == expected_result
| 2.109375 | 2 |
Synapse/Synapse Unsupervised Model.py | Zeel2864/Synapse_ML | 1 | 12795218 |
"""
@author: <NAME>,<NAME>
"""
import numpy as np
import streamlit as st
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
st.title("Synapse Unsupervised Models")
uploaded_file = st.file_uploader("Choose a csv file", type="csv")
if uploaded_file is not None:
data = pd.read_csv(uploaded_file)
st.write(data)
if uploaded_file is not None:
drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list())
X = data.drop(drop_column,axis = 1)
st.header('X : Features')
st.write(X)
if uploaded_file is not None:
if st.sidebar.checkbox("Feature Normalization"):
X = (X - np.mean(X))/np.std(X)
st.header("X : Features (Normalized)")
st.write(X)
class Kmeans:
def initialize_var(self,X,K=3):
X = np.array(X)
m,n = X.shape
c = np.random.randn(K,n)
return X,c,K
def assignment_move(self,X,c,K):
m = X.shape[0]
idx = np.zeros(m)
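        # Lloyd's iterations (fixed at 10 here): assign every point to its nearest centroid
        # by squared Euclidean distance, then move each centroid to the mean of its points.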
for o in range(10):
for i in range(m):
temp = np.zeros(K)
for j in range(K):
temp[j] = np.sum((X[i,:] - c[j,:]) ** 2)
idx[i] = np.argmin(temp)
for p in range(K):
points = [X[j] for j in range(len(X)) if idx[j] == p]
c[p] = np.mean(points, axis=0)
return idx,c
def test(self,X,K=3):
self.X,c,self.K = self.initialize_var(X,K)
self.idx,self.c = self.assignment_move(self.X,c,self.K)
X_ = pd.DataFrame(self.X)
idx_ = pd.DataFrame(self.idx)
data = pd.concat([X_,idx_],axis =1)
return self.c,data
def plot_clusters(self,d):
a={}
if self.X.shape[1]==2:
for i in range(2):
a['a'+str(i+1)] = self.X[:,i:i+1]
a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],)
a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],)
fig = go.Figure(data=go.Scatter(x=a['a1'],
y=a['a2'],
mode='markers',
marker=dict(color=self.idx)
))
st.plotly_chart(fig)
        elif self.X.shape[1]==3:
            d.columns = ['x','y','z','l']
            fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l')
            st.plotly_chart(fig)
        else:
            st.error("Clusters can only be plotted for 2 or 3 feature columns")
class PCA:
def initialization(self,X):
X = np.array(X)
return X
def train(self,X):
self.X = self.initialization(X)
self.covariance_matrix = np.cov(X.T)
self.u,s,v = np.linalg.svd(self.covariance_matrix)
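        # Singular values of the covariance matrix are the per-component variances;
        # accumulating them gives the cumulative explained-variance ratio per component.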
sum_s = np.sum(s)
self.variance_exp= []
k = 0
for i in s:
k = i+k
variance = k/sum_s
self.variance_exp.append(variance)
def K_components(self,n=2):
self.X= np.dot(self.X,self.u[:,:n])
return self.X
def variance_explained(self):
return self.variance_exp
if uploaded_file is not None:
Algorithms = st.sidebar.selectbox(
'Algorithm',
('None','K-means Clustering','Principal Component Analysis')
)
if uploaded_file is not None:
if Algorithms == 'K-means Clustering':
k_value = st.sidebar.number_input('Enter K value',value = 3)
train_button = st.sidebar.checkbox("Click Here for training")
if train_button:
d = Kmeans()
c,data = d.test(X,k_value)
st.subheader("Centroids")
st.write(c)
st.subheader("Clustering Data with labels")
st.write(data)
d.plot_clusters(data)
#except : raise ValueError('graph not computed with NaN values or no. of K value exceeds try again')
if Algorithms == 'Principal Component Analysis':
k_value = st.sidebar.number_input('Enter K components value',value = 3)
train_button = st.sidebar.checkbox("Click Here for training")
if train_button:
d = PCA()
d.train(X)
st.header('Variance Explained')
st.markdown(d.variance_explained())
st.info('Always Use Feature Normalization when applying PCA')
X_pca = d.K_components(k_value)
st.header('X : Feature (PCA)')
st.write(X_pca)
| 3.171875 | 3 |
main.py | digitaltembo/kismet | 0 | 12795219 | <filename>main.py
from application import app
app = app
| 1.132813 | 1 |
aiopoke/objects/resources/moves/move.py | beastmatser/aiopokeapi | 3 | 12795220 | <gh_stars>1-10
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from aiopoke.objects.resources.contests.super_contest_effect import SuperContestEffect
from aiopoke.objects.resources.pokemon.ability import AbilityEffectChange
from aiopoke.objects.utility import (
MachineVersionDetail,
Name,
NamedResource,
VerboseEffect,
)
from aiopoke.utils.minimal_resources import MinimalResource, Url
from aiopoke.utils.resource import Resource
if TYPE_CHECKING:
from aiopoke.objects.resources import (
ContestEffect,
ContestType,
Generation,
NaturalGiftType,
Pokemon,
Stat,
VersionGroup,
)
from aiopoke.objects.resources.moves import (
MoveAilment,
MoveCategory,
MoveDamageClass,
MoveTarget,
)
from aiopoke.objects.utility import Language
class Move(NamedResource):
accuracy: int
contest_combos: "ContestComboSets"
contest_effect: Url["ContestEffect"]
contest_type: MinimalResource["ContestType"]
damage_class: MinimalResource["MoveDamageClass"]
effect_chance: Optional[int]
effect_changes: List["AbilityEffectChange"]
effect_entries: List["VerboseEffect"]
flavor_text_entries: List["MoveFlavorText"]
generation: MinimalResource["Generation"]
learned_by_pokemon: List[MinimalResource["Pokemon"]]
machines: List["MachineVersionDetail"]
meta: "MoveMetaData"
names: List["Name"]
past_values: List["PastMoveStatValues"]
power: int
pp: int
priority: int
stat_changes: List["MoveStatChange"]
super_contest_effect: Url["SuperContestEffect"]
target: MinimalResource["MoveTarget"]
type: MinimalResource["NaturalGiftType"]
def __init__(
self,
*,
id: int,
name: str,
accuracy: int,
contest_combos: Dict[str, Any],
contest_effect: Dict[str, Any],
contest_type: Dict[str, Any],
damage_class: Dict[str, Any],
effect_chance: Optional[int],
effect_entries: List[Dict[str, Any]],
effect_changes: List[Dict[str, Any]],
flavor_text_entries: List[Dict[str, Any]],
generation: Dict[str, Any],
learned_by_pokemon: List[Dict[str, Any]],
machines: List[Dict[str, Any]],
meta: Dict[str, Any],
names: List[Dict[str, Any]],
past_values: List[Dict[str, Any]],
power: int,
pp: int,
priority: int,
stat_changes: List[Dict[str, Any]],
super_contest_effect: Dict[str, Any],
target: Dict[str, Any],
type: Dict[str, Any],
) -> None:
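        # Wrap the raw API payload (plain dicts / lists of dicts) in typed resource
        # helpers so attributes expose objects rather than bare dictionaries.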
super().__init__(id=id, name=name)
self.accuracy = accuracy
self.contest_combos = ContestComboSets(**contest_combos)
self.contest_effect = Url(**contest_effect)
self.contest_type = MinimalResource(**contest_type)
self.damage_class = MinimalResource(**damage_class)
self.effect_chance = effect_chance
self.effect_entries = [
VerboseEffect(**effect_entry) for effect_entry in effect_entries
]
self.effect_changes = [
AbilityEffectChange(**effect_change) for effect_change in effect_changes
]
self.flavor_text_entries = [
MoveFlavorText(**flavor_text_entry)
for flavor_text_entry in flavor_text_entries
]
self.generation = MinimalResource(**generation)
self.learned_by_pokemon = [
MinimalResource(**pokemon) for pokemon in learned_by_pokemon
]
self.machines = [MachineVersionDetail(**machine) for machine in machines]
self.meta = MoveMetaData(**meta)
self.names = [Name(**name) for name in names]
self.past_values = [
PastMoveStatValues(**past_value) for past_value in past_values
]
self.power = power
self.pp = pp
self.priority = priority
self.stat_changes = [
MoveStatChange(**stat_change) for stat_change in stat_changes
]
self.super_contest_effect = Url(**super_contest_effect)
self.target = MinimalResource(**target)
self.type = MinimalResource(**type)
class ContestComboSets(Resource):
normal: Optional["ContestComboDetail"]
super: Optional["ContestComboDetail"]
def __init__(
self,
*,
normal: Optional[Dict[str, Any]],
super: Optional[Dict[str, Any]],
) -> None:
self.normal = ContestComboDetail(**normal) if normal is not None else None
self.super = ContestComboDetail(**super) if super is not None else None
class ContestComboDetail(Resource):
use_before: Optional[List[MinimalResource["Move"]]]
use_after: Optional[List[MinimalResource["Move"]]]
def __init__(
self,
*,
use_before: Optional[List[Dict[str, Any]]],
use_after: Optional[List[Dict[str, Any]]],
) -> None:
self.use_before = (
[MinimalResource(**move) for move in use_before]
if use_before is not None
else None
)
self.use_after = (
[MinimalResource(**move) for move in use_after]
if use_after is not None
else None
)
class MoveFlavorText(Resource):
flavor_text: str
language: MinimalResource["Language"]
version_group: MinimalResource["VersionGroup"]
def __init__(
self,
*,
flavor_text: str,
language: Dict[str, Any],
version_group: Dict[str, Any],
) -> None:
self.flavor_text = flavor_text
self.language = MinimalResource(**language)
self.version_group = MinimalResource(**version_group)
class MoveMetaData(Resource):
ailment: MinimalResource["MoveAilment"]
category: MinimalResource["MoveCategory"]
min_hits: int
max_hits: int
min_turns: int
max_turns: int
drain: int
healing: int
crit_rate: int
ailment_chance: int
flinch_chance: int
stat_chance: int
def __init__(
self,
*,
ailment: Dict[str, Any],
category: Dict[str, Any],
min_hits: int,
max_hits: int,
min_turns: int,
max_turns: int,
drain: int,
healing: int,
crit_rate: int,
ailment_chance: int,
flinch_chance: int,
stat_chance: int,
) -> None:
self.ailment = MinimalResource(**ailment)
self.category = MinimalResource(**category)
self.min_hits = min_hits
self.max_hits = max_hits
self.min_turns = min_turns
self.max_turns = max_turns
self.drain = drain
self.healing = healing
self.crit_rate = crit_rate
self.ailment_chance = ailment_chance
self.flinch_chance = flinch_chance
self.stat_chance = stat_chance
class MoveStatChange(Resource):
change: int
stat: MinimalResource["Stat"]
def __init__(
self,
*,
change: int,
stat: Dict[str, Any],
) -> None:
self.change = change
self.stat = MinimalResource(**stat)
class PastMoveStatValues(Resource):
accuracy: int
effect_chance: int
power: int
pp: int
effect_entries: List["VerboseEffect"]
type: MinimalResource["NaturalGiftType"]
version_group: MinimalResource["VersionGroup"]
def __init__(
self,
*,
accuracy: int,
effect_chance: int,
power: int,
pp: int,
effect_entries: List[Dict[str, Any]],
type: Dict[str, Any],
version_group: Dict[str, Any],
) -> None:
self.accuracy = accuracy
self.effect_chance = effect_chance
self.power = power
self.pp = pp
self.effect_entries = [
VerboseEffect(**effect_entry) for effect_entry in effect_entries
]
self.type = MinimalResource(**type)
self.version_group = MinimalResource(**version_group)
| 1.90625 | 2 |
tests/spam_test.py | peixinchen/spam | 2 | 12795221 | import spam
def test_system():
    r = spam.system("ls -l")
    assert r == 0
def test_nothing_done():
    r = spam.nothing_done()
    assert r is None
| 2.359375 | 2 |
repo_health/check_makefile.py | edly-io/edx-repo-health | 0 | 12795222 | """
Checks to see if Makefile follows standards
"""
import re
import os
import pytest
from pytest_repo_health import add_key_to_metadata
from repo_health import get_file_content
module_dict_key = "makefile"
@pytest.fixture(name='makefile')
def fixture_makefile(repo_path):
"""Fixture containing the text content of Makefile"""
full_path = os.path.join(repo_path, "Makefile")
return get_file_content(full_path)
@add_key_to_metadata((module_dict_key, "upgrade"))
def check_has_upgrade(makefile, all_results):
"""
upgrade: makefile target that upgrades our dependencies to newer released versions
"""
regex_pattern = "upgrade:"
match = re.search(regex_pattern, makefile)
all_results[module_dict_key]["upgrade"] = False
if match is not None:
all_results[module_dict_key]["upgrade"] = True
| 2.375 | 2 |
infra/migrations/0003_instancemodel.py | Maulikchhabra/Terraform-Console | 0 | 12795223 | <gh_stars>0
# Generated by Django 3.1.5 on 2021-02-11 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('infra', '0002_keymodel_region'),
]
operations = [
migrations.CreateModel(
name='InstanceModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('ami', models.CharField(max_length=100)),
('instancetype', models.CharField(max_length=100)),
],
),
]
| 1.703125 | 2 |
BB/bbObjects/battles/BattleShip.py | Laura7089/GOF2BountyBot | 6 | 12795224 | from ..items.modules import bbCloakModule
from ..items import bbShip
class BattleShip:
"""A class representing ships participting in a duel.
The ship has three health pools; hull, armour and shield.
"""
def __init__(self, bbShip : bbShip.bbShip):
"""
:param bbShip bbShip: The bbShip for this BattleShip to inherit stats from
"""
self.bbShip = bbShip
self.hull = bbShip.armour
self.armour = bbShip.getArmour() - self.hull
self.shield = bbShip.getShield()
self.dps = bbShip.getDPS()
self.cloaks = []
self.EMPs = []
self.remainingCloak = 0
self.cloaking = False
self.EMPCooldown = 0
# TODO: Update to use only one cloak module per ship
        for module in bbShip.modules:
            if isinstance(module, bbCloakModule.bbCloakModule):
                self.cloaks.append(module)
def hasHull(self) -> bool:
"""Test whether this BattleShip has any hull HP remaining.
:return: True if this BattleShip's hull HP is greater than 0, False otherwise
:rtype: bool
"""
return self.hull > 0
def hasArmour(self) -> bool:
"""Test whether this BattleShip has any armour HP remaining.
:return: True if this BattleShip's armour HP is greater than 0, False otherwise
:rtype: bool
"""
return self.armour > 0
def hasShield(self) -> bool:
"""Test whether this BattleShip has any shield HP remaining.
:return: True if this BattleShip's shield HP is greater than 0, False otherwise
:rtype: bool
"""
return self.shield > 0
def hasCloaks(self) -> bool:
"""Test whether this BattleShip has any cloak modules equipped.
:return: True if this BattleShip has at least one cloak module equipped, False otherwise
:rtype: bool
"""
return len(self.cloaks) > 0
def getShield(self) -> int:
"""Get this BattleShip's remaining shield HP.
:return: Integer amount of shield HP remaining; 0 or more.
:rtype: int
"""
return self.shield
def getArmour(self) -> int:
"""Get this BattleShip's remaining armour HP.
:return: Integer amount of armour HP remaining; 0 or more.
:rtype: int
"""
return self.armour
def getHull(self) -> int:
"""Get this BattleShip's remaining hull HP.
:return: Integer amount of hull HP remaining; 0 or more.
:rtype: int
"""
return self.hull
def setShield(self, new : int):
"""Set this BattleShip's remaining shield HP.
:param int new: Integer new amount of shield HP remaining; 0 or more.
"""
self.shield = new
def setArmour(self, new : int):
"""Set this BattleShip's remaining armour HP.
:param int new: Integer new amount of armour HP remaining; 0 or more.
"""
self.armour = new
def setHull(self, new : int):
"""Set this BattleShip's remaining hull HP.
:param int new: Integer new amount of hull HP remaining; 0 or more.
"""
self.hull = new
| 3.34375 | 3 |
tensorflow_data_validation/utils/stats_util.py | rtg0795/data-validation | 0 | 12795225 | <filename>tensorflow_data_validation/utils/stats_util.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for stats generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from typing import Dict, Iterable, Optional, Sequence, Text, Tuple, Union
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_data_validation import constants
from tensorflow_data_validation import types
from tensorflow_data_validation.arrow import arrow_util
from tensorflow_data_validation.utils import statistics_io_impl
from tensorflow_data_validation.utils import io_util
from tfx_bsl import statistics
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import statistics_pb2
_NP_DTYPE_KIND_TO_FEATURE_TYPE = {
'f': statistics_pb2.FeatureNameStatistics.FLOAT,
'i': statistics_pb2.FeatureNameStatistics.INT,
'u': statistics_pb2.FeatureNameStatistics.INT,
'S': statistics_pb2.FeatureNameStatistics.STRING,
'O': statistics_pb2.FeatureNameStatistics.STRING,
'U': statistics_pb2.FeatureNameStatistics.STRING,
}
# LINT.IfChange
# Semantic domain information can be passed to schema inference using a
# CustomStatistic with name=DOMAIN_INFO.
DOMAIN_INFO = 'domain_info'
# LINT.ThenChange(../anomalies/custom_domain_util.cc)
def maybe_get_utf8(value: bytes) -> Optional[Text]:
"""Returns the value decoded as utf-8, or None if it cannot be decoded.
Args:
value: The bytes value to decode.
Returns:
The value decoded as utf-8, or None, if the value cannot be decoded.
"""
try:
decoded_value = value.decode('utf-8')
except UnicodeError:
return None
return decoded_value
def get_feature_type(
dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]:
"""Get feature type from numpy dtype.
Args:
dtype: Numpy dtype.
Returns:
A statistics_pb2.FeatureNameStatistics.Type value.
"""
return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind)
def get_feature_type_from_arrow_type(
feature_path: types.FeaturePath,
arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]:
"""Get feature type from Arrow type.
Args:
feature_path: path of the feature.
arrow_type: Arrow DataType.
Returns:
A statistics_pb2.FeatureNameStatistics.Type value or None if arrow_type
is null (which means it cannot be determined for now).
Raises:
TypeError: if the type is not supported.
"""
if pa.types.is_null(arrow_type):
return None
if not arrow_util.is_list_like(arrow_type):
raise TypeError('Expected feature column to be a '
'(Large)List<primitive|struct> or null, but feature {} '
'was {}.'.format(feature_path, arrow_type))
value_type = arrow_util.get_innermost_nested_type(arrow_type)
if pa.types.is_integer(value_type):
return statistics_pb2.FeatureNameStatistics.INT
elif pa.types.is_floating(value_type):
return statistics_pb2.FeatureNameStatistics.FLOAT
elif arrow_util.is_binary_like(value_type):
return statistics_pb2.FeatureNameStatistics.STRING
elif pa.types.is_struct(value_type):
return statistics_pb2.FeatureNameStatistics.STRUCT
elif pa.types.is_null(value_type):
return None
raise TypeError('Feature {} has unsupported arrow type: {}'.format(
feature_path, arrow_type))
def make_dataset_feature_stats_proto(
stats_values: Dict[types.FeaturePath, Dict[Text, float]]
) -> statistics_pb2.DatasetFeatureStatistics:
"""Builds DatasetFeatureStatistics proto with custom stats from input dict.
Args:
stats_values: A Dict[FeaturePath, Dict[str,float]] where the keys are
feature paths, and values are Dicts with keys denoting name of the custom
statistic and values denoting the value of the custom statistic
for the feature.
Ex. {
FeaturePath(('feature_1',)): {
'Mutual Information': 0.5,
'Correlation': 0.1 },
FeaturePath(('feature_2',)): {
'Mutual Information': 0.8,
'Correlation': 0.6 }
}
Returns:
DatasetFeatureStatistics proto containing the custom statistics for each
feature in the dataset.
"""
result = statistics_pb2.DatasetFeatureStatistics()
# Sort alphabetically by feature name to have deterministic ordering
feature_paths = sorted(stats_values.keys())
for feature_path in feature_paths:
feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path],
feature_path)
new_feature_stats_proto = result.features.add()
new_feature_stats_proto.CopyFrom(feature_stats_proto)
return result
def _make_feature_stats_proto(
stats_values: Dict[Text, float],
feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics:
"""Creates the FeatureNameStatistics proto for one feature.
Args:
stats_values: A Dict[str,float] where the key of the dict is the name of the
custom statistic and the value is the numeric value of the custom
statistic of that feature. Ex. {
'Mutual Information': 0.5,
'Correlation': 0.1 }
feature_path: The path of the feature.
Returns:
A FeatureNameStatistic proto containing the custom statistics for a
feature.
"""
result = statistics_pb2.FeatureNameStatistics()
result.path.CopyFrom(feature_path.to_proto())
# Sort alphabetically by statistic name to have deterministic ordering
stat_names = sorted(stats_values.keys())
for stat_name in stat_names:
result.custom_stats.add(name=stat_name, num=stats_values[stat_name])
return result
def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList,
output_path: Text) -> None:
"""Writes a DatasetFeatureStatisticsList proto to a file in text format.
Args:
stats: A DatasetFeatureStatisticsList proto.
output_path: File path to write the DatasetFeatureStatisticsList proto.
Raises:
TypeError: If the input proto is not of the expected type.
"""
if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList):
raise TypeError(
'stats is of type %s, should be a '
'DatasetFeatureStatisticsList proto.' % type(stats).__name__)
stats_proto_text = text_format.MessageToString(stats)
io_util.write_string_to_file(output_path, stats_proto_text)
def load_stats_text(
input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Loads the specified DatasetFeatureStatisticsList proto stored in text format.
Args:
input_path: File path from which to load the DatasetFeatureStatisticsList
proto.
Returns:
A DatasetFeatureStatisticsList proto.
"""
stats_proto = statistics_pb2.DatasetFeatureStatisticsList()
stats_text = io_util.read_file_to_string(input_path)
text_format.Parse(stats_text, stats_proto)
return stats_proto
def load_stats_binary(
input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Loads a serialized DatasetFeatureStatisticsList proto from a file.
Args:
input_path: File path from which to load the DatasetFeatureStatisticsList
proto.
Returns:
A DatasetFeatureStatisticsList proto.
"""
stats_proto = statistics_pb2.DatasetFeatureStatisticsList()
stats_proto.ParseFromString(io_util.read_file_to_string(
input_path, binary_mode=True))
return stats_proto
def load_stats_tfrecord(
input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Loads data statistics proto from TFRecord file.
Args:
input_path: Data statistics file path.
Returns:
A DatasetFeatureStatisticsList proto.
"""
  it = statistics_io_impl.get_io_provider('tfrecords').record_iterator_impl(
      [input_path])
  result = next(it)
  try:
    next(it)
  except StopIteration:
    return result
  raise ValueError('load_stats_tfrecord expects a single record.')
def get_feature_stats(stats: statistics_pb2.DatasetFeatureStatistics,
feature_path: types.FeaturePath
) -> statistics_pb2.FeatureNameStatistics:
"""Get feature statistics from the dataset statistics.
Args:
stats: A DatasetFeatureStatistics protocol buffer.
feature_path: The path of the feature whose statistics to obtain from the
dataset statistics.
Returns:
A FeatureNameStatistics protocol buffer.
Raises:
TypeError: If the input statistics is not of the expected type.
ValueError: If the input feature is not found in the dataset statistics.
"""
if not isinstance(stats, statistics_pb2.DatasetFeatureStatistics):
raise TypeError('statistics is of type %s, should be a '
'DatasetFeatureStatistics proto.' %
type(stats).__name__)
for feature_stats in stats.features:
if feature_path == types.FeaturePath.from_proto(feature_stats.path):
return feature_stats
raise ValueError('Feature %s not found in the dataset statistics.' %
feature_path)
def get_custom_stats(
feature_stats: statistics_pb2.FeatureNameStatistics,
custom_stats_name: Text
) -> Union[float, Text, statistics_pb2.Histogram, statistics_pb2.RankHistogram]:
"""Get custom statistics from the feature statistics.
Args:
feature_stats: A FeatureNameStatistics protocol buffer.
custom_stats_name: The name of the custom statistics to obtain from the
feature statistics proto.
Returns:
The custom statistic.
Raises:
TypeError: If the input feature statistics is not of the expected type.
ValueError: If the custom statistic is not found in the feature statistics.
"""
if not isinstance(feature_stats, statistics_pb2.FeatureNameStatistics):
raise TypeError('feature_stats is of type %s, should be a '
'FeatureNameStatistics proto.' %
type(feature_stats).__name__)
for custom_stats in feature_stats.custom_stats:
if custom_stats.name == custom_stats_name:
return getattr(custom_stats, custom_stats.WhichOneof('val'))
raise ValueError('Custom statistics %s not found in the feature statistics.' %
custom_stats_name)
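# Illustrative sketch, not part of the original module: chaining
# get_feature_stats and get_custom_stats. The feature path and the custom
# statistic name below are assumptions.
def _example_feature_and_custom_stats(
    stats_list: statistics_pb2.DatasetFeatureStatisticsList):
  dataset_stats = stats_list.datasets[0]
  feature_stats = get_feature_stats(dataset_stats, types.FeaturePath(['age']))
  return get_custom_stats(feature_stats, 'example_custom_stat')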
def get_slice_stats(
stats: statistics_pb2.DatasetFeatureStatisticsList,
slice_key: Text) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Get statistics associated with a specific slice.
Args:
stats: A DatasetFeatureStatisticsList protocol buffer.
slice_key: Slice key of the slice.
Returns:
Statistics of the specific slice.
Raises:
ValueError: If the input statistics proto does not have the specified slice
statistics.
"""
for slice_stats in stats.datasets:
if slice_stats.name == slice_key:
result = statistics_pb2.DatasetFeatureStatisticsList()
result.datasets.add().CopyFrom(slice_stats)
return result
raise ValueError('Invalid slice key.')
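# Illustrative sketch, not part of the original module: extracting the
# statistics of a single slice. The slice key is an assumption.
def _example_slice_stats(
    stats_list: statistics_pb2.DatasetFeatureStatisticsList):
  return get_slice_stats(stats_list, 'slice_key_value')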
def load_statistics(
input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Loads data statistics proto from file.
Args:
input_path: Data statistics file path. The file should be a one-record
TFRecord file or a plain file containing the statistics proto in Proto
Text Format.
Returns:
A DatasetFeatureStatisticsList proto.
Raises:
IOError: If the input path does not exist.
"""
if not tf.io.gfile.exists(input_path):
raise IOError('Invalid input path {}.'.format(input_path))
try:
return load_stats_tfrecord(input_path)
except Exception: # pylint: disable=broad-except
logging.info('File %s did not look like a TFRecord. Try reading as a plain '
'file.', input_path)
return load_stats_text(input_path)
def _normalize_feature_id(
name_or_path_or_steps: Union[str, types.FeaturePath, Iterable[str]]
) -> types.FeaturePath:
if isinstance(name_or_path_or_steps, str):
return types.FeaturePath([name_or_path_or_steps])
if isinstance(name_or_path_or_steps, types.FeaturePath):
return name_or_path_or_steps
return types.FeaturePath(name_or_path_or_steps)
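# For illustration only (not part of the original module): the three accepted
# identifier forms normalize to the same value, e.g.
#   _normalize_feature_id('age')
#   _normalize_feature_id(['age'])
#   _normalize_feature_id(types.FeaturePath(['age']))
# all yield types.FeaturePath(['age']).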
class DatasetListView(object):
"""View of statistics for multiple datasets (slices)."""
def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatisticsList):
self._statistics = stats_proto
self._slice_map = {} # type: Dict[str, DatasetView]
self._initialized = False
def _init_index(self):
"""Initializes internal mappings."""
# Lazily initialize in case we don't need an index.
if self._initialized:
return
for dataset in self._statistics.datasets:
if dataset.name in self._slice_map:
raise ValueError('Duplicate slice name %s' % dataset.name)
self._slice_map[dataset.name] = DatasetView(dataset)
self._initialized = True
def proto(self) -> statistics_pb2.DatasetFeatureStatisticsList:
"""Retrieve the underlying proto."""
return self._statistics
def get_slice(self, slice_key: str) -> Optional['DatasetView']:
self._init_index()
return self._slice_map.get(slice_key, None)
def get_default_slice(self) -> Optional['DatasetView']:
self._init_index()
if len(self._slice_map) == 1:
for _, v in self._slice_map.items():
return v
return self._slice_map.get(constants.DEFAULT_SLICE_KEY, None)
def list_slices(self) -> Iterable[str]:
self._init_index()
return self._slice_map.keys()
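# Illustrative sketch, not part of the original module: a typical read path
# through a DatasetListView. The slice key is an assumption.
def _example_dataset_list_view(
    stats_list: statistics_pb2.DatasetFeatureStatisticsList):
  view = DatasetListView(stats_list)
  default_slice = view.get_default_slice()
  named_slice = view.get_slice('slice_key_value')
  return list(view.list_slices()), default_slice, named_slice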
class DatasetView(object):
"""View of statistics for a dataset (slice)."""
def __init__(self, stats_proto: statistics_pb2.DatasetFeatureStatistics):
self._feature_map = {} # type: Dict[types.FeaturePath, int]
self._cross_feature_map = {
} # type: Dict[Tuple[types.FeaturePath, types.FeaturePath], int]
self._statistics = stats_proto
self._initialized = False
def _init_index(self):
"""Initializes internal indices. Noop if already initialized."""
if self._initialized:
return
field_identifier = None
for j, feature in enumerate(self._statistics.features):
if field_identifier is None:
field_identifier = feature.WhichOneof('field_id')
elif feature.WhichOneof('field_id') != field_identifier:
raise ValueError(
'Features must be specified with either path or name within a'
' Dataset.')
if field_identifier == 'name':
feature_id = types.FeaturePath([feature.name])
else:
feature_id = types.FeaturePath.from_proto(feature.path)
if feature_id in self._feature_map:
raise ValueError('Duplicate feature %s' % feature_id)
self._feature_map[feature_id] = j
for j, cross_feature in enumerate(self._statistics.cross_features):
feature_id = (types.FeaturePath.from_proto(cross_feature.path_x),
types.FeaturePath.from_proto(cross_feature.path_y))
if feature_id in self._cross_feature_map:
raise ValueError('Duplicate feature %s' % feature_id)
self._cross_feature_map[feature_id] = j
self._initialized = True
def proto(self) -> statistics_pb2.DatasetFeatureStatistics:
"""Retrieve the underlying proto."""
return self._statistics
def get_feature(
self, feature_id: Union[str, types.FeaturePath, Iterable[str]]
) -> Optional['FeatureView']:
"""Retrieve a feature if it exists.
Features specified within the underlying proto by name (instead of path) are
normalized to a length 1 path, and can be referred to as such.
Args:
feature_id: A types.FeaturePath, Iterable[str] consisting of path steps,
or a str, which is converted to a length one path.
Returns:
A FeatureView, or None if feature_id is not present.
"""
feature_id = _normalize_feature_id(feature_id)
self._init_index()
index = self._feature_map.get(feature_id, None)
if index is None:
return None
return FeatureView(self._statistics.features[index])
def get_cross_feature(
self, x_path: Union[str, types.FeaturePath,
Iterable[str]], y_path: Union[str, types.FeaturePath,
Iterable[str]]
) -> Optional['CrossFeatureView']:
"""Retrieve a cross-feature if it exists, or None."""
x_path = _normalize_feature_id(x_path)
y_path = _normalize_feature_id(y_path)
self._init_index()
feature_id = (x_path, y_path)
index = self._cross_feature_map.get(feature_id, None)
if index is None:
return None
return CrossFeatureView(self._statistics.cross_features[index])
def list_features(self) -> Iterable[types.FeaturePath]:
"""Lists feature identifiers."""
self._init_index()
return self._feature_map.keys()
def list_cross_features(
self) -> Iterable[Tuple[types.FeaturePath, types.FeaturePath]]:
"""Lists cross-feature identifiers."""
self._init_index()
return self._cross_feature_map.keys()
def get_derived_feature(
self, deriver_name: str,
source_paths: Sequence[types.FeaturePath]) -> Optional['FeatureView']:
"""Retrieve a derived feature based on a deriver name and its inputs.
Args:
deriver_name: The name of a deriver. Matches validation_derived_source
deriver_name.
source_paths: Source paths for derived features. Matches
validation_derived_source.source_path.
Returns:
FeatureView of derived feature.
Raises:
ValueError if multiple derived features match.
"""
# TODO(b/221453427): Consider indexing if performance becomes an issue.
results = []
for feature in self.proto().features:
if feature.validation_derived_source is None:
continue
if feature.validation_derived_source.deriver_name != deriver_name:
continue
if (len(source_paths) != len(
feature.validation_derived_source.source_path)):
continue
all_match = True
for i in range(len(source_paths)):
if (source_paths[i] != types.FeaturePath.from_proto(
feature.validation_derived_source.source_path[i])):
all_match = False
break
if all_match:
results.append(FeatureView(feature))
if len(results) > 1:
raise ValueError('Ambiguous result, %d features matched' % len(results))
if len(results) == 1:
return results.pop()
return None
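# Illustrative sketch, not part of the original module: looking up features
# and cross-features on a DatasetView. The paths are assumptions.
def _example_dataset_view(dataset_view: DatasetView):
  feature = dataset_view.get_feature('age')
  cross = dataset_view.get_cross_feature('age', 'income')
  return list(dataset_view.list_features()), feature, cross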
class FeatureView(object):
"""View of a single feature.
This class provides accessor methods, as well as access to the underlying
proto. Where possible, accessors should be used in place of proto access (for
example, x.numeric_statistics() instead of x.proto().num_stats) in order to
support future extension of the proto.
"""
def __init__(self, stats_proto: statistics_pb2.FeatureNameStatistics):
self._statistics = stats_proto
def proto(self) -> statistics_pb2.FeatureNameStatistics:
"""Retrieve the underlying proto."""
return self._statistics
def custom_statistic(self,
name: str) -> Optional[statistics_pb2.CustomStatistic]:
"""Retrieve a custom_statistic by name."""
result = None
for stat in self._statistics.custom_stats:
if stat.name == name:
if result is None:
result = stat
else:
raise ValueError('Duplicate custom_stats for name %s' % name)
return result
# TODO(b/202910677): Add convenience methods for retrieving first-party custom
# statistics (e.g., MI, NLP).
def numeric_statistics(self) -> Optional[statistics_pb2.NumericStatistics]:
"""Retrieve numeric statistics if available."""
if self._statistics.WhichOneof('stats') == 'num_stats':
return self._statistics.num_stats
return None
def string_statistics(self) -> Optional[statistics_pb2.StringStatistics]:
"""Retrieve string statistics if available."""
if self._statistics.WhichOneof('stats') == 'string_stats':
return self._statistics.string_stats
return None
def bytes_statistics(self) -> Optional[statistics_pb2.BytesStatistics]:
"""Retrieve byte statistics if available."""
if self._statistics.WhichOneof('stats') == 'bytes_stats':
return self._statistics.bytes_stats
return None
def struct_statistics(self) -> Optional[statistics_pb2.StructStatistics]:
"""Retrieve struct statistics if available."""
if self._statistics.WhichOneof('stats') == 'struct_stats':
return self._statistics.struct_stats
return None
def common_statistics(self) -> Optional[statistics_pb2.CommonStatistics]:
"""Retrieve common statistics if available."""
which = self._statistics.WhichOneof('stats')
if which == 'num_stats':
return self._statistics.num_stats.common_stats
if which == 'string_stats':
return self._statistics.string_stats.common_stats
if which == 'bytes_stats':
return self._statistics.bytes_stats.common_stats
if which == 'struct_stats':
return self._statistics.struct_stats.common_stats
return None
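# Illustrative sketch, not part of the original module: using the typed
# accessors rather than touching the proto directly, as the class docstring
# above recommends. The custom statistic name is an assumption.
def _example_feature_view(feature_view: FeatureView):
  common = feature_view.common_statistics()
  numeric = feature_view.numeric_statistics()
  custom = feature_view.custom_statistic('example_custom_stat')
  return common, numeric, custom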
class CrossFeatureView(object):
"""View of a single cross feature."""
def __init__(self, stats_proto: statistics_pb2.CrossFeatureStatistics):
self._statistics = stats_proto
def proto(self) -> statistics_pb2.CrossFeatureStatistics:
"""Retrieve the underlying proto."""
return self._statistics
def load_sharded_statistics(
input_path_prefix: Optional[str] = None,
input_paths: Optional[Iterable[str]] = None,
io_provider: Optional[statistics_io_impl.StatisticsIOProvider] = None
) -> DatasetListView:
"""Read a sharded DatasetFeatureStatisticsList from disk as a DatasetListView.
Args:
input_path_prefix: If passed, loads files starting with this prefix and
ending with a pattern corresponding to the output of the provided
io_provider.
input_paths: A list of file paths of files containing sharded
DatasetFeatureStatisticsList protos.
io_provider: Optional StatisticsIOProvider. If unset, a default will be
constructed.
Returns:
A DatasetListView containing the merged proto.
"""
  if (input_path_prefix is None) == (input_paths is None):
raise ValueError('Must provide one of input_paths_prefix, input_paths.')
if io_provider is None:
io_provider = statistics_io_impl.get_io_provider()
if input_path_prefix is not None:
input_paths = io_provider.glob(input_path_prefix)
acc = statistics.DatasetListAccumulator()
stats_iter = io_provider.record_iterator_impl(input_paths)
for stats_list in stats_iter:
for dataset in stats_list.datasets:
acc.MergeDatasetFeatureStatistics(dataset.SerializeToString())
stats = statistics_pb2.DatasetFeatureStatisticsList()
stats.ParseFromString(acc.Get())
return DatasetListView(stats)
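# Illustrative sketch, not part of the original module: merging sharded
# statistics written with a matching IO provider. The path prefix is an
# assumption.
def _example_load_sharded():
  merged_view = load_sharded_statistics(
      input_path_prefix='/tmp/sharded_stats/stats')
  return merged_view.proto()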
| 1.828125 | 2 |
Mars_Scraping/scraping.py | SiMewL8/Mission-To-Mars | 0 | 12795226 | # --------------------------------------------------------------------------------------------------------------------------------
# Imports and Executables
# --------------------------------------------------------------------------------------------------------------------------------
from splinter import Browser
from bs4 import BeautifulSoup as soupy
import pandas as pd
import datetime as dt
# --------------------------------------------------------------------------------------------------------------------------------
# Gathered Data
# --------------------------------------------------------------------------------------------------------------------------------
def scrape_all():
# Set the executable path and initialize the chrome browser in splinter
browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True)
    # headless = True, doesn't show the automated script in action
# pylint: disable=unbalanced-tuple-unpacking
# news_title, news_teaser_sum, news_date = mars_news(browser)
news_title, news_teaser_sum = mars_news(browser)
# Runs all separate scraping functions and stores results in a dictionary
mars_total_data = {
"news_title" : news_title,
"news_paragraph_summary" : news_teaser_sum,
# "news_latest_date" : news_date,
# "news_latest_link" : latest_art_link,
"featured_image" : featured_image(browser),
"facts" : mars_facts(),
"img_and_url": get_url(browser),
"last_modified" : dt.datetime.now()}
browser.quit()
return mars_total_data
# --------------------------------------------------------------------------------------------------------------------------------
# News Title and Paragraph
# --------------------------------------------------------------------------------------------------------------------------------
def mars_news(browser):
# defined outside of the function, basically a catalyst to get the function started, like a grandfather variable
# browser function already defined outside
# Visit the mars nasa news site
nasa_url = 'https://mars.nasa.gov/news/'
browser.visit(nasa_url)
# optional delay for loading page
browser.is_element_present_by_css("ul.item_list li.slide", wait_time=1)
# Convert the browser html to a soup object and then quit the browser
parse_html = browser.html
news_soup = soupy(parse_html, 'html.parser')
try:
        # add error handling, especially for AttributeErrors, with try/except
        # on error the code keeps running, except for an AttributeError, where None is returned
        slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to further filter
# Use the parent element to find the first a tag and save it as `news_title`
news_title = slide_elem.find('div',class_='content_title').get_text()
# news_date = slide_elem.find('div',class_='list_date').get_text()
# latest_art_link = f"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}"
# Use the parent element to find the paragraph text
news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text()
except AttributeError:
return None, None
# return news_title, news_teaser_sum, news_date, latest_art_link
return news_title, news_teaser_sum
# --------------------------------------------------------------------------------------------------------------------------------
# JPL Featured Space Image
# --------------------------------------------------------------------------------------------------------------------------------
# Visit URL
def featured_image(browser):
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
# Find and click the full image button
full_image_elem = browser.find_by_id('full_image')
full_image_elem.click()
# Find the more info button and click that
browser.is_element_present_by_text('more info', wait_time=1)
more_info_elem = browser.links.find_by_partial_text('more info')
more_info_elem.click()
# Parse the resulting html with soup
parse_html = browser.html
full_img_soup = soupy(parse_html, 'html.parser' )
try:
# find the relative image url
latest_image_full = full_img_soup.select_one('figure.lede a img').get("src")
except AttributeError:
return None
# Use the base url to create an absolute url
latest_imgurl = f"https://www.jpl.nasa.gov{latest_image_full}"
return latest_imgurl
# --------------------------------------------------------------------------------------------------------------------------------
# Mars Fact Table
# --------------------------------------------------------------------------------------------------------------------------------
def mars_facts():
try:
mars_df = pd.read_html('https://space-facts.com/mars/')[0]
except BaseException:
# covers all exception errors
return None
# Assign columns and set index of dataframe
mars_df.columns = ['Description', 'Mars'] # adds column names
mars_df.set_index('Description', inplace=True) # set column index
# Convert dataframe into HTML format, add bootstrap
return mars_df.to_html(classes= "table")
# --------------------------------------------------------------------------------------------------------------------------------
# Mars Hemispheres
# --------------------------------------------------------------------------------------------------------------------------------
def get_url(browser):
hemis_search_list = ['Cerberus Hemisphere Enhanced',
'Schiaparelli Hemisphere Enhanced',
'Syrtis Major Hemisphere Enhanced',
'Valles Marineris Hemisphere Enhanced']
names_n_url = []
Hemisphere = "Hemisphere"
Urlid = "URL"
for x in range(len(hemis_search_list)):
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
try:
browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2)
hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}')
hemi_click.click()
parse_html = browser.html
hemi_parse_html = soupy(parse_html, 'html.parser' )
hemi_img_url = hemi_parse_html.select_one('ul li a').get("href")
names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url})
except IndexError:
return f"Search result not found"
except AttributeError:
return None
# df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns')
# df_hemi_urls.set_index('Hemisphere', inplace=True)
# df_hemi_urls['URL']=str(df_hemi_urls['URL'])
# pd.set_option('display.max_colwidth', -1)
return names_n_url
if __name__ == "__main__":
# if running as script, print scraped data
print(scrape_all()) | 1.875 | 2 |
Aula 14/ex059P.py | alaanlimaa/Python_CVM1-2-3 | 0 | 12795227 | <reponame>alaanlimaa/Python_CVM1-2-3<filename>Aula 14/ex059P.py<gh_stars>0
from time import sleep
n1 = int(input('First value: '))
n2 = int(input('Second value: '))
opcao = 0
while opcao != 5:
    print(''' [ 1 ] Add
    [ 2 ] Multiply
    [ 3 ] Largest
    [ 4 ] New numbers
    [ 5 ] Quit the program''')
    opcao = int(input("What is your choice: "))
    if opcao == 1:
        soma = n1 + n2
        print('The sum of {} + {} = {}!'.format(n1, n2, soma))
    elif opcao == 2:
        produto = n2 * n1
        print('The product of {} * {} = {}!'.format(n1, n2, produto))
    elif opcao == 3:
        if n1 > n2:
            maior = n1
        else:
            maior = n2
        print('Between {} and {} the largest is {}'.format(n1, n2, maior))
    elif opcao == 4:
        print('Enter the numbers again!')
        n1 = int(input('First value: '))
        n2 = int(input('Second value: '))
    elif opcao == 5:
        print('Finishing...')
    else:
        print('Invalid option, try another number!')
    print('-=-' * 20)
    sleep(2)
print('END OF PROGRAM') | 3.53125 | 4 |
bookwyrm/models/status.py | daveross/bookwyrm | 0 | 12795228 | ''' models for storing different kinds of Activities '''
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from model_utils.managers import InheritanceManager
from bookwyrm import activitypub
from .base_model import ActivitypubMixin, OrderedCollectionPageMixin
from .base_model import BookWyrmModel, PrivacyLevels
from . import fields
from .fields import image_serializer
class Status(OrderedCollectionPageMixin, BookWyrmModel):
''' any post, like a reply to a review, etc '''
user = fields.ForeignKey(
'User', on_delete=models.PROTECT, activitypub_field='attributedTo')
content = fields.TextField(blank=True, null=True)
mention_users = fields.TagField('User', related_name='mention_user')
mention_books = fields.TagField('Edition', related_name='mention_book')
local = models.BooleanField(default=True)
privacy = models.CharField(
max_length=255,
default='public',
choices=PrivacyLevels.choices
)
sensitive = fields.BooleanField(default=False)
# the created date can't be this, because of receiving federated posts
published_date = fields.DateTimeField(
default=timezone.now, activitypub_field='published')
deleted = models.BooleanField(default=False)
deleted_date = models.DateTimeField(blank=True, null=True)
favorites = models.ManyToManyField(
'User',
symmetrical=False,
through='Favorite',
through_fields=('status', 'user'),
related_name='user_favorites'
)
reply_parent = fields.ForeignKey(
'self',
null=True,
on_delete=models.PROTECT,
activitypub_field='inReplyTo',
)
objects = InheritanceManager()
activity_serializer = activitypub.Note
serialize_reverse_fields = [('attachments', 'attachment')]
deserialize_reverse_fields = [('attachments', 'attachment')]
#----- replies collection activitypub ----#
@classmethod
def replies(cls, status):
''' load all replies to a status. idk if there's a better way
to write this so it's just a property '''
return cls.objects.filter(reply_parent=status).select_subclasses()
@property
def status_type(self):
''' expose the type of status for the ui using activity type '''
return self.activity_serializer.__name__
def to_replies(self, **kwargs):
''' helper function for loading AP serialized replies to a status '''
return self.to_ordered_collection(
self.replies(self),
remote_id='%s/replies' % self.remote_id,
**kwargs
)
def to_activity(self, pure=False):
''' return tombstone if the status is deleted '''
if self.deleted:
return activitypub.Tombstone(
id=self.remote_id,
url=self.remote_id,
deleted=self.deleted_date.isoformat(),
published=self.deleted_date.isoformat()
).serialize()
activity = ActivitypubMixin.to_activity(self)
activity['replies'] = self.to_replies()
# privacy controls
public = 'https://www.w3.org/ns/activitystreams#Public'
mentions = [u.remote_id for u in self.mention_users.all()]
# this is a link to the followers list:
followers = self.user.__class__._meta.get_field('followers')\
.field_to_activity(self.user.followers)
if self.privacy == 'public':
activity['to'] = [public]
activity['cc'] = [followers] + mentions
elif self.privacy == 'unlisted':
activity['to'] = [followers]
activity['cc'] = [public] + mentions
elif self.privacy == 'followers':
activity['to'] = [followers]
activity['cc'] = mentions
if self.privacy == 'direct':
activity['to'] = mentions
activity['cc'] = []
# "pure" serialization for non-bookwyrm instances
if pure:
activity['content'] = self.pure_content
if 'name' in activity:
activity['name'] = self.pure_name
activity['type'] = self.pure_type
activity['attachment'] = [
image_serializer(b.cover) for b in self.mention_books.all() \
if b.cover]
if hasattr(self, 'book'):
activity['attachment'].append(
image_serializer(self.book.cover)
)
return activity
def save(self, *args, **kwargs):
''' update user active time '''
if self.user.local:
self.user.last_active_date = timezone.now()
self.user.save()
return super().save(*args, **kwargs)
class GeneratedNote(Status):
''' these are app-generated messages about user activity '''
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
message = self.content
books = ', '.join(
'<a href="%s">"%s"</a>' % (book.remote_id, book.title) \
for book in self.mention_books.all()
)
return '%s %s %s' % (self.user.display_name, message, books)
activity_serializer = activitypub.GeneratedNote
pure_type = 'Note'
class Comment(Status):
''' like a review but without a rating and transient '''
book = fields.ForeignKey(
'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
return self.content + '<br><br>(comment on <a href="%s">"%s"</a>)' % \
(self.book.remote_id, self.book.title)
activity_serializer = activitypub.Comment
pure_type = 'Note'
class Quotation(Status):
    ''' a quoted passage from a book, with optional commentary '''
quote = fields.TextField()
book = fields.ForeignKey(
'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
return '"%s"<br>-- <a href="%s">"%s"</a><br><br>%s' % (
self.quote,
self.book.remote_id,
self.book.title,
self.content,
)
activity_serializer = activitypub.Quotation
pure_type = 'Note'
class Review(Status):
''' a book review '''
name = fields.CharField(max_length=255, null=True)
book = fields.ForeignKey(
'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')
rating = fields.IntegerField(
default=None,
null=True,
blank=True,
validators=[MinValueValidator(1), MaxValueValidator(5)]
)
@property
def pure_name(self):
''' clarify review names for mastodon serialization '''
if self.rating:
return 'Review of "%s" (%d stars): %s' % (
self.book.title,
self.rating,
self.name
)
return 'Review of "%s": %s' % (
self.book.title,
self.name
)
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
return self.content + '<br><br>(<a href="%s">"%s"</a>)' % \
(self.book.remote_id, self.book.title)
activity_serializer = activitypub.Review
pure_type = 'Article'
class Favorite(ActivitypubMixin, BookWyrmModel):
''' fav'ing a post '''
user = fields.ForeignKey(
'User', on_delete=models.PROTECT, activitypub_field='actor')
status = fields.ForeignKey(
'Status', on_delete=models.PROTECT, activitypub_field='object')
activity_serializer = activitypub.Like
def save(self, *args, **kwargs):
''' update user active time '''
self.user.last_active_date = timezone.now()
self.user.save()
super().save(*args, **kwargs)
class Meta:
''' can't fav things twice '''
unique_together = ('user', 'status')
class Boost(Status):
''' boost'ing a post '''
boosted_status = fields.ForeignKey(
'Status',
on_delete=models.PROTECT,
related_name='boosters',
activitypub_field='object',
)
activity_serializer = activitypub.Boost
# This constraint can't work as it would cross tables.
# class Meta:
# unique_together = ('user', 'boosted_status')
class ReadThrough(BookWyrmModel):
''' Store progress through a book in the database. '''
user = models.ForeignKey('User', on_delete=models.PROTECT)
book = models.ForeignKey('Book', on_delete=models.PROTECT)
pages_read = models.IntegerField(
null=True,
blank=True)
start_date = models.DateTimeField(
blank=True,
null=True)
finish_date = models.DateTimeField(
blank=True,
null=True)
def save(self, *args, **kwargs):
''' update user active time '''
self.user.last_active_date = timezone.now()
self.user.save()
super().save(*args, **kwargs)
NotificationType = models.TextChoices(
'NotificationType',
'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT')
class Notification(BookWyrmModel):
''' you've been tagged, liked, followed, etc '''
user = models.ForeignKey('User', on_delete=models.PROTECT)
related_book = models.ForeignKey(
'Edition', on_delete=models.PROTECT, null=True)
related_user = models.ForeignKey(
'User',
on_delete=models.PROTECT, null=True, related_name='related_user')
related_status = models.ForeignKey(
'Status', on_delete=models.PROTECT, null=True)
related_import = models.ForeignKey(
'ImportJob', on_delete=models.PROTECT, null=True)
read = models.BooleanField(default=False)
notification_type = models.CharField(
max_length=255, choices=NotificationType.choices)
class Meta:
        ''' checks that the notification type is in the enum list of valid types '''
constraints = [
models.CheckConstraint(
check=models.Q(notification_type__in=NotificationType.values),
name="notification_type_valid",
)
]
| 2.15625 | 2 |
utils/__init__.py | JacobChen258/AI-Constraints-Satisfaction | 0 | 12795229 | <reponame>JacobChen258/AI-Constraints-Satisfaction
from .directions import Direction
from .directions import direction_to_vector
from .directions import vector_to_direction
from .constants import ASSETS
from .constants import LINE_LIMIT
from .constants import TILESIZE
from .constants import TETROMINO_GRID_SIZE
from .constants import BORDER
from .utils import load_grid
from .matrix_util import MatrixUtil
from .gframe import GFrame
| 0.851563 | 1 |
src/GameObject.py | LangArthur/FlappyBird-AILearning | 1 | 12795230 | <filename>src/GameObject.py
#!/usr/bin/python3
# implementing a gameObject
class GameObject():
# constructor
    # param x, y: initial position of the game object
    # param displayable: the drawable resource used to render the object
def __init__(self, x, y, displayable):
self.x = x
self.y = y
self.displayable = displayable
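    # Usage sketch (illustrative, not part of the original project): concrete
    # objects are expected to subclass GameObject and override draw/update, e.g.
    #   class Bird(GameObject):
    #       def update(self):
    #           self.y += 2
    #       def draw(self, window):
    #           window.blit(self.displayable, (self.x, self.y))  # pygame-style surface assumed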
# draw the gameObject
# param window: the window of the game
def draw(self, window):
pass
# update the gameObject
def update(self):
pass | 3.1875 | 3 |
crossbaker/libs/softFinder.py | josephkirk/MeshBaker | 0 | 12795231 | <filename>crossbaker/libs/softFinder.py
from io import StringIO
import traceback
import wmi
from winreg import (HKEY_LOCAL_MACHINE, KEY_ALL_ACCESS,
OpenKey, EnumValue, QueryValueEx)
softFile = open('softLog.log', 'w')
errorLog = open('errors.log', 'w')
r = wmi.Registry ()
result, names = r.EnumKey (hDefKey=HKEY_LOCAL_MACHINE, sSubKeyName=r"Software\Microsoft\Windows\CurrentVersion\Uninstall")
softFile.write('These subkeys are found under "HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall"\n\n')
errorLog.write("Errors\n\n")
separator = "*" * 80
keyPath = r"Software\Microsoft\Windows\CurrentVersion\Uninstall"
for subkey in names:
try:
softFile.write(separator + '\n\n')
path = keyPath + "\\" + subkey
key = OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_ALL_ACCESS)
try:
temp = QueryValueEx(key, 'DisplayName')
display = str(temp[0])
softFile.write('Display Name: ' + display + '\nRegkey: ' + subkey + '\n')
except:
softFile.write('Regkey: ' + subkey + '\n')
except:
fp = StringIO()
traceback.print_exc(file=fp)
errorMessage = fp.getvalue()
        error = 'Error for ' + subkey + '. Message follows:\n' + errorMessage
errorLog.write(error)
errorLog.write("\n\n")
softFile.close()
errorLog.close() | 2.03125 | 2 |
tests/sfmutils/test_stream_consumer.py | NGTmeaty/sfm-utils | 2 | 12795232 | from __future__ import absolute_import
from unittest import TestCase
from mock import MagicMock, patch
import socket
import tempfile
import os
import shutil
from sfmutils.consumer import MqConfig
from sfmutils.stream_consumer import StreamConsumer
from sfmutils.supervisor import HarvestSupervisor
class TestStreamConsumer(TestCase):
def setUp(self):
self.patcher = patch("sfmutils.stream_consumer.HarvestSupervisor")
mock_supervisor_class = self.patcher.start()
self.mock_supervisor = MagicMock(spec=HarvestSupervisor)
mock_supervisor_class.side_effect = [self.mock_supervisor]
self.working_path = tempfile.mkdtemp()
self.stream_consumer = StreamConsumer("/opt/sfm/test.py", self.working_path,
mq_config=MqConfig(None, None, None, None,
{"test_queue": [
"harvest.start.test.test_usertimeline",
"harvest.start.test.test_search"]}), )
def tearDown(self):
        self.patcher.stop()
if os.path.exists(self.working_path):
shutil.rmtree(self.working_path)
def test_stop_queue(self):
stop_queue = "test_queue_{}".format(socket.gethostname())
self.assertSetEqual({"test_queue", stop_queue},
set(self.stream_consumer.mq_config.queues.keys()))
self.assertListEqual(["harvest.stop.test.test_usertimeline", "harvest.stop.test.test_search"],
self.stream_consumer.mq_config.queues[stop_queue])
def test_start(self):
message = {
"id": "test:1",
"collection_set": {
"id": "test_collection_set"
}
}
self.stream_consumer.message = message
self.stream_consumer.routing_key = "harvest.start.test.test_usertimeline"
self.stream_consumer.on_message()
        self.mock_supervisor.start.assert_called_once_with(message, "harvest.start.test.test_usertimeline")
def test_remove(self):
message = {
"id": "test:1"
}
self.stream_consumer.message = message
self.stream_consumer.routing_key = "harvest.stop.test.test_usertimeline"
self.stream_consumer.on_message()
        self.mock_supervisor.remove.assert_called_once_with("test:1")
| 2.171875 | 2 |
p25.py | fiskenslakt/aoc-2021 | 0 | 12795233 | from itertools import count
from aocd import lines
rows = len(lines)
cols = len(lines[0])
_map = {}
east = []
south = []
for y, line in enumerate(lines):
for x, sc in enumerate(line):
if sc == '>':
east.append((x,y))
elif sc == 'v':
south.append((x,y))
_map[(x,y)] = sc
for step in count(1):
east_move = False
south_move = False
new_map = {}
new_east = []
for sc in east:
x, y = sc
nx = x + 1 if x + 1 < cols else 0
if _map.get((nx,y), '.') == '.':
new_map[(nx,y)] = '>'
new_east.append((nx, y))
east_move = True
else:
new_map[(x,y)] = '>'
new_east.append((x,y))
east = new_east
new_south = []
for sc in south:
x, y = sc
ny = y + 1 if y + 1 < rows else 0
if new_map.get((x,ny), '.') == '.' and _map.get((x,ny), '.') != 'v':
new_map[(x,ny)] = 'v'
new_south.append((x, ny))
south_move = True
else:
new_map[(x,y)] = 'v'
new_south.append((x,y))
south = new_south
_map = new_map
if not east_move and not south_move:
break
print('Part 1:', step)
| 2.859375 | 3 |
src/SpectralAnalysis/bayes.py | axr6077/Black-Hole-X-ray-binary-Evolution | 1 | 12795234 | from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
try:
import cPickle as pickle
except ImportError:
import pickle
import copy
import numpy as np
from src.SpectralAnalysis import utils
from src.SpectralAnalysis import powerspectrum
from src.SpectralAnalysis import mcmc
from src.SpectralAnalysis import mle
from src.SpectralAnalysis import posterior
##########################################
#
# class Bayes: Bayesian data analysis for time series
#
# This class defines a Bayes object that can:
# - pick between two models using likelihood ratio tests
# - find periodicities by picking out the largest power in
# an observation/set of fake periodograms
# - search for QPOs via a model selection approach using LRTs
#
#
# TO DO: Need to add smoothing for picking out narrow signals
#
#
#
class Bayes(object):
""" Bayesian time series analysis
This class defines a Bayes object that can:
- pick between two models using likelihood ratio tests
- find periodicities by picking out the largest power in
an observation/set of fake periodograms
- search for QPOs via a model selection approach using LRTs
Parameters
----------
ps : powerspectrum.Powerspectrum
A periodogram object that is to be searched for QPOs
namestr: string, optional, default "test"
The string that will be used to identify this periodogram when
saving output (text files and plots)
plot: boolean, optional, default True
If True, several diagnostic plots will be saved to disk
m: integer, optional, default 1
If the periodogram used is the result of averaging several
individual periodograms (or bins), this changes the statistical
distributions. Set m to the number of periodograms
averaged to be sure to use the right distribution
Attributes
----------
Examples
--------
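    A minimal usage sketch (illustrative only; `ps` is assumed to be a
    powerspectrum.PowerSpectrum built elsewhere, and func1/par1/func2/par2
    are model functions and starting parameters supplied by the caller)::

        btest = Bayes(ps, namestr="example", plot=False, m=1)
        # posterior predictive model comparison:
        psfit, fakeper, summary = btest.choose_noise_model(func1, par1,
                                                           func2, par2)
        # residual-power periodicity search:
        results = btest.find_periodicity(func1, par1)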
"""
def __init__(self, ps, namestr='test', plot=True, m=1):
assert isinstance(ps, powerspectrum.PowerSpectrum), "ps must be of type powerspectrum.PowerSpectrum!"
self.ps = ps
self.namestr = namestr
self.plot = plot
self.m = m
def choose_noise_model(self, func1, par1, func2, par2,
fitmethod='bfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
use_emcee=True,
parname=None,
noise1=-1,
noise2=-1,
writefile=True):
"""
Fit two models func1 and func2, compute the likelihood
        ratio at the maximum-a-posteriori parameters.
If func1 and func2 differ in complexity, the less complex
should be func1.
        Then sample the posterior distribution for the simpler
model (func1), pick parameter sets from the posterior
to create fake periodograms.
Fit each fake periodogram with the same models as the data, and
compute the likelihood ratios such that it is possible to
build up a posterior distribution for the likelihood
ratios and compute a posterior predictive p-value
that the data can be explained sufficiently with the simpler
model.
Parameters
----------
func1 : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
par1 : {list, array-like}
Input guesses for the MAP fit using func1.
The number of elements *must* equal the number of parameters k
taken by func1.
func2 : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and n
parameters, and returns an array of model powers
The function should include a parameter setting a constant background
level, and this parameter should be last!
par2 : {list, array-like}
Input guesses for the MAP fit using func2.
The number of elements *must* equal the number of parameters n
taken by func2.
fitmethod : string, optional, default bfgs
Allows the choice of different minimization algorithms.
Default uses BFGS, which is pretty robust for most purposes.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
noise1, noise2 : int, optional, default -1
The index for the noise parameter in func1 and func2.
In the pre-defined models, this index is *always* -1.
"""
resfilename = self.namestr + "_choosenoisemodel.dat"
resfile = utils.TwoPrint(resfilename)
### make strings for function names from function definition
func1name = "model1"
func2name = "model2"
### step 1: fit both models to observation and compute LRT
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)
### get out best fit parameters and associated quantities
fitpars1 = getattr(psfit, func1name + 'fit')
fitpars2 = getattr(psfit, func2name + 'fit')
if self.plot:
### plot the periodogram and best fit models
psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True)
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func1)
else:
lpost = posterior.StackPerPosterior(self.ps, func1, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars1['popt'],
tcov=fitpars1['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=use_emcee,
plot=self.plot,
printobj=resfile,
m=self.m)
### Step 3: create fake periodograms out of MCMCs
fakeper = mcobs.simulate_periodogram(nsim=nsim)
### empty lists for simulated quantities of interest:
sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [], [], [], [], []
### Step 4: Fit fake periodograms and read out parameters of interest from each fit:
for i, x in enumerate(fakeper):
try:
fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)
lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)
# resfile('Fitting of fake periodogram ' + str(i) + ' failed! Returning ...')
# return psfit, fakeper, mcobs
sim_pars1 = getattr(fitfake, func1name + 'fit')
sim_pars2 = getattr(fitfake, func2name + 'fit')
# if lrt > 20:
# fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i))
sim_lrt.append(lrt)
sim_deviance.append(sim_pars1['deviance'])
sim_ksp.append(sim_pars1['ksp'])
sim_maxpow.append(sim_pars1['maxpow'])
sim_merit.append(sim_pars1['merit'])
sim_fpeak.append(sim_pars1['maxfreq'])
sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']])
sim_srat.append(sim_pars1['sobs'])
except KeyboardInterrupt:
break
if len(sim_maxpow) == 0:
resfile("Analysis of Burst failed! Returning ...")
return False, False, False
else:
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_maxpow = float(len([x for x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow))
p_deviance = float(len([x for x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit))
p_lrt = float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt))
p_srat = float(len([x for x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat))
resfile('simulated srat: ' + str(sim_srat))
resfile('observed srat: ' + str(fitpars1['sobs']))
resfile("p(LRT) = " + str(p_lrt))
resfile("KSP(obs) = " + str(fitpars1['ksp']))
resfile("mean(sim_ksp) = " + str(np.mean(sim_ksp)))
resfile("Merit(obs) = " + str(fitpars1['merit']))
resfile("mean(sim_merit) = " + str(np.mean(sim_merit)))
resfile("Srat(obs) = " + str(fitpars1['sobs']))
resfile("mean(sim_srat) = " + str(np.mean(sim_srat)))
### Step 6: Compute errors of Bayesian posterior probabilities
pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp)))
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
### Display results on screen and make funky plots
resfile("Bayesian p-value for maximum power P_max = " + str(p_maxpow) + " +/- " + str(pmaxpow_err))
resfile("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
resfile("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
resfile("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
resfile("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
resfile("Bayesian p-value for Likelihood Ratio: " + str(p_lrt) + " +/- " + str(plrt_err))
if self.plot:
n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color="cyan", histtype='stepfilled')
plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy')
plt.savefig(self.namestr + '_lrt.png', format='png')
plt.close()
summary = {"p_lrt": [p_lrt, plrt_err], "p_maxpow": [p_maxpow, pmaxpow_err],
"p_deviance": [p_deviance, pdeviance_err], "p_ksp": [p_ksp, pksp_err],
"p_merit": [p_merit, pmerit_err], "p_srat": [p_srat, psrat_err], "postmean": mcobs.mean,
"posterr": mcobs.std, "postquantiles": mcobs.ci, "rhat": mcobs.rhat, "acor": mcobs.acor,
"acceptance": mcobs.acceptance}
return psfit, fakeper, summary
def find_periodicity(self, func, par,
fitmethod='bfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
parname=None,
noise=-1,
use_emcee=True,
searchfreq=None):
"""
Find periodicities in observed data and compute significance via MCMCs.
First, fit the periodogram with func and compute the
maximum-a-posteriori (MAP) estimate.
Divide the data by the MAP model; for a perfect data-model fit,
the resulting residuals should follow a chi-square distribution
with two degrees of freedom.
Find the highest power in the residuals and its frequency.
Sample the posterior distribution of parameters for func using MCMC,
and create fake periodograms from samples of the posterior.
For each fake periodogram, find the MAP estimate, divide out the
MAP model and find the highest power in that periodogram.
Create a posterior distribution of maximum powers and compute
a posterior predictive p-value of seeing the maximum power
in the data under the null hypothesis (no QPO).
Parameters
----------
func : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
par : {list, array-like}
Input guesses for the parameters taken by func.
The number of elements in this list or array must match the
number of parameters k taken by func.
fitmethod : string, optional, default "bfgs"
Choose the optimization algorithm used when minimizing the
-log-likelihood. Choices are listed in mle.py, but the default
(bfgs) should be sufficient for most applications.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
noise: int, optional, default -1
The index for the noise parameter in func.
In the pre-defined models, this index is *always* -1.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
"""
## the file name where the output will be stored
resfilename = self.namestr + "_findperiodicity_results.dat"
## open the output log file
resfile = utils.TwoPrint(resfilename)
### step 1: fit model to observation
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m)
bindict = fitpars['bindict']
# print('popt: ' + str(fitpars['popt']))
## which posterior do I need to use?
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func)
else:
lpost = posterior.StackPerPosterior(self.ps, func, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars['popt'],
tcov=fitpars['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=True,
plot=self.plot,
printobj=resfile,
m=self.m)
### Step 3: create fake periodograms out of MCMCs
fakeper = mcobs.simulate_periodogram(nsim=nsim)
sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \
sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [], [], [], [], [], [], []
bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0])))
bins = [1, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000]
binlist = [r for r in fitpars["bindict"].keys()]
        nbins = len(binlist) // 4
sain = copy.copy(fitpars['popt'])
# print('popt2: ' + str(fitpars['popt']))
### Step 4: Fit fake periodograms:
for i, x in enumerate(fakeper):
try:
# print('popt' + str(i) + 'a : ' + str(fitpars['popt']))
fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)
# print('popt' + str(i) + 'b : ' + str(fitpars['popt']))
sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m)
# print('popt' + str(i) + 'c : ' + str(fitpars['popt']))
sim_pars_all.append(sim_pars)
sim_deviance.append(sim_pars['deviance'])
sim_ksp.append(sim_pars['ksp'])
sim_maxpow.append(sim_pars['maxpow'])
sim_merit.append(sim_pars['merit'])
sim_fpeak.append(sim_pars['maxfreq'])
sim_y0.append(sim_pars['mfit'][sim_pars['maxind']])
sim_srat.append(sim_pars['sobs'])
sim_s3max.append(sim_pars['s3max'])
sim_s5max.append(sim_pars['s5max'])
sim_s11max.append(sim_pars['s11max'])
except KeyboardInterrupt:
break
# except:
# print("Simulation failed! Continuing ...")
# continue
# print('popt' + str(i) + 'd : ' + str(fitpars['popt']))
# print('popt3: ' + str(fitpars['popt']))
### upper limit is the power in the sorted array where p_maxpow would be 0.05
### i.e. when only 0.05*nsim simulations are higher than this
### note: sometimes simulations fail, therefore the 5% limit should be 0.05*len(sims)
fiveperlim = int(0.05 * len(sim_maxpow))
if fiveperlim == 0:
resfile('Warning! Too few simulations to compute five percent limit reliably!')
fiveperlim = 1
ninetyfiveperlim = len(sim_maxpow) - fiveperlim
# print('popt4: ' + str(fitpars['popt']))
bindicts = [x["bindict"] for x in sim_pars_all]
### get out binned powers:
maxpows_all = {}
binprob = {}
for b in bins[:nbins]:
binps = fitpars['bindict']['bin' + str(b)]
bmaxpow = np.array([x["bmax" + str(b)] for x in bindicts])
maxpows_all["bin" + str(b)] = bmaxpow
bindict['sim_bmaxpow' + str(b)] = bmaxpow
p_bmaxpow = float(len([x for x in bmaxpow if x > fitpars['bindict']["bmax" + str(b)]])) / float(
len(bmaxpow))
bindict["p_maxpow" + str(b)] = p_bmaxpow
bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow)))
bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err
sim_bmaxpow_sort = np.msort(bmaxpow)
### note: this is the limit for 2*I/S --> multiply by S to get powers for each frequency
### Like everything else, this is n-trial corrected!
# print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort)))
resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim))
bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim]
bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul
resfile('The posterior p-value for the maximum residual power for a binning of ' + str(
self.ps.df * b) + 'Hz is p = ' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err))
resfile('The corresponding value of the T_R statistic at frequency f = ' + str(
fitpars["bindict"]["bmaxfreq" + str(b)]) + ' is 2I/S = ' + str(fitpars['bindict']["bmax" + str(b)]))
resfile('The upper limit on the T_R statistic is 2I/S = ' + str(bmaxpow_ul))
### now turn upper limit into an rms amplitude:
## first compute broadband noise model for binned frequencies
bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt'])
resfile("bintemplate[0]: " + str(bintemplate[0]))
## then compute upper limits for powers I_j depending on frequency
binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate
## now compute rms amplitude at 40, 70, 100 and 300 Hz
## first, convert powers into rms normalization, if they're not already
if self.ps.norm == 'leahy':
binpowers = binpowers / (self.ps.df * b * self.ps.nphots)
elif self.ps.norm == 'variance':
binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots ** 2.0)
# print('len(binps.freq): ' + str(len(binps.freq)))
# print('len(binpowers): ' + str(len(binpowers)))
if searchfreq is None:
searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0]
## for 40 Hz:
print(searchfreq)
for bc in searchfreq:
if bc > (binps.freq[1] - binps.freq[0]):
bind = np.searchsorted(binps.freq, bc) - 1
bpow = binpowers[bind]
brms = np.sqrt(bpow * b * self.ps.df)
resfile('The upper limit on the power at ' + str(bc) +
'Hz for a binning of ' + str(b) + ' is P = ' +
str(bpow * (self.ps.df * b * self.ps.nphots)))
resfile('The upper limit on the rms amplitude at ' + str(bc) +
'Hz for a binning of ' + str(b) + ' is rms = ' + str(brms))
bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms
else:
continue
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_maxpow = float(len([x for x in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow))
p_deviance = float(len([x for x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit))
p_srat = float(len([x for x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat))
p_s3max = float(len([x for x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max))
p_s5max = float(len([x for x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max))
p_s11max = float(len([x for x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max))
### sort maximum powers from lowest to highest
sim_maxpow_sort = np.msort(sim_maxpow)
sim_s3max_sort = np.msort(sim_s3max)
sim_s5max_sort = np.msort(sim_s5max)
sim_s11max_sort = np.msort(sim_s11max)
### note: this is the limit for 2*I/S --> multiply by S to get powers for each frequency
### Like everything else, this is n-trial corrected!
maxpow_ul = sim_maxpow_sort[ninetyfiveperlim]
### Step 6: Compute errors of Bayesian posterior probabilities
pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp)))
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp)))
ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp)))
ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp)))
### Display results on screen and make funky plots
resfile("Bayesian p-value for maximum power P_max = " + str(p_maxpow) + " +/- " + str(pmaxpow_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(maxpow_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s3max) + " +/- " + str(ps3max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s3max_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s5max) + " +/- " + str(ps5max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s5max_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s11max) + " +/- " + str(ps11max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s11max_ul))
resfile("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
resfile("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
resfile("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
resfile("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
if self.plot:
plt.subplot(2, 2, 1)
n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy')
plt.title('unsmoothed data', fontsize=12)
plt.subplot(2, 2, 2)
n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (3) data', fontsize=12)
plt.subplot(2, 2, 3)
            n, bins, patches = plt.hist(sim_s5max, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (5) data/model outlier', fontsize=12)
plt.subplot(2, 2, 4)
            n, bins, patches = plt.hist(sim_s11max, bins=100, normed=True, color="cyan", histtype='stepfilled')
            xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s11max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (11) data', fontsize=12)
plt.savefig(self.namestr + '_maxpow.png', format='png')
plt.close()
results = {"fitpars": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs,
'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul,
'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err],
'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err],
'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err], 'fitpars': fitpars,
"postmean": mcobs.mean, "posterr": mcobs.std, "postquantiles": mcobs.ci, "rhat": mcobs.rhat,
"acor": mcobs.acor, "acceptance": mcobs.acceptance}
return results
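    # Illustrative call pattern (not part of the original code), assuming a
    # broadband noise model `func` with starting parameters `par`:
    #     results = b.find_periodicity(func, par, nchain=500, niter=200, nsim=1000)
    # results['p_maxpow'] then collects the simulated maximum powers, the
    # posterior predictive p-value and its uncertainty.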
def find_qpo(self, func, ain,
fitmethod='constbfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
parname=None,
plotstr=None,
use_emcee=True):
"""
Find QPOs by fitting a QPO + background model to *every*
frequency.
NOTE: I rarely ever use this because it's really computationally
expensive.
Parameters
----------
func : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
        ain : {list, array-like}
Input guesses for the parameters taken by func.
The number of elements in this list or array must match the
number of parameters k taken by func.
fitmethod : string, optional, default "bfgs"
Choose the optimization algorithm used when minimizing the
-log-likelihood. Choices are listed in mle.py, but the default
(bfgs) should be sufficient for most applications.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
        plotstr : string, optional, default None
            Base string for the names of the diagnostic plots produced during
            the QPO search. If None, self.namestr is used. The noise parameter
            of func is assumed to be the last one (index -1), as in the
            pre-defined models.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
"""
        if plotstr is None:
plotstr = self.namestr
funcname = str(func).split()[1]
# print("<< --- len(self.ps beginning): " + str(len(self.ps.ps)))
### step 1: fit model to observation
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m)
# print("<< --- len(self.ps beginning): " + str(len(self.ps.ps)))
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func)
else:
lpost = posterior.StackPerPosterior(self.ps, func, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars['popt'],
tcov=fitpars['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
                                           use_emcee=use_emcee,
plot=self.plot,
m=self.m)
### find optimum QPO values for the real data
obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes')
### simulate lots of realizations of the broadband noise model from MCMCs
funcfake = mcobs.simulate_periodogram(nsim=nsim)
### empty lists to store simulated LRTS and parameters in
sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [], [], [], []
simno = 0
### run QPO search on each and return likelihood ratios parameters for each
for x in funcfake:
try:
simno = simno + 1
sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False)
slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True,
plotname=plotstr + '_sim' + str(simno) + '_qposearch')
sim_lrt.append(slrt)
sim_optpars.append(soptpars)
sim_qpopars.append(sqpopars)
sim_deviance.append(soptpars['deviance'])
sim_ksp.append(soptpars['ksp'])
sim_merit.append(soptpars['merit'])
sim_srat.append(soptpars['sobs'])
except KeyboardInterrupt:
break
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_deviance = float(len([x for x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > optpars['merit']])) / float(len(sim_merit))
p_lrt = float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt))
p_srat = float(len([x for x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat))
print("p(LRT) = " + str(p_lrt))
# print("LRT(obs) = " + str(obslrt))
# print("mean(sim_lrt) = " + str(np.mean(sim_lrt)))
# print("Deviance(obs) = " + str(fitpars1['deviance']))
# print("mean(sim_deviance) = " + str(np.mean(sim_deviance)))
print("KSP(obs) = " + str(optpars['ksp']))
print("mean(sim_ksp) = " + str(np.mean(sim_ksp)))
print("Merit(obs) = " + str(optpars['merit']))
print("mean(sim_merit) = " + str(np.mean(sim_merit)))
print("Srat(obs) = " + str(optpars['sobs']))
print("mean(sim_srat) = " + str(np.mean(sim_srat)))
### Step 6: Compute errors of Bayesian posterior probabilities
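        ### Note (added): these are binomial standard errors, sqrt(p * (1 - p) / N);
        ### every sim_* list is filled in the same loop above, so len(sim_ksp)
        ### equals the number of completed simulations for each statistic.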
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
### Display results on screen and make funky plots
print("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
print("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
print("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
print("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
print("Bayesian p-value for Likelihood Ratio: " + str(p_lrt) + " +/- " + str(plrt_err))
if self.plot:
n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled')
plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m')
plt.savefig(self.namestr + '_qpolrt.png', format='png')
plt.close()
summary = {"p_lrt": [p_lrt, plrt_err],
"p_deviance": [p_deviance, pdeviance_err],
"p_ksp": [p_ksp, pksp_err],
"p_merit": [p_merit, pmerit_err],
"p_srat": [p_srat, psrat_err],
"postmean": mcobs.mean,
"posterr": mcobs.std,
"postquantiles": mcobs.ci,
"rhat": mcobs.rhat,
"acor": mcobs.acor,
"acceptance": mcobs.acceptance}
return summary
def print_summary(self, summary):
"""
Print a summary of the results.
NOT USED!
"""
try:
keys = summary.keys()
except AttributeError:
raise Exception("Summary must be a dictionary!")
probs = dict()
postpars = dict()
### sort out p-values and posterior distribution of parameters
for x in keys:
if x[:2] == 'p_':
probs[x] = summary[x]
else:
postpars[x] = summary[x]
print("The ensemble acceptance rate is " + str(postpars["acceptance"]) + " .")
try:
print("The autocorrelation times are: " + str(postpars["acor"]))
except KeyError:
print("Module Acor not found. Cannot compute autocorrelation times for the parameters")
for i, x in enumerate(postpars["rhat"]):
print("The $R_hat$ value for Parameter " + str(i) + " is " + str(x))
### print posterior summary of parameters:
print("-- Posterior Summary of Parameters: \n")
print("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
print("---------------------------------------------\n")
for i in range(len(postpars['postmean'])):
print("theta[" + str(i) + "] \t " + str(postpars['postmean'][i]) + "\t" + str(
postpars['posterr'][i]) + "\t" + str(postpars['postquantiles'][i][0]) + "\t" + str(
postpars["postquantiles"][i][1]) + "\n")
for x in probs.keys():
if x == 'p_lrt':
print("Bayesian p-value for Likelihood Ratio: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_deviance':
print("Bayesian p-value for deviance D = " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_ksp':
print("Bayesian p-value for KS test: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_merit':
print("Bayesian p-value for Merit function: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_srat':
print("Bayesian p-value for the sum of residuals: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_maxpow':
if "fitpars" in probs.keys():
print("Highest [unsmoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["maxpow"]))
print("Bayesian p-value for the highest [unsmoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s3max':
if "fitpars" in probs.keys():
print("Highest [3 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s3maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s3max"]))
print("Bayesian p-value for the highest [3 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s5max':
if "fitpars" in probs.keys():
print("Highest [5 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s5maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s5max"]))
print("Bayesian p-value for the highest [5 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s11max':
if "fitpars" in probs.keys():
print("Highest [11 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s11maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s11max"]))
print("Bayesian p-value for the highest [11 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
return
def write_summary(self, summary, namestr=None):
"""
Write a summary of the analysis to file.
NOT USED!
:param summary:
:param namestr:
:return:
"""
if not namestr:
namestr = self.namestr
try:
keys = summary.keys()
except AttributeError:
raise Exception("Summary must be a dictionary!")
probs = dict()
postpars = dict()
### sort out p-values and posterior distribution of parameters
for x in keys:
if x[:2] == 'p_':
probs[x] = summary[x]
else:
postpars[x] = summary[x]
picklefile = open(namestr + "_summary_pickle.dat", "w")
pickle.dump(summary, picklefile)
picklefile.close()
file = open(namestr + "_summary.dat", "w")
file.write("The ensemble acceptance rate is " + str(postpars["acceptance"]) + " .\n")
try:
file.write("The autocorrelation times are: " + str(postpars["acor"]) + "\n")
except KeyError:
file.write("Module Acor not found. Cannot compute autocorrelation times for the parameters \n")
for i, x in enumerate(postpars["rhat"]):
file.write("The $R_hat$ value for Parameter " + str(i) + " is " + str(x) + "\n")
### print posterior summary of parameters:
file.write("-- Posterior Summary of Parameters: \n")
file.write("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
file.write("---------------------------------------------\n")
for i in range(len(postpars['postmean'])):
file.write("theta[" + str(i) + "] \t " + str(postpars['postmean'][i]) + "\t" + str(
postpars['posterr'][i]) + "\t" + str(postpars['postquantiles'][i][0]) + "\t" + str(
postpars["postquantiles"][i][1]) + "\n")
for x in probs.keys():
if x == 'p_lrt':
file.write(
"Bayesian p-value for Likelihood Ratio: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_deviance':
file.write("Bayesian p-value for deviance D = " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_ksp':
file.write("Bayesian p-value for KS test: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_merit':
file.write(
"Bayesian p-value for Merit function: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_srat':
file.write("Bayesian p-value for the sum of residuals: " + str(probs[x][0]) + " +/- " + str(
probs[x][1]) + "\n")
elif x == 'p_maxpow':
file.write("Bayesian p-value for the highest [unsmoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['maxpow_ul']) + "\n")
elif x == 'p_s3max':
file.write("Bayesian p-value for the highest [3 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s3max_ul']) + "\n")
elif x == 'p_s5max':
file.write("Bayesian p-value for the highest [5 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s5max_ul']) + "\n")
elif x == 'p_s11max':
file.write("Bayesian p-value for the highest [11 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s11max_ul']) + "\n")
return
def plot_posteriors(namestr='test', **pars):
        plotkeys = list(pars.keys())
        N = len(plotkeys)
        ### number of parameters
        fig = plt.figure(figsize=(2, N / 2 + 1))
        plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2)
        for i in range(N):
            ax = fig.add_subplot(N // 2 + 1, 2, i + 1)
            n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30)
            ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4)
            ax.text(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * max(n),
                    "p = " + str(pars[plotkeys[i]][1]))
            ax.set_title("Posterior for " + plotkeys[i])
return
| 2.34375 | 2 |
various/closure.py | sideroff/python-exercises | 0 | 12795235 |
def parent(msg, flag: bool):
local_variable = '15'
print('parent executed {}'.format(msg))
def first_child():
print('first_child {}'.format(msg))
def second_child():
        print('second_child {}, {}'.format(msg, local_variable))
return (
first_child if flag
else second_child
)
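# Illustrative note (added): parent('testing', True) returns first_child, which
# still closes over `msg`; parent('testing', False) would return second_child,
# which additionally captures `local_variable` from the enclosing scope.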
global_variable = parent('testing', True)
print('-----')
global_variable() | 2.90625 | 3 |
Text/LSTM.py | walter114/TIPRDC | 13 | 12795236 | import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
class FeatureExtractor(nn.Module):
    def __init__(self, vocab_size, embedding_dim=300, hidden_dim=300):
super(FeatureExtractor, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
def forward(self, sentence):
x = self.embedding(sentence)
lstm_out, lstm_hidden = self.lstm(x)
return lstm_out
class Classifier(nn.Module):
def __init__(self, target_size=2, hidden_dim=300):
super(Classifier, self).__init__()
self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.fc = nn.Sequential(
nn.Linear(hidden_dim, 150),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(150, target_size)
)
def forward(self, x, sentence_length):
self.lstm1.flatten_parameters()
self.lstm2.flatten_parameters()
lstm1_out, lstm1_hidden = self.lstm1(x)
lstm2_out, lstm2_hidden = self.lstm2(lstm1_out)
out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0)
out = self.fc(out)
return out
class MutlInfo(nn.Module):
    def __init__(self, vocab_size, target_size=2, embedding_dim=300, hidden_dim=300):
super(MutlInfo, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.fc = nn.Sequential(
nn.Linear(2 * hidden_dim + target_size, 150),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(150, 1)
)
def forward(self, sentence, z, u, sentence_length):
self.lstm1.flatten_parameters()
self.lstm2.flatten_parameters()
self.lstm3.flatten_parameters()
x = self.embedding(sentence)
lstm1_out, lstm1_hidden = self.lstm1(x)
lstm2_out, lstm2_hidden = self.lstm2(lstm1_out)
lstm3_out, lstm3_hidden = self.lstm3(lstm2_out)
x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0)
lstm2_out_z, lstm2_hidden_z = self.lstm2(z)
lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z)
z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0)
out = torch.cat((x_new, z_new, u), dim=1)
out = self.fc(out)
return out
def info_loss(MI, x, x_length, z, u, x_prime, x_prime_length):
Ej = -F.softplus(-MI(x, z, u, x_length)).mean()
Em = F.softplus(MI(x_prime, z, u, x_prime_length)).mean()
return Ej - Em
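# Hedged usage sketch (added; tensor names and shapes below are assumptions, not
# part of the original module). info_loss implements a Jensen-Shannon-style
# mutual-information bound that contrasts paired (x, z) samples against shuffled
# negatives x_prime:
#
#   extractor = FeatureExtractor(10000)            # vocabulary of 10k tokens
#   classifier = Classifier(target_size=2)
#   mi_net = MutlInfo(10000, target_size=2)
#   z = extractor(sentences)                       # (batch, seq_len, 300)
#   logits = classifier(z, sentence_lengths)       # (batch, 2)
#   mi_bound = info_loss(mi_net, sentences, sentence_lengths,
#                        z, one_hot_labels, shuffled_sentences, shuffled_lengths)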
| 2.90625 | 3 |
src/testing/task_plot_private_test_demand_shares.py | covid-19-impact-lab/sid-germany | 4 | 12795237 | import warnings
import matplotlib.pyplot as plt
import pandas as pd
import pytask
import seaborn as sns
from src.config import BLD
from src.config import PLOT_END_DATE
from src.config import PLOT_SIZE
from src.config import PLOT_START_DATE
from src.config import SRC
from src.plotting.plotting import style_plot
from src.testing.shared import get_piecewise_linear_interpolation
@pytask.mark.depends_on(
{
"params": BLD / "params.pkl",
"rki": BLD / "data" / "processed_time_series" / "rki.pkl",
"plotting.py": SRC / "plotting" / "plotting.py",
"testing_shared.py": SRC / "testing" / "shared.py",
}
)
@pytask.mark.produces(
BLD / "figures" / "data" / "testing" / "private_test_demand_shares.pdf"
)
def task_plot_private_test_demand_shares(depends_on, produces):
params = pd.read_pickle(depends_on["params"])
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="indexing past lexsort depth may impact performance."
)
params_slice = params.loc[("rapid_test_demand", "private_demand")]
private_demand_shares = get_piecewise_linear_interpolation(params_slice)
private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE]
fig, ax = plt.subplots(figsize=PLOT_SIZE)
sns.lineplot(
x=private_demand_shares.index,
y=private_demand_shares,
ax=ax,
)
ax.set_title(
"Private Rapid Test Demand\n"
"(Share of Individuals who Do a Rapid Test \n"
"When a Household Member Tests Positive Or Becomes Symptomatic Or \n"
"When Developing Symptoms but not Receiving a Rapid Test Or \n"
"When Participating in Some Private Events)"
)
fig, ax = style_plot(fig, ax)
fig.tight_layout()
fig.savefig(produces)
plt.close()
| 2.234375 | 2 |
services/trainer/lib/network.py | ZeshanA/transport | 1 | 12795238 | import json
import logging
from typing import Dict
import numpy
class ClientSet:
def __init__(self):
self.sockets_by_host_id = {}
self.route_ids_by_host_id = {}
self.host_ids_by_socket = {}
def add(self, host_id, socket):
logging.info(f"Registered hostID '{host_id}'")
self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id
def remove(self, host_id=None, socket=None):
if socket:
host_id = self.host_ids_by_socket[socket]
if not socket:
socket = self.sockets_by_host_id[host_id]
logging.info(f"Deleting hostID '{host_id}'")
self.host_ids_by_socket.pop(socket, None)
self.route_ids_by_host_id.pop(host_id, None)
self.sockets_by_host_id.pop(host_id, None)
def get_socket(self, host_id):
return self.sockets_by_host_id.get(host_id)
def get_host_id(self, socket):
return self.host_ids_by_socket.get(socket)
def get_route_id(self, host_id=None, socket=None):
if socket:
host_id = self.host_ids_by_socket[socket]
return self.route_ids_by_host_id.get(host_id)
def set_route_id(self, host_id=None, socket=None, route_id=None):
if socket:
host_id = self.host_ids_by_socket[socket]
logging.info(f"Assigning routeID '{route_id}' to hostID '{host_id}'")
self.route_ids_by_host_id[host_id] = route_id
def clear_route_id(self, host_id=None, socket=None):
if socket:
host_id = self.host_ids_by_socket[socket]
route_id = self.route_ids_by_host_id[host_id]
logging.info(f"Removing routeID '{route_id}' from hostID '{host_id}'")
del self.route_ids_by_host_id[host_id]
def connected_hosts_count(self):
return len(self.host_ids_by_socket)
def current_state(self):
return self.route_ids_by_host_id
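# Illustrative usage of ClientSet (added comment; the surrounding trainer service
# is assumed to drive these calls from its websocket handlers):
#   clients = ClientSet()
#   clients.add("host-1", websocket)                      # on registration
#   clients.set_route_id(host_id="host-1", route_id="42")
#   clients.get_route_id(socket=websocket)                # -> "42"
#   clients.remove(socket=websocket)                      # on disconnect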
async def send_json(websocket, event: str, msg: Dict = None):
"""
Send a JSON event with optional additional fields via the given websocket connection.
:param websocket: the websocket to send the message on
:param event: the desired value of the "event" field inside the JSON message
:param msg: a dict containing any additional fields for the JSON message to contain
:return:
"""
if msg is None:
msg = {}
msg['event'] = event
json_msg = json.dumps(msg, default=default_json_encoder)
await websocket.send(json_msg)
async def recv_json(websocket):
response = await websocket.recv()
return json.loads(response)
def default_json_encoder(o):
if isinstance(o, numpy.int64):
return int(o)
raise TypeError
| 2.65625 | 3 |
scsr_api/user/api.py | hiperlogic/scsr-api | 1 | 12795239 |
from flask.views import MethodView
from flask import jsonify, request, abort, render_template
import uuid
import json
from jsonschema import Draft4Validator
from jsonschema.exceptions import best_match
from datetime import datetime
from sys_app.decorators import app_required
from user.models.user import UserDB
class UserAPI(MethodView):
"""
API class for user manipulation - Retrieval, Storage, Update and (Logical) Delete
Attributes:
DATA_PER_PAGE: Pagination quantification parameter
decorators: decorators applied to each one of the methods
Methods:
================================
Name: __init__
Parameters: None
Role: Constructor
Result: Setup DATA_PER_PAGE and provides basic validation regarding data states
Requirements: None
================================
Name: get
Parameters: user_id (optional)
Role: Data Retrieval
Result: Return all users or one specific user if requested - or access denied
Requirements: Authentication
================================
Name: post
Parameters: None
Role: Creates and stores user data on the database
Result: Returns the user stored - or access denied
Requirements: Authentication
================================
Name: Put
Parameters: user_id
Role: Data modification
Result: Return the data in the modified state
Requirements: Authentication
================================
Name: Delete
Parameters: user_id
Role: Data modification - logical deletion
Result: Confirmation of deletion
Requirements: Authentication
"""
decorators = [app_required]
def __init__(self):
self.DATA_PER_PAGE=10
if (request.method != 'GET' and request.method != 'DELETE') and not request.json:
abort(400)
def get(self, user_id):
if user_id:
            user = UserDB.objects.filter(external_id=user_id, live=True).first()
if user:
response = {
"result": "ok",
"user": user_obj(user)
}
return jsonify(response), 200
else:
return jsonify({}), 404
else:
users = UserDB.objects.filter(live=True)
page = int(request.args.get('page',1))
users = users.paginate(page=page, per_page = self.DATA_PER_PAGE)
response = {
"result": "ok",
"links": [
{
"href": "/users/?page=%s" % page,
"rel": "self"
}
],
"users": UserDB.users_obj(users)
}
if users.has_prev:
response["links"].append(
{
"href": "/users/?page=%s" % (users.prev_num),
"rel": "previous"
}
)
if users.has_next:
response["links"].append(
{
"href": "/users/?page=%s" % (users.next_num),
"rel": "next"
}
)
return jsonify(response), 200
def post(self):
user_json = request.json
error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json))
if error:
return jsonify({"error": error.message}), 400
else:
            user = UserDB(
external_id=str(uuid.uuid4()),
country = user_json.get("country"),
state = user_json.get("state"),
city = user_json.get("city"),
lang = user_json.get("lang"),
name = user_json.get("name"),
surname = user_json.get("surname"),
username = user_json.get("username"),
email = user_json.get("email"),
password = user_json.get("password"),
bio = user_json.ger("bio"),
live = True,
created = datetime.utcnow()
).save()
response = {
"result": "ok",
"user": user_obj(user)
}
return jsonify(response), 201
def put(self, user_id):
user = UserDB.objects.filter(external_id=user_id, live=True).first()
if not user:
return jsonify({}), 404
user_json = request.json
error = best_match(Draft4Validator(UserDB.getSchema()).iter_errors(user_json))
if error:
return jsonify({"error": error.message}), 400
else:
user.country = user_json.get("country")
user.state = user_json.get("state")
user.city = user_json.get("city")
user.name = user_json.get("name")
user.surname = user_json.get("surname")
user.username = user_json.get("username")
user.email = user_json.get("email")
user.password = user_json.get("password")
user.save()
response = {
"result": "ok",
"user": user.get_object()
}
return jsonify(response), 200
def delete(self, user_id):
        user = UserDB.objects.filter(external_id=user_id, live=True).first()
if not user:
return jsonify({}), 404
user.live = False
user.save()
return jsonify({}), 204
| 2.28125 | 2 |
acmicpc/3058.py | juseongkr/BOJ | 7 | 12795240 |
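# BOJ 3058 (added comment): for each test case, read a line of integers and print
# the sum of the even values followed by the smallest even value; the input is
# assumed to always contain at least one even number, otherwise m[0] would fail.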
for _ in range(int(input())):
l = [*map(int, input().split())]
s, m = 0, []
for i in l:
if i % 2 == 0:
s += i
m.append(i)
m.sort()
print(s, end=' ')
print(m[0])
| 2.875 | 3 |
dict_zip/__init__.py | kitsuyui/dict_zip | 0 | 12795241 | import functools
def dict_zip(*dictionaries):
common_keys = functools.reduce(lambda x, y: x | y,
(set(d.keys()) for d in dictionaries),
set())
return {
key: tuple(d[key] for d in dictionaries)
for key in common_keys
if all(key in d for d in dictionaries)
}
def dict_zip_longest(*dictionaries, fillvalue=None):
common_keys = functools.reduce(lambda x, y: x | y,
(set(d.keys()) for d in dictionaries),
set())
return {
key: tuple(d.get(key, fillvalue) for d in dictionaries)
for key in common_keys
}
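# Example behaviour (added; follows directly from the implementations above):
#   dict_zip({"a": 1, "b": 2}, {"a": 10, "c": 30})
#       -> {"a": (1, 10)}                                 # only keys present in every dict
#   dict_zip_longest({"a": 1, "b": 2}, {"a": 10, "c": 30}, fillvalue=0)
#       -> {"a": (1, 10), "b": (2, 0), "c": (0, 30)}      # union of keys, gaps filled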
| 3.3125 | 3 |
tests/test_utils.py | Ouranosinc/Magpie | 0 | 12795242 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_utils
----------------------------------
Tests for the various utility operations employed by Magpie.
"""
import os
import unittest
from distutils.version import LooseVersion
import mock
import six
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk
from pyramid.settings import asbool
from magpie import __meta__, constants
from magpie.api import exception as ax
from magpie.api import generic as ag
from magpie.api import requests as ar
from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url
from tests import runner, utils
class DummyEnum(ExtendedEnum):
VALUE1 = "value-1"
VALUE2 = "value-2"
@runner.MAGPIE_TEST_LOCAL
@runner.MAGPIE_TEST_UTILS
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.version = __meta__.__version__ # only local test
def test_magpie_prefix_direct_request(self):
base_url = "http://localhost"
for url in ["http://localhost", "http://localhost/magpie"]:
app = utils.get_test_magpie_app({"magpie.url": url})
path = "/version"
resp = utils.test_request(app, "GET", path)
utils.check_response_basic_info(resp)
utils.check_val_equal(resp.request.url, base_url + path,
"Proxied path should have been auto-resolved [URL: {}].".format(url))
def test_magpie_prefix_request_with_multiple_route_url(self):
"""
Test multiple request routing with fixed "MAGPIE_URL" within the API application.
Signin with invalid credentials will call "/signin" followed by sub-request "/signin_internal" and finally
"ZigguratSignInBadAuth". Both "/signin" and "ZigguratSignInBadAuth" use "get_multiformat_body".
"""
from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked
base_url = "http://localhost"
def mock_get_post(real_func, *args, **kwargs):
if args[1] != "password":
return real_func(*args, **kwargs)
request, args = args[0], args[1:]
utils.check_val_equal(request.url, base_url + _paths.pop(0),
"Proxied path should have been auto-resolved [URL: {}].".format(url))
return real_func(request, *args, **kwargs)
for url in ["http://localhost", "http://localhost/magpie"]:
# paths are reduced (pop in mock) each time a post to get the 'password' is called in 'login' module
# this combination should happen twice, one in signin route and another on the redirected internal login
_paths = ["/signin", "/signin_internal"]
app = utils.get_test_magpie_app({"magpie.url": url})
with mock.patch("magpie.api.requests.get_value_multiformat_body_checked",
side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)):
data = {"user_name": "foo", "password": "<PASSWORD>"}
headers = {"Content-Type": CONTENT_TYPE_JSON, "Accept": CONTENT_TYPE_JSON}
resp = utils.test_request(app, "POST", _paths[0], json=data, headers=headers, expect_errors=True)
if LooseVersion(self.version) < LooseVersion("0.10.0"):
# user name doesn't exist
utils.check_response_basic_info(resp, expected_code=406, expected_method="POST")
else:
# invalid username/password credentials
utils.check_response_basic_info(resp, expected_code=401, expected_method="POST")
def test_get_header_split(self):
headers = {"Content-Type": "{}; charset=UTF-8".format(CONTENT_TYPE_JSON)}
for name in ["content_type", "content-type", "Content_Type", "Content-Type", "CONTENT_TYPE", "CONTENT-TYPE"]:
for split in [";,", ",;", ";", (",", ";"), [";", ","]]:
utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON)
def test_get_query_param(self):
resp = utils.mock_request("/some/path")
v = ar.get_query_param(resp, "value")
utils.check_val_equal(v, None)
resp = utils.mock_request("/some/path?other=test")
v = ar.get_query_param(resp, "value")
utils.check_val_equal(v, None)
resp = utils.mock_request("/some/path?other=test")
v = ar.get_query_param(resp, "value", True)
utils.check_val_equal(v, True)
resp = utils.mock_request("/some/path?value=test")
v = ar.get_query_param(resp, "value", True)
utils.check_val_equal(v, "test")
resp = utils.mock_request("/some/path?query=value")
v = ar.get_query_param(resp, "query")
utils.check_val_equal(v, "value")
resp = utils.mock_request("/some/path?QUERY=VALUE")
v = ar.get_query_param(resp, "query")
utils.check_val_equal(v, "VALUE")
resp = utils.mock_request("/some/path?QUERY=VALUE")
v = asbool(ar.get_query_param(resp, "query"))
utils.check_val_equal(v, False)
resp = utils.mock_request("/some/path?Query=TRUE")
v = asbool(ar.get_query_param(resp, "query"))
utils.check_val_equal(v, True)
def test_verify_param_proper_verifications_raised(self):
# with default error
utils.check_raises(lambda: ax.verify_param("b", param_compare=["a", "b"], not_in=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("x", param_compare=["a", "b"], is_in=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("1", param_compare=int, is_type=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("x", param_compare="x", not_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("x", param_compare="y", is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("", not_empty=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("abc", is_empty=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("abc", matches=True, param_compare=r"[A-Z]+"), HTTPBadRequest)
# with requested error
utils.check_raises(lambda:
ax.verify_param("b", param_compare=["a", "b"], not_in=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("x", param_compare=["a", "b"], is_in=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("1", param_compare=int, is_type=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("x", param_compare="x", not_equal=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("x", param_compare="y", is_equal=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("", not_empty=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("abc", is_empty=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda:
ax.verify_param("abc", matches=True, param_compare=r"[A-Z]+", http_error=HTTPForbidden),
HTTPForbidden)
def test_verify_param_proper_verifications_passed(self):
ax.verify_param("x", param_compare=["a", "b"], not_in=True)
ax.verify_param("b", param_compare=["a", "b"], is_in=True)
ax.verify_param(1, param_compare=int, is_type=True)
ax.verify_param("x", param_compare=six.string_types, is_type=True)
ax.verify_param("x", param_compare=str, is_type=True)
ax.verify_param("x", param_compare="y", not_equal=True)
ax.verify_param("x", param_compare="x", is_equal=True)
ax.verify_param(True, is_true=True)
ax.verify_param(False, is_false=True)
ax.verify_param(1, not_none=True)
ax.verify_param(None, is_none=True)
ax.verify_param("abc", not_empty=True)
ax.verify_param("", is_empty=True)
ax.verify_param("abc", matches=True, param_compare=r"[a-z]+")
def test_verify_param_args_incorrect_usage(self):
"""
Invalid usage of function raises internal server error instead of 'normal HTTP error'.
"""
utils.check_raises(lambda: ax.verify_param("b", param_compare=["a", "b"]),
HTTPInternalServerError, msg="missing any flag specification should be caught")
utils.check_raises(lambda: ax.verify_param("b", param_compare=["a", "b"], not_in=None), # noqa
HTTPInternalServerError, msg="flag specified with incorrect type should be caught")
utils.check_raises(lambda: ax.verify_param("b", not_in=True),
HTTPInternalServerError, msg="missing 'param_compare' for flag needing it should be caught")
utils.check_raises(lambda: ax.verify_param("b", param_compare=["b"], not_in=True, http_error=HTTPOk), # noqa
HTTPInternalServerError, msg="incorrect HTTP class to raise error should be caught")
utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True),
HTTPInternalServerError, msg="incorrect non-iterable compare should raise invalid type")
for flag in ["not_none", "not_empty", "not_in", "not_equal", "is_none", "is_empty", "is_in", "is_equal",
"is_true", "is_false", "is_type", "matches"]:
utils.check_raises(lambda: ax.verify_param("x", **{flag: 1}),
HTTPInternalServerError, msg="invalid flag '{}' type should be caught".format(flag))
def test_verify_param_compare_types(self):
"""
Arguments ``param`` and ``param_compare`` must be of same type for valid comparison, except for ``is_type``
where compare parameter must be the type directly.
.. versionchanged:: 2.0
Since ``param`` can come from user input, we should **NOT** raise ``HTTPInternalServerError`` because the
whole point of the method is to ensure that values are compared accordingly in a controlled fashion.
Therefore, error to be raised is an 'expected' validation failure (``HTTPBadRequest`` or whichever
``http_error`` provided) instead of runtime 'unexpected' processing error.
On the other hand, when ``is_type`` flag is requested, we know that ``param_compare`` must be a type.
Inversely, ``param_compare`` must not be a type if ``is_type`` is not requested, but other flags require
some form of comparison between values. We evaluate these use cases here.
.. seealso::
- :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases
"""
# compare flags expecting a value (can only consider it bad request because comparison values are valid)
utils.check_raises(lambda: ax.verify_param("1", param_compare=1, is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("1", param_compare=True, is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1, param_compare="1", is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest)
# when compare flags expect a value but type is provided, should still detect incorrect input
utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=str, is_equal=True), HTTPInternalServerError)
# compare flags expecting param_compare to be a type while value provided is not
utils.check_raises(lambda: ax.verify_param(1, param_compare="x", is_type=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=None, is_type=True), HTTPInternalServerError)
# compare flags expecting param_compare to be some container instance while value provided is not
utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=str, is_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=str, not_in=True), HTTPInternalServerError)
# strings cases handled correctly (no raise)
utils.check_no_raise(lambda: ax.verify_param("1", param_compare="1", is_equal=True))
def test_enum_values_listing(self):
utils.check_all_equal(DummyEnum.values(), ["value-1", "value-2"], any_order=True)
def test_enum_get_by_value(self):
utils.check_val_equal(DummyEnum.get("value-1"), DummyEnum.VALUE1)
utils.check_val_equal(DummyEnum.get("VALUE1"), DummyEnum.VALUE1)
utils.check_val_equal(DummyEnum.get("random"), None)
utils.check_val_equal(DummyEnum.get("random", "something"), "something")
def test_enum_other(self):
class OtherEnum(ExtendedEnum):
VALUE1 = DummyEnum.VALUE1.value # copy internal string representation
utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg="concrete enum elements should be different")
def test_evaluate_call_callable_incorrect_usage(self):
"""
Verifies that incorrect usage of utility is raised accordingly.
"""
utils.check_raises(lambda: ax.evaluate_call(int),
HTTPInternalServerError, msg="invalid callable non-lambda 'call' should raise")
utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa
HTTPInternalServerError, msg="invalid callable non-lambda 'fallback' should raise")
def test_evaluate_call_recursive_safeguard(self):
"""
        Validate that, if the internal function that handles formatting and generation of the resulting HTTP response
        itself raises an error (because of an implementation issue) while it is processing another pre-raised error,
        it does not end up in an endless recursive call stack of raised errors.
"""
mock_calls = {"counter": 0}
def mock_raise(*_, **__):
# avoid raising forever if the real safeguard fails doing its job
if mock_calls["counter"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX:
return TypeError()
mock_calls["counter"] += 1
raise TypeError()
def mock_lambda_call(*_, **__):
ax.evaluate_call(lambda: int("x"))
try:
app = utils.get_test_magpie_app()
with mock.patch("magpie.api.exception.generate_response_http_format", side_effect=mock_raise):
with mock.patch("magpie.api.login.login.get_session", side_effect=mock_lambda_call):
# Call request that ends up calling the response formatter via 'evaluate_call' itself raising to
# trigger 'mock_raise' recursively within 'raise_http' function.
# Since tweens are set up to format all response prior to return, the raised error will itself
# call 'raise_http' again each time operation fails, creating recursive raises.
# If recursive safeguard does its job, it should end up raising 'HTTPInternalServerError' directly
# (without further formatting attempt when reaching the MAX value), stopping the endless loop.
utils.test_request(app, "GET", "/session", expect_errors=True)
except AssertionError:
# Request called with above 'test_request' should catch the final 'HTTPInternalServerError' that is
# raised directly instead of usual TestResponse returned. That error is again re-raised as 'AssertionError'
pass
except Exception as exc:
self.fail("unexpected error during request creation should not raise: {}".format(exc))
# if our counter reached higher than the MAX (i.e.: 2*MAX from mock), the safeguard did not do its job
# if it did not get called at least more than once, use cases did not really get tested
utils.check_val_is_in(mock_calls["counter"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa
def test_format_content_json_str_invalid_usage(self):
non_json_serializable_content = {"key": HTTPInternalServerError()}
utils.check_raises(
lambda: ax.format_content_json_str(200, "", non_json_serializable_content, CONTENT_TYPE_JSON),
HTTPInternalServerError, msg="invalid content format expected as JSON serializable should raise"
)
def test_generate_response_http_format_invalid_usage(self):
utils.check_raises(
lambda: ax.generate_response_http_format(None, {}, {}, "", {}), # noqa
HTTPInternalServerError, msg="invalid arguments resulting in error during response generation should raise"
)
def test_guess_target_format_default(self):
request = utils.mock_request()
content_type, where = ag.guess_target_format(request)
utils.check_val_equal(content_type, CONTENT_TYPE_JSON)
utils.check_val_equal(where, True)
def test_get_magpie_url_defined_or_defaults(self):
# Disable constants globals() for every case, since it can pre-loaded from .env when running all tests.
# Always need to provide a settings container (even empty direct when nothing define in settings),
# otherwise 'get_constant' can find the current thread settings generated by any test app
with mock.patch.object(constants, "MAGPIE_URL", None):
with mock.patch.dict(os.environ, {"MAGPIE_URL": ""}):
url = utils.check_no_raise(lambda: get_magpie_url({}))
utils.check_val_equal(url, "http://localhost:2001")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.url": "https://test-server.com"}))
utils.check_val_equal(url, "https://test-server.com")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "localhost"}))
utils.check_val_equal(url, "http://localhost:2001")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "test-server.com"}))
utils.check_val_equal(url, "http://test-server.com:2001")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "test.com", "magpie.port": "1234"}))
utils.check_val_equal(url, "http://test.com:1234")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.port": "1234"}))
utils.check_val_equal(url, "http://localhost:1234")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.port": "9000", "magpie.scheme": "https"}))
utils.check_val_equal(url, "https://localhost:9000")
with mock.patch.dict(os.environ, {"MAGPIE_URL": "localhost:9871"}):
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.url": "https://test-server.com"}))
utils.check_val_equal(url, "https://test-server.com") # settings priority over envs
url = utils.check_no_raise(lambda: get_magpie_url({}))
utils.check_val_equal(url, "http://localhost:9871") # env URL found if not in settings
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "server"})) # ignored, URL priority
utils.check_val_equal(url, "http://localhost:9871") # URL fixed with missing scheme even if defined
with mock.patch.dict(os.environ, {"MAGPIE_URL": "", "MAGPIE_PORT": "1234"}):
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.url": "https://test-server.com"}))
utils.check_val_equal(url, "https://test-server.com") # ignore port, URL has priority
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "server"}))
utils.check_val_equal(url, "http://server:1234")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.scheme": "https"}))
utils.check_val_equal(url, "https://localhost:1234")
| 2.1875 | 2 |
tableloader/tableFunctions/volumes.py | DEQC/yamlloader | 0 | 12795243 | import os
import csv
from sqlalchemy import Table,literal_column,select
def importVolumes(connection, metadata, source_path):
invVolumes = Table('invVolumes', metadata)
invTypes = Table('invTypes', metadata)
with open(
os.path.join(source_path, 'invVolumes1.csv'), 'r'
) as groupVolumes:
volumereader = csv.reader(groupVolumes, delimiter=',')
for group in volumereader:
connection.execute(
invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1])))
)
with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes:
volumereader = csv.reader(groupVolumes, delimiter=',')
for group in volumereader:
connection.execute(
invVolumes.insert(),
typeID=group[1],
volume=group[0]
)
| 2.734375 | 3 |
msgvis/apps/importer/models.py | hds-lab/textvis-drg | 10 | 12795244 |
import sys, six
from django.db import IntegrityError
from django.utils.timezone import utc
from urlparse import urlparse
import json
from datetime import datetime
from email.utils import parsedate
from msgvis.apps.questions.models import Article, Question
from msgvis.apps.corpus.models import *
from msgvis.apps.enhance.models import set_message_sentiment
def create_an_user_from_json_obj(user_data, dataset_obj):
sender, created = Person.objects.get_or_create(dataset=dataset_obj,
original_id=user_data['id'])
if user_data.get('screen_name'):
sender.username = user_data['screen_name']
if user_data.get('name'):
sender.full_name = user_data['name']
if user_data.get('lang'):
sender.language = Language.objects.get_or_create(code=user_data['lang'])[0]
if user_data.get('friends_count'):
sender.friend_count = user_data['friends_count']
if user_data.get('followers_count'):
sender.follower_count = user_data['followers_count']
if user_data.get('statuses_count'):
sender.message_count = user_data['statuses_count']
if user_data.get('profile_image_url'):
sender.profile_image_url = user_data['profile_image_url']
sender.save()
return sender
def create_an_instance_from_json(json_str, dataset_obj):
"""
Given a dataset object, imports a tweet from json string into
the dataset.
"""
tweet_data = json.loads(json_str)
if tweet_data.get('lang'):
lang = tweet_data.get('lang')
if lang != "en":
return False
return get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj)
def get_or_create_language(code):
lang, created = Language.objects.get_or_create(code=code)
return lang
def get_or_create_timezone(name):
zone, created = Timezone.objects.get_or_create(name=name)
return zone
def get_or_create_messagetype(name):
mtype, created = MessageType.objects.get_or_create(name=name)
return mtype
def get_or_create_hashtag(hashtagblob):
ht, created = Hashtag.objects.get_or_create(text=hashtagblob['text'])
return ht
def get_or_create_url(urlblob):
urlparse_results = urlparse(urlblob['expanded_url'])
domain = urlparse_results.netloc
url, created = Url.objects.get_or_create(full_url=urlblob['expanded_url'],
domain=domain,
short_url=urlblob['url'])
return url
def get_or_create_media(mediablob):
media, created = Media.objects.get_or_create(type=mediablob['type'],
media_url=mediablob['media_url'])
return media
def handle_reply_to(status_id, user_id, screen_name, dataset_obj):
# update original tweet shared_count
tmp_tweet = {
'id': status_id,
'user': {
'id': user_id,
'screen_name': screen_name,
},
'in_reply_to_status_id': None
}
original_tweet = get_or_create_a_tweet_from_json_obj(tmp_tweet, dataset_obj)
if original_tweet is not None:
original_tweet.replied_to_count += 1
original_tweet.save()
original_tweet.sender.replied_to_count += 1
original_tweet.sender.save()
def handle_retweet(retweeted_status, dataset_obj):
# update original tweet shared_count
original_tweet = get_or_create_a_tweet_from_json_obj(retweeted_status, dataset_obj)
if original_tweet is not None:
original_tweet.shared_count += 1
original_tweet.save()
original_tweet.sender.shared_count += 1
original_tweet.sender.save()
def handle_entities(tweet, entities, dataset_obj):
# hashtags
if entities.get('hashtags') and len(entities['hashtags']) > 0:
tweet.contains_hashtag = True
for hashtag in entities['hashtags']:
tweet.hashtags.add(get_or_create_hashtag(hashtag))
# urls
if entities.get('urls') and len(entities['urls']) > 0:
tweet.contains_url = True
for url in entities['urls']:
tweet.urls.add(get_or_create_url(url))
# media
if entities.get('media') and len(entities['media']) > 0:
tweet.contains_media = True
for me in entities['media']:
tweet.media.add(get_or_create_media(me))
# user_mentions
if entities.get('user_mentions') and len(entities['user_mentions']) > 0:
tweet.contains_mention = True
for mention in entities['user_mentions']:
mention_obj = create_an_user_from_json_obj(mention, dataset_obj)
mention_obj.mentioned_count += 1
mention_obj.save()
tweet.mentions.add(mention_obj)
def get_or_create_a_tweet_from_json_obj(tweet_data, dataset_obj):
"""
Given a dataset object, imports a tweet from json object into
the dataset.
"""
if 'in_reply_to_status_id' not in tweet_data:
return None
# if tweet_data.get('lang') != 'en':
# return None
tweet, created = Message.objects.get_or_create(dataset=dataset_obj,
original_id=tweet_data['id'])
# text
if tweet_data.get('text'):
tweet.text = tweet_data['text']
# created_at
if tweet_data.get('created_at'):
tweet.time = datetime(*(parsedate(tweet_data['created_at']))[:6], tzinfo=utc)
# language
if tweet_data.get('lang'):
tweet.language = get_or_create_language(tweet_data['lang'])
if tweet_data.get('user'):
# sender
tweet.sender = create_an_user_from_json_obj(tweet_data['user'], dataset_obj)
# time_zone
if tweet_data['user'].get('time_zone'):
tweet.timezone = get_or_create_timezone(tweet_data['user']['time_zone'])
# type
if tweet_data.get('retweeted_status') is not None:
tweet.type = get_or_create_messagetype("retweet")
handle_retweet(tweet_data['retweeted_status'], dataset_obj)
elif tweet_data.get('in_reply_to_status_id') is not None:
tweet.type = get_or_create_messagetype("reply")
handle_reply_to(status_id=tweet_data['in_reply_to_status_id'],
user_id=tweet_data['in_reply_to_user_id'],
screen_name=tweet_data['in_reply_to_screen_name'],
dataset_obj=dataset_obj)
else:
tweet.type = get_or_create_messagetype('tweet')
if tweet_data.get('entities'):
handle_entities(tweet, tweet_data.get('entities'), dataset_obj)
# sentiment
set_message_sentiment(tweet, save=False)
tweet.save()
return tweet
def load_research_questions_from_json(json_str):
"""
Load research questions from json string
"""
questions = json.loads(json_str)
for q in questions:
source = q['source']
article, created = Article.objects.get_or_create(title=source['title'],
defaults={'authors': source['authors'],
'year': source['year'],
'venue': source['venue'],
'link': source['link']})
question = Question(source=article, text=q['text'])
question.save()
for dim in q['dimensions']:
question.add_dimension(dim)
question.save()
return True
| 2.203125 | 2 |
theape/plugins/sleep_plugin.py | rsnakamura/theape | 0 | 12795245 |
# python standard library
from collections import OrderedDict
# third party
from configobj import ConfigObj
# this package
from theape import BasePlugin
from theape.parts.sleep.sleep import TheBigSleep
from theape.infrastructure.timemap import time_validator
SLEEP_SECTION = 'SLEEP'
END_OPTION = 'end'
TOTAL_OPTION = 'total'
INTERVAL_OPTION = 'interval'
VERBOSE_OPTION = 'verbose'
configuration = """
[[SLEEP]]
# to allow the section names to be arbitrary
# the plugin names are required
plugin = Sleep
# 'end' should be a timestamp for the end-time (11-12-2013 8:45 pm)
# 'total' should be a timestamp for the run-time (1 hr 23 minutes)
# 'interval' should be <amount> <units> (1 minute)
    # if verbose is False, screen output will be off except at startup
# only one of absolute or relative time is required, although both can be used
end = <absolute time>
total = <relative time>
interval = 1 second
verbose = True
"""
sleep_configspec = """
end = absolute_time(default=None)
total = relative_time(default=None)
interval = relative_time(default=1)
verbose = boolean(default=True)
"""
sections = OrderedDict()
sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks until time is over'
sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default) meant to allow the insertion of a pause in the execution of the APE. At this point all calls to sleep will get the same configuration.'
sections['configuration'] = configuration
sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime'
sections['options'] = """
The configuration options --
{bold}end{reset} : an absolute time given as a time-stamp that can be interpreted by `dateutil.parser.parse`. This is for the cases where you have a specific time that you want the sleep to end.
{bold}total{reset} : a relative time given as pairs of '<amount> <units>' -- e.g. '3.4 hours'. Most units only use the first letter, but since `months` and `minutes` both start with `m`, you have to use two letters to specify them. The sleep will stop at the start of the sleep + the total time given.
{bold}interval{reset} : The amount of time beween reports of the time remaining (default = 1 second). Use the same formatting as the `total` option.
{bold}verbose{reset} : If True (the default) then report time remaining at specified intervals while the sleep runs.
One of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything else is optional.
"""
sections['author'] = 'ape'
class Sleep(BasePlugin):
"""
A plugin for TheBigSleep
"""
def __init__(self, *args, **kwargs):
"""
Constructor for Sleep
"""
super(Sleep, self).__init__(*args, **kwargs)
self._subsection = None
return
@property
def subsection(self):
"""
the plugin sub-section
"""
if self._subsection is None:
configspec = ConfigObj(sleep_configspec.splitlines(),
list_values=False,
_inspec=True)
section = ConfigObj(self.configuration[self.section_header],
configspec=configspec)
section.validate(time_validator)
self._subsection = section
return self._subsection
def fetch_config(self):
"""
prints a config-file sample
"""
print(configuration)
@property
def sections(self):
"""
Help dictionary
"""
if self._sections is None:
self._sections = sections
return self._sections
@property
def product(self):
"""
A built TheBigSleep object
:return: TheBigSleep
"""
if self._product is None:
end = self.subsection[END_OPTION]
total = self.subsection[TOTAL_OPTION]
interval = self.subsection[INTERVAL_OPTION]
if interval != 1:
interval = interval.total_seconds()
verbose = self.subsection[VERBOSE_OPTION]
self._product = TheBigSleep(end=end,
total=total,
interval=interval,
verbose=verbose)
return self._product | 2.65625 | 3 |
bnbapp/bionetbook/_old/verbs/views.py | Bionetbook/bionetbook | 0 | 12795246 |
from django.http import Http404
from django.views.generic import TemplateView
from verbs import forms as verb_forms
from verbs.utils import VERB_LIST
class VerbBaseView(object):
def get_verb_form(self, slug=None):
if slug is None:
slug = self.kwargs.get('slug')
# convert the slug into a form name
form_name = "".join([x.title() for x in slug.split('-')]) + "Form"
form = getattr(verb_forms, form_name, None)
if form is None:
raise Http404
return form
class VerbDetailView(VerbBaseView, TemplateView):
template_name = "verbs/verb_detail.html"
def get_context_data(self, **kwargs):
context = super(VerbDetailView, self).get_context_data(**kwargs)
context["verb"] = self.get_verb_form()
return context
class VerbListView(TemplateView):
template_name = "verbs/verb_list.html"
def get_context_data(self, **kwargs):
context = super(VerbListView, self).get_context_data(**kwargs)
quarter = len(VERB_LIST) / 4
context['verb_list1'] = VERB_LIST[:quarter]
context['verb_list2'] = VERB_LIST[quarter:quarter * 2]
context['verb_list3'] = VERB_LIST[quarter * 2:quarter * 3]
context['verb_list4'] = VERB_LIST[quarter * 3:]
return context
| 2.1875 | 2 |
stk/connect.py | jolsten/STK | 1 | 12795247 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 4 20:13:37 2020
@author: jolsten
"""
import sys, logging
import socket
import time
from abc import ABCMeta, abstractmethod
from .exceptions import *
from .utils import STK_DATEFMT, inherit_docstrings
class _AbstractConnect(metaclass=ABCMeta):
'''An STK Connect connection class.
Attributes:
host : str
The host on which the desired instance of STK is running.
port : int
The port on which the desired instance is accepting connections.
address : tuple
The address as a tuple (host, port)
ack : bool
A boolean representing whether the instance is using ACK/NACK.
Changing this after .connect() is called will not change the mode.
connect_attempts : int
The maximum number of attempts at connecting to the socket.
send_attempts : int
Sets the default maximum number of attempts to make while calling
.send() before raising STKNackError.
timeout : float
Sets the default timeout period for calls to .read() before
assuming all data was received.
'''
def __init__(self, **kwargs):
'''Inits an STK connection object (Connect or AsyncConnect)
Args:
host : str (default: 'localhost')
port : int (default: 5001)
ack : bool (default: True)
Specifies whether or not to use ACK/NACK responses with STK
                Connect. It is highly recommended to leave this set to True.
connect_attempts : int (default: 5)
The maximum number of attempts at connecting to the socket.
Several attempts should be made, in case the instance of STK
hasn't finished initializing by the time this is called.
send_attempts : int (default: 1)
Sets the default maximum number of attempts to make while
calling .send() before raising STKNackError.
timeout : int or float (default: 1.0)
Sets the default timeout period for calls to .read() before
assuming all data was received.
Because network traffic is unpredictable, increasing the
timeout will increase the likelihood that you receive all the
data.
However, this also adds a mandatory minimum delay before the
read() function returns.
'''
self._kwargs = kwargs
self.host = str( kwargs.get('host', 'localhost') )
self.port = int( kwargs.get('port', 5001) )
self.ack = bool( kwargs.get('ack', True) )
self.connect_attempts = int( kwargs.get('connect_attempts', 5) )
self.send_attempts = int( kwargs.get('send_attempts', 1) )
self.timeout = float( kwargs.get('timeout', 1 ) )
self.socket = None
@property
def address(self):
'''The socket address tuple.
Args:
None
Returns:
tuple : (host, port)
'''
return (self.host, self.port)
def connect(self):
'''Connect to the STK Connect socket specified.
Args:
None
Returns:
None
Raises:
STKConnectError : If, after .connect_attempts attempts, a
                connection couldn't be made successfully.
'''
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
time.sleep(3) # give STK a moment to start
self._connect()
if type(self) == AsyncConnect:
self.send(f'ConControl / AsyncOn')
else:
self.send(f'ConControl / AsyncOff')
if self.ack is not True:
self.send(f'ConControl / AckOff')
    def _connect(self):
        attempt = 0
        while True:
            attempt += 1
            try:
                self.socket.connect(self.address)
            except ConnectionRefusedError as e:
                # only retry on "connection refused"; any other error propagates
                logging.debug(f'ConnectionRefusedError: {e}')
                if attempt >= self.connect_attempts:
                    raise STKConnectError(f'Failed to connect to STK via socket on {self.host}:{self.port}')
                time.sleep(3)
            else:  # connected successfully, exit the retry loop
                logging.info(f'Connected to STK on {self.host}:{self.port}')
                return True
def send(self, message, attempts=None):
'''Sends a Connect command via socket.
Args:
message: A string containing the STK Connect command
attempts: Optional; The maximum number of times to send the
command if a NACK is received.
Returns:
None
Raises:
STKNackError : If too many NACK responses were received from STK.
Examples:
s.send("Unload / *")
'''
if attempts is None: attempts = self.send_attempts
attempt = 0
while True:
attempt += 1
try:
self._send(message)
if self.ack: self.get_ack(message)
return
except STKNackError as e:
if attempt >= attempts:
logging.error(f'send() failed, received NACK too many times')
raise STKNackError(e)
def _send(self, message: str):
logging.debug(f'stk.send("{message}")')
self.socket.send( (message+'\n').encode() )
def read(self, timeout=None):
'''Read all available data from the TCP/IP socket.
Args:
timeout : int or None (default: None)
Sets the timeout period for this specific call to .read()
before assuming all data was received.
Because network traffic is unpredictable, increasing the
timeout will increase the likelihood that you receive all the
data.
However, this also adds a mandatory minimum delay before the
read() function returns.
Returns:
bytes : a bytes object containing the data received from the socket
'''
        if timeout is None: timeout = self.timeout
self.socket.setblocking(False)
self.socket.settimeout(timeout)
logging.debug('Reading until no data is left in the socket...')
buffer = b''
while True:
try:
buffer += self.socket.recv(4096)
except socket.timeout:
logging.debug('Timeout reached, returning buffer')
self.socket.settimeout(None)
return buffer
def disconnect(self):
'''Alias of .close()'''
self.close()
def close(self):
'''Closes the STK Connect socket.
Args:
None
Returns:
None
'''
try:
self.socket.close()
except:
pass
def __repr__(self):
return f'{type(self).__name__}({self.host}:{self.port})'
def __del__(self):
self.close()
@abstractmethod
def get_ack(self, message):
'''Block until an ACK is received from STK Connect.
Users should not typically need to use this method directly, as it is
called from .send() if the class attribute ack=True
Args:
None
Returns:
None
'''
pass
@abstractmethod
def get_single_message(self):
pass
@abstractmethod
def get_multi_message(self):
pass
@abstractmethod
def report(self, **kwargs):
'''Create a report in STK and save it to a file.
Args:
ObjPath : str (required)
The STK Object Path for the desired report.
e.g.
Facility/A_Facility_Name
Satellite/A_Satellite_Name
Style : str or path-like object (required)
The Style name, if it is already loaded into STK (or is a
default report style).
Otherwise, pass a path to the desired .RST file.
FilePath : str or path-like object (required)
The path to the file to which the report should be written.
TimePeriod : str or None (default: None)
The time period to use for the report. If None, then use the
default (typically the parent object's time period).
Valid values:
UseAccessTimes
{TimeInterval}
Intervals {"<FilePath>" | "<IntervalOrListSpec>"}
Enter {TimeInterval} to define the start time and stop
time for the report span. For valid {TimeInterval} values
see Time Options.
Or specify UseAccessTimes to only report data during
access times between the <ObjectPath> and an AccessObject,
but you must also specify at least one AccessObject.
Or use the Intervals option to specify an STK interval
file for the time period or an Interval or Interval List
component specification.
For help on creating the STK interval file,
see Create & Import External Files - Interval List
in STK Help.
For information about "<IntervalOrListSpec>" see
Component Specification.
See STK Help for more details on these options.
TimeStep : float or str (default: None)
The timestep to use for the report. If None, then use the
default (typically the parent object's timestep).
Valid values:
<Value>
Bound <Value>
Array "<TimeArraySpec>"
Enter the time step <Value> to be used in creating the
report. This value is entered in seconds and must be
between 0.000001 and 1000000000.0 seconds.
Or enter Bound <Value> to have the report steps calculated
on a specific time boundary. This value is entered in
seconds and must be between 0 and 3600 seconds. If 0 is
entered then the default time step (usually 60 seconds) is
used.
Or enter the Array keyword with a Time Array component
specification to use the array times as time steps. For
information about "<TimeArraySpec>"
see Component Specification.
AdditionalData : str or None (default: None)
Some Report Styles require additional or pre-data, such as a
comparison object for the RIC report for a Satellite. For these
types of reports you must include this option. More information
on styles that require AdditionalData can be found at "Report
Additional Data" in the STK Help.
Summary : str or None (default: None)
Summary data is not generally included. Use this option, to
have the summary data included in the exported report file.
Valid values:
Include
Only
Specify the Include value to have the summary included with the
rest of the report; use the Only value to have only the summary
data reported.
Returns:
None
'''
pass
@abstractmethod
def report_rm(self, **kwargs):
'''Create a report in STK and return them via socket.
Args:
ObjPath : str (required)
The STK Object Path for the desired report.
e.g.
Facility/A_Facility_Name
Satellite/A_Satellite_Name
Style : str or path-like object (required)
The Style name, if it is already loaded into STK (or is a
default report style).
Otherwise, pass a path to the desired .RST file.
TimePeriod : str or None (default: None)
The time period to use for the report. If None, then use the
default (typically the parent object's time period).
Valid values:
UseAccessTimes
{TimeInterval}
Intervals {"<FilePath>" | "<IntervalOrListSpec>"}
Enter {TimeInterval} to define the start time and stop time
for the report span. For valid {TimeInterval} values see
Time Options.
Or specify UseAccessTimes to only report data during access
times between the <ObjectPath> and an AccessObject, but you
must also specify at least one AccessObject.
Or use the Intervals option to specify an STK interval file
for the time period or an Interval or Interval List
component specification.
For help on creating the STK interval file, see Create &
Import External Files - Interval List in STK Help.
For information about "<IntervalOrListSpec>"
see Component Specification.
See STK Help for more details on these options.
TimeStep : float or str
The timestep to use for the report. If None, then use the
default (typically the parent object's timestep).
Valid values:
<Value>
Bound <Value>
Array "<TimeArraySpec>"
Enter the time step <Value> to be used in creating the
report. This value is entered in seconds and must be
between 0.000001 and 1000000000.0 seconds.
Or enter Bound <Value> to have the report steps calculated
on a specific time boundary. This value is entered in
seconds and must be between 0 and 3600 seconds. If 0 is
entered then the default time step (usually 60 seconds) is
used.
Or enter the Array keyword with a Time Array component
specification to use the array times as time steps. For
information about "<TimeArraySpec>"
see Component Specification.
AdditionalData :
Some Report Styles require additional or pre-data, such as a
comparison object for the RIC report for a Satellite. For these
types of reports you must include this option. More information
on styles that require AdditionalData can be found at
"Report Additional Data" in the STK Help.
Summary : str
Valid values:
Include
Only
Summary data is not generally included. Use this option, to
have the summary data included in the exported report file.
Specify the Include value to have the summary included with
the rest of the report; use the Only value to have only the
summary data reported.
Returns:
None
'''
pass
class Connect(_AbstractConnect):
@inherit_docstrings
def get_ack(self, message):
msg = self.socket.recv(3).decode()
if msg == 'ACK':
# logging.debug('ACK Received')
return
elif msg == 'NAC':
k = self.socket.recv(1).decode()
msg = msg + k
raise STKNackError(f'NACK Received: stk.send("{message.rstrip()}")')
else:
logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}')
sys.exit(1)
def get_single_message(self):
header = self.socket.recv(40).decode()
cmd_name, length = header.rstrip().split()
length = int(length)
data = self.socket.recv(length).decode()
return header, data
def get_multi_message(self):
hdr, data = self.get_single_message()
messages = []
for i in range(int(data)):
sm = self.get_single_message()
if len(sm) > 0:
messages.append(sm)
return messages
@inherit_docstrings
def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None):
message = f'ReportCreate */{ObjPath} Style "{Style}" Type "Export" File "{FilePath}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
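    # Hedged example (the object, style, and file names are made up): calling
    #   c.report(ObjPath='Satellite/ExampleSat', Style='LLA Position',
    #            FilePath='C:/tmp/lla.txt', TimeStep=60)
    # sends the Connect command:
    #   ReportCreate */Satellite/ExampleSat Style "LLA Position" Type "Export" File "C:/tmp/lla.txt" TimeStep 60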
@inherit_docstrings
def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs):
message = f'Report_RM */{ObjPath} Style "{Style}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
messages = self.get_multi_message()
return [x[1] for x in messages]
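# Hedged usage sketch for the synchronous client above. The host, port, object
# name, and report style are illustrative assumptions; only methods defined in
# this module are used:
#
#   stk = Connect(host='localhost', port=5001, timeout=2.0)
#   stk.connect()
#   stk.send('New / */Satellite ExampleSat')   # example Connect command
#   rows = stk.report_rm(ObjPath='Satellite/ExampleSat',
#                        Style='Cartesian Position')
#   stk.close()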
class AsyncConnect(_AbstractConnect):
@inherit_docstrings
def get_ack(self, message):
hdr, data = self.get_single_message()
if hdr.async_type == 'ACK':
return True
elif hdr.async_type == 'NACK':
raise STKNackError(f'NACK Received: stk.send("{message}")')
def get_single_message(self):
msg = self.socket.recv(42).decode()
hdr = AsyncHeader(msg)
pdl = hdr.data_length
data = self.socket.recv( pdl ).decode()
while len(data) < hdr.data_length:
data += self.socket.recv( pdl - len(data) ).decode()
return hdr, data
def get_multi_message(self):
logging.debug('Getting Message Block:')
hdr, data = self.get_single_message()
logging.debug(f'GotMessage: {hdr}{data}')
msg_grp = [None] * hdr.total_packets
msg_grp[hdr.packet_number-1] = data
for i in range(1,hdr.total_packets):
            hdr, data = self.get_single_message()
logging.debug(f'GotMessage: {hdr}{data}')
msg_grp[hdr.packet_number-1] = data
if msg_grp[-1] == '': del msg_grp[-1]
return msg_grp
@inherit_docstrings
def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None):
message = f'ReportCreate */{ObjPath} Style "{Style}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
@inherit_docstrings
def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs):
message = f'Report_RM */{ObjPath} Style "{Style}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
buffer = self.read(**kwargs).decode()
if len(buffer) == 0: return []
return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ]
class AsyncHeader():
'''A helper class to read the STK Connect Asynchronous Message Format headers.'''
def __init__(self, bytestring):
'''Inits a new object using the raw values, passed as bytes or str.'''
if isinstance(bytestring, bytes): bytestring = bytestring.decode()
self.raw = bytestring
def __repr__(self):
return f'<{self.raw}>'
@property
def sync(self):
'''str : The sync word, should always be "AGI"'''
        return self.raw[0:3]
@property
def header_length(self):
'''int : The header_length, should always be 42.'''
        return int(self.raw[3:5])
@property
def version(self):
'''str : The version in major.minor format.'''
return f'{self.major_version}.{self.minor_version}'
@property
def major_version(self):
'''int : The major version number.'''
        return int(self.raw[5])
@property
def minor_version(self):
'''int : The minor version number.'''
        return int(self.raw[6])
@property
def type_length(self):
'''int : The length of the command type string.'''
return int(self.raw[7:9])
@property
def async_type(self):
'''str : The value of the command type string.'''
return (self.raw[9:24])[0:self.type_length]
@property
def identifier(self):
'''int : The value of the response ID.
This should be used to associate the correct responses with each
other if commands are being processed asynchronously.
'''
return int(self.raw[24:30])
@property
def total_packets(self):
'''int : The total number of packets in the current identifier.'''
return int(self.raw[30:34])
@property
def packet_number(self):
'''int : The sequence number of the current packet for this identifier.'''
return int(self.raw[34:38])
@property
def data_length(self):
'''int : The length of the data field for the current packet.'''
return int(self.raw[38:42])
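# Hedged worked example for AsyncHeader (the field values are invented, but the
# offsets match the properties above and the 'AGI42...' prefix used in
# AsyncConnect.report_rm). 'ACK' is padded to 15 characters with spaces:
#
#   hdr = AsyncHeader('AGI421003ACK            000001000100010005')
#   hdr.version       -> '1.0'
#   hdr.async_type    -> 'ACK'
#   hdr.identifier    -> 1
#   hdr.total_packets -> 1
#   hdr.packet_number -> 1
#   hdr.data_length   -> 5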
| 2.890625 | 3 |
xcparse/Xcode/PBX/PBXResourcesBuildPhase.py | samdmarshall/xcparser | 59 | 12795248 | from .PBXResolver import *
from .PBX_Base_Phase import *
class PBXResourcesBuildPhase(PBX_Base_Phase):
def __init__(self, lookup_func, dictionary, project, identifier):
super(PBXResourcesBuildPhase, self).__init__(lookup_func, dictionary, project, identifier);
self.bundleid = 'com.apple.buildphase.resources';
self.phase_type = 'Copy Resources'; | 1.875 | 2 |
src/simplestack/hypervisors/xen.py | locaweb/simplestack | 9 | 12795249 | # Copyright 2013 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: <NAME>, Locaweb.
# @author: <NAME> (morellon), Locaweb.
# @author: <NAME> (PotHix), Locaweb.
from simplestack.utils import XenAPI
from simplestack.exceptions import FeatureNotImplemented, EntityNotFound
from simplestack.hypervisors.base import SimpleStack
from simplestack.presenters.formatter import Formatter
import datetime
import re
import errno
import socket
import httplib
import logging
LOG = logging.getLogger('simplestack.hypervisors.xen')
class Stack(SimpleStack):
state_translation = {
"Running": "STARTED",
"Halted": "STOPPED",
"Suspended": "PAUSED"
}
def __init__(self, poolinfo):
self.connection = False
self.poolinfo = poolinfo
self.format_for = Formatter()
self.connect()
def connect(self):
self.connection = XenAPI.Session(
"https://%s/" % self.poolinfo.get("api_server")
)
try:
self.connection.xenapi.login_with_password(
self.poolinfo.get("username"),
self.poolinfo.get("password")
)
except Exception, error:
# If host is slave, connect to master
if 'HOST_IS_SLAVE' in str(error):
self.poolinfo["api_server"] = str(error).split("'")[3]
self.connect()
else:
raise error
def logout(self):
self.connection.xenapi.session.logout()
def pool_info(self):
used_memory = 0
for vm_rec in self.connection.xenapi.VM.get_all_records().values():
if not vm_rec['is_a_template'] and not vm_rec['is_a_snapshot']:
used_memory += int(vm_rec['memory_dynamic_max'])
total_memory = 0
for host_ref in self.connection.xenapi.host.get_all():
met_ref = self.connection.xenapi.host.get_metrics(host_ref)
m_rec = self.connection.xenapi.host_metrics.get_record(met_ref)
total_memory += int(m_rec['memory_total'])
pool_rec = self.connection.xenapi.pool.get_all_records().values()[0]
master_rec = self.connection.xenapi.host.get_record(pool_rec["master"])
return (
self.format_for.pool(
used_memory / (1024 * 1024),
total_memory / (1024 * 1024),
pool_rec["uuid"],
master_rec["address"],
{ 'version': master_rec.get('software_version', {}).get('product_version') }
)
)
def host_list(self):
hosts = []
for h in self.connection.xenapi.host.get_all_records().values():
hosts.append({'id': h["uuid"]})
return hosts
def host_info(self, host_id):
host_ref = self.connection.xenapi.host.get_by_uuid(host_id)
return self._host_info(host_ref)
def storage_list(self):
storages = []
for sr in self.connection.xenapi.SR.get_all_records().values():
if sr["PBDs"] is not None and len(sr["PBDs"]) > 0:
storages.append({'id': sr["uuid"]})
return storages
def storage_info(self, storage_id):
sr_ref = self.connection.xenapi.SR.get_by_uuid(storage_id)
return self._storage_info(sr_ref)
def guest_list(self):
guests = []
for vm in self.connection.xenapi.VM.get_all_records().values():
if (not vm.get('is_a_snapshot')) and (not vm.get('is_a_template')):
guests.append({'id': vm.get('uuid')})
return guests
def guest_info(self, guest_id):
vm = self._vm_ref(guest_id)
return self._vm_info(vm)
def guest_shutdown(self, guest_id, force=False):
if force:
return self.connection.xenapi.VM.hard_shutdown(
self._vm_ref(guest_id)
)
else:
return self.connection.xenapi.VM.clean_shutdown(
self._vm_ref(guest_id)
)
def guest_start(self, guest_id):
return self.connection.xenapi.VM.start(
self._vm_ref(guest_id), False, False
)
def guest_reboot(self, guest_id, force=False):
vm_ref = self._vm_ref(guest_id)
if force:
return self.connection.xenapi.VM.hard_reboot(vm_ref)
else:
return self.connection.xenapi.VM.clean_reboot(vm_ref)
def guest_suspend(self, guest_id):
return self.connection.xenapi.VM.suspend(self._vm_ref(guest_id))
def guest_resume(self, guest_id):
return self.connection.xenapi.VM.resume(
self._vm_ref(guest_id), False, False
)
def guest_clone(self, guest_id, data):
vm = self.connection.xenapi.VM.clone(
self._vm_ref(guest_id), data["name"]
)
return self._vm_info(vm)
def guest_update(self, guest_id, guestdata):
vm_ref = self._vm_ref(guest_id)
if "name" in guestdata:
self.connection.xenapi.VM.set_name_label(vm_ref, guestdata["name"])
if "memory" in guestdata:
memory = guestdata["memory"]
if not isinstance(memory,dict):
memory = { "memory_target" : memory , "memory_static_min" : memory, "memory_static_max" : memory }
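            # memory values are given in MiB; XenAPI expects bytes, hence the << 20 shift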
memory_target = str(int(memory["memory_target"])<<20)
memory_static_min = str(int(memory["memory_static_min"])<<20)
memory_static_max = str(int(memory["memory_static_max"])<<20)
self.connection.xenapi.VM.set_memory_limits(
vm_ref, memory_static_min, memory_static_max, memory_target, memory_target
)
if "memory_target_live" in guestdata:
memory_target = str(int(guestdata["memory_target_live"])<<20)
self.connection.xenapi.VM.set_memory_dynamic_range(
vm_ref, memory_target, memory_target
)
if "cpus" in guestdata:
vcpus = guestdata["cpus"]
if not isinstance(vcpus,dict):
vcpus = { "vcpus_at_startup" : vcpus, "vcpus_max" : self.connection.xenapi.VM.get_VCPUs_max(vm_ref) }
vcpus_at_startup = str(vcpus["vcpus_at_startup"])
vcpus_max = str(vcpus["vcpus_max"])
if int(vcpus_at_startup) > int(vcpus_max):
self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_at_startup)
else:
self.connection.xenapi.VM.set_VCPUs_max(vm_ref, vcpus_max)
self.connection.xenapi.VM.set_VCPUs_at_startup(vm_ref, vcpus_at_startup)
if "vcpus_number_live" in guestdata:
self.connection.xenapi.VM.set_VCPUs_number_live(vm_ref, str(guestdata["vcpus_number_live"]))
if "vcpu_settings" in guestdata:
parameters = self.connection.xenapi.VM.get_VCPUs_params(vm_ref)
parameters.update(guestdata["vcpu_settings"])
self.connection.xenapi.VM.set_VCPUs_params(vm_ref, parameters)
if "ha_enabled" in guestdata:
if guestdata["ha_enabled"]:
self.connection.xenapi.VM.set_ha_restart_priority(
vm_ref, "best-effort"
)
else:
self.connection.xenapi.VM.set_ha_restart_priority(vm_ref, "")
if "template" in guestdata:
is_template = self.connection.xenapi.VM.get_is_a_template(vm_ref)
if guestdata["template"] ^ is_template:
self.connection.xenapi.VM.set_is_a_template(
vm_ref, guestdata["template"]
)
if "paravirtualized" in guestdata:
if guestdata["paravirtualized"]:
if guestdata["paravirtualized"] is True:
pv_args = "-- quiet console=hvc0"
else:
pv_args = guestdata["paravirtualized"]
self.connection.xenapi.VM.set_HVM_boot_policy(vm_ref, "")
self.connection.xenapi.VM.set_PV_args(vm_ref, pv_args)
else:
self.connection.xenapi.VM.set_PV_args(vm_ref, "")
self.connection.xenapi.VM.set_HVM_boot_params(
vm_ref, {"order": "dc"}
)
self.connection.xenapi.VM.set_HVM_boot_policy(
vm_ref, "BIOS order"
)
if "hdd" in guestdata:
disk = self.get_disks(vm_ref)[-1]
disks_size = self.get_disks_size(vm_ref)
hdd = guestdata.get("hdd") * 1024 * 1024 * 1024
new_disk_size = hdd - disks_size + int(disk["virtual_size"])
self.connection.xenapi.VDI.resize(disk["ref"], str(new_disk_size))
return self._vm_info(self._vm_ref(guest_id))
def guest_delete(self, guest_id):
self._delete_vm(guest_id)
def guest_import(self, vm_stream, vm_size, storage_id=None):
session_ref = self.connection._session
master = self.poolinfo.get("api_server")
storage_ref = None
if storage_id:
storage_ref = self.connection.xenapi.SR.get_by_uuid(storage_id)
else:
storages = self.connection.xenapi.SR.get_all_records()
max_free_space = 0
for sr_ref, record in storages.iteritems():
free_space = (
int(record["physical_size"]) -
int(record["virtual_allocation"])
)
if free_space > max_free_space:
max_free_space = free_space
storage_ref = sr_ref
if vm_size and vm_size > 0 and vm_size > max_free_space:
raise Exception("No storage space left for importing")
task_ref = self.connection.xenapi.task.create(
"import vm", "import job"
)
path = "/import?session_id=%s&task_id=%s&sr_id=%s" % (
session_ref, task_ref, storage_ref
)
try:
conn = httplib.HTTPConnection(master)
conn.request(
"PUT", path, vm_stream, {"Content-Length": vm_size}
)
response = conn.getresponse()
response.status
response.read()
except socket.error as err:
if err.errno == errno.ECONNRESET:
LOG.warning("error=CONNRESET action=import message='BUG?'")
else:
raise
task_rec = self.connection.xenapi.task.get_record(task_ref)
vm_ref = re.sub(r'<.*?>', "", task_rec["result"])
self.connection.xenapi.task.destroy(task_ref)
return self._vm_info(vm_ref)
def guest_export(self, guest_id):
vm_ref = self._vm_ref(guest_id)
session_ref = self.connection._session
# FIXME: get real master
master = self.poolinfo.get("api_server")
task_ref = self.connection.xenapi.task.create(
"export vm %s" % guest_id, "export job"
)
path = "/export?session_id=%s&task_id=%s&ref=%s" % (
session_ref, task_ref, vm_ref
)
conn = httplib.HTTPConnection(master)
conn.request("GET", path)
response = conn.getresponse()
response_size = response.getheader("Content-Length")
return (response, response_size)
def disk_list(self, guest_id):
vm_ref = self._vm_ref(guest_id)
disks = self.get_disks(vm_ref)
return [self._disk_info(d) for d in disks]
def disk_create(self, guest_id, data):
vm_ref = self._vm_ref(guest_id)
devices = []
for vbd in self.connection.xenapi.VM.get_VBDs(vm_ref):
devices.append(int(self.connection.xenapi.VBD.get_userdevice(vbd)))
next_device = max(devices) + 1
for device in range(next_device):
if device not in devices:
next_device = device
break
vbd_rec = {
"VM": vm_ref,
"userdevice": str(next_device),
"bootable": False,
"mode": "RW",
"type": "Disk",
"unpluggable": False,
"empty": False,
"other_config": {},
"qos_algorithm_type": "",
"qos_algorithm_params": {}
}
vdi_rec = ({
"name_label": "New Disk",
"name_description": "Simplestack generated disk",
"virtual_size": str(data["size"] * 1024 * 1024 * 1024),
"type": "system",
"sharable": False,
"read_only": False,
"other_config": {},
"xenstore_data": {},
"sm_config": {},
"tags": []
})
if data.get("storage_id"):
raise FeatureNotImplemented()
else:
disks = self.get_disks(vm_ref)
vdi_rec["SR"] = disks[0]["SR"]
if "name" in data:
vdi_rec["name_label"] = data["name"]
vdi_rec["name_description"] = data["name"]
vdi_ref = self.connection.xenapi.VDI.create(vdi_rec)
vbd_rec["VDI"] = vdi_ref
self.connection.xenapi.VBD.create(vbd_rec)
disk_rec = self._disk_rec(vm_ref, next_device)
return self._disk_info(disk_rec)
def disk_info(self, guest_id, disk_id):
vm_ref = self._vm_ref(guest_id)
disk_rec = self._disk_rec(vm_ref, disk_id)
return self._disk_info(disk_rec)
def disk_update(self, guest_id, disk_id, data):
vm_ref = self._vm_ref(guest_id)
disk_rec = self._disk_rec(vm_ref, disk_id)
if "name" in data:
self.connection.xenapi.VDI.set_name_label(
disk_rec["ref"], data["name"]
)
self.connection.xenapi.VDI.set_name_description(
disk_rec["ref"], data["name"]
)
if "size" in data:
new_disk_size = int(data["size"])
new_disk_size *= 1024 * 1024 * 1024
self.connection.xenapi.VDI.resize(disk_rec["ref"], str(new_disk_size))
disk_rec = self._disk_rec(vm_ref, disk_id)
return self._disk_info(disk_rec)
def media_mount(self, guest_id, media_data):
vm_ref = self._vm_ref(guest_id)
cd_ref = self._cd_ref(vm_ref)
if media_data.get("name") and media_data["name"] != "":
self.media_unmount(guest_id)
iso_ref = self.connection.xenapi.VDI.get_by_name_label(
media_data["name"]
)[0]
self.connection.xenapi.VBD.insert(cd_ref, iso_ref)
else:
self.media_unmount(guest_id)
def media_unmount(self, guest_id):
vm_ref = self._vm_ref(guest_id)
cd_ref = self._cd_ref(vm_ref)
null_ref = 'OpaqueRef:NULL'
if self.connection.xenapi.VBD.get_record(cd_ref)["VDI"] != null_ref:
self.connection.xenapi.VBD.eject(cd_ref)
def media_info(self, guest_id):
vm_ref = self._vm_ref(guest_id)
cd_ref = self._cd_ref(vm_ref)
iso_ref = self.connection.xenapi.VBD.get_record(cd_ref)["VDI"]
if iso_ref == 'OpaqueRef:NULL':
return {"name": None}
else:
name = self.connection.xenapi.VDI.get_record(iso_ref)["name_label"]
return {"name": name}
def network_list(self):
net_refs = self.connection.xenapi.network.get_all()
ret = []
for net in net_refs:
ret.append({"id": net})
return ret
def network_info(self, net_ref):
return {"name_label": self.connection.xenapi.network.get_name_label(net_ref),
"bridge": self.connection.xenapi.network.get_bridge(net_ref),
"name_description": self.connection.xenapi.network.get_name_description(net_ref),
"other_config": self.connection.xenapi.network.get_other_config(net_ref)}
def _network_ref(self, name):
net_ref = self.connection.xenapi.network.get_by_name_label(name)
if len(net_ref) == 0:
raise EntityNotFound("NetworkInterface", "Unknown network: %s" % name)
return net_ref[0]
def _network_get_pifs(self, name):
ref = self._network_ref(name)
return self.connection.xenapi.network.get_PIFs(ref)
def _network_create(self, name, description, other_config={}):
return self.connection.xenapi.network.create({"name_label": name,
"name_description": description,
"other_config": other_config})
def network_vlan_create(self, name, description, from_network, vlan, other_config={}):
net_ref = self._network_create(name, description, other_config)
pif_ref = self._network_get_pifs(from_network)
ref = self.connection.xenapi.pool.create_VLAN_from_PIF(pif_ref[0], net_ref, str(vlan))
return net_ref
def network_interface_list(self, guest_id):
vm_ref = self._vm_ref(guest_id)
vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref)
return [self._network_interface_info(n) for n in vif_refs]
def network_interface_create(self, guest_id, data):
"""
Data should contain at least a network key:
{"network": "THE NETWORK NAME"}
"""
vm_ref = self._vm_ref(guest_id)
devices = []
for vif in self.connection.xenapi.VM.get_VIFs(vm_ref):
devices.append(int(self.connection.xenapi.VIF.get_device(vif)))
next_device = max(devices) + 1
for device in range(next_device):
if device not in devices:
next_device = device
break
vif_record = {
"VM": vm_ref,
"device": str(next_device),
"MAC_autogenerated": True,
"MAC": "",
"MTU": "0",
"other_config": {},
"qos_algorithm_type": "",
"qos_algorithm_params": {}
}
if "network" in data:
vif_record["network"] = self._network_ref(data["network"])
vif_ref = self.connection.xenapi.VIF.create(vif_record)
try:
self.connection.xenapi.VIF.plug(vif_ref)
except:
pass
return self._network_interface_info(vif_ref)
def network_interface_info(self, guest_id, network_interface_id):
vm_ref = self._vm_ref(guest_id)
vif_ref = self._network_interface_ref(vm_ref, network_interface_id)
return self._network_interface_info(vif_ref)
def network_interface_update(self, guest_id, network_interface_id, data):
vm_ref = self._vm_ref(guest_id)
vif_ref = self._network_interface_ref(vm_ref, network_interface_id)
vif_record = self.connection.xenapi.VIF.get_record(vif_ref)
new_attributes = {}
if "network" in data:
net_refs = self._network_ref(data["network"])
if vif_record["network"] != net_refs:
new_attributes["network"] = net_refs
if "locking_mode" in data and vif_record["locking_mode"] != data["locking_mode"]:
new_attributes["locking_mode"] = data["locking_mode"]
if "ipv4_allowed" in data and vif_record["ipv4_allowed"] != data["ipv4_allowed"]:
new_attributes["ipv4_allowed"] = data["ipv4_allowed"]
if "ipv6_allowed" in data and vif_record["ipv6_allowed"] != data["ipv6_allowed"]:
new_attributes["ipv6_allowed"] = data["ipv6_allowed"]
if len(new_attributes) != 0:
vif_record.update(new_attributes)
try:
self.connection.xenapi.VIF.unplug(vif_ref)
except:
pass
self.connection.xenapi.VIF.destroy(vif_ref)
vif_ref = self.connection.xenapi.VIF.create(vif_record)
try:
self.connection.xenapi.VIF.plug(vif_ref)
except:
pass
if "active" in data:
if data["active"]:
try:
self.connection.xenapi.VIF.plug(vif_ref)
except:
pass
else:
try:
self.connection.xenapi.VIF.unplug(vif_ref)
except:
pass
if "ratelimit" in data:
if data["ratelimit"]:
# kbps in xen is actually kBps
rate = data["ratelimit"] / (8 * 1024)
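                # e.g. assuming ratelimit is given in bit/s, 8388608 -> 1024 kBps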
self.connection.xenapi.VIF.set_qos_algorithm_type(
vif_ref, "ratelimit"
)
self.connection.xenapi.VIF.set_qos_algorithm_params(
vif_ref, {"kbps": str(rate)}
)
else:
self.connection.xenapi.VIF.set_qos_algorithm_type(vif_ref, "")
return self._network_interface_info(vif_ref)
def network_interface_delete(self, guest_id, network_interface_id):
vm_ref = self._vm_ref(guest_id)
vif_ref = self._network_interface_ref(vm_ref, network_interface_id)
try:
self.connection.xenapi.VIF.unplug(vif_ref)
except:
pass
self.connection.xenapi.VIF.destroy(vif_ref)
def snapshot_list(self, guest_id):
snaps = [
self._snapshot_info(s)
for s in self.connection.xenapi.VM.get_snapshots(
self._vm_ref(guest_id)
)
]
return snaps
def snapshot_create(self, guest_id, snapshot_name=None):
if not snapshot_name:
snapshot_name = str(datetime.datetime.now())
snap = self.connection.xenapi.VM.snapshot(
self._vm_ref(guest_id), snapshot_name
)
return self._snapshot_info(snap)
def snapshot_info(self, guest_id, snapshot_id):
snap = self._vm_ref(snapshot_id)
return self._snapshot_info(snap)
def snapshot_revert(self, guest_id, snapshot_id):
self.connection.xenapi.VM.revert(self._vm_ref(snapshot_id))
def snapshot_delete(self, guest_id, snapshot_id):
self._delete_vm(snapshot_id)
def tag_list(self, guest_id):
return self.connection.xenapi.VM.get_tags(self._vm_ref(guest_id))
def tag_create(self, guest_id, tag_name):
vm_ref = self._vm_ref(guest_id)
self.connection.xenapi.VM.add_tags(vm_ref, tag_name)
return self.tag_list(guest_id)
def tag_delete(self, guest_id, tag_name):
vm_ref = self._vm_ref(guest_id)
self.connection.xenapi.VM.remove_tags(vm_ref, tag_name)
def get_disks(self, vm_ref):
disks = []
vm = self.connection.xenapi.VM.get_record(vm_ref)
for vbd_ref in vm['VBDs']:
vbd = self.connection.xenapi.VBD.get_record(vbd_ref)
if vbd["type"] == "Disk":
vdi = self.connection.xenapi.VDI.get_record(vbd['VDI'])
vdi['userdevice'] = vbd['userdevice']
vdi['ref'] = vbd['VDI']
disks.append(vdi)
return sorted(disks, key=lambda vdi: int(vdi['userdevice']))
def get_disks_size(self, vm_ref):
size = 0
for vdi in self.get_disks(vm_ref):
size += int(vdi["virtual_size"])
return size
def _disk_rec(self, vm_ref, disk_id):
disk_id = str(disk_id)
for disk in self.get_disks(vm_ref):
if disk["userdevice"] == disk_id:
return disk
entity_info = "%s - on Guest" % (disk_id)
raise EntityNotFound("Disk", entity_info)
def _network_interface_ref(self, vm_ref, network_interface_id):
vif_refs = self.connection.xenapi.VM.get_VIFs(vm_ref)
for vif_ref in vif_refs:
vif_rec = self.connection.xenapi.VIF.get_record(vif_ref)
if vif_rec["MAC"] == network_interface_id:
return vif_ref
entity_info = "%s - on Guest" % (network_interface_id)
raise EntityNotFound("NetworkInterface", entity_info)
def _vm_ref(self, uuid):
try:
return self.connection.xenapi.VM.get_by_uuid(uuid)
except:
LOG.warning("uuid=%s action=not_found" % uuid)
return None
def _host_info(self, host_ref):
host = self.connection.xenapi.host.get_record(host_ref)
return(
self.format_for.host(
host['uuid'],
host['name_label'],
host['address']
)
)
def _storage_info(self, sr_ref):
sr = self.connection.xenapi.SR.get_record(sr_ref)
return(
self.format_for.storage(
sr['uuid'],
sr['name_label'],
sr['type'],
int(sr['physical_utilisation']) / (1024 * 1024 * 1024),
int(sr['virtual_allocation']) / (1024 * 1024 * 1024),
int(sr['physical_size']) / (1024 * 1024 * 1024)
)
)
def _vm_info(self, vm_ref):
vm = self.connection.xenapi.VM.get_record(vm_ref)
tools_up_to_date = None
ip = None
if vm["guest_metrics"] != "OpaqueRef:NULL":
guest_metrics = self.connection.xenapi.VM_guest_metrics.\
get_record(vm["guest_metrics"])
tools_up_to_date = guest_metrics["PV_drivers_up_to_date"]
if "0/ip" in guest_metrics["networks"].keys():
ip = guest_metrics["networks"]["0/ip"]
host = None
if vm["resident_on"] != "OpaqueRef:NULL":
host = self.connection.xenapi.host.get_name_label(
vm["resident_on"]
)
return(
self.format_for.guest(
vm.get('uuid'),
vm.get('name_label'),
int(vm.get('VCPUs_at_startup')),
int(vm.get('memory_static_max')) / (1024 * 1024),
self.get_disks_size(vm_ref) / (1024 * 1024 * 1024),
vm["PV_args"],
tools_up_to_date,
ip,
self.state_translation[vm.get('power_state')],
host
)
)
def _disk_info(self, disk_rec):
return(
self.format_for.disk(
disk_rec.get('userdevice'),
disk_rec.get('name_label'),
disk_rec.get('userdevice'),
int(disk_rec.get('virtual_size')) / (1024 * 1024 * 1024),
disk_rec.get("uuid")
)
)
def _snapshot_info(self, snapshot_ref):
snapshot = self.connection.xenapi.VM.get_record(snapshot_ref)
return(
self.format_for.snapshot(
snapshot.get('uuid'),
snapshot.get('name_label')
)
)
def _network_interface_info(self, vif_ref):
vif_rec = {'locking_mode': None, 'ipv4_allowed': None, 'ipv6_allowed': None}
vif_rec.update(self.connection.xenapi.VIF.get_record(vif_ref))
network_rec = self.connection.xenapi.network.get_record(
vif_rec["network"]
)
return(
self.format_for.network_interface(
vif_rec["MAC"],
vif_rec["device"],
vif_rec["MAC"],
network_rec["name_label"],
vif_rec["locking_mode"],
vif_rec["ipv4_allowed"],
vif_rec["ipv6_allowed"],
vif_rec["qos_algorithm_params"]
)
)
def _delete_vm(self, vm_id):
vm_ref = self._vm_ref(vm_id)
if not vm_ref:
return
for snap_ref in self.connection.xenapi.VM.get_snapshots(vm_ref):
snap = self.connection.xenapi.VM.get_record(snap_ref)
self._delete_vm(snap["uuid"])
self._delete_disks(vm_ref)
self.connection.xenapi.VM.destroy(vm_ref)
def _cd_ref(self, vm_ref):
vm = self.connection.xenapi.VM.get_record(vm_ref)
for vbd_ref in vm['VBDs']:
vbd = self.connection.xenapi.VBD.get_record(vbd_ref)
if vbd["type"] == "CD":
return vbd_ref
def _delete_disks(self, vm_ref):
for vdi in self.get_disks(vm_ref):
self.connection.xenapi.VDI.destroy(vdi['ref'])
| 1.859375 | 2 |
mathematica/style.py | DbxDev/pygments-mathematica | 62 | 12795250 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (c) 2016 rsmenon
# Licensed under the MIT License (https://opensource.org/licenses/MIT)
from pygments.style import Style
from mathematica.lexer import MToken
class MathematicaStyle(Style):
default_style = ''
background_color = '#fefefe'
styles = {
MToken.BUILTIN: '#353f42',
MToken.COMMENT: 'italic #aaaaaa',
MToken.GROUP: '#555555',
MToken.LOCAL_SCOPE: '#5d9066',
MToken.MESSAGE: '#ab466a',
MToken.NUMBER: '#b66a4b',
MToken.OPERATOR: '#555555',
MToken.PATTERN: 'italic #6E8413',
MToken.SLOT: 'italic #6E8413',
MToken.STRING: '#499A9F',
MToken.SYMBOL: '#4b78b1',
MToken.UNKNOWN: '#555555',
}
class MathematicaNotebookStyle(Style):
default_style = ''
background_color = '#ffffff'
styles = {
MToken.BUILTIN: 'bold #000000',
MToken.COMMENT: 'bold #999999',
MToken.GROUP: 'bold #000000',
MToken.LOCAL_SCOPE: 'bold #3C7D91',
MToken.MESSAGE: 'bold #666666',
MToken.NUMBER: 'bold #000000',
MToken.OPERATOR: 'bold #000000',
MToken.PATTERN: 'bold italic #438958',
MToken.SLOT: 'bold italic #438958',
MToken.STRING: 'bold #666666',
MToken.SYMBOL: 'bold #002CC3',
MToken.UNKNOWN: 'bold #000000',
}
| 1.523438 | 2 |