metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "2gis/uniassert",
"score": 2
} |
#### File: 2gis/uniassert/conanfile.py
```python
import os
from conans import ConanFile, CMake, tools
class UniassertConan(ConanFile):
name = "uniassert"
version = "1.0.0"
license = "Zlib"
url = "https://github.com/2gis/uniassert.git"
description = "The uniassert library is a small collection of useful macros. Most of them are designed for assumption checks"
settings = "os", "compiler", "build_type", "arch"
build_requires = ('gtest/1.8.0@bincrafters/stable', )
generators = "cmake"
branch = 'master'
def source(self):
url = 'https://github.com/2gis/uniassert/archive/{}.zip'.format(self.branch)
tools.download(url, self.branch)
tools.unzip(self.branch)
os.remove(self.branch)
def build(self):
cmake = CMake(self)
configure_args = {
'args': ['-DUNIASSERT_TESTS=ON', ] if self.develop else None,
'source_dir': self.source_subdir(),
}
cmake.configure(**configure_args)
cmake.build()
cmake.test()
def package(self):
src = os.path.join(self.source_subdir(), 'include')
self.copy("*.h", dst="include", src=src)
def package_id(self):
self.info.header_only()
def source_subdir(self):
subdir = '{}-{}'.format(self.name, self.branch)
return os.path.join(self.source_folder, subdir)
``` |
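The recipe above is a header-only package: `package()` copies only headers and `package_id()` collapses the package ID so a single binary serves all settings. A minimal consumer sketch, assuming the same Conan 1.x API; the user/channel in the reference is illustrative:

```python
# Hypothetical consumer recipe (Conan 1.x, matching the recipe above).
# The "user/testing" part of the reference is an assumption.
from conans import ConanFile, CMake

class ConsumerConan(ConanFile):
    settings = "os", "compiler", "build_type", "arch"
    requires = "uniassert/1.0.0@user/testing"
    generators = "cmake"

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
```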
{
"source": "2gis/vmmaster-agent",
"score": 2
} |
#### File: vmmaster-agent/tests/test_api.py
```python
import unittest
import json
from Queue import Queue
from StringIO import StringIO
from threading import Thread
import requests
from twisted.internet import reactor as twisted_reactor
from twisted.python.failure import Failure
from twisted.internet import defer
from twisted.internet.endpoints import TCP4ClientEndpoint
from autobahn.twisted.websocket import WebSocketClientProtocol
from autobahn.twisted.websocket import WebSocketClientFactory
from vmmaster_agent.agent import VMMasterAgent
script = {
'script': "echo 'hello world'"
}
def sleep(secs):
d = defer.Deferred()
twisted_reactor.callLater(secs, d.callback, None)
return d
@defer.inlineCallbacks
def wait_for_output(output):
while not output.getvalue():
yield sleep(0)
yield defer.succeed(True)
def block_on(d, timeout=None):
q = Queue()
d.addBoth(q.put)
    ret = q.get(block=True, timeout=timeout)
if isinstance(ret, Failure):
ret.raiseException()
else:
return ret
class MyClientProtocol(WebSocketClientProtocol):
def __init__(self, output):
WebSocketClientProtocol.__init__(self)
self.output = output
def onOpen(self):
self.sendMessage(u'{"script": "фыва"}'.encode('utf8'))
def onMessage(self, payload, isBinary):
self.output.write(payload)
class TestApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.agent = VMMasterAgent(twisted_reactor)
cls.agent_thread = Thread(target=cls.agent.run)
cls.agent_thread.start()
@classmethod
def tearDownClass(cls):
cls.agent.stop()
cls.agent_thread.join()
def test_take_screenshot(self):
result = requests.get("http://localhost:9000/takeScreenshot")
self.assertEqual(200, result.status_code)
self.assertTrue("screenshot" in result.content)
def test_run_script(self):
response = requests.post(
"http://localhost:9000/runScript",
data=json.dumps(script)
)
result = {
"status": 0,
"output": "hello world\n"
}
self.assertEqual(200, response.status_code)
self.assertEqual(result, json.loads(response.content))
def test_run_script_with_command(self):
script_with_command = {
"command": "python",
"script": "print('hello world')"
}
response = requests.post(
"http://localhost:9000/runScript",
data=json.dumps(script_with_command)
)
result = {
"status": 0,
"output": "hello world\n"
}
self.assertEqual(200, response.status_code)
self.assertEqual(result, json.loads(response.content))
def test_run_script_websocket(self):
point = TCP4ClientEndpoint(twisted_reactor, "localhost", 9000)
factory = WebSocketClientFactory("ws://localhost:9000/runScript")
output = StringIO()
factory.protocol = lambda: MyClientProtocol(output)
point.connect(factory)
block_on(wait_for_output(output), 5)
self.assertIn("command not found", output.getvalue())
```
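The `block_on` helper above is the bridge between Twisted's asynchronous world and the blocking unittest thread: the Deferred's result is funnelled through a thread-safe `Queue`, and failures are re-raised synchronously. A small usage sketch, assuming the reactor is already running in another thread as in `setUpClass`:

```python
# Illustrative only: wait synchronously for a Deferred fired on the
# reactor thread, using the sleep() and block_on() helpers above.
d = sleep(0.5)          # fires ~0.5s later via reactor.callLater
block_on(d, timeout=5)  # blocks this thread; re-raises any Failure
```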
#### File: vmmaster-agent/vmmaster_agent/agent.py
```python
import logging
from twisted.internet import reactor as twisted_reactor
from twisted.internet.endpoints import TCP4ServerEndpoint
from api import ApiServer
class VMMasterAgent(object):
def __init__(self, reactor):
self.reactor = reactor
self.endpoint_api = TCP4ServerEndpoint(self.reactor, 9000)
self.endpoint_api.listen(ApiServer())
def run(self):
self.reactor.run()
def stop(self):
self.reactor.stop()
def main():
VMMasterAgent(twisted_reactor).run()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
```
#### File: vmmaster_agent/backend/run_script.py
```python
import subprocess
import tempfile
import platform
import logging
import os
import locale
import time
from threading import Lock, Thread
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
ENCODING = locale.getpreferredencoding()
class Flusher(Thread):
def __init__(self, interval, callback):
Thread.__init__(self)
self.running = True
self.daemon = True
self.interval = interval
self.callback = callback
def run(self):
while self.running:
self.callback()
time.sleep(self.interval)
def stop(self):
self.running = False
self.join()
class Channel(object):
flush_frequency = 1
buffer = ""
def __init__(self, websocket=None, autoflush=True):
if websocket:
self.channel = websocket
else:
self.channel = ''
self.lock = Lock()
if autoflush:
self.flusher = Flusher(self.flush_frequency, self.flush)
self.flusher.start()
def write(self, _buffer):
with self.lock:
self.buffer += _buffer
def flush(self):
with self.lock:
if self.buffer:
log.info("flushing: %s" % self.buffer)
                if isinstance(self.channel, (str, unicode)):
                    self.channel += self.buffer
                else:
                    self.channel.sendMessage(self.buffer.encode('utf-8'))
self.buffer = ""
def close(self):
if self.buffer:
self.flush()
self.flusher.stop()
def run_command(command, websocket):
env = os.environ.copy()
env['PYTHONUNBUFFERED'] = '1'
log.info("Running command: %s" % str(command))
    with tempfile.NamedTemporaryFile(delete=True) as f:
        # The child process writes into the temp file; a separate read
        # handle tails the same file so output can be streamed while the
        # process is still running.
        with open(f.name, 'rb') as stdout:
            process = subprocess.Popen(command, stdout=f, stderr=f, env=env)
            channel = Channel(websocket, autoflush=True)
            while process.poll() is None:
                channel.write(stdout.read(1).decode(ENCODING))
            channel.write(stdout.read().decode(ENCODING))
            channel.close()
            if isinstance(channel.channel, (str, unicode)):
                output = channel.channel
            else:
                output = ""
            return process.returncode, output
def run_script(script, command=None, websocket=None):
log.info("Got script: %s" % script.encode(ENCODING))
tmp_file_path = None
if command is None:
if platform.system() == "Windows":
tmp_file_path = tempfile.mktemp(suffix=".bat")
command = "cmd.exe /c"
else:
command = "/bin/bash"
if tmp_file_path is None:
tmp_file_path = tempfile.mktemp()
with open(tmp_file_path, "w") as f:
log.info("Writing script to: %s" % tmp_file_path)
f.write(script.encode(ENCODING))
return run_command(command.split(" ") + [tmp_file_path], websocket)
``` |
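`Channel` above buffers writes under a lock and, on each flush, either appends to an in-memory string or pushes a websocket message. A minimal sketch of the string-backed mode with `autoflush=False`, so no `Flusher` thread is started and flushing is driven by hand (note that `close()` assumes a flusher exists):

```python
# Minimal sketch (Python 2, matching the module above): string-backed
# Channel, flushed manually instead of via the Flusher thread.
ch = Channel(websocket=None, autoflush=False)
ch.write(u"hello ")
ch.write(u"world\n")
ch.flush()
assert ch.channel == u"hello world\n"
```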
{
"source": "2gunsu/SPL2021-FEN",
"score": 2
} |
#### File: SPL2021-FEN/module/fen.py
```python
import copy
import torch
import torch.nn as nn
import numpy as np
from typing import Dict
from yacs.config import CfgNode
from module.unet import UNet
from data.preprocess import RandomCrop, Resize
class FENs(nn.Module):
def __init__(self,
cfg: CfgNode,
init_freeze: bool = False):
super(FENs, self).__init__()
self.module_list = self._build_modules(cfg, all_identity=(not cfg.MODEL.FEN.USE_FEN))
if init_freeze:
self.freeze()
def forward(self, cloned_features: Dict[str, torch.Tensor]):
forward_dict = {}
for k, m in zip(cloned_features.keys(), self.module_list):
if isinstance(m, nn.Identity):
forward_dict.update({k: m(cloned_features[k])})
else:
forward_dict.update({k: m(cloned_features, level=k)})
return forward_dict
def get_loss(self, cloned_features: Dict[str, torch.Tensor]):
losses = []
        for key, module in zip(cloned_features.keys(), self.module_list):
            if not isinstance(module, nn.Identity):
                losses.append(module.get_loss(cloned_features, key))
return sum(losses)
def _build_modules(self,
cfg: CfgNode,
all_identity: bool = False):
full_levels = cfg.MODEL.RPN.IN_FEATURES # ['p2', 'p3', 'p4', 'p5', 'p6']
fen_levels = cfg.MODEL.FEN.LEVELS
if isinstance(fen_levels, str) and (fen_levels in full_levels):
fen_levels = [fen_levels, ]
assert all([(level in full_levels) for level in fen_levels]), \
f"'cfg.MODEL.FEN.LEVELS' must be subset of {full_levels}."
modules = []
for level in full_levels:
if not all_identity:
modules.append(FEN(cfg) if level in fen_levels else nn.Identity())
else:
modules.append(nn.Identity())
return nn.ModuleList(modules)
def freeze(self):
for m in self.module_list:
if not isinstance(m, nn.Identity):
m.freeze()
def unfreeze(self):
for m in self.module_list:
if not isinstance(m, nn.Identity):
m.unfreeze()
class FEN(nn.Module):
"""
Class of FEN (Feature Enhancement Network)
* Args:
cfg (CfgNode):
Configuration
"""
def __init__(self, cfg: CfgNode):
super().__init__()
device = "cuda" if torch.cuda.is_available() else "cpu"
self.cfg = cfg
self.model = UNet(in_channels=cfg.MODEL.FPN.OUT_CHANNELS).to(device)
def forward(self, cloned_features: Dict[str, torch.Tensor], level: str):
return self.model(cloned_features[level])
def get_loss(self, cloned_features: Dict[str, torch.Tensor], level: str):
input_patch, mask_patch, label_patch = self.extract_patches(
features=cloned_features[level],
patch_size=self.cfg.MODEL.FEN.PATCH_SIZE,
patch_per_img=self.cfg.MODEL.FEN.PATCH_PER_IMG,
erase_ratio=self.cfg.MODEL.FEN.ERASE_RATIO,
soften_ratio=self.cfg.MODEL.FEN.SOFTEN_RATIO)
output = self.model(input_patch)
loss = nn.L1Loss()(output * (1 - mask_patch), label_patch * (1 - mask_patch))
return loss
def extract_patches(self,
features: torch.Tensor,
patch_size: int = 32,
min_patch_size: int = 20,
patch_per_img: int = 4,
erase_ratio: float = 0.50,
soften_ratio: float = 0.60,
device: str = 'cuda'):
output_list, mask_list, label_list = [], [], []
SIZE_WIN = (5, 5)
arr_features = features.permute(0, 2, 3, 1).detach().cpu().numpy()
for arr_feature in arr_features:
for p_idx in range(patch_per_img):
cropped_arr = None
changed_size = None
if patch_size < arr_feature.shape[0]:
cropped_arr = RandomCrop(patch_size)(arr_feature)
changed_size = patch_size
elif patch_size == arr_feature.shape[0]:
cropped_arr = RandomCrop(min_patch_size)(arr_feature)
changed_size = min_patch_size
elif patch_size > arr_feature.shape[0]:
arr_feature = Resize(patch_size)(arr_feature)
cropped_arr = RandomCrop(min_patch_size)(arr_feature)
changed_size = min_patch_size
ch = arr_feature.shape[-1]
num_spots = int((changed_size ** 2) * erase_ratio)
mask = np.ones((changed_size, changed_size, ch)) * soften_ratio
output = copy.deepcopy(cropped_arr)
idy_msk = np.random.randint(0, changed_size, num_spots)
idx_msk = np.random.randint(0, changed_size, num_spots)
idy_neigh = np.random.randint(-SIZE_WIN[0] // 2 + SIZE_WIN[0] % 2,
SIZE_WIN[0] // 2 + SIZE_WIN[0] % 2,
num_spots)
idx_neigh = np.random.randint(-SIZE_WIN[1] // 2 + SIZE_WIN[1] % 2,
SIZE_WIN[1] // 2 + SIZE_WIN[1] % 2,
num_spots)
idy_msk_neigh = idy_msk + idy_neigh
idx_msk_neigh = idx_msk + idx_neigh
idy_msk_neigh = idy_msk_neigh + (idy_msk_neigh < 0) * changed_size - (
idy_msk_neigh >= changed_size) * changed_size
idx_msk_neigh = idx_msk_neigh + (idx_msk_neigh < 0) * changed_size - (
idx_msk_neigh >= changed_size) * changed_size
id_msk = (idy_msk, idx_msk)
id_msk_neigh = (idy_msk_neigh, idx_msk_neigh)
output[id_msk] = cropped_arr[id_msk_neigh]
mask[id_msk] = soften_ratio
output_list.append(torch.from_numpy(output).permute(2, 0, 1).unsqueeze(dim=0))
mask_list.append(torch.from_numpy(mask).permute(2, 0, 1).unsqueeze(dim=0))
label_list.append(torch.from_numpy(cropped_arr).permute(2, 0, 1).unsqueeze(dim=0))
output = torch.cat(output_list, dim=0).to(device)
mask = torch.cat(mask_list, dim=0).to(device)
label = torch.cat(label_list, dim=0).to(device)
return output, mask, label
def freeze(self):
for p in self.parameters():
p.requires_grad = False
def unfreeze(self):
for p in self.parameters():
p.requires_grad = True
```
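`extract_patches` builds its self-supervision targets with a blind-spot-style corruption: a fraction of pixels in each cropped patch is overwritten with randomly chosen neighbours from a 5x5 window, wrapping around at the patch borders, and the loss is then weighted by the resulting mask. A condensed numpy restatement of just the spot-replacement step (illustrative, not the training code; odd window sizes assumed):

```python
import numpy as np

def mask_with_neighbours(patch, erase_ratio=0.5, win=5, rng=np.random):
    """Overwrite random pixels of an (H, W, C) patch with random
    neighbours from a win x win window, wrapping at the borders."""
    size = patch.shape[0]
    n = int(size * size * erase_ratio)
    iy = rng.randint(0, size, n)
    ix = rng.randint(0, size, n)
    dy = rng.randint(-(win // 2), win // 2 + win % 2, n)
    dx = rng.randint(-(win // 2), win // 2 + win % 2, n)
    out = patch.copy()
    out[iy, ix] = patch[(iy + dy) % size, (ix + dx) % size]
    return out
```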
#### File: SPL2021-FEN/utils/utils.py
```python
import torch.nn as nn
def transfer_weight(src_model: nn.Module,
dst_model: nn.Module):
src_dict = src_model.state_dict()
dst_dict = dst_model.state_dict()
assert len(src_dict.keys()) == len(dst_dict.keys()), \
"`src_model` and `dst_model` seems different."
for src_key, dst_key in zip(src_dict.keys(), dst_dict.keys()):
dst_dict[dst_key] = src_dict[src_key]
dst_model.load_state_dict(dst_dict)
def freeze(module: nn.Module):
for p in module.parameters():
p.requires_grad = False
def unfreeze(module: nn.Module):
for p in module.parameters():
p.requires_grad = True
``` |
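`transfer_weight` copies parameters purely by state-dict order, so it works across models whose layer names differ as long as the shapes line up pairwise. An illustrative check, assuming the module is importable as `utils.utils` per the file path above:

```python
# Hypothetical usage of transfer_weight with two shape-compatible modules.
import torch
import torch.nn as nn
from utils.utils import transfer_weight

src = nn.Linear(8, 4)
dst = nn.Linear(8, 4)
transfer_weight(src, dst)
assert torch.equal(src.weight, dst.weight)
assert torch.equal(src.bias, dst.bias)
```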
{
"source": "2h4dl/pymilvus",
"score": 2
} |
#### File: pymilvus/tests/factorys.py
```python
import random
import logging
import string
import time
import datetime
import struct
import sys
from functools import wraps
# Third party imports
import numpy as np
import faker
from faker.providers import BaseProvider
logging.getLogger('faker').setLevel(logging.ERROR)
sys.path.append('.')
# grpc
from milvus.grpc_gen import milvus_pb2
def gen_vectors(num, dim):
return [[random.random() for _ in range(dim)] for _ in range(num)]
def gen_single_vector(dim):
return [[random.random() for _ in range(dim)]]
def gen_vector(nb, d, seed=np.random.RandomState(1234)):
xb = seed.rand(nb, d).astype("float32")
return xb.tolist()
def gen_unique_str(str=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return prefix if str is None else str + "_" + prefix
def get_current_day():
return time.strftime('%Y-%m-%d', time.localtime())
def get_last_day(day):
tmp = datetime.datetime.now() - datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def get_next_day(day):
tmp = datetime.datetime.now() + datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def gen_long_str(num):
    s = ''
    for _ in range(num):
        s += random.choice('tomorrow')
    return s
def gen_one_binary(topk):
ids = [random.randrange(10000000, 99999999) for _ in range(topk)]
distances = [random.random() for _ in range(topk)]
return milvus_pb2.TopKQueryResult(struct.pack(str(topk) + 'l', *ids), struct.pack(str(topk) + 'd', *distances))
def gen_nq_binaries(nq, topk):
return [gen_one_binary(topk) for _ in range(nq)]
def fake_query_bin_result(nq, topk):
return gen_nq_binaries(nq, topk)
class FakerProvider(BaseProvider):
def collection_name(self):
return 'collection_names' + str(random.randint(1000, 9999))
def name(self):
return 'name' + str(random.randint(1000, 9999))
def dim(self):
return random.randint(0, 999)
fake = faker.Faker()
fake.add_provider(FakerProvider)
def collection_name_factory():
return fake.collection_name()
def records_factory(dimension, nq):
return [[random.random() for _ in range(dimension)] for _ in range(nq)]
def binary_records_factory(dimension, nq):
def binary_record(bsize):
s_m = "abcdefghijklmnopqrstuvwxyz"
s_list = [s_m[random.randint(0, 25)] for _ in range(bsize)]
s = "".join(s_list)
return bytes(s, encoding="ASCII")
bs = dimension // 8
return [binary_record(bs) for _ in range(nq)]
def integer_factory(nq):
return [random.randint(0, 128) for _ in range(nq)]
def time_it(func):
@wraps(func)
def inner(*args, **kwrgs):
pref = time.perf_counter()
result = func(*args, **kwrgs)
delt = time.perf_counter() - pref
print(f"[{func.__name__}][{delt:.4}s]")
return result
return inner
```
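Hypothetical use of the `time_it` decorator above, wrapping one of the record factories:

```python
# Illustrative only; prints something like "[build_records][0.1234s]".
@time_it
def build_records():
    return records_factory(dimension=128, nq=10000)

vectors = build_records()
```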
#### File: pymilvus/tests/test_collection.py
```python
from unittest import mock
import grpc
import pytest
from grpc._channel import _UnaryUnaryMultiCallable as Uum
from milvus import DataType, BaseError
from factorys import collection_name_factory
from utils import MockGrpcError
class TestCreateCollection:
def test_create_collection_normal(self, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "v", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
]
}
try:
connect.create_collection(collection_name, collection_param)
except Exception as e:
pytest.fail("Unexpected MyError: ".format(str(e)))
finally:
connect.drop_collection(collection_name)
def test_create_collection_repeat(self, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "v", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
]
}
try:
connect.create_collection(collection_name, collection_param)
with pytest.raises(BaseError):
connect.create_collection(collection_name, collection_param)
except Exception as e:
pytest.fail("Unexpected MyError: ".format(str(e)))
finally:
connect.drop_collection(collection_name)
@pytest.mark.parametrize("sd", [DataType.INT32, DataType.INT64, DataType.BOOL, DataType.FLOAT, DataType.DOUBLE])
@pytest.mark.parametrize("vd", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
def test_create_collection_scalar_vector(self, sd, vd, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "A", "type": sd},
{"name": "v", "type": vd, "params": {"dim": 128}}
]
}
try:
connect.create_collection(collection_name, collection_param)
except Exception as e:
pytest.fail("Unexpected MyError: ".format(str(e)))
finally:
connect.drop_collection(collection_name)
def test_create_collection_segment_row_limit(self, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "v", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
],
"segment_row_limit": 10000
}
try:
connect.create_collection(collection_name, collection_param)
except Exception as e:
pytest.fail("Unexpected MyError: ".format(str(e)))
finally:
connect.drop_collection(collection_name)
@pytest.mark.parametrize("srl", [1, 10000000])
def test_create_collection_segment_row_limit_outrange(self, srl, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "v", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
],
"segment_row_limit": srl
}
with pytest.raises(BaseError):
connect.create_collection(collection_name, collection_param)
@pytest.mark.parametrize("srl", [None, "123"])
def test_create_collection_segment_row_limit_invalid(self, srl, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "v", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
],
"segment_row_limit": srl
}
with pytest.raises(BaseError):
connect.create_collection(collection_name, collection_param)
@pytest.mark.parametrize("autoid", [True, False])
    def test_create_collection_auto_id(self, autoid, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "v", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
],
"segment_row_limit": 10000,
"auto_id": autoid
}
try:
connect.create_collection(collection_name, collection_param)
except Exception as e:
pytest.fail("Unexpected MyError: ".format(str(e)))
finally:
connect.drop_collection(collection_name)
def test_create_collection_exception(self, connect):
collection_name = collection_name_factory()
collection_param = {
"fields": [
{"name": "v", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
],
"segment_row_limit": 10000,
"auto_id": False
}
mock_grpc_timeout = mock.MagicMock(side_effect=grpc.FutureTimeoutError())
with mock.patch.object(Uum, 'future', mock_grpc_timeout):
with pytest.raises(grpc.FutureTimeoutError):
connect.create_collection(collection_name, collection_param)
mock_grpc_error = mock.MagicMock(side_effect=MockGrpcError())
with mock.patch.object(Uum, 'future', mock_grpc_error):
with pytest.raises(grpc.RpcError):
connect.create_collection(collection_name, collection_param)
mock_exception = mock.MagicMock(side_effect=Exception("error"))
with mock.patch.object(Uum, 'future', mock_exception):
with pytest.raises(Exception):
connect.create_collection(collection_name, collection_param)
``` |
{
"source": "2hanhan/LearnCpp",
"score": 3
} |
#### File: Examples/python/cvmat2py.py
```python
import cv2
def load_image(image):
cv2.imshow("image in python", image)
cv2.waitKey(10)
cv2.destroyAllWindows()
return "result"
def load_image_name(name):
image = cv2.imread(name)
cv2.imshow("image python", image)
cv2.waitKey(10)
cv2.destroyAllWindows()
return "result"
``` |
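These helpers are the Python side of a C++-to-Python cv::Mat bridge: by the time `load_image` is called, the image has already been converted to a numpy array. A hedged standalone exercise (the synthetic array and the `test.png` path are illustrative):

```python
import numpy as np
from cvmat2py import load_image, load_image_name

# A synthetic 100x100 mid-gray BGR image stands in for a cv::Mat from C++.
img = np.full((100, 100, 3), 128, dtype=np.uint8)
print(load_image(img))               # shows the image briefly, returns "result"
print(load_image_name("test.png"))   # hypothetical file on disk
```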
{
"source": "2hdddg/boltkit",
"score": 3
} |
#### File: boltkit/boltkit/addressing.py
```python
from socket import getaddrinfo, getservbyname, SOCK_STREAM, AF_INET, AF_INET6
class Address(tuple):
@classmethod
def parse(cls, s, default_host=None, default_port=None):
if isinstance(s, str):
if s.startswith("["):
# IPv6
host, _, port = s[1:].rpartition("]")
return cls((host or default_host or "localhost",
port.lstrip(":") or default_port or 0,
0, 0))
else:
# IPv4
host, _, port = s.partition(":")
return cls((host or default_host or "localhost",
port or default_port or 0))
else:
raise TypeError("Address.parse requires a string argument")
def __new__(cls, iterable):
n_parts = len(iterable)
if n_parts == 2:
inst = tuple.__new__(cls, iterable)
inst.family = AF_INET
elif n_parts == 4:
inst = tuple.__new__(cls, iterable)
inst.family = AF_INET6
else:
raise ValueError("Addresses must consist of either "
"two parts (IPv4) or four parts (IPv6)")
return inst
def __str__(self):
if self.family == AF_INET6:
return "[{}]:{}".format(*self)
else:
return "{}:{}".format(*self)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, tuple(self))
@property
def host(self):
return self[0]
@property
def port(self):
return self[1]
@property
def port_number(self):
try:
return getservbyname(self[1])
except (OSError, TypeError):
# OSError: service/proto not found
# TypeError: getservbyname() argument 1 must be str, not X
try:
return int(self[1])
except (TypeError, ValueError) as e:
raise type(e)("Unknown port value %r" % self[1])
class AddressList(list):
""" A list of socket addresses, each as a tuple of the format expected by
the built-in `socket.connect` method.
"""
@classmethod
def parse(cls, s, default_host=None, default_port=None):
""" Parse a string containing one or more socket addresses, each
separated by whitespace.
"""
if isinstance(s, str):
return cls([Address.parse(a, default_host, default_port)
for a in s.split()])
else:
raise TypeError("AddressList.parse requires a string argument")
def __init__(self, iterable=None):
items = list(iterable or ())
for item in items:
if not isinstance(item, tuple):
raise TypeError("Object {!r} is not a valid address "
"(tuple expected)".format(item))
super().__init__(items)
def __str__(self):
return " ".join(str(Address(_)) for _ in self)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, list(self))
def resolve(self, family=0):
""" Resolve all addresses into one or more resolved address tuples.
Each host name will resolve into one or more IP addresses, limited by
the given address `family` (if any). Each port value (either integer
or string) will resolve into an integer port value (e.g. 'http' will
resolve to 80).
>>> a = AddressList([("localhost", "http")])
>>> a.resolve()
>>> a
AddressList([('::1', 80, 0, 0), ('127.0.0.1', 80)])
"""
resolved = []
for address in iter(self):
host = address[0]
port = address[1]
for _, _, _, _, addr in getaddrinfo(host, port, family,
SOCK_STREAM):
if addr not in resolved:
resolved.append(addr)
self[:] = resolved
```
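A brief illustration of the parsing rules above: plain `host:port` pairs parse as IPv4 2-tuples, bracketed literals as IPv6 4-tuples, and `resolve()` rewrites the list in place (its output depends on the local resolver):

```python
from boltkit.addressing import Address, AddressList

addr = Address.parse("localhost:7687")
print(addr.host, addr.port)   # localhost 7687

addrs = AddressList.parse("localhost:7687 [::1]:7688")
print(addrs)                  # localhost:7687 [::1]:7688
addrs.resolve()               # in place: host names -> IPs, services -> ints
```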
#### File: boltkit/client/packstream.py
```python
from struct import pack as raw_pack, unpack_from as raw_unpack
# Python provides a module called `struct` for coercing data to and from binary
# representations of that data. The format codes below are the ones that
# PackStream cares about and each has been given a handy name to make the code
# that uses it easier to follow. The second character in each of these codes
# (the letter) represents the actual data type, the first character (the '>'
# symbol) denotes that all our representations should be big-endian. This means
# that the most significant part of the value is written to the network or
# memory space first and the least significant part is written last. PackStream
# thinks entirely in big ends.
#
INT_8 = ">b" # signed 8-bit integer (two's complement)
INT_16 = ">h" # signed 16-bit integer (two's complement)
INT_32 = ">i" # signed 32-bit integer (two's complement)
INT_64 = ">q" # signed 64-bit integer (two's complement)
UINT_8 = ">B" # unsigned 8-bit integer
UINT_16 = ">H" # unsigned 16-bit integer
UINT_32 = ">I" # unsigned 32-bit integer
FLOAT_64 = ">d" # IEEE double-precision floating-point format
# The PackStream type system supports a set of commonly-used data types (plus
# null) as well as extension types called "structures" that can be used to
# represent composite values. The full list of types is:
#
# - Null (absence of value)
# - Boolean (true or false)
# - Integer (signed 64-bit integer)
# - Float (64-bit floating point number)
# - String (UTF-8 encoded text data)
# - Bytes (Arrays of raw byte data)
# - List (ordered collection of values)
# - Dictionary (ordered, keyed collection of values)
# - Structure (composite set of values with a type tag)
#
# Neither unsigned integers nor byte arrays are supported but may be added in a
# future version of the format. Note that 32-bit floating point numbers are
# also not supported.
# Oh, by the way, we use hexadecimal a lot here. If you're not familiar with
# that, you might want to take a short break and hop over to Wikipedia to read
# up about it before going much further...
class Structure:
""" The `Structure` data type is used to represent composite values. Each
application of PackStream may define one or more structured types, each
uniquely identified by a unique byte `tag`.
"""
def __init__(self, tag, *fields):
self.tag = tag
self.fields = fields
def __eq__(self, other):
return self.tag == other.tag and self.fields == other.fields
def __ne__(self, other):
return not self.__eq__(other)
def pack(*values):
""" This function provides PackStream values-to-bytes functionality, a
    process known as "packing". The signature of the function permits any
    number of values to be provided as positional arguments. Each will be
    serialised in order into the output byte stream.
Markers
-------
Every serialised value begins with a marker byte. The marker contains
information on data type as well as direct or indirect size information
for those types that require it. How that size information is encoded
varies by marker type.
Some values, such as boolean true, can be encoded within a single marker
byte. Many small integers (specifically between -16 and +127 inclusive)
are also encoded within a single byte.
A number of marker bytes are reserved for future expansion of the format
itself. These bytes should not be used, and encountering them in an
    incoming stream should be treated as an error.
Sized Values
------------
Some value types require variable length representations and, as such,
have their size explicitly encoded. These values generally begin with a
single marker byte, followed by a size, followed by the data content
itself. Here, the marker denotes both type and scale and therefore
determines the number of bytes used to represent the size of the data. The
size itself is either an 8-bit, 16-bit or 32-bit unsigned integer. Sizes
longer than this are not yet supported.
The diagram below illustrates the general layout for a sized value, here
with a 16-bit size:
Marker Size Content
<> <---> <--------------------->
XX XX XX XX XX XX XX .. .. .. XX
Args:
values: Series of values to pack.
Returns:
Byte representation of values.
"""
# First, let's define somewhere to collect the individual byte pieces.
#
data = []
# Next we'll iterate through the values in turn and add the output to our
# collection of byte pieces.
#
for value in values:
# Null is always encoded using the single marker byte C0.
#
if value is None:
data.append(b"\xC0")
# Boolean values are encoded within a single marker byte, using C3 to
# denote true and C2 to denote false.
#
elif value is True:
data.append(b"\xC3")
elif value is False:
data.append(b"\xC2")
# Integers
# --------
# Integer values occupy either 1, 2, 3, 5 or 9 bytes depending on
# magnitude. Several markers are designated specifically as TINY_INT
# values and can therefore be used to pass a small number in a single
# byte. These markers can be identified by a zero high-order bit (for
# positive values) or by a high-order nibble containing only ones (for
# negative values). The available encodings are illustrated below and
# each shows a valid representation for the decimal value 42:
#
# 2A -- TINY_INT
# C8:2A -- INT_8
# C9:00:2A -- INT_16
# CA:00:00:00:2A -- INT_32
# CB:00:00:00:00:00:00:00:2A -- INT_64
#
# Note that while encoding small numbers in wider formats is supported,
# it is generally recommended to use the most compact representation
# possible. The following table shows the optimal representation for
# every possible integer:
#
# Range Minimum | Range Maximum | Variant
# ============================|============================|==========
# -9 223 372 036 854 775 808 | -2 147 483 649 | INT_64
# -2 147 483 648 | -32 769 | INT_32
# -32 768 | -129 | INT_16
# -128 | -17 | INT_8
# -16 | +127 | TINY_INT
# +128 | +32 767 | INT_16
# +32 768 | +2 147 483 647 | INT_32
# +2 147 483 648 | +9 223 372 036 854 775 807 | INT_64
#
elif isinstance(value, int):
if -0x10 <= value < 0x80:
data.append(raw_pack(INT_8, value)) # TINY_INT
elif -0x80 <= value < 0x80:
data.append(b"\xC8")
data.append(raw_pack(INT_8, value)) # INT_8
elif -0x8000 <= value < 0x8000:
data.append(b"\xC9")
data.append(raw_pack(INT_16, value)) # INT_16
elif -0x80000000 <= value < 0x80000000:
data.append(b"\xCA")
data.append(raw_pack(INT_32, value)) # INT_32
elif -0x8000000000000000 <= value < 0x8000000000000000:
data.append(b"\xCB")
data.append(raw_pack(INT_64, value)) # INT_64
else:
raise ValueError("Integer value out of packable range")
# Floating Point Numbers
# ----------------------
# These are double-precision floating-point values, generally used for
# representing fractions and decimals. Floats are encoded as a single
# C1 marker byte followed by 8 bytes which are formatted according to
# the IEEE 754 floating-point "double format" bit layout.
#
# - Bit 63 (the bit that is selected by the mask `0x8000000000000000`)
# represents the sign of the number.
# - Bits 62-52 (the bits that are selected by the mask
# `0x7ff0000000000000`) represent the exponent.
# - Bits 51-0 (the bits that are selected by the mask
# `0x000fffffffffffff`) represent the significand (sometimes called
# the mantissa) of the number.
#
# C1 3F F1 99 99 99 99 99 9A -- Float(+1.1)
# C1 BF F1 99 99 99 99 99 9A -- Float(-1.1)
#
elif isinstance(value, float):
data.append(b"\xC1")
data.append(raw_pack(FLOAT_64, value))
# Strings
# -------
# Text data is represented as UTF-8 encoded bytes. Note that the sizes
# used in string representations are the byte counts of the UTF-8
# encoded data, not the character count of the original text.
#
# Marker | Size | Maximum size
# ========|====================================|=====================
# 80..8F | within low-order nibble of marker | 15 bytes
# D0 | 8-bit big-endian unsigned integer | 255 bytes
# D1 | 16-bit big-endian unsigned integer | 65 535 bytes
# D2 | 32-bit big-endian unsigned integer | 4 294 967 295 bytes
#
# For encoded text containing fewer than 16 bytes, including empty
# strings, the marker byte should contain the high-order nibble '8'
# (binary 1000) followed by a low-order nibble containing the size.
# The encoded data then immediately follows the marker.
#
# For encoded text containing 16 bytes or more, the marker D0, D1 or D2
# should be used, depending on scale. This marker is followed by the
# size and the UTF-8 encoded data.
#
# Examples follow below:
#
# "" -> 80
#
# "A" -> 81:41
#
# "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -> D0:1A:41:42:43:44:45:46:47:48:49:4A:4B:4C
# 4D:4E:4F:50:51:52:53:54:55:56:57:58:59:5A
#
# "Größenmaßstäbe" -> D0:12:47:72:C3:B6:C3:9F:65:6E:6D:61:C3:9F:73:74:C3:A4:62:65
#
elif isinstance(value, str):
utf_8 = value.encode("UTF-8")
size = len(utf_8)
if size < 0x10:
data.append(raw_pack(UINT_8, 0x80 + size))
elif size < 0x100:
data.append(b"\xD0")
data.append(raw_pack(UINT_8, size))
elif size < 0x10000:
data.append(b"\xD1")
data.append(raw_pack(UINT_16, size))
elif size < 0x100000000:
data.append(b"\xD2")
data.append(raw_pack(UINT_32, size))
else:
raise ValueError("String too long to pack")
data.append(utf_8)
# Bytes
# -----
# TODO
# Lists
# -----
# Lists are heterogeneous sequences of values and therefore permit a
# mixture of types within the same list. The size of a list denotes the
# number of items within that list, rather than the total packed byte
# size. The markers used to denote a list are described in the table
# below:
#
# Marker | Size | Maximum size
# ========|====================================|=====================
# 90..9F | within low-order nibble of marker | 15 bytes
# D4 | 8-bit big-endian unsigned integer | 255 items
# D5 | 16-bit big-endian unsigned integer | 65 535 items
# D6 | 32-bit big-endian unsigned integer | 4 294 967 295 items
#
# For lists containing fewer than 16 items, including empty lists, the
# marker byte should contain the high-order nibble '9' (binary 1001)
# followed by a low-order nibble containing the size. The items within
# the list are then serialised in order immediately after the marker.
#
# For lists containing 16 items or more, the marker D4, D5 or D6 should
# be used, depending on scale. This marker is followed by the size and
# list items, serialized in order.
#
# Examples follow below:
#
# [] -> 90
#
# [1, 2, 3] -> 93:01:02:03
#
# [1, 2.0, "three"] -> 93:01:C1:40:00:00:00:00:00:00:00:85:74:68:72:65:65
#
        #   [1, 2, 3, ... 40] -> D4:28:01:02:03:04:05:06:07:08:09:0A:0B:0C:0D:0E:0F:10
        #                        11:12:13:14:15:16:17:18:19:1A:1B:1C:1D:1E:1F:20:21:22
        #                        23:24:25:26:27:28
#
elif isinstance(value, list):
size = len(value)
if size < 0x10:
data.append(raw_pack(UINT_8, 0x90 + size))
elif size < 0x100:
data.append(b"\xD4")
data.append(raw_pack(UINT_8, size))
elif size < 0x10000:
data.append(b"\xD5")
data.append(raw_pack(UINT_16, size))
elif size < 0x100000000:
data.append(b"\xD6")
data.append(raw_pack(UINT_32, size))
else:
raise ValueError("List too long to pack")
data.extend(map(pack, value))
# Dictionaries
# ------------
# Dictionaries are ordered sets of key-value pairs that permit a
# mixture of value types within the same container. The size of a
# dictionary specifically determines the number of pairs within that
# dictionary, not the total packed byte size. The markers used to
# denote a dictionary are described in the table below:
#
# Marker | Size | Maximum size
# ========|====================================|=======================
# A0..AF | within low-order nibble of marker | 15 entries
# D8 | 8-bit big-endian unsigned integer | 255 entries
# D9 | 16-bit big-endian unsigned integer | 65 535 entries
# DA | 32-bit big-endian unsigned integer | 4 294 967 295 entries
#
# For dictionaries containing fewer than 16 key-value pairs, including
# empty dictionaries, the marker byte should contain the high-order
# nibble 'A' (binary 1010) followed by a low-order nibble containing
# the size. The entries within the dictionary are then serialised in
# [key, value, key, value] order immediately after the marker. Keys
# must be string values.
#
# For dictionaries containing 16 pairs or more, the marker D8, D9 or DA
# should be used, depending on scale. This marker is followed by the
# size and dictionary entries. Examples follow below:
#
# {} -> A0
#
# {"one": "eins"} -> A1:83:6F:6E:65:84:65:69:6E:73
#
# {"A": 1, "B": 2 ... "Z": 26} -> D8:1A:81:45:05:81:57:17:81:42:02:81:4A:0A:81:41:01
# 81:53:13:81:4B:0B:81:49:09:81:4E:0E:81:55:15:81:4D
# 0D:81:4C:0C:81:5A:1A:81:54:14:81:56:16:81:43:03:81
# 59:19:81:44:04:81:47:07:81:46:06:81:50:10:81:58:18
# 81:51:11:81:4F:0F:81:48:08:81:52:12
#
# The order in which map entries are encoded is not important; maps are, by definition,
# unordered.
#
elif isinstance(value, dict):
size = len(value)
if size < 0x10:
data.append(raw_pack(UINT_8, 0xA0 + size))
elif size < 0x100:
data.append(b"\xD8")
data.append(raw_pack(UINT_8, size))
elif size < 0x10000:
data.append(b"\xD9")
data.append(raw_pack(UINT_16, size))
elif size < 0x100000000:
data.append(b"\xDA")
data.append(raw_pack(UINT_32, size))
else:
raise ValueError("Dictionary too long to pack")
data.extend(pack(k, v) for k, v in value.items())
# Structures
# ----------
# Structures represent composite values and consist, beyond the marker,
# of a single byte tag followed by a sequence of fields, each an
# individual value. The size of a structure is measured as the number
# of fields and not the total byte size. This count does not include
# the tag. The markers used to denote a structure are described in
# the table below:
#
# Marker | Size | Maximum size
# ========|====================================|===============
# B0..BF | within low-order nibble of marker | 15 fields
# DC | 8-bit big-endian unsigned integer | 255 fields
# DD | 16-bit big-endian unsigned integer | 65 535 fields
#
# The tag byte is used to identify the type or class of the structure.
# Tag bytes may hold any value between 0 and +127. Bytes with the high
# bit set are reserved for future expansion. For structures containing
# fewer than 16 fields, the marker byte should contain the high-order
# nibble 'B' (binary 1011) followed by a low-order nibble containing
# the size. The marker is immediately followed by the tag byte and the
# field values.
#
# For structures containing 16 fields or more, the marker DC or DD
# should be used, depending on scale. This marker is followed by the
# size, the tag byte and the fields, serialised in order. Examples
# follow below:
#
# B3 01 01 02 03 -- Struct(sig=0x01, fields=[1,2,3])
# DC 10 7F 01 02 03 04 05 06 07 08 09 00 01 02 03
# 04 05 06 -- Struct(sig=0x7F, fields=[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6]
#
elif isinstance(value, Structure):
size = len(value.fields)
if size < 0x10:
data.append(raw_pack(UINT_8, 0xB0 + size))
elif size < 0x100:
data.append(b"\xDC")
data.append(raw_pack(UINT_8, size))
elif size < 0x10000:
data.append(b"\xDD")
data.append(raw_pack(UINT_16, size))
else:
raise ValueError("Structure too big to pack")
data.append(raw_pack(UINT_8, value.tag))
data.extend(map(pack, value.fields))
# For anything else, we'll just raise an error as we don't know how to
# encode it.
#
else:
raise ValueError("Cannot pack value %r" % (value,))
# Finally, we can glue all the individual pieces together and return the
# full byte representation of the original values.
#
return b"".join(data)
class Unpackable:
""" The Packed class provides a framework for "unpacking" packed data.
Given a string of byte data and an initial offset, values can be extracted
via the unpack method.
"""
def __init__(self, data, offset=0):
self.data = data
self.offset = offset
def raw_unpack(self, type_code):
value, = raw_unpack(type_code, self.data, self.offset)
self.offset += {
INT_8: 1, INT_16: 2, INT_32: 4, INT_64: 8,
UINT_8: 1, UINT_16: 2, UINT_32: 4, FLOAT_64: 8,
}[type_code]
return value
def unpack_string(self, size):
end = self.offset + size
value = self.data[self.offset:end].decode("UTF-8")
self.offset = end
return value
def unpack(self, count=1):
for _ in range(count):
marker_byte = self.raw_unpack(UINT_8)
if marker_byte == 0xC0:
yield None
elif marker_byte == 0xC3:
yield True
elif marker_byte == 0xC2:
yield False
elif marker_byte < 0x80:
yield marker_byte
elif marker_byte >= 0xF0:
yield marker_byte - 0x100
elif marker_byte == 0xC8:
yield self.raw_unpack(INT_8)
elif marker_byte == 0xC9:
yield self.raw_unpack(INT_16)
elif marker_byte == 0xCA:
yield self.raw_unpack(INT_32)
elif marker_byte == 0xCB:
yield self.raw_unpack(INT_64)
elif marker_byte == 0xC1:
yield self.raw_unpack(FLOAT_64)
elif 0x80 <= marker_byte < 0x90:
yield self.unpack_string(marker_byte & 0x0F)
elif marker_byte == 0xD0:
yield self.unpack_string(self.raw_unpack(UINT_8))
elif marker_byte == 0xD1:
yield self.unpack_string(self.raw_unpack(UINT_16))
elif marker_byte == 0xD2:
yield self.unpack_string(self.raw_unpack(UINT_32))
elif 0x90 <= marker_byte < 0xA0:
yield list(self.unpack(marker_byte & 0x0F))
elif marker_byte == 0xD4:
yield list(self.unpack(self.raw_unpack(UINT_8)))
elif marker_byte == 0xD5:
yield list(self.unpack(self.raw_unpack(UINT_16)))
elif marker_byte == 0xD6:
yield list(self.unpack(self.raw_unpack(UINT_32)))
elif 0xA0 <= marker_byte < 0xB0:
yield dict(tuple(self.unpack(2)) for _ in range(marker_byte & 0x0F))
elif marker_byte == 0xD8:
yield dict(tuple(self.unpack(2)) for _ in range(self.raw_unpack(UINT_8)))
elif marker_byte == 0xD9:
yield dict(tuple(self.unpack(2)) for _ in range(self.raw_unpack(UINT_16)))
elif marker_byte == 0xDA:
yield dict(tuple(self.unpack(2)) for _ in range(self.raw_unpack(UINT_32)))
elif 0xB0 <= marker_byte < 0xC0:
yield Structure(self.raw_unpack(UINT_8), *self.unpack(marker_byte & 0x0F))
else:
raise ValueError("Unknown marker byte {:02X}".format(marker_byte))
def unpack_all(self):
while self.offset < len(self.data):
yield next(self.unpack(1))
def unpack(data, offset=0):
return next(Unpackable(data, offset).unpack())
```
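A short round-trip sketch of `pack` and `unpack`; each expected byte string follows directly from the marker tables documented above:

```python
from boltkit.client.packstream import Structure, pack, unpack

assert pack(42) == b"\x2A"                      # TINY_INT
assert pack("A") == b"\x81A"                    # tiny string
assert pack([1, 2, 3]) == b"\x93\x01\x02\x03"   # tiny list
assert unpack(pack({"one": "eins"})) == {"one": "eins"}
assert unpack(pack(Structure(0x01, 1, 2, 3))) == Structure(0x01, 1, 2, 3)
```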
#### File: boltkit/server/bytetools.py
```python
def h(data):
""" A small helper function to translate byte data into a human-readable hexadecimal
representation. Each byte in the input data is converted into a two-character hexadecimal
string and is joined to its neighbours with a colon character.
This function is not essential to driver-building but is a great help when debugging,
logging and writing doctests.
    >>> from boltkit.server.bytetools import h
>>> h(b"\x03A~")
'03:41:7E'
Args:
data: Input byte data as `bytes` or a `bytearray`.
Returns:
A textual representation of the input data.
"""
return ":".join("{:02X}".format(b) for b in bytearray(data))
```
#### File: boltkit/server/proxy.py
```python
from logging import getLogger
from socket import socket, SOL_SOCKET, SO_REUSEADDR, AF_INET, AF_INET6
from struct import unpack_from as raw_unpack
from threading import Thread
from boltkit.addressing import Address, AddressList
from boltkit.server.bytetools import h
from boltkit.client import CLIENT, SERVER
from boltkit.client.packstream import UINT_32, Unpackable
log = getLogger("boltkit")
class Peer(object):
def __init__(self, socket, address):
self.socket = socket
self.address = address
self.bolt_version = 0
class ProxyPair(Thread):
def __init__(self, client, server):
super(ProxyPair, self).__init__()
self.client = client
self.server = server
log.debug("C: <CONNECT> {} -> {}".format(self.client.address, self.server.address))
log.debug("C: <BOLT> {}".format(h(self.forward_bytes(client, server, 4))))
log.debug("C: <VERSION> {}".format(h(self.forward_bytes(client, server, 16))))
raw_bolt_version = self.forward_bytes(server, client, 4)
bolt_version, = raw_unpack(UINT_32, raw_bolt_version)
self.client.bolt_version = self.server.bolt_version = bolt_version
log.debug("S: <VERSION> {}".format(h(raw_bolt_version)))
self.client_messages = {v: k for k, v in CLIENT[self.client.bolt_version].items()}
self.server_messages = {v: k for k, v in SERVER[self.server.bolt_version].items()}
def run(self):
client = self.client
server = self.server
more = True
while more:
try:
self.forward_exchange(client, server)
except RuntimeError:
more = False
log.debug("C: <CLOSE>")
@classmethod
def forward_bytes(cls, source, target, size):
data = source.socket.recv(size)
target.socket.sendall(data)
return data
@classmethod
def forward_chunk(cls, source, target):
chunk_header = cls.forward_bytes(source, target, 2)
if not chunk_header:
raise RuntimeError()
chunk_size = chunk_header[0] * 0x100 + chunk_header[1]
return cls.forward_bytes(source, target, chunk_size)
@classmethod
def forward_message(cls, source, target):
d = b""
size = -1
while size:
data = cls.forward_chunk(source, target)
size = len(data)
d += data
return d
def forward_exchange(self, client, server):
rq_message = self.forward_message(client, server)
rq_signature = rq_message[1]
rq_data = Unpackable(rq_message[2:]).unpack_all()
log.debug("C: {} {}".format(self.client_messages[rq_signature], " ".join(map(repr, rq_data))))
more = True
while more:
rs_message = self.forward_message(server, client)
rs_signature = rs_message[1]
rs_data = Unpackable(rs_message[2:]).unpack_all()
log.debug("S: {} {}".format(self.server_messages[rs_signature], " ".join(map(repr, rs_data))))
more = rs_signature == 0x71
class ProxyServer(Thread):
running = False
def __init__(self, server_addr, listen_addr=None):
super(ProxyServer, self).__init__()
self.socket = socket()
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
addresses = AddressList([listen_addr or Address.parse(":17687")])
addresses.resolve(family=AF_INET)
self.socket.bind(addresses[0])
self.socket.listen(0)
server_addr.resolve()
self.server_addr = server_addr[0]
self.pairs = []
def __del__(self):
self.stop()
def run(self):
self.running = True
while self.running:
client_socket, client_address = self.socket.accept()
server_socket = socket({2: AF_INET, 4: AF_INET6}[len(self.server_addr)])
server_socket.connect(self.server_addr)
client = Peer(client_socket, client_address)
server = Peer(server_socket, self.server_addr)
pair = ProxyPair(client, server)
pair.start()
self.pairs.append(pair)
def stop(self):
self.running = False
```
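`forward_chunk` and `forward_message` above mirror Bolt's transport framing: each message travels as chunks prefixed by a two-byte big-endian size, terminated by a zero-size chunk. A standalone sketch of the sending side of that framing:

```python
import struct

def to_chunks(message, max_chunk=0xFFFF):
    """Frame a message body as Bolt chunks plus the 00:00 end marker."""
    framed = b""
    for i in range(0, len(message), max_chunk):
        body = message[i:i + max_chunk]
        framed += struct.pack(">H", len(body)) + body
    return framed + b"\x00\x00"

assert to_chunks(b"\xB0\x0F") == b"\x00\x02\xB0\x0F\x00\x00"
```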
#### File: boltkit/boltkit/watcher.py
```python
from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING, Formatter, StreamHandler, getLogger
from sys import stdout
def black(s):
return "\x1b[30m{:s}\x1b[0m".format(s)
def red(s):
return "\x1b[31m{:s}\x1b[0m".format(s)
def green(s):
return "\x1b[32m{:s}\x1b[0m".format(s)
def yellow(s):
return "\x1b[33m{:s}\x1b[0m".format(s)
def blue(s):
return "\x1b[34m{:s}\x1b[0m".format(s)
def magenta(s):
return "\x1b[35m{:s}\x1b[0m".format(s)
def cyan(s):
return "\x1b[36m{:s}\x1b[0m".format(s)
def white(s):
    return "\x1b[37m{:s}\x1b[0m".format(s)
def bright_black(s):
return "\x1b[30;1m{:s}\x1b[0m".format(s)
def bright_red(s):
return "\x1b[31;1m{:s}\x1b[0m".format(s)
def bright_green(s):
return "\x1b[32;1m{:s}\x1b[0m".format(s)
def bright_yellow(s):
return "\x1b[33;1m{:s}\x1b[0m".format(s)
def bright_blue(s):
return "\x1b[34;1m{:s}\x1b[0m".format(s)
def bright_magenta(s):
return "\x1b[35;1m{:s}\x1b[0m".format(s)
def bright_cyan(s):
return "\x1b[36;1m{:s}\x1b[0m".format(s)
def bright_white(s):
return "\x1b[37;1m{:s}\x1b[0m".format(s)
class ColourFormatter(Formatter):
def format(self, record):
s = super(ColourFormatter, self).format(record)
bits = s.split(" ", maxsplit=1)
bits[0] = bright_black(bits[0])
if record.levelno == CRITICAL:
bits[1] = bright_red(bits[1])
elif record.levelno == ERROR:
bits[1] = bright_yellow(bits[1])
elif record.levelno == WARNING:
bits[1] = yellow(bits[1])
        elif record.levelno == INFO:
            pass  # INFO lines keep the default colour
elif record.levelno == DEBUG:
bits[1] = cyan(bits[1])
return " ".join(bits)
class Watcher(object):
""" Log watcher for monitoring driver and protocol activity.
"""
handlers = {}
def __init__(self, logger_name):
super(Watcher, self).__init__()
self.logger_name = logger_name
self.logger = getLogger(self.logger_name)
self.formatter = ColourFormatter("%(asctime)s %(message)s",
"%H:%M:%S")
def watch(self, level=INFO, out=stdout):
self.stop()
handler = StreamHandler(out)
handler.setFormatter(self.formatter)
self.handlers[self.logger_name] = handler
self.logger.addHandler(handler)
self.logger.setLevel(level)
def stop(self):
try:
self.logger.removeHandler(self.handlers[self.logger_name])
except KeyError:
pass
def watch(logger_name, level=INFO, out=stdout):
""" Quick wrapper for using the Watcher.
:param logger_name: name of logger to watch
:param level: minimum log level to show (default INFO)
:param out: where to send output (default stdout)
:return: Watcher instance
"""
watcher = Watcher(logger_name)
watcher.watch(level, out)
return watcher
``` |
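Hypothetical use of the `watch` wrapper: attach a coloured handler to the boltkit logger for a session, then detach it:

```python
from logging import DEBUG

watcher = watch("boltkit", level=DEBUG)
# ... drive some client or server traffic here ...
watcher.stop()
```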
{
"source": "2hdddg/pyvidstream",
"score": 2
} |
#### File: pyvidstream/vidutil/vidstream.py
```python
from subprocess import PIPE, Popen
from threading import Thread
from Queue import Queue, Empty
import signal
import re
from collections import namedtuple
import logging
import json
""" A frame with a bunch of properties
"""
Frame = namedtuple('Frame', ['type', 'key_frame', 'width', 'height',
'coded_picture_number'])
""" A group of pictures defined as a sequence
of frames starting with an I frame that is also
a keyframe (IDR) and all frames after that until
next (but not including) I IDR frame.
frames -- array of frames with the first one
always being an I IDR frame.
"""
GOP = namedtuple('GOP', ['frames'])
""" A frame with a qmap only has type (I/P/B) and
a qmap that is an array of ints representing the
qp value per macroblock
"""
QmapFrame = namedtuple('QmapFrame', ['type', 'qmap'])
_logger = None
def _init_logging():
global _logger
_logger = logging.getLogger(__name__)
if not len(_logger.handlers):
_logger.addHandler(logging.StreamHandler())
_logger.setLevel(logging.INFO)
class QmapParser:
""" Parses ffprobe 'debug pq' output
Produces QmapFrame instances.
"""
def __init__(self, collect):
self._type = None
self._qmap = []
self._collect = collect
self.noise = 0
def parse_line(self, line):
"""Parses a line of ffprobe output.
line -- should be on format output by ffprobe
'-debug qp' command
return True if parsing should continue
False if parsing should stop
"""
        m = re.match(r'^\[.*\] All info found$', line)
if m:
self.noise = 0
_logger.info("Parser encountered end of stream")
return False
        m = re.match(r'^\[.*\] New frame, type: ([IPB])$', line)
if m:
_logger.debug("Parser found start of new frame")
cont = True
if self._type:
frame = QmapFrame(self._type, self._qmap)
cont = self._collect(frame)
self._type = m.group(1)
self._qmap = []
self.noise = 0
return cont
        m = re.match(r'^\[.*\] (\d*)$', line)
if m:
_logger.debug("Parser found qmap digits")
digits = m.group(1)
for i in xrange(0, len(digits), 2):
qp = int(digits[i:i+2])
self._qmap.append(qp)
self.noise = 0
return True
# When not matching, increase noise level
self.noise = self.noise + 1
_logger.debug("Parser unknown line, increasing "
"noise to %d:\n %s", self.noise, line)
return True
class FrameParser:
""" Parses ffprobe --show_frames compact json output
Produces Frame instances.
"""
def __init__(self, collect):
self.noise = 0
self._collect = collect
def _parse_json(self, j):
if j['media_type'] != 'video':
return True
key_frame = j['key_frame'] == 1
pict_type = j['pict_type']
width = j['width']
height = j['height']
coded_picture_number = j['coded_picture_number']
frame = Frame(type=pict_type, key_frame=key_frame,
width=width, height=height,
coded_picture_number=coded_picture_number)
# Parsing ok, reset noise
self.noise = 0
return self._collect(frame)
def parse_line(self, line):
line = line.strip().rstrip(',')
if line == '}':
_logger.info("Parser encountered end of stream")
return False
try:
j = json.loads(line)
except ValueError as e:
self.noise = self.noise + 1
_logger.debug("Parser unknown line, increasing noise "
"to %d:\n %s", self.noise, line)
return True
return self._parse_json(j)
def _put_line_in_queue(f, queue):
""" To be executed in a separate thread.
Puts a line in the queue to be consumed by
main thread.
"""
for line in iter(f.readline, ''):
queue.put(line)
def _process_output(process, f, parser,
line_timeout=3, max_num_timeouts=3,
max_noise=70):
# Will contain lines to parse
queue = Queue()
# Read lines from separate thread
thread = Thread(target=_put_line_in_queue, args=(f, queue))
thread.start()
# Fail when max_num_timeouts reached
num_timeouts = 0
while True:
try:
line = queue.get(timeout=line_timeout)
_logger.debug("Got line to be parsed")
except Empty:
""" Timed out while waiting for a new line in queue,
this could mean that the stream is alive or slow... """
if process.poll() is not None:
_logger.error("Watched process exited with %d, aborting",
process.returncode)
break
else:
num_timeouts = num_timeouts + 1
_logger.warn("Got line timeout number %d of %d",
num_timeouts, max_num_timeouts)
if num_timeouts > max_num_timeouts:
_logger.error("Reached max number of timeouts, "
"aborting")
break
else:
# Got a line to be parsed
try:
cont = parser.parse_line(line)
except:
_logger.exception("Parser exception")
break
if not cont:
break
if parser.noise > max_noise:
_logger.error("Exceeded noise level %d, max is %d," \
" aborting",
parser.noise, max_noise)
break
""" Let the process finish up nicely otherwise
the thread waiting for lines will not exit properly
and the program might appear to hang waiting for it.
"""
try:
process.send_signal(signal.SIGINT)
except OSError as e:
if e.errno == 3:
_logger.error("Process already exited.")
else:
raise e
# Could also wait and terminate to ensure exited
def get_n_qmaps(n, source, line_timeout=3):
""" Retrieves n number of frames from specified source.
Retrieved frames has type (I/P/B) and a qmap (array of
qp values)
n -- number of frames to retrieve
source -- url or path to video.
line_timeout -- number of seconds to wait for input
return tuple of success code and array of frames
"""
_init_logging()
frames = []
def collect(frame):
frames.append(frame)
_logger.debug("Collected frame %d", len(frames))
done = len(frames) == n
if done:
_logger.info("Collected %d frames, done" % n)
# Return value indicates if parser should continue
return not done
command = ['ffprobe',
'-v', 'quiet',
'-show_frames', # Need something...
'-debug', 'qp']
command.append(source)
ffprobe = Popen(command, stderr=PIPE, bufsize=1)
parser = QmapParser(collect=collect)
""" Setting max_num_timeouts to n to allow one timeout per frame
for low framerates. max_noise is set to account for very noisy start
of stream, could be set to a lower value if we decide to ignore
some in the start.
"""
_process_output(ffprobe, ffprobe.stderr, parser,
line_timeout=line_timeout, max_num_timeouts=n,
max_noise=70)
return (len(frames) == n, frames)
def get_n_frames(n, source, line_timeout=30):
_init_logging()
frames = []
def collect(frame):
frames.append(frame)
_logger.debug("Collected frame %d", len(frames))
done = len(frames) == n
if done:
_logger.info("Collected %d frames, done" % n)
# Return value indicates if parser should continue
return not done
command = ['ffprobe',
'-show_frames',
'-v', 'quiet',
'-print_format', 'json=compact=1']
command.append(source)
ffprobe = Popen(command, stdout=PIPE, bufsize=1)
parser = FrameParser(collect=collect)
_process_output(ffprobe, ffprobe.stdout, parser,
line_timeout=line_timeout, max_num_timeouts=n,
max_noise=70)
return (len(frames) == n, frames)
def get_n_gops(n, source, line_timeout=30):
_init_logging()
gops = []
state = {'frames': None, 'gops': gops}
def collect(frame):
if frame.key_frame and frame.type == 'I':
# Start of new gop
if state['frames']:
gop = GOP(frames=state['frames'])
state['gops'].append(gop)
state['frames'] = []
state['frames'].append(frame)
done = len(state['gops']) == n
if done:
_logger.info("Collected %d gops, done" % n)
return not done
if state['frames']:
_logger.debug("Collecting frame to gop")
state['frames'].append(frame)
else:
_logger.info("Skipping frame before start of first gop")
return True
command = ['ffprobe',
'-show_frames',
'-v', 'quiet',
'-print_format', 'json=compact=1']
command.append(source)
ffprobe = Popen(command, stdout=PIPE, bufsize=1)
parser = FrameParser(collect=collect)
_process_output(ffprobe, ffprobe.stdout, parser,
line_timeout=line_timeout, max_num_timeouts=n,
max_noise=70)
return (len(gops) == n, gops)
if __name__ == '__main__':
source = "rtsp://192.168.127.12/vod/mp4:BigBuckBunny_175k.mov"
result = get_n_qmaps(n=6, source=source)
print "ok" if result[0] and len(result[1]) == 6 else "nok"
result = get_n_frames(10,
source=source)
print "ok" if result[0] and len(result[1]) == 10 else "nok"
result = get_n_gops(2, source=source)
print "ok" if result[0] and len(result[1]) == 2 else "nok"
``` |
{
"source": "2henwei/map",
"score": 2
} |
#### File: pipe_view/model/layout_context.py
```python
import logging
import weakref
import os
import time
from .layout import Layout
from .element_set import ElementSet
from .database_handle import DatabaseHandle
from .search_handle import SearchHandle
from .element import FakeElement
from .extension_manager import ExtensionManager
from model.location_manager import LocationManager
import model.content_options as content
import model.highlighting_utils as highlighting_utils
class Layout_Context(object):
EXTENT_L = 0 # Left
EXTENT_T = 1 # Top
EXTENT_R = 2 # Right
EXTENT_B = 3 # Bottom
    ## Dummy class for showing that extents are invalid
class InvalidExtents:
pass
    ## Creates an OrderedDict for the Layout
    #  @param loc_vars Location string variables dictionary. This reference is
    #  used (not copied). Created if None.
def __init__(self, layout, db, hc = 0, loc_vars = None):
if hc is None:
hc = 0
self.__layout = None # Will be updated later. Allows __str__ to succeed
self.__group = None # Sync group
self.__frame = None # Parent Layout_Frame (weak reference)
self.__db = db
self.__dbhandle = DatabaseHandle(db)
self.__searchhandle = SearchHandle(self)
self.__qapi = self.__dbhandle.api
start = self.__qapi.getFileStart()
inc_end = self.__qapi.getFileInclusiveEnd()
if hc < start:
self.__hc = start
elif hc > inc_end:
self.__hc = inc_end
else:
self.__hc = hc
self.__extents = [0, 0, 1, 1]
self.__loc_variables = loc_vars if loc_vars is not None else {} # Location-string variables
self.__loc_variables_changed = True if loc_vars is not None else False
self.__extensions = ExtensionManager()
if layout.GetFilename():
self.__extensions.AddPath(os.path.dirname(layout.GetFilename()))
self.__elements = ElementSet(self, self.__extensions)
self.SetHC(self.__hc)
self.__layout = layout
self.__layout.LinkLayoutContext(self)
self.__PullFromLayout()
# Number of stabbing query results to store at a time
self.__cache_size = 20
# get visible clock stuff
self.__number_elements = 0
self.__visible_clocks = ()
# Uop highlighting list
self.__highlighted_uops = []
self.__previously_highlighted_uops = []
# # Returns the SearchHandle held by this context.
@property
def searchhandle(self):
return self.__searchhandle
# # Returns the DatabaseHandle held by this context.
@property
def dbhandle(self):
return self.__dbhandle
# # Returns the hypercycle this Context is currently centered on
@property
def hc(self):
return self.__hc
# # Populate our elements from current layout
# At the moment, call this only once.
def __PullFromLayout(self):
        # Flatten out structure of layout for easy access
for e in self.__layout.GetElements():
self.AddElement(e)
if e.HasChildren():
children = e.GetChildren()
for child in children:
self.AddElement(child)
# # Returns the location-string variables dictionary for this layout context
def GetLocationVariables(self):
return self.__loc_variables
def GetLocationVariablesChanged(self):
return self.__loc_variables_changed
# # Updates location variables based on any new element locations created with variables in them
def UpdateLocationVariables(self):
for el in self.__layout.GetElements():
if el.HasProperty('LocationString'):
loc_str = el.GetProperty('LocationString')
loc_vars = LocationManager.findLocationVariables(loc_str)
for k, v in loc_vars:
if k in self.__loc_variables and self.__loc_variables[k] != v:
pass # Do not add these to the table since there is a conflict
# # @todo Represent these conflicting variable defaults somehow so that they
# # can be resolved in the location variables dialog
else:
self.__loc_variables[k] = v
self.__loc_variables_changed = True
def AckLocationVariablesChanged(self):
self.__loc_variables_changed = False
# # Adds a new element to the OrderedDict
# @param e Element to add
# @param after_pin PIN of element after which to insert this element
# @profile
def AddElement(self, e, after_pins = [None]):
# #print 'Adding {} after pins {}'.format(e, after_pins)
self.__elements.AddElement(e, after_pins = after_pins)
# Update extents. This can only increase
if isinstance(self.__extents, self.InvalidExtents):
self.__extents = [0, 0, 1, 1]
(x, y), (w, h) = e.GetProperty('position'), e.GetProperty('dimensions')
r = x + w
b = y + h
self.__extents[self.EXTENT_R] = max(r, self.__extents[self.EXTENT_R])
self.__extents[self.EXTENT_B] = max(b, self.__extents[self.EXTENT_B])
return e
# # Remove an element from the ElementSet
def RemoveElement(self, e):
self.__elements.RemoveElement(e)
(x, y), (w, h) = e.GetProperty('position'), e.GetProperty('dimensions')
r = x + w
b = y + h
if isinstance(self.__extents, self.InvalidExtents):
return # Already invalid, nothing to do here. Will recalc later
# If this element was at the edge, we need a full recalc.
# Invalidate extents and recalc when asked
# Just in case extents were wrong and r was greater, use >= cur extent
if r >= self.__extents[self.EXTENT_R]:
self.__extents = self.InvalidExtents()
elif b >= self.__extents[self.EXTENT_B]:
self.__extents = self.InvalidExtents()
# # Move a set of elements to above the highest entry in a list
# @above_pin_list List of pins above which to move each element in the
# elements list. May contain None at the end to indicate "move to top"
# @note elements will retain their own ordering
def MoveElementsAbovePINs(self, elements, above_pin_list):
for e in elements:
self.__layout.RemoveElement(e)
# #self.__elements.RemoveElement(e)
if len(above_pin_list) == 0:
above_pin_list = [-1]
prev_pins_list = above_pin_list[:]
for e in elements:
# #self.__elements.AddElement(e, after_pin=prev_pin)
self.__layout.AddElement(e, follows_pins = prev_pins_list)
prev_pins_list.append(e.GetPIN())
# # Move a set of elements to below the lowest entry in a list
# @below_pin_list List of pins below which to move each element in the
# elements list. May contain -1 at the start to indicate "move to bottom"
# @note elements will retain their own ordering
def MoveElementsBelowPINs(self, elements, below_pin_list):
for e in elements:
self.__layout.RemoveElement(e)
# #self.__elements.RemoveElement(e)
if len(below_pin_list) == 0:
            below_pin_list = [None]
prev_pins_list = below_pin_list[:]
for e in elements:
# #self.__elements.AddElement(e, after_pin=prev_pin)
self.__layout.AddElement(e, follows_pins = prev_pins_list)
prev_pins_list.append(e.GetPIN())
# # If a property for an Element is changed such that it will no longer be
# correctly sorted in the ElementSet, this method will figure out where
# it goes
def ReSort(self, e, t_off, loc):
id = self.__dbhandle.database.location_manager.getLocationInfo(loc, self.__loc_variables)[0]
self.__elements.ReSort(e, t_off, id)
# # Resort all elements because some locations variable has changed and all elements may be
# effected
def ReSortAll(self):
self.__elements.ReSortAll()
# # An element in the layout has moved
def ElementMoved(self, e):
# Because this can be called many times during a mass-move or resize,
# simply invalidate the extents
self.__extents = self.InvalidExtents()
# # Returns True if element was moved
def IsElementMoved(self):
return isinstance(self.__extents, self.InvalidExtents)
# # Used in the event that a property was changed for an element which may
# require an updated value to be displayed
def ReValue(self, e):
self.__elements.ReValue(e)
# NOTE: This would be a good place to update any variables extracted
# from this element. Note that this might be very slow here since this
# method can be invoked during mass-updates
# # Used in the event that many elements were changed (e.g. a location string
# variable was updated)
def ReValueAll(self):
self.__elements.ReValueAll()
    # # Updates this context's elements for the current cycle
def Update(self):
self.__elements.Update(self.__hc)
# # update that is called every major display update
def MicroUpdate(self):
self.__elements.MicroUpdate()
# # Force a DB update.
def DBUpdate(self):
self.__elements.DBUpdate()
# # Force a full update.
def FullUpdate(self):
self.__elements.FullUpdate()
# # Force a full redraw of all elements without marking them as changed
def FullRedraw(self):
self.__elements.RedrawAll()
    # # Returns the layout which this Context is referencing
def GetLayout(self):
return self.__layout
def GetExtensionManager(self):
return self.__extensions
# # Return a set of all clockid's referred to
def GetVisibleClocks(self):
elements = self.GetElements()
if self.__number_elements == len(elements):
return self.__visible_clocks
else:
loc_mgr = self.__db.location_manager
clocks = set()
for element in elements:
if element.NeedsDatabase():
info = loc_mgr.getLocationInfo(element.GetProperty('LocationString'), {})
if info[0] != loc_mgr.INVALID_LOCATION_ID:
# only add if valid location
clocks.add(info[2])
clocks = tuple(clocks)
self.__visible_clocks = clocks
self.__number_elements = len(elements)
return clocks
# # Return a set of all locations referred to
def GetVisibleLocations(self):
get_loc_info = self.__db.location_manager.getLocationInfo
locations = set()
for element in self.GetElements():
if element.NeedsDatabase():
locations.add(get_loc_info(element.GetProperty('LocationString'), {})[0])
return locations
# # Returns the All Objects
def GetElementPairs(self):
return self.__elements.GetPairs()
# # Returns all pairs suitable for drawing
def GetDrawPairs(self, bounds):
return self.__elements.GetDrawPairs(bounds)
def GetVisibilityTick(self):
return self.__elements.GetVisibilityTick()
def GetElements(self):
return self.__elements.GetElements()
def GetElementPair(self, e):
return self.__elements.GetPair(e)
def GetElementExtents(self):
'''
Returns the left,right,top,bottom extents of the layout based on what
elements it contains
@note Recalculates extents if necessary
@return (left,top,right,bottom)
'''
if isinstance(self.__extents, self.InvalidExtents):
self.__extents = [0, 0, 1, 1]
els = self.GetElements()
for e in els:
(x, y), (w, h) = e.GetProperty('position'), e.GetProperty('dimensions')
self.__extents[self.EXTENT_R] = max(self.__extents[self.EXTENT_R], x + w)
self.__extents[self.EXTENT_B] = max(self.__extents[self.EXTENT_B], y + h)
return tuple(self.__extents)
# # For testing purposes only
def __repr__(self):
return '<Layout_Context layout={}>'.format(self.__layout)
# TODO eliminate this
def CacheResults(self, res):
self.__qres = res
# TODO eliminate this
def GetQResults(self):
return self.__qres
# # Jumps context to a specific tick.
# @param hc Hypercycle (tick) to jump to. This tick will be constrained
# to the endpoints of this database handle's file range
# @note Directly refreshes the associated Frame if not attached to a group.
    # Otherwise, this context and the associated frame will be refreshed
# through the group, when it invokes 'RefreshFrame' on all its contained
# layout
# contexts
# @todo rework this
#
# Performs new queries at the chosen tick and updates element data
def GoToHC(self, hc = None, no_broadcast = False):
# print "{}: GoToHC called".format(time.time())
# show busy cursor every call
frame = self.__frame()
if frame:
frame.SetBusy(True)
if hc is None:
hc = self.__hc
hc = self.__ClampHC(hc)
if self.__group is not None:
self.__group.MoveTo(hc, self, no_broadcast = no_broadcast)
else:
self.SetHC(hc, no_broadcast = no_broadcast)
self.RefreshFrame()
# print "{}: Refresh done".format(time.time())
# set cursor back
if frame:
frame.SetBusy(False)
# # Sets the current tick and updates.
# This does not notify groups and is an internal method
# @param hc New hypercycle (tick)
# @note Does not refresh. Refresh must be called separately (or use GoToHC
    # which Refreshes or notifies a group which indirectly refreshes).
def SetHC(self, hc, no_broadcast = False):
self.__hc = hc
if not no_broadcast:
self.__elements.HandleCycleChangedEvent()
self.Update()
# # Refresh this context (and its associated frame)
def RefreshFrame(self):
assert self.__frame, \
'A Layout_Context should always have a frame before attempting a RefreshFrame call'
self.__elements.MetaUpdate(self.__hc)
frame = self.__frame()
if frame:
frame.Refresh()
def GetHC(self):
'''
Returns the current hypercycle (tick) for this layout context
'''
return self.__hc
def SetGroup(self, group):
assert self.__group is None, \
'(for now) SetGroup cannot be called on a LayoutContext after it already has a group'
assert group is not None, \
'SetGroup parameter group must not be None'
logging.getLogger('LayoutContext').debug('Context {} adding to group {}'.format(self, group))
self.__group = group
self.__group.AddContext(self)
def LeaveGroup(self):
assert self.__group is not None, 'LeaveGroup cannot be called on a LayoutContext before it has joined a group'
logging.getLogger('LayoutContext').debug('Context {} leaving group {}'.format(self, self.__group))
self.__group.RemoveContext(self)
def GetGroup(self):
return self.__group
def SetFrame(self, frame):
assert self.__frame is None, \
'SetFrame cannot be called on a LayoutContext after it already has a frame'
assert frame is not None, \
'SetFrame parameter frame must not be None'
logging.getLogger('LayoutContext').debug('Context {} associated with frame {}'.format(self, frame))
self.__frame = weakref.ref(frame)
# # Returns the frame associated with this context. If the associated frame
# was destroyed (or no Frame associated), returns None
def GetFrame(self):
if self.__frame is None:
return None
return self.__frame() # May be None
# Clamp the HC to the file extents
def __ClampHC(self, hc):
hc = max(hc, self.__qapi.getFileStart())
hc = min(hc, self.__qapi.getFileInclusiveEnd()) # End is normally exclusive
return hc
# # Returns a list of all Elements beneath the given point
# @param pt Point to test for collision with elements
    # @param include_subelements Should subelements be searched (e.g. schedule line within a
# schedule)
# @param include_nondrawables Should selectable elements be returned even if they aren't
# drawable? Depth ordering might be lost when including non drawables
# Subelements are fake elements generated by elements on a collision
def DetectCollision(self, pt, include_subelements = False, include_nondrawables = False):
mx, my = pt
res = []
# Search draw pairs instead of all element pairs because they are
# (1) visible
# (2) sorted by depth
# ##for e in self.GetElementPairs():
# Get bounds for quad-tree query
# #bounds = None
frame = self.__frame()
if frame:
bounds = frame.GetCanvas().GetBounds()
else:
bounds = None
# Query by draw pairs to get depth order correct
if include_nondrawables:
pairs = self.GetElementPairs()
else:
pairs = self.GetDrawPairs(bounds = bounds)
vis_tick = self.__elements.GetVisibilityTick() # After GetDrawPairs
for e in pairs:
if not include_nondrawables and bounds is not None and e.GetVisibilityTick() != vis_tick:
continue # Skip: this is off-screen
element = e.GetElement()
x, y = element.GetProperty('position')
w, h = element.GetProperty('dimensions')
if x <= mx <= (x + w) and y <= my <= (y + h):
if include_subelements:
et = element.GetProperty('type')
if et == 'schedule':
sl = element.DetectCollision((mx, my))
if sl and sl.GetProperty('type') == 'schedule_line':
# Hierarchical point containment test assumes that schedule
# objects contain schedule lines
mx, my = pt
c_x, c_y = sl.GetProperty('position')
loc_x = mx - c_x
loc_y = my - c_y
                            # Go inside this schedule line
sub_object = sl.DetectCollision((loc_x, loc_y), e)
if sub_object:
res.append(sub_object)
elif et == 'schedule_line':
mx, my = pt
c_x, c_y = element.GetProperty('position')
loc_x = mx - c_x
loc_y = my - c_y
# Go inside this element
sub_object = element.DetectCollision((loc_x, loc_y), e)
if sub_object:
res.append(sub_object)
else:
res.append(e)
else:
# just attach element
res.append(e)
return res
def GetLocationPeriod(self, location_string):
'''
        Returns the tick period of the clock domain associated with the given location string
'''
clock = self.dbhandle.database.location_manager.getLocationInfo(location_string,
self.GetLocationVariables())[2]
return self.dbhandle.database.clock_manager.getClockDomain(clock).tick_period
def GetTransactionFields(self, time, location_string, fields):
'''
performs random-access query at time and place and returns requested attributes in dictionary
@return Dictionary of results {field:value}
'''
# No results if time is outside of the currently loaded window.
        # A reasonably fast query is not possible there, and there is no way that the data is
        # currently visible to the user anyway
dbapi = self.dbhandle.database.api
if time < dbapi.getWindowLeft() or time >= dbapi.getWindowRight():
return {}
results = {}
def callback(t, tapi):
assert t == time, f'bad tick {t}'
loc_mgr = self.dbhandle.database.location_manager
location = loc_mgr.getLocationInfo(location_string,
self.GetLocationVariables())[0]
trans_proxy = tapi.getTransactionProxy(location)
if trans_proxy:
for field in fields:
if field == 'time':
value = t
else:
fake_element = FakeElement()
fake_element.SetProperty('LocationString', location_string)
value = content.ProcessContent(field,
trans_proxy,
fake_element,
self.dbhandle,
t,
self.GetLocationVariables())
results[field] = value
self.dbhandle.query(time, time, callback, mod_tracking = False)
return results
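    # Illustrative call (the tick value, location string, and field names below
    # are assumptions, not from the original source):
    #   ctx.GetTransactionFields(1000, 'top.core0.alu0', ['time', 'annotation'])
    #   -> {'time': 1000, 'annotation': '...'}; an empty dict is returned when
    #   the tick lies outside the currently loaded window.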
def HighlightUop(self, anno_string):
'''
Highlight the uop with the given annotation string
'''
uop_uid = highlighting_utils.GetUopUid(anno_string)
if uop_uid is not None:
self.__highlighted_uops.append(uop_uid)
def UnhighlightUop(self, anno_string):
'''
Unhighlight the uop with the given annotation string
'''
uop_uid = highlighting_utils.GetUopUid(anno_string)
if uop_uid is not None:
if uop_uid in self.__highlighted_uops:
self.__highlighted_uops.remove(uop_uid)
self.__previously_highlighted_uops.append(uop_uid)
# # Check if a uop has been highlighted (by UID)
def IsUopUidHighlighted(self, uop_uid):
return uop_uid in self.__highlighted_uops
# # Check if a uop has been unhighlighted (by UID), but not yet redrawn
def WasUopUidHighlighted(self, uop_uid):
return uop_uid in self.__previously_highlighted_uops
# # Check if a uop has been highlighted (by annotation string)
def IsUopHighlighted(self, anno_string):
return highlighting_utils.GetUopUid(anno_string) in self.__highlighted_uops
# # Check if a uop has been unhighlighted (by annotation string), but not yet redrawn
def WasUopHighlighted(self, anno_string):
return highlighting_utils.GetUopUid(anno_string) in self.__previously_highlighted_uops
# # Redraw elements that have changed their highlighting state
def RedrawHighlightedElements(self):
self.__elements.RedrawHighlighted()
del self.__previously_highlighted_uops[:]
``` |
{
"source": "2hindas/automatic-adaptive-meshing",
"score": 2
} |
#### File: automatic-adaptive-meshing/src/ErrorEstimator.py
```python
import numpy as np
from scipy.interpolate import Rbf, LinearNDInterpolator, NearestNDInterpolator
import numdifftools as nd
from src.Meshing import *
from SimPEG.utils import surface2ind_topo
from src.Utils import *
from SimPEG import maps
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
def interpolate_rbf(x, y, z, x_val, y_val, z_val):
"""Radial basis function interpolation.
Parameters
----------
x : np.ndarray
x-faces or x-edges of a mesh
y : np.ndarray
y-faces or y-edges of a mesh
z : np.ndarray
z-faces or z-edges of a mesh
x_val : np.ndarray
curl values or electric field values in the x-direction
y_val : np.ndarray
curl values or electric field values in the y-direction
z_val : np.ndarray
curl values or electric field values in the z-direction
Returns
-------
scipy.interpolate.rbf.Rbf
a radial basis function interpolation object
"""
x_interpolated = Rbf(x[:, 0], x[:, 1], x[:, 2], x_val)
y_interpolated = Rbf(y[:, 0], y[:, 1], y[:, 2], y_val)
z_interpolated = Rbf(z[:, 0], z[:, 1], z[:, 2], z_val)
return x_interpolated, y_interpolated, z_interpolated
def interpolate_nearest(x, y, z, x_val, y_val, z_val):
"""Neirest neighbour interpolation.
Parameters
----------
x : np.ndarray
x-faces or x-edges of a mesh
y : np.ndarray
y-faces or y-edges of a mesh
z : np.ndarray
z-faces or z-edges of a mesh
x_val : np.ndarray
curl values or electric field values in the x-direction
y_val : np.ndarray
curl values or electric field values in the y-direction
z_val : np.ndarray
curl values or electric field values in the z-direction
Returns
-------
scipy.interpolate.ndgriddata.NearestNDInterpolator
        a nearest-neighbour interpolation object
"""
x_interpolated = NearestNDInterpolator(x, x_val)
y_interpolated = NearestNDInterpolator(y, y_val)
z_interpolated = NearestNDInterpolator(z, z_val)
return x_interpolated, y_interpolated, z_interpolated
def interpolate_linear(x, y, z, x_val, y_val, z_val):
"""Linear interpolation.
Parameters
----------
x : np.ndarray
x-faces or x-edges of a mesh
y : np.ndarray
y-faces or y-edges of a mesh
z : np.ndarray
z-faces or z-edges of a mesh
x_val : np.ndarray
curl values or electric field values in the x-direction
y_val : np.ndarray
curl values or electric field values in the y-direction
z_val : np.ndarray
curl values or electric field values in the z-direction
Returns
-------
scipy.interpolate.interpnd.LinearNDInterpolator
a linear interpolation object
"""
x_interpolated = LinearNDInterpolator(x, x_val)
y_interpolated = LinearNDInterpolator(y, y_val)
z_interpolated = LinearNDInterpolator(z, z_val)
return x_interpolated, y_interpolated, z_interpolated
def estimate_curl_electric_field(mesh, survey, model_map, model, interpolation='rbf', frequency=1.0,
omega=2 * np.pi, parameter='resistivity'):
"""Interpolates the curl and the electric field values in the mesh."""
x_faces = mesh.faces_x
y_faces = mesh.faces_y
z_faces = mesh.faces_z
x_edges = mesh.edges_x
y_edges = mesh.edges_y
z_edges = mesh.edges_z
# Solution by forward modelling for magnetic flux density and electric field
if parameter == 'resistivity':
simulation = fdem.simulation.Simulation3DMagneticFluxDensity(
mesh, survey=survey, rhoMap=model_map, Solver=Solver
)
simulationelectricfield = fdem.simulation.Simulation3DElectricField(
mesh, survey=survey, rhoMap=model_map, Solver=Solver
)
else:
simulation = fdem.simulation.Simulation3DMagneticFluxDensity(
mesh, survey=survey, sigmaMap=model_map, Solver=Solver
)
simulationelectricfield = fdem.simulation.Simulation3DElectricField(
mesh, survey=survey, sigmaMap=model_map, Solver=Solver
)
# Compute magnetic flux density
fields = simulation.fields(model)
magnetic_flux_density = fields[:, 'bSolution']
# Source field
sources = simulation.getSourceTerm(frequency)
Sm = sources[0]
# Curl of Electric field computed on the cell faces
curl = Sm - 1j * omega * magnetic_flux_density
curl = np.reshape(curl, len(curl))
x_curl = curl[0:mesh.n_faces_x]
y_curl = curl[mesh.n_faces_x:mesh.n_faces_x + mesh.n_faces_y]
z_curl = curl[mesh.n_faces_x + mesh.n_faces_y:mesh.n_faces_x + mesh.n_faces_y + mesh.n_faces_z]
if interpolation == 'rbf':
interpolator = interpolate_rbf
elif interpolation == 'linear':
interpolator = interpolate_linear
else:
interpolator = interpolate_nearest
curl_x_inter, curl_y_inter, curl_z_inter = interpolator(x_faces, y_faces, z_faces, x_curl,
y_curl, z_curl)
# Electric field solution
fieldselectric = simulationelectricfield.fields(model)
EF = fieldselectric[:, 'eSolution']
EF = np.reshape(EF, len(EF))
EF_x = EF[0:mesh.n_edges_x]
EF_y = EF[mesh.n_edges_x:mesh.n_edges_x + mesh.n_edges_y]
EF_z = EF[
mesh.n_edges_x + mesh.n_edges_y:mesh.n_edges_x + mesh.n_edges_y + mesh.n_edges_z]
EF_x_inter, EF_y_inter, EF_z_inter = interpolator(x_edges, y_edges, z_edges, EF_x, EF_y, EF_z)
return curl_x_inter, curl_y_inter, curl_z_inter, EF_x_inter, EF_y_inter, EF_z_inter
def compute_cell_error(cell, curl_x, curl_y, curl_z, ef_x, ef_y, ef_z):
"""Computes the error in a given cell of a mesh"""
def ef_interpolator(x):
return np.array([ef_x(*x), ef_y(*x), ef_z(*x)])
jacobian = nd.Jacobian(ef_interpolator, order=2)(cell)
jacobian[np.isnan(jacobian)] = 0 # handle NaN-values in the jacobian
curl = np.array([jacobian[2, 1] - jacobian[1, 2], jacobian[0, 2] -
jacobian[2, 0], jacobian[1, 0] - jacobian[0, 1]])
curl_field = np.array([curl_x(*cell), curl_y(*cell), curl_z(*cell)])
error = np.linalg.norm(curl_field - curl)
return error
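# Illustrative sanity check (not part of the original module): for the analytic
# field E = (-y, x, 0) the curl is (0, 0, 2) everywhere, so exact interpolators
# paired with the exact curl should yield a near-zero cell error.
def _curl_sanity_check():
    ef_x = lambda x, y, z: -y
    ef_y = lambda x, y, z: x
    ef_z = lambda x, y, z: 0.0
    curl_x = lambda x, y, z: 0.0
    curl_y = lambda x, y, z: 0.0
    curl_z = lambda x, y, z: 2.0
    cell = np.array([0.3, -0.1, 0.7])
    # Returns a value close to zero (numerical differentiation noise only)
    return compute_cell_error(cell, curl_x, curl_y, curl_z, ef_x, ef_y, ef_z)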
def estimate_error(search_area, curl_x, curl_y, curl_z, ef_x, ef_y, ef_z,
                   refine_percentage=0.05):
"""Estimates the error in a predefined search area in a mesh"""
cell_errors = []
for cell in search_area:
error = compute_cell_error(cell, curl_x, curl_y, curl_z, ef_x, ef_y, ef_z)
cell_errors.append(error)
np.save('error.npy', np.asarray(cell_errors))
n_refine_cells = int(np.ceil(refine_percentage * len(search_area)))
cells_to_refine = search_area[np.argpartition(cell_errors, -n_refine_cells)[-n_refine_cells:]]
return cells_to_refine
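# Note on the selection above: np.argpartition finds the indices of the
# n_refine_cells largest errors in O(N) without fully sorting the error list;
# the ordering of the selected cells among themselves is unspecified.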
def iterator(mesh, domain, surface, cell_width, objct, coordinates,
             receiver_locations, source_locations, survey, par_background, par_object,
             ind_object, frequency=1, omega=2 * np.pi,
             parameter='resistivity', interpolation='rbf', type_object='block',
             lim_iterations=5, factor_object=2, factor_receiver=3, factor_source=3,
             refine_percentage=0.05, axis='x', degrees_rad=0, radius=1, Ex=None, Ey=None, Ez=None,
             diff_list=np.array([[0, 0]]), r_a_o_list=None, r_a_r_list=None, r_a_s_list=None):
"""An iteration scheme that implements an error estimator to adaptively refine
a mesh, in order to reduce the error of the electric field solution.
This function is mainly used for small objects in a domain.
If you want to continue from previous iteration, you have to give the mesh, field values,
convergence list and previous refinements as input arguments."""
# Find cells that are active in the forward modeling (cells below surface)
ind_active = surface2ind_topo(mesh, surface)
# Define mapping from model to active cells
model_map = maps.InjectActiveCells(mesh, ind_active, par_background)
# Define model. Models in SimPEG are vector arrays
model = par_background * np.ones(ind_active.sum())
if type_object == 'block':
ind_object = get_ind_block(mesh, ind_active, coordinates, axis, degrees_rad)
if type_object == 'sphere':
ind_object = get_ind_sphere(mesh, ind_active, coordinates, radius)
model[ind_object] = par_object
diff = 10
if diff_list[0, 0] == 0:
i = 0
av_diff_list = []
refine_at_object_list = []
refine_at_receivers_list = []
refine_at_sources_list = []
# Starting after the pause
else:
i = diff_list[-1, 0]
av_diff_list = list(diff_list)
refine_at_object_list = r_a_o_list
refine_at_receivers_list = r_a_r_list
refine_at_sources_list = r_a_s_list
lim_iterations = lim_iterations + i
ef_old_x = Ex
ef_old_y = Ey
ef_old_z = Ez
def ef_interpolator(x):
return np.array([ef_x(*x), ef_y(*x), ef_z(*x)])
def ef_old_interpolator(x):
return np.array([ef_old_x(*x), ef_old_y(*x), ef_old_z(*x)])
while diff > 0.01 and i < lim_iterations:
# Maximum relative difference between current and previous iteration should fall below 1% in order to converge.
# Define search areas
search_area_obj = search_area_object(mesh, objct, factor=factor_object)
search_area_receiv = search_area_receivers(mesh, receiver_locations,
factor=factor_receiver)
search_area_sourc = search_area_sources(mesh, source_locations,
factor=factor_source)
# Interpolate curl and electric field
        curl_x, curl_y, curl_z, ef_x, ef_y, ef_z = estimate_curl_electric_field(
            mesh, survey, model_map, model, interpolation=interpolation,
            frequency=frequency, omega=omega, parameter=parameter)
# Compare electric field values until relative difference falls below 1%
if diff_list[0, 0] == 0:
if i > 0:
relative_difference_Efield = []
for cell in search_area_obj:
                    # Division by a near-zero previous field value can blow this up
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(
cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_receiv:
                    # Division by a near-zero previous field value can blow this up
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(
cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_sourc:
                    # Division by a near-zero previous field value can blow this up
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(
cell))
relative_difference_Efield.append(np.linalg.norm(form))
diff = sum(relative_difference_Efield) / len(relative_difference_Efield)
av_diff_list.append([i + 1, diff])
print("Average relative difference is ", diff)
else:
relative_difference_Efield = []
for cell in search_area_obj:
                # Division by a near-zero previous field value can blow this up
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_receiv:
                # Division by a near-zero previous field value can blow this up
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_sourc:
                # Division by a near-zero previous field value can blow this up
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(cell))
relative_difference_Efield.append(np.linalg.norm(form))
diff = sum(relative_difference_Efield) / len(relative_difference_Efield)
av_diff_list.append([i + 1, diff])
print("Average relative difference is ", diff)
ef_old_x = ef_x
ef_old_y = ef_y
ef_old_z = ef_z
# Define cells to refine near object
cells_to_refine_object = estimate_error(search_area_obj
, curl_x, curl_y, curl_z
, ef_x, ef_y, ef_z
, refine_percentage=refine_percentage)
refine_at_object_list.append(cells_to_refine_object)
# Define cells to refine near receivers
cells_to_refine_receivers = estimate_error(search_area_receiv
, curl_x, curl_y, curl_z
, ef_x, ef_y, ef_z
, refine_percentage=refine_percentage)
refine_at_receivers_list.append(cells_to_refine_receivers)
# Define cells to refine near sources
cells_to_refine_sources = estimate_error(search_area_sourc
, curl_x, curl_y, curl_z
, ef_x, ef_y, ef_z
, refine_percentage=refine_percentage)
refine_at_sources_list.append(cells_to_refine_sources)
# Refine the mesh
mesh = create_octree_mesh(domain, cell_width, objct, 'surface')
refine_at_locations(mesh, source_locations)
refine_at_locations(mesh, receiver_locations)
for refo in refine_at_object_list:
refine_at_locations(mesh, refo)
for refr in refine_at_receivers_list:
refine_at_locations(mesh, refr)
for refs in refine_at_sources_list:
refine_at_locations(mesh, refs)
mesh.finalize()
# Find cells that are active in the forward modeling (cells below surface)
ind_active = surface2ind_topo(mesh, surface)
# Define mapping from model to active cells
model_map = maps.InjectActiveCells(mesh, ind_active, par_background)
# Define model. Models in SimPEG are vector arrays
model = par_background * np.ones(ind_active.sum())
if type_object == 'block':
            ind_object = get_ind_block(mesh, ind_active, coordinates, axis, degrees_rad)
if type_object == 'sphere':
ind_object = get_ind_sphere(mesh, ind_active, coordinates, radius)
model[ind_object] = par_object
print(i)
i += 1
if diff < 0.01:
return mesh, ef_x, ef_y, ef_z, np.array(av_diff_list)
else:
return mesh, ef_x, ef_y, ef_z, np.array(
av_diff_list), refine_at_object_list, refine_at_receivers_list, refine_at_sources_list
def iteratornonobject(mesh, domain, cell_width, landscape, receiver_locations, source_locations,
                      survey, resistivity_function, model_map, model, frequency=1, omega=2 * np.pi,
                      parameter='resistivity', interpolation='rbf',
                      lim_iterations=5, factor_receiver=2, factor_source=2, factor_landscape=2,
                      refine_percentage=0.05, par_inactive=1e8,
                      Ex=None, Ey=None, Ez=None, diff_list=np.array([[0, 0]]), r_a_l_list=None,
                      r_a_r_list=None, r_a_s_list=None):
"""An iteration scheme that implements an error estimator to adaptively refine
a mesh, in order to reduce the error of the electric field solution.
This function is mainly used for large geophysical models.
If you want to continue from previous iteration, you have to give the mesh, field values,
convergence list and previous refinements as input arguments."""
# Find cells that are active in the forward modeling (cells below surface)
ind_active = np.array([True] * mesh.n_cells)
# Define mapping from model to active cells
model_map = maps.InjectActiveCells(mesh, ind_active, par_inactive)
# Define model. Models in SimPEG are vector arrays
model = resistivity_function(mesh.cell_centers)
diff = 10
i = 0
if diff_list[0, 0] == 0:
i = 0
av_diff_list = []
refine_at_landscape_list = []
refine_at_receivers_list = []
refine_at_sources_list = []
# Starting after the pause
else:
i = diff_list[-1, 0]
av_diff_list = list(diff_list)
refine_at_landscape_list = r_a_l_list
refine_at_receivers_list = r_a_r_list
refine_at_sources_list = r_a_s_list
lim_iterations = lim_iterations + i
ef_old_x = Ex
ef_old_y = Ey
ef_old_z = Ez
def ef_interpolator(x):
return np.array([ef_x(*x), ef_y(*x), ef_z(*x)])
def ef_old_interpolator(x):
return np.array([ef_old_x(*x), ef_old_y(*x), ef_old_z(*x)])
while diff > 0.01 and i < lim_iterations:
# Maximum relative difference between current and previous iteration should fall below 1%
# in order to converge.
# Define search areas
search_area_below_landscape = search_area_landscape(mesh, domain, landscape,
factor=factor_landscape)
search_area_receiv = search_area_receivers(mesh, receiver_locations,
factor=factor_receiver)
search_area_sourc = search_area_sources(mesh, source_locations,
factor=factor_source)
# Interpolate curl and electric field
        curl_x, curl_y, curl_z, ef_x, ef_y, ef_z = estimate_curl_electric_field(
            mesh, survey, model_map, model, interpolation=interpolation,
            frequency=frequency, omega=omega, parameter=parameter)
# Compare electric field values until relative difference falls below 1%
if diff_list[0, 0] == 0:
if i > 0:
relative_difference_Efield = []
for cell in search_area_below_landscape:
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(
cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_receiv:
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(
cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_sourc:
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(
cell))
relative_difference_Efield.append(np.linalg.norm(form))
diff = sum(relative_difference_Efield) / len(relative_difference_Efield)
av_diff_list.append([i + 1, diff])
print("Average relative difference is ", diff)
else:
relative_difference_Efield = []
for cell in search_area_below_landscape:
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_receiv:
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(cell))
relative_difference_Efield.append(np.linalg.norm(form))
for cell in search_area_sourc:
form = np.abs(
(ef_old_interpolator(cell) - ef_interpolator(cell)) / ef_old_interpolator(cell))
relative_difference_Efield.append(np.linalg.norm(form))
diff = sum(relative_difference_Efield) / len(relative_difference_Efield)
av_diff_list.append([i + 1, diff])
print("Average relative difference is ", diff)
ef_old_x = ef_x
ef_old_y = ef_y
ef_old_z = ef_z
# Define cells to refine near object
cells_to_refine_landscape = estimate_error(search_area_below_landscape
, curl_x, curl_y, curl_z
, ef_x, ef_y, ef_z
, refine_percentage=refine_percentage)
refine_at_landscape_list.append(cells_to_refine_landscape)
# Define cells to refine near receivers
cells_to_refine_receivers = estimate_error(search_area_receiv
, curl_x, curl_y, curl_z
, ef_x, ef_y, ef_z
, refine_percentage=refine_percentage)
refine_at_receivers_list.append(cells_to_refine_receivers)
# Define cells to refine near sources
cells_to_refine_sources = estimate_error(search_area_sourc
, curl_x, curl_y, curl_z
, ef_x, ef_y, ef_z
, refine_percentage=refine_percentage)
refine_at_sources_list.append(cells_to_refine_sources)
# Refine the mesh
mesh = create_octree_mesh(domain, cell_width, landscape, 'surface')
refine_at_locations(mesh, source_locations)
refine_at_locations(mesh, receiver_locations)
for refo in refine_at_landscape_list:
refine_at_locations(mesh, refo)
for refr in refine_at_receivers_list:
refine_at_locations(mesh, refr)
for refs in refine_at_sources_list:
refine_at_locations(mesh, refs)
mesh.finalize()
# Find cells that are active in the forward modeling (cells below surface)
ind_active = np.array([True] * mesh.n_cells)
# Define mapping from model to active cells
model_map = maps.InjectActiveCells(mesh, ind_active, par_inactive)
# Define model. Models in SimPEG are vector arrays
model = resistivity_function(mesh.cell_centers)
print("Iteration: ", i)
i += 1
if diff < 0.01:
return mesh, ef_x, ef_y, ef_z, np.array(av_diff_list)
else:
return mesh, ef_x, ef_y, ef_z, np.array(
av_diff_list), refine_at_landscape_list, refine_at_receivers_list, refine_at_sources_list
``` |
{
"source": "2hog/docker-training-samples-micro-django",
"score": 2
} |
#### File: training/web/views.py
```python
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
import requests
@login_required
def home(request):
# Get greeting from Sinatra service
greeting_basic_auth_credentials = requests.auth.HTTPBasicAuth(
settings.GREETING_APP_USER,
settings.GREETING_APP_PASSWORD,
)
greeting_response = requests.post(
settings.GREETING_APP_URL,
auth=greeting_basic_auth_credentials,
)
greeting = greeting_response.json()['greeting']
# Get page content based on received greeting from Flask service
content_basic_auth_credentials = requests.auth.HTTPBasicAuth(
settings.CONTENT_APP_USER,
settings.CONTENT_APP_PASSWORD,
)
content_response = requests.get(
settings.CONTENT_APP_URL,
auth=content_basic_auth_credentials,
params={
'greeting': greeting,
},
)
# Render our page based on the received content
content = content_response.text
context = {
'content': content,
}
return render(request, 'web/index.html', context=context)
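# Settings assumed by this view (names taken from the attribute accesses above):
# GREETING_APP_URL / GREETING_APP_USER / GREETING_APP_PASSWORD and
# CONTENT_APP_URL / CONTENT_APP_USER / CONTENT_APP_PASSWORD must be defined in
# Django settings, typically injected per service via environment variables.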
``` |
{
"source": "2hoursleep/Iroha-CLI",
"score": 2
} |
#### File: iroha_cli/iroha_tools/client.py
```python
import binascii
from binascii import Error
import json
import pprint
import iroha.primitive_pb2 as iroha_primitive
import iroha.queries_pb2 as queries_pb2
from google.protobuf.json_format import MessageToDict, MessageToJson, ParseDict
from iroha import Iroha, IrohaGrpc
from iroha import IrohaCrypto as ic
from cli import console
class IrohaClient:
def __init__(self, creator_account, private_key, iroha_host):
self.creator_account = creator_account
self.iroha = Iroha(creator_account)
self.ic = ic
self.permissions = iroha_primitive
self.user_private_key = private_key
self.net = IrohaGrpc(iroha_host, timeout=60)
def send_batch_and_print_status(self, transactions):
self.net.send_txs(transactions)
for tx in transactions:
hex_hash = binascii.hexlify(ic.hash(tx))
print("\t" + "-" * 20)
print(
"Transaction hash = {}, creator = {}".format(
hex_hash, tx.payload.reduced_payload.creator_account_id
)
)
for status in self.net.tx_status_stream(tx):
print(status)
def submit_transaction(self, transaction):
hex_hash = str(binascii.hexlify(self.ic.hash(transaction)), "utf-8")
tx_result = {}
msg = f"[bold yellow]Transaction Hash:[/bold yellow] [bold green]{hex_hash}[/bold green] \n[bold yellow]Creator Account ID:[/bold yellow] [bold green]{transaction.payload.reduced_payload.creator_account_id}[/bold green]"
console.print(msg)
try:
self.net.send_tx(transaction)
tx_status = []
for status in self.net.tx_status_stream(transaction):
tx_status.append(status)
tx_result = {
"tx_hash": hex_hash,
"tx_statuses": tx_status,
"tx_result": tx_status[-1][0],
}
console.print(f"{tx_result}")
except Exception as error:
print(error)
tx_result = {
"tx_hash": hex_hash,
"tx_statuses": [],
"tx_result": "REJECTED",
}
console.print(tx_result)
finally:
return tx_result
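    # Shape of the dict returned above (statuses come from
    # IrohaGrpc.tx_status_stream; the last entry is the terminal status):
    #   {"tx_hash": "<hex>", "tx_statuses": [...], "tx_result": e.g. "COMMITTED"}
    # On a submission error, tx_result is "REJECTED" and the status list is empty.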
def send_transaction_print_status_and_return_result(self, transaction):
"""
Main Transaction submission
"""
hex_hash = binascii.hexlify(self.ic.hash(transaction))
msg = f"""
Transaction Hash:
\n [bold green]{hex_hash}[/bold green]
\n
Creator Account ID:
\n [bold green]{transaction.payload.reduced_payload.creator_account_id}[/bold green]
\n"""
        console.print(msg)
self.net.send_tx(transaction)
tx_result = []
for status in self.net.tx_status_stream(transaction):
tx_result.append(status)
print(status)
tx_result.append(hex_hash)
return tx_result
    # Signs a transaction given in dict form and submits it to the network
def sign_and_submit_tx(self, transaction):
new_tx = self.iroha.transaction([])
tx = ParseDict(transaction, new_tx)
print(tx)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
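    # Illustrative usage (account id, key, and command payload are assumptions;
    # the dict form mirrors google.protobuf.json_format.ParseDict expectations):
    #   client = IrohaClient("admin@iroha", admin_private_key, "127.0.0.1:50051")
    #   client.sign_and_submit_tx({"payload": {"reducedPayload": {
    #       "creatorAccountId": "admin@iroha",
    #       "commands": [{"setAccountDetail": {
    #           "accountId": "admin@iroha", "key": "k", "value": "v"}}]}}})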
def check_pending_txs(self):
query = self.iroha.query("GetPendingTransactions")
ic.sign_query(query, self.user_private_key)
response = self.net.send_query(query)
data = MessageToJson(response)
return data
def stream_blocks(self):
"""
        Start an incoming stream for new blocks
"""
# add height
query = self.iroha.blocks_query()
ic.sign_query(query, self.user_private_key)
for block in self.net.send_blocks_stream_query(query):
pprint("The next block arrived: {}".format(MessageToDict(block)), indent=1)
def get_signatories(self, account_id):
"""
List signatories by public key for specified user@domain
"""
query = self.iroha.query("GetSignatories", account_id=account_id)
ic.sign_query(query, self.user_private_key)
response = self.net.send_query(query)
data = MessageToDict(response)
return data
def get_account(self, account_id):
"""
List Account user@domain
"""
query = self.iroha.query("GetAccount", account_id=account_id)
ic.sign_query(query, self.user_private_key)
response = self.net.send_query(query)
data = MessageToDict(response)
return data
def get_account_details(self, account_id, writer=None, key=None):
"""
List Account details for user@domain
"""
query = self.iroha.query(
"GetAccountDetail", account_id=account_id, writer=writer, key=key
)
ic.sign_query(query, self.user_private_key)
response = self.net.send_query(query)
data = json.loads(response.account_detail_response.detail)
return data
def create_new_account(self, account_name, domain, public_key):
"""
register new user
"""
tx = self.iroha.transaction(
[
self.iroha.command(
"CreateAccount",
account_name=account_name,
domain_id=domain,
public_key=public_key,
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def set_account_detail(self, account_id, key, value):
tx = self.iroha.transaction(
[
self.iroha.command(
"SetAccountDetail", account_id=account_id, key=key, value=value
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def create_domain(self, domain_id, default_role):
"""
        Register a new (non-existing) domain on the network
"""
tx = self.iroha.transaction(
[
self.iroha.command(
"CreateDomain", domain_id=domain_id, default_role="user"
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
### Dev Batch Functions
def init_test_balance_batch(self, account_id):
# Add Dummy Asset Supply For Demo
qty = "10.00000000"
description = "Welcome To Ubuntu Exchange"
currencies = ["BTC", "LTC", "ETH", "XLM", "XMR"]
tx = self.iroha.transaction(
[
self.iroha.command(
"AddAssetQuantity", asset_id="btc#iroha", amount=qty
),
self.iroha.command(
"AddAssetQuantity", asset_id="ltc#iroha", amount=qty
),
self.iroha.command(
"AddAssetQuantity", asset_id="eth#iroha", amount=qty
),
self.iroha.command(
"AddAssetQuantity", asset_id="xlm#iroha", amount=qty
),
self.iroha.command(
"AddAssetQuantity", asset_id="xmr#iroha", amount=qty
),
self.iroha.command(
"TransferAsset",
description=description,
src_account_id="admin@iroha",
dest_account_id=account_id,
asset_id="btc#iroha",
amount=qty,
),
self.iroha.command(
"TransferAsset",
description=description,
src_account_id="admin@iroha",
dest_account_id=account_id,
asset_id="ltc#iroha",
amount=qty,
),
self.iroha.command(
"TransferAsset",
description=description,
src_account_id="admin@iroha",
dest_account_id=account_id,
asset_id="eth#iroha",
amount=qty,
),
self.iroha.command(
"TransferAsset",
description=description,
src_account_id="admin@iroha",
dest_account_id=account_id,
asset_id="xlm#iroha",
amount=qty,
),
self.iroha.command(
"TransferAsset",
description=description,
src_account_id="admin@iroha",
dest_account_id=account_id,
asset_id="xmr#iroha",
amount=qty,
),
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def grant_account_write_permission(self, account_id):
"""
        Grant write permission for this account's details (can_set_my_account_detail)
"""
tx = self.iroha.transaction(
[
self.iroha.command(
"GrantPermission",
account_id=account_id,
permission=self.permissions.can_set_my_account_detail,
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def grant_account_read_permission(self, account_id):
tx = self.iroha.transaction(
[
self.iroha.command(
"GrantPermission",
account_id=account_id,
permission=self.permissions.can_get_my_acc_detail,
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
# add signatory
# remove signatory
# find peer and remove peer has been added in v1.1
def add_peer(self, ip_address, peer_key):
peer = self.permissions.Peer()
peer.address = ip_address
peer.peer_key = peer_key
tx = self.iroha.transaction([self.iroha.command("AddPeer", peer=peer)])
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def grant_asset_tx_history_permission(self, account_id):
tx = self.iroha.transaction(
[
self.iroha.command(
"GrantPermission",
account_id=account_id,
                    permission=self.permissions.can_get_my_acc_ast_txs,
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def grant_account_tx_history_permission(self, account_id):
tx = self.iroha.transaction(
[
self.iroha.command(
"GrantPermission",
account_id=account_id,
                    permission=self.permissions.can_get_my_acc_txs,
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def create_new_asset(self, asset, domain, precision):
tx = self.iroha.transaction(
[
self.iroha.command(
"CreateAsset",
asset_name=asset,
domain_id=domain,
precision=precision,
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def transfer_asset(self, account_id, recipient, asset_id, description, qty):
tx = self.iroha.transaction(
[
self.iroha.command(
"TransferAsset",
src_account_id=account_id,
dest_account_id=recipient,
asset_id=asset_id,
description=description,
amount=qty,
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def add_asset_qty(self, asset_id, qty):
"""
Add asset supply
"""
tx = self.iroha.transaction(
[self.iroha.command("AddAssetQuantity", asset_id=asset_id, amount=qty)]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def subtract_asset_qty(self, asset_id, qty):
"""
Subtract asset supply
"""
tx = self.iroha.transaction(
[self.iroha.command("SubtractAssetQuantity", asset_id=asset_id, amount=qty)]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
def detach_role_tx(self, account_id, role_name):
tx = self.iroha.transaction(
[
self.iroha.command(
"DetachRole", account_id=account_id, role_name=role_name
)
]
)
ic.sign_transaction(tx, self.user_private_key)
self.submit_transaction(tx)
```
#### File: iroha_cli/iroha_tools/commons.py
```python
from iroha import primitive_pb2, block_pb2
from iroha import Iroha, IrohaCrypto
import binascii
from time import time
permissions_dict = {
"can_append_role": primitive_pb2.can_append_role,
"can_create_role": primitive_pb2.can_create_role,
"can_detach_role": primitive_pb2.can_detach_role,
"can_add_asset_qty": primitive_pb2.can_add_asset_qty,
"can_subtract_asset_qty": primitive_pb2.can_subtract_asset_qty,
"can_add_peer": primitive_pb2.can_add_peer,
"can_add_signatory": primitive_pb2.can_add_signatory,
"can_remove_signatory": primitive_pb2.can_remove_signatory,
"can_set_quorum": primitive_pb2.can_set_quorum,
"can_create_account": primitive_pb2.can_create_account,
"can_set_detail": primitive_pb2.can_set_detail,
"can_create_asset": primitive_pb2.can_create_asset,
"can_transfer": primitive_pb2.can_transfer,
"can_receive": primitive_pb2.can_receive,
"can_create_domain": primitive_pb2.can_create_domain,
"can_read_assets": primitive_pb2.can_read_assets,
"can_get_roles": primitive_pb2.can_get_roles,
"can_get_my_account": primitive_pb2.can_get_my_account,
"can_get_all_accounts": primitive_pb2.can_get_all_accounts,
"can_get_domain_accounts": primitive_pb2.can_get_domain_accounts,
"can_get_my_signatories": primitive_pb2.can_get_my_signatories,
"can_get_all_signatories": primitive_pb2.can_get_all_signatories,
"can_get_domain_signatories": primitive_pb2.can_get_domain_signatories,
"can_get_my_acc_ast": primitive_pb2.can_get_my_acc_ast,
"can_get_all_acc_ast": primitive_pb2.can_get_all_acc_ast,
"can_get_domain_acc_ast": primitive_pb2.can_get_domain_acc_ast,
"can_get_my_acc_detail": primitive_pb2.can_get_my_acc_detail,
"can_get_all_acc_detail": primitive_pb2.can_get_all_acc_detail,
"can_get_domain_acc_detail": primitive_pb2.can_get_domain_acc_detail,
"can_get_my_acc_txs": primitive_pb2.can_get_my_acc_txs,
"can_get_all_acc_txs": primitive_pb2.can_get_all_acc_txs,
"can_get_domain_acc_txs": primitive_pb2.can_get_domain_acc_txs,
"can_get_my_acc_ast_txs": primitive_pb2.can_get_my_acc_ast_txs,
"can_get_all_acc_ast_txs": primitive_pb2.can_get_all_acc_ast_txs,
"can_get_domain_acc_ast_txs": primitive_pb2.can_get_domain_acc_ast_txs,
"can_get_my_txs": primitive_pb2.can_get_my_txs,
"can_get_all_txs": primitive_pb2.can_get_all_txs,
"can_get_blocks": primitive_pb2.can_get_blocks,
"can_grant_can_set_my_quorum": primitive_pb2.can_grant_can_set_my_quorum,
"can_grant_can_add_my_signatory": primitive_pb2.can_grant_can_add_my_signatory,
"can_grant_can_remove_my_signatory": primitive_pb2.can_grant_can_remove_my_signatory,
"can_grant_can_transfer_my_assets": primitive_pb2.can_grant_can_transfer_my_assets,
"can_grant_can_set_my_account_detail": primitive_pb2.can_grant_can_set_my_account_detail,
}
def now():
return int(time() * 1000)
def genesis_block(users: list, roles: list, peers: list, domains: list) -> list:
"""
Composes a set genesis block transactions
:param users: list of users containing name, domain_id and list of user roles
:param users: list of users containing name, domain_id and list of user roless
:param test_permissions: permissions for users in test domain
:param multidomain:
:return: a list containing payload dictionaries containing Iroha.command's
"""
commands = []
for iroha_peer in peers:
_add_peer = {
"addPeer": {
"peer": {
"address": iroha_peer["address"],
"peerKey": iroha_peer["peer_key"],
}
}
}
commands.append(_add_peer)
for role in roles:
_add_role = {
"createRole": {
"roleName": role["role_name"],
"permissions": role["permissions"],
}
}
commands.append(_add_role)
for domain in domains:
_add_domain = {
"createRole": {
"domainId": domain["domain_id"],
"defaultRole": domain["default_role"],
}
}
commands.append(_add_domain)
for user in users:
_add_user = {
"createAccount": {
"accountName": user["account_name"],
"domainId": user["domain_id"],
"publicKey": user["public_key"],
}
}
commands.append(_add_user)
for user_role in user["user_roles"]:
_append_user_role = {
"appendRole": {
"accountId": f'{user["account_name"]}@{user["domain_id"]}',
"roleName": user_role,
}
}
commands.append(_append_user_role)
return commands
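# Illustrative composition (the peer address and keys below are placeholders,
# not real credentials):
if __name__ == "__main__":
    demo_commands = genesis_block(
        users=[{"account_name": "admin", "domain_id": "iroha",
                "public_key": "0" * 64, "user_roles": ["admin"]}],
        roles=[{"role_name": "admin",
                "permissions": [permissions_dict["can_get_blocks"]]}],
        peers=[{"address": "127.0.0.1:10001", "peer_key": "0" * 64}],
        domains=[{"domain_id": "iroha", "default_role": "user"}],
    )
    print("composed {} genesis commands".format(len(demo_commands)))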
``` |
{
"source": "2hoursleep/iroha-compose",
"score": 3
} |
#### File: iroha-compose/scripts/generate_node_key.py
```python
from iroha import IrohaCrypto as ic
# TODO
# Add support for ursa keys
def save_keys_to_file(account_id):
private_key = ic.private_key()
public_key = ic.derive_public_key(private_key)
try:
with open(f"{account_id}.priv", "wb+") as private_key_file:
private_key_file.write(private_key)
except:
raise
try:
with open(f"{account_id}.pub", "wb+") as public_key_file:
public_key_file.write(public_key)
except:
raise
account_id = input("Please provide a name for the keypair files: ")
save_keys_to_file(account_id=account_id)
print("Done!, Please copy the keys to the correct paths and change config")
``` |
{
"source": "2hoursleep/iroha-db-api",
"score": 2
} |
#### File: migrations/versions/0ec40831769e_.py
```python
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "0ec40831769e"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint("balances_wallet_id_fkey", "balances", type_="foreignkey")
op.create_foreign_key(None, "balances", "wallets", ["wallet_id"], ["id"])
op.add_column("orders", sa.Column("currency", sa.String(length=50), nullable=False))
op.add_column("orders", sa.Column("fee", sa.Float(), nullable=False))
op.add_column("orders", sa.Column("filled_qty", sa.Float(), nullable=False))
op.add_column("orders", sa.Column("unfulfilled_qty", sa.Float(), nullable=False))
op.drop_column("orders", "leaves_qty")
op.drop_column("orders", "cum_qty")
op.add_column("trades", sa.Column("currency", sa.String(length=50), nullable=False))
op.add_column("trades", sa.Column("trade_fee", sa.Float(), nullable=False))
op.add_column("transactions", sa.Column("tx_type", sa.String(), nullable=True))
op.drop_constraint(
"transactions_wallet_id_fkey", "transactions", type_="foreignkey"
)
op.drop_column("transactions", "wallet_id")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"transactions",
sa.Column("wallet_id", sa.INTEGER(), autoincrement=False, nullable=False),
)
op.create_foreign_key(
"transactions_wallet_id_fkey", "transactions", "wallets", ["wallet_id"], ["id"]
)
op.drop_column("transactions", "tx_type")
op.drop_column("trades", "trade_fee")
op.drop_column("trades", "currency")
op.add_column(
"orders",
sa.Column(
"cum_qty",
postgresql.DOUBLE_PRECISION(precision=53),
autoincrement=False,
nullable=False,
),
)
op.add_column(
"orders",
sa.Column(
"leaves_qty",
postgresql.DOUBLE_PRECISION(precision=53),
autoincrement=False,
nullable=False,
),
)
op.drop_column("orders", "unfulfilled_qty")
op.drop_column("orders", "filled_qty")
op.drop_column("orders", "fee")
op.drop_column("orders", "currency")
op.drop_constraint(None, "balances", type_="foreignkey")
op.create_foreign_key(
"balances_wallet_id_fkey", "balances", "balances", ["wallet_id"], ["id"]
)
# ### end Alembic commands ###
```
#### File: server/blocks/views.py
```python
import json
import logging
import os
from flask import Blueprint, jsonify, make_response, request
from flask.views import MethodView
from project.server import db
from project.server.iroha.accounts import submit_query_to_iroha, submit_tx_to_iroha
from project.server.models import Block_V1
from project.server.schemas import Block
blocks_blueprint = Blueprint("blocks", __name__)
class BlockAPI(MethodView):
"""
Iroha Block API
"""
def get(self):
"Get all blocks"
results = Block_V1.query.all()
response = []
for result in results:
block = Block.from_orm(result)
response.append(block.dict())
responseObject = {
"result": response,
"status": "success",
"message": "Successfully returned blocks.",
}
return make_response(jsonify(responseObject)), 201
def post(self):
"Get block data by height"
post_data = request.get_json()
height = int(post_data.get("height"))
result = Block_V1.query.filter_by(height=height).first()
block = Block.from_orm(result)
responseObject = {
"result": block.dict(),
"status": "success",
"message": "Successfully registered.",
}
return make_response(jsonify(responseObject)), 201
class Transactions(MethodView):
"""
Iroha Block API
"""
def get(self):
"Get all transactions"
results = Block_V1.query.all()
response = []
for result in results:
db_block = Block.from_orm(result)
block = db_block.dict()
print(block["payload"]["transactions"])
            response.append(block)  # block is already a plain dict
responseObject = {
"result": response,
"status": "success",
"message": "Successfully returned blocks.",
}
return make_response(jsonify(responseObject)), 201
def post(self):
"Get block data by height"
post_data = request.get_json()
print("tx data request")
height = int(post_data.get("height"))
result = Block_V1.query.filter_by(height=height).first()
block = Block.from_orm(result).dict()
print(block.keys())
responseObject = {
"result": block,
"status": "success",
"message": "Successfully returned block.",
}
return make_response(jsonify(responseObject)), 201
block_query_api = BlockAPI.as_view("block_api")
tx_query_api = Transactions.as_view("transactions_history_api")
blocks_blueprint.add_url_rule(
"/v1/data/blocks/", view_func=block_query_api, methods=["GET", "POST"]
)
blocks_blueprint.add_url_rule(
"/v1/data/tx-history/", view_func=tx_query_api, methods=["GET", "POST"]
)
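# Example requests (illustrative; host and port depend on the deployment):
#   curl http://localhost:5000/v1/data/blocks/
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"height": 1}' http://localhost:5000/v1/data/blocks/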
```
#### File: server/brvs/views.py
```python
import json
import logging
import os
from flask import Blueprint, jsonify, make_response, request
from flask.views import MethodView
from project.server import bcrypt, db
from project.server.iroha.accounts import submit_query_to_iroha, submit_tx_to_iroha
from project.server.models import BlacklistToken, User
brvs_blueprint = Blueprint("brvs", __name__)
class IrohaQueryAPI(MethodView):
"""
Iroha Query Resource
"""
def post(self):
# get the post data
post_data = request.get_json()
# check if user already exists
account_id = post_data.get("account_id")
transaction = post_data.get("transaction")
print(transaction)
# transaction["payload"]["reducedPayload"]["commands"] = transaction["payload"]["reducedPayload"].pop("commandsList")
# transaction["signatures"] = transaction.pop("signaturesList")
result = submit_query_to_iroha(account_id, transaction)
responseObject = {
"result": result,
"status": "success",
"message": "Successfully registered.",
}
return make_response(jsonify(responseObject)), 201
class IrohaTxAPI(MethodView):
"""
Iroha Account Detail Resource
"""
def post(self):
# get the post data
post_data = request.get_json()
account_id = post_data.get("account_id")
transaction = post_data.get("transaction")
transaction["payload"]["reducedPayload"]["commands"] = transaction["payload"][
"reducedPayload"
].pop("commandsList")
transaction["signatures"] = transaction.pop("signaturesList")
submit_tx_to_iroha(account_id, transaction)
        responseObject = {"status": "success", "message": "Successfully submitted transaction."}
return make_response(jsonify(responseObject)), 201
query_api = IrohaQueryAPI.as_view("iroha_query_api")
tx_api = IrohaTxAPI.as_view("iroha_tx_api")
# add Rules for API Endpoints
brvs_blueprint.add_url_rule("/submit/tx", view_func=tx_api, methods=["POST"])
brvs_blueprint.add_url_rule("/submit/query", view_func=query_api, methods=["POST"])
```
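A hypothetical request body for the transaction endpoint above, showing the key renames the view performs (commandsList becomes commands, signaturesList becomes signatures); the account id and field values are placeholders:
```python
import requests

payload = {
    "account_id": "user@test",  # placeholder Iroha account id
    "transaction": {
        # Keys as produced by the JS client; the view renames them
        # before forwarding the transaction to Iroha.
        "payload": {"reducedPayload": {"commandsList": []}},
        "signaturesList": [],
    },
}
resp = requests.post("http://localhost:5000/submit/tx", json=payload)
print(resp.json()["message"])
```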
#### File: project/server/models.py
```python
import datetime
from dataclasses import dataclass
import jwt
from project.server import app, bcrypt, db, db2
from sqlalchemy.dialects.postgresql.json import JSONB
from sqlalchemy.ext.automap import automap_base
# Init reflection from WSV DB
db2.reflect(app=app)
db2.Model.metadata.reflect(db2.engine)
AutoMapModel = automap_base(db2.Model)
# For additional tables copy model and change table_name
@dataclass
class Wsv_Block_Info(AutoMapModel):
__bind_key__ = "iroha_wsv"
__tablename__ = "top_block_info"
@dataclass
class Block_V1(db.Model):
__bind_key__ = "api"
__tablename__ = "iroha_blocks"
__table_args__ = {"extend_existing": True}
prev_block_hash = db.Column(db.String)
height = db.Column(db.Integer, primary_key=True)
added_on = db.Column(db.DateTime)
created_time = db.Column(db.String)
signatures = db.Column(JSONB)
rejected_transactions_hashes = db.Column(JSONB)
transactions = db.Column(JSONB)
def __init__(
self,
prev_block_hash: str,
created_time: str,
height: int,
transactions: JSONB,
rejected_transactions_hashes: JSONB,
signatures: JSONB,
    ) -> None:
self.prev_block_hash = prev_block_hash
self.height = height
self.created_time = created_time
self.transactions = transactions
self.rejected_transactions_hashes = rejected_transactions_hashes
self.signatures = signatures
self.added_on = datetime.datetime.now()
def __repr__(self):
return f"Block Height: {self.height}"
@staticmethod
def add_block(
prev_block_hash,
created_time,
height,
transactions,
rejected_transactions_hashes,
signatures,
):
# add Iroha block to database
block = Block_V1(
prev_block_hash,
created_time,
height,
transactions,
rejected_transactions_hashes,
signatures,
)
try:
db.session.add(block)
db.session.commit()
        except Exception:
db.session.rollback()
raise
finally:
db.session.close()
    @staticmethod
    def get_block_by_height(
        height: int = 1,
    ) -> dict:
        "Get Iroha block from database by height"
        # Returns an empty dict when no block exists at the given height.
        block = db.session.query(Block_V1).filter_by(height=height).first()
        return block.__dict__ if block else {}
    @staticmethod
    def get_block_by_hash(block_hash):
        # The model stores no own-hash column, so match against the stored
        # previous-block hash.
        block = (
            db.session.query(Block_V1)
            .filter_by(prev_block_hash=block_hash)
            .first()
        )
        return block.__dict__ if block else {}
    @staticmethod
    def get_last_block():
        "Get the most recent Iroha block from the database"
        block = (
            db.session.query(Block_V1).order_by(Block_V1.height.desc()).first()
        )
        return block.__dict__ if block else {}
class User(db.Model):
""" User Model for storing user related details """
__bind_key__ = "api"
__tablename__ = "users"
__table_args__ = {"extend_existing": True}
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
registered_on = db.Column(db.DateTime, nullable=False)
admin = db.Column(db.Boolean, nullable=False, default=False)
account_id = db.Column(db.String(255), unique=True, nullable=False)
public_key = db.Column(db.String(255), unique=False, nullable=False)
def __init__(self, email, password, account_id, public_key, admin=False):
self.email = email
self.password = bcrypt.generate_password_hash(
password, app.config.get("BCRYPT_LOG_ROUNDS")
).decode()
self.registered_on = datetime.datetime.now()
self.admin = admin
self.account_id = account_id
self.public_key = public_key
def encode_auth_token(self, user_id):
"""
Generates the Auth Token
        Token validity is determined by the 'exp' field in the payload.
        Current duration: 30 minutes.
:return: string
"""
try:
payload = {
"exp": datetime.datetime.utcnow()
+ datetime.timedelta(days=0, minutes=30),
"iat": datetime.datetime.utcnow(),
"sub": user_id,
}
return jwt.encode(payload, app.config.get("SECRET_KEY"), algorithm="HS256")
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Validates the auth token
:param auth_token:
:return: integer|string
"""
try:
            payload = jwt.decode(
                auth_token, app.config.get("SECRET_KEY"), algorithms=["HS256"]
            )
is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
if is_blacklisted_token:
return "Token blacklisted. Please log in again."
else:
return payload["sub"]
except jwt.ExpiredSignatureError:
return "Signature expired. Please log in again."
except jwt.InvalidTokenError:
return "Invalid token. Please log in again."
class BlacklistToken(db.Model):
"""
Token Model for storing JWT tokens
"""
__tablename__ = "blacklist_tokens"
__table_args__ = {"extend_existing": True}
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
token = db.Column(db.String(500), unique=True, nullable=False)
blacklisted_on = db.Column(db.DateTime, nullable=False)
def __init__(self, token):
self.token = token
self.blacklisted_on = datetime.datetime.now()
    def __repr__(self):
        return "<id: {} token: {}>".format(self.id, self.token)
@staticmethod
def check_blacklist(auth_token):
# check whether auth token has been blacklisted
        res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
        return res is not None
AutoMapModel.prepare(db2.engine, reflect=True)
``` |
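A short sketch of the token round-trip implemented by the User model above. It assumes a Flask application context and an existing user row; the email address is a placeholder:
```python
user = User.query.filter_by(email="someone@example.com").first()
token = user.encode_auth_token(user.id)  # expires after 30 minutes
result = User.decode_auth_token(token)   # user id on success, error string otherwise
if isinstance(result, str):
    print("Auth failed:", result)
else:
    print("Authenticated user id:", result)
```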
{
"source": "2hycaw/Model",
"score": 4
} |
#### File: 2hycaw/Model/agentframework.py
```python
import random
class Agent:
def __init__(self, environment, agents, y, x):
        # Pointers to the global environment and agents lists, shared by all
        # agents.
self.environment = environment
self.agents = agents
        # The agent's store, unique to this agent.
self.store = 0
# Set the initial position of the agent. We have to test against None
# as the scraped data from the web may produce this value.
if x is None:
self._x = random.randint(0,100)
else:
self._x = x
if y is None:
self._y = random.randint(0,100)
else:
self._y = y
# Nice representation for the agent class.
def __repr__(self):
return "Agent({y}, {x})".format(y=self.y, x=self.x)
    # Define the variable x as a property.
@property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
# Define the variable y as a property.
@property
def y(self):
return self._y
@y.setter
def y(self, value):
self._y = value
def move(self):
"""Moves the agent randomly."""
if random.random() < 0.5:
self.y = (self.y + 1) % 100
else:
self.y = (self.y - 1) % 100
if random.random() < 0.5:
self.x = (self.x + 1) % 100
else:
self.x = (self.x - 1) % 100
    def eat(self):
        """Let the agent eat some of the environment."""
        if self.environment[self.y][self.x] > 10:
            # Plenty of resource here: move 10 units into the store.
            self.environment[self.y][self.x] -= 10
            self.store += 10
        else:
            # Less than 10 units left: eat whatever remains.
            self.store += self.environment[self.y][self.x]
            self.environment[self.y][self.x] = 0
def distance_between(self, agent):
"""Returns the distance between two agents."""
return (((self.x - agent.x)**2) + ((self.y - agent.y)**2))**0.5
    def share_with_neighbours(self, neighbourhood):
        """Shares stores with all agents within the neighbourhood distance."""
        for agent in self.agents:
            if agent is self:
                continue
            distance = self.distance_between(agent)
            if distance <= neighbourhood:
total = agent.store + self.store
# If either of the agents has a greater store then we exchange
# which agent has the larger store. Otherwise the stores are
# divided evenly between the two agents.
if self.store < agent.store:
agent.store = total*0.25
self.store = total*0.75
elif agent.store < self.store:
self.store = total*0.25
agent.store = total*0.75
else:
self.store = total*0.5
agent.store = total*0.5
``` |
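A minimal driver loop for the class above (a sketch, not part of the source): it builds a 100 by 100 environment of random resource values, creates ten agents at random positions, and runs the move/eat/share cycle for a number of iterations:
```python
import random
from agentframework import Agent

environment = [[random.randint(0, 255) for _ in range(100)] for _ in range(100)]
agents = []
for _ in range(10):
    # Passing None for y and x makes the agent pick a random position.
    agents.append(Agent(environment, agents, None, None))

for _ in range(100):            # number of iterations
    random.shuffle(agents)      # shuffle to avoid artefacts from fixed ordering
    for agent in agents:
        agent.move()
        agent.eat()
        agent.share_with_neighbours(20)
```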
{
"source": "2i2c-org/similar-hubs",
"score": 4
} |
#### File: similar-hubs/deployer/file_acquisition.py
```python
import os
import subprocess
import tempfile
import warnings
from contextlib import ExitStack, contextmanager
from pathlib import Path
from ruamel.yaml import YAML
from ruamel.yaml.scanner import ScannerError
yaml = YAML(typ="safe", pure=True)
def _assert_file_exists(filepath):
"""Assert a filepath exists, raise an error if not. This function is to be used for
files that *absolutely have to exist* in order to successfully complete deployment,
such as, files listed in the `helm_chart_values_file` key in the `cluster.yaml` file
Args:
filepath (str): Absolute path to the file that is to be asserted for existence
"""
if not os.path.isfile(filepath):
raise FileNotFoundError(
f"""
File Not Found at following location! Have you checked it's the correct path?
{filepath}
"""
)
def find_absolute_path_to_cluster_file(cluster_name: str, is_test: bool = False):
"""Find the absolute path to a cluster.yaml file for a named cluster
Args:
cluster_name (str): The name of the cluster we wish to perform actions on.
This corresponds to a folder name, and that folder should contain a
cluster.yaml file.
is_test (bool, optional): A flag to determine whether we are running a test
suite or not. If True, only return the paths to cluster.yaml files under the
'tests/' directory. If False, explicitly exclude the cluster.yaml files
nested under the 'tests/' directory. Defaults to False.
Returns:
Path object: The absolute path to the cluster.yaml file for the named cluster
"""
if is_test:
# We are running a test via pytest. We only want to focus on the cluster
# folders nested under the `tests/` folder.
filepaths = [
filepath
for filepath in Path(os.getcwd()).glob(f"**/{cluster_name}/cluster.yaml")
if "tests/" in str(filepath)
]
else:
# We are NOT running a test via pytest. We want to explicitly ignore the
# cluster folders nested under the `tests/` folder.
filepaths = [
filepath
for filepath in Path(os.getcwd()).glob(f"**/{cluster_name}/cluster.yaml")
if "tests/" not in str(filepath)
]
if len(filepaths) > 1:
raise FileExistsError(
"Multiple files found. "
+ "Only ONE (1) cluster.yaml file should exist per cluster folder."
)
elif len(filepaths) == 0:
raise FileNotFoundError(
f"No cluster.yaml file exists for cluster {cluster_name}. "
+ "Please create one and then continue."
)
else:
cluster_file = filepaths[0]
with open(cluster_file) as cf:
cluster_config = yaml.load(cf)
if not os.path.dirname(cluster_file).endswith(cluster_config["name"]):
warnings.warn(
"Cluster Name Mismatch: It is convention that the cluster name defined "
+ "in cluster.yaml matches the name of the parent directory. "
+ "Deployment won't be halted but please update this for consistency!"
)
return cluster_file
@contextmanager
def get_decrypted_file(original_filepath):
"""
Assert that a given file exists. If the file is sops-encryped, we provide secure,
temporarily decrypted contents of the file. We raise an error if we do not find the
sops key when we expect to, in case the decrypted contents have been leaked via
version control. We expect to find the sops key in a file if the filename begins
with "enc-" or contains the word "secret". If the file is not encrypted, we return
the original filepath.
Args:
original_filepath (path object): Absolute path to a file to perform checks on
and decrypt if it's encrypted
Yields:
(path object): EITHER the absolute path to a tempfile containing the
decrypted contents, OR the original filepath. The original filepath is
yielded if the file is not valid JSON/YAML, or does not have the prefix
'enc-' or contain 'secret'.
"""
_assert_file_exists(original_filepath)
filename = os.path.basename(original_filepath)
_, ext = os.path.splitext(filename)
# Our convention is that secrets in the repository include "secret" in their filename,
# so first we check for that
if "secret" in filename:
# We must then determine if the file is using sops
# sops files are JSON/YAML with a `sops` key. So we first check
# if the file is valid JSON/YAML, and then if it has a `sops` key.
# Since valid JSON is also valid YAML by design, a YAML parser can read in JSON.
with open(original_filepath) as f:
try:
content = yaml.load(f)
except ScannerError:
raise ScannerError(
"We expect encrypted files to be valid JSON or YAML files."
)
if "sops" not in content:
raise KeyError(
"Expecting to find the `sops` key in this encrypted file - but it "
+ "wasn't found! Please regenerate the secret in case it has been "
+ "checked into version control and leaked!"
)
# If file has a `sops` key, we assume it's sops encrypted
with tempfile.NamedTemporaryFile() as f:
subprocess.check_call(
["sops", "--output", f.name, "--decrypt", original_filepath]
)
yield f.name
else:
# The file does not have "secret" in its name, therefore does not need to be
# decrypted. Yield the original filepath unchanged.
yield original_filepath
@contextmanager
def get_decrypted_files(files):
"""
This is a context manager that combines multiple `get_decrypted_file`
context managers that open and/or decrypt the files in `files`.
files should be all absolute paths
"""
with ExitStack() as stack:
yield [stack.enter_context(get_decrypted_file(f)) for f in files]
``` |
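A usage sketch for the helpers above (the cluster name and file names are hypothetical): locate a cluster.yaml, then read a mix of plain and sops-encrypted files through a single context manager:
```python
cluster_file = find_absolute_path_to_cluster_file("my-cluster")

files = [
    str(cluster_file.parent / "enc-deployer-credentials.secret.json"),
    str(cluster_file.parent / "support.values.yaml"),
]
with get_decrypted_files(files) as paths:
    # Each entry is either a decrypted tempfile path or the original path;
    # any tempfiles are removed when the with-block exits.
    for path in paths:
        with open(path) as f:
            print(f.read()[:80])
```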
{
"source": "2i2c-org/sphinx-2i2c-theme",
"score": 2
} |
#### File: 2i2c-org/sphinx-2i2c-theme/noxfile.py
```python
import nox
nox.options.reuse_existing_virtualenvs = True
build_command = ["-b", "html", "docs", "docs/_build/html"]
@nox.session
def docs(session):
session.install("-e", ".[dev]")
session.install("-r", "docs/requirements.txt")
session.run("stb", "compile")
session.run("sphinx-build", *build_command)
@nox.session(name="docs-live")
def docs_live(session):
session.install("-e", ".[dev]")
session.install("-r", "docs/requirements.txt")
session.run("stb", "serve", "docs")
``` |
{
"source": "2i9/ls.joyous",
"score": 2
} |
#### File: joyous/tests/test_ical.py
```python
import sys
import datetime as dt
import pytz
from io import BytesIO
from icalendar import vDatetime
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.test import TestCase, RequestFactory
from django.utils import timezone
from wagtail.core.models import Site, Page
from ls.joyous.models.calendar import CalendarPage
from ls.joyous.models import (SimpleEventPage, MultidayEventPage,
RecurringEventPage, CancellationPage, MultidayRecurringEventPage)
from ls.joyous.models import getAllEvents
from ls.joyous.utils.recurrence import Recurrence
from ls.joyous.utils.recurrence import WEEKLY, MONTHLY, TU, SA
from ls.joyous.formats.ical import ICalHandler
from freezegun import freeze_time
from .testutils import datetimetz
# ------------------------------------------------------------------------------
class TestImport(TestCase):
def setUp(self):
Site.objects.update(hostname="joy.test")
self.home = Page.objects.get(slug='home')
self.user = User.objects.create_user('i', '<EMAIL>', '<PASSWORD>')
self.requestFactory = RequestFactory()
self.calendar = CalendarPage(owner = self.user,
slug = "events",
title = "Events")
self.home.add_child(instance=self.calendar)
self.calendar.save_revision().publish()
self.handler = ICalHandler()
def _getRequest(self, path="/"):
request = self.requestFactory.get(path)
request.user = self.user
request.site = self.home.get_site()
request.session = {}
request._messages = FallbackStorage(request)
request.POST = request.POST.copy()
request.POST['action-publish'] = "action-publish"
return request
@freeze_time("2018-07-24 19:00:00")
def testMeetup(self):
stream = BytesIO(b"""\
BEGIN:VCALENDAR\r
VERSION:2.0\r
PRODID:-//Meetup//RemoteApi//EN\r
CALSCALE:GREGORIAN\r
METHOD:PUBLISH\r
X-ORIGINAL-URL:https://www.meetup.com/Code-for-Boston/events/249894034/ic\r
al/Weekly+Hack+Night.ics\r
X-WR-CALNAME:Events - Weekly Hack Night.ics\r
X-MS-OLK-FORCEINSPECTOROPEN:TRUE\r
BEGIN:VTIMEZONE\r
TZID:America/New_York\r
X-LIC-LOCATION:America/New_York\r
BEGIN:DAYLIGHT\r
TZOFFSETFROM:-0500\r
TZOFFSETTO:-0400\r
TZNAME:EDT\r
DTSTART:19700308T020000\r
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU\r
END:DAYLIGHT\r
BEGIN:STANDARD\r
TZOFFSETFROM:-0400\r
TZOFFSETTO:-0500\r
TZNAME:EST\r
DTSTART:19701101T020000\r
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU\r
END:STANDARD\r
END:VTIMEZONE\r
BEGIN:VEVENT\r
DTSTAMP:20180721T015100Z\r
DTSTART;TZID=America/New_York:20180724T190000\r
DTEND;TZID=America/New_York:20180724T213000\r
STATUS:CONFIRMED\r
SUMMARY:Weekly Hack Night\r
DESCRIPTION:Code for Boston\\nTuesday\\, July 24 at 7:00 PM\\n\\nOur weekly w\r
ork session will be at the Cambridge Innovation Center in Kendall Square\r
\\, on the FOURTH FLOOR\\, in the CAFE. These Hack Nights are our time...\\\r
n\\nhttps://www.meetup.com/Code-for-Boston/events/249894034/\r
CLASS:PUBLIC\r
CREATED:20180404T010420Z\r
GEO:42.36;-71.09\r
LOCATION:Cambridge Innovation Center\\, 4th Floor Cafe (1 Broadway\\, Cambr\r
idge\\, MA)\r
URL:https://www.meetup.com/Code-for-Boston/events/249894034/\r
LAST-MODIFIED:20180404T010420Z\r
UID:event_xwqmnpyxkbg<EMAIL>.<EMAIL>\r
END:VEVENT\r
END:VCALENDAR""")
self.handler.load(self.calendar, self._getRequest(), stream)
events = SimpleEventPage.events.child_of(self.calendar).all()
self.assertEqual(len(events), 1)
event = events[0]
self.assertEqual(event.owner, self.user)
self.assertEqual(event.slug, "weekly-hack-night")
self.assertEqual(event.title, "Weekly Hack Night")
self.assertEqual(event.details, "\n".join(["Code for Boston",
"Tuesday, July 24 at 7:00 PM", "",
"Our weekly work session will be at the Cambridge Innovation Center in Kendall Square"
", on the FOURTH FLOOR, in the CAFE. These Hack Nights are our time...", "",
"https://www.meetup.com/Code-for-Boston/events/249894034/"]))
self.assertEqual(event.date, dt.date(2018,7,24))
self.assertEqual(event.time_from, dt.time(19))
self.assertEqual(event.time_to, dt.time(21,30))
self.assertEqual(event.tz.zone, "America/New_York")
@freeze_time("2018-02-01")
@timezone.override("Pacific/Auckland")
def testGoogleCalendar(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Google Inc//Google Calendar 70.9054//EN
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:Test Data
X-WR-TIMEZONE:Pacific/Auckland
X-WR-CALDESC:Sample data for Joyous test_ical unittest
BEGIN:VTIMEZONE
TZID:Pacific/Auckland
X-LIC-LOCATION:Pacific/Auckland
BEGIN:DAYLIGHT
TZOFFSETFROM:+1200
TZOFFSETTO:+1300
TZNAME:NZDT
DTSTART:19700927T020000
RRULE:FREQ=YEARLY;BYMONTH=9;BYDAY=-1SU
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:+1300
TZOFFSETTO:+1200
TZNAME:NZST
DTSTART:19700405T030000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART:20180725T210000Z
DTEND:20180726T083000Z
DTSTAMP:20180722T060025Z
UID:<EMAIL>
CREATED:20180722T035919Z
DESCRIPTION:Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend
alits mily tente duse prommuniss ind sedships itommunte of perpollood.
LAST-MODIFIED:20180722T035919Z
LOCATION:
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Big Thursday
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=Pacific/Auckland:20180703T093000
DTEND;TZID=Pacific/Auckland:20180703T113000
RRULE:FREQ=WEEKLY;UNTIL=20180828T115959Z;BYDAY=TU
EXDATE;TZID=Pacific/Auckland:20180814T093000
DTSTAMP:20180722T060025Z
UID:<EMAIL>
CREATED:20180722T035429Z
DESCRIPTION:\nFammulturacha matent theaminerviencess atinjuse it shin sue o
f Aothips to ming an sed prage thnisithass invernships oftegruct and encome
. Taimen in grose to to ner grough ingin orgagences' of Fries seed\n\nFrith
erovere Houps of custims analienessuppol. Tiriendindnew\, vality a gruccous
er to be the juse Truch ince lity Te therneramparcialues the the neshipland
s tortandamength\, Comene ups a mitioney dend peachassfy de are to entices
meand evelas of Friscerple th iseek arces a wind.
LAST-MODIFIED:20180722T035937Z
LOCATION:Coast Rd\, Barrytown\, New Zealand
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Tuesday Mornings
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART;VALUE=DATE:20180713
DTEND;VALUE=DATE:20180716
DTSTAMP:20180722T060025Z
UID:<EMAIL>
CREATED:20180722T040054Z
DESCRIPTION:
LAST-MODIFIED:20180722T040054Z
LOCATION:Home
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Three days off
TRANSP:TRANSPARENT
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=Pacific/Auckland:20180725T093000
DTEND;TZID=Pacific/Auckland:20180725T113000
DTSTAMP:20180722T060025Z
UID:<EMAIL>
RECURRENCE-ID;TZID=Pacific/Auckland:20180724T093000
CREATED:20180722T035429Z
DESCRIPTION:\nFammulturacha matent theaminerviencess atinjuse it shin sue o
f Aothips to ming an sed prage thnisithass invernships oftegruct and encome
. Taimen in grose to to ner grough ingin orgagences' of Fries seed\n\nFrith
erovere Houps of custims analienessuppol. Tiriendindnew\, vality a gruccous
er to be the juse Truch ince lity Te therneramparcialues the the neshipland
s tortandamength\, Comene ups a mitioney dend peachassfy de are to entices
meand evelas of Friscerple th iseek arces a wind.
LAST-MODIFIED:20180722T051000Z
LOCATION:Coast Rd\, Barrytown\, New Zealand
SEQUENCE:1
STATUS:CONFIRMED
SUMMARY:Tuesday Mornings Postponed
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=Pacific/Auckland:20180731T093000
DTEND;TZID=Pacific/Auckland:20180731T113000
DTSTAMP:20180722T060025Z
UID:<EMAIL>
RECURRENCE-ID;TZID=Pacific/Auckland:20180731T093000
CREATED:20180722T035429Z
DESCRIPTION:\nExtra Famin fork\, andivery\, Hough in the re of re whels ot
edshiplue porturat inve in nurectic.
LAST-MODIFIED:20180722T051201Z
LOCATION:Coast Rd\, Barrytown\, New Zealand
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Tuesday Morning Extra Info
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART:20180717T220000Z
DTEND:20180717T223000Z
DTSTAMP:20180722T060025Z
UID:<EMAIL>
CREATED:20180722T050847Z
DESCRIPTION:
LAST-MODIFIED:20180722T055756Z
LOCATION:Pariroa Beach
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Little Wednesday
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
DTSTART:20180723T190000Z
DTEND:20180723T200000Z
DTSTAMP:20180722T060025Z
UID:<EMAIL>
CREATED:20180722T055954Z
DESCRIPTION:
LAST-MODIFIED:20180722T055954Z
LOCATION:
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Conference Call
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = getAllEvents(request, home=self.calendar)
self.assertEqual(len(events), 5)
tueMorn, daysOff, lilWeds, cnfCall, bigThur = events
self.assertEqual(tueMorn.owner, self.user)
self.assertEqual(tueMorn.slug, "tuesday-mornings")
self.assertEqual(tueMorn.title, "Tuesday Mornings")
self.assertEqual(tueMorn.details, "\n".join(["",
"Fammulturacha matent theaminerviencess atinjuse it shin sue of "
"Aothips to ming an sed prage thnisithass invernships oftegruct "
"and encome. Taimen in grose to to ner grough ingin orgagences' "
"of Fries seed", "",
"Fritherovere Houps of custims analienessuppol. Tiriendindnew, "
"vality a gruccouser to be the juse Truch ince lity Te "
"therneramparcialues the the neshiplands tortandamength, "
"Comene ups a mitioney dend peachassfy de are to entices meand "
"evelas of Friscerple th iseek arces a wind."]))
self.assertEqual(tueMorn.tz.zone, "Pacific/Auckland")
self.assertEqual(tueMorn.time_from, dt.time(9,30))
self.assertEqual(tueMorn.time_to, dt.time(11,30))
self.assertEqual(tueMorn.location, "Coast Rd, Barrytown, New Zealand")
self.assertEqual(tueMorn.when,
"Tuesdays (until 28 August 2018) at 9:30am to 11:30am")
tueExceptions = tueMorn.get_children()
self.assertEqual(len(tueExceptions), 3)
tue24th, tue31st, tue14th = [page.specific for page in tueExceptions]
self.assertEqual(tue24th.owner, self.user)
self.assertEqual(tue24th.overrides, tueMorn)
self.assertEqual(tue24th.slug, "2018-07-24-postponement")
self.assertEqual(tue24th.title, "Postponement for Tuesday 24th of July")
self.assertEqual(tue24th.details, tueMorn.details)
self.assertEqual(tue24th.tz.zone, "Pacific/Auckland")
self.assertEqual(tue24th.except_date,dt.date(2018,7,24))
self.assertEqual(tue24th.date, dt.date(2018,7,25))
self.assertEqual(tue24th.time_from, dt.time(9,30))
self.assertEqual(tue24th.time_to, dt.time(11,30))
self.assertEqual(tue24th.location, "Coast Rd, Barrytown, New Zealand")
self.assertEqual(tue31st.owner, self.user)
self.assertEqual(tue31st.overrides, tueMorn)
self.assertEqual(tue31st.slug, "2018-07-31-extra-info")
self.assertEqual(tue31st.title, "Extra-Info for Tuesday 31st of July")
self.assertEqual(tue31st.extra_title,"Tuesday Morning Extra Info")
self.assertEqual(tue31st.extra_information, "\n".join(["",
"Extra Famin fork, andivery, Hough in the re of re whels "
"otedshiplue porturat inve in nurectic."]))
self.assertEqual(tue31st.tz.zone, "Pacific/Auckland")
self.assertEqual(tue31st.except_date,dt.date(2018,7,31))
self.assertEqual(tue14th.owner, self.user)
self.assertEqual(tue14th.overrides, tueMorn)
self.assertEqual(tue14th.slug, "2018-08-14-cancellation")
self.assertEqual(tue14th.title, "Cancellation for Tuesday 14th of August")
self.assertEqual(tue14th.cancellation_title, "")
self.assertEqual(tue14th.cancellation_details, "")
self.assertEqual(tue14th.tz.zone, "Pacific/Auckland")
self.assertEqual(tue14th.except_date,dt.date(2018,8,14))
self.assertEqual(daysOff.owner, self.user)
self.assertEqual(daysOff.slug, "three-days-off")
self.assertEqual(daysOff.title, "Three days off")
self.assertEqual(daysOff.details, "")
self.assertEqual(daysOff.tz.zone, "Pacific/Auckland")
self.assertEqual(daysOff.date_from, dt.date(2018,7,13))
self.assertEqual(daysOff.time_from, None)
self.assertEqual(daysOff.date_to, dt.date(2018,7,15))
self.assertEqual(daysOff.time_to, None)
self.assertEqual(daysOff.location, "Home")
self.assertEqual(lilWeds.owner, self.user)
self.assertEqual(lilWeds.slug, "little-wednesday")
self.assertEqual(lilWeds.title, "Little Wednesday")
self.assertEqual(lilWeds.details, "")
self.assertEqual(lilWeds.tz, pytz.utc)
self.assertEqual(lilWeds.date, dt.date(2018,7,17))
self.assertEqual(lilWeds.time_from, dt.time(22))
self.assertEqual(lilWeds.time_to, dt.time(22,30))
self.assertEqual(lilWeds.location, "Pariroa Beach")
self.assertEqual(lilWeds.when, "Wednesday 18th of July at 10am to 10:30am")
self.assertEqual(cnfCall.owner, self.user)
self.assertEqual(cnfCall.slug, "conference-call")
self.assertEqual(cnfCall.title, "Conference Call")
self.assertEqual(cnfCall.details, "")
self.assertEqual(cnfCall.tz, pytz.utc)
self.assertEqual(cnfCall.date, dt.date(2018,7,23))
self.assertEqual(cnfCall.time_from, dt.time(19))
self.assertEqual(cnfCall.time_to, dt.time(20))
self.assertEqual(bigThur.owner, self.user)
self.assertEqual(bigThur.slug, "big-thursday")
self.assertEqual(bigThur.title, "Big Thursday")
self.assertEqual(bigThur.details,
"Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend "
"alits mily tente duse prommuniss ind sedships itommunte of perpollood.")
self.assertEqual(bigThur.tz, pytz.utc)
self.assertEqual(bigThur.date_from, dt.date(2018,7,25))
self.assertEqual(bigThur.time_from, dt.time(21))
self.assertEqual(bigThur.date_to, dt.date(2018,7,26))
self.assertEqual(bigThur.time_to, dt.time(8,30))
self.assertEqual(bigThur.when, "Thursday 26th of July at 9am to 8:30pm")
@freeze_time("2018-02-01")
@timezone.override("Pacific/Auckland")
def testUtc2Local(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Google Inc//Google Calendar 70.9054//EN
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:Test Data
X-WR-TIMEZONE:Australia/Sydney
X-WR-CALDESC:Sample data for Joyous test_ical unittest
BEGIN:VEVENT
DTSTART:20180725T210000Z
DTEND:20180726T083000Z
DTSTAMP:20180722T060025Z
UID:<EMAIL>
CREATED:20180722T035919Z
DESCRIPTION:Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend
alits mily tente duse prommuniss ind sedships itommunte of perpollood.
LAST-MODIFIED:20180722T035919Z
LOCATION:
SEQUENCE:0
STATUS:CONFIRMED
SUMMARY:Big Thursday
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream, utc2local=True)
events = getAllEvents(request, home=self.calendar)
self.assertEqual(len(events), 1)
bigThur = events[0]
self.assertEqual(bigThur.owner, self.user)
self.assertEqual(bigThur.slug, "big-thursday")
self.assertEqual(bigThur.title, "Big Thursday")
self.assertEqual(bigThur.details,
"Hounit <b>catlike</b> at ethatial to thin a usistiques onshiend "
"alits mily tente duse prommuniss ind sedships itommunte of perpollood.")
self.assertEqual(bigThur.tz.zone, "Australia/Sydney")
self.assertEqual(bigThur.date_from, dt.date(2018,7,26))
self.assertEqual(bigThur.time_from, dt.time(7))
self.assertEqual(bigThur.date_to, dt.date(2018,7,26))
self.assertEqual(bigThur.time_to, dt.time(18,30))
self.assertEqual(bigThur.when, "Thursday 26th of July at 9am to 8:30pm")
def testOutlook(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Microsoft Corporation//Outlook 11.0 MIMEDIR//EN
VERSION:2.0
METHOD:PUBLISH
BEGIN:VEVENT
DTSTART:20180730T092500
DTEND:20180730T101500
UID:7N7Y7V6J4N2U4I3U7H0N7W5O4V2U0K3H2E4Q4O7A2H0W1A5M6N
DTSTAMP:20180728T035656
DESCRIPTION;ENCODING=QUOTED-PRINTABLE:Booking number 9876543=0D=0A=0D=0AYour outgoing route is Westport > Wellington.=0D=0AThis route departs Westport on 30/Jul/2018 09:25 and arrives at Wellington at 10:15. The check-in time is 08:55.=0A
SUMMARY;ENCODING=QUOTED-PRINTABLE:Sounds Air - Flight Reminder
PRIORITY:3
BEGIN:VALARM
TRIGGER:-PT24H
ACTION:DISPLAY
DESCRIPTION:Reminder
END:VALARM
END:VEVENT
END:VCALENDAR
BEGIN:VCALENDAR
PRODID:-//Microsoft Corporation//Outlook 11.0 MIMEDIR//EN
VERSION:2.0
METHOD:PUBLISH
BEGIN:VEVENT
DTSTART:20180731T081500
DTEND:20180731T090000
UID:1G0K0V7K4L0H4Q4T5F4R8U2E0D0S4H2M6O1J6M5C5S2R4D0S2Q
DTSTAMP:20180728T035656
DESCRIPTION;ENCODING=QUOTED-PRINTABLE:Booking number 9876543=0D=0A=0D=0A=0D=0AYour return route is Wellington > Westport.=0D=0AThis route departs Wellington on 31/Jul/2018 08:15 and arrives at Westport at 09:00. The check-in time is 07:45.=0A
SUMMARY;ENCODING=QUOTED-PRINTABLE:Sounds Air - Flight Reminder
PRIORITY:3
BEGIN:VALARM
TRIGGER:-PT24H
ACTION:DISPLAY
DESCRIPTION:Reminder
END:VALARM
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = [page.specific for page in self.calendar.get_children()]
self.assertEqual(len(events), 2)
flight1, flight2 = events
self.assertEqual(flight1.slug, "sounds-air-flight-reminder")
self.assertEqual(flight1.title, "Sounds Air - Flight Reminder")
self.assertEqual(flight1.details, "\r\n".join(["Booking number 9876543",
"", "Your outgoing route is Westport > Wellington.",
"This route departs Westport on 30/Jul/2018 09:25 and arrives at "
"Wellington at 10:15. The check-in time is 08:55.\n"]))
self.assertEqual(flight1.tz.zone, "Asia/Tokyo")
self.assertEqual(flight1.date, dt.date(2018,7,30))
self.assertEqual(flight1.time_from, dt.time(9,25))
self.assertEqual(flight1.time_to, dt.time(10,15))
self.assertEqual(flight2.slug, "sounds-air-flight-reminder-2")
self.assertEqual(flight2.title, "Sounds Air - Flight Reminder")
self.assertEqual(flight2.details, "\r\n".join(["Booking number 9876543",
"", "", "Your return route is Wellington > Westport.",
"This route departs Wellington on 31/Jul/2018 08:15 and arrives at "
"Westport at 09:00. The check-in time is 07:45.\n"]))
self.assertEqual(flight2.tz.zone, "Asia/Tokyo")
self.assertEqual(flight2.date, dt.date(2018,7,31))
self.assertEqual(flight2.time_from, dt.time(8,15))
self.assertEqual(flight2.time_to, dt.time(9))
def testFacebook(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Facebook//NONSGML Facebook Events V1.0//EN
X-PUBLISHED-TTL:PT12H
X-ORIGINAL-URL:https://www.facebook.com/events/501511573641525/
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
BEGIN:VEVENT
DTSTAMP:20180729T102010Z
LAST-MODIFIED:20180729T102010Z
CREATED:20180729T102010Z
SEQUENCE:0
ORGANIZER;CN=Jjjj Bbbbb:MAILTO:<EMAIL>
ATTENDEE;CN=Bbbbb Wwwwww;PARTSTAT=ACCEPTED:https://www.facebook.com/bbwwwwww
ATTENDEE;CN=Jjjj Bbbbb;PARTSTAT=ACCEPTED:https://www.facebook.com/jjjj.bbbbb
ATTENDEE;CN=Pppp Tttttt;PARTSTAT=TENTATIVE:https://www.facebook.com/pppp.tttttt.123
DTSTART:20180831T070000Z
DTEND:20180831T100000Z
UID:<EMAIL>
SUMMARY:Photo Comp - Prize Giving
LOCATION:TBC
URL:https://www.facebook.com/events/501511573641525/
DESCRIPTION:The much anticipated 2018 West Coa
st Alpine Club is open!\nEntries cl
ose midnight Friday 24th August. F
ull details and entry form in the
linked PDF: https://www.dropbox.co
m/s/5vxnep33ccxok9z/PhotoCompDetai
ls.pdf?dl=0\nDetails of the prize g
iving will be added here in due co
urse\, but save the date in the mea
n time.\n\nhttps://www.facebook.com/
events/501511573641525/
CLASS:PUBLIC
STATUS:CONFIRMED
PARTSTAT:NEEDS-ACTION
END:VEVENT
END:VCALENDAR
""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = self.calendar.get_children()
self.assertEqual(len(events), 1)
event = events[0].specific
self.assertEqual(event.slug, "photo-comp-prize-giving")
self.assertEqual(event.title, "Photo Comp - Prize Giving")
self.assertEqual(event.details, "\n".join([
"The much anticipated 2018 West Coast Alpine Club is open!",
"Entries close midnight Friday 24th August. Full details and "
"entry form in the linked PDF: https://www.dropbox.com/s/"
"5vxnep33ccxok9z/PhotoCompDetails.pdf?dl=0",
"Details of the prize giving will be added here in due course, "
"but save the date in the mean time.", "",
"https://www.facebook.com/events/501511573641525/"]))
self.assertEqual(event.tz.zone, "UTC")
self.assertEqual(event.date, dt.date(2018,8,31))
self.assertEqual(event.time_from, dt.time(7))
self.assertEqual(event.time_to, dt.time(10))
def testUntilTZ(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
PRODID:-//Google Inc//Google Calendar 70.9054//EN
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:<EMAIL>
X-WR-TIMEZONE:Pacific/Auckland
BEGIN:VTIMEZONE
TZID:America/New_York
X-LIC-LOCATION:America/New_York
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:19700308T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:19701101T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20310101T050000
DTEND;TZID=America/New_York:20310101T070000
RRULE:FREQ=DAILY;UNTIL=20310108T045959Z
DTSTAMP:20190331T203301Z
UID:<EMAIL>
CREATED:20190331T200304Z
DESCRIPTION:New Year resolution
LAST-MODIFIED:20190331T203219Z
LOCATION:New York\, NY\, USA
SEQUENCE:5
STATUS:CONFIRMED
SUMMARY:Exercise
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = self.calendar.get_children()
self.assertEqual(len(events), 1)
event = events[0].specific
self.assertIs(type(event), RecurringEventPage)
self.assertEqual(event.slug, "exercise")
self.assertEqual(event.tz.zone, "America/New_York")
self.assertEqual(event.time_from, dt.time(5))
self.assertEqual(event.time_to, dt.time(7))
self.assertEqual(event.repeat.getCount(), 7)
self.assertTrue(event._occursOn(dt.date(2031,1,1)))
self.assertFalse(event._occursOn(dt.date(2031,1,8)))
def testMultidayRecurringEvent(self):
stream = BytesIO(rb"""
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//linuxsoftware.nz//NONSGML Joyous v0.8//EN
BEGIN:VEVENT
SUMMARY:Bought from a Rubber Man
DTSTART;TZID=Pacific/Auckland:20190402T160000
DTEND;TZID=Pacific/Auckland:20190404T180000
DTSTAMP:20190405T054311Z
UID:e6936872-f15c-4c47-92f2-3559a6610c78
SEQUENCE:1
RRULE:FREQ=WEEKLY;BYDAY=TU;WKST=SU
CREATED:20190405T054255Z
DESCRIPTION:<p></p>
LAST-MODIFIED:20190405T054255Z
LOCATION:
URL:http://localhost/calendar/bought-rubber-man/
END:VEVENT
BEGIN:VTIMEZONE
TZID:Pacific/Auckland
BEGIN:DAYLIGHT
DTSTART;VALUE=DATE-TIME:20180930T030000
RDATE:20190929T030000,20200927T030000,20210926T030000,20220925T030000,2023
0924T030000,20240929T030000,20250928T030000,20260927T030000,20270926T03000
0,20280924T030000,20290930T030000,20300929T030000,20310928T030000,20320926
T030000,20330925T030000,20340924T030000,20350930T030000,20360928T030000,20
370927T030000
TZNAME:NZDT
TZOFFSETFROM:+1200
TZOFFSETTO:+1300
END:DAYLIGHT
BEGIN:STANDARD
DTSTART;VALUE=DATE-TIME:20190407T020000
RDATE:20200405T020000,20210404T020000,20220403T020000,20230402T020000,2024
0407T020000,20250406T020000,20260405T020000,20270404T020000,20280402T02000
0,20290401T020000,20300407T020000,20310406T020000,20320404T020000,20330403
T020000,20340402T020000,20350401T020000,20360406T020000,20370405T020000
TZNAME:NZST
TZOFFSETFROM:+1300
TZOFFSETTO:+1200
END:STANDARD
END:VTIMEZONE
END:VCALENDAR""")
request = self._getRequest()
self.handler.load(self.calendar, request, stream)
events = self.calendar.get_children()
self.assertEqual(len(events), 1)
event = events[0].specific
self.assertIs(type(event), MultidayRecurringEventPage)
self.assertEqual(event.title, "Bought from a Rubber Man")
self.assertEqual(event.tz.zone, "Pacific/Auckland")
self.assertEqual(event.num_days, 3)
self.assertEqual(event.time_from, dt.time(16))
self.assertEqual(event.time_to, dt.time(18))
# ------------------------------------------------------------------------------
class TestExport(TestCase):
def setUp(self):
Site.objects.update(hostname="joy.test")
self.home = Page.objects.get(slug='home')
self.user = User.objects.create_user('i', '<EMAIL>', 's3(R3t')
self.requestFactory = RequestFactory()
self.calendar = CalendarPage(owner = self.user,
slug = "events",
title = "Events")
self.home.add_child(instance=self.calendar)
self.calendar.save_revision().publish()
self.dicerun = SimpleEventPage(owner = self.user,
slug = "mercy-dice-run",
title = "Mercy Dice Run",
date = dt.date(2020,3,16),
location = "Newtown")
self.calendar.add_child(instance=self.dicerun)
self.dicerun.save_revision().publish()
event = SimpleEventPage(owner = self.user,
slug = "workshop",
title = "Workshop",
date = dt.date(2020,3,22))
self.calendar.add_child(instance=event)
event.save_revision().publish()
self.handler = ICalHandler()
def _getRequest(self, path="/"):
request = self.requestFactory.get(path)
request.user = self.user
request.site = self.home.get_site()
request.session = {}
request._messages = FallbackStorage(request)
request.POST = request.POST.copy()
request.POST['action-publish'] = "action-publish"
return request
def testServeCalendar(self):
response = self.handler.serve(self.calendar,
self._getRequest("/events/"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), "text/calendar")
self.assertEqual(response.get('Content-Disposition'),
"attachment; filename=events.ics")
self.assertEqual(response.content.count(b"BEGIN:VEVENT"), 2)
def testServeEvent(self):
response = self.handler.serve(self.dicerun,
self._getRequest("/events/mercy-dice-run/"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), "text/calendar")
self.assertEqual(response.get('Content-Disposition'),
"attachment; filename=mercy-dice-run.ics")
self.assertEqual(response.content.count(b"BEGIN:VEVENT"), 1)
self.assertIn(b"SUMMARY:Mercy Dice Run", response.content)
self.assertIn(b"DTSTART;TZID=Asia/Tokyo:20200316T000000", response.content)
self.assertIn(b"DTEND;TZID=Asia/Tokyo:20200316T235959", response.content)
self.assertIn(b"LOCATION:Newtown", response.content)
self.assertIn(b"URL:http://joy.test/events/mercy-dice-run", response.content)
def testServePage(self):
response = self.handler.serve(self.home, self._getRequest("/"))
self.assertIsNone(response)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
```
#### File: joyous/tests/test_simple_event.py
```python
import sys
import datetime as dt
import pytz
from freezegun import freeze_time
from django.test import RequestFactory, override_settings
from django_bs_test import TestCase
from django.contrib.auth.models import User, AnonymousUser, Group
from django.utils import timezone
from wagtail.core.models import Page, PageViewRestriction
from ls.joyous.models.calendar import SpecificCalendarPage
from ls.joyous.models.events import SimpleEventPage, ThisEvent, EventsOnDay
from ls.joyous.models.groups import get_group_model
from .testutils import datetimetz
GroupPage = get_group_model()
# ------------------------------------------------------------------------------
class Test(TestCase):
def setUp(self):
self.home = Page.objects.get(slug='home')
self.user = User.objects.create_user('i', '<EMAIL>', '<PASSWORD>')
self.calendar = SpecificCalendarPage(owner = self.user,
slug = "events",
title = "Events")
self.home.add_child(instance=self.calendar)
self.calendar.save_revision().publish()
self.event = SimpleEventPage(owner = self.user,
slug = "pet-show",
title = "Pet Show",
date = dt.date(1987,6,5),
time_from = dt.time(11),
time_to = dt.time(17,30))
self.calendar.add_child(instance=self.event)
self.event.save_revision().publish()
def testGetEventsByDay(self):
events = SimpleEventPage.events.byDay(dt.date(1987,6,1),
dt.date(1987,6,30))
self.assertEqual(len(events), 30)
evod = events[4]
self.assertEqual(evod.date, dt.date(1987,6,5))
self.assertEqual(len(evod.all_events), 1)
self.assertEqual(len(evod.days_events), 1)
self.assertEqual(len(evod.continuing_events), 0)
def testStatus(self):
self.assertEqual(self.event.status, "finished")
self.assertEqual(self.event.status_text, "This event has finished.")
now = timezone.localtime()
earlier = now - dt.timedelta(hours=1)
if earlier.date() != now.date():
earlier = datetimetz(now.date(), dt.time.min)
nowEvent = SimpleEventPage(owner = self.user,
slug = "now",
title = "Now Event",
date = now.date(),
time_from = earlier.time(),
time_to = dt.time.max)
self.assertEqual(nowEvent.status, "started")
self.assertEqual(nowEvent.status_text, "This event has started.")
tomorrow = timezone.localdate() + dt.timedelta(days=1)
futureEvent = SimpleEventPage(owner = self.user,
slug = "tomorrow",
title = "Tomorrow's Event",
date = tomorrow)
self.calendar.add_child(instance=futureEvent)
self.assertIsNone(futureEvent.status)
self.assertEqual(futureEvent.status_text, "")
def testWhen(self):
self.assertEqual(self.event.when,
"Friday 5th of June 1987 at 11am to 5:30pm")
def testAt(self):
self.assertEqual(self.event.at, "11am")
def testUpcomingDt(self):
self.assertIsNone(self.event._upcoming_datetime_from)
now = timezone.localtime()
earlier = now - dt.timedelta(hours=1)
if earlier.date() != now.date():
earlier = datetimetz(now.date(), dt.time.min)
nowEvent = SimpleEventPage(owner = self.user,
slug = "now",
title = "Now Event",
date = now.date(),
time_from = earlier.time(),
time_to = dt.time.max)
self.calendar.add_child(instance=nowEvent)
self.assertIsNone(nowEvent._upcoming_datetime_from)
tomorrow = timezone.localdate() + dt.timedelta(days=1)
futureEvent = SimpleEventPage(owner = self.user,
slug = "tomorrow",
title = "Tomorrow's Event",
date = tomorrow)
self.calendar.add_child(instance=futureEvent)
self.assertEqual(futureEvent._upcoming_datetime_from,
datetimetz(tomorrow, dt.time.max))
def testPastDt(self):
self.assertEqual(self.event._past_datetime_from, datetimetz(1987,6,5,11,0))
now = timezone.localtime()
earlier = now - dt.timedelta(hours=1)
if earlier.date() != now.date():
earlier = datetimetz(now.date(), dt.time.min)
nowEvent = SimpleEventPage(owner = self.user,
slug = "now",
title = "Now Event",
date = now.date(),
time_from = earlier.time(),
time_to = dt.time.max)
self.calendar.add_child(instance=nowEvent)
self.assertEqual(nowEvent._past_datetime_from, earlier)
tomorrow = timezone.localdate() + dt.timedelta(days=1)
futureEvent = SimpleEventPage(owner = self.user,
slug = "tomorrow",
title = "Tomorrow's Event",
date = tomorrow)
self.calendar.add_child(instance=futureEvent)
self.assertIsNone(futureEvent._past_datetime_from)
def testGroup(self):
self.assertIsNone(self.event.group)
group = GroupPage(slug = "runners",
title = "Runners")
self.home.add_child(instance=group)
race = SimpleEventPage(owner = self.user,
slug = "race",
title = "Race",
date = dt.date(2008, 6, 3))
group.add_child(instance=race)
self.assertEqual(race.group, group)
@override_settings(JOYOUS_THEME_CSS = "/static/joyous/joyous_stellar_theme.html")
def testIncludeThemeCss(self):
response = self.client.get("/events/pet-show/")
self.assertEqual(response.status_code, 200)
soup = response.soup
links = soup.head('link')
self.assertEqual(len(links), 2)
link = links[1]
self.assertEqual(link['href'], "/static/joyous/joyous_stellar_theme.html")
self.assertEqual(link['type'], "text/css")
self.assertEqual(link['rel'], ["stylesheet"])
# ------------------------------------------------------------------------------
class TestTZ(TestCase):
def setUp(self):
self.home = Page.objects.get(slug='home')
self.user = User.objects.create_user('i', '<EMAIL>', 's3cr3t')
self.calendar = SpecificCalendarPage(owner = self.user,
slug = "events",
title = "Events")
self.home.add_child(instance=self.calendar)
self.calendar.save_revision().publish()
self.event = SimpleEventPage(owner = self.user,
slug = "pet-show",
title = "Pet Show",
date = dt.date(1987,6,5),
time_from = dt.time(11),
time_to = dt.time(17,30),
tz = pytz.timezone("Australia/Sydney"))
self.calendar.add_child(instance=self.event)
self.event.save_revision().publish()
@timezone.override("America/Los_Angeles")
def testGetEventsByLocalDay(self):
evods = SimpleEventPage.events.byDay(dt.date(1987,6,1),
dt.date(1987,6,30))
self.assertEqual(len(evods), 30)
evod1 = evods[3]
self.assertEqual(evod1.date, dt.date(1987,6,4))
self.assertEqual(len(evod1.days_events), 1)
self.assertEqual(len(evod1.continuing_events), 0)
evod2 = evods[4]
self.assertEqual(evod2.date, dt.date(1987,6,5))
self.assertEqual(len(evod2.days_events), 0)
self.assertEqual(len(evod2.continuing_events), 1)
self.assertEqual(evod1.all_events[0], evod2.all_events[0])
self.assertIs(evod1.all_events[0].page, evod2.all_events[0].page)
@timezone.override("America/Los_Angeles")
def testLocalWhen(self):
self.assertEqual(self.event.when,
"Thursday 4th of June 1987 at 6pm to Friday 5th of June 1987 at 12:30am")
@timezone.override("America/Los_Angeles")
def testLocalAt(self):
self.assertEqual(self.event.at, "6pm")
@timezone.override("America/Los_Angeles")
def testUpcomingLocalDt(self):
self.assertIsNone(self.event._upcoming_datetime_from)
@timezone.override("America/Los_Angeles")
def testPastLocalDt(self):
when = self.event._past_datetime_from
self.assertEqual(when.tzinfo.zone, "America/Los_Angeles")
self.assertEqual(when.time(), dt.time(18))
self.assertEqual(when.date(), dt.date(1987,6,4))
@timezone.override("Pacific/Tongatapu")
def testGetEventsAcrossDateline(self):
showDay = SimpleEventPage(owner = self.user,
slug = "tamakautoga-village-show-day",
title = "Tamakautoga Village Show Day",
date = dt.date(2016,7,30),
tz = pytz.timezone("Pacific/Niue"))
self.calendar.add_child(instance=showDay)
evods = SimpleEventPage.events.byDay(dt.date(2016,7,31),
dt.date(2016,7,31))
self.assertEqual(len(evods[0].days_events), 1)
self.assertEqual(len(evods[0].continuing_events), 0)
event = evods[0].days_events[0].page
self.assertEqual(event.at, "")
self.assertEqual(event.when, "Sunday 31st of July 2016")
# ------------------------------------------------------------------------------
class TestQuerySet(TestCase):
def setUp(self):
self.home = Page.objects.get(slug='home')
self.user = User.objects.create_user('i', '<EMAIL>', 's3cr3t')
self.calendar = SpecificCalendarPage(owner = self.user,
slug = "events",
title = "Events")
self.home.add_child(instance=self.calendar)
self.calendar.save_revision().publish()
self.event = SimpleEventPage(owner = self.user,
slug = "agfest",
title = "AgFest",
date = dt.date(2015,6,5),
time_from = dt.time(11),
time_to = dt.time(17,30))
self.calendar.add_child(instance=self.event)
self.event.save_revision().publish()
@freeze_time("2017-05-31")
def testPast(self):
self.assertEqual(list(SimpleEventPage.events.past()), [self.event])
self.assertEqual(SimpleEventPage.events.past().count(), 1)
self.assertEqual(SimpleEventPage.events.upcoming().count(), 0)
@freeze_time("2012-03-04")
def testUpcoming(self):
self.assertEqual(list(SimpleEventPage.events.upcoming()), [self.event])
self.assertEqual(SimpleEventPage.events.past().count(), 0)
self.assertEqual(SimpleEventPage.events.upcoming().count(), 1)
def testThis(self):
events = list(SimpleEventPage.events.this())
self.assertEqual(len(events), 1)
this = events[0]
self.assertTrue(isinstance(this, ThisEvent))
self.assertEqual(this.title, "AgFest")
self.assertEqual(this.page, self.event)
def testByDay(self):
evods = SimpleEventPage.events.byDay(dt.date(2015,6,5),
dt.date(2015,6,5))
self.assertEqual(len(evods), 1)
evod = evods[0]
self.assertTrue(isinstance(evod, EventsOnDay))
self.assertEqual(evod.date, dt.date(2015,6,5))
self.assertEqual(len(evod.days_events), 1)
self.assertEqual(len(evod.continuing_events), 0)
self.assertEqual(evod.days_events[0].title, "AgFest")
self.assertEqual(evod.days_events[0].page, self.event)
def testPasswordAuth(self):
PASSWORD = PageViewRestriction.PASSWORD
KEY = PageViewRestriction.passed_view_restrictions_session_key
meeting = SimpleEventPage(owner = self.user,
slug = "club-meet",
title = "Club Meeting",
date = dt.date(2009,10,21))
self.calendar.add_child(instance=meeting)
meeting.save_revision().publish()
restriction = PageViewRestriction.objects.create(restriction_type = PASSWORD,
password = "<PASSWORD>",
page = meeting)
self.assertEqual(list(SimpleEventPage.events.all()),
[self.event, meeting])
request = RequestFactory().get("/test")
request.user = self.user
request.session = {}
self.assertEqual(list(SimpleEventPage.events.auth(request)),
[self.event])
request.session[KEY] = [restriction.id]
self.assertEqual(list(SimpleEventPage.events.auth(request)),
[self.event, meeting])
def testLoginAuth(self):
LOGIN = PageViewRestriction.LOGIN
bee = SimpleEventPage(owner = self.user,
slug = "bee",
title = "Working Bee",
date = dt.date(2013,3,30),
time_from = dt.time(10))
self.calendar.add_child(instance=bee)
bee.save_revision().publish()
PageViewRestriction.objects.create(restriction_type = LOGIN,
page = bee)
self.assertEqual(list(SimpleEventPage.events.all()),
[self.event, bee])
self.assertFalse(bee.isAuthorized(None))
request = RequestFactory().get("/test")
request.user = AnonymousUser()
request.session = {}
self.assertEqual(list(SimpleEventPage.events.auth(request)),
[self.event])
request.user = self.user
self.assertEqual(list(SimpleEventPage.events.auth(request)),
[self.event, bee])
def testGroupsAuth(self):
GROUPS = PageViewRestriction.GROUPS
presidium = Group.objects.create(name = "Presidium")
secretariat = Group.objects.create(name = "Secretariat")
assembly = Group.objects.create(name = "Assembly")
meeting = SimpleEventPage(owner = self.user,
slug = "admin-cmte",
title = "Administration Committee Meeting",
date = dt.date(2015,6,20),
time_from = dt.time(16,30))
self.calendar.add_child(instance=meeting)
meeting.save_revision().publish()
restriction = PageViewRestriction.objects.create(restriction_type = GROUPS,
page = meeting)
restriction.groups.set([presidium, secretariat])
restriction.save()
self.assertEqual(list(SimpleEventPage.events.all()),
[self.event, meeting])
request = RequestFactory().get("/test")
request.user = self.user
request.session = {}
self.assertEqual(list(SimpleEventPage.events.auth(request)),
[self.event])
request.user = User.objects.create_superuser('joe', '<EMAIL>', 's3cr3t')
self.assertEqual(list(SimpleEventPage.events.auth(request)),
[self.event, meeting])
request.user = User.objects.create_user('jill', '<EMAIL>', 's3cr3t')
request.user.groups.set([secretariat, assembly])
self.assertEqual(list(SimpleEventPage.events.auth(request)),
[self.event, meeting])
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
``` |
{
"source": "2ik/django-editor.js",
"score": 2
} |
#### File: django-editor.js/django_editorjs_fields/fields.py
```python
import json
from django.core import checks
from django.core.exceptions import ValidationError
from django.db.models import Field
from django.forms import Textarea
from .config import DEBUG, EMBED_HOSTNAME_ALLOWED
from .utils import get_hostname_from_url
from .widgets import EditorJsWidget
try:
# pylint: disable=ungrouped-imports
from django.db.models import JSONField # Django >= 3.1
except ImportError:
HAS_JSONFIELD = False
else:
HAS_JSONFIELD = True
__all__ = ['EditorJsTextField', 'EditorJsJSONField']
class FieldMixin(Field):
def get_internal_type(self):
return 'TextField'
class EditorJsFieldMixin:
def __init__(self, plugins, tools, **kwargs):
self.use_editorjs = kwargs.pop('use_editorjs', True)
self.plugins = plugins
self.tools = tools
self.config = {}
        # Pass through any recognised Editor.js options to the widget config.
        for option in (
            'autofocus', 'hideToolbar', 'inlineToolbar', 'readOnly',
            'minHeight', 'logLevel', 'placeholder', 'defaultBlock',
            'sanitizer', 'i18n',
        ):
            if option in kwargs:
                self.config[option] = kwargs.pop(option)
super().__init__(**kwargs)
def validate_embed(self, value):
for item in value.get('blocks', []):
type = item.get('type', '').lower()
if type == 'embed':
embed = item['data']['embed']
hostname = get_hostname_from_url(embed)
if hostname not in EMBED_HOSTNAME_ALLOWED:
raise ValidationError(
hostname + ' is not allowed in EDITORJS_EMBED_HOSTNAME_ALLOWED')
def clean(self, value, model_instance):
if value and value != 'null':
if not isinstance(value, dict):
try:
value = json.loads(value)
                except (ValueError, TypeError):
                    pass
else:
self.validate_embed(value)
value = json.dumps(value)
else:
self.validate_embed(value)
return super().clean(value, model_instance)
def formfield(self, **kwargs):
if self.use_editorjs:
kwargs['widget'] = EditorJsWidget(
self.plugins, self.tools, self.config, **kwargs)
else:
kwargs['widget'] = Textarea(**kwargs)
# pylint: disable=no-member
return super().formfield(**kwargs)
class EditorJsTextField(EditorJsFieldMixin, FieldMixin):
# pylint: disable=useless-super-delegation
def __init__(self, plugins=None, tools=None, **kwargs):
super().__init__(plugins, tools, **kwargs)
def clean(self, value, model_instance):
if value == 'null':
value = None
return super().clean(value, model_instance)
class EditorJsJSONField(EditorJsFieldMixin, JSONField if HAS_JSONFIELD else FieldMixin):
# pylint: disable=useless-super-delegation
def __init__(self, plugins=None, tools=None, **kwargs):
super().__init__(plugins, tools, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_supported_json())
return errors
def _check_supported_json(self):
if not HAS_JSONFIELD and DEBUG:
return [
checks.Warning(
                    'Your Django version does not support JSONField; '
                    'use EditorJsTextField instead of EditorJsJSONField',
obj=self,
)
]
return []
``` |
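A hypothetical model definition using the fields above; the import path and plugin names are assumptions, and the keyword options shown are among those the mixin recognises:
```python
from django.db import models
from django_editorjs_fields import EditorJsJSONField, EditorJsTextField  # import path assumed

class Post(models.Model):
    # Rich Editor.js content stored as JSON (falls back to TextField storage
    # on Django < 3.1 via the FieldMixin above).
    body = EditorJsJSONField(
        plugins=["@editorjs/header", "@editorjs/list"],  # assumed plugin names
        placeholder="Start writing...",
        readOnly=False,
        null=True,
    )
    # Plain variant backed by a TextField.
    body_plain = EditorJsTextField(null=True)
```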
{
"source": "2-IMMERSE/dvbcsstv-lib",
"score": 2
} |
#### File: src/python/CssProxyEngine.py
```python
import sys
import json
try:
from dvbcss.protocol import OMIT
except ImportError:
sys.stderr.write("""
Could not import pydvbcss library. Suggest installing using pip, e.g. on Linux/Mac:
$ sudo pip install pydvbcss
""")
sys.exit(1)
from ProxyTimelineSource import ProxyTimelineSource
from CssProxy_ServerEndpoint import CssProxy_ServerEndpoint
from dvbcss.protocol.server.cii import CIIServer
from dvbcss.protocol.cii import CII
class BlockableCIIServer(CIIServer):
def __init__(self, *args, **kwargs):
super(BlockableCIIServer,self).__init__(*args,**kwargs)
self._blocking=False
def setBlocking(self, blocking):
if bool(self._blocking) == bool(blocking):
return
        self._blocking = blocking
if not self._blocking:
self.updateClients()
def onClientConnect(self, webSock):
"""Force not sending, if blocking"""
if not self._blocking:
super(BlockableCIIServer,self).onClientConnect(webSock)
else:
self.getConnections()[webSock]["prevCII"] = CII()
self.onNumClientsChange(len(self.getConnections()))
def onClientDisconnect(self, *args, **kwargs):
super(BlockableCIIServer,self).onClientDisconnect(*args, **kwargs)
self.onNumClientsChange(len(self.getConnections()))
def updateClients(self, *args, **kwargs):
if not self._blocking:
super(BlockableCIIServer, self).updateClients(*args, **kwargs)
def onNumClientsChange(self, newNumClients):
"""\
Stub. Override in your implementation to be notified when the number of
connected clients changes.
"""
pass
class CssProxyEngine(object):
"""\
Proxying server engine. Takes a CIIServer and TsServer and acts as a
proxy data source for them, setting the CII properties and providing the
timelines.
It acts as a proxy. It creates a CssProxy_ServerEndpoint to which another
client can connect (e.g. HTML+JS in a browser acting as the TV). When a
client connects to that, the requests from clients for CII messages and
for timelines are forwarded on via the server endpoint. Responses coming
back are relayed back to clients.
    This proxy must run on the same machine as the TV in a browser, so that
    they share the same system clock that is used as the wall clock.
+---------------+ +------------+ +-----------------+
| | ---CSS-CII---> | | | |
| | | | | |
| Companion app | ---CSS-TS----> | This proxy | <-----> | TV in a browser |
| | | | | |
| | ---CSS-WC----> | | | |
+---------------+ +------------+ +-----------------+
The DVB CSS server endpoints will only be enabled while the browser has an
open connection. If it closes that connection then the server becomes
disabled.
CII messages are modified to have the URLs of the Wall Clock and TS servers.
"""
Server = CssProxy_ServerEndpoint
TimelineSource = ProxyTimelineSource
def __init__(self, ciiServer, tsServer, ciiUrl, tsUrl, wcUrl):
"""\
:param ciiServer: A running BlockableCIIServer. Does not have to be enabled.
:param tsServer: A running TSServer. Does not have to be enabled.
:param ciiUrl: The URL of the CII server to be supplied to applications.
:param tsUrl: The URL of the TSServer endpoint.
        :param wcUrl: The URL of the WCServer endpoint.
"""
initialMessage = json.dumps({
"ciiUrl": ciiUrl
})
        # keep references to the CII and TS servers and advertise the TS/WC URLs
self.ciiServer = ciiServer
self.tsServer = tsServer
self.ciiServer.cii.tsUrl = tsUrl
self.ciiServer.cii.wcUrl = wcUrl
self.ciiServer.onNumClientsChange = self._onNumCiiClientsChanged
self.tsSource = self.TimelineSource()
self.serverEndpoint = self.Server(initialMessage)
self.tsServer.attachTimelineSource(self.tsSource)
self._onServerConnectionStateChange()
self.tsSource.onRequestedTimelinesChanged = self._onRequestedChangeFromClients
self.serverEndpoint.onUpdate = self._onUpdateFromServer
self.serverEndpoint.onServerConnected = self._onServerConnectionStateChange
self.serverEndpoint.onServerDisconnected = self._onServerConnectionStateChange
def _onNumCiiClientsChanged(self, newNumClients):
self.serverEndpoint.updateNumberOfSlaves(newNumClients)
def _onRequestedChangeFromClients(self, selectors, added, removed):
        self.serverEndpoint.sendTimelinesRequest(selectors, added, removed)
def _onUpdateFromServer(self, cii, controlTimestamps, options):
# don't allow these to be overridden - keep the values we first supplied
cii.tsUrl = OMIT
cii.wcUrl = OMIT
if ("blockCii" in options) and options["blockCii"]:
self.ciiServer.setBlocking(True)
self.ciiServer.cii.update(cii)
if ("blockCii" in options) and not options["blockCii"]:
self.ciiServer.setBlocking(False)
self.ciiServer.updateClients(sendOnlyDiff=True)
# Update the TS server
self.tsServer.contentId = self.ciiServer.cii.contentId
self.tsSource.timelinesUpdate(controlTimestamps)
self.tsServer.updateAllClients()
def _onServerConnectionStateChange(self):
connected = self.serverEndpoint.serverConnected
        self.ciiServer.enabled = connected
        self.tsServer.enabled = connected
        print("CII & TS Servers enabled? %s" % connected)
```
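The subtle part above is the blocking behaviour: while blocked, pushes to clients are suppressed, and unblocking flushes one update. A dependency-free sketch of that control flow (not the pydvbcss API, just the pattern):
```python
class BlockableNotifier(object):
    def __init__(self):
        self._blocking = False

    def setBlocking(self, blocking):
        if bool(self._blocking) == bool(blocking):
            return
        self._blocking = blocking
        if not self._blocking:
            self.updateClients()  # flush pending state on unblock

    def updateClients(self):
        if not self._blocking:
            print("pushing latest state to clients")

n = BlockableNotifier()
n.setBlocking(True)
n.updateClients()     # suppressed while blocked
n.setBlocking(False)  # flushes: prints once
```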
#### File: tests/python/test_CssProxyEngine.py
```python
import unittest
import random
import json
import sys
sys.path.append("../../src/python")
from CssProxyEngine import CssProxyEngine
from dvbcss.protocol.cii import CII
from dvbcss.protocol import OMIT
from mock_ciiServer import MockCiiServer
from mock_tsServer import MockTsServer
from mock_wsServerBase import MockWSServerBase
ciiUrl = "flurble"
tsUrl = "blah"
wcUrl = "plig"
def makeRandomAToZString(length=10):
codes = []
for i in range(0,length):
        codes.append(random.randrange(65, 65 + 26))  # ASCII 'A'..'Z'
return "".join([chr(c) for c in codes])
class Test_CssProxy(unittest.TestCase):
"""Tests of CssProxyEngine"""
def setUp(self):
self.mockServerBase = None
self.ciiServer = MockCiiServer()
self.tsServer = MockTsServer()
self._orig_ServerBase = CssProxyEngine.Server.ServerBase
CssProxyEngine.Server.ServerBase = self._mockWSServerBaseFactory
def tearDown(self):
CssProxyEngine.Server.ServerBase = self._orig_ServerBase
self.tsServer.cleanup()
self.ciiServer.cleanup()
def _mockWSServerBaseFactory(self, *args, **kwargs):
newServerBase = MockWSServerBase(*args, **kwargs)
self.mockServerBase = newServerBase
return newServerBase
    def test_defaultsServersToDisabled(self):
        """When the proxy engine takes control of the CIIServer and TSServer, they default to being disabled"""
self.ciiServer.enabled = True
self.tsServer.enabled = True
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
self.assertFalse(self.ciiServer.enabled)
self.assertFalse(self.tsServer.enabled)
    def test_serverAllowsSingleProxyConnection(self):
        """Check that the proxy has set up a server and enabled it for a SINGLE connection from the browser"""
self.assertIsNone(self.mockServerBase)
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
self.assertIsNotNone(self.mockServerBase)
self.assertTrue(self.mockServerBase.enabled)
self.assertEquals(self.mockServerBase._maxConnectionsAllowed,1)
def test_ServerSideEnabledWhenProxyConnectionMade(self):
"""The CII and TS servers are enabled when a proxy connection is made and disabled again when it disconnects"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
self.mockServerBase.mock_clientConnects()
self.assertTrue(self.ciiServer.enabled)
self.assertTrue(self.tsServer.enabled)
self.mockServerBase.mock_clientDisconnects()
self.assertFalse(self.ciiServer.enabled)
self.assertFalse(self.tsServer.enabled)
def test_initialCiiMostlyEmpty(self):
"""Default CII message is correct before CII is updated by the browser"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
cii = self.ciiServer.cii
self.assertEqual(len(cii.definedProperties()), 3)
self.assertEqual(cii.protocolVersion, "1.1")
self.assertEqual(cii.tsUrl, tsUrl)
self.assertEqual(cii.wcUrl, wcUrl)
    def test_browserCiiPropagatedToCsa(self):
"""Check that CII update is propagated to CSAs"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
# CSA connects
self.ciiServer.mock_clientConnects()
self.assertFalse(self.ciiServer.mock_wasUpdateClientsCalled())
# update to CII
msg = """\
{
"cii" : { "contentId": "boingboing", "presentationStatus":"okay", "contentIdStatus":"final" }
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
self.assertTrue(self.ciiServer.mock_wasUpdateClientsCalled())
cii = self.ciiServer.cii
self.assertEqual(cii.contentId, "boingboing")
self.assertEqual(cii.contentIdStatus, "final")
self.assertEqual(cii.presentationStatus, ["okay"])
def test_browserCiiDoesNotOverwriteTsUrlAndWcUrl(self):
"""Check that CII update from the browser does not overwrite the tsUrl or wcUrl"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
# CSA connects
self.ciiServer.mock_clientConnects()
self.assertFalse(self.ciiServer.mock_wasUpdateClientsCalled())
# update to CII
msg = """\
{
"cii" : { "tsUrl":"xxxyyy", "wcUrl":"3o87t3q8ot", "presentationStatus":"fault" }
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
self.assertTrue(self.ciiServer.mock_wasUpdateClientsCalled())
cii = self.ciiServer.cii
self.assertEqual(cii.tsUrl, tsUrl)
self.assertEqual(cii.wcUrl, wcUrl)
def test_browserCiiUpdatesPushed(self):
"""Check that whenever the browser updates CII it is pushed to clients"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
# CSA connects
self.ciiServer.mock_clientConnects()
self.assertFalse(self.ciiServer.mock_wasUpdateClientsCalled())
for i in range(0,10):
c = makeRandomAToZString()
msg = """\
{
"cii" : { "contentId" : "%s" }
}
""" % c
self.mockServerBase.mock_clientSendsMessage(msg)
self.assertTrue(self.ciiServer.mock_wasUpdateClientsCalled())
self.assertEquals(self.ciiServer.cii.contentId, c)
    def test_checkAllCiiPropertiesForwarded(self):
        """Check that all CII properties are pushed through from browser to CSAs (except tsUrl and wcUrl)"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
# CSA connects
self.ciiServer.mock_clientConnects()
# CII updated by browser
msg = """\
{
"cii" : {
"protocolVersion" : "1.1",
"contentId" : "dvb://1234.5678.0123",
"contentIdStatus" : "partial",
"presentationStatus" : "okay muted",
"mrsUrl" : "http://mrs.example.com/mrs-service",
"tsUrl" : "ws://a.b.c.d/ts",
"wcUrl" : "udp://1.2.3.4:123",
"teUrl" : "ws://a.b.c.d/te",
"timelines" : [
{
"timelineSelector" : "urn:blah:plug:floooo",
"timelineProperties" : {
"unitsPerTick" : 1001,
"unitsPerSecond" : 60000,
"accuracy" : 0.3
}
},
{
"timelineSelector" : "urn:blah:plug:seilrgb",
"timelineProperties" : {
"unitsPerTick" : 1,
"unitsPerSecond" : 25
}
}
],
"private" : [ { "type" : "blah", "flurgle" : [ 1, 2, { "f":true}, null, "hello" ] } ]
}
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
cii = self.ciiServer.cii
self.assertEquals(cii.protocolVersion, "1.1")
self.assertEquals(cii.contentId, "dvb://1234.5678.0123")
self.assertEquals(cii.contentIdStatus, "partial")
self.assertEquals(cii.presentationStatus, ["okay", "muted"])
self.assertEquals(cii.mrsUrl, "http://mrs.example.com/mrs-service")
self.assertEquals(cii.tsUrl, tsUrl)
self.assertEquals(cii.wcUrl, wcUrl)
self.assertEquals(cii.teUrl, "ws://a.b.c.d/te")
self.assertEquals(cii.private,[ { "type" : "blah", "flurgle" : [ 1, 2, { "f":True}, None, "hello" ] } ])
def test_contentIdUpdatedForTsServer(self):
"""When the browser updates the content ID, this is passed to the TS Server and CSAs are updated"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
msg = """
{
"cii" : { "contentId" : "abcdef" }
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
self.assertEquals(self.tsServer.contentId, "abcdef")
self.assertTrue(self.tsServer.mock_wasUpdateAllClientsCalled())
    def test_timelineNeededForwardedToBrowser(self):
        """When the TS Server notifies that a timeline is needed, the browser is requested to add that timeline"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
self.mockServerBase.mock_popAllMessagesSentToClient()
# first timeline needed
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:temi:2:160")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
self.assertEquals(len(msgsToBrowser), 1)
msg = json.loads(msgsToBrowser[0])
self.assertEquals(msg["add_timelineSelectors"], [ "urn:dvb:css:timeline:temi:2:160" ])
    def test_timelineNeededForwardedToBrowser2(self):
        """When the TS Server notifies that an additional timeline is needed, the browser is requested to add that timeline"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
self.mockServerBase.mock_popAllMessagesSentToClient()
# first timeline needed
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:temi:2:160")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
# additional timeline needed
self.tsServer.mock_addTimelineSelector("urn:foobar")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
self.assertEquals(len(msgsToBrowser), 1)
msg = json.loads(msgsToBrowser[0])
self.assertEquals(msg["add_timelineSelectors"], [ "urn:foobar" ])
    def test_timelineNotNeededForwardedToBrowser(self):
        """When the TS server notifies that a timeline is no longer needed, the browser is requested to remove that timeline"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
self.mockServerBase.mock_popAllMessagesSentToClient()
# first timeline needed
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:temi:2:160")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
# additional timeline needed
self.tsServer.mock_addTimelineSelector("urn:seiugh")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
# first timeline removed
self.tsServer.mock_removeTimelineSelector("urn:dvb:css:timeline:temi:2:160")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
self.assertEquals(len(msgsToBrowser), 1)
msg = json.loads(msgsToBrowser[0])
self.assertEquals(msg["remove_timelineSelectors"], [ "urn:dvb:css:timeline:temi:2:160" ])
# second timeline removed
self.tsServer.mock_removeTimelineSelector("urn:seiugh")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
self.assertEquals(len(msgsToBrowser), 1)
msg = json.loads(msgsToBrowser[0])
self.assertEquals(msg["remove_timelineSelectors"], [ "urn:seiugh" ])
def test_controlTimestampNotProvidedUntilBrowserProvides(self):
"""When a timeline is newly needed, control timestamps are not provided to CSAs until provided by the browser"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
self.mockServerBase.mock_popAllMessagesSentToClient()
# first timeline needed
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:temi:2:160")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
# initially nothing because nothing provided by browser
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:temi:2:160")
self.assertIsNone(ct)
        # then browser updates the proxy with an appropriate control timestamp
msg = """
{
"controlTimestamps" : {
"urn:dvb:css:timeline:temi:2:160" : {
"contentTime":"55",
"wallClockTime":"1234",
"timelineSpeedMultiplier":1.0
}
}
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
# now browser has a suitable control timestamp
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:temi:2:160")
self.assertEquals(ct.timestamp.contentTime, 55)
self.assertEquals(ct.timestamp.wallClockTime, 1234)
self.assertEquals(ct.timelineSpeedMultiplier, 1.0)
def test_controlTimestampOnlyForApplicableTimelineSelector(self):
"""When a browser proffers a control timestamp, it is only used for the timeline selector it is specified for"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
self.mockServerBase.mock_popAllMessagesSentToClient()
# two timelines needed
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:temi:2:160")
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:pts")
msgsToBrowser = self.mockServerBase.mock_popAllMessagesSentToClient()
# initially nothing because nothing provided by browser
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:temi:2:160")
self.assertIsNone(ct)
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:pts")
self.assertIsNone(ct)
        # then browser updates the proxy with an appropriate control timestamp
msg = """
{
"controlTimestamps" : {
"urn:dvb:css:timeline:pts" : {
"contentTime":"55",
"wallClockTime":"1234",
"timelineSpeedMultiplier":1.0
}
}
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
# now browser has a suitable control timestamp for one of them only
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:temi:2:160")
self.assertEquals(ct, None)
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:pts")
self.assertEquals(ct.timestamp.contentTime, 55)
self.assertEquals(ct.timestamp.wallClockTime, 1234)
self.assertEquals(ct.timelineSpeedMultiplier, 1.0)
    def test_tsAndCiiServersDisabledWhenBrowserDisconnects(self):
        """When the browser disconnects, the CII and TS servers are disabled"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
self.assertTrue(self.ciiServer.enabled)
self.assertTrue(self.tsServer.enabled)
# browser disconnects
self.mockServerBase.mock_clientDisconnects()
self.assertFalse(self.ciiServer.enabled)
self.assertFalse(self.tsServer.enabled)
def test_controlTimestampsForgottenAfterTimelineNotNeeded(self):
"""When a timeline is no longer needed, but then needed again later, previous control timestamps are forgotten"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
# timeline is needed and browser provides control timestamp
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:pts")
msg = """\
{
"controlTimestamps" : {
"urn:dvb:css:timeline:pts" : {
"contentTime":"9573",
"wallClockTime":"12340001",
"timelineSpeedMultiplier":0.5
}
}
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
self.tsServer.mock_removeTimelineSelector("urn:dvb:css:timeline:pts")
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:pts")
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:pts")
self.assertEquals(ct, None)
def test_messageFromBrowserDoesNotCauseTimelinesToBeForgotten(self):
"""Messages from the browser only update the control timestamps they list and do not affect others"""
p = CssProxyEngine(self.ciiServer, self.tsServer, ciiUrl, tsUrl, wcUrl)
# browser connects
self.mockServerBase.mock_clientConnects()
# timeline is needed and browser provides control timestamp
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:pts")
self.tsServer.mock_addTimelineSelector("urn:dvb:css:timeline:temi:1:1")
msg = """\
{
"controlTimestamps" : {
"urn:dvb:css:timeline:pts" : {
"contentTime":"9573",
"wallClockTime":"12340001",
"timelineSpeedMultiplier":0.5
},
"urn:dvb:css:timeline:temi:1:1" : {
"contentTime":"1",
"wallClockTime":"12440001",
"timelineSpeedMultiplier":0.5
}
}
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
msg = """\
{
"controlTimestamps" : {
"urn:dvb:css:timeline:temi:1:1" : {
"contentTime":"15",
"wallClockTime":"13140001",
"timelineSpeedMultiplier":0.5
}
}
}
"""
self.mockServerBase.mock_clientSendsMessage(msg)
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:pts")
self.assertEquals(ct.timestamp.contentTime, 9573)
self.assertEquals(ct.timestamp.wallClockTime, 12340001)
self.assertEquals(ct.timelineSpeedMultiplier, 0.5)
ct = self.tsServer.mock_getMostRecentCt("urn:dvb:css:timeline:temi:1:1")
self.assertEquals(ct.timestamp.contentTime, 15)
self.assertEquals(ct.timestamp.wallClockTime, 13140001)
self.assertEquals(ct.timelineSpeedMultiplier, 0.5)
if __name__ == "__main__":
unittest.main(verbosity=1)
``` |
{
"source": "2joephillips/Shooter",
"score": 3
} |
#### File: 2joephillips/Shooter/globals.py
```python
import pygame
def initialize():
global SCREEN_WIDTH
SCREEN_WIDTH = 800
global SCREEN_HEIGHT
SCREEN_HEIGHT = int(SCREEN_WIDTH * 0.8)
global GRAVITY
GRAVITY = 0.75
global BACKGROUNDCOLOR
BACKGROUNDCOLOR = (144, 201, 130)
global FPS
FPS = 60
global RED
RED = (255,0,0)
```
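Usage is a two-step pattern: call initialize() once at startup, then read the module attributes from any importer:
```python
import globals  # the module above

globals.initialize()
print(globals.SCREEN_WIDTH, globals.SCREEN_HEIGHT)  # 800 640
```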
#### File: 2joephillips/Shooter/weapons.py
```python
import pygame
import globals
class Bullet(pygame.sprite.Sprite):
def __init__(self, x, y, direction):
pygame.sprite.Sprite.__init__(self)
image = pygame.image.load('img/icons/bullet.png').convert_alpha()
self.speed = 10
self.image = image
self.rect = self.image.get_rect()
self.rect.center = (x,y)
self.direction = direction
def update(self, bullet_group, player, enemies):
# move bullet
self.rect.x += (self.direction * self.speed)
# if bullet gone of screen
if self.rect.right < 0 or self.rect.left > globals.SCREEN_WIDTH:
self.kill()
# check collision with characters
if pygame.sprite.spritecollide(player.sprite, bullet_group, False):
if player.alive:
self.kill()
for enemy in enemies:
if pygame.sprite.spritecollide(enemy.sprite, bullet_group, False):
if enemy.alive:
self.kill()
``` |
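Bullet.update() leans on pygame.sprite.spritecollide(); a self-contained sketch of that check, with plain Surfaces standing in for the game's image assets:
```python
import pygame

class Box(pygame.sprite.Sprite):
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((10, 10))
        self.rect = self.image.get_rect(center=(x, y))

bullets = pygame.sprite.Group(Box(50, 50))
target = Box(52, 50)
# dokill=False reports overlapping sprites without removing them from the group
print(pygame.sprite.spritecollide(target, bullets, False))  # one hit
```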
{
"source": "2js855/symstore",
"score": 2
} |
#### File: symstore/symstore/command_line.py
```python
from __future__ import absolute_import
import sys
import argparse
import symstore
class CompressionNotSupported(Exception):
pass
def parse_args():
parser = argparse.ArgumentParser(
description="publish windows debugging files")
parser.add_argument("-d", "--delete",
metavar="TRANSACTION_ID",
help="delete transaction")
parser.add_argument("-z", "--compress",
action="store_true",
help="publish compressed files")
parser.add_argument("-p", "--product-name", default="",
help="name of the product")
parser.add_argument("-r", "--product-version", default="",
help="version of the product")
parser.add_argument("--version",
action="version",
version="symstore %s" % symstore.VERSION,
help="show program's version number and exit")
parser.add_argument("store_path", metavar="STORE_PATH",
type=str,
help="root directory of the symbol store")
parser.add_argument("files", metavar="FILE", type=str, nargs="*",
help="PDB or PE file(s) to publish")
return parser.parse_args()
def err_exit(error_msg):
sys.stderr.write("%s\n" % error_msg)
sys.exit(1)
def unknown_ext_err(file, file_extension):
if len(file_extension) > 0:
msg = "unknown file extension '%s'" % file_extension
else:
msg = "no file extension"
err_exit("%s: %s, can't figure out file format" % (file, msg))
def check_compression_support(compress_flag):
if not compress_flag:
        # compression not requested, no need to check
return
from symstore import cab
if not cab.compression_supported:
raise CompressionNotSupported()
def delete_action(sym_store, transaction_id):
try:
sym_store.delete_transaction(transaction_id)
except symstore.TransactionNotFound:
err_exit("no transaction with id '%s' found" % transaction_id)
def add_action(sym_store, files, product_name, product_version, compress):
try:
        # error out if compression was requested but is unavailable
check_compression_support(compress)
# create new add transaction, add all specified files
transaction = sym_store.new_transaction(product_name, product_version)
for file in files:
transaction.add_file(file, compress)
# commit the transaction to the store
sym_store.commit(transaction)
except symstore.UnknownFileExtension as e:
unknown_ext_err(file, e.file_extension)
except symstore.FileFormatError as e:
err_exit("%s: invalid %s file: %s" % (file, e.format_name, e))
except CompressionNotSupported:
err_exit("gcab module not available, compression not supported")
except symstore.FileNotFound as e:
err_exit("No such file: %s" % e.filename)
def main():
args = parse_args()
sym_store = symstore.Store(args.store_path)
if args.delete is not None:
delete_action(sym_store, args.delete)
return
# otherwise this is an 'add' action
add_action(sym_store, args.files, args.product_name,
args.product_version, args.compress)
```
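The add flow can also be driven programmatically, mirroring add_action() above; the store and file paths below are placeholders:
```python
import symstore

store = symstore.Store("/tmp/symstore")
transaction = store.new_transaction("myapp", "1.0")
transaction.add_file("build/myapp.pdb", False)  # False: no compression
store.commit(transaction)
```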
#### File: symstore/symstore/pe.py
```python
from __future__ import absolute_import
import os
import struct
from symstore import errs
from symstore import fileio
PE_SIGNATURE = b"PE\0\0"
PE_SIGNATURE_POINTER = 0x3C
PE_SIG_SIZE = 4
MACHINE_SIZE = 2
NUMBER_OF_SECTION_SIZE = 2
TIME_DATE_STAMP_SIZE = 4
POINTER_TO_SYMBOL_TABLE_SIZE = 4
NUMBER_OF_SYMBOLS_SIZE = 4
SIZE_OF_OPTIONAL_HEADER_SIZE = 2
CHARACTERISTICS_SIZE = 2
# TimeDateStamp field's offset relative to PE signature
TIME_DATE_STAMP_OFFSET = \
PE_SIG_SIZE + \
MACHINE_SIZE + \
NUMBER_OF_SECTION_SIZE
# Optional header offset relative to PE signature
OPTIONAL_HEADER_OFFSET = \
PE_SIG_SIZE + \
MACHINE_SIZE + \
NUMBER_OF_SECTION_SIZE + \
TIME_DATE_STAMP_SIZE + \
POINTER_TO_SYMBOL_TABLE_SIZE + \
NUMBER_OF_SYMBOLS_SIZE + \
SIZE_OF_OPTIONAL_HEADER_SIZE + \
CHARACTERISTICS_SIZE
# SizeOfImage field's offset relative to optional header start
SIZE_OF_IMAGE_OFFSET = 56
class PEFormatError(errs.FileFormatError):
format_name = "PE"
def _read_u32(file, size, offset):
"""
    Read a 32-bit little-endian unsigned integer from an opened file.
    :param file: opened file handle
    :param size: file size
    :param offset: the offset of the integer
"""
if offset + 4 > size:
raise PEFormatError("data offset %s beyond end of file %s" %
(offset, size))
file.seek(offset)
return struct.unpack("<I", file.read(4))[0]
class PEFile:
"""
Simple PE file parser, that loads two fields used by symstore:
* TimeDateStamp from file header
* SizeOfImage from optional header
The values are accessed by reading the object's member variables by
the same name, e.g.
PEFile("some.exe").TimeDateStamp
    :raises symstore.FileNotFound: if the specified file does not exist
"""
def __init__(self, filepath):
with fileio.open_rb(filepath) as f:
fsize = os.fstat(f.fileno()).st_size
# load PE signature offset
pe_sig_offset = _read_u32(f, fsize, PE_SIGNATURE_POINTER)
# check that file contains valid PE signature
f.seek(pe_sig_offset)
if f.read(4) != PE_SIGNATURE:
raise PEFormatError("PE signature not found")
# load TimeDateStamp field
self.TimeDateStamp = \
_read_u32(f, fsize, pe_sig_offset+TIME_DATE_STAMP_OFFSET)
# load SizeOfImage field
self.SizeOfImage = _read_u32(f, fsize,
pe_sig_offset +
OPTIONAL_HEADER_OFFSET +
SIZE_OF_IMAGE_OFFSET)
```
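TimeDateStamp and SizeOfImage are the two fields a symbol store combines into the lookup hash for PE files. A usage sketch; the hash formatting below follows the usual symstore convention (eight hex digits of timestamp, then image size in hex) and the path is a placeholder:
```python
pe = PEFile("build/myapp.exe")  # placeholder path
# conventional PE symbol-store hash: %08X timestamp + %x image size (assumed convention)
print("%08X%x" % (pe.TimeDateStamp, pe.SizeOfImage))
```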
#### File: tests/unit/test_fileio.py
```python
import os
import tempfile
import shutil
from os import path
from tests import testcase
from symstore import fileio
from symstore import FileNotFound
class TestOpenRb(testcase.TestCase):
"""
test fileio.open_rb() function
"""
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_file_not_found(self):
"""
test the case when file we want to open does not exist
"""
file_path = path.join(self.temp_dir, "foo")
with self.assertRaises(FileNotFound) as cm:
fileio.open_rb(file_path)
# check that exception have correct filename assigned
self.assertEqual(cm.exception.filename, file_path)
def test_other_error(self):
"""
        test the case when we get an 'unexpected' error,
        an error which we don't explicitly handle
"""
dir_path = path.join(self.temp_dir, "bar")
os.mkdir(dir_path)
self.assertRaisesRegex(IOError, ".*Is a directory",
fileio.open_rb, dir_path)
``` |
{
"source": "2JS/alpha-zero-general",
"score": 3
} |
#### File: alpha-zero-general/ataxx/AtaxxLogic.py
```python
_directions_1 = [(1,1),(1,0),(1,-1),(0,-1),(-1,-1),(-1,0),(-1,1),(0,1)]
_directions_2 = [(2,2),(2,1),(2,0),(2,-1),(2,-2),(1,-2),(0,-2),(-1,-2),(-2,-2),(-2,-1),(-2,0),(-2,1),(-2,2),(-1,2),(0,2),(1,2)]
class Board:
def __init__(self, n=7):
self.n = n
self.pieces = [[0]*self.n for _ in range(self.n)]
self.pieces[0][0] = 1
self.pieces[0][-1] = -1
self.pieces[-1][0] = -1
self.pieces[-1][-1] = 1
def __getitem__(self, index):
return self.pieces[index]
    def countDiff(self, color):
        """Counts the piece difference for the given color: own pieces minus
        opponent pieces (1 for white, -1 for black, 0 for empty spaces)"""
count = 0
for y in range(self.n):
for x in range(self.n):
if self[x][y]==color:
count += 1
if self[x][y]==-color:
count -= 1
return count
def get_legal_moves(self, color):
moves = []
for x0 in range(self.n):
for y0 in range(self.n):
for x1 in range(self.n):
for y1 in range(self.n):
dx = x1 - x0
dy = y1 - y0
if self[x0][y0] == color and self[x1][y1] == 0:
if (dx, dy) in _directions_1:
moves.append((None, None, x1, y1))
elif (dx, dy) in _directions_2:
moves.append((x0, y0, x1, y1))
return moves
def has_legal_moves(self, color):
for x0 in range(self.n):
for y0 in range(self.n):
for x1 in range(self.n):
for y1 in range(self.n):
dx = x1 - x0
dy = y1 - y0
if (dx, dy) in _directions_1 or (dx, dy) in _directions_2:
if self[x0][y0] == color and self[x1][y1] == 0:
return True
return False
def execute_move(self, move, color):
x0, y0, x1, y1 = move
        if x1 is None and y1 is None:
return
assert self[x1][y1] == 0
        if x0 is None and y0 is None:
            assert len([0 for _dx, _dy in _directions_1 if {x1+_dx, y1+_dy} <= set(range(self.n)) and self[x1+_dx][y1+_dy] == color]) > 0
else:
dx, dy = x1-x0, y1-y0
assert (-2 <= dx <= 2 and -2 <= dy <= 2)
assert self[x0][y0] == color
assert (dx, dy) in _directions_2
self[x0][y0] = 0
self[x1][y1] = color
for _dx, _dy in _directions_1:
            if {x1 + _dx, y1 + _dy} <= set(range(self.n)):
if self[x1 + _dx][y1 + _dy] == -color:
self[x1 + _dx][y1 + _dy] = color
``` |
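A quick check of the move encoding: clone moves are reported as (None, None, x1, y1), while jump moves keep their origin square:
```python
board = Board()
moves = board.get_legal_moves(1)
clones = [m for m in moves if m[0] is None]      # one-step duplication moves
jumps = [m for m in moves if m[0] is not None]   # two-step jump moves
print(len(clones), len(jumps))
print(board.countDiff(1))  # 0: both sides start with two pieces
```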
{
"source": "2jun0/CodeSecret-Backend",
"score": 2
} |
#### File: CodeSecret-Backend/code_modifing/code_modifier.py
```python
import time
import hashlib
import config
import github
import git as g
import git_crawling as gc
import re
import db
from models import Repository, File, SecretKey
from .python_lang_module import PythonLangModule
from .javascript_lang_module import JavascriptLangModule
from .c_lang_module import CLangModule
from .java_lang_module import JavaLangModule
# This class must not be reused, because its repo is fixed (it cannot be changed).
class CodeModifier:
def __init__(self, upstream_repo:Repository, files=None):
assert upstream_repo.github_obj
self.is_prepared = False
self.upstream_repo:Repository = upstream_repo
self.files = files
self.fork_repo:Repository = None
        self.lang_module_dict = dict()  # each language module inherits LangModule and handles file and header-file changes
    # Preparation can only run once.
    # Creates the fork repository and the code-fix branch where the code will be modified.
def prepare(self):
assert self.is_prepared == False
self.is_prepared = True
self.fork_upstream_repo()
# check gitignore file
gitignore_file_dict = db.get_file(fullname='.gitignore', repo_fullname=self.upstream_repo.fullname)
if gitignore_file_dict:
self.gitignore_file = db.file_dict_to_obj(gitignore_file_dict)
self.gitignore_file.content = gc.get_file_content(self.gitignore_file)
        # create it if it doesn't exist
else:
            # header file content
self.gitignore_file = self.fork_repo.create_file('.gitignore', 'Secret key leak fix', '', branch='code-fix')
self.gitignore_file.content = ''
    # Modifies a file.
def modify_file(self, file:File, origin_content:str, keys:list):
try:
if not self.is_prepared:
self.prepare()
if file.fullname.endswith('.py'):
if 'py' not in self.lang_module_dict:
self.lang_module_dict['py'] = PythonLangModule(self.fork_repo, self.gitignore_file)
self.lang_module_dict['py'].modify_file(file, origin_content, keys)
elif file.fullname.endswith('.js'):
if 'js' not in self.lang_module_dict:
self.lang_module_dict['js'] = JavascriptLangModule(self.fork_repo, self.gitignore_file)
self.lang_module_dict['js'].modify_file(file, origin_content, keys)
elif file.fullname.endswith('.c') or file.fullname.endswith('.cpp'):
if 'c' not in self.lang_module_dict:
self.lang_module_dict['c'] = CLangModule(self.fork_repo, self.gitignore_file)
self.lang_module_dict['c'].modify_file(file, origin_content, keys)
elif file.fullname.endswith('.java'):
if 'java' not in self.lang_module_dict:
self.lang_module_dict['java'] = JavaLangModule(self.fork_repo, self.gitignore_file)
self.lang_module_dict['java'].modify_file(file, origin_content, keys)
else:
return
except Exception as e:
self.fork_repo.github_obj.delete()
self.fork_repo.github_obj = None
raise e
def pull_request(self):
if self.fork_repo:
pull = self.upstream_repo.github_obj.create_pull("[Code Secret] Secret key leak problem fix", "Secret key leak problem fix", "master", "{}:code-fix".format(config.GITHUB_ACCOUNT['username']), True)
print('pull request : ', pull)
self.fork_repo.github_obj.delete()
self.fork_repo = None
return pull
    # Forks the upstream and creates a code-fix branch.
def fork_upstream_repo(self):
# fork
self.fork_repo = g.repository_to_obj(repo = self.upstream_repo.github_obj.create_fork(), owner = config.GITHUB_ACCOUNT['username'])
self.fork_repo.upstream_repo = self.upstream_repo
# create code-fix branch
refs = self.fork_repo.github_obj.get_git_refs()
code_fix_ref = None
gobj_master_ref = None
for ref in refs:
if ref.ref == 'refs/heads/master':
gobj_master_ref = ref
            elif ref.ref == 'refs/heads/code-fix':
code_fix_ref = ref
if not code_fix_ref:
self.fork_repo.github_obj.create_git_ref('refs/heads/code-fix', gobj_master_ref.object.sha)
```
#### File: CodeSecret-Backend/code_modifing/default_lang_module.py
```python
import os
import sys
from models import File, Repository
import db
import git_crawling as gc
from .lang_module import LangModule
import re
class DefaultLangModule(LangModule):
def __init__(self, fork_repo:Repository, header_file_name:str, gitignore_file:File):
super().__init__(fork_repo, header_file_name, None, None, 0, gitignore_file)
self.prepare_header_file()
def _get_comment_str(self, s:str):
return s
def _get_import_str(self):
return ''
def _get_header_def(self, val_name:str):
return '{} = "YOUR SECRET KEY"'.format(val_name)
def _header_update(self, key_name:str):
self.header_file.content += '{}\n'.format(self._get_header_def(key_name))
def _get_init_header(self):
return self._get_comment_str('Secret Key Count = {}\n'.format(self.secret_key_cnt))
def modify_file(self, file:File, origin_content, keys):
# 대상 key 수정, 파일수정
## key들을 모두 SECRET_KEY_번호 방식으로 수정
modified_content = origin_content
key_vals = []
for key in keys:
if key.content not in key_vals:
self.secret_key_cnt += 1
                key_name = 'SECRET_KEY_{}'.format(self.secret_key_cnt)  # the new replacement variable
self._header_update(key_name)
                modified_content = re.sub('[\'\"]{}[\'\"]'.format(re.escape(key.content)), key_name, modified_content)
key_vals.append(key.content)
        # update the '#Secret Key Count' value in the header file
        self.header_file.content = re.sub(
            self._get_comment_str(r'Secret Key Count = (\d+)'),
self._get_comment_str('Secret Key Count = {}'.format(self.secret_key_cnt)),
self.header_file.content)
        # prepend the header-file import
modified_content = self._get_import_str() + '\n' + modified_content
        # push the modified code file & header file contents
self.fork_repo.update_file(
file = file, title = 'Secret key leak fix',
content = modified_content, branch='code-fix')
self.fork_repo.update_file(
file = self.header_file, title='Secret key leak fix',
content = self.header_file.content, branch='code-fix')
def prepare_header_file(self):
super().prepare_header_file()
header_file_dict = db.get_file(self.header_file_name, self.fork_repo.upstream_repo.fullname)
if header_file_dict:
self.header_file = db.file_dict_to_obj(header_file_dict)
self.header_file.content = gc.get_file_content(self.header_file)
            # the secret key count is recorded in the file as e.g. '#Secret Key Count = 10'
            m = re.search(
                self._get_comment_str(r'Secret Key Count = (\d+)'),
self.header_file.content)
if m:
self.secret_key_cnt = int(m.group(1))
else:
self.secret_key_cnt = 0
else:
            # header file content
self.secret_key_cnt = 0
content = self._get_init_header()
self.header_file = self.fork_repo.create_file(
file_fullname = self.header_file_name, title='Secret key leak fix',
content = content, branch='code-fix')
```
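The core substitution step is easiest to see in isolation: a quoted secret literal becomes a named placeholder that the generated header file then defines (the values below are made up):
```python
import re

content = 'client = connect("abcd-1234-secret")'
leaked = 'abcd-1234-secret'
fixed = re.sub('[\'\"]{}[\'\"]'.format(re.escape(leaked)), 'SECRET_KEY_1', content)
print(fixed)  # client = connect(SECRET_KEY_1)
```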
#### File: CodeSecret-Backend/code_modifing/java_lang_module.py
```python
from models import File, Repository
from .default_lang_module import DefaultLangModule
import re
class JavaLangModule(DefaultLangModule):
def __init__(self, fork_repo:Repository, gitignore_file:File):
super().__init__(fork_repo, 'AASecretKeys.java', gitignore_file)
def _get_comment_str(self, s:str):
return '//{}'.format(s)
def _get_import_str(self):
return 'import {}.*'.format(self.header_file_name[:-5])
def _get_header_def(self, val_name:str):
return 'public static String {} = "YOUR SECRET KEY";'.format(val_name)
def _header_update(self, key_name:str):
        self.header_file.content = re.sub(r'public class AASecretKeys\s*{', 'public class AASecretKeys {\n\t%s' % (self._get_header_def(key_name)), self.header_file.content)
def _get_init_header(self):
return super()._get_init_header() + 'public class AASecretKeys {\n}'
```
#### File: CodeSecret-Backend/code_modifing/python_lang_module.py
```python
from models import File, Repository
from .default_lang_module import DefaultLangModule
class PythonLangModule(DefaultLangModule):
def __init__(self, fork_repo:Repository, gitignore_file:File):
super().__init__(fork_repo, '_secret_keys.py', gitignore_file)
def _get_comment_str(self, s:str):
return '#{}'.format(s)
def _get_import_str(self):
        return 'from {} import *'.format(self.header_file_name[:-3])  # strip '.py' to get the module name
def _get_header_def(self, val_name:str):
return '{} = \'YOUR SECRET KEY\''.format(val_name)
```
#### File: 2jun0/CodeSecret-Backend/git_crawling.py
```python
import time
from selenium import webdriver
import requests
import git as g
from bs4 import BeautifulSoup
from models import User, Repository, File
def get_file_content(file: File, branch='master'):
return requests.get(url = 'https://raw.githubusercontent.com/{}/{}/{}'.format(file.repo_fullname, branch, file.fullname)).text
#-------------------------------------------------------------
# The functions below scrape web pages, so they are very
# unstable. Avoid using them if at all possible.
def get_all_repositories(user: User, filter: object=None):
repos = g.get_all_repositories(user.github_username)
obj_repos = []
for repo in repos:
repo = g.repository_to_obj(repo, user.id)
if filter:
if not filter(repo): continue
obj_repos.append(repo)
return obj_repos
def get_all_files(repo: Repository, filter: object=None, branch='master'):
files = []
dirs = ['https://github.com/{}/tree/{}/'.format(repo.fullname, branch)]
while dirs:
url = dirs.pop(0)
driver = webdriver.Chrome('chromedriver_win32/chromedriver.exe')
driver.get(url)
html = driver.page_source
while 'Skeleton' in html:
time.sleep(0.1)
html = driver.page_source
soup = BeautifulSoup(html, 'lxml')
file_tags = soup.select('.repository-content .js-navigation-item')
for tag in file_tags:
file_tag = tag.select('span a')
if len(file_tag) == 0:
continue
file_tag = file_tag[0]
name = file_tag.text
            file_sha = None  # file_tag.attrs['id'].split('-')[-1]
commit_sha = tag.select('.commit-message a')[0].attrs['href'][-40:]
is_file = not file_tag.attrs['href'].startswith('/{}/tree'.format(repo.fullname))
fullname = file_tag.attrs['href'].split('/{}/'.format(branch))[-1]
file = File(
fullname=fullname, name=name,
last_commit_sha=commit_sha, repo_fullname=repo.fullname, sha=file_sha
)
if filter:
if not filter(file, is_file): continue
if not is_file: # is directory
dirs.append('https://github.com'+file_tag.attrs['href'])
else:
files.append(file)
driver.quit()
return files
```
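get_file_content() only needs repo_fullname and fullname to build the raw.githubusercontent.com URL; a sketch against a well-known public repo (the sha fields play no role here, and the call performs a live HTTP request):
```python
from models import File

readme = File(fullname='README', name='README', last_commit_sha=None,
              repo_fullname='octocat/Hello-World', sha=None)
print(get_file_content(readme))  # fetches the raw file over HTTP
```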
#### File: 2jun0/CodeSecret-Backend/models.py
```python
class User:
def __init__(self, id, password, github_username, github_obj=None):
self.id = id
self.password = password
self.github_username = github_username
self.github_obj = github_obj
class File:
def __init__(self, fullname, name, last_commit_sha, repo_fullname, sha, github_obj=None, content=None):
self.fullname = fullname
self.name = name
self.last_commit_sha = last_commit_sha
self.repo_fullname = repo_fullname
self.sha = sha
self.github_obj = github_obj
        self.content = content  # special value (not stored in the db)
class Repository:
def __init__(self, fullname, name, last_commit_date, last_commit_sha, owner, github_obj=None):
self.fullname = fullname
self.name = name
self.last_commit_date = last_commit_date
self.last_commit_sha = last_commit_sha
self.owner = owner
self.github_obj = github_obj
self.upstream_repo = None
def update_file(self, file: File, title: str, content: str, branch: str):
assert self.github_obj
        # execute the file update
result = self.github_obj.update_file(file.fullname, title, content, sha=file.sha, branch=branch)
commit = result['commit']
content_file = result['content']
        # update the file info
file.sha = content_file.sha
file.last_commit_sha = commit.sha
file.content = content
def create_file(self, file_fullname: str, title: str, content: str, branch: str):
assert self.github_obj
        # create the file
result = self.github_obj.create_file(file_fullname, title, content, branch=branch)
commit = result['commit']
content_file = result['content']
        # create the File object
new_file = File(
fullname=file_fullname,
name=content_file.name,
last_commit_sha=commit.sha,
repo_fullname=self.fullname,
sha=content_file.sha,
content=content)
return new_file
class SecretKey:
def __init__(self, y, x, file_fullname, file_commit_sha, content, repo_last_commit_sha=None, pull_num=0, github_obj=None):
self.y = y
self.x = x
self.file_fullname = file_fullname
self.file_commit_sha = file_commit_sha
self.content = content
self.github_obj = github_obj
self.repo_last_commit_sha = repo_last_commit_sha
self.pull_num = pull_num
``` |
{
"source": "2jun0/Diffie-Hellman-example",
"score": 3
} |
#### File: 2jun0/Diffie-Hellman-example/test.py
```python
from argparse import ArgumentParser
parser = ArgumentParser(description='Diffie-Hellman key generation program')
parser.add_argument('--nbytes', required=True, type=int)
from key import SecretKey, generate_private_key, generate_public_key, generate_secret_key
from Crypto.Cipher import DES
def encrypt_DES(text: bytes, key: SecretKey) -> bytes:
while len(text) % 8 != 0:
text += b'\x00'
des = DES.new(key.to_bytes(), DES.MODE_ECB)
return des.encrypt(text)
def decrypt_DES(encrypted: bytes, key: SecretKey) -> bytes:
des = DES.new(key.to_bytes(), DES.MODE_ECB)
decrypted = des.decrypt(encrypted)
return decrypted.rstrip(b'\x00')
def read_file(path: str) -> bytes:
text = b''
try:
rf = open(path, 'rb')
tmp = rf.read(1024)
while tmp != b'':
text += tmp
tmp = rf.read(1024)
finally:
rf.close()
return text
def save_file(path: str, text: bytes):
try:
wf = open(path, 'wb')
text = wf.write(text)
finally:
wf.close()
return text
if __name__ == '__main__':
args = parser.parse_args()
nbytes = args.nbytes
    # generate Alice's and Bob's private keys of nbytes size
Alice_pri_key = generate_private_key(nbytes, 10556253568756343647, 5)
Bob_pri_key = generate_private_key(nbytes, 10556253568756343647, 5)
    print(f'Prime used: {Alice_pri_key.q}, primitive root of the prime: {Alice_pri_key.a}')
    print('[Alice\'s and Bob\'s private keys]\n'
          f'Alice\'s private key : {Alice_pri_key}\n'
          f'Bob\'s private key : {Bob_pri_key}\n')
    # generate Alice's and Bob's public keys
Alice_pub_key = generate_public_key(pri_key=Alice_pri_key)
Bob_pub_key = generate_public_key(pri_key=Bob_pri_key)
    print('[Alice\'s and Bob\'s public keys]\n'
          f'Alice\'s public key : {Alice_pub_key}\n'
          f'Bob\'s public key : {Bob_pub_key}\n')
    # generate Alice's and Bob's shared secret keys
Alice_sec_key = generate_secret_key(my_pri_key=Alice_pri_key, other_pub_key=Bob_pub_key)
Bob_sec_key = generate_secret_key(my_pri_key=Bob_pri_key, other_pub_key=Alice_pub_key)
    print('[Alice\'s and Bob\'s secret keys]\n'
          f'Alice\'s secret key : {Alice_sec_key}\n'
          f'Bob\'s secret key : {Bob_sec_key}\n')
    print('Do the secret keys match? : %s' % (Alice_sec_key.val == Bob_sec_key.val))
    # Alice sends The Little Prince file
Alice_text = read_file('alice\'s 어린왕자.txt')
encrypted = encrypt_DES(Alice_text, Alice_sec_key)
    # Bob saves The Little Prince file
Bob_text = decrypt_DES(encrypted, Bob_sec_key)
save_file('Bob\'s 어린왕자.txt', Bob_text)
    print('[File transfer complete!]')
``` |
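The arithmetic the key module presumably wraps fits in a few lines; with public parameters (q, a), each side publishes a^x mod q and both derive the same secret:
```python
q, a = 23, 5                    # toy public prime and primitive root
x_alice, x_bob = 6, 15          # private keys
pub_alice = pow(a, x_alice, q)  # 8
pub_bob = pow(a, x_bob, q)      # 19
# each side raises the other's public key to its own private exponent
assert pow(pub_bob, x_alice, q) == pow(pub_alice, x_bob, q) == 2
```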
{
"source": "2jun0/RSA_example",
"score": 3
} |
#### File: 2jun0/RSA_example/decrypt.py
```python
from util import get_bytes_length
from key import PrivateKey
def decrypt_int(int_val:int, key:PrivateKey) -> int:
return pow(int_val, key.d, key.n)
def decrypt_block(block: bytes, key: PrivateKey) -> bytes:
    """Decrypt a block of data.
    :to prevent overflow, the following must be satisfied:
    :len(data) <= len(key) + 11
"""
block_len = get_bytes_length(key.n)
encrypted = int.from_bytes(block, byteorder='big')
decrypted = decrypt_int(encrypted, key)
clear = decrypted.to_bytes(block_len, byteorder='big')
if len(block) > block_len:
raise Exception('Decryption failed')
    # error if the start marker cannot be found
if clear[0:2] != b'\x00\x02':
raise Exception('Decryption failed')
    # error if the second marker cannot be found
try:
origin_idx = clear.index(b'\x00', 2)
except ValueError:
raise Exception('Decryption failed')
return clear[origin_idx + 1:]
def decrypt(data: bytes, key: PrivateKey) -> bytes:
block_len = get_bytes_length(key.n)
decrypted = b''
for i in range(0, len(data), block_len):
decrypted += decrypt_block(data[i:i+block_len], key)
return decrypted
def decrypt_file(src: str, dest: str, key: PrivateKey):
block_len = get_bytes_length(key.n)
try:
rf = open(src, 'rb')
wf = open(dest, 'wb')
        data = rf.read(block_len)
        while data != b"":
            decrypted = decrypt_block(data, key)
            wf.write(decrypted)
            data = rf.read(block_len)
finally:
rf.close()
wf.close()
``` |
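The framing decrypt_block() unwraps is PKCS#1 v1.5: a 0x00 0x02 prefix, non-zero padding bytes, a 0x00 separator, then the message:
```python
block = b'\x00\x02' + b'\xaa' * 8 + b'\x00' + b'hello'
assert block[0:2] == b'\x00\x02'     # start marker
start = block.index(b'\x00', 2) + 1  # skip past the separator
print(block[start:])                 # b'hello'
```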
{
"source": "2KangHo/pacman",
"score": 3
} |
#### File: 2KangHo/pacman/actors.py
```python
import math
from collections import defaultdict
import cocos.sprite
import cocos.audio
import cocos.actions as ac
import cocos.euclid as eu
import cocos.collision_model as cm
import pyglet.image
from pyglet.image import Animation
def load_animation(img, x, y, loop):
raw = pyglet.image.load(img)
seq = pyglet.image.ImageGrid(raw, x, y)
return Animation.from_image_sequence(seq, 0.25, loop)
class Die(cocos.sprite.Sprite):
def __init__(self, pos):
die_img = load_animation('assets/Player/pacman_die.png', 1, 11, False)
super(Die, self).__init__(die_img, pos)
self.do(ac.Delay(1) + ac.CallFunc(self.kill))
class Actor(cocos.sprite.Sprite):
def __init__(self, img, x, y):
super(Actor, self).__init__(img, position=(x, y))
self._cshape = cm.CircleShape(self.position, self.width*0.5)
@property
def cshape(self):
self._cshape.center = eu.Vector2(self.x, self.y)
return self._cshape
class PacDot(Actor):
def __init__(self, x, y):
super(PacDot, self).__init__('assets/PacDot/pacdot.png', x, y)
self.score = 10
class PacDotBig(Actor):
def __init__(self, x, y):
super(PacDotBig, self).__init__('assets/PacDot/pacdot_big.png', x, y)
self.score = 50
class Player(Actor):
KEY_PRESSED = defaultdict(int)
def __init__(self, x, y, actions):
pacman_up = load_animation('assets/Player/pacman_up.png', 1, 4, True)
pacman_down = load_animation('assets/Player/pacman_down.png', 1, 4, True)
pacman_left = load_animation('assets/Player/pacman_left.png', 1, 4, True)
pacman_right = load_animation('assets/Player/pacman_right.png', 1, 4, True)
        super(Player, self).__init__('assets/Player/pacman_start.png', x, y)
self.speed = eu.Vector2(200, 0)
self.do(actions)
class Blinky(Actor):
def __init__(self, x, y, actions):
blinky_up = load_animation('assets/Ghosts/blinky_up.png', 1, 2, True)
blinky_down = load_animation('assets/Ghosts/blinky_down.png', 1, 2, True)
blinky_left = load_animation('assets/Ghosts/blinky_left.png', 1, 2, True)
blinky_right = load_animation('assets/Ghosts/blinky_right.png', 1, 2, True)
super(Blinky, self).__init__(blinky_up, x, y)
self.speed = eu.Vector2(200, 0)
self.do(actions)
class Clyde(Actor):
def __init__(self, x, y, actions):
clyde_up = load_animation('assets/Ghosts/clyde_up.png', 1, 2, True)
clyde_down = load_animation('assets/Ghosts/clyde_down.png', 1, 2, True)
clyde_left = load_animation('assets/Ghosts/clyde_left.png', 1, 2, True)
clyde_right = load_animation('assets/Ghosts/clyde_right.png', 1, 2, True)
super(Clyde, self).__init__(clyde_up, x, y)
self.speed = eu.Vector2(200, 0)
self.do(actions)
class Inky(Actor):
def __init__(self, x, y, actions):
inky_up = load_animation('assets/Ghosts/inky_up.png', 1, 2, True)
inky_down = load_animation('assets/Ghosts/inky_down.png', 1, 2, True)
inky_left = load_animation('assets/Ghosts/inky_left.png', 1, 2, True)
inky_right = load_animation('assets/Ghosts/inky_right.png', 1, 2, True)
super(Inky, self).__init__(inky_up, x, y)
self.speed = eu.Vector2(200, 0)
self.do(actions)
class Pinky(Actor):
def __init__(self, x, y, actions):
pinky_up = load_animation('assets/Ghosts/pinky_up.png', 1, 2, True)
pinky_down = load_animation('assets/Ghosts/pinky_down.png', 1, 2, True)
pinky_left = load_animation('assets/Ghosts/pinky_left.png', 1, 2, True)
pinky_right = load_animation('assets/Ghosts/pinky_right.png', 1, 2, True)
super(Pinky, self).__init__(pinky_up, x, y)
self.speed = eu.Vector2(200, 0)
self.do(actions)
```
#### File: 2KangHo/pacman/gamelayer.py
```python
import random
from cocos.director import director
from cocos.scenes.transitions import SplitColsTransition, FadeTransition
import cocos.layer
import cocos.scene
import cocos.text
import cocos.actions as ac
import cocos.collision_model as cm
import pygame.mixer
import actors
import mainmenu
from scenario import get_scenario
class GameLayer(cocos.layer.Layer):
is_event_handler = True
def __init__(self, hud, scenario):
super(GameLayer, self).__init__()
self.hud = hud
self.scenario = scenario
self.score = self._score = 0
self.lives = self._lives = 3
self.create_player()
self.create_ghosts()
self.create_dots()
w, h = director.get_window_size()
cell_size = 32
self.coll_man = cm.CollisionManagerGrid(0, w, 0, h, cell_size, cell_size)
self.coll_man_dots = cm.CollisionManagerGrid(0, w, 0, h, cell_size, cell_size)
pygame.mixer.init()
self.sounds={'die' : pygame.mixer.Sound("assets/Sounds/pacman_death.wav"), \
'intro' : pygame.mixer.Sound("assets/Sounds/pacman_beginning.wav"), \
'waka' : pygame.mixer.Sound("assets/Sounds/pacman_chomp.wav")}
self.sounds['intro'].play()
self.schedule(self.game_loop)
@property
def lives(self):
return self._lives
@lives.setter
def lives(self, val):
self._lives = val
self.hud.update_lives(val)
@property
def score(self):
return self._score
@score.setter
def score(self, val):
self._score = val
self.hud.update_score(val)
def game_loop(self, _):
self.coll_man.clear()
self.coll_man_dots.clear()
for obj in self.get_children():
if isinstance(obj, actors.Blinky):
self.coll_man.add(obj)
if isinstance(obj, actors.Clyde):
self.coll_man.add(obj)
if isinstance(obj, actors.Inky):
self.coll_man.add(obj)
if isinstance(obj, actors.Pinky):
self.coll_man.add(obj)
def create_player(self):
player_start = self.scenario.player_start
x, y = player_start
self.add(actors.Player(x, y, ac.Delay(5)))
def create_ghosts(self):
ghosts_start = self.scenario.ghosts_start
blinky_x, blinky_y = ghosts_start[0]
clyde_x, clyde_y = ghosts_start[1]
inky_x, inky_y = ghosts_start[2]
pinky_x, pinky_y = ghosts_start[3]
self.add(actors.Blinky(blinky_x, blinky_y, ac.Delay(5)+ac.Repeat(self.scenario.ghosts_action[0])))
self.add(actors.Clyde(clyde_x, clyde_y, ac.Delay(7)+ac.Repeat(self.scenario.ghosts_action[1])))
self.add(actors.Inky(inky_x, inky_y, ac.Delay(9)+ac.Repeat(self.scenario.ghosts_action[2])))
self.add(actors.Pinky(pinky_x, pinky_y, ac.Delay(11)+ac.Repeat(self.scenario.ghosts_action[3])))
def create_dots(self):
pass
def on_key_release(self, key, _):
pass
#actors.Player.speed *= 1
class HUD(cocos.layer.Layer):
def __init__(self):
super(HUD, self).__init__()
w, h = director.get_window_size()
self.score_text = self._create_text(w/2, h-18)
self.score_points = self._create_text(w/2, h-48)
def _create_text(self, x, y):
text = cocos.text.Label(font_size=15, font_name='Emulogic',
anchor_x='center', anchor_y='center')
text.position = (x, y)
self.add(text)
return text
def update_score(self, score):
self.score_text.element.text = 'Score: %s' % score
def update_lives(self, lives):
self.score_points.element.text = 'Lives: %s' % lives
def new_game():
scenario = get_scenario()
background = scenario.get_background()
hud = HUD()
game_layer = GameLayer(hud, scenario)
return cocos.scene.Scene(background, game_layer, hud)
def game_over():
w, h = director.get_window_size()
layer = cocos.layer.Layer()
text = cocos.text.Label('Game Over', position=(w*0.5, h*0.5),
font_name='Emulogic', font_size=64,
anchor_x='center', anchor_y='center')
layer.add(text)
scene = cocos.scene.Scene(layer)
new_scene = FadeTransition(mainmenu.new_menu())
func = lambda: director.replace(new_scene)
scene.do(ac.Delay(3) + ac.CallFunc(func))
return scene
Game_Map = \
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 7, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 7, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 4, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 4, 4, 4, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 7, 1, 1, 0, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 0, 1, 1, 7, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
``` |
{
"source": "2KangHo/PUFGAN",
"score": 2
} |
#### File: 2KangHo/PUFGAN/networks.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def define_G(nc, nz, ngf, ngpu, device, netPath=''):
netG = None
netG = Generator(nc, nz, ngf, ngpu).to(device)
netG.apply(weights_init)
if netPath != '':
netG.load_state_dict(torch.load(netPath))
return netG
def define_D(nc, ndf, ngpu, device, netPath=''):
netD = None
netD = Discriminator(nc, ndf, ngpu).to(device)
netD.apply(weights_init)
if netPath != '':
netD.load_state_dict(torch.load(netPath))
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
class Generator(nn.Module):
def __init__(self, nc, nz, ngf, ngpu):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(
self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
class Discriminator(nn.Module):
def __init__(self, nc, ndf, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(
self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1).squeeze(1)
``` |
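A quick shape check ties the pieces together: a (N, nz, 1, 1) noise tensor maps through the generator to (N, nc, 64, 64) images in [-1, 1]:
```python
import torch

device = torch.device('cpu')
netG = define_G(nc=3, nz=100, ngf=64, ngpu=1, device=device)
noise = torch.randn(4, 100, 1, 1, device=device)
print(netG(noise).shape)  # torch.Size([4, 3, 64, 64])
```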
{
"source": "2KangHo/PyTorch-ImageClassification-TestBench",
"score": 3
} |
#### File: 2KangHo/PyTorch-ImageClassification-TestBench/down_imagenet.py
```python
import pathlib
import argparse
import requests
from tqdm import tqdm
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 1024 * 32
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(CHUNK_SIZE), desc=destination, miniters=0, unit='MB', unit_scale=1/32, unit_divisor=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download ImageNet')
parser.add_argument('--datapath', default='../data', type=str, metavar='PATH',
help='Where you want to save ImageNet? (default: ../data)')
args = parser.parse_args()
data_dir = pathlib.Path(args.datapath)
data_dir.mkdir(parents=True, exist_ok=True)
files = [
('1gYyRk6cAxkdhKaRbm2etrj-L6yWbaIxR', 'ILSVRC2012_devkit_t12.tar.gz'),
('1nrD10h3H4abgZr4ToNvSAesMR9CQD4Dk', 'ILSVRC2012_img_train.tar'),
('1BNzST-vesjJz4_JEYIsfGvDFXv_0qBNu', 'ILSVRC2012_img_val.tar'),
('14pb7YLYBRyp4QBDu4qF6SOfZDogXza01', 'meta.bin')
]
for file_id, file_name in files:
destination = data_dir / file_name
download_file_from_google_drive(file_id, destination.as_posix())
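# Added note: run as `python down_imagenet.py --datapath ../data`.
# The Drive ids above are copied verbatim from the repository and may expire;
# the confirm-token exchange handles Google's "too large to virus-scan" page.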
``` |
{
"source": "2karis/CPForum",
"score": 3
} |
#### File: vagrant/forum/database_setup.py
```python
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Homeless(Base):
__tablename__ = 'homeless'
id = Column(Integer, primary_key=True)
location = Column(String(255), unique = True)
description = Column(String(255))
image = Column(String(80))
# @property
# def serialize(self):
# #Returns object in easily serializaeble format
# return {
# 'id': self.id,
# 'location': self.location,
# 'image': self.image,
# }
def __repr__(self):
return "<Homeless(location='%s', description='%s', image='%s')>" % (
self.location, self.description, self.image)
engine = create_engine('sqlite:///lesshome.db')
Base.metadata.create_all(engine)
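# --- Added usage sketch (illustrative; field values are invented) ---
# from sqlalchemy.orm import sessionmaker
# Session = sessionmaker(bind=engine)
# session = Session()
# session.add(Homeless(location='Main St.', description='shelter spot', image='spot.jpg'))
# session.commit()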
``` |
{
"source": "2key/activecampaign-python",
"score": 3
} |
#### File: activecampaign-python/activecampaign/fields.py
```python
class Fields(object):
def __init__(self, client):
self.client = client
def update_custom_field_text_value(self, contact_id, field_id, value):
"""
Update a value for a custom field
Args:
contact_id: string
field_id: string
value: string
Returns:
"""
data = {"fieldValue":{"contact":str(contact_id),"field":str(field_id),"value":str(value)}}
return self.client._post("/fieldValues", json=data)
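# --- Added usage sketch (illustrative; `client` must be an authenticated client from this package) ---
# fields = Fields(client)
# fields.update_custom_field_text_value(contact_id=42, field_id=7, value="VIP")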
``` |
{
"source": "2kodevs/CineTV-database",
"score": 3
} |
#### File: 2kodevs/CineTV-database/data_parser.py
```python
import os, json
from functools import reduce
def cut_points(data, top):
points = []
for i in range(top):
for line in data:
if line[i] != ' ':
break
else:
points.append(i)
single_points = [-1, points[0]]
for p in points[1:]:
if p == single_points[-1] + 1:
single_points[-1] = p
else:
single_points.append(p)
return single_points
def daily_programs(data, week):
days = []
for i in range(7):
days.append([x[i] for x in data])
clean_days = []
for day in days:
clean_days.append([])
for line in day:
if line != '':
try:
start = line[:5].split(':')
assert len(start) == 2 and len(start[0]) == 2 and len(start[1]) == 2
h, m = int(start[0]), int(start[1])
assert h < 25 and m < 60
clean_days[-1].append(line)
except:
if len(clean_days[-1]):
clean_days[-1][-1] += f" {line}"
return {week[i]:clean_days[i] for i in range(7)}
def channels_programs(data, points, dates, tv_channels):
channels, blocks = [], []
for line in data:
element = line.strip()
if element in tv_channels:
channels.append(element)
blocks.append([])
else:
daily = []
for i in range(1, len(points)):
daily.append(line[points[i - 1] + 1:points[i]].strip())
if daily[0] in tv_channels:
channels.append(daily[0])
blocks.append([])
daily[0] = ''
blocks[-1].append(daily)
return {channels[i]:daily_programs(blocks[i], dates) for i in range(len(blocks))}
def main(args):
year, d = "2019", {}
files = [x for x in os.listdir(args.path) if x[-5:] == ".data"]
dates = ['Domingo', 'Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes', 'Sabado']
channels = ["CUBAVISIÓN", "TELE REBELDE", "CANAL EDUCATIVO", "CANAL EDUCATIVO/2", "MULTIVISIÓN"]
for file in files:
with open(f"{args.path}/{file}", 'r') as fd:
text = fd.read()
_, month, day, _ = file.split('-')
raw_data = text.split('\n')[2:-3]
data = [x for x in raw_data if x.strip() not in channels]
top = reduce(lambda x, y: max(x, len(y)), data, 0) + 1
data = [x + ' ' * (top - len(x)) for x in data]
# dates = [f'{days[i]} {i + int(day)}' for i in range(7)]
points = cut_points(data, top)
d[f"{day}/{month}/{year}"] = channels_programs(raw_data, points, dates, channels)
with open(args.dir, 'w') as fd:
fd.write(json.dumps(d, indent=4, ensure_ascii=False))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='CineTV data parser')
parser.add_argument('-p', '--path', type=str, default='Files/data/', help='path of the data files')
parser.add_argument('-d', '--dir', type=str, default='Files/db/CineTV-DB.json', help='file to save the DB')
args = parser.parse_args()
main(args)
```
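A quick sanity check for `cut_points` is to run it on a tiny fixed-width table. The sample lines below are invented, and the import assumes this file is reachable on the path as `data_parser`.
```python
from data_parser import cut_points
lines = [
    "08:00 News    09:00 Film",
    "10:30 Sports  11:00 Quiz",
]
top = max(len(x) for x in lines) + 1
padded = [x + ' ' * (top - len(x)) for x in lines]
print(cut_points(padded, top))  # -> [-1, 5, 13, 19, 24]: all-space columns, runs compressed to their last index
```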
#### File: 2kodevs/CineTV-database/page_filter.py
```python
import os
from pdftotext import PDF
def main(args):
files = [x for x in os.listdir(args.path) if x[-4:] == ".pdf"]
for file in files:
with open(f"{args.path}/{file}", 'rb') as fd:
pdf = PDF(fd)
idx, name = 0, file.split('.')[0]
for page in pdf:
line = page.split("\n")[0].split()
if len(line) == 10 and "CARTELERA" in line:
with open(f"{args.folder}{name}-page{idx}.data", 'w') as fd:
fd.write(page)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='CineTV page filter')
parser.add_argument('-p', '--path', type=str, default='Files/pdf/', help='path of the pdf files')
parser.add_argument('-f', '--folder', type=str, default='Files/data/', help='destination folder')
args = parser.parse_args()
main(args)
```
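The keep/discard test in `page_filter` keys entirely on the first line of each page; this standalone check (header text invented to match the 10-token pattern) shows the predicate in isolation:
```python
page = "CARTELERA DE CINE Y TV SEMANA DEL 13 AL 19\n...rest of the page..."
line = page.split("\n")[0].split()
print(len(line) == 10 and "CARTELERA" in line)  # True -> this page would be written out
```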
#### File: 2kodevs/CineTV-database/renamer.py
```python
import os
from downloader import tostr, next_date
f = lambda x,y: f"2019-{y}-{x}.pdf"
def generate_all_dates():
m, d, l = 1, 13, []
while m < 13:
l.append(tostr(m, d, f))
d, m = next_date(m, d)
return l
def main(args):
bad_days = [(24, 2), (4, 8), (6, 10)]
uninformative_days = [(24, 2), (4, 8), (20, 1), (10, 3), (17, 3), (24, 3), (14, 4), (21, 4), (12, 5), (20, 10)]
bad_names = list(map(lambda x: tostr(x[1], x[0], f), bad_days))
uninformative = list(map(lambda x: tostr(x[1], x[0], f), uninformative_days))
path = args.path
data = [x for x in generate_all_dates() if x not in bad_names]
files = [x for x in os.listdir(path) if "icultura" in x]
for file in files:
name = data[int(file.split('.')[2])]
if name not in uninformative:
os.rename(f"{path}/{file}", f"{path}/{name}")
files = [x for x in os.listdir(path) if "icultura" in x]
for file in files:
os.remove(f"{path}/{file}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='CineTV pdf renamer')
parser.add_argument('-p', '--path', type=str, default='Files/pdf/', help='path of the data files')
args = parser.parse_args()
main(args)
``` |
{
"source": "2kodevs/cooperAItive",
"score": 2
} |
#### File: players/strategies/alphazero.py
```python
from ..player import BasePlayer
from .utils.alphazero import encoder_generator, rollout_maker, selector_maker
from .utils.mc import monte_carlo
from .models import AlphaZeroNet
from .utils import parse_bool
class AlphaZero(BasePlayer):
def __init__(self, name, handouts, rollouts, NN, tag='', load_model=True):
super().__init__(f'AlphaZero::{name}')
if isinstance(NN, str):
_, self.NN = AlphaZeroNet().load(NN, tag, load_model=parse_bool(load_model))
else:
self.NN = NN
self.handouts = int(handouts)
self.rollouts = int(rollouts)
def filter(self, valids):
data = {}
selector = selector_maker(data, self.valid_moves(), self.pieces_per_player - len(self.pieces), False, 6)
encoder = encoder_generator(self.max_number)
rollout = rollout_maker(data, self.NN)
_, action, *_ = monte_carlo(
self,
encoder,
rollout,
selector,
self.handouts,
self.rollouts,
)
return [action]
```
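A hedged construction sketch matching the signature above; the checkpoint path, tag, and counts are placeholders, and the relative imports mean this only runs inside the package:
```python
# player = AlphaZero("p0", handouts=10, rollouts=50, NN="checkpoints/run1", tag="latest", load_model="true")
# moves = player.filter(player.valid_moves())  # single-action list chosen by monte_carlo
```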
#### File: strategies/models/alpha_zero_model.py
```python
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from ..utils import state_to_list
#STATE= [(56bits, 4bits, 2bits) x 41]
STATE_SHAPE = (1, 41, 62)
NUM_FILTERS = 64
KERNEL_SIZE = 3
class Net(nn.Module):
"""
Neural Network for Alpha Zero implementation of Dominoes
"""
def __init__(self, input_shape=STATE_SHAPE, policy_shape=111, lr=0.02, device='cpu'):
"""
param input_shape: (int, int, int)
Dimensions of the input.
param policy_shape: int
Number of total actions in policy head
        param lr: float
            Learning rate used by the SGD optimizer
param device:
cpu or cuda
"""
super(Net, self).__init__()
self.save_path = 'checkpoints/'
device = torch.device(device)
self.device = device
self.conv_in = nn.Sequential(
nn.Conv2d(input_shape[0], NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
nn.BatchNorm2d(NUM_FILTERS),
nn.LeakyReLU(),
).to(device)
# layers with residual
self.conv_1 = nn.Sequential(
nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
nn.BatchNorm2d(NUM_FILTERS),
nn.LeakyReLU(),
).to(device)
self.conv_2 = nn.Sequential(
nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
nn.BatchNorm2d(NUM_FILTERS),
nn.LeakyReLU(),
).to(device)
self.conv_3 = nn.Sequential(
nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
nn.BatchNorm2d(NUM_FILTERS),
nn.LeakyReLU(),
).to(device)
self.conv_4 = nn.Sequential(
nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
nn.BatchNorm2d(NUM_FILTERS),
nn.LeakyReLU(),
).to(device)
self.conv_5 = nn.Sequential(
nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
nn.BatchNorm2d(NUM_FILTERS),
nn.LeakyReLU(),
).to(device)
self.conv_6 = nn.Sequential(
nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
nn.BatchNorm2d(NUM_FILTERS),
nn.LeakyReLU(),
).to(device)
# self.conv_7 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_8 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_9 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_10 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_11 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_12 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_13 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_14 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_15 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_16 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_17 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_18 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# self.conv_19 = nn.Sequential(
# nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=KERNEL_SIZE, padding=1),
# nn.BatchNorm2d(NUM_FILTERS),
# nn.LeakyReLU(),
# ).to(device)
# value head
self.conv_val = nn.Sequential(
nn.Conv2d(NUM_FILTERS, 1, kernel_size=1),
nn.BatchNorm2d(1),
nn.LeakyReLU(),
).to(device)
body_out_shape = (NUM_FILTERS, ) + input_shape[1:]
conv_val_size = self._get_conv_val_size(body_out_shape)
self.value = nn.Sequential(
nn.Linear(conv_val_size, 256),
nn.LeakyReLU(),
nn.Linear(256, 1),
nn.Tanh(),
).to(device)
# policy head
self.conv_policy = nn.Sequential(
nn.Conv2d(NUM_FILTERS, input_shape[0], kernel_size=1),
nn.BatchNorm2d(input_shape[0]),
nn.LeakyReLU(),
).to(device)
conv_policy_size = self._get_conv_policy_size(body_out_shape)
self.policy = nn.Sequential(
nn.Linear(conv_policy_size, policy_shape)
).to(device)
#optimizer
self.optimizer = optim.SGD(self.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)
def _get_conv_val_size(self, shape):
o = self.conv_val(torch.zeros(1, *shape).to(self.device))
return int(np.prod(o.size()))
def _get_conv_policy_size(self, shape):
o = self.conv_policy(torch.zeros(1, *shape).to(self.device))
return int(np.prod(o.size()))
def forward(self, x):
batch_size = x.size()[0]
v = self.conv_in(x)
v = self.conv_1(v)
v = self.conv_2(v)
v = self.conv_3(v)
v = self.conv_4(v)
v = self.conv_5(v)
v = self.conv_6(v)
# v = self.conv_7(v)
# v = self.conv_8(v)
# v = self.conv_9(v)
# v = self.conv_10(v)
# v = self.conv_11(v)
# v = self.conv_12(v)
# v = self.conv_13(v)
# v = self.conv_14(v)
# v = self.conv_15(v)
# v = self.conv_16(v)
# v = self.conv_17(v)
# v = self.conv_18(v)
# v = self.conv_19(v)
val = self.conv_val(v)
val = self.value(val.view(batch_size, -1))
pol = self.conv_policy(v)
pol = self.policy(pol.view(batch_size, -1))
return pol, val
def predict(self, s, valids_actions):
"""
        Infer node data given a state
        param s:
            list of encoded states of the game
        param valids_actions:
            list of encoded valid actions from a position of the game
return
(Move probabilities P vector, value V vector)
"""
self.eval()
batch = self.state_lists_to_batch(s)
masks = [self.valids_actions_to_tensor(va) for va in valids_actions]
pol, val = self(batch)
pol = [self.get_policy_value(p, mask, False) for p, mask in zip(pol, masks)]
return pol, val
def get_policy_value(self, logits, mask, log_softmax):
"""
Get move probabilities distribution.
param logits:
list of 111 bits. Raw policy head
        param mask
list of 111 bits. Mask of available actions
param log_softmax
Set True to use log_softmax as activation function. Set False to use softmax
return
Move probabilities
"""
selection = torch.masked_select(logits, mask)
dist = F.log_softmax(selection, dim=-1)
if log_softmax:
return dist
return torch.exp(dist)
def state_lists_to_batch(self, state_lists):
"""
Convert list of list states to batch for network
param state_lists:
list of 'list[endoded states]'
return
States to Tensor
"""
assert isinstance(state_lists, list)
batch_size = len(state_lists)
batch = torch.zeros((batch_size,) + STATE_SHAPE, dtype=torch.float32)
size = np.array(STATE_SHAPE).prod()
for idx, state in enumerate(state_lists):
decoded = torch.tensor([state_to_list(state, size)])
batch[idx] = decoded.view(STATE_SHAPE)
return batch.to(self.device)
def valids_actions_to_tensor(self, valids_actions):
mask = state_to_list(valids_actions, 111)
return torch.tensor(mask, dtype=torch.bool).to(self.device)
def train_batch(self, data):
"""
Given a batch of training data, train the NN
param data:
list with training data
return:
Training loss
"""
# data: [(state, p_target, v_target, valids_actions)]
batch, p_targets, v_targets, valids_actions = [], [], [], []
for (state, p, v, actions) in data:
# state and available_actions are encoded
batch.append(state)
p_targets.append(p)
v_targets.append(v)
valids_actions.append(actions)
batch = self.state_lists_to_batch(batch)
self.train()
self.optimizer.zero_grad()
p_targets = [torch.tensor(p_target, dtype=torch.float32).to(self.device) for p_target in p_targets]
v_targets = torch.tensor(v_targets, dtype=torch.float32).to(self.device)#[torch.tensor(v_target, dtype=torch.float32).to(self.device) for v_target in v_targets]
p_preds_t, v_preds = self(batch)
p_preds = []
for i, a in enumerate(valids_actions):
mask = self.valids_actions_to_tensor(a)
p_preds.append(self.get_policy_value(p_preds_t[i], mask, True))
loss_value = F.mse_loss(v_preds.squeeze(-1), v_targets)
loss_policy = torch.zeros(1).to(self.device)
for pred, target in zip(p_preds, p_targets):
loss_policy += torch.sum(pred * target)
loss_policy = -loss_policy
loss = loss_policy + loss_value
loss.backward()
self.optimizer.step()
# Return loss values to track total loss mean for epoch
return (loss.item(), loss_policy.item(), loss_value.item())
def save(self, error_log, config, epoch, path, save_model, tag='latest', verbose=False):
net_name = [f'AlphaZero_Dom_{tag}.ckpt', f'AlphaZero_Dom_model_{tag}.ckpt'][save_model]
save_path = f'{path}/{self.save_path}'
full_path = f'{save_path}{net_name}'
if not os.path.exists(path):
os.makedirs(path)
if os.path.exists(full_path):
# Save backup for tag
os.rename(full_path, f'{save_path}{net_name[:-5]}_backup.ckpt')
if save_model:
torch.save({
'model': self,
'device': self.device,
'error_log': error_log,
'config': config,
'epoch': epoch,
}, full_path)
else:
torch.save({
'model_state_dict': self.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'device': self.device,
'error_log': error_log,
'config': config,
'epoch': epoch,
}, full_path)
if verbose:
print(f'Model saved with name: {net_name[:-5]}')
print('Checkpoint saved')
def load(self, save_path, tag='latest', load_logs=False, load_model=False):
net_name = [f'AlphaZero_Dom_{tag}.ckpt', f'AlphaZero_Dom_model_{tag}.ckpt'][load_model]
net_checkpoint = torch.load(f'{save_path}/{self.save_path}{net_name}')
device = net_checkpoint['device']
ret = [net_checkpoint['config']]
model = None
if load_model:
ret.append(net_checkpoint['model'])
model = ret[1]
else:
model = Net()
model.load_state_dict(net_checkpoint['model_state_dict'])
self.optimizer.load_state_dict(net_checkpoint['optimizer_state_dict'])
try:
model = model.to(device)
self.device = device
except AssertionError:
self.device = torch.device('cpu')
model.to(self.device)
# Load safe copy with right device
if not load_model:
self.load_state_dict(model.state_dict())
if load_logs:
ret.extend([net_checkpoint['error_log'], net_checkpoint['epoch']])
return ret
```
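The masked-softmax step in `get_policy_value` is easy to verify in isolation; this sketch is self-contained and independent of the network:
```python
import torch
import torch.nn.functional as F
logits = torch.tensor([1.0, 2.0, 3.0, 4.0])
mask = torch.tensor([True, False, True, True])
selection = torch.masked_select(logits, mask)        # keep logits of valid actions only
probs = torch.exp(F.log_softmax(selection, dim=-1))  # softmax over the masked subset
print(probs, probs.sum())                            # a distribution over the 3 valid actions
```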
#### File: players/strategies/rlplayer.py
```python
from ..player import BasePlayer
from random import random
class RLPlayer(BasePlayer):
'''
This player use reinforcement learning
to measure his strategy.
'''
def __init__(self, name):
super().__init__(f"RLPlayer::{name}")
self.actions = []
self.values = {}
self.steps = 0
def load_values(self, addr):
pass
def save_values(self, addr):
pass
def filter(self, valids=None):
return self.policy(super().filter(valids))
def get_movements(self):
heads = [-1, -1]
first_move = True
self.movements = [[[0, 0] for _ in range(10)] for _ in range(4)]
for e, *d in self.history:
if e.name == 'MOVE':
player, piece, head = d
self.movements[player][piece[0]][piece[0] == heads[head]] += 1
self.movements[player][piece[1]][piece[1] == heads[head]] += 1
if first_move:
heads = list(piece)
first_move = False
else:
heads[head] = piece[piece[0] == heads[head]]
def get_rep(self, piece, head):
'''
Build a action.
'''
piece = list(piece)
if piece[0] != self.heads[head]:
piece.reverse()
rep = []
for num in piece:
# cur = []
# for player in range(4):
# cur.extend(self.movements[player][num])
# for x in range(self.me * 2):
# cur.append(cur[x])
# add playe movements related to num
for i in range(4):
rep.extend(self.movements[(self.me + i) % 4][num])
# rep.extend(cur[self.me * 2:])
data = 0
for p in self.pieces:
data += (num in p)
# add num data size (bit here :-P)
rep.append(data)
# add game stage (bit here :-P)
rep.append((self.steps + 9) // 10)
return str(rep)
#//TODO: Infer value here from NN
def get_value(self, piece, head):
return self.values.get(self.get_rep(piece, head), [0.5, 0])[0]
def policy(self, valids):
#//TODO: Parametrize exploration constant
if random() <= 0.3: return valids
self.get_movements()
top = 0
greedy = []
for action in valids:
value = self.get_value(*action)
if value > top:
greedy.clear()
top = value
if value == top: greedy.append(action)
return greedy
def log(self, data):
super().log(data)
e, *d = data
if e.name == 'MOVE':
self.steps += 1
player, piece, head = d
if player == self.me:
self.actions.append(self.get_rep(piece, head))
elif e.name == 'PASS':
self.steps += 1
elif e.name == 'WIN':
new_val = 0.5 if d[0] == -1 else [0, 1][d[0] == self.me % 2]
self.measure(new_val)
def measure(self, new_val):
#//TODO: Parametrize step_size_numerator
step_size_numerator = 0.1
for action in self.actions:
value, n = self.values.get(action, [0.5, 0])
step_size = step_size_numerator / (n + 1)
final_value = value + step_size * (new_val - value)
self.values[action] = [final_value, n + 1]
self.actions.clear()
class SingletonRLPlayer:
def __init__(self, *args):
self.init = args
self.instances = {}
self.instances["0"] = RLPlayer("0")
self.instances["2"] = RLPlayer("2")
self.instances["1"] = self.instances["0"]
self.instances["3"] = self.instances["2"]
self.__name__ = 'RLPlayer'
def __call__(self, name):
        if name not in self.instances:
            print(f"Unexpected player name: {name}")
            self.instances[name] = RLPlayer(name)
return self.instances[name]
``` |
{
"source": "2kodevs/Distributed-Scrapper",
"score": 2
} |
#### File: 2kodevs/Distributed-Scrapper/scrapper.py
```python
import zmq, logging, time, os, requests, pickle, re
from multiprocessing import Process, Lock, Queue, Value
from ctypes import c_int
from threading import Thread, Lock as tLock, Semaphore
from util.utils import parseLevel, LoggerFactory as Logger, noBlockREQ, discoverPeer, getSeeds, findSeeds
log = Logger(name="Scrapper")
availableSlaves = Value(c_int)
lockWork = tLock()
lockClients = tLock()
lockSocketPull = tLock()
lockSocketNotifier = tLock()
counterSocketPull = Semaphore(value=0)
counterSocketNotifier = Semaphore(value=0)
def slave(tasks, notifications, idx, verifyQ):
"""
    Child Process of Scrapper, responsible for downloading the urls.
"""
while True:
url = tasks.get()
with availableSlaves:
availableSlaves.value -= 1
log.info(f"Child:{os.getpid()} of Scrapper downloading {url}", f"slave {idx}")
for i in range(5):
try:
response = requests.get(url)
except Exception as e:
log.error(e, f"slave {idx}")
if i == 4:
notifications.put(("FAILED", url, i))
continue
notifications.put(("DONE", url, response.content))
verifyQ.put((False, url))
break
with availableSlaves:
availableSlaves.value += 1
def listener(addr, port, queue, data):
"""
Process to attend the verification messages sent by the seed.
"""
def puller():
for flag, url in iter(queue.get, "STOP"):
with lockWork:
try:
if flag:
data.append(url)
else:
data.remove(url)
except Exception as e:
log.error(e, "puller")
thread = Thread(target=puller)
thread.start()
socket = zmq.Context().socket(zmq.REP)
socket.bind(f"tcp://{addr}:{port}")
while True:
res = socket.recv().decode()
with lockWork:
socket.send_json(res in data)
def connectToSeeds(sock, inc, lock, counter, peerQ, user):
"""
Thread that connect <sock> socket to seeds.
"""
for addr, port in iter(peerQ.get, "STOP"):
with lock:
log.debug(f"Connecting to seed {addr}:{port + inc}", f"Connect to Seeds -- {user} socket")
sock.connect(f"tcp://{addr}:{port + inc}")
counter.release()
log.info(f"Scrapper connected to seed with address:{addr}:{port + inc})", f"Connect to Seeds -- {user} socket")
def disconnectFromSeeds(sock, inc, lock, counter, peerQ, user):
"""
Thread that disconnect <sock> socket from seeds.
"""
for addr, port in iter(peerQ.get, "STOP"):
with lock:
log.debug(f"Disconnecting from seed {addr}:{port + inc}", f"Disconnect from Seeds -- {user} socket")
sock.disconnect(f"tcp://{addr}:{port + inc}")
counter.acquire()
log.info(f"Scrapper disconnected from seed with address:{addr}:{port + inc})", f"Disconnect from Seeds -- {user} socket")
def notifier(notifications, peerQ, deadQ):
"""
Process to send notifications of task's status to seeds.
"""
context = zmq.Context()
socket = noBlockREQ(context)
#Thread that connect REQ socket to seeds
connectT = Thread(target=connectToSeeds, name="Connect to Seeds - Notifier", args=(socket, 2, lockSocketNotifier, counterSocketNotifier, peerQ, "Notifier"))
connectT.start()
#Thread that disconnect REQ socket from seeds
disconnectT = Thread(target=disconnectFromSeeds, name="Disconnect from Seeds - Notifier", args=(socket, 2, lockSocketNotifier, counterSocketNotifier, deadQ, "Notifier"))
disconnectT.start()
for msg in iter(notifications.get, "STOP"):
try:
assert len(msg) == 3, "wrong notification"
except AssertionError as e:
log.error(e, "Worker Notifier")
continue
while True:
try:
with lockSocketNotifier:
if counterSocketNotifier.acquire(timeout=1):
log.debug(f"Sending msg: ({msg[0]}, {msg[1]}, data) to a seed", "Worker Notifier")
#msg: (flag, url, data)
socket.send_pyobj(msg)
# nothing important to receive
socket.recv()
counterSocketNotifier.release()
break
except zmq.error.Again as e:
log.debug(e, "Worker Notifier")
counterSocketNotifier.release()
except Exception as e:
log.error(e, "Worker Notifier")
counterSocketNotifier.release()
finally:
time.sleep(0.5)
class Scrapper:
"""
Represents a scrapper, the worker node in the Scrapper network.
"""
def __init__(self, address, port):
self.addr = address
self.port = port
self.curTask = []
log.info(f"Scrapper created", "init")
def login(self, seed):
"""
Login the node in the system.
"""
network = True
if seed is not None:
#ip_address:port_number
            regex = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+")
try:
assert regex.match(seed).end() == len(seed)
except (AssertionError, AttributeError):
log.error(f"Parameter seed inserted is not a valid ip_address:port_number")
seed = None
if seed is None:
#//TODO: Change times param in production
log.debug("Discovering seed nodes", "login")
seed, network = discoverPeer(3, log)
if seed == "":
self.seeds = list()
log.info("Login finished", "login")
return network
seedsQ = Queue()
pGetSeeds = Process(target=getSeeds, name="Get Seeds", args=(seed, discoverPeer, (self.addr, self.port), False, seedsQ, log))
pGetSeeds.start()
self.seeds = seedsQ.get()
pGetSeeds.terminate()
log.info("Login finished", "login")
return network
def manage(self, slaves):
"""
Start to manage childs-slaves.
"""
context = zmq.Context()
socketPull = context.socket(zmq.PULL)
seedsQ1 = Queue()
seedsQ2 = Queue()
for address in self.seeds:
seedsQ1.put(address)
seedsQ2.put(address)
#Thread that connect pull socket to seeds
connectT = Thread(target=connectToSeeds, name="Connect to Seeds - Pull", args=(socketPull, 1, lockSocketPull, counterSocketPull, seedsQ1, "Pull"))
connectT.start()
pendingQ = Queue()
toDisconnectQ1 = Queue()
toDisconnectQ2 = Queue()
#Thread that disconnect pull socket from seeds
disconnectT = Thread(target=disconnectFromSeeds, name="Disconnect from Seeds - Pull", args=(socketPull, 1, lockSocketPull, counterSocketPull, toDisconnectQ1, "Notifier"))
disconnectT.start()
pFindSeeds = Process(target=findSeeds, name="Find Seeds", args=(set(self.seeds), [seedsQ1, seedsQ2], [toDisconnectQ1, toDisconnectQ2], log))
pFindSeeds.start()
notificationsQ = Queue()
pNotifier = Process(target=notifier, name="pNotifier", args=(notificationsQ, seedsQ2, toDisconnectQ2))
pNotifier.start()
listenT = Process(target=listener, name="pListen", args=(self.addr, self.port, pendingQ, self.curTask))
listenT.start()
taskQ = Queue()
log.info(f"Scrapper starting child processes", "manage")
availableSlaves.value = slaves
for i in range(slaves):
p = Process(target=slave, args=(taskQ, notificationsQ, i, pendingQ))
p.start()
log.debug(f"Scrapper has started a child process with pid:{p.pid}", "manage")
addr = (self.addr, self.port)
time.sleep(1)
while True:
#task: (client_addr, url)
try:
with availableSlaves:
if availableSlaves.value > 0:
log.debug(f"Available Slaves: {availableSlaves.value}", "manage")
with counterSocketPull:
with lockSocketPull:
url = socketPull.recv(flags=zmq.NOBLOCK).decode()
with lockWork:
if url not in self.curTask:
taskQ.put(url)
notificationsQ.put(("PULLED", url, addr))
pendingQ.put((True, url))
log.debug(f"Pulled {url} in scrapper", "manage")
except zmq.error.ZMQError as e:
log.debug(f"No new messages to pull: {e}", "manage")
time.sleep(1)
pNotifier.terminate()
def main(args):
log.setLevel(parseLevel(args.level))
s = Scrapper(port=args.port, address=args.address)
if not s.login(args.seed):
log.info("You are not connected to a network", "main")
s.manage(args.workers)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Worker of a distibuted scrapper')
parser.add_argument('-a', '--address', type=str, default='127.0.0.1', help='node address')
parser.add_argument('-p', '--port', type=int, default=5050, help='connection port')
parser.add_argument('-l', '--level', type=str, default='DEBUG', help='log level')
parser.add_argument('-s', '--seed', type=str, default=None, help='address of a existing seed node. Insert as ip_address:port_number')
parser.add_argument('-w', '--workers', type=int, default=2, help='number of slaves')
args = parser.parse_args()
main(args)
```
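All traffic from a scrapper to the seeds is plain 3-tuples; the sketch below (URL and address invented) mirrors the shapes produced in `slave` and `manage`, which `notifier` asserts on:
```python
done   = ("DONE",   "http://example.com", b"<html>...</html>")  # successful download
failed = ("FAILED", "http://example.com", 4)                    # gave up at retry index 4
pulled = ("PULLED", "http://example.com", ("127.0.0.1", 5050))  # task claimed by this node
for msg in (done, failed, pulled):
    assert len(msg) == 3  # the invariant notifier() checks before forwarding
```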
#### File: Distributed-Scrapper/util/utils.py
```python
import logging, hashlib, random, sys, zmq, time, pickle, queue
from util.colors import REDB, BLUEB, YELLOWB
from util.params import format, datefmt, BROADCAST_PORT, login, localhost
from socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, SO_BROADCAST, timeout, gethostbyname_ex, gethostname
from multiprocessing import Process, Queue
def getIp():
    """
    Return local ip address (it must be connected to internet).
    """
    # note: `socket` below is the socket *class* imported above, so the module
    # functions are imported by name to avoid shadowing problems
    local = [ip for ip in gethostbyname_ex(gethostname())[2] if not ip.startswith("127.")]
    if local:
        return local[0]
    # Fall back to the local address chosen to route towards a public DNS server
    s = socket(AF_INET, SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 53))
        return s.getsockname()[0]
    finally:
        s.close()
def getIpOffline():
    """
    Return local ip address (works on LAN without internet).
    """
    try:
        return getIp()
    except OSError:
        return "no IP found"
def makeUuid(n, urls):
"""
Returns a number that can be used as an unique universal identifier.
"""
name = ""
random.shuffle(urls)
for url in urls:
name += url
nounce = random.randint(1, n)
h = hashlib.sha256(name.encode() + str(nounce).encode())
return int.from_bytes(h.digest(), byteorder=sys.byteorder)
parseLevel = lambda x: getattr(logging, x)
def LoggerFactory(name="root"):
'''
Create a custom logger to use colors in the logs
'''
logging.setLoggerClass(Logger)
logging.basicConfig(format=format, datefmt=datefmt)
return logging.getLogger(name=name)
class Logger(logging.getLoggerClass()):
def __init__(self, name = "root", level = logging.NOTSET):
self.debug_color = BLUEB
self.info_color = YELLOWB
self.error_color = REDB
return super().__init__(name, level)
def debug(self, msg, mth=""):
super().debug(msg, extra={"color": self.debug_color, "method": mth})
def info(self, msg, mth=""):
super().info(msg, extra={"color": self.info_color, "method": mth})
def error(self, msg, mth=""):
super().error(msg, extra={"color": self.error_color, "method": mth})
def change_color(self, method, color):
setattr(self, f"{method}_color", color)
def noBlockREQ(context, timeout=2000):
'''
Create a custom zmq.REQ socket by modifying the values of:
- zmq.REQ_RELAXED to 1
- zmq.REQ_CORRELATE to 1
- zmq.RCVTIMEO to <timeout>
'''
socket = context.socket(zmq.REQ)
socket.setsockopt(zmq.REQ_RELAXED, 1)
socket.setsockopt(zmq.REQ_CORRELATE, 1)
socket.setsockopt(zmq.RCVTIMEO, timeout)
return socket
def valid_tags(tag):
'''
Html tags filter
'''
return tag.has_attr('href') or tag.has_attr('src')
def discoverPeer(times, log):
"""
Discover a seed in the subnet by broadcast.
    It does not work offline.
"""
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
sock.settimeout(2)
broadcastAddress = ('255.255.255.255', BROADCAST_PORT)
message = login
seed = ""
network = True
for i in range(times):
try:
log.info("Discovering peers", "discoverPeer")
sock.sendto(message.encode(), broadcastAddress)
log.debug("Waiting to receive", "discoverPeer")
data, server = sock.recvfrom(4096)
header, address = pickle.loads(data)
if header == 'WELCOME':
log.info(f"Received confirmation: {address}", "discoverPeer")
log.info(f"Server: {str(server)}", "discoverPeer")
seed = f"{server[0]}:{address[1]}"
sock.close()
return seed, network
else:
log.info("Login failed, retrying...", "discoverPeer")
except timeout as e:
log.error("Socket " + str(e), "discoverPeer")
except Exception as e:
log.error(e, "discoverPeer")
log.error(f"Connect to a network please, retrying connection in {(i + 1) * 2} seconds...", "discoverPeer")
network = False
#Factor can be changed
time.sleep((i + 1) * 2)
sock.close()
return seed, network
def change_html(html, changes):
'''
Function to replace a group of changes in a Html.
Changes have the form (old, new)
'''
changes.sort(key=lambda x: len(x[0]), reverse=True)
for url, name in changes:
html = html.replace(f'"{url}"', f'"{name}"')
html = html.replace(f"'{url}'", f"'{name}'")
return html
def getSeeds(seed, discoverPeer, address, login, q, log):
"""
    Request the list of seed nodes from an active seed; if <seed> is not active, try to discover an active seed in the network.
"""
context = zmq.Context()
sock = noBlockREQ(context, timeout=1200)
sock.connect(f"tcp://{seed}")
for i in range(4, 0, -1):
try:
sock.send_json(("GET_SEEDS",))
seeds = sock.recv_pyobj()
log.info(f"Received seeds: {seeds}", "Get Seeds")
if login:
sock.send_json(("NEW_SEED", address))
sock.recv_json()
sock.close()
q.put(seeds)
break
except zmq.error.Again as e:
log.debug(e, "Get Seeds")
seed, _ = discoverPeer(i, log)
if seed != "":
sock.connect(f"tcp://{seed}")
except Exception as e:
log.error(e, "Get Seeds")
finally:
if i == 1:
q.put({})
def ping(seed, q, time, log):
"""
    Process that pings a seed.
"""
context = zmq.Context()
socket = noBlockREQ(context, timeout=time)
socket.connect(f"tcp://{seed[0]}:{seed[1]}")
status = True
log.debug(f"PING to {seed[0]}:{seed[1]}", "Ping")
try:
socket.send_json(("PING",))
msg = socket.recv_json()
log.info(f"Received {msg} from {seed[0]}:{seed[1]} after ping", "Ping")
except zmq.error.Again as e:
log.debug(f"PING failed -- {e}", "Ping")
status = False
q.put(status)
def findSeeds(seeds, peerQs, deadQs, log, timeout=1000, sleepTime=15, seedFromInput=None):
"""
    Process that periodically asks a seed for its list of seeds.
"""
time.sleep(sleepTime)
while True:
#random address
seed = (localhost, 9999)
data = list(seeds)
for s in data:
#This process is useful to know if a seed is dead too
pingQ = Queue()
pPing = Process(target=ping, name="Ping", args=(s, pingQ, timeout, log))
pPing.start()
status = pingQ.get()
pPing.terminate()
if not status:
for q in deadQs:
q.put(s)
seeds.remove(s)
else:
seed = s
seedsQ = Queue()
pGetSeeds = Process(target=getSeeds, name="Get Seeds", args=(f"{seed[0]}:{seed[1]}", discoverPeer, None, False, seedsQ, log))
log.debug("Finding new seeds to pull from...", "Find Seeds")
pGetSeeds.start()
tmp = set(seedsQ.get())
pGetSeeds.terminate()
#If Get Seeds succeds to connect to a seed
if len(tmp) != 0:
dif = tmp - seeds
            if not len(dif):
                log.debug("No new seed nodes were found", "Find Seeds")
            else:
                log.debug("New seed nodes were found", "Find Seeds")
for s in dif:
for q in peerQs:
q.put(s)
seeds.update(tmp)
if seedFromInput is not None:
try:
seeds.add(seedFromInput.get(block=False))
except queue.Empty:
pass
#The amount of the sleep in production can be changed
time.sleep(sleepTime)
def run_process(target, args):
"""
Run a process and return it's result.
"""
ansQ = Queue()
args = args + (ansQ,)
process = Process(target=target, args=args)
process.start()
ans = ansQ.get()
process.terminate()
return ans
def clock(cycle, q):
"""
    Process that notifies the <q> Queue owner when a cycle has passed.
"""
time.sleep(cycle)
q.put(True)
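# --- Added usage sketch (illustrative) ---
# def double(x, q): q.put(x * 2)     # run_process appends the answer Queue as the last arg
# print(run_process(double, (21,)))  # -> 42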
``` |
{
"source": "2kodevs/domaino-data",
"score": 3
} |
#### File: domaino-data/utils/prob.py
```python
def factorial(n):
l = [1] * (n + 1)
for i in range(2, n + 1):
l[i] = l[i - 1] * i
return l
def combinations(n, k, fac=None):
if k > n or k < 0: return 0
if k == 0 or k == n: return 1
if fac is None:
fac = factorial(n)
return fac[n] // (fac[k] * fac[n - k])
def data_prob(n, max_number=9, pieces=10, fixed=0):
if fixed > n: return 0
if n < 0 or n > pieces: return 0
total = ((max_number + 1) * (max_number)) // 2
total += max_number + 1
fac = factorial(total)
num = combinations(max_number + 1 - fixed, n - fixed, fac) * combinations(total - max_number - 1, pieces - n, fac)
den = combinations(total, pieces)
return num / den
def one_piece_prob(max_number=9, pieces=10):
total = ((max_number + 1) * (max_number)) // 2
total += max_number + 1
fac = factorial(total)
num = combinations(total - 1, pieces - 1, fac)
den = combinations(total, pieces)
return num / den
def possible_hands(total, k, players=4):
fac = factorial(total)
result = 1
for _ in range(players):
result *= combinations(total, k, fac)
total -= k
return result
if __name__ == "__main__":
print(data_prob(int(input('Cuantas quieres en la data: '))))
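    # Added illustrative checks:
    # combinations(5, 2) -> 10
    # one_piece_prob() -> 10/55 ~= 0.1818 for the default max_number=9, pieces=10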
``` |
{
"source": "2kodevs/domaino",
"score": 4
} |
#### File: domino/hands/no_doubles.py
```python
from ..player_view import PlayerView
from random import sample
def no_doubles(max_number, pieces_per_player):
"""
Player 0 will have no doubles.
Randomly distribute pieces among every player.
Valid pieces are all integer tuples of the form:
(i, j) 0 <= i <= j <= max_number
Each player will have `pieces_per_player`.
"""
all_pieces = {(i, j) for i in range(max_number + 1) for j in range(max_number + 1) if i <= j}
pieces0 = all_pieces - {(i, i) for i in range(max_number + 1)}
    hand0 = sample(list(pieces0), pieces_per_player)  # sample() needs a sequence, not a set (Python 3.11+)
pieces = list(all_pieces - set(hand0))
assert 4 * pieces_per_player <= len(pieces) + len(hand0)
hand = sample(pieces, 3 * pieces_per_player)
hands = [hand0]
hands += [hand[i:i+pieces_per_player] for i in range(0, 3 * pieces_per_player, pieces_per_player)]
return [PlayerView(h) for h in hands]
```
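A hedged smoke test for the dealer above; the attribute used to read a hand back out of `PlayerView` is a guess here, so that line stays commented:
```python
hands = no_doubles(max_number=6, pieces_per_player=7)
print(len(hands))  # -> 4 players
# assuming PlayerView keeps the hand in `.pieces`:
# assert all(a != b for a, b in hands[0].pieces)  # hand 0 never holds a double
```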
#### File: players/behaviors/best_accompanied.py
```python
from ..player import BasePlayer
from random import choice
class BestAccompanied(BasePlayer):
''' Play better accompanied if possible as the first move of the game
'''
def __init__(self, name):
super().__init__(f'BestAccompanied::{name}')
def filter(self, valids=None):
valids = super().filter(valids)
if self.heads != [-1, -1]:
return valids
cant = {}
for p0, p1 in self.pieces:
if p0 != p1:
cant[p1] = cant.get(p1, 0) + 1
cant[p0] = cant.get(p0, 0) + 1
filtered = [(c, num) for num, c in cant.items() if c >= 2]
if not filtered:
return valids
best = max(filtered)[0]
best = max([(num, c) for c, num in filtered if c == best])[0]
pieces = [p for p in self.pieces if best in p]
if (best, best) in pieces:
return [((best, best), 0)]
return [(p, 0) for p in pieces]
```
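The heart of the filter is the accompaniment count; rerun standalone on an invented hand, it shows that doubles are skipped, so only mixed pieces vote:
```python
pieces = [(2, 5), (2, 6), (3, 3), (2, 2)]
cant = {}
for p0, p1 in pieces:
    if p0 != p1:
        cant[p1] = cant.get(p1, 0) + 1
        cant[p0] = cant.get(p0, 0) + 1
print(cant)  # {5: 1, 2: 2, 6: 1} -> 2 is the best-accompanied value
```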
#### File: domaino/players/player.py
```python
import random
class BasePlayer:
def __init__(self, name):
self.name = name
self.position = None
self.pieces = []
self.history = []
self.heads = None
def step(self, heads):
should_pass = True
if -1 in heads:
# First move of the game
should_pass = False
else:
for piece in self.pieces:
if piece[0] in heads or piece[1] in heads:
should_pass = False
break
if should_pass:
# If player should pass because it doesn't have any valid piece
return None
self.heads = heads
piece, head = self.choice()
assert piece in self.pieces, f"Invalid piece: {piece}"
self.pieces.remove(piece)
return piece, head
def valid(self, piece, head):
"""
Check if `piece` can be put on head `head`
"""
return self.heads[head] == -1 or self.heads[head] in piece
def valid_moves(self):
# List all valid moves in the form (piece, head).
# This is put piece on head.
valids = []
for piece in self.pieces:
for head in range(2):
if self.valid(piece, head):
valids.append((piece, head))
return valids
def reset(self, position, pieces):
self.position = position
self.pieces = pieces
self.pieces_per_player = len(pieces)
self.history.clear()
def log(self, data):
self.history.append(data)
def choice(self, valids=None):
"""
Select a random move from the player filtered ones
Return:
piece: (tuple<int>) Piece player is going to play. It must have it.
head: (int in {0, 1}) What head is it going to put the piece. This will be ignored in the first move.
"""
        valids = self.filter(valids)
        assert len(valids), "Player strategy returned 0 options to select"
        return random.choice(valids)
def score(self):
"""
Score of current player relative to the weights of its pieces
"""
result = 0
for piece in self.pieces:
result += piece[0] + piece[1]
return result
def filter(self, valids=None):
"""
Logic of each agent. This function given a set of valids move select the posible options.
Notice that rules force player to always make a move whenever is possible.
Player can access to current heads using `self.heads` or even full match history
through `self.history`
Return:
List of:
piece: (tuple<int>) Piece player is going to play. It must have it.
head: (int in {0, 1}) What head is it going to put the piece. This will be ignored in the first move.
"""
if valids is None:
return self.valid_moves()
return valids
@property
def me(self):
return self.position
@property
def partner(self, position=None):
if position is None:
position = self.me
return position ^ 2
@property
def team(self, position=None):
""" Players 0 and 2 belong to team 0
Players 1 and 3 belong to team 1
"""
if position is None:
position = self.me
return position & 1
@property
def next(self, position=None):
""" Next player to play
"""
if position is None:
position = self.me
return (position + 1) & 3
```
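The bitwise arithmetic behind `partner`, `team` and `next` is compact enough to table out:
```python
for pos in range(4):
    print(pos, pos ^ 2, pos & 1, (pos + 1) & 3)  # position, partner, team, next player
# 0->(2,0,1) 1->(3,1,2) 2->(0,0,3) 3->(1,1,0): teams are {0,2} vs {1,3}, turns cycle mod 4
```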
#### File: players/strategies/always_double.py
```python
from ..player import BasePlayer
class AlwaysDouble(BasePlayer):
'''
This player always selects a double piece if possible to make a move
'''
def __init__(self, name):
super().__init__(f"AlwaysDouble::{name}")
def filter(self, valids=None):
valids = super().filter(valids)
data = []
for piece, head in valids:
if piece[0] == piece[1]:
data.append((piece, head))
return data if data else valids
```
#### File: players/strategies/mc.py
```python
from ..player import BasePlayer
from ...domino import Event
from copy import deepcopy
from random import shuffle, choice
from math import log
import numpy as np
from ...common.logger import add_logger, DEBUG, INFO
logger = add_logger('mcts', INFO)
logger.disabled = False
def canonical(piece):
a, b = piece
return max(a, b), min(a, b)
class MonteCarlo(BasePlayer):
def __init__(self, name):
super().__init__(f"Carlos::{name}")
self.history_pointer = 0
def my_init(self):
self.my_pieces = deepcopy(self.pieces)
self.remaining = [7] * 4
self.pool = set()
# TODO: Use it while sampling
self.dont_have = [0] * 4
for i in range(7):
for j in range(i + 1):
self.pool.add(canonical((i, j)))
for piece in self.my_pieces:
self.pool.remove(canonical(piece))
def feed(self):
# Save
if self.history_pointer == 0:
self.my_init()
# Game simulation
# team = self.position % 2
while self.history_pointer < len(self.history):
# Read and proc next event
event, *args = self.history[self.history_pointer]
self.history_pointer += 1
if event == Event.MOVE:
position, piece, head = args
self.remaining[position] -= 1
if position != self.position:
self.pool.remove(canonical(piece))
elif event == Event.PASS:
                position, = args
self.dont_have[position] |= 1 << self.heads[0]
self.dont_have[position] |= 1 << self.heads[1]
elif event == Event.NEW_GAME:
pass
else:
raise ValueError(f"Invalid event: {event}")
def sample(self):
order = list(self.pool)
shuffle(order)
pieces = [[] for _ in range(4)]
for pos in range(4):
if pos == self.position:
pieces[pos] = deepcopy(self.pieces)
else:
r = self.remaining[pos]
pieces[pos] = order[:r]
order = order[r:]
assert len(pieces[pos]) == r
assert len(order) == 0
return pieces
def choice(self):
self.feed()
NUM_SAMPLING = 10
NUM_EXPANDED = 2000
scores = {} # Score of each move (piece, head)
winpredictions = {}
for _ in range(NUM_SAMPLING):
distribution = self.sample()
cscores, cwinpredictions = montecarlo(distribution, tuple(self.heads), self.position, NUM_EXPANDED)
for move, scr in cscores.items():
scores[move] = scores.get(move, 0.) + scr
for move, scr in cwinpredictions.items():
winpredictions[move] = winpredictions.get(move, 0.) + scr
assert len(scores) > 0
best_score = -1.
best_move = None
# for move, scr in scores.items():
for move, scr in winpredictions.items():
if scr > best_score:
best_score = scr
best_move = move
logger.info(f"Best move: {best_move}")
logger.info(f"Expected score: {winpredictions[best_move] / NUM_SAMPLING}")
return best_move
# Utils for Montecarlo
class Node:
WIN_POINTS = 2
TIE_POINTS = 1
EXPLORATION = 2.
def __init__(self, state):
self.state = state
self.visit_count = 0
# Wins | Tie | Loose
self.rate = [0, 0, 0]
self.children = None
self.end_node = None
def score(self, parent_visit_count, me):
if self.visit_count == 0:
return float('inf')
assert sum(self.rate) == self.visit_count
if me: # Current player is from my team
exploitation = self.rate[0] * Node.WIN_POINTS + self.rate[1] * Node.TIE_POINTS
else: # Current player is NOT from my team
exploitation = self.rate[2] * Node.WIN_POINTS + self.rate[1] * Node.TIE_POINTS
# Mean of all simulations so far
exploitation /= self.visit_count
exploration = Node.EXPLORATION * (log(parent_visit_count) / self.visit_count) ** .5
score = exploitation + exploration
logger.debug(f"Exploitation: {exploitation}")
logger.debug(f"Exploration: {exploration}")
logger.debug(f"Score: {score}")
return score
def intersect(pieceA, pieceB):
""" Check if two 2-len tuples have at least one element in common
"""
return pieceA[0] in pieceB or pieceA[1] in pieceB
def winner(state, position, distribution):
""" Find winner in current state
"""
WIN, TIE, LOOSE = 0, 1, 2
team = position & 1
mask, heads, pos = state
winner_team = None
light_hand = float('inf')
light_player = set()
for i in range(4):
# Player `i` don't have any remaining piece
if (mask >> (7 * i)) & ((1 << 7) - 1) == 0:
winner_team = i & 1
break
hand = 0
for j in range(7):
if (mask >> (i * 7 + j)) & 1:
hand += sum(distribution[i][j])
if hand < light_hand:
light_hand = hand
light_player = set()
if hand == light_hand:
light_player.add(i & 1)
if winner_team is None:
if len(light_player) == 2:
return TIE
winner_team = list(light_player)[0]
return WIN if winner_team == team else LOOSE
def is_over(state, distribution):
""" Check if game is over
"""
mask, heads, pos = state
exist_move = False
for i in range(4):
# Player `i` doesn't have any piece left
if (mask >> (7 * i)) & ((1 << 7) - 1) == 0:
return True
for j in range(7):
if ((mask >> (i * 7 + j)) & 1) and intersect(distribution[i][j], heads):
exist_move = True
return not exist_move
def neighbors(state, distribution):
mask, heads, pos = state
count = 0
for i in range(7):
# If player contains this piece yet
if ((mask >> (7 * pos + i)) & 1) == 1:
piece = distribution[pos][i]
# If piece can be played through head_0
if heads[0] in piece or heads[0] == -1:
nmask = mask ^ (1 << (7 * pos + i))
nheads = (heads[0] ^ piece[0] ^ piece[1], heads[1])
npos = (pos + 1) & 3 # % 4
count += 1
yield (nmask, nheads, npos)
# If piece can be played through head_1
if heads[1] in distribution[pos][i]:
nmask = mask ^ (1 << (7 * pos + i))
nheads = (heads[0], heads[1] ^ piece[0] ^ piece[1])
npos = (pos + 1) & 3
count += 1
yield (nmask, nheads, npos)
# Player can't make any valid move other than pass
if count == 0:
npos = (pos + 1) & 3
yield (mask, heads, npos)
def show(state):
mask, heads, pos = state
print(f"{bin(mask)} | {heads[0]} {heads[1]} | {pos}")
def montecarlo(distribution, heads, position, NUM_EXPANDED):
"""
state: (bitmask, heads, pos)
    bitmask: 7 bits per player (2**28 states) marking which pieces each player still holds, relative to `distribution`
parent visit count: PC
visit count: VC
win count: WC
exploration control: K
WC / VC + K * sqrt(log(PC) / VC)
"""
team = position & 1
# Compute first state
mask = 0
for dist in reversed(distribution):
assert len(dist) <= 7
mask <<= 7
mask |= (1 << len(dist)) - 1
heads = tuple(heads)
pos = position
start = (mask, heads, pos)
# Initialize states for MonteCarlo Tree Search
state_map = {start: Node(start)}
# Run MonteCarlo
iterations = 0
logger.debug(f"Start montecarlo from: {bin(mask)} | {heads} | {pos}")
while True:
iterations += 1
# Stop condition
if len(state_map) >= NUM_EXPANDED or \
iterations >= 1e4:
logger.debug(f"Iterations: {iterations}")
logger.debug(f"Number of states: {len(state_map)}")
break
cur = start
# path = [state_map[cur]]
path = []
# Traverse the tree search from the root down to one leaf
while True:
# show(cur)
node = state_map[cur]
path.append(node)
if node.visit_count == 0:
break
best_score = float('-inf')
best_child = None
for child in node.children:
scr = child.score(node.visit_count, (cur[2] & 1) == team)
if scr > best_score:
best_score = scr
best_child = child
assert best_child is not None
cur = best_child.state
# Expand `cur` children if needed
if node.children is None:
node.children = []
for neig in neighbors(cur, distribution):
child = Node(neig)
node.children.append(child)
state_map[neig] = child
# Run simulation from `cur`
while not is_over(cur, distribution):
cur = choice(list(neighbors(cur, distribution)))
w = winner(cur, position, distribution)
# Update path with new information
for s in path:
s.visit_count += 1
s.rate[w] += 1
# Find action values
root = state_map[start]
answer = {}
winprediction = {}
for i, piece in enumerate(distribution[position]):
if heads[0] in piece or heads[0] == -1:
nmask = mask ^ (1 << (7 * position + i))
nheads = (heads[0] ^ piece[0] ^ piece[1], heads[1])
npos = (position + 1) & 3 # % 4
nnode = state_map[(nmask, nheads, npos)]
move = canonical(piece), 0
answer[move] = nnode.visit_count / root.visit_count
winprediction[move] = (nnode.rate[0] * 2 + nnode.rate[1]) / nnode.visit_count
if heads[1] in piece:
nmask = mask ^ (1 << (7 * position + i))
nheads = (heads[0], heads[1] ^ piece[0] ^ piece[1])
npos = (position + 1) & 3 # % 4
nnode = state_map[(nmask, nheads, npos)]
move = canonical(piece), 1
answer[move] = nnode.visit_count / root.visit_count
winprediction[move] = (nnode.rate[0] * 2 + nnode.rate[1]) / nnode.visit_count
return answer, winprediction
```
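The 28-bit hand mask built at the top of `montecarlo` packs 7 bits per player with player 0 in the low bits; an invented distribution makes the layout visible:
```python
distribution = [[(0, 0)] * 3, [(1, 1)] * 2, [(2, 2)] * 4, [(3, 3)] * 1]  # hand sizes 3, 2, 4, 1
mask = 0
for dist in reversed(distribution):
    mask <<= 7
    mask |= (1 << len(dist)) - 1
print(bin(mask))  # 0b1000111100000110000111: p3=0000001, p2=0001111, p1=0000011, p0=0000111
```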
#### File: players/strategies/table_counter.py
```python
from ..player import BasePlayer
class TableCounter(BasePlayer):
'''
    Select the piece with the highest score among the pieces whose values have been played most often
'''
def __init__(self, name):
super().__init__(f"TableCounter::{name}")
def count_table(self):
cant = {}
pieces = [d[1] for e, *d in self.history if e.name == 'MOVE']
for p in pieces:
cant[p[0]] = cant.get(p[0], 0) + 1
if p[0] != p[1]:
cant[p[1]] = cant.get(p[1], 0) + 1
return cant
def filter(self, valids=None):
valids = super().filter(valids)
best, data = -1, []
cant = self.count_table()
for piece, head in valids:
value = cant.get(piece[piece[0] == self.heads[head]], 0)
if value > best:
best, data = value, []
if value == best:
data.append((piece, head))
return data
```
#### File: research/minimax/domino_ux.py
```python
from minimax import Minimax
from minimax_domino import Game
from random import Random
def generate_game(seed=None):
rnd = Random(seed)
tokens = [(j, i) for i in range(7) for j in range(i + 1)]
assert len(tokens) == 28
tokens = rnd.sample(tokens, 28)
assigned_tokens = []
while len(tokens) > 0:
assigned_tokens.append(tokens[:7])
tokens = tokens[7:]
return Game(assigned_tokens)
def main(seed=None):
    game = generate_game(seed)
cur_state = game.first_state()
mm = Minimax(game)
for tokens in game.pieces:
print(tokens)
first_value = mm.find(cur_state)
while not game.is_over(cur_state):
print(cur_state)
value = mm.find(cur_state)
print(value)
moves = mm.get_moves(cur_state)
move = moves[0]
print(move)
cur_state = game.apply(cur_state, move)
assert first_value == value
if __name__ == '__main__':
main()
```
#### File: research/minimax/minimax_domino_test.py
```python
import random
import minimax
import minimax_domino
import domino_ux
import unittest
def generate_game(num_tokens, max_value, seed):
rnd = random.Random(seed)
tokens = []
for i in range(4):
row = []
for j in range(num_tokens):
x, y = rnd.randint(0, max_value), rnd.randint(0, max_value)
if x > y: x, y = y, x
row.append((x, y))
tokens.append(row)
return minimax_domino.Game(tokens)
class TestStringMethods(unittest.TestCase):
def test_simple(self):
# Simple hardcoded game for debugging
game = minimax_domino.Game([
[(0, 0), (1, 2)],
[(0, 1), (2, 2)],
[(1, 1), (0, 0)],
[(0, 2), (0, 1)],
])
mm = minimax.Minimax(game)
state = game.first_state()
value = mm.find(state)
self.assertEqual(value, 1)
def test_first_win(self):
# First team wins
game = generate_game(2, 2, 0)
mm = minimax.Minimax(game)
cur_state = game.first_state()
first_value = mm.find(cur_state)
# print(game.pieces)
# print(first_value)
# print(cur_state)
while not game.is_over(cur_state):
moves = mm.get_moves(cur_state)
move = moves[0]
cur_state = game.apply(cur_state, move)
# print(moves)
# print(">>", move)
# print(cur_state)
value = mm.find(cur_state)
self.assertEqual(value, first_value)
def test_second_win(self):
# Second team wins
game = generate_game(2, 2, 12)
mm = minimax.Minimax(game)
cur_state = game.first_state()
first_value = mm.find(cur_state)
# print(game.pieces)
# print(first_value)
# print(cur_state)
while not game.is_over(cur_state):
moves = mm.get_moves(cur_state)
move = moves[0]
cur_state = game.apply(cur_state, move)
# print(moves)
# print(">>", move)
# print(cur_state)
value = mm.find(cur_state)
self.assertEqual(value, first_value)
def test_large(self):
# Game with max_value up to 6
game = domino_ux.generate_game(seed=7)
mm = minimax.Minimax(game)
cur_state = game.first_state()
first_value = mm.find(cur_state)
# print(game.pieces)
# print(first_value)
# print(cur_state)
while not game.is_over(cur_state):
moves = mm.get_moves(cur_state)
move = moves[0]
cur_state = game.apply(cur_state, move)
# print(moves)
# print(">>", move)
# print(cur_state)
value = mm.find(cur_state)
self.assertEqual(value, first_value)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "2kodevs/matcom_scheduler",
"score": 2
} |
#### File: bot/handlers/cancel.py
```python
from .utils import is_chat_admin, clear_chat
from telegram.ext import CommandHandler, Filters
# Messages
ADMINS_ONLY = 'Ups!!!, solo los administradores pueden usar este comando :('
ACTIVE = 'Para cancelar una discusión, primero se debe crear una usando /create'
CANCELED = 'Discusión cancelada satisfactoriamente '
def cancel(update, context):
user = update.effective_user.id
chat = update.effective_chat.id
try:
assert is_chat_admin(context.bot, chat, user), ADMINS_ONLY
assert context.chat_data.get('active'), ACTIVE
clear_chat(chat, context)
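        # success: reuse the AssertionError path below to deliver the confirmation text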
assert False, CANCELED
except AssertionError as e:
update.effective_message.reply_text(str(e))
# Handler
cancel_handler = CommandHandler('cancel', cancel, Filters.group)
``` |
{
"source": "2kodevs/Search-Engine",
"score": 3
} |
#### File: tests/indexer/_tests.py
```python
import unittest
from src import Indexer
class IndexerTestCase(unittest.TestCase):
def setUp(self):
self.Indexer = Indexer()
self.maxDiff = None
def test_tokenizer(self):
#data ok 1 doc
data = [{
'id': 1,
'title': 'some random title',
'author': '<NAME>',
'text': """
here lies the test of tests, the supreme test, first 2kodevs test using python.
this day should be remembered forever, the future of the tests depends on it. we have a heavy burden for now on.
""",
}]
terms = Indexer.tokenize(data)
ans = [
('some', 1), ('random', 1), ('title', 1), ('alan brito', 1), ('here', 1), ('lies', 1),
('the', 1), ('test', 1), ('of', 1), ('tests', 1), ('the', 1), ('supreme', 1), ('test', 1),
('first', 1), ('2kodevs', 1), ('test', 1), ('using', 1), ('python', 1), ('this', 1), ('day', 1),
('should', 1), ('be', 1), ('remembered', 1), ('forever', 1), ('the', 1), ('future', 1), ('of', 1),
('the', 1), ('tests', 1), ('depends', 1), ('on', 1), ('it', 1), ('we', 1), ('have', 1), ('a', 1),
('heavy', 1), ('burden', 1), ('for', 1), ('now', 1), ('on', 1),
]
ans.sort()
self.assertEqual(terms, ans)
#data with many stopwords and blank spaces 1 doc
data = [{
'id': 1,
'title': 'some random title',
'author': '<NAME>',
'text': """
here lies the test of ... tests, the supreme-test, first \n2kodevs\n test using python.
this day should be; remembered: . forever, the future of the tests ,,,depends on it. we have a heavy burden for now on.
""",
}]
terms = Indexer.tokenize(data)
ans = [
('some', 1), ('random', 1), ('title', 1), ('alan brito', 1), ('here', 1), ('lies', 1),
('the', 1), ('test', 1), ('of', 1), ('tests', 1), ('the', 1), ('supreme', 1), ('test', 1),
('first', 1), ('2kodevs', 1), ('test', 1), ('using', 1), ('python', 1), ('this', 1), ('day', 1),
('should', 1), ('be', 1), ('remembered', 1), ('forever', 1), ('the', 1), ('future', 1), ('of', 1),
('the', 1), ('tests', 1), ('depends', 1), ('on', 1), ('it', 1), ('we', 1), ('have', 1), ('a', 1),
('heavy', 1), ('burden', 1), ('for', 1), ('now', 1), ('on', 1),
]
ans.sort()
self.assertEqual(terms, ans)
#data ok 2 doc
data = [{
'id': 1,
'title': 'some random title',
'author': '<NAME>',
'text': """
here lies the test of tests, the supreme test, first 2kodevs test using python.
this day should be remembered forever, the future of the tests depends on it. we have a heavy burden for now on.
""",
},
{
'id': 2,
'title': 'some random title 2',
'author': '<NAME>',
'text': """
less meaningfuls words around here, still testing tests with some test
""",
}
]
terms = Indexer.tokenize(data)
ans = [
('some', 1), ('random', 1), ('title', 1), ('alan brito', 1), ('here', 1), ('lies', 1),
('the', 1), ('test', 1), ('of', 1), ('tests', 1), ('the', 1), ('supreme', 1), ('test', 1),
('first', 1), ('2kodevs', 1), ('test', 1), ('using', 1), ('python', 1), ('this', 1), ('day', 1),
('should', 1), ('be', 1), ('remembered', 1), ('forever', 1), ('the', 1), ('future', 1), ('of', 1),
('the', 1), ('tests', 1), ('depends', 1), ('on', 1), ('it', 1), ('we', 1), ('have', 1), ('a', 1),
('heavy', 1), ('burden', 1), ('for', 1), ('now', 1), ('on', 1), ('some', 2), ('random', 2), ('title', 2),
('2', 2), ('<NAME>', 2), ('less', 2), ('meaningfuls', 2), ('words', 2), ('around', 2),
('here', 2), ('still', 2), ('testing', 2), ('tests', 2), ('with', 2), ('some', 2), ('test', 2)
]
ans.sort()
self.assertEqual(terms, ans)
def test_update_vocabulary(self):
data = [{
'id': 1,
'title': 'some random title',
'author': '<NAME>',
'text': """
here lies the test of tests, the supreme test, first 2kodevs test using python.
""",
},
{
'id': 2,
'title': 'some random title 2',
'author': '<NAME>',
'text': """
less meaningfuls words around here, still testing tests with some test
""",
}
]
terms = Indexer.tokenize(data)
self.Indexer.max_freq = [0, 0]
self.Indexer.update_vocabulary(terms)
ans = {
'some': [(1, 1), (2, 2)],
'random': [(1, 1), (2, 1)],
'title': [(1, 1), (2, 1)],
'alan brito': [(1, 1)],
'here': [(1, 1), (2, 1)],
'lies': [(1, 1)],
'the': [(1, 2)],
'test': [(1, 3), (2, 1)],
'of': [(1, 1)],
'tests': [(1, 1), (2, 1)],
'supreme': [(1, 1)],
'first': [(1, 1)],
'2kodevs': [(1, 1)],
'using': [(1, 1)],
'python': [(1, 1)],
'susana horia': [(2, 1)],
'2': [(2, 1)],
'less': [(2, 1)],
'meaningfuls': [(2, 1)],
'around': [(2, 1)],
'still': [(2, 1)],
'testing': [(2, 1)],
'words': [(2, 1)],
'with': [(2, 1)],
}
self.assertDictEqual(self.Indexer.vocabulary, ans)
self.assertEqual(self.Indexer.max_freq, [3, 2])
``` |
{
"source": "2kodevs/TypeInferencer",
"score": 2
} |
#### File: 2kodevs/TypeInferencer/main.py
```python
import eel
import logging
from time import sleep
from core.cmp.visitors import *
from core.cmp.evaluation import *
def build_AST(G, text):
data, err = [], False
ast = None
txt = '================== TOKENS =====================\n'
tokens = tokenize_text(text)
txt += format_tokens(tokens)
data.append(txt)
txt = '=================== PARSE =====================\n'
#print(parser([t.token_type for t in tokens], get_shift_reduce=True))
try:
parse, operations = CoolParser([t.token_type for t in tokens], get_shift_reduce=True)
except:
err = True
txt += 'Impossible to parse\n'
#print('\n'.join(repr(x) for x in parse))
data.append(txt)
if not err:
txt = '==================== AST ======================\n'
ast = evaluate_reverse_parse(parse, operations, tokens)
formatter = FormatVisitor()
tree = formatter.visit(ast)
txt += str(tree)
data.append(txt)
return ast, '\n\n'.join(data)
def error_formatter(errors):
txt = 'Errors: [\n'
for error in errors:
txt += f'\t{error}\n'
txt += ']\n'
return txt
def run_pipeline(G, text):
data, err = [], False
ast, txt = build_AST(G, text)
errors = context = scope = None
data.append(txt)
if ast:
txt = '============== COLLECTING TYPES ===============\n'
errors = []
collector = TypeCollector(errors)
collector.visit(ast)
context = collector.context
if len(errors):
txt += error_formatter(errors)
err = True
txt += 'Context:\n'
txt += str(context)
data.append(txt)
errors.clear()
txt = '=============== BUILDING TYPES ================\n'
builder = TypeBuilder(context, errors)
builder.visit(ast)
if len(errors):
txt += error_formatter(errors)
err = True
errors.clear()
data.append(txt)
txt = '=============== CHECKING TYPES ================\n'
checker = TypeChecker(context, errors)
scope = checker.visit(ast)
if len(errors):
txt += error_formatter(errors)
err = True
errors.clear()
data.append(txt)
txt = '=============== INFERING TYPES ================\n'
inferer = InferenceVisitor(context, errors)
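        # Fixed-point iteration: repeat inference until the number of unresolved AUTO types stops shrinking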
while True:
old = scope.count_auto()
scope = inferer.visit(ast)
if old == scope.count_auto():
break
errors.clear()
scope = inferer.visit(ast)
if len(errors):
txt += error_formatter(errors)
err = True
errors.clear()
txt += 'Context:\n'
txt += str(context) + '\n'
formatter = ComputedVisitor()
if not err:
tree = formatter.visit(ast)
txt += 'AST:\n' + str(tree)
data.append(txt)
return '\n\n'.join(data)
@eel.expose
def compile(text):
sleep(2)
return run_pipeline(CoolGrammar, text)
def main():
eel.init('web')
eel_options = {'port': 8045}
eel.start('index.html', size=(1000, 860), options=eel_options, block=False)
while True:
eel.sleep(0.1)
if __name__ == '__main__':
main()
``` |
{
"source": "2kofawsome/full-stack-chessboard",
"score": 2
} |
#### File: full-stack-chessboard/lcddriver/lcddriver.py
```python
from lcddriver import i2c_lib
from time import *
from multiprocessing import Process, Manager
import ctypes
import sys
# LCD Address
# Usually you will have to use one of the two provided values below.
# If you prefer, you can check your LCD address with the command: "sudo i2cdetect -y 1"
# This is a common LCD address.
ADDRESS = 0x27
# This is another common LCD address.
# ADDRESS = 0x3f
# commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# flags for backlight control
LCD_BACKLIGHT = 0x08
LCD_NOBACKLIGHT = 0x00
En = 0b00000100 # Enable bit
Rw = 0b00000010 # Read/Write bit
Rs = 0b00000001 # Register select bit
class lcd:
# initializes objects and lcd
def __init__(self):
self.lcd_device = i2c_lib.i2c_device(ADDRESS)
self.manager = Manager()
self.lines = self.manager.Namespace()
self.lines.one = "Full-Stack-Chessboard"
self.lines.two = "By <NAME> "
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x02)
self.lcd_write(LCD_FUNCTIONSET | LCD_2LINE | LCD_5x8DOTS | LCD_4BITMODE)
self.lcd_write(LCD_DISPLAYCONTROL | LCD_DISPLAYON)
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_ENTRYMODESET | LCD_ENTRYLEFT)
sleep(0.2)
self.process = Process(target=self.loop)
self.process.start()
# clocks EN to latch command
def lcd_strobe(self, data):
self.lcd_device.write_cmd(data | En | LCD_BACKLIGHT)
sleep(0.0005)
self.lcd_device.write_cmd(((data & ~En) | LCD_BACKLIGHT))
sleep(0.0001)
def lcd_write_four_bits(self, data):
self.lcd_device.write_cmd(data | LCD_BACKLIGHT)
self.lcd_strobe(data)
# write a command to lcd
def lcd_write(self, cmd, mode=0):
self.lcd_write_four_bits(mode | (cmd & 0xF0))
self.lcd_write_four_bits(mode | ((cmd << 4) & 0xF0))
# put string function
def display_string(self, string, line):
if line == 1:
self.lcd_write(0x80)
if line == 2:
self.lcd_write(0xC0)
if line == 3:
self.lcd_write(0x94)
if line == 4:
self.lcd_write(0xD4)
for char in string:
self.lcd_write(ord(char), Rs)
# clear lcd and set to home
def clear(self):
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_RETURNHOME)
self.process.terminate()
self.process.join()
def loop(self):
"""
Process that runs simultaneously to main program to keep the LCD monitor constantly updating
args: None
Returns: None
"""
while True:
try:
line1 = self.lines.one
line2 = self.lines.two
while line1 == self.lines.one and line2 == self.lines.two:
self.display_string(line1, 1)
self.display_string(line2, 2)
if line1 == "Calculating. ": #special case
sleep(0.2)
self.display_string("Calculating..", 1)
sleep(0.2)
self.display_string("Calculating...", 1)
sleep(0.2)
else:
for n in range(4):
sleep(0.25)
if line1 != self.lines.one or line2 != self.lines.two:
break
if line1 != self.lines.one or line2 != self.lines.two:
break
if len(line1) > len(line2):
for n in range(len(line1) - 16):
sleep(0.15)
self.display_string(line1[n + 1 :], 1)
if len(line1) - len(line2) < len(line1) - 16 - n:
self.display_string(line2[n + 1 :], 2)
else:
for n in range(len(line2) - 16):
sleep(0.15)
if len(line2) - len(line1) < len(line2) - 16 - n:
self.display_string(line1[n + 1 :], 1)
self.display_string(line2[n + 1 :], 2)
for n in range(4):
sleep(0.25)
if line1 != self.lines.one or line2 != self.lines.two:
break
else:
continue
break
except (BrokenPipeError):
#self.clear()
pass
def update(self, string, line):
"""
Updates string for LCD display loop
args: string, line (int)
Returns: None
"""
if len(string) < 16:
string = string + " " * (16 - len(string))
if line == 1:
self.lines.one = string
elif line == 2:
self.lines.two = string
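# Minimal usage sketch (assumes an I2C LCD is wired at ADDRESS):
#   display = lcd()
#   display.update("Full-Stack-Chess", 1)
#   display.update("Ready", 2)
#   ...
#   display.clear()  # also terminates the background refresh process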
``` |
{
"source": "2kranki/genapp",
"score": 2
} |
#### File: genapp/scripts/gen_sql_test.py
```python
from io import StringIO
from unittest import TestCase
import genSql01
import os
################################################################################
# Test Classes
################################################################################
class testParseArgs(TestCase):
def setUp(self):
pass
def test_one(self):
args = ["--debug"]
genSql01.parseArgs(args)
self.assertTrue(genSql01.oArgs.fDebug)
class testAbsolutePath(TestCase):
def setUp(self):
genSql01.parseArgs(["--debug"])
def test_one(self):
txt = "${HOME}/a.txt"
a = genSql01.getAbsolutePath(txt)
b = os.path.expandvars(txt)
self.assertEqual(a,b)
a = genSql01.getAbsolutePath('~/a.txt')
self.assertEqual(a,b)
class testBuild(TestCase):
def setUp(self):
genSql01.parseArgs(["--debug"])
def test_one(self):
iRc = genSql01.build()
self.assertEqual(iRc, 0)
################################################################################
# Command-line interface
################################################################################
if '__main__' == __name__:
import unittest
unittest.main()
``` |
{
"source": "2kranki/jenkins01",
"score": 2
} |
#### File: 2kranki/jenkins01/newVersion.py
```python
import commands
#import csv
#import decimal
import math
import optparse
import os
import re
import sys
import tempfile
import time
import user
oOptions = None
################################################################################
# Object Classes and Functions
################################################################################
#===============================================================================
# execute an OS Command Class
#===============================================================================
class execCmd:
def __init__( self, fExec=True, fAccum=False ):
self.fAccum = fAccum
self.fExec = fExec
self.fNoOutput = False
self.iRC = 0
self.szCmdList = []
def __getitem__( self, i ):
szLine = self.szCmdList[i]
if szLine:
return szLine
else:
raise IndexError
#-------------------------------------------------------------------------------
# Execute a set of Bash Commands.
#-------------------------------------------------------------------------------
def doBashSys( self, oCmds, fIgnoreRC=False ):
"Execute a set of Bash Commands."
if oOptions.fDebug:
print "execCmd::doBashSys(%s)" % ( oCmds )
# Make sure that we have a sequence type for the commands.
import types
if isinstance( oCmds, types.ListType ) or isinstance( oCmds, types.TupleType):
pass
else:
oCmds = [ oCmds ]
# Build a stub bash script that will run in the chrooted environment.
oStub,oFilePath = tempfile.mkstemp( ".sh", "bashStub", '.', text=True )
if oOptions.fDebug:
print "\toFilePath='%s'" % ( oFilePath )
os.write( oStub, "#!/bin/bash -xv\n\n" )
else:
os.write( oStub, "#!/bin/bash\n\n" )
for szCmd in oCmds:
os.write( oStub, szCmd + "\n" )
os.write( oStub, "exit $?\n" )
os.close( oStub )
# Now execute the Bash Stub with cleanup.
oFileBase = os.path.basename( oFilePath )
szCmd = "chmod +x " + oFilePath
self.doCmd( szCmd, fIgnoreRC )
try:
szCmd = oFilePath
self.doSys( szCmd, fIgnoreRC )
finally:
os.unlink( oFilePath )
#-------------------------------------------------------------------------------
# Execute a System Command.
#-------------------------------------------------------------------------------
def doCmd( self, szCmd, fIgnoreRC=False ):
"Execute a System Command."
# Do initialization.
if oOptions.fDebug:
print "execCmd::doCmd(%s)" % (szCmd)
if 0 == len( szCmd ):
if oOptions.fDebug:
print "\tcmdlen==0 so rc=0"
raise ValueError
szCmd = os.path.expandvars( szCmd )
if self.fNoOutput:
szCmd += " 2>/dev/null >/dev/null"
if self.fAccum:
self.szCmdList.append( szCmd )
self.szCmd = szCmd
# Execute the command.
if oOptions.fDebug:
print "\tcommand(Debug Mode) = %s" % ( szCmd )
if szCmd and self.fExec:
tupleResult = commands.getstatusoutput( szCmd )
if oOptions.fDebug:
print "\tResult = %s, %s..." % ( tupleResult[0], tupleResult[1] )
self.iRC = tupleResult[0]
self.szOutput = tupleResult[1]
if fIgnoreRC:
return
if 0 == tupleResult[0]:
return
else:
if oOptions.fDebug:
print "OSError cmd: %s" % ( szCmd )
print "OSError rc: %d" % ( self.iRC )
print "OSError output: %s" % ( self.szOutput )
raise OSError, szCmd
if szCmd and not self.fExec:
if oOptions.fDebug:
print "\tNo-Execute enforced! Cmd not executed, but good return..."
return
# Return to caller.
self.iRC = -1
self.szOutput = None
raise ValueError
#-------------------------------------------------------------------------------
# Execute a list of System Commands.
#-------------------------------------------------------------------------------
def doCmds( self, oCmds, fIgnoreRC=False ):
"Execute a list of System Commands."
# Make sure that we have a sequence type for the commands.
import types
if isinstance( oCmds, types.ListType ) or isinstance( oCmds, types.TupleType):
pass
else:
oCmds = [ oCmds ]
# Execute each command.
for szCmd in oCmds:
            self.doCmd( szCmd + "\n", fIgnoreRC )
#-------------------------------------------------------------------------------
# Execute a System Command with output directly to terminal.
#-------------------------------------------------------------------------------
def doSys( self, szCmd, fIgnoreRC=False ):
"Execute a System Command with output directly to terminal."
# Do initialization.
if oOptions.fDebug:
print "execCmd::doSys(%s)" % (szCmd)
if 0 == len( szCmd ):
if oOptions.fDebug:
print "\tcmdlen==0 so rc=0"
raise ValueError
szCmd = os.path.expandvars( szCmd )
if self.fNoOutput:
szCmd += " 2>/dev/null >/dev/null"
if self.fAccum:
self.szCmdList.append( szCmd )
self.szCmd = szCmd
# Execute the command.
if oOptions.fDebug:
print "\tcommand(Debug Mode) = %s" % ( szCmd )
if szCmd and self.fExec:
self.iRC = os.system( szCmd )
self.szOutput = None
if oOptions.fDebug:
print "\tResult = %s" % ( self.iRC )
if fIgnoreRC:
return
if 0 == self.iRC:
return
else:
raise OSError, szCmd
if szCmd and not self.fExec:
if oOptions.fDebug:
print "\tNo-Execute enforced! Cmd not executed, but good return..."
return
# Return to caller.
self.iRC = -1
raise ValueError
def getOutput( self ):
return self.szOutput
def getRC( self ):
return self.iRC
def len( self ):
return len( self.szCmdList )
def save( self ):
return 0
def setExec( self, fFlag=True ):
self.fExec = fFlag
def setNoOutput( self, fFlag=False ):
self.fNoOutput = fFlag
#===============================================================================
# Miscellaneous
#===============================================================================
#---------------------------------------------------------------------
# getAbsolutePath -- Convert a Path to an absolute path
#---------------------------------------------------------------------
def getAbsolutePath( szPath ):
"Convert Path to an absolute path."
if oOptions.fDebug:
print "getAbsolutePath(%s)" % ( szPath )
# Convert the path.
szWork = os.path.normpath( szPath )
szWork = os.path.expanduser( szWork )
szWork = os.path.expandvars( szWork )
szWork = os.path.abspath( szWork )
# Return to caller.
if oOptions.fDebug:
print "\tabsolute_path=", szWork
return szWork
################################################################################
# Main Program Processing
################################################################################
def mainCLI( listArgV=None ):
"Command-line interface."
global oDB
global oOptions
# Do initialization.
iRc = 20
# Parse the command line.
szUsage = "usage: %prog [options] sourceDirectoryPath [destinationDirectoryPath]"
oCmdPrs = optparse.OptionParser( usage=szUsage )
oCmdPrs.add_option( "-d", "--debug", action="store_true",
dest="fDebug", default=False,
help="Set debug mode"
)
oCmdPrs.add_option( "-v", "--verbose",
action="count",
dest="iVerbose",
default=0,
help="Set verbose mode"
)
(oOptions, oArgs) = oCmdPrs.parse_args( listArgV )
if oOptions.fDebug:
print "In DEBUG Mode..."
print 'Args:',oArgs
if len(oArgs) < 1:
szSrc = os.getcwd( )
else:
szSrc = oArgs[0]
if len(oArgs) > 1:
print "ERROR - too many command arguments!"
oCmdPrs.print_help( )
return 4
if oOptions.fDebug:
print 'szSrc:',szSrc
# Perform the specified actions.
iRc = 0
try:
# Read in the tag file.
with open('tag.txt', 'r') as tag:
ver = tag.read().strip().split('.')
# Update the version.
#print('.'.join(map(str, ver)))
ver[2] = int(ver[2]) + 1
newVer = '.'.join(map(str, ver))
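        # e.g. a tag.txt containing "1.2.3" becomes "1.2.4"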
print newVer
# Write out the new file
tagOut = open("tag.txt", "w")
tagOut.write(newVer)
tagOut.close()
# Now tag the git repo (git tag -a version_string -m "New Release"
oExec = execCmd()
cmd = "git tag -a {0} -m \"New Release\"".format(newVer)
if not oOptions.fDebug:
oExec.doSys(cmd)
else:
print "Debug:",cmd
tupleResult = commands.getstatusoutput("git remote")
if int(tupleResult[0]) == 0:
remotes = tupleResult[1]
for remote in remotes.splitlines():
cmd = "git push {0} --tag".format(remote.strip())
if not oOptions.fDebug:
oExec.doSys(cmd)
else:
print "Debug:",cmd
finally:
pass
return iRc
################################################################################
# Command-line interface
################################################################################
if '__main__' == __name__:
startTime = time.time( )
iRc = mainCLI( sys.argv[1:] )
if oOptions.iVerbose or oOptions.fDebug:
if 0 == iRc:
print "...Successful completion."
else:
print "...Completion Failure of %d" % ( iRc )
endTime = time.time( )
if oOptions.iVerbose or oOptions.fDebug:
print "Start Time: %s" % (time.ctime( startTime ) )
print "End Time: %s" % (time.ctime( endTime ) )
diffTime = endTime - startTime # float Time in seconds
iSecs = int(diffTime % 60.0)
iMins = int((diffTime / 60.0) % 60.0)
iHrs = int(diffTime / 3600.0)
if oOptions.iVerbose or oOptions.fDebug:
print "run Time: %d:%02d:%02d" % ( iHrs, iMins, iSecs )
sys.exit( iRc or 0 )
``` |
{
"source": "2kwattz/Rock-Paper-Scissor-Game-",
"score": 4
} |
#### File: 2kwattz/Rock-Paper-Scissor-Game-/rps.py
```python
import random
import pyfiglet
#About game
def aboutgame():
print("Why is Rock Paper Scissors played?\n")
print("This game is played by children and adults and is popular all over the world.\n")
print("Apart from being a game played to pass time, the game is usually played in situations where something has to be chosen. It is similar in that way to other games like flipping the coin, throwing dice or drawing straws\n")
print("There is no room for cheating or for knowing what the other person is going to do so the results are usually very satisfying with no room for fighting or error.\n")
gameoptions = ['r','p','s']
userpoints = 0
computerpoints = 0
rounds = 0
#Main interface
banner = pyfiglet.figlet_format("Rock Paper Scissor")
print(banner)
print("Press: 1. Play 2. Rules 3. About Game 4. Exit\nCode by : 2kwattz\n")
menueoptions = int(input())
if menueoptions == 1:
print("Enter your name to continue")
username = input()
print(f"\nWelcome {username}, Lets Start !")
while(rounds<10):
rounds = rounds + 1
print("\n Press: 1.Rock 2.Paper 3.Scissor")
compinput = random.choice(gameoptions)
userinput = int(input())
if userinput == 1 and compinput == 'r':
print(f"Round {rounds} : {username} chose Rock , Computer chose Rock\n")
print(f"Its a Draw\t {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 1 and compinput == 'p':
computerpoints = computerpoints + 1
print(f"Round {rounds} : {username} chose Rock , Computer chose Paper\n")
print(f"Computer won this round , {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 1 and compinput == 's':
userpoints = userpoints + 1
print(f"Round {rounds} : {username} chose Rock , Computer chose Paper\n")
print(f"{username} won this round , {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 2 and compinput == 'r':
userpoints = userpoints + 1
print(f"Round {rounds} : {username} chose Paper , Computer chose Rock\n")
print(f"{username} won this round , {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 2 and compinput == 'p':
print(f"Round {rounds} : {username} chose Paper , Computer chose Paper\n")
print(f"Its a Draw\t {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 2 and compinput == 's':
computerpoints = computerpoints + 1
print(f"Round {rounds} : {username} chose Paper , Computer chose Scissors\n")
print(f"Computer won this Round , {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 3 and compinput == 'r':
computerpoints = computerpoints + 1
print(f"Round {rounds} : {username} chose Scissors , Computer chose Rock\n")
print(f"Computer won this round , {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 3 and compinput == 'p':
userpoints = userpoints + 1
print(f"Round {rounds} : {username} chose Scissors , Computer chose Paper\n")
print(f"{username} won this round , {username}'s Score : {userpoints} , Computer's Score : {computerpoints}")
elif userinput == 3 and compinput == 's':
print(f"Round {rounds} : {username} chose Scissor , Computer chose Scissor\n")
print(f"Its a Draw\t {username}'s Score : {userpoints} , Computer's score : {computerpoints}")
else:
print("My Dear Sir . You have not entered a valid number. \nPlease enter '1' for Rock\n'2' for paper\n'3' for scissor\n")
elif menueoptions == 2:
print("Rock wins against scissors.\nScissors win against paper.\nPaper wins against rock.\n")
elif menueoptions == 3:
    aboutgame()
``` |
{
"source": "2l47/pony.town-antiafk",
"score": 3
} |
#### File: 2l47/pony.town-antiafk/antiafk.py
```python
import imagehash
import os
import PIL.Image
import PIL.ImageGrab
import pynput
import random
import re
import schedule
import subprocess
import time
# === Configuration options ===
# Time in milliseconds to display notifications for
notification_duration = 15 * 1000
# Time in seconds before considering the user inactive
inactivity_timeout = 2 * 60
# Time in seconds to wait before executing actions depending on whether the user is active
inactive_execution_grace = 10
active_execution_grace = 15
# How many seconds early we anticipate getting disconnected by the server, to prevent waiting too long
afk_timeout_grace = 45
# === Variable instantiation ===
# Is the script currently moving the mouse?
global automating_mouse
automating_mouse = False
# Array of mouse movement coordinates from the past second
global mouse_points
mouse_points = []
# Epoch the user was last active at
global user_last_active
user_last_active = 0
# Sends a desktop notification
def notify(text):
print(text)
os.system(f"notify-send --expire-time={notification_duration} antiafk.py \"{text}\"")
# Returns the bounding box and chat indicator hash to be used for the game
# This is lazy and inefficient code
def getGameData():
# Detect what game we're on
window_title = subprocess.check_output("xdotool getactivewindow getwindowname", shell=True).decode().strip()
games = [
{
"name": "pony.town",
"regex": re.compile("^Pony Town.*"),
# 19x16 pixels
"bbox": (473, 1048, 492, 1064),
"chat_indicator_image": PIL.Image.open("pony.town_chat_indicator.png")
},
{
"name": "ashes.town",
"regex": re.compile("^Ashes Town.*"),
# 19x16 pixels: x+5, y-1 from pony.town
"bbox": (478, 1047, 497, 1063),
"chat_indicator_image": PIL.Image.open("ashes.town_chat_indicator.png")
}
]
for game in games:
if game["regex"].match(window_title):
chat_indicator_hash = imagehash.average_hash(game["chat_indicator_image"])
return game["bbox"], chat_indicator_hash
raise RuntimeError(f"Unknown game window title: {window_title}")
# Function to check whether the user has the textbox open
def user_typing():
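    # Compare an average hash of the captured chat-indicator region with the reference image; zero difference means the textbox is open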
bounding_box, chat_indicator_hash = getGameData()
image = PIL.ImageGrab.grab(bbox=bounding_box)
#image.save("capture.png")
capture_hash = imagehash.average_hash(image)
difference = chat_indicator_hash - capture_hash
#print(f"capture_hash: {capture_hash}")
#print(f"chat_indicator_hash: {chat_indicator_hash}")
#print(f"difference: {difference}")
return difference == 0
# Clears past mouse coordinates every second
def clear_points():
global mouse_points
mouse_points = []
# Helper function to check if the user is currently active
def user_considered_active():
return (time.time() - user_last_active) < inactivity_timeout
# Mouse event handler
def on_mouse_move(x, y):
global automating_mouse
global user_last_active
if automating_mouse:
print(f"Script moved mouse to {x}, {y} (ignoring mouse movement)")
else:
mouse_points.append((x, y))
# If the user has 100ms of activity, update the last active epoch
if len(mouse_points) >= 100:
now = time.time()
# If the user is currently considered active, don't log the fact that we're updating the epoch
if not user_considered_active():
print(f"[{now}] User activity epoch updated")
user_last_active = now
# Sends an initial warning and subsequent warnings before returning
def warning_timer(caller, execution_grace):
notify(f"{caller} called, waiting {execution_grace} seconds before executing...")
for i in range(execution_grace):
remaining = execution_grace - i
if remaining <= 5:
notify(f"Returning to {caller} in {remaining} seconds...")
time.sleep(1)
# Turns the head back and forth to simulate player activity
def headturn():
if user_typing():
return notify("Not running headturn() because user is typing...")
execution_grace = active_execution_grace if user_considered_active() else inactive_execution_grace
warning_timer("headturn()", execution_grace)
keypress_duration = random.randint(20, 100)
print(f"headturn() using keypress duration of {keypress_duration} ms")
interkey_delay = random.randint(100, 200)
print(f"headturn() using inter-key delay of {interkey_delay} ms")
os.system(f"xdotool key --clearmodifiers --delay {keypress_duration} --repeat 2 --repeat-delay {interkey_delay} h")
print("headturn() done.\n")
def main():
# === Start monitoring user activity ===
listener = pynput.mouse.Listener(on_move=on_mouse_move)
listener.start()
# === Schedule tasks ===
# Clear the mouse points array every second
schedule.every().second.do(clear_points)
# headturn should run every 10-15 minutes.
min_s = 10 * 60
max_s = (15 * 60) - (afk_timeout_grace + active_execution_grace)
schedule.every(min_s).to(max_s).seconds.do(headturn)
# === Run forever-ish ===
while True:
try:
schedule.run_pending()
time.sleep(1)
except (KeyboardInterrupt, RuntimeError) as ex:
print(f"\n\nShutting down due to {type(ex).__name__}: {ex}")
# Cancel all jobs
print("Clearing the schedule...")
schedule.clear()
print("Schedule cleared.")
# Shutdown the mouse listener
print("Shutting down the mouse listener (you may need to move your mouse)...")
pynput.mouse.Listener.stop(listener)
print("Mouse listener shut down.")
# Exit
return
if __name__ == "__main__":
main()
``` |
{
"source": "2l47/vps-ping-analysis",
"score": 3
} |
#### File: 2l47/vps-ping-analysis/main.py
```python
import cartopy.crs as ccrs
import configparser
import geopandas as gpd
import importlib
import matplotlib.pyplot as plt
import os
import sys
import wiuppy
# Load credentials and instantiate API
config = configparser.ConfigParser()
config.read(os.path.expanduser("~/.wiuppy"))
client = config["Auth"]["client"]
token = config["Auth"]["token"]
api = wiuppy.WIU(client, token)
# Load WIU data
wiu_continents = set()
wiu_countries = set()
wiu_NA_servers = []
wiu_EU_servers = []
wiu_servers = api.servers()
for d in wiu_servers:
wiu_continents.add(d["continent_name"])
wiu_countries.add(d["country"])
if d["country"] in ["United States", "Canada"]:
wiu_NA_servers.append(d["name"])
if d["continent_name"] == "Eurasia":
wiu_EU_servers.append(d["name"])
print(f"WIU Continents: {wiu_continents}")
print(f"\nWIU Countries: {wiu_countries}")
print(f"\nWIU NA servers: {wiu_NA_servers}")
print(f"\nWIU EU servers: {wiu_EU_servers}")
# Define acceptable ping range criteria
ping_ranges = (
{"name": "perfect", "range": range(0, 30)}, # 0 <= ping < 30
{"name": "satisfactory", "range": range(30, 60)}, # 30 <= ping < 60
{"name": "uncomfortable", "range": range(60, 90)}, # 60 <= ping < 90
{"name": "unplayable", "range": range(90, sys.maxsize)} # 90 <= ping < sys.maxsize
)
def test_datacenters(datacenter_names, uri_prefix, uri_suffix, from_locations, hosting_provider=None, continents=None, reuse_job=None, interactive_map=False):
if hosting_provider:
hosting_provider = f" at {hosting_provider}"
if continents:
hosting_provider += f" ({', '.join(continents)})"
for datacenter in datacenter_names:
job = wiuppy.Job(api)
if not reuse_job:
full_uri = f"{uri_prefix}{datacenter}{uri_suffix}"
print(f"{'=' * 8} Preparing job for {datacenter}{hosting_provider} ({full_uri})... {'=' * 8}")
job.uri = full_uri
job.servers = from_locations
job.tests = ["ping"]
# 100 pings at half-second intervals should take 50 seconds; we allot a reasonably generous deadline of 60 seconds
job.options = {"ping": {"interval": 0.500, "count": 100, "timeout": 60}}
# Submit the request and wait for the tests to finish
print("Submitting job...")
job.submit()
else:
job.id = reuse_job
print("Waiting for job retrieval...")
job.retrieve(poll=True)
# Reset server lists for the new datacenter we're testing
for designation in ping_ranges:
designation["servers"] = []
# Report the average ping time on all servers
print(f"Collecting regional ping times to {datacenter}...")
for (server, tests) in job.results["response"]["complete"].items():
summary = tests["ping"]["summary"]["summary"]
print(f"\n{server} -> {datacenter}")
print(f"\t{summary['transmitted']} packets transmitted, {summary['received']} received, {summary['packetloss']} packet loss, time {summary['time']}")
print(f"\trtt min/avg/max/mdev = {summary['min']}/{summary['avg']}/{summary['max']}/{summary['mdev']} ms")
for designation in ping_ranges:
if int(float(summary["avg"])) in designation["range"]:
designation["servers"].append((server, summary["avg"]))
break
# Load world file
world = gpd.read_file("data/ne_10m_admin_1_states_provinces.shp")
if continents:
world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
new_world = None
for c in continents:
print(f"Looking for {c}")
found = world[world.continent == c]
if new_world is None:
new_world = found
else:
new_world = new_world.append(found)
world = new_world
# Reproject to Mercator
world = world.to_crs(epsg=3395)
# This is our subplot
ax2 = plt.subplot(projection=ccrs.epsg(3395))
plt.title(f"Expected quality of regional latency for {datacenter}{hosting_provider}\n(Larger circles indicate better latency)")
# Rendering options
world.plot(
legend = True,
edgecolor = "black",
color = "blue",
linewidth = 0.25 if continents else 0.05,
alpha = 0.25,
ax = ax2
)
# Print results for this datacenter while generating the map
for designation in ping_ranges:
print(f"\n{designation['name']} ping areas: {designation['servers']}")
for city, ping in designation["servers"]:
# Calculate radii such that lower ping values are bigger circles, to visualize how well the datacenter covers the area
radius = ((-200/90) * float(ping)) + 200
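                # Linear mapping: 0 ms ping -> 200 km radius, 90 ms -> 0 km (circles shrink as latency grows)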
# Get this city's coordinates and plot the point
for s in wiu_servers:
if s["name"] == city:
latitude = float(s["latitude"])
longitude = float(s["longitude"])
# n_samples determines circle "roughness"
ax2.tissot(rad_km=radius, lats=[latitude,], lons=[longitude,], n_samples=36)
break
# Display or save our map
if interactive_map:
plt.show()
else:
plt.savefig(f"{datacenter}{hosting_provider}.png", dpi=600 if continents else 2400)#, bbox_inches="tight")
print("\n")
# Get rid of world and plot stuff
importlib.reload(plt)
# We're assuming the provided job was for the first location in the list
if reuse_job:
break
# Declare Linode's speedtest subdomains
linode_NA_locations = "fremont dallas atlanta toronto1 newark".split()
linode_EU_locations = "london frankfurt".split()
# Declare Hetzner's speedtest subdomains
hetzner_EU_locations = "nbg fsn hel".split()
# Get ping times for Linode's North America datacenters
test_datacenters(linode_NA_locations, "http://speedtest.", ".linode.com", wiu_NA_servers, hosting_provider="Linode", continents=["North America"])
# Get ping times for Linode's EU datacenters
test_datacenters(linode_EU_locations, "http://speedtest.", ".linode.com", wiu_EU_servers, hosting_provider="Linode", continents=["Europe", "Asia"])
# Get ping times for Hetzner's EU datacenters
test_datacenters(hetzner_EU_locations, "http://", ".icmp.hetzner.com", wiu_EU_servers, hosting_provider="Hetzner", continents=["Europe", "Asia"])
``` |
{
"source": "2legit/ytilities",
"score": 3
} |
#### File: ytilities/tests/testsample.py
```python
from .context import MyClass
def test_pass():
assert True, "dummy sample test"
def test_get_5():
myclass = MyClass()
assert myclass.get_5() == 5, "MyClass get_5 returns 5"
print(__name__)
print(MyClass().get_5())
``` |
{
"source": "2lu3/ai-watch-data",
"score": 3
} |
#### File: ai-watch-data/code/feature.py
```python
from dataset import Dataset
from util import Util
class Feature:
def __init__(self, use_features):
self.dataset = Dataset(use_features)
years = [y for y in range(2008, 2020)]
self.data = self.dataset.get_data(years, "tokyo")
def get_dataset(self):
return self.data.copy()
def register_feature(self, feature, feature_name):
Util.dump_feature(feature, feature_name)
def standarlization(self):
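        # z-score standardization: (x - mean) / std, using the population std (ddof=0)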
for name in self.data.columns:
            if isinstance(self.data[name][0], (int, float)):
self.data[name] = (
self.data[name] - self.data[name].mean()
) / self.data[name].std(ddof=0)
``` |
{
"source": "2lu3/useful-tools",
"score": 2
} |
#### File: useful-tools/ffmpeg-overwrap/converter.py
```python
from subprocess import Popen
import os
import pickle
import glob
import time
#from_file_types = ['MOV', 'MTS']
#from_file_types = ['MTS']
#from_file_types = ['MOV']
from_file_types = ['mp4']
to_file_type = 'mp4'
configurations = [
'-c:v h264_amf',
#'-c:v hevc_amf',
#'-vcodec libx264',
#'-c:a copy',
#'-crf 30',
#'-vf scale=-1:720',
'-b:v 1000k',
'-b:a 128k',
#'-an',
#'-tag:v hvc1',
'-vtag avc1',
]
#from_file_types = ['MTS']
#to_file_type = 'MOV'
#configurations = [
# '-f mov',
# #'-c:a copy',
# ]
class MovieConverter:
def __init__(self):
self.in_folder = 'input'
self.out_folder = 'output'
# dict('path', 'start', 'finished')
self.movie_paths = []
self.log_movie_paths = None
self.movie_converted = []
self.log_movie_converted = []
self.lock_file_name = 'lock'
self.movie_paths_pickle_name = 'pickle_path.pickle'
self.movie_converted_pickle_name = 'pickle_converted.pickle'
self.to_file_type = to_file_type
self.from_file_types = from_file_types
def update_log_file(self):
with open(self.movie_paths_pickle_name, 'wb') as f:
pickle.dump(self.movie_paths, f)
with open(self.movie_converted_pickle_name, 'wb') as f:
pickle.dump(self.movie_converted, f)
def load_log_file(self):
if os.path.exists(self.lock_file_name):
print('reading log files')
with open(self.movie_paths_pickle_name, 'rb') as f:
self.log_movie_paths = pickle.load(f)
with open(self.movie_converted_pickle_name, 'rb') as f:
self.log_movie_converted = pickle.load(f)
else:
print('no lock file')
print('not reading log files')
self.log_movie_paths = None
        # Create the lock file
with open(self.lock_file_name, 'w') as f:
f.write('You looked at me!')
def search_data_folder(self):
self.movie_paths =[]
for from_type in self.from_file_types:
self.movie_paths.extend(glob.glob(self.in_folder + '/**/*.'+from_type, recursive=True))
print('found these files')
for file_path in self.movie_paths:
print(file_path)
def create_to_path(self, from_path):
return self.out_folder + from_path.split('.')[0].removeprefix(self.in_folder) + '.' + self.to_file_type
def create_command(self, from_path):
command = 'ffmpeg -i "' + from_path + '"'
for conf in configurations:
command += ' ' + conf
command += ' "' + self.create_to_path(from_path) + '"'
return command
def ffmpeg(self, path):
command = self.create_command(path)
print('command', command)
popen = Popen(command)
popen.wait()
def run(self):
self.load_log_file()
self.search_data_folder()
if self.log_movie_paths is not None:
            print('continuing restored work')
self.movie_paths = self.log_movie_paths
self.movie_converted = self.log_movie_converted
self.movie_converted.extend([False for _ in range(len(self.movie_paths) - len(self.movie_converted))])
else:
print('start new task')
self.movie_converted = [False for _ in self.movie_paths]
for i in range(len(self.movie_paths)):
from_path = self.movie_paths[i]
to_path = self.create_to_path(from_path)
            if self.movie_converted[i]:
print('passed ' + to_path)
continue
            # If the output file already exists
if os.path.exists(to_path):
                # delete it first
os.remove(to_path)
time.sleep(1)
            # Create the destination folders if they do not exist yet
os.makedirs(to_path.removesuffix(os.path.basename(to_path)), exist_ok=True)
time.sleep(1)
self.ffmpeg(from_path)
self.movie_converted[i] = True
self.update_log_file()
print('all done')
os.remove(self.lock_file_name)
def main():
converter = MovieConverter()
converter.run()
if __name__ == '__main__':
main()
```
#### File: 2lu3/useful-tools/video-editor.py
```python
import os
from glob import glob
from subprocess import PIPE, Popen
class Video:
def __init__(self, name, output_path="output/"):
self.output_root = output_path
self.rename(name)
def rename(self, new_name):
self.name = new_name
self.filename = self.name + ".mp4"
self.path = self.output_root + self.filename
def remove(self):
os.remove(self.path)
class VideoEditor:
def __init__(self):
self.video_list = []
def to_filename(self, name):
if "." not in name:
return name + ".mp4"
else:
return name
def to_name(self, filename):
if "." in filename:
return os.path.splitext(os.path.basename(filename))[0]
else:
return filename
    # Load an initial video
def load_video(self, path, name=None):
if name is None:
name = self.to_name(path)
video = Video(name)
self.video_list.append(video)
    # Trim a video and save the result as a separate file
    def trim(self, index, start, end, name=None):
        # If no name was given, generate one
        if name is None:
            name = "video_" + str(len(self.video_list))
        # Destination Video
        new_video = Video(name)
        # Trim and save to a separate file
time = end - start
command = f"ffmpeg -ss {start} -i {self.video_list[index].path} -t {time} {new_video.path}"
proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, text=True)
result = proc.communicate()
print(result[0])
print(result[1])
        # Reflect the change in video_list
        self.video_list.append(new_video)
    # Rename a video
    def rename(self, index, new_name):
        # Create a new Video
        new_video = Video(new_name)
        # Rename the file on disk
        os.rename(self.video_list[index].path, new_video.path)
        # Remove the old entry
        self.video_list.pop(index)
        # Reflect the change in video_list
        self.video_list.append(new_video)
    # Concatenate videos
    def concat(self, index, name):
        # If no name was given, generate one
        if name is None:
            name = "video_" + str(len(self.video_list))
        # Destination Video
new_video = Video(name)
with open("temp.txt", "w") as f:
for i in index:
f.write("file ")
f.write(self.video_list[int(i)].path)
f.write("\n")
command = f"ffmpeg -f concat -i temp.txt -c copy {new_video.path}"
proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, text=True)
result = proc.communicate()
print(result[0])
print(result[1])
    # List all video names
    def show(self):
        return [video.name for video in self.video_list]
    # Delete a video
def remove(self, index):
os.remove(self.video_list[index].path)
self.video_list.pop(index)
commands = [
"trim name start end (save_name)",
"rename old_name new_name",
"concat name1, name2 (save_name)",
"del name",
"show",
"play name",
"exit",
]
video_editor = VideoEditor()
video_list = glob("./output/*.mp4")
for path in video_list:
video_editor.load_video(path)
def show_all_videos():
video_list = video_editor.show()
print("")
print("動画一覧")
for i, name in enumerate(video_list):
print(i, name)
print("")
def input_order():
for i, command in enumerate(commands):
print(i, command)
order = input("コマンドを入力して下さい(数字で)\n")
if order == "6" or order == "exit": # exit
return "exit", None
show_all_videos()
if order == "4" or order == "show":
return None, None
print("動画を選んで下さい(数字で)")
index = input()
if order == "0": # trim
print("開始位置を入力してください")
start = int(input())
print("終了位置を入力して下さい")
end = int(input())
print("新しい名前を入力して下さい")
name = input()
if name == "":
name = None
return "trim", [int(index), start, end, name]
elif order == "1": # rename
print("新しい名前を入力して下さい")
name = input()
if name == "":
name = None
return "rename", [int(index), name]
elif order == "2": # concat
print("新しい名前を入力して下さい")
name = input()
if name == "":
name = None
index = index.split(" ")
return "concat", [index, name]
elif order == "3": # del
return "del", [int(index)]
elif order == "4": # show
pass
elif order == "5": # play
return "play", [int(index)]
def main():
while True:
order, option = input_order()
if order == "trim":
video_editor.trim(option[0], option[1], option[2], option[3])
elif order == "rename":
video_editor.rename(option[0], option[1])
elif order == "concat":
video_editor.concat(option[0], option[1])
elif order == "del":
video_editor.remove(option[0])
elif order == "show":
show_all_videos()
elif order == "play":
print("自分でやれ")
elif order == "exit":
exit()
print("")
print("")
print("")
if __name__ == "__main__":
main()
``` |
{
"source": "2lu3/wrs2021",
"score": 3
} |
#### File: wrs2021/detect-closed-faces/retina.py
```python
import cv2
from matplotlib import pyplot as plt
from retinaface.pre_trained_models import get_model
import glob
model = get_model("resnet50_2020-07-20", max_size=2048)
model.eval()
def eval_image(path):
image = cv2.imread(path)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = model.predict_jsons(image_rgb)
print(results)
for r in results:
bbox = r['bbox']
if not bbox: continue
cv2.rectangle(image_rgb, (bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),thickness=10)
plt.imshow(image_rgb)
plt.show()
image_paths = glob.glob("edited/*.png")
for path in image_paths:
eval_image(path)
``` |
{
"source": "2lx/pyexcelreport",
"score": 2
} |
#### File: pyexcelreport/xlsreport/sqltabledata.py
```python
from .xlstable import *
import re
import datetime
import sys
from copy import copy
def maybe_sqlquoted(param):
"""Возвращает переданный параметр окруженный кавычками (если это необходимо в SQL)
"""
UUID_PATTERN = re.compile(r'^[{]?([\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12})[}]?$', re.IGNORECASE)
matched = UUID_PATTERN.match(param)
if matched:
param = "'{{{0:s}}}'".format(matched.group(1))
elif type(param) == [datetime.date]:
param = "'{0:s}'".format(param.strftime('%d/%m/%Y %H:%M:%S.%f'))
elif isinstance(param, str):
if param != "NULL":
param = "'{0:s}'".format(param)
return param
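# Example (hypothetical values):
#   maybe_sqlquoted('3f2504e0-4f89-11d3-9a0c-0305e82c3301') -> "'{3f2504e0-4f89-11d3-9a0c-0305e82c3301}'"
#   maybe_sqlquoted('NULL') -> 'NULL' (passed through unquoted)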
import pymssql
from . import config
class MSSql():
def __init__(self, server=config.mssql_server,
user=config.db_login,
password=<PASSWORD>,
database=config.db_catalog):
self.conn = pymssql.connect(server=server,
user=user,
password=password,
database=database,
autocommit=True)
def __del__(self):
self.conn.close
def get_table_data(self, sqlquery, table_info):
"""Возвращает все поля из результатов запроса в формате списка значений (в порядке полей из table_info)
"""
cursor = self.conn.cursor(as_dict=True)
print("Выполняется запрос: '{0:s}'".format(sqlquery))
cursor.execute(sqlquery)
table_data = []
for row in cursor:
# row_data = ()
row_data = []
for ti in table_info:
# row_data += (row[ti.fname],) if (ti.fname != '') and (not ti.fname.startswith('__')) else ('',)
if (ti.fname != '') and (not ti.fname.startswith('__')):
row_data.append(row[ti.fname])
else:
row_data.append(ti.default_value)
table_data.append(row_data)
return table_data
def get_dict_data(self, sqlquery):
"""Возвращает все поля из результатов запроса в формате списка словарей
"""
cursor = self.conn.cursor(as_dict=True)
print("Выполняется запрос: '{0:s}'".format(sqlquery))
cursor.execute(sqlquery)
dict_data = []
for row in cursor:
# one_dict = dict()
# for fld in row.keys():
# one_dict[fld] = row[fld]
dict_data.append(copy(row))
return dict_data
```
#### File: pyexcelreport/xlsreport/xlsutils.py
```python
import os
from openpyxl import Workbook, worksheet
def workbook_create():
wb = Workbook()
for i in wb.worksheets:
wb.remove(i)
return wb
def sheet_create(wb, main_sheet_name):
ws = wb.create_sheet( main_sheet_name )
for i in range(len(wb.sheetnames)):
        if wb.sheetnames[i] == main_sheet_name:
break
wb.active = i
return ws
def sheet_print_setup(ws, porientation, pwidth):
'https://openpyxl.readthedocs.io/en/2.5/_modules/openpyxl/worksheet/page.html'
ws.print_options.horizontalCentered = True
ws.print_options.verticalCentered = False
ws.print_options.headings = False
ws.print_options.gridLines = False
ws.page_margins.left = 0.2
ws.page_margins.right = 0.2
ws.page_margins.top = 0.2
ws.page_margins.bottom = 0.2
ws.page_margins.header = 0
ws.page_margins.footer = 0
ws.sheet_properties.pageSetUpPr.fitToPage = True
ws.page_setup.fitToHeight = False
worksheet.Worksheet.set_printer_settings(ws, paper_size = 1, orientation=porientation)
ws.page_setup.fitToWidth = pwidth
if pwidth == 2:
ws.print_options.horizontalCentered = False
``` |
{
"source": "2mal16/whoosh",
"score": 2
} |
#### File: whoosh/stress/test_bigtable.py
```python
from __future__ import with_statement
from random import randint, shuffle
from nose.tools import assert_equal #@UnresolvedImport
from whoosh.compat import xrange, iteritems
from whoosh.filedb.filetables import HashWriter, HashReader
from whoosh.support.testing import TempStorage
def test_bigtable():
with TempStorage("bigtable") as st:
def randstring(min, max):
return "".join(chr(randint(1, 255))
for _ in xrange(randint(min, max)))
count = 100000
samp = dict((randstring(1,50), randstring(1,50))
for _ in xrange(count))
fhw = HashWriter(st.create_file("big.hsh"))
fhw.add_all(iteritems(samp))
fhw.close()
fhr = HashReader(st.open_file("big.hsh"))
keys = list(samp.keys())
shuffle(keys)
for key in keys:
assert_equal(samp[key], fhr[key])
set1 = set(iteritems(samp))
set2 = set(fhr.items())
assert_equal(set1, set2)
fhr.close()
```
#### File: whoosh/tests/test_misc.py
```python
from __future__ import with_statement
import os, threading, time
from whoosh.compat import u
from whoosh.util.filelock import try_for
from whoosh.util.numeric import length_to_byte, byte_to_length
from whoosh.util.testing import TempStorage
def test_now():
from whoosh.util import now
t1 = now()
t2 = now()
assert t1 <= t2
def test_storage_creation():
import tempfile, uuid
from whoosh import fields
from whoosh.filedb.filestore import FileStorage
schema = fields.Schema(text=fields.TEXT)
uid = uuid.uuid4()
dirpath = os.path.join(tempfile.gettempdir(), str(uid))
assert not os.path.exists(dirpath)
st = FileStorage(dirpath)
st.create()
assert os.path.exists(dirpath)
ix = st.create_index(schema)
with ix.writer() as w:
w.add_document(text=u("alfa bravo"))
w.add_document(text=u("bracho charlie"))
st.destroy()
assert not os.path.exists(dirpath)
def test_ramstorage():
from whoosh.filedb.filestore import RamStorage
st = RamStorage()
lock = st.lock("test")
lock.acquire()
lock.release()
def test_filelock_simple():
with TempStorage("simplefilelock") as st:
lock1 = st.lock("testlock")
lock2 = st.lock("testlock")
assert lock1 is not lock2
assert lock1.acquire()
assert st.file_exists("testlock")
assert not lock2.acquire()
lock1.release()
assert lock2.acquire()
assert not lock1.acquire()
lock2.release()
def test_threaded_filelock():
with TempStorage("threadedfilelock") as st:
lock1 = st.lock("testlock")
result = []
# The thread function tries to acquire the lock and then quits
def fn():
lock2 = st.lock("testlock")
gotit = try_for(lock2.acquire, 1.0, 0.1)
if gotit:
result.append(True)
lock2.release()
t = threading.Thread(target=fn)
# Acquire the lock in this thread
lock1.acquire()
# Start the other thread trying to acquire the lock
t.start()
# Wait for a bit
time.sleep(0.15)
# Release the lock
lock1.release()
# Wait for the other thread to finish
t.join()
# If the other thread got the lock, it should have appended True to the
# "results" list.
assert result == [True]
def test_length_byte():
source = list(range(11))
xform = [length_to_byte(n) for n in source]
result = [byte_to_length(n) for n in xform]
assert source == result
def test_clockface_lru():
from whoosh.util.cache import clockface_lru_cache
@clockface_lru_cache(5)
def test(n):
return n * 2
result = [test(n) for n in (1, 2, 3, 4, 5, 4, 3, 2, 10, 1)]
assert result == [2, 4, 6, 8, 10, 8, 6, 4, 20, 2]
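    # cache_info() reports (hits, misses, maxsize, currsize)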
assert test.cache_info() == (3, 7, 5, 5)
test.cache_clear()
assert test.cache_info() == (0, 0, 5, 0)
def test_double_barrel_lru():
from whoosh.util.cache import lru_cache
@lru_cache(5)
def test(n):
return n * 2
result = [test(n) for n in (1, 2, 3, 4, 5, 4, 3, 2, 10, 1)]
assert result == [2, 4, 6, 8, 10, 8, 6, 4, 20, 2]
# # hits, misses, maxsize and currsize
# assert test.cache_info() == (4, 6, 5, 5)
test.cache_clear()
# assert test.cache_info() == (0, 0, 5, 0)
def test_version_object():
from whoosh.util.versions import SimpleVersion as sv
assert sv.parse("1") == sv(1)
assert sv.parse("1.2") == sv(1, 2)
assert sv.parse("1.2b") == sv(1, 2, ex="b")
assert sv.parse("1.2rc") == sv(1, 2, ex="rc")
assert sv.parse("1.2b3") == sv(1, 2, ex="b", exnum=3)
assert sv.parse("1.2.3") == sv(1, 2, 3)
assert sv.parse("1.2.3a") == sv(1, 2, 3, "a")
assert sv.parse("1.2.3rc") == sv(1, 2, 3, "rc")
assert sv.parse("1.2.3a4") == sv(1, 2, 3, "a", 4)
assert sv.parse("1.2.3rc2") == sv(1, 2, 3, "rc", 2)
assert sv.parse("999.999.999c999") == sv(999, 999, 999, "c", 999)
assert sv.parse("1.2") == sv.parse("1.2")
assert sv("1.2") != sv("1.3")
assert sv.parse("1.0") < sv.parse("1.1")
assert sv.parse("1.0") < sv.parse("2.0")
assert sv.parse("1.2.3a4") < sv.parse("1.2.3a5")
assert sv.parse("1.2.3a5") > sv.parse("1.2.3a4")
assert sv.parse("1.2.3c99") < sv.parse("1.2.4")
assert sv.parse("1.2.3a4") != sv.parse("1.2.3a5")
assert sv.parse("1.2.3a5") != sv.parse("1.2.3a4")
assert sv.parse("1.2.3c99") != sv.parse("1.2.4")
assert sv.parse("1.2.3a4") <= sv.parse("1.2.3a5")
assert sv.parse("1.2.3a5") >= sv.parse("1.2.3a4")
assert sv.parse("1.2.3c99") <= sv.parse("1.2.4")
assert sv.parse("1.2") <= sv.parse("1.2")
assert sv(1, 2, 3).to_int() == 17213488128
assert sv.from_int(17213488128) == sv(1, 2, 3)
```
#### File: whoosh/tests/test_postings.py
```python
from __future__ import with_statement
from whoosh import analysis, fields
from whoosh.compat import xrange, u
from whoosh.codec import default_codec
from whoosh.formats import Existence, Frequency
from whoosh.formats import Positions, PositionBoosts
from whoosh.formats import Characters, CharacterBoosts
from whoosh.util.testing import TempStorage
def _roundtrip(content, format_, astype, ana=None):
with TempStorage("roundtrip") as st:
codec = default_codec()
seg = codec.new_segment(st, "")
ana = ana or analysis.StandardAnalyzer()
field = fields.FieldType(format=format_, analyzer=ana)
fw = codec.field_writer(st, seg)
fw.start_field("f1", field)
for text, _, weight, valuestring in sorted(field.index(content)):
fw.start_term(text)
fw.add(0, weight, valuestring, None)
fw.finish_term()
fw.finish_field()
fw.close()
tr = codec.terms_reader(st, seg)
ps = []
for fieldname, btext in tr.terms():
m = tr.matcher(fieldname, btext, format_)
ps.append((field.from_bytes(btext), m.value_as(astype)))
tr.close()
return ps
def test_existence_postings():
content = u("alfa bravo charlie")
assert _roundtrip(content, Existence(), "frequency") == [("alfa", 1), ("bravo", 1), ("charlie", 1)]
def test_frequency_postings():
content = u("alfa bravo charlie bravo alfa alfa")
assert _roundtrip(content, Frequency(), "frequency") == [("alfa", 3), ("bravo", 2), ("charlie", 1)]
def test_position_postings():
content = u("alfa bravo charlie bravo alfa alfa")
assert _roundtrip(content, Positions(), "positions") == [("alfa", [0, 4, 5]), ("bravo", [1, 3]), ("charlie", [2])]
assert _roundtrip(content, Positions(), "frequency") == [("alfa", 3), ("bravo", 2), ("charlie", 1)]
def test_character_postings():
content = u("alfa bravo charlie bravo alfa alfa")
assert _roundtrip(content, Characters(), "characters") == [("alfa", [(0, 0, 4), (4, 25, 29), (5, 30, 34)]),
("bravo", [(1, 5, 10), (3, 19, 24)]),
("charlie", [(2, 11, 18)])]
assert _roundtrip(content, Characters(), "positions") == [("alfa", [0, 4, 5]), ("bravo", [1, 3]), ("charlie", [2])]
assert _roundtrip(content, Characters(), "frequency") == [("alfa", 3), ("bravo", 2), ("charlie", 1)]
def test_posboost_postings():
pbs = PositionBoosts()
ana = analysis.RegexTokenizer(r"\S+") | analysis.DelimitedAttributeFilter()
content = u("alfa^2 bravo^0.1 charlie^2 bravo^0.5 alfa alfa")
assert _roundtrip(content, pbs, "position_boosts", ana) == [("alfa", [(0, 2), (4, 1), (5, 1)]),
("bravo", [(1, 0.1), (3, 0.5)]),
("charlie", [(2, 2)])]
assert _roundtrip(content, pbs, "positions", ana) == [("alfa", [0, 4, 5]), ("bravo", [1, 3]), ("charlie", [2])]
assert _roundtrip(content, pbs, "frequency", ana) == [("alfa", 3), ("bravo", 2), ("charlie", 1)]
def test_charboost_postings():
cbs = CharacterBoosts()
ana = analysis.RegexTokenizer(r"\S+") | analysis.DelimitedAttributeFilter()
content = u("alfa^2 bravo^0.1 charlie^2 bravo^0.5 alfa alfa")
assert _roundtrip(content, cbs, "character_boosts", ana) == [("alfa", [(0, 0, 4, 2), (4, 37, 41, 1), (5, 42, 46, 1)]),
("bravo", [(1, 7, 12, 0.1), (3, 27, 32, 0.5)]),
("charlie", [(2, 17, 24, 2)])]
assert _roundtrip(content, cbs, "position_boosts", ana) == [("alfa", [(0, 2), (4, 1), (5, 1)]),
("bravo", [(1, 0.1), (3, 0.5)]),
("charlie", [(2, 2)])]
assert _roundtrip(content, cbs, "characters", ana) == [("alfa", [(0, 0, 4), (4, 37, 41), (5, 42, 46)]),
("bravo", [(1, 7, 12), (3, 27, 32)]),
("charlie", [(2, 17, 24)])]
assert _roundtrip(content, cbs, "positions", ana) == [("alfa", [0, 4, 5]), ("bravo", [1, 3]), ("charlie", [2])]
assert _roundtrip(content, cbs, "frequency", ana) == [("alfa", 3), ("bravo", 2), ("charlie", 1)]
``` |
{
"source": "2martens/masterthesis",
"score": 3
} |
#### File: twomartens/masterthesis/evaluate.py
```python
from typing import Sequence, Union, Tuple, List
import numpy as np
def get_number_gt_per_class(labels: Sequence[Sequence[Sequence[int]]],
nr_classes: int) -> np.ndarray:
"""
Calculates the number of ground truth boxes per class and returns result.
Args:
labels: list of labels per image
nr_classes: number of classes
Returns:
numpy array with respective counts
"""
number_gt_per_class = np.zeros(shape=(nr_classes + 1), dtype=np.int)
label_range = range(len(labels))
# iterate over images
for i in label_range:
boxes = np.asarray(labels[i])
# iterate over boxes in image
for j in range(boxes.shape[0]):
class_id = int(boxes[j, 0])
if class_id > nr_classes:
continue
number_gt_per_class[class_id] += 1
return number_gt_per_class
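# Example (hypothetical labels; each box is [class_id, xmin, ymin, xmax, ymax]):
#   labels = [[[1, 0, 0, 10, 10]], [[1, 5, 5, 9, 9], [2, 0, 0, 4, 4]]]
#   get_number_gt_per_class(labels, nr_classes=2) -> array([0, 2, 1])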
def prepare_predictions(predictions: Sequence[Sequence[Sequence[Union[int, float]]]],
nr_classes: int) -> \
List[List[Tuple[int, float, int, int, int, int]]]:
"""
Prepares the predictions for further processing.
Args:
predictions: list of predictions per image
nr_classes: number of classes
Returns:
list of predictions per class
"""
results = [list() for _ in range(nr_classes + 1)]
    for i, batch_item in enumerate(predictions):
        image_id = i
        for box in batch_item:
            # index positions for the bounding box coordinates, reset for every
            # box so the shifts below do not accumulate across iterations
            xmin, ymin, xmax, ymax = 2, 3, 4, 5
            if len(box) == 7:
                # entropy is in box list, shifting the coordinates by one
                xmin, ymin, xmax, ymax = 3, 4, 5, 6
if len(box) > nr_classes:
class_id = np.argmax(box[:-5])
confidence = np.amax(box[:-5])
xmin = -5
ymin = -4
xmax = -3
ymax = -2
else:
class_id = int(box[0])
# Round the box coordinates to reduce the required memory.
confidence = box[1]
xmin_value = round(box[xmin])
ymin_value = round(box[ymin])
xmax_value = round(box[xmax])
ymax_value = round(box[ymax])
prediction = (image_id, confidence, xmin_value, ymin_value, xmax_value, ymax_value)
# Append the predicted box to the results list for its class.
results[class_id].append(prediction)
return results
def match_predictions(predictions: Sequence[Sequence[Tuple[int, float, int, int, int, int]]],
labels: Sequence[Sequence[Sequence[int]]],
iou_func: callable,
nr_classes: int,
iou_threshold: float,
border_pixels: str = "include",
sorting_algorithm: str = "quicksort") -> Tuple[List[np.ndarray], List[np.ndarray],
List[np.ndarray], List[np.ndarray],
np.ndarray, np.ndarray,
np.ndarray, np.ndarray]:
"""
Matches predictions to ground truth boxes.
Args:
predictions: list of predictions
labels: list of labels per image
iou_func: function to calculate the intersection over union
nr_classes: number of classes
iou_threshold: only matches higher than this value will be considered
border_pixels: How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
sorting_algorithm: Which sorting algorithm the matching algorithm should use. This
argument accepts any valid sorting algorithm for Numpy's `argsort()` function.
You will usually want to choose between 'quicksort' (fastest and most memory efficient,
but not stable) and 'mergesort' (slightly slower and less memory efficient, but stable).
The official Matlab evaluation algorithm uses a stable sorting algorithm, so this algorithm
is only guaranteed to behave identically if you choose 'mergesort' as the sorting algorithm,
but it will almost always behave identically even if you choose 'quicksort' (but no guarantees).
Returns:
true positives, false positives, cumulative true positives, and cumulative false positives for
each class, open set error as defined by Miller et al, cumulative open set error,
cumulative true positives and cumulative false positives over all classes
"""
true_positives = [[]] # The true positives for each class, sorted by descending confidence.
false_positives = [[]] # The false positives for each class, sorted by descending confidence.
cumulative_true_positives = [[]]
cumulative_false_positives = [[]]
most_predictions = -1
for class_id in range(1, nr_classes + 1):
nr_predictions = len(predictions[class_id])
if nr_predictions > most_predictions:
most_predictions = nr_predictions
open_set_error = np.zeros(most_predictions, dtype=np.int)
true_positives_micro = np.zeros(most_predictions, dtype=np.int)
false_positives_micro = np.zeros(most_predictions, dtype=np.int)
for class_id in range(1, nr_classes + 1):
predictions_class = predictions[class_id]
# Store the matching results in these lists:
true_pos = np.zeros(len(predictions_class),
dtype=np.int) # 1 for every prediction that is a true positive, 0 otherwise
false_pos = np.zeros(len(predictions_class),
dtype=np.int) # 1 for every prediction that is a false positive, 0 otherwise
# In case there are no predictions at all for this class, we're done here.
if len(predictions_class) == 0:
true_positives.append(true_pos)
false_positives.append(false_pos)
cumulative_true_pos = np.cumsum(true_pos) # Cumulative sums of the true positives
cumulative_false_pos = np.cumsum(false_pos) # Cumulative sums of the false positives
cumulative_true_positives.append(cumulative_true_pos)
cumulative_false_positives.append(cumulative_false_pos)
continue
# Convert the predictions list for this class into a structured array so that we can sort it by confidence.
# Create the data type for the structured array.
preds_data_type = np.dtype([('image_id', np.int32),
('confidence', 'f4'),
('xmin', 'f4'),
('ymin', 'f4'),
('xmax', 'f4'),
('ymax', 'f4')])
# Create the structured array
predictions_class = np.array(predictions_class, dtype=preds_data_type)
# Sort the detections by decreasing confidence.
descending_indices = np.argsort(-predictions_class['confidence'], kind=sorting_algorithm)
predictions_sorted = predictions_class[descending_indices]
# Keep track of which ground truth boxes were already matched to a detection.
gt_matched = {}
for i in range(len(predictions_class)):
prediction = predictions_sorted[i]
image_id = prediction['image_id']
# Convert the structured array element to a regular array
pred_box = np.asarray(list(prediction[['xmin', 'ymin', 'xmax', 'ymax']]))
# Get the relevant ground truth boxes for this prediction,
# i.e. all ground truth boxes that match the prediction's
# image ID and class ID.
gt = labels[image_id]
gt = np.asarray(gt)
class_mask = gt[:, 0] == class_id
gt = gt[class_mask]
if gt.size == 0:
# If the image doesn't contain any objects of this class,
# the prediction becomes a false positive.
false_pos[i] = 1
false_positives_micro[i] += 1
open_set_error[i] += 1
continue
# Compute the IoU of this prediction with all ground truth boxes of the same class.
overlaps = iou_func(boxes1=gt[:, [1, 2, 3, 4]],
boxes2=pred_box,
coords='corners',
mode='element-wise',
border_pixels=border_pixels)
# For each detection, match the ground truth box with the highest overlap.
# It's possible that the same ground truth box will be matched to multiple
# detections.
gt_match_index = np.argmax(overlaps)
gt_match_overlap = overlaps[gt_match_index]
if gt_match_overlap < iou_threshold:
# False positive, IoU threshold violated:
# Those predictions whose matched overlap is below the threshold become
# false positives.
false_pos[i] = 1
false_positives_micro[i] += 1
else:
if image_id not in gt_matched:
# True positive:
# If the matched ground truth box for this prediction hasn't been matched to a
# different prediction already, we have a true positive.
true_pos[i] = 1
true_positives_micro[i] += 1
gt_matched[image_id] = np.zeros(shape=(gt.shape[0]), dtype=np.bool)
gt_matched[image_id][gt_match_index] = True
elif not gt_matched[image_id][gt_match_index]:
# True positive:
# If the matched ground truth box for this prediction hasn't been matched to a
# different prediction already, we have a true positive.
true_pos[i] = 1
true_positives_micro[i] += 1
gt_matched[image_id][gt_match_index] = True
else:
# False positive, duplicate detection:
# If the matched ground truth box for this prediction has already been matched
# to a different prediction previously, it is a duplicate detection for an
# already detected object, which counts as a false positive.
false_pos[i] = 1
false_positives_micro[i] += 1
true_positives.append(true_pos)
false_positives.append(false_pos)
cumulative_true_pos = np.cumsum(true_pos) # Cumulative sums of the true positives
cumulative_false_pos = np.cumsum(false_pos) # Cumulative sums of the false positives
cumulative_true_positives.append(cumulative_true_pos)
cumulative_false_positives.append(cumulative_false_pos)
cumulative_open_set_error = np.cumsum(open_set_error)
cumulative_false_positives_micro = np.cumsum(false_positives_micro)
cumulative_true_positives_micro = np.cumsum(true_positives_micro)
return (
true_positives, false_positives, cumulative_true_positives, cumulative_false_positives,
open_set_error, cumulative_open_set_error,
cumulative_true_positives_micro, cumulative_false_positives_micro
)
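# A minimal sketch of a compatible `iou_func`, assuming corner coordinates and the
# 'element-wise' mode used above; `_toy_iou` and the example data below are
# hypothetical, not part of the original project.
def _toy_iou(boxes1, boxes2, coords='corners', mode='element-wise', border_pixels='include'):
    # boxes1: (n, 4) ground truth corners; boxes2: (4,) prediction corners
    d = {'include': 1, 'exclude': -1, 'half': 0}[border_pixels]
    inter_w = np.maximum(0, np.minimum(boxes1[:, 2], boxes2[2]) - np.maximum(boxes1[:, 0], boxes2[0]) + d)
    inter_h = np.maximum(0, np.minimum(boxes1[:, 3], boxes2[3]) - np.maximum(boxes1[:, 1], boxes2[1]) + d)
    intersection = inter_w * inter_h
    area1 = (boxes1[:, 2] - boxes1[:, 0] + d) * (boxes1[:, 3] - boxes1[:, 1] + d)
    area2 = (boxes2[2] - boxes2[0] + d) * (boxes2[3] - boxes2[1] + d)
    return intersection / (area1 + area2 - intersection)
# Example: one image, one class-1 ground truth box, one overlapping prediction:
# labels = [[[1, 10, 10, 50, 50]]]
# preds = prepare_predictions([[[1, 0.9, 12, 11, 49, 52]]], nr_classes=6)
# tp, fp, *rest = match_predictions(preds, labels, _toy_iou, nr_classes=6, iou_threshold=0.5)
# tp[1]  # -> array([1]): the prediction matches the ground truth (IoU ~ 0.87)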
def get_precision_recall(number_gt_per_class: np.ndarray,
cumulative_true_positives: Sequence[np.ndarray],
cumulative_false_positives: Sequence[np.ndarray],
cumulative_true_positives_micro: np.ndarray,
cumulative_false_positives_micro: np.ndarray,
nr_classes: int) -> Tuple[List[np.ndarray], List[np.ndarray],
np.ndarray, np.ndarray,
np.ndarray, np.ndarray]:
"""
Computes the precision and recall values and returns them.
Args:
number_gt_per_class: number of ground truth bounding boxes per class
cumulative_true_positives: cumulative true positives per class
cumulative_false_positives: cumulative false positives per class
cumulative_true_positives_micro: cumulative true positives over all classes
cumulative_false_positives_micro: cumulative false positives over all classes
nr_classes: number of classes
Returns:
cumulative precisions and cumulative recalls per class,
micro averaged precision/recall, and
macro averaged precision/recall
"""
cumulative_precisions = [[]]
cumulative_recalls = [[]]
cumulative_precision_micro = np.zeros(cumulative_true_positives_micro.shape, dtype=np.float)
cumulative_recall_micro = np.zeros(cumulative_true_positives_micro.shape, dtype=np.float)
cumulative_precision_macro = np.zeros_like(cumulative_precision_micro)
cumulative_recall_macro = np.zeros_like(cumulative_recall_micro)
total_number_gt = 0
number_of_nonzero_classes = 0
# Iterate over all classes.
for class_id in range(1, nr_classes + 1):
if number_gt_per_class[class_id] == 0:
cumulative_precisions.append([])
cumulative_recalls.append([])
continue
tp = cumulative_true_positives[class_id]
fp = cumulative_false_positives[class_id]
cumulative_precision = np.where(tp + fp > 0, tp / (tp + fp), 0) # 1D array with shape `(num_predictions,)`
number_gt = number_gt_per_class[class_id]
total_number_gt += number_gt
cumulative_recall = tp / number_gt # 1D array with shape `(num_predictions,)`
cumulative_precisions.append(cumulative_precision)
cumulative_recalls.append(cumulative_recall)
diff_to_largest_class = cumulative_precision_micro.shape[0] - cumulative_precision.shape[0]
if diff_to_largest_class:
highest_precision = cumulative_precision[-1] if cumulative_precision.shape[0] else 0
highest_recall = cumulative_recall[-1] if cumulative_recall.shape[0] else 0
repeated_last_precision = np.tile(highest_precision, diff_to_largest_class)
repeated_last_recall = np.tile(highest_recall, diff_to_largest_class)
extended_precision = np.concatenate((cumulative_precision, repeated_last_precision))
extended_recall = np.concatenate((cumulative_recall, repeated_last_recall))
cumulative_precision_macro += extended_precision
cumulative_recall_macro += extended_recall
else:
cumulative_precision_macro += cumulative_precision
cumulative_recall_macro += cumulative_recall
number_of_nonzero_classes += 1
# calculate micro averaged precision and recall
tp = cumulative_true_positives_micro
fp = cumulative_false_positives_micro
cumulative_precision_micro = np.where(tp + fp > 0, tp / (tp + fp), 0)
cumulative_recall_micro = tp / total_number_gt
# calculate macro averaged precision and recall
cumulative_precision_macro /= number_of_nonzero_classes
cumulative_recall_macro /= number_of_nonzero_classes
return (cumulative_precisions, cumulative_recalls,
cumulative_precision_micro, cumulative_recall_micro,
cumulative_precision_macro, cumulative_recall_macro
)
def get_f1_score(cumulative_precisions: List[np.ndarray],
cumulative_recalls: List[np.ndarray],
cumulative_precision_micro: np.ndarray,
cumulative_recall_micro: np.ndarray,
cumulative_precision_macro: np.ndarray,
cumulative_recall_macro: np.ndarray,
nr_classes: int) -> Tuple[List[np.ndarray],
np.ndarray, np.ndarray]:
"""
Computes the F1 score for every class.
Args:
cumulative_precisions: cumulative precisions for each class
cumulative_recalls: cumulative recalls for each class
cumulative_precision_micro: cumulative precision micro averaged
cumulative_recall_micro: cumulative recall micro averaged
cumulative_precision_macro: cumulative precision macro averaged
cumulative_recall_macro: cumulative recall macro averaged
nr_classes: number of classes
Returns:
cumulative F1 score per class,
cumulative F1 score micro averaged, cumulative F1 score macro averaged
"""
cumulative_f1_scores = [[]]
# iterate over all classes
for class_id in range(1, nr_classes + 1):
cumulative_precision = cumulative_precisions[class_id]
cumulative_recall = cumulative_recalls[class_id]
if not np.count_nonzero(cumulative_precision + cumulative_recall):
cumulative_f1_scores.append([])
continue
f1_score = 2 * ((cumulative_precision * cumulative_recall) / (cumulative_precision + cumulative_recall + 0.001))
cumulative_f1_scores.append(f1_score)
f1_score_micro = 2 * ((cumulative_precision_micro * cumulative_recall_micro) /
(cumulative_precision_micro + cumulative_recall_micro + 0.001))
f1_score_macro = 2 * ((cumulative_precision_macro * cumulative_recall_macro) /
(cumulative_precision_macro + cumulative_recall_macro + 0.001))
return cumulative_f1_scores, f1_score_micro, f1_score_macro
def get_mean_average_precisions(cumulative_precisions: List[np.ndarray],
cumulative_recalls: List[np.ndarray],
nr_classes: int) -> List[float]:
"""
Computes the average precision for each class and returns them.
Args:
cumulative_precisions: cumulative precisions for each class
cumulative_recalls: cumulative recalls for each class
nr_classes: number of classes
Returns:
average precision per class
"""
average_precisions = [0.0]
# Iterate over all classes.
for class_id in range(1, nr_classes + 1):
cumulative_precision = cumulative_precisions[class_id]
cumulative_recall = cumulative_recalls[class_id]
# We will compute the precision at all unique recall values.
unique_recalls, unique_recall_indices, unique_recall_counts = np.unique(cumulative_recall,
return_index=True,
return_counts=True)
# Store the maximal precision for each recall value and the absolute difference
# between any two unique recall values in the lists below. The products of these
# two numbers constitute the rectangular areas whose sum will be our numerical
# integral.
maximal_precisions = np.zeros_like(unique_recalls)
recall_deltas = np.zeros_like(unique_recalls)
# Iterate over all unique recall values in reverse order. This saves a lot of computation:
# For each unique recall value `r`, we want to get the maximal precision value obtained
# for any recall value `r* >= r`. Once we know the maximal precision for the last `k` recall
values after a given iteration, then in the next iteration, in order to compute the maximal
# precisions for the last `l > k` recall values, we only need to compute the maximal precision
# for `l - k` recall values and then take the maximum between that and the previously computed
# maximum instead of computing the maximum over all `l` values.
# We skip the very last recall value, since the precision after the last recall value
# 1.0 is defined to be zero.
for i in range(len(unique_recalls) - 2, -1, -1):
begin = unique_recall_indices[i]
end = unique_recall_indices[i + 1]
# When computing the maximal precisions, use the maximum of the previous iteration to
# avoid unnecessary repeated computation over the same precision values.
# The maximal precisions are the heights of the rectangle areas of our integral under
# the precision-recall curve.
maximal_precisions[i] = np.maximum(np.amax(cumulative_precision[begin:end]),
maximal_precisions[i + 1])
# The differences between two adjacent recall values are the widths of our rectangle areas.
recall_deltas[i] = unique_recalls[i + 1] - unique_recalls[i]
average_precision = np.sum(maximal_precisions * recall_deltas)
average_precisions.append(average_precision)
return average_precisions
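# Worked example (hypothetical values): with cumulative precision [1.0, 0.5, 2/3] and
# cumulative recall [0.5, 0.5, 1.0] for one class, the unique recalls are [0.5, 1.0].
# The maximal precision over all recalls >= 0.5 is 1.0, and the precision past the
# last recall value is defined as zero, so AP = 1.0 * (1.0 - 0.5) = 0.5.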
def get_mean_average_precision(average_precisions: List[float]) -> float:
"""
Computes the mean average precision over all classes and returns it.
Args:
average_precisions: list of average precisions per class
Returns:
mean average precision over all classes
"""
average_precisions = np.copy(average_precisions)
average_precisions = average_precisions[average_precisions != 0.0]
return np.average(average_precisions[:])
```
#### File: twomartens/masterthesis/plotting.py
```python
import functools
import os
from typing import Dict
from typing import Tuple
from typing import Union, Sequence
import math
import numpy as np
from matplotlib import pyplot
from PIL import Image
def save_ssd_train_images(images: Union[np.ndarray, Sequence[str]], labels: np.ndarray,
output_path: str, coco_path: str,
image_size: int, get_coco_cat_maps_func: callable,
custom_string: str = None) -> None:
"""
Saves given images and labels to given output path.
The images are saved both in a raw version and with bounding boxes printed on them.
Args:
images: a NumPy array of images or a list of filenames
labels: a NumPy array of labels
output_path: path to save the images in
coco_path: path to the COCO data set
image_size: size of the resized images
get_coco_cat_maps_func: callable that returns the COCO category maps for a given annotation file
custom_string: optional custom string that is prepended to file names
"""
annotation_file_train = f"{coco_path}/annotations/instances_minival2014.json"
_, _, _, classes_to_names = get_coco_cat_maps_func(annotation_file_train)
colors = pyplot.cm.hsv(np.linspace(0, 1, 61)).tolist()
os.makedirs(output_path, exist_ok=True)
nr_images = len(images)
nr_digits = math.ceil(math.log10(nr_images))
custom_string = f"{custom_string}_" if custom_string is not None else ""
for i, train_image in enumerate(images):
instances = labels[i]
if type(train_image) is str:
with Image.open(train_image) as _image:
train_image = np.array(_image, dtype=np.uint8)
image = Image.fromarray(train_image)
image.save(f"{output_path}/"
f"{custom_string}train_image{str(i).zfill(nr_digits)}.png")
figure_filename = f"{output_path}/{custom_string}bboxes{str(i).zfill(nr_digits)}.png"
_draw_bbox_image(image=image,
filename=figure_filename,
draw_func=functools.partial(_draw_bboxes,
image_size=image_size,
classes_to_names=classes_to_names),
drawables=[
(colors, instances)
])
def draw_bbox_figure(image_filename: str, labels: Sequence[np.ndarray],
instances: Sequence[Sequence[np.ndarray]],
image_size: int,
output_path: str, coco_path: str,
get_coco_cat_maps_func: callable,
suffix: str) -> None:
"""
Draws a bounding box figure and saves it under the image file name under the output path.
Args:
image_filename: complete path to image file
labels: ground truth labels for image
instances: list of predictions to be compared against each other
image_size: size of the resized images
output_path: path to save the images in
coco_path: path to the COCO data set
get_coco_cat_maps_func: callable that returns the COCO category maps for a given annotation file
suffix: suffix of the saved figure file
"""
annotation_file_train = f"{coco_path}/annotations/instances_minival2014.json"
_, _, _, classes_to_names = get_coco_cat_maps_func(annotation_file_train)
colors = pyplot.cm.hsv(np.linspace(0, 1, len(instances))).tolist()
os.makedirs(output_path, exist_ok=True)
with Image.open(image_filename) as _image:
np_image = np.array(_image, dtype=np.uint8)
image = Image.fromarray(np_image)
figure_filename = f"{output_path}/{os.path.basename(image_filename)}_bboxes_{suffix}.png"
drawables = [(colors[i], _instances) for i, _instances in enumerate(instances)]
drawables.append(([35/255, 45/255, 215/255, 1.0], labels))
_draw_bbox_image(image=image,
filename=figure_filename,
draw_func=functools.partial(
_draw_bboxes,
image_size=image_size,
classes_to_names=classes_to_names
),
drawables=drawables)
def _draw_bbox_image(image: Image,
filename: str,
draw_func: callable,
drawables: Sequence[Tuple[Union[Sequence, float], Sequence[np.ndarray]]]):
figure = pyplot.figure(figsize=(6.4, 4.8))
pyplot.imshow(image)
current_axis = pyplot.gca()
for colors, instances in drawables:
draw_func(instances=instances,
axis=current_axis,
colors=colors)
pyplot.savefig(filename, bbox_inches='tight')
pyplot.close(figure)
def _draw_bboxes(instances: Sequence[np.ndarray], axis: pyplot.Axes,
image_size: int,
colors: Sequence,
classes_to_names: Dict[int, str]) -> None:
for instance in instances:
if not len(instance):
continue
else:
class_id, confidence, xmin, ymin, xmax, ymax = _get_bbox_info(instance, image_size)
if class_id == 0:
continue
if len(np.shape(colors)) == 1:
color = colors
else:
color = colors[class_id]
label = f"{classes_to_names[class_id]} - {confidence}" \
if confidence is not None \
else f"{classes_to_names[class_id]}"
axis.add_patch(
pyplot.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=False,
linewidth=2))
axis.text(xmin, ymin, label, size='x-large', color='white',
bbox={'facecolor': color, 'alpha': 1.0})
def _get_bbox_info(instance: np.ndarray, image_size: int) -> Tuple[int, Union[float, None], float, float, float, float]:
if len(instance) == 5: # ground truth
class_id = int(instance[0])
confidence = None
xmin = instance[1]
ymin = instance[2]
xmax = instance[3]
ymax = instance[4]
elif len(instance) == 7: # predictions
class_id = int(instance[0])
confidence = instance[1]
xmin = instance[3]
ymin = instance[4]
xmax = instance[5]
ymax = instance[6]
elif len(instance) == 6: # predictions using Caffe method
class_id = int(instance[0])
confidence = instance[1]
xmin = instance[2]
ymin = instance[3]
xmax = instance[4]
ymax = instance[5]
else:
instance = np.copy(instance)
class_id = np.argmax(instance[:-12], axis=0)
confidence = np.amax(instance[:-12], axis=0)
instance[-12:-8] *= instance[-4:] # multiply with variances
instance[[-11, -9]] *= np.expand_dims(instance[-5] - instance[-7], axis=-1)
instance[[-12, -10]] *= np.expand_dims(instance[-6] - instance[-8], axis=-1)
instance[-12:-8] += instance[-8:-4]
instance[-12:-8] *= image_size
xmin = instance[-12]
ymin = instance[-11]
xmax = instance[-10]
ymax = instance[-9]
return class_id, confidence, xmin, ymin, xmax, ymax
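# Instance formats handled above, by length: 5 = ground truth box, 7 = prediction with
# an extra entropy entry, 6 = Caffe-style prediction; anything else is treated as a raw
# SSD-encoded vector whose last 12 entries hold offsets, anchor coordinates and
# variances in groups of four.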
```
#### File: twomartens/masterthesis/weights.py
```python
import shutil
import h5py
import math
import numpy as np
from twomartens.masterthesis.ssd_keras.misc_utils import tensor_sampling_utils
def subsample_ssd(ssd_weights_file: str, subsampled_weights_file: str) -> None:
"""
Sub-samples the weights of the SSD 300 network.
Args:
ssd_weights_file: file path for the SSD weights file
subsampled_weights_file: file path for file with sub-sampled weights
"""
shutil.copy(ssd_weights_file, subsampled_weights_file)
with h5py.File(subsampled_weights_file) as weights_destination_file:
classifier_names = ["conv4_3_norm_mbox_conf",
"fc7_mbox_conf",
"conv6_2_mbox_conf",
"conv7_2_mbox_conf",
"conv8_2_mbox_conf",
"conv9_2_mbox_conf"]
n_classes_source = 81
classes_of_interest = [i for i in range(61)] # first 60 classes are kept plus background
for name in classifier_names:
# Get the trained weights for this layer from the source HDF5 weights file.
kernel = weights_destination_file[name][name]['kernel:0'].value
bias = weights_destination_file[name][name]['bias:0'].value
# Get the shape of the kernel. We're interested in sub-sampling
# the last dimension, 'o'.
height, width, in_channels, out_channels = kernel.shape
subsampling_indices = []
for i in range(int(math.floor(out_channels / n_classes_source))):
indices = np.array(classes_of_interest) + i * n_classes_source
subsampling_indices.append(indices)
subsampling_indices = list(np.concatenate(subsampling_indices))
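# Example of what this computes for SSD300 on COCO: 'conv4_3_norm_mbox_conf' has
# 4 boxes * 81 classes = 324 output channels; keeping 61 class indices per box
# yields 4 * 61 = 244 channels after sub-sampling.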
# Sub-sample the kernel and bias.
# The `sample_tensors()` function used below provides extensive
# documentation, so don't hesitate to read it if you want to know
# what exactly is going on here.
new_kernel, new_bias = tensor_sampling_utils.sample_tensors(
weights_list=[kernel, bias],
sampling_instructions=[height, width, in_channels, subsampling_indices],
axes=[[3]],
# The one bias dimension corresponds to the last kernel dimension.
init=['gaussian', 'zeros'],
mean=0.0,
stddev=0.005
)
# Delete the old weights from the destination file.
del weights_destination_file[name][name]['kernel:0']
del weights_destination_file[name][name]['bias:0']
# Create new data sets for the sub-sampled weights.
weights_destination_file[name][name].create_dataset(name='kernel:0', data=new_kernel)
weights_destination_file[name][name].create_dataset(name='bias:0', data=new_bias)
if __name__ == "__main__":
weights_file = "data/weights/ssd/VGG_coco_SSD_300x300_iter_400000.h5"
weights_destination_file = "data/weights/ssd/VGG_coco_SSD_300x300_iter_400000_subsampled.h5"
subsample_ssd(weights_file, weights_destination_file)
``` |
{
"source": "2mas/TodoList",
"score": 2
} |
#### File: 2mas/TodoList/todo_list.py
```python
import os
import json
import sys
import sublime
import sublime_plugin
class TodoListMainObject(object):
def __init__(self):
self.selected_list = None
try:
if os.path.exists(TodoListMainObject.file_path):
with open(TodoListMainObject.file_path) as json_file:
TodoListMainObject.loaded_lists = json.load(json_file)
else:
TodoListMainObject.save_file()
except IOError:
return None
def move_todo(self, idx, new_idx):
todo_item = TodoListMainObject.loaded_lists[
self.selected_list
].pop(idx)
TodoListMainObject.loaded_lists[
self.selected_list
].insert(
new_idx,
todo_item
)
TodoListMainObject.save_file()
@staticmethod
def save_file():
with open(TodoListMainObject.file_path, 'w') as outfile:
json.dump(TodoListMainObject.loaded_lists, outfile)
@staticmethod
def get_setting(key, default=None):
settings = sublime.load_settings('TodoList.sublime-settings')
if os.name == 'nt':
os_specific_settings = sublime.load_settings(
'TodoList (Windows).sublime-settings'
)
elif sys.platform == 'darwin':
os_specific_settings = sublime.load_settings(
'TodoList (OSX).sublime-settings'
)
else:
os_specific_settings = sublime.load_settings(
'TodoList (Linux).sublime-settings'
)
return os_specific_settings.get(key, settings.get(key, default))
""" Class variables """
loaded_lists = {}
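# get_setting is still a staticmethod descriptor at class-definition time, so it is
# unwrapped via __func__ below to make it callable from the class body.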
custom_file_path = get_setting.__func__('file_path')
"""defaults to user-path of OS"""
if custom_file_path:
file_path = custom_file_path
else:
file_path = os.path.expanduser(
os.path.join('~', 'todo_list.json')
)
class TodoListMenuCommand(sublime_plugin.WindowCommand):
def run(self):
self.menu = []
if not getattr(self.window, 'todo_instance', None):
self.window.todo_instance = TodoListMainObject()
if self.window.todo_instance.selected_list:
if len(TodoListMainObject.loaded_lists[
self.window.todo_instance.selected_list
]) > 0:
self.menu.append(
{
'text': 'Todos: List all',
'command': 'todo_list_list_all'
}
)
self.menu.append(
{
'text': 'Todos: Add', 'command': 'todo_list_add_todo'
}
)
if len(TodoListMainObject.loaded_lists) > 0:
self.menu.append(
{
'text': 'List: Switch',
'command': 'todo_list_load_list'
}
)
self.menu.append(
{
'text': 'List: Delete current',
'command': 'todo_list_delete_current_list'
}
)
else:
if len(TodoListMainObject.loaded_lists) > 0:
self.menu.append(
{
'text': 'List: Load', 'command': 'todo_list_load_list'
}
)
self.menu.append(
{
'text': 'List: Delete',
'command': 'todo_list_delete_list'
}
)
self.menu.append(
{
'text': 'List: Create new', 'command': 'todo_list_create_list'
}
)
self.window.show_quick_panel(
[item['text'] for item in self.menu],
self.on_done,
)
def on_done(self, idx: int):
if idx >= 0 and idx < len(self.menu):
self.window.run_command(self.menu[idx]['command'])
class TodoListCreateListCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel(
'Enter list name:',
'',
self.on_done,
None,
None
)
def on_done(self, list_name: str):
TodoListMainObject.loaded_lists[list_name] = []
self.window.todo_instance.selected_list = list_name
TodoListMainObject.save_file()
self.window.run_command('todo_list_menu')
class TodoListLoadListCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.show_quick_panel(
list(TodoListMainObject.loaded_lists.keys()),
self.on_done,
)
def on_done(self, list_idx: int):
self.window.todo_instance.selected_list = list(
TodoListMainObject.loaded_lists.keys()
)[list_idx]
self.window.run_command('todo_list_menu')
class TodoListDeleteListCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.show_quick_panel(
list(TodoListMainObject.loaded_lists.keys()),
self.on_done,
)
def on_done(self, list_idx: int):
list_name = list(
TodoListMainObject.loaded_lists.keys()
)[list_idx]
del TodoListMainObject.loaded_lists[list_name]
TodoListMainObject.save_file()
if self.window.todo_instance.selected_list == list_name:
self.window.todo_instance.selected_list = None
self.window.run_command('todo_list_menu')
class TodoListDeleteCurrentListCommand(sublime_plugin.WindowCommand):
def run(self):
if self.window.todo_instance.selected_list:
del TodoListMainObject.loaded_lists[
self.window.todo_instance.selected_list
]
TodoListMainObject.save_file()
self.window.todo_instance.selected_list = None
self.window.run_command('todo_list_menu')
class TodoListListAllCommand(sublime_plugin.WindowCommand):
def run(self):
if (not getattr(self.window, 'todo_instance', None) or
not self.window.todo_instance.selected_list):
self.window.run_command('todo_list_menu')
self.window.show_quick_panel(
TodoListMainObject.loaded_lists[
self.window.todo_instance.selected_list
],
self.on_done,
)
def on_done(self, idx: int):
if idx >= 0:
self.submenu = []
if idx > 0:
self.submenu.append({
'text': 'Move up',
'command': 'todo_list_move_up',
'todo_idx': idx
})
if idx < len(TodoListMainObject.loaded_lists[
self.window.todo_instance.selected_list
]) - 1:
self.submenu.append({
'text': 'Move down',
'command': 'todo_list_move_down',
'todo_idx': idx
})
self.submenu.append({
'text': 'Remove item',
'command': 'todo_list_remove_todo',
'todo_idx': idx
})
self.window.show_quick_panel(
[item['text'] for item in self.submenu],
self.on_sub_select
)
elif idx == -1:
self.window.run_command('todo_list_menu')
def on_sub_select(self, idx: int):
"""on selected action on todo-item"""
if idx >= 0 and idx < len(self.submenu):
self.window.run_command(
self.submenu[idx]['command'],
{'todo_idx': self.submenu[idx]['todo_idx']}
)
else:
self.window.run_command('todo_list_list_all')
class TodoListRemoveTodoCommand(sublime_plugin.WindowCommand):
def run(self, **args):
del TodoListMainObject.loaded_lists[
self.window.todo_instance.selected_list
][args['todo_idx']]
TodoListMainObject.save_file()
self.window.run_command('todo_list_list_all')
class TodoListAddTodoCommand(sublime_plugin.WindowCommand):
def on_done(self, text: str):
TodoListMainObject.loaded_lists[
self.window.todo_instance.selected_list
].append(text)
TodoListMainObject.save_file()
self.window.run_command('todo_list_list_all')
def run(self):
self.window.show_input_panel(
'Add to-do:',
'',
self.on_done,
None,
None
)
class TodoListMoveUpCommand(sublime_plugin.WindowCommand):
def run(self, **args):
self.window.todo_instance.move_todo(
args['todo_idx'], args['todo_idx'] - 1
)
self.window.run_command('todo_list_list_all')
class TodoListMoveDownCommand(sublime_plugin.WindowCommand):
def run(self, **args):
self.window.todo_instance.move_todo(
args['todo_idx'], args['todo_idx'] + 1
)
self.window.run_command('todo_list_list_all')
``` |
{
"source": "2m/conductr-cli",
"score": 3
} |
#### File: conductr-cli/conductr_cli/bundle_utils.py
```python
from zipfile import ZipFile
def short_id(bundle_id):
return '-'.join([part[:7] for part in bundle_id.split('-')])
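# e.g. short_id('45e0c477d3e5ea92aa8d85c0d8f3e25c-c52e3f8d0c58d8aa29ae5e3d774c0e54')
# -> '45e0c47-c52e3f8'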
def conf(bundle_path):
bundle_zip = ZipFile(bundle_path)
bundle_configuration = [bundle_zip.read(name) for name in bundle_zip.namelist() if name.endswith('bundle.conf')]
return bundle_configuration[0].decode('utf-8') if len(bundle_configuration) == 1 else ''
```
#### File: conductr-cli/conductr_cli/conduct_logs.py
```python
from conductr_cli import conduct_logging, conduct_info
import json
import requests
@conduct_logging.handle_connection_error
@conduct_logging.handle_http_error
def logs(args):
"""`conduct logs` command"""
response = requests.get('{}/logs/{}'.format(args.service, args.bundle))
conduct_logging.raise_for_status_inc_3xx(response)
data = [
{
'time': event['@cee']['body']['time'],
'host': event['@cee']['body']['host'],
'log': event['@cee']['body']['log']
} for event in json.loads(response.text)
]
data.insert(0, {'time': 'TIME', 'host': 'HOST', 'log': 'LOG'})
padding = 2
column_widths = dict(conduct_info.calc_column_widths(data), **{'padding': ' ' * padding})
for row in data:
print('''\
{time: <{time_width}}{padding}\
{host: <{host_width}}{padding}\
{log: <{log_width}}{padding}'''.format(**dict(row, **column_widths)))
```
#### File: conductr-cli/conductr_cli/conduct_unload.py
```python
from conductr_cli import conduct_url, conduct_logging
import requests
@conduct_logging.handle_connection_error
@conduct_logging.handle_http_error
def unload(args):
"""`conduct unload` command"""
path = 'bundles/{}'.format(args.bundle)
url = conduct_url.url(path, args)
response = requests.delete(url)
conduct_logging.raise_for_status_inc_3xx(response)
if args.verbose:
conduct_logging.pretty_json(response.text)
print('Bundle unload request sent.')
print('Print ConductR info with: conduct info{}'.format(args.cli_parameters))
```
#### File: conductr_cli/test/test_bundle_utils.py
```python
from unittest import TestCase
from conductr_cli import bundle_utils
from conductr_cli.test.cli_test_case import create_temp_bundle
import shutil
class ShortId(TestCase):
def test(self):
self.assertEqual(
bundle_utils.short_id('45e0c477d3e5ea92aa8d85c0d8f3e25c'),
'45e0c47')
self.assertEqual(
bundle_utils.short_id('c1ab77e63b722ef8c6ea8a1c274be053-3cc322b62e7608b5cdf37185240f7853'),
'c1ab77e-3cc322b')
class Conf(TestCase):
def setUp(self): # noqa
self.tmpdir, self.bundle_path = create_temp_bundle('bundle conf contents')
def test(self):
conf_contents = bundle_utils.conf(self.bundle_path)
self.assertEqual(conf_contents, 'bundle conf contents')
def tearDown(self): # noqa
shutil.rmtree(self.tmpdir)
```
#### File: conductr_cli/test/test_conduct_events.py
```python
from unittest import TestCase
from conductr_cli.test.cli_test_case import CliTestCase, strip_margin
from conductr_cli import conduct_events
try:
from unittest.mock import patch, MagicMock # 3.3 and beyond
except ImportError:
from mock import patch, MagicMock
class TestConductEventsCommand(TestCase, CliTestCase):
default_args = {
'service': 'http://127.0.0.1:9210',
'bundle': 'ab8f513'
}
default_url = 'http://127.0.0.1:9210/events/ab8f513'
def test_no_events(self):
http_method = self.respond_with(text='{}')
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_events.events(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|TIME EVENT DESC
|"""),
self.output(stdout))
def test_multiple_events(self):
http_method = self.respond_with(text="""[
{
"@cee": {
"head": {
"contentType": "conductr.loadScheduler.loadBundleRequested"
},
"body": {
"time": "Today 12:54:30",
"requestId": "req123",
"bundleName": "bundle-name",
"message": "Load bundle requested: requestId=req123, bundleName=bundle-name"
},
"tag": "conductr.loadScheduler.loadBundleRequested"
}
},
{
"@cee": {
"head": {
"contentType": "conductr.loadExecutor.bundleWritten"
},
"body": {
"time": "Today 12:54:36",
"requestId": "req123",
"bundleName": "bundle-name",
"message": "Bundle written: requestId=req123, bundleName=bundle-name"
},
"tag": "conductr.loadExecutor.bundleWritten"
}
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_events.events(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|TIME EVENT DESC
|Today 12:54:30 conductr.loadScheduler.loadBundleRequested Load bundle requested: requestId=req123, bundleName=bundle-name
|Today 12:54:36 conductr.loadExecutor.bundleWritten Bundle written: requestId=req123, bundleName=bundle-name
|"""),
self.output(stdout))
def test_failure_invalid_address(self):
http_method = self.raise_connection_error('test reason', self.default_url)
stderr = MagicMock()
with patch('requests.get', http_method), patch('sys.stderr', stderr):
conduct_events.events(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
self.default_connection_error.format(self.default_url),
self.output(stderr))
```
#### File: conductr_cli/test/test_conduct_info.py
```python
from unittest import TestCase
from conductr_cli.test.cli_test_case import CliTestCase, strip_margin
from conductr_cli import conduct_info
try:
from unittest.mock import patch, MagicMock # 3.3 and beyond
except ImportError:
from mock import patch, MagicMock
class TestConductInfoCommand(TestCase, CliTestCase):
default_args = {
'ip': '127.0.0.1',
'port': 9005,
'verbose': False,
'long_ids': False
}
default_url = 'http://127.0.0.1:9005/bundles'
def test_no_bundles(self):
http_method = self.respond_with(text='[]')
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|"""),
self.output(stdout))
def test_stopped_bundle(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c47 test-bundle 1 0 0
|"""),
self.output(stdout))
def test_one_running_one_starting_one_stopped(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle-1" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [{"isStarted": true}],
"bundleInstallations": [1]
},
{
"attributes": { "bundleName": "test-bundle-2" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c-c52e3f8d0c58d8aa29ae5e3d774c0e54",
"bundleExecutions": [{"isStarted": false}],
"bundleInstallations": [1]
},
{
"attributes": { "bundleName": "test-bundle-3" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c47 test-bundle-1 1 0 1
|45e0c47-c52e3f8 test-bundle-2 1 1 0
|45e0c47 test-bundle-3 1 0 0
|"""),
self.output(stdout))
def test_one_running_one_stopped_verbose(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle-1" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [{"isStarted": true},{"isStarted": true},{"isStarted": true}],
"bundleInstallations": [1,2,3]
},
{
"attributes": { "bundleName": "test-bundle-2" },
"bundleId": "c52e3f8d0c58d8aa29ae5e3d774c0e54",
"bundleExecutions": [],
"bundleInstallations": [1,2,3]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
args = self.default_args.copy()
args.update({'verbose': True})
conduct_info.info(MagicMock(**args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|[
| {
| "attributes": {
| "bundleName": "test-bundle-1"
| },
| "bundleExecutions": [
| {
| "isStarted": true
| },
| {
| "isStarted": true
| },
| {
| "isStarted": true
| }
| ],
| "bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
| "bundleInstallations": [
| 1,
| 2,
| 3
| ]
| },
| {
| "attributes": {
| "bundleName": "test-bundle-2"
| },
| "bundleExecutions": [],
| "bundleId": "c52e3f8d0c58d8aa29ae5e3d774c0e54",
| "bundleInstallations": [
| 1,
| 2,
| 3
| ]
| }
|]
|ID NAME #REP #STR #RUN
|45e0c47 test-bundle-1 3 0 3
|c52e3f8 test-bundle-2 3 0 0
|"""),
self.output(stdout))
def test_long_ids(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
args = self.default_args.copy()
args.update({'long_ids': True})
conduct_info.info(MagicMock(**args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c477d3e5ea92aa8d85c0d8f3e25c test-bundle 1 0 0
|"""),
self.output(stdout))
def test_double_digits(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c47 test-bundle 10 0 0
|"""),
self.output(stdout))
def test_has_error(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"hasError": true
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|! 45e0c47 test-bundle 10 0 0
|There are errors: use `conduct events` or `conduct logs` for further information
|"""),
self.output(stdout))
def test_failure_invalid_address(self):
http_method = self.raise_connection_error('test reason', self.default_url)
stderr = MagicMock()
with patch('requests.get', http_method), patch('sys.stderr', stderr):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
self.default_connection_error.format(self.default_url),
self.output(stderr))
```
#### File: conductr_cli/test/test_shazar.py
```python
from unittest import TestCase
import shutil
import tempfile
from os import remove
from conductr_cli.shazar import create_digest, build_parser, run
from conductr_cli.test.cli_test_case import CliTestCase
try:
from unittest.mock import patch, MagicMock # 3.3 and beyond
except ImportError:
from mock import patch, MagicMock
class TestShazar(TestCase):
def test_create_digest(self):
temp = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
temp.write(b'test file data')
temp_name = temp.name
temp.close()
self.assertEqual(
create_digest(temp_name),
'1be7aaf1938cc19af7d2fdeb48a11c381dff8a98d4c4b47b3b0a5044a5255c04'
)
remove(temp_name)
def test_parser_success(self):
parser = build_parser()
args = parser.parse_args('--output-dir output-dir source'.split())
self.assertEqual(args.output_dir, 'output-dir')
self.assertEqual(args.source, 'source')
class TestIntegration(TestCase, CliTestCase):
def setUp(self): # noqa
self.tmpdir = tempfile.mkdtemp()
self.tmpfile = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
self.tmpfile.write(b'test file data')
def test(self):
stdout = MagicMock()
with patch('sys.stdout', stdout):
run('--output-dir {} {}'.format(self.tmpdir, self.tmpfile.name).split())
self.assertRegex(
self.output(stdout),
'Created digested ZIP archive at /tmp/tmp[a-z0-9_]{6,8}/tmp[a-z0-9_]{6,8}-[a-f0-9]{64}\.zip'
)
def tearDown(self): # noqa
shutil.rmtree(self.tmpdir)
remove(self.tmpfile.name)
``` |
{
"source": "2-men-team/dynamic-knapsack",
"score": 3
} |
#### File: Vasniktel/py/ip71_teliman_08.py
```python
from sys import argv
def readIn(infile):
with open(infile) as fin:
data = [list(map(int, line.split())) for line in fin]
return data[0][0], data[1:]
def writeOut(val, outfile):
with open(outfile, 'w') as fout:
fout.write(str(val))
def execute(cap, data):
memo = [[0 for _ in xrange(cap + 1)] for _ in xrange(len(data) + 1)]
minWeight = min(map(lambda el: el[1], data))
for n in xrange(1, len(data) + 1):
price, weight = data[n - 1]
for w in xrange(minWeight, cap + 1):
if weight > w:
memo[n][w] = memo[n - 1][w]
else:
memo[n][w] = max(memo[n - 1][w], price + memo[n - 1][w - weight])
return memo[len(data)][cap]
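# Worked example (hypothetical input): with capacity 5 and items given as
# (price, weight) pairs, execute(5, [[60, 2], [50, 3]]) == 110, since both
# items fit exactly (weights 2 + 3 = 5).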
if __name__ == '__main__':
infile = argv[1]
outfile = 'ip71_teliman_08_output.txt'
capacity, data = readIn(infile)
summed = execute(capacity, data)
writeOut(summed, outfile)
``` |
{
"source": "2-men-team/hash-table",
"score": 4
} |
#### File: hash-table/ElminsteAumar/Llist.py
```python
class Node:
def __init__(self, data):
self.data = data
self.next = None
class Linked_list:
def __init__(self):
self.head = None
def insert_end(self, data):
temp = Node(data)
if self.head is None:
self.head = temp
return
current = self.head
while current.next is not None:
current = current.next
current.next = temp
def insert_start(self, data):
temp = Node(data)
temp.next = self.head
self.head = temp
def length(self):
current = self.head
length = 0
while current is not None:
length += 1
current = current.next
return length
def search(self, data):
current = self.head
while current is not None:
if current.data == data:
return current
else:
current = current.next
return None
def delete(self, data):
current = self.head
prev = None
while current is not None:
if current.data == data:
break
else:
prev = current
current = current.next
if prev is None:
self.head = current.next
else:
prev.next = current.next
def include(self, data):
current = self.head
while current is not None:
if current.data == data:
return True
else:
current = current.next
return False
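# Usage sketch:
# ll = Linked_list(); ll.insert_end(1); ll.insert_start(0)
# ll.length() -> 2; ll.include(1) -> True; ll.search(0).data -> 0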
``` |
{
"source": "2min0/Autoencoder-master",
"score": 2
} |
#### File: 2min0/Autoencoder-master/example_train.py
```python
import time
import numpy as np
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader
from data_gen import VaeDataset
from models import SegNet
from utils import *
from torchvision.datasets import ImageFolder
from torchvision.transforms import transforms as T
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('--train-data-path', '-train', type=str, default='data/cifar-10/cifar10_train', required=True,
help='training data path: data/cifar-10/cifar10_train')
parser.add_argument('--valid-data-path', '-valid', type=str, default='data/cifar-10/cifar10_train', required=True,
help='valid data path: data/cifar-10/cifar10_train')
parser.add_argument('--batch_size', '-b', type=int, default=256, required=True, help='batch size. default=256')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.0001, required=True,
help='learning rate. default=0.0001')
parser.add_argument('--epochs', '-e', type=int, default=120, required=True, help='epochs. default=120')
args = parser.parse_args()
def train(epoch, train_loader, model, optimizer):
# Ensure dropout layers are in train mode
model.train()
# Loss function
# criterion = nn.MSELoss().to(device)
batch_time = ExpoAverageMeter() # forward prop. + back prop. time
# losses = ExpoAverageMeter() # loss (per word decoded)
start = time.time()
# to draw train loss graph
weighted_loss_list = []
# Batches
for i_batch, (x, y) in enumerate(train_loader):
# to update weighted loss, initialize
loss_list = []
max_loss = 0.
# to print loss to terminal
loss_print = []
# Set device options
x = x.to(device)
y = y.to(device)
# print(np.array(x.size()))
# >>> [batch size, # of channels, img_width, img_height]
# Zero gradients
optimizer.zero_grad()
# model output
x_hat = model(x)
# loss = torch.sqrt((x_hat - x).pow(2).mean())
# loss.backward()
# for every batch, calculate loss and update weighted loss
for i in range(np.array(x.size())[0]):
# per-sample MSE loss (no square root applied here)
loss = (x_hat[i, :, :, :] - x[i, :, :, :]).pow(2).mean()
max_loss = max(loss, max_loss)
# # sort: descending order
# loss_list.sort(reverse=True)
loss = max_loss
max_loss.backward()
# # high loss has large weight
# for i in range(len(loss_list)):
# weighted_loss = loss_list[i] * (1 - 0.5 / len(loss_list) * i)
# weighted_loss_list.append(weighted_loss.item())
# loss_print.append(weighted_loss.item())
# # convert value to tensor and backprop
# # actually, it updates for every sample, not every batch.
# weighted_loss = torch.tensor(weighted_loss).requires_grad_(True)
# weighted_loss.backward()
# loss.backward()
# def closure():
# optimizer.zero_grad()
# y_hat = model(x)
# loss = torch.sqrt((y_hat - y).pow(2).mean())
# loss.backward()
# losses.update(loss.item())
# return loss
# optimizer.step(closure)
optimizer.step()
# Keep track of metrics
# losses.update(loss.item())
batch_time.update(time.time() - start)
start = time.time()
# loss_print_mean = sum(loss_print) / len(loss_print)
# Print status
if i_batch % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss}\t'.format(epoch, i_batch, len(train_loader),
batch_time=batch_time, loss=loss))
# # to draw loss graph, output mean of weighted loss
# total_loss = sum(weighted_loss_list) / len(weighted_loss_list)
return loss
def valid(val_loader, model):
model.eval() # eval mode (no dropout or batchnorm)
# Loss function
# criterion = nn.MSELoss().to(device)
batch_time = ExpoAverageMeter() # forward prop. + back prop. time
losses = ExpoAverageMeter() # reconstruction loss
start = time.time()
with torch.no_grad():
# Batches
for i_batch, (x, y) in enumerate(val_loader):
# Set device options
x = x.to(device)
y = y.to(device)
x_hat = model(x)
loss = torch.sqrt((x_hat - x).pow(2).mean())
# Keep track of metrics
losses.update(loss.item())
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i_batch % print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(i_batch, len(val_loader),
batch_time=batch_time,
loss=losses))
return losses.avg
def main():
train_data = ImageFolder(root=args.train_data_path, transform=T.ToTensor())
train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True)
valid_data = ImageFolder(root=args.valid_data_path, transform=T.ToTensor())
val_loader = DataLoader(dataset=valid_data, batch_size=len(valid_data), shuffle=False, pin_memory=True)
# Create SegNet model
model = SegNet(in_channels=3, is_unpooling=True)
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
best_loss = 100000
epochs_since_improvement = 0
# draw loss graph
epoch_graph = []
train_loss_graph = []
val_loss_graph = []
auto_loss_graph = []
class_loss_graph = []
# Epochs
for epoch in range(args.epochs):
# Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
if epochs_since_improvement == 20:
break
if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
adjust_learning_rate(optimizer, 0.8)
# One epoch's training
train_loss = train(epoch, train_loader, model, optimizer)
# One epoch's validation
val_loss = valid(val_loader, model)
print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))
# Check if there was an improvement
is_best = val_loss < best_loss
best_loss = min(best_loss, val_loss)
if not is_best:
epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
else:
epochs_since_improvement = 0
# draw loss graph
epoch_graph.append(epoch + 1)
train_loss_graph.append(train_loss)
val_loss_graph.append(val_loss)
# Save checkpoint
save_checkpoint(epoch, model, optimizer, val_loss, is_best)
# save loss graph
plt.figure()
plt.title('loss')
plt.plot(np.array(epoch_graph), np.array(train_loss_graph))
plt.plot(np.array(epoch_graph), np.array(val_loss_graph))
plt.savefig('loss.png')
if __name__ == '__main__':
main()
``` |
{
"source": "2min0/Self-Diagnoisng-GAN",
"score": 2
} |
#### File: 2min0/Self-Diagnoisng-GAN/train_mimicry_phase2.py
```python
import argparse
import os
import pickle
from pathlib import Path
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils import data
from diagan.datasets.predefined import get_predefined_dataset
from diagan.models.predefined_models import get_gan_model
from diagan.trainer.trainer import LogTrainer
from diagan.utils.plot import (
calculate_scores, print_num_params,
show_sorted_score_samples
)
from diagan.utils.settings import set_seed
from pandas import Series, DataFrame
def get_dataloader(dataset, batch_size=128, weights=None, eps=1e-6):
if weights is not None:
weight_list = [eps if i < eps else i for i in weights]
sampler = data.WeightedRandomSampler(weight_list, len(weight_list), replacement=True)
else:
sampler = None
dataloader = data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False if sampler else True,
sampler=sampler,
num_workers=8,
pin_memory=True)
return dataloader
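# Usage sketch (hypothetical weights): per-sample scores make the sampler draw
# high-weight examples more often, with replacement, e.g.
# dl = get_dataloader(ds_train, batch_size=64, weights=sample_weights)
# With weights=None the loader falls back to plain shuffling.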
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", "-d", default="cifar10", type=str)
parser.add_argument("--root", "-r", default="./dataset/cifar10", type=str, help="dataset dir")
parser.add_argument("--work_dir", default="./exp_results", type=str, help="output dir")
parser.add_argument("--exp_name", type=str, help="exp name")
parser.add_argument("--baseline_exp_name", type=str, help="exp name")
parser.add_argument('--p1_step', default=40000, type=int)
parser.add_argument("--model", default="sngan", type=str, help="network model")
parser.add_argument("--loss_type", default="hinge", type=str, help="loss type")
parser.add_argument('--gpu', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--num_steps', default=80000, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--decay', default='linear', type=str)
parser.add_argument('--n_dis', default=5, type=int)
parser.add_argument('--resample_score', type=str)
parser.add_argument('--gold', action='store_true')
parser.add_argument('--topk', action='store_true')
parser.add_argument('--num_minor', default=50, type=int)
parser.add_argument('--num_major', default=500, type=int)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
output_dir = f'{args.work_dir}/{args.exp_name}'
save_path = Path(output_dir)
save_path.mkdir(parents=True, exist_ok=True)
baseline_output_dir = f'{args.work_dir}/{args.baseline_exp_name}'
baseline_save_path = Path(baseline_output_dir)
set_seed(args.seed)
if torch.cuda.is_available():
device = "cuda"
cudnn.benchmark = True
else:
device = "cpu"
prefix = args.exp_name.split('/')[-1]
if args.dataset == 'celeba':
window = 5000
elif args.dataset == 'cifar10':
window = 5000
else:
window = 5000
if not args.gold:
logit_path = baseline_save_path / 'logits_netD_eval.pkl'
print(f'Use logit from: {logit_path}')
logits = pickle.load(open(logit_path, "rb"))
score_start_step = (args.p1_step - window)
score_end_step = args.p1_step
score_dict = calculate_scores(logits, start_epoch=score_start_step, end_epoch=score_end_step)
# print(len(score_dict['ldrm']), len(score_dict['ldrv'])) >>> 1100, 1100
sample_weights = score_dict[args.resample_score]
print(
f'sample_weights mean: {sample_weights.mean()}, var: {sample_weights.var()}, max: {sample_weights.max()}, min: {sample_weights.min()}')
######################
# save LDRM and LDRV
######################
# for convenience: define variables of # of major samples and minor samples
ma = args.num_major
mi = args.num_minor
# make empty arrays for dataframe. '+1' is because of the first "mean" row in dataframe.
ldrm_array = np.zeros((4, ma + 1))
ldrv_array = np.zeros((4, ma + 1))
# fill 'ldrm_array'
for i in range(2):
for j in range(2):
# If ma=500 and mi=50, split 'ldrm' array into [0:50], [50:550], [550:600], [600:1100]
values = score_dict['ldrm'][(ma + mi) * i + mi * j:(ma + mi) * i + mi + ma * j]
# The first row is the average.
ldrm_array[2 * i + j][0] = np.mean(np.array(values))
# In the rows except the first row, there are 'ldrm' values.
for k in range(len(values)):
ldrm_array[2 * i + j][k + 1] = values[k]
# fill 'ldrv_array' in the same way.
for i in range(2):
for j in range(2):
values = score_dict['ldrv'][(ma + mi) * i + mi * j:(ma + mi) * i + mi + ma * j]
ldrv_array[2 * i + j][0] = np.mean(np.array(values))
for k in range(len(values)):
ldrv_array[2 * i + j][k + 1] = values[k]
# make a dataframe. 3 classification criteria: LDRM or LDRV, label 0 or 1, minor or major
df = DataFrame({'LDRM_0_minor': ldrm_array[0].tolist(),
'LDRM_0_major': ldrm_array[1].tolist(),
'LDRM_1_minor': ldrm_array[2].tolist(),
'LDRM_1_major': ldrm_array[3].tolist(),
'LDRV_0_minor': ldrv_array[0].tolist(),
'LDRV_0_major': ldrv_array[1].tolist(),
'LDRV_1_minor': ldrv_array[2].tolist(),
'LDRV_1_major': ldrv_array[3].tolist()})
# set the name (index) of the first row as "mean".
df.rename(index={0: "mean"}, inplace=True)
df.to_csv(f"{output_dir}/LDR_output.csv")
else:
sample_weights = None
netG_ckpt_path = baseline_save_path / f'checkpoints/netG/netG_{args.p1_step}_steps.pth'
netD_ckpt_path = baseline_save_path / f'checkpoints/netD/netD_{args.p1_step}_steps.pth'
netD_drs_ckpt_path = baseline_save_path / f'checkpoints/netD/netD_{args.p1_step}_steps.pth'
netG, netD, netD_drs, optG, optD, optD_drs = get_gan_model(
dataset_name=args.dataset,
model=args.model,
loss_type=args.loss_type,
drs=True,
topk=args.topk,
gold=args.gold,
)
print(f'model: {args.model} - netD_drs_ckpt_path: {netD_drs_ckpt_path}')
print_num_params(netG, netD)
ds_train = get_predefined_dataset(dataset_name=args.dataset, root=args.root, weights=None)
dl_train = get_dataloader(ds_train, batch_size=args.batch_size, weights=sample_weights)
ds_drs = get_predefined_dataset(dataset_name=args.dataset, root=args.root, weights=None)
dl_drs = get_dataloader(ds_drs, batch_size=args.batch_size, weights=None)
if not args.gold:
show_sorted_score_samples(ds_train, score=sample_weights, save_path=save_path, score_name=args.resample_score,
plot_name=prefix)
print(args)
# Start training
trainer = LogTrainer(
output_path=save_path,
netD=netD,
netG=netG,
optD=optD,
optG=optG,
netG_ckpt_file=str(netG_ckpt_path),
netD_ckpt_file=str(netD_ckpt_path),
netD_drs_ckpt_file=str(netD_drs_ckpt_path),
netD_drs=netD_drs,
optD_drs=optD_drs,
dataloader_drs=dl_drs,
n_dis=args.n_dis,
num_steps=args.num_steps,
save_steps=1000,
lr_decay=args.decay,
dataloader=dl_train,
log_dir=output_dir,
print_steps=10,
device=device,
topk=args.topk,
gold=args.gold,
gold_step=args.p1_step,
save_logits=False,
)
trainer.train()
if __name__ == '__main__':
main()
``` |
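The four-way split of the score arrays above is easy to misread, so here is a minimal sketch of the index arithmetic, assuming the hypothetical sizes ma=500 and mi=50 (label-0 minor, label-0 major, label-1 minor, label-1 major):

```python
# Reproduces the slice bounds used to split score_dict['ldrm'] into four groups.
ma, mi = 500, 50  # number of major/minor samples per label (assumed values)

for i in range(2):      # i: label 0 or 1
    for j in range(2):  # j=0 -> minor block, j=1 -> major block
        start = (ma + mi) * i + mi * j
        end = (ma + mi) * i + mi + ma * j
        print(f"label {i} {'minor' if j == 0 else 'major'}: [{start}:{end}]")
# -> [0:50], [50:550], [550:600], [600:1100]
```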
{
"source": "2minchul/chip-helper",
"score": 3
} |
#### File: chip-helper/src/cmd_tool.py
```python
import os
import sys
from contextlib import contextmanager
def exit_enter(code=0):
input('press enter to exit...')
sys.exit(code)
def get_execution_path():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def get_input_path_or_exit():
path = os.path.join(get_execution_path(), 'input')
if os.path.isdir(path):
return path
    print('Could not find the input folder')
    print(f'Please create an input folder in "{get_execution_path()}"')
exit_enter(1)
def get_resource_path():
return os.path.join(get_execution_path(), 'resource')
def get_chrome_driver_path_or_exit():
chrome_driver_path = os.path.join(get_resource_path(), 'chromedriver.exe')
if not os.path.isfile(chrome_driver_path):
        print('You need to download chromedriver into the resource folder')
exit_enter(1)
return chrome_driver_path
@contextmanager
def cd(path):
origin = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(origin)
```
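A quick usage sketch for the `cd` context manager above; the module name and the path are illustrative:

```python
import os
from cmd_tool import cd  # assumes src/ is on the import path

# The original working directory is restored even if the body raises.
with cd('/tmp'):
    print(os.getcwd())  # inside /tmp
print(os.getcwd())      # back where we started
```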
#### File: chip-helper/src/command.py
```python
import argparse
import os
import re
import sys
from operator import itemgetter
from typing import Optional
import sentry_sdk
import youtube_dl
from selenium.common.exceptions import SessionNotCreatedException
from cmd_tool import (
get_execution_path,
exit_enter,
get_input_path_or_exit,
get_chrome_driver_path_or_exit,
get_resource_path,
cd
)
from imagetools import Size
from qrcode import NaverQrCode, make_qr_image, make_redirect_html
from thumbnail import composite_thumbnail, capture_video
from youtube_uploader import YoutubeUploader, YoutubeUploaderException
sentry_sdk.init("https://[email protected]/5243685")
def make_dirs():
os.chdir(get_execution_path())
print(os.path.abspath(os.curdir))
    print('Directory generator')
    print('Existing directories will be skipped')
    path = input('Path to create in: ')
    if not os.path.isdir(path):
        print('That path does not exist')
        exit_enter(1)
    s, e = map(int, (input('Start number: '), input('End number: ')))
    for i in range(s, e + 1):
        os.makedirs(os.path.join(path, f'{i:04}'), exist_ok=True)
    print('Done')
exit_enter()
def make_thumbnail():
input_path = os.path.join(get_execution_path(), 'input')
for cur_dir, _, files in os.walk(input_path):
dir_name = os.path.basename(cur_dir)
def _is_mp4(filename):
_, ext = os.path.splitext(filename)
return ext == '.mp4'
        mp4_files = tuple(filter(_is_mp4, files))
        if not mp4_files:
            continue
        if 1 < len(mp4_files):
            print(f'pass: more than one mp4 file exists in "{dir_name}"')
            continue
        if not dir_name.isnumeric():
            print(f'skip: "{dir_name}" is not a numeric directory name')
            continue
idx_text = f'{int(dir_name):04}'
mp4_filename = mp4_files[0]
jpg_filename = f'{idx_text}.jpg'
jpg_filepath = os.path.join(cur_dir, jpg_filename)
print(f'capture:\t{mp4_filename} to {jpg_filename}')
capture_video(os.path.join(cur_dir, mp4_filename), jpg_filepath)
target_filename = f'p{idx_text}.jpg'
print(f'composite:\t{jpg_filename} to {target_filename} ...')
composite_thumbnail(jpg_filepath, os.path.join(cur_dir, target_filename))
    print('Done!')
exit_enter()
def upload_videos():
path = get_execution_path()
chrome_driver_path = get_chrome_driver_path_or_exit()
input_path = get_input_path_or_exit()
cookie_path = os.path.join(get_execution_path(), 'cookies.txt')
if not os.path.isfile(cookie_path):
        print('You need to put cookies.txt in the top-level folder')
exit_enter(1)
uploader = None
video_dirs = {}
to_upload = {}
for cur_dir, _, files in os.walk(input_path):
dir_name = os.path.basename(cur_dir)
video_path = video_name = thumbnail_path = None
if 1 < len(tuple(filter(lambda s: s.endswith('.mp4'), files))):
print(f'"{cur_dir}" 에 여러개의 .mp4 파일이 존재합니다!')
continue
for filename in files:
if filename == 'youtube_url.txt':
video_path = thumbnail_path = None
print(f'already uploaded: {dir_name}')
break
current_video_name, ext = os.path.splitext(filename)
if ext == '.mp4':
if not dir_name.isnumeric():
                    print(f'skip: "{dir_name}" is not a numeric directory name')
break
video_name = f'{int(dir_name):04}'
video_path = os.path.join(cur_dir, f'{video_name}.mp4')
if current_video_name != video_name:
print(f'rename "{filename}" to "{video_name}.mp4"')
os.rename(os.path.join(cur_dir, filename), video_path)
video_dirs[video_name] = cur_dir
elif ext == '.jpg' and re.match(r'^\d+[.]jpg$', filename):
thumbnail_path = os.path.join(cur_dir, filename)
if not (video_path and thumbnail_path):
continue
to_upload[int(dir_name)] = (video_name, video_path, thumbnail_path)
for dir_number, (video_name, video_path, thumbnail_path) in sorted(to_upload.items(), key=lambda e: e[0]):
# if not uploader:
uploader = YoutubeUploader()
try:
my_channel_id = uploader.init(chrome_driver_path, cookie_path)
except SessionNotCreatedException as e:
print(e)
            print('The installed Chrome version does not match the chromedriver version.')
            print('Please download a matching chromedriver again from https://chromedriver.chromium.org/downloads.')
break
with open(os.path.join(path, '.mychannelid'), 'w') as f:
f.write(my_channel_id)
print(f'uploading {video_name}')
try:
if uploader.upload_video(video_path, thumbnail_path):
print(f'success: {video_name}')
else:
print(f'failure: {video_name}')
except YoutubeUploaderException as e:
print(e)
print(f'failure: {video_name}')
try:
uploader.browser.close()
except:
pass
    print('All upload jobs are finished.')
exit_enter()
def update_youtube_urls(my_channel_id=None):
path = get_execution_path()
input_path = get_input_path_or_exit()
cookie_path = os.path.join(get_execution_path(), 'cookies.txt')
if not my_channel_id:
mychannelid_path = os.path.join(path, '.mychannelid')
if os.path.isfile(mychannelid_path):
with open(mychannelid_path, 'r') as f:
my_channel_id = f.read()
else:
            print('Please run youtube upload first')
exit_enter(1)
    yn = input('Overwrite existing youtube_url.txt files as well? [y/n]: ')
overwrite = yn == 'y'
video_dirs = {}
for cur_dir, _, files in os.walk(input_path):
if not overwrite and os.path.isfile(os.path.join(cur_dir, 'youtube_url.txt')):
continue
dir_name = os.path.basename(cur_dir)
for filename in files:
name, ext = os.path.splitext(filename)
if ext == '.mp4' and dir_name.isnumeric():
video_dirs[name] = cur_dir
yt = youtube_dl.YoutubeDL(dict(cookiefile=cookie_path))
my_channel_playlist = yt.extract_info(
f'https://www.youtube.com/channel/{my_channel_id}', download=False, process=False
).get('url')
is_created = False
video_urls = {}
for video in yt.extract_info(my_channel_playlist, download=False, process=False).get('entries'):
title = video['title']
if title.isnumeric() and video_dirs.get(title):
is_created = True
video_urls[int(title)] = (title, f"https://www.youtube.com/watch?v={video['id']}")
if not is_created:
        print('There are no newly uploaded videos, or the uploads have not fully finished yet.')
        print('Please try again in a moment.')
else:
for _, (title, url) in sorted(video_urls.items(), key=itemgetter(0)):
print(f'make youtube_url.txt: {title}')
with open(os.path.join(video_dirs[title], 'youtube_url.txt'), 'w') as f:
f.write(url)
exit_enter()
def qrcode():
input_path = get_input_path_or_exit()
chrome_driver_path = get_chrome_driver_path_or_exit()
resource_path = get_resource_path()
if not os.path.isfile(os.path.join(resource_path, 'DXGulimB-KSCpc-EUC-H.ttf')):
        print('Could not find the font file.')
        print('Please put the DXGulimB-KSCpc-EUC-H.ttf file inside "resource/"!')
exit_enter(1)
naver_qr: Optional[NaverQrCode] = None
def walk_dir():
walk_dirs = {}
for cur_dir, dirs, files in os.walk(input_path):
dir_name = os.path.basename(cur_dir)
if not dir_name.isnumeric():
continue
if 'youtube_url.txt' not in files:
continue
if 'qrcode.html' in files:
print(f'already created: {dir_name}')
continue
walk_dirs[int(dir_name)] = (cur_dir, dirs, files)
return (v for k, v in sorted(walk_dirs.items(), key=itemgetter(0)))
for cur_dir, _, files in walk_dir():
dir_name = os.path.basename(cur_dir)
idx = int(dir_name)
idx_text = f'{idx:04}'
with open(os.path.join(cur_dir, 'youtube_url.txt'), 'r') as f:
youtube_url = f.read()
if not naver_qr:
naver_qr = NaverQrCode()
naver_qr.init(chrome_driver_path)
print('waiting login ...')
naver_qr.login()
print('login success')
qr_data = naver_qr.create_qr(idx_text, youtube_url).get('QRCodeData', {})
qr_url = qr_data.get('qrCodeUrl')
qr_id = qr_data.get('qrcdNo')
if not qr_url:
            print(f'{idx_text}: failed to create the QR code')
continue
with cd(resource_path):
print(f'creating "{idx_text}.png"')
image = make_qr_image(Size(591, 738), qr_url, idx) # 5cm x 6.25cm (300dpi)
with open(os.path.join(cur_dir, f'{idx_text}.png'), 'wb') as f:
image.save(f, format='PNG', dpi=(300, 300))
make_redirect_html(
os.path.join(cur_dir, 'qrcode.html'),
f'https://qr.naver.com/code/updateForm.nhn?qrcdNo={qr_id}'
)
if naver_qr:
naver_qr.visit_admin_page()
    print('All tasks are finished.')
input('press enter to exit...')
if naver_qr:
naver_qr.close()
sys.exit(0)
def organize():
input_path = get_input_path_or_exit()
for filename in os.listdir(input_path):
filepath = os.path.join(input_path, filename)
if not os.path.isfile(filepath) or 'README.txt' == filename:
continue
name, _ = os.path.splitext(filename)
if not name.isnumeric():
print(f'pass: "{filename}" 은 숫자로 이루어진 이름이 아닙니다')
continue
dir_path = os.path.join(input_path, f'{int(name)}')
os.makedirs(dir_path, exist_ok=True)
try:
os.rename(filepath, os.path.join(dir_path, filename))
print(f'move "{filename}" to "{int(name)}/{filename}"')
except Exception as e:
print(f'"{filename}" 을 옮기는데 실패하였습니다: {e}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Chip Helper')
subparsers = parser.add_subparsers(help='commands', dest='command', required=True)
subparsers.add_parser('makedirs', help='Create dirs like "nnnn" format in a specific path')
subparsers.add_parser('organize', help='Create numeric dirs and move video files in it')
subparsers.add_parser('thumbnail', help='Create thumbnails')
subparsers.add_parser('upload', help='Upload videos to youtube')
subparsers.add_parser('youtube-url', help='Make youtube_url.txt in input dirs')
subparsers.add_parser('qrcode', help='Generate Naver QR and composite qr image')
args = parser.parse_args()
func = {
'makedirs': make_dirs,
'thumbnail': make_thumbnail,
'upload': upload_videos,
'youtube-url': update_youtube_urls,
'qrcode': qrcode,
'organize': organize,
}.get(args.command)
func()
    print('All tasks completed.')
exit_enter()
```
#### File: src/qrcode/naver_qr.py
```python
from typing import Optional
import requests
from selenium import webdriver
from selenium_helper import wait_element_by_id
class NaverQrCode:
default_headers = {
'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
'charset': 'utf-8'
}
browser: Optional[webdriver.Chrome]
my_naver_id: Optional[str]
    def init(self, chrome_driver_path='chromedriver'):
        self.browser = webdriver.Chrome(chrome_driver_path)
self.browser.get("http://naver.com")
def login(self):
browser = self.browser
browser.get('https://nid.naver.com/nidlogin.login?mode=form&url=https%3A%2F%2Fwww.naver.com')
frame = wait_element_by_id(browser, 'minime')
self.my_naver_id = self._get_naver_id(frame)
def _get_naver_id(self, minime_frame):
br = self.browser
br.switch_to.frame(minime_frame)
email_div = br.find_element_by_class_name('MY_EMAIL')
email = email_div.text
naver_id, *_ = email.split('@')
br.switch_to.default_content()
return naver_id
def create_qr(self, qr_name, qr_link):
br = self.browser
br.get('https://qr.naver.com/code/createForm.nhn')
input_tag = wait_element_by_id(br, 'qrCodePub')
        qr_code_pub = input_tag.get_attribute('value')  # one-time token used to create the QR code
return self.send_create_qr_api(qr_code_pub, qr_name, qr_link)
def send_create_qr_api(self, qr_pub, qr_name, qr_link):
headers = self.default_headers.copy()
headers.update({'Referer': 'https://qr.naver.com/code/createForm.nhn', 'Origin': 'https://qr.naver.com'})
cookies = {cookie['name']: cookie['value'] for cookie in self.browser.get_cookies()}
data = {
"qrcdNo": "", "qrCodeUrl": "",
"qrSaveStatusCd": "79", "qrColorBorderCd": "21", "qrDirectLinkTypeCd": "29",
"qrSearchWord": "", "qrAttachOrder": "L|D|I|V|M|C", "qrSubjectFontTypeCd": "157", "qrLogoImgUrl": "",
"qrLandingSkinTypeCd": "177", "qrAttachImgViewTypeCd": "164", "qrBorderSkinTypeCd": "237",
"qrUserBorderSkinUrl": "", "qrCenterImgUrl": "", "qrLocation": "241",
"qrUserBorderSkinThumbnailUrl": "", "qrCenterImgThumbnailUrl": "",
"qrVersion": "30", "qrCodeExp": "10001", "qrName": qr_name,
"qrLogoTypeCd": "24", "qrSubject": "", "qrLocationTypeCd": "241",
"qrAttachLinkList[0].linkSubject": "", "qrAttachLinkList[0].linkUrl": "",
"qrAttachLinkList[1].linkSubject": "", "qrAttachLinkList[1].linkUrl": "",
"qrAttachLinkList[2].linkSubject": "", "qrAttachLinkList[2].linkUrl": "",
"qrAttachLinkList[3].linkSubject": "", "qrAttachLinkList[3].linkUrl": "",
"qrAttachLinkList[4].linkSubject": "", "qrAttachLinkList[4].linkUrl": "",
"qrDesc": "", 'qrNaverId': self.my_naver_id, 'qrDirectLink': qr_link, 'qrCodePub': qr_pub
}
response = requests.post(
'https://qr.naver.com/code/createCode.nhn',
headers=headers, cookies=cookies, data=data
)
return response.json()
def visit_admin_page(self):
self.browser.get('https://qr.naver.com/code/codeAdmin.nhn')
def close(self):
self.browser.close()
del self.browser
```
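A minimal driving sketch for `NaverQrCode`, mirroring how `command.py`'s `qrcode()` uses it; the chromedriver path and the YouTube URL are illustrative, and `login()` blocks until the user signs in manually in the opened browser:

```python
from qrcode import NaverQrCode

qr = NaverQrCode()
qr.init('resource/chromedriver.exe')  # opens Chrome on naver.com
qr.login()                            # waits for a manual Naver login

result = qr.create_qr('0001', 'https://www.youtube.com/watch?v=dQw4w9WgXcQ')
qr_data = result.get('QRCodeData', {})
print(qr_data.get('qrCodeUrl'), qr_data.get('qrcdNo'))

qr.close()
```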
#### File: chip-helper/src/thumbnail.py
```python
import cv2
from PIL import Image, ImageFilter
from PIL.PngImagePlugin import PngImageFile
from imagetools import Size
def zoom(img, percent):
n = percent / 100
size = tuple(int(0.5 + (i * n)) for i in img.size)
return img.resize(size)
def get_center_box(img, target_size: Size):
w, h = img.size
left = (w - target_size.width) / 2
top = (h - target_size.height) / 2
right = (w + target_size.width) / 2
bottom = (h + target_size.height) / 2
return tuple(map(int, (left, top, right, bottom)))
def crop_center(img, target_size: Size):
    box = get_center_box(img, target_size)
return img.crop(box)
def paste_center(img, small_img):
    box = get_center_box(img, Size.from_img(small_img))
return img.paste(small_img, box)
def composite_thumbnail(input_path, output_path):
canvas_size = Size(1920, 1440)
origin_img: PngImageFile = Image.open(input_path)
zoom_img = zoom(origin_img, 118.98)
zoom_size = Size.from_img(zoom_img)
# crop center
crop_img = crop_center(zoom_img, Size(canvas_size.width, zoom_size.height))
bg_img = zoom(
crop_img,
        (canvas_size.height / crop_img.height) * 100  # scale so crop_img's height matches the canvas height
)
bg_img = crop_center(bg_img, canvas_size)
bg_img = bg_img.filter(ImageFilter.GaussianBlur(radius=7))
paste_center(bg_img, crop_img)
bg_img.save(output_path, format='JPEG', quality=100, dpi=(300, 300))
def capture_video(video_path, output_path):
    vcap = cv2.VideoCapture(video_path)
    res, im_ar = vcap.read()
    vcap.release()
    if res:  # only write if a frame was actually grabbed
        cv2.imwrite(output_path, im_ar)
```
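Since `zoom` takes a percentage, scaling `crop_img` to the canvas height uses `(canvas_height / crop_height) * 100`; a small standalone check of that arithmetic with Pillow, using illustrative sizes:

```python
from PIL import Image

canvas_height = 1440
img = Image.new('RGB', (1920, 1080))           # stand-in for crop_img
percent = (canvas_height / img.height) * 100   # ~133.33
scale = percent / 100
# Same rounding as zoom(): int(0.5 + dim * scale)
new_size = tuple(int(0.5 + d * scale) for d in img.size)
print(new_size)  # (2560, 1440): the height now matches the canvas
```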
#### File: src/youtube_uploader/youtube_uploader.py
```python
import re
from collections import defaultdict
from http import cookiejar
from typing import Optional
from selenium import webdriver
from selenium.common.exceptions import UnableToSetCookieException, ElementClickInterceptedException, \
StaleElementReferenceException
from selenium_helper import wait_element_by_name, wait_element_by_id
from .exceptions import BadStatusError
import time
class YoutubeUploader:
browser: Optional[webdriver.Chrome] = None
def init(self, chrome_diver_path='chromedriver', cookie_file_path='cookies.txt'):
self.browser = browser = webdriver.Chrome(chrome_diver_path)
cj = cookiejar.MozillaCookieJar(cookie_file_path)
cj.load()
domains = defaultdict(list)
for cookie in cj:
domains[cookie.domain].append(cookie)
merged_domains = defaultdict(list)
for domain in sorted(domains.keys(), reverse=True): # type: str
if domain.startswith('.'):
is_append = False
for t in merged_domains.keys():
if domain in t:
                        merged_domains[t].extend(domains[domain])  # merge into the already-seen host
is_append = True
break
if not is_append:
merged_domains[domain].extend(domains[domain])
else:
merged_domains[domain].extend(domains[domain])
for domain, cookies in merged_domains.items():
dummy_url = 'https://' + domain.lstrip('.').rstrip('/') + '/404dummy'
browser.get(dummy_url)
for cookie in cookies:
cookie_dict = {'domain': cookie.domain, 'name': cookie.name, 'value': cookie.value,
'secure': cookie.secure}
try:
browser.add_cookie(cookie_dict)
except UnableToSetCookieException:
                    print('UnableToSetCookieException', cookie)
browser.get('http://youtube.com')
rex = re.search(r'[{]"key":"creator_channel_id","value":"(.+?)"[}]', browser.page_source)
if rex:
return rex.group(1)
def upload_video(self, video_path, thumbnail_path):
browser = self.browser
upload_btn = None
for _ in range(3):
browser.get(f'https://studio.youtube.com?_={str(int(time.time()))}')
try:
upload_btn = wait_element_by_id(browser, 'upload-button', max_count=3)
if not upload_btn:
upload_btn = wait_element_by_id(browser, 'upload-icon', max_count=3)
if upload_btn:
upload_btn.click()
break
            except Exception:
                browser.implicitly_wait(2)
if not upload_btn:
return False
self.check_status(sleep=1)
select_input = wait_element_by_name(browser, 'Filedata')
select_input.send_keys(video_path)
self.check_status(sleep=0.5)
th = wait_element_by_id(browser, 'file-loader')
th.send_keys(thumbnail_path)
no_kids_btn = wait_element_by_name(browser, 'NOT_MADE_FOR_KIDS')
no_kids_btn.click()
next_btn = wait_element_by_id(browser, 'next-button')
next_btn.click()
        while self.get_step() != 1:  # second step
self.check_status(sleep=1)
next_btn = wait_element_by_id(browser, 'next-button')
next_btn.click()
unlisted_btn = wait_element_by_name(browser, 'UNLISTED')
unlisted_btn.click()
        while self.get_step() != 2:  # third step
self.check_status(sleep=1)
while 1:
done_btn = wait_element_by_id(self.browser, 'done-button', 1)
if not done_btn.is_enabled():
break
try:
done_btn.click()
except ElementClickInterceptedException:
break
self.check_status(sleep=1)
is_finish = False
while not is_finish:
for element in self.browser.find_elements_by_id('dialog-title'):
try:
                    if element.text == '동영상 처리 중':  # "Processing video" dialog title (Korean YouTube UI)
is_finish = True
break
except StaleElementReferenceException:
time.sleep(1)
is_finish = True
break
self.check_status(sleep=1)
time.sleep(2)
return True
def check_status(self, sleep=0.0):
for element in self.browser.find_elements_by_class_name('error-short'):
            if element.text == '처리 중단됨':  # "Processing aborted" (Korean YouTube UI)
                raise BadStatusError('처리 중단됨')
if sleep:
self.browser.implicitly_wait(sleep)
def get_step(self):
for element in self.browser.find_elements_by_class_name('step'):
if element.get_attribute('state') == 'numbered':
return int(element.get_attribute('step-index')) # 0 or 1 or 2
if __name__ == '__main__':
uploader = YoutubeUploader()
uploader.init()
uploader.upload_video(
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\1\0001.mp4',
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\1\p0001.jpg'
)
uploader.upload_video(
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\2\0002.mp4',
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\2\p0002.jpg'
)
uploader.upload_video(
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\3\0003.mp4',
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\3\p0003.jpg'
)
uploader.upload_video(
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\4\0004.mp4',
r'C:\Users\drminchul\Downloads\결과 파일 복사본\결과 파일 복사본\4\p0004.jpg'
)
    print('Done!')
``` |
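The cookie dance in `init()` exists because Selenium will only accept a cookie for the domain the browser is currently on, so cookies are grouped per domain and each domain is visited once before its cookies are injected. A Selenium-free sketch of that grouping step, assuming a Netscape-format `cookies.txt` next to the script:

```python
from collections import defaultdict
from http import cookiejar

cj = cookiejar.MozillaCookieJar('cookies.txt')  # path is illustrative
cj.load()

domains = defaultdict(list)
for cookie in cj:
    domains[cookie.domain].append(cookie)

for domain, cookies in domains.items():
    # The dummy URL init() visits before injecting this domain's cookies.
    dummy_url = 'https://' + domain.lstrip('.').rstrip('/') + '/404dummy'
    print(dummy_url, len(cookies))
```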
{
"source": "2mol/permafrostanalytics",
"score": 2
} |
#### File: eternal_sunshine/christian/regression_dataset.py
```python
import stuett
from stuett.global_config import get_setting, setting_exists
import argparse
from pathlib import Path
from datetime import datetime, date, timedelta
import plotly.graph_objects as go
import pandas as pd
import xarray as xr
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import torch
import io
class PermaRegressionDataset(Dataset):
"""A dataset that maps images and meta information (such as radiation,
surface temperature, ...) onto the temperature below surface."""
def __init__(self, local, data_path='../data', transform=None,
time_slice={"start_time": "2017-01-01",
"end_time": "2017-12-31"}):
"""
Args:
local (bool): Whether to read the dataset from a local storage
location or from a public Azure share.
data_path (str, optional): If the data should be read from a local
location, then this folder will denote the location of the
dataset.
transform (callable, optional): Optional transform to be applied
on images.
time_slice (dict): Can be used to create a different train and test
set. Note, this is not a pretty solution, especially because
time values are not interleaved. I.e., if time information is
used as input to a network, but the network has never seen
values from the corresponding month, then it can't make
confident predictions.
"""
if transform is not None:
raise NotImplementedError("transform not implemented!")
self.transform = transform
# This sensor contains near-surface temperature readings and is on the
# south side and therefore receives a lot of sunshine.
rock_temperature_file_mh10 = "MH10_temperature_rock_2017.csv" # South
radiation_file = "MH15_radiometer__conv_2017.csv"
if not local:
account_name = (
get_setting("azure")["account_name"]
if setting_exists("azure")
else "storageaccountperma8980"
)
account_key = (
get_setting("azure")["account_key"] if setting_exists(
"azure") else None
)
ts_store = stuett.ABSStore(
container="hackathon-on-permafrost",
prefix="timeseries_derived_data_products",
account_name=account_name,
account_key=account_key,
)
img_store = stuett.ABSStore(
container="hackathon-on-permafrost",
prefix="timelapse_images_fast",
account_name=account_name,
account_key=account_key,
)
else:
timeseries_folder = Path(data_path).joinpath(
"timeseries_derived_data_products").resolve()
ts_store = stuett.DirectoryStore(timeseries_folder)
            if rock_temperature_file_mh10 not in ts_store:
raise RuntimeError('Please provide a valid path to the ' +
'permafrost data!')
img_store = stuett.DirectoryStore(Path(data_path).joinpath( \
'timelapse_images_fast'))
if "2017-01-01/20170101_080018.JPG" not in store:
raise RuntimeError('Please provide a valid path to the ' +
'permafrost images.')
#self._ts_store = ts_store
self._img_store = img_store
### Load timeseries data.
rock_temperature_node_mh10 = stuett.data.CsvSource(
rock_temperature_file_mh10, store=ts_store)
rock_temp_mh10 = rock_temperature_node_mh10(time_slice)
radiation_node = stuett.data.CsvSource(radiation_file, store=ts_store)
radiation = radiation_node(time_slice)
net_radiation = radiation.loc[:, ['net_radiation']]
surface_temp = rock_temp_mh10.loc[:, ['temperature_nearsurface_t2']]
target_temp = rock_temp_mh10.loc[:, ['temperature_10cm']]
### Load image filenames.
image_node = stuett.data.MHDSLRFilenames(
store=img_store,
force_write_to_remote=True,
as_pandas=False,
)
image_fns = image_node(time_slice)
### Find image filenames that were captured close to temperature
### measures.
# With close we mean within a 20min window.
# Temperature/radiation values that have no corresponding image are
# ignored.
# Sanity check!
#for t1, t2 in zip(radiation['time'], rock_temp_mh10['time']):
# assert (t1 == t2)
j = 0
n = len(image_fns['time'])
measurement_pairs = []
for i, t in enumerate(rock_temp_mh10['time'].values):
while j < n:
# Translate difference in timestamps to minutes before casting
# to int.
diff = (image_fns['time'][j] - t).values.astype( \
'timedelta64[m]').astype(np.int)
if diff > 10:
# Image too far in the future, ignore sensor value.
break
absdiff = np.abs(diff)
if absdiff < 10:
# The image is very close, simply check whether the next
# picture is even closer. Otherwise, we take the current
# image.
if j + 1 < n:
absdiff2 = np.abs(
(image_fns['time'][j + 1] - t).values.astype(
'timedelta64[m]').astype(np.int))
else:
absdiff2 = None
if absdiff2 is None or absdiff < absdiff2:
measurement_pairs.append((i, j))
j += 1
else:
measurement_pairs.append((i, j + 1))
j += 2
break
j += 1
### Build dataset (make sure that there are no None values in the
### timeseries measurements).
self._img_fns = []
self._surface_temp = []
self._target_temp = []
self._timestamps = []
self._radiation = []
# This is coarse time information that one may provide as additional
# information. We encode the (normalized) month and daytime information,
# as this information may be quite helpful when judging temperature
# values.
# Though, it might also tempt the regression system to ignore all
# other information and solely predict based on this information
# (as a strong local minimum).
self._month = []
self._daytime = []
assert(np.all(~np.isnan(net_radiation.values)))
assert(np.all(~np.isnan(surface_temp.values)))
#assert(np.all(~np.isnan(target_temp.values)))
for i, j in measurement_pairs:
if np.any(np.isnan(target_temp.values[i, 0])):
continue
self._target_temp.append(target_temp.values[i, 0])
self._surface_temp.append(surface_temp.values[i, 0])
self._radiation.append(net_radiation.values[i, 0])
self._timestamps.append(target_temp['time'].values[i])
ts = pd.to_datetime(self._timestamps[-1])
self._month.append(ts.month)
self._daytime.append(ts.hour*60 + ts.minute)
self._img_fns.append(str(image_fns.values[0, j]))
self._target_temp = np.array(self._target_temp, dtype=np.float32)
self._surface_temp = np.array(self._surface_temp, dtype=np.float32)
self._radiation = np.array(self._radiation, dtype=np.float32)
self._month = np.array(self._month, dtype=np.float32)
self._daytime = np.array(self._daytime, dtype=np.float32)
# Normalize regression values.
self.target_temp_mean = self._target_temp.mean()
self.target_temp_std = self._target_temp.std()
self.surface_temp_mean = self._surface_temp.mean()
self.surface_temp_std = self._surface_temp.std()
self.radiation_mean = self._radiation.mean()
self.radiation_std = self._radiation.std()
self._target_temp = (self._target_temp - self.target_temp_mean) / \
self.target_temp_std
self._surface_temp = (self._surface_temp - self.surface_temp_mean) / \
self.surface_temp_std
self._radiation = (self._radiation - self.radiation_mean) / \
self.radiation_std
self._month = (self._month - self._month.mean()) / self._month.std()
        self._daytime = (self._daytime - self._daytime.mean()) / \
                        self._daytime.std()
print('dataset contains %d samples.' % len(self._img_fns))
def __len__(self):
return len(self._img_fns)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
if isinstance(idx, list):
# TODO read multiple images
raise NotImplementedError()
else:
img = Image.open(io.BytesIO(self._img_store[self._img_fns[idx]]))
img = img.rotate(90, expand=1)
data = np.array(img.convert('RGB')).transpose([2, 0, 1])
data = data.astype(np.float32)
ts = self._timestamps[idx]
sample = {
'img': data,
'surface_temp': self._surface_temp[idx].reshape(-1, 1),
'target_temp': self._target_temp[idx].reshape(-1, 1),
'radiation': self._radiation[idx].reshape(-1, 1),
'month': self._month[idx].reshape(-1, 1),
'daytime': self._daytime[idx].reshape(-1, 1),
# Just for the user, not meant to be used as input to a neural net.
#'timestamp': ts,
'idx': idx
}
return sample
```
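A minimal usage sketch for the dataset above, assuming the module is importable as `regression_dataset` and the public Azure share is reachable (`local=False`); note that `__getitem__` reshapes scalar features to `(-1, 1)`, so batched features come back as `(B, 1, 1)`:

```python
from torch.utils.data import DataLoader
from regression_dataset import PermaRegressionDataset

ds = PermaRegressionDataset(local=False,
                            time_slice={"start_time": "2017-01-01",
                                        "end_time": "2017-03-31"})
loader = DataLoader(ds, batch_size=8, shuffle=True)

batch = next(iter(loader))
print(batch['img'].shape)          # (8, 3, H, W) float32 images
print(batch['target_temp'].shape)  # (8, 1, 1) normalized 10cm temperature
```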
#### File: ideas/imgdiff/cvtest.py
```python
import numpy as np
import cv2
WIDTH, HEIGHT = 1600, 1000
SCALE = 0.25
def resize(img, scale):
width = int(img.shape[1] * scale)
height = int(img.shape[0] * scale)
dim = (width, height)
return cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
if __name__ == '__main__':
# image1_name = "timelapse_images/2017-09-16/20170916_120007.JPG"
# image2_name = "timelapse_images/2017-09-17/20170917_120003.JPG"
image1_name = "20170923_120003.JPG"
image2_name = "20170925_120004.JPG"
image1 = cv2.imread(image1_name)
image2 = cv2.imread(image2_name)
# window = cv2.namedWindow(image1_name, cv2.WINDOW_NORMAL)
# window = cv2.namedWindow(image2_name, cv2.WINDOW_NORMAL)
# cv2.resizeWindow(image1_name, width, height)
# cv2.resizeWindow(image2_name, width, height)
window = cv2.namedWindow('imgdiff', cv2.WINDOW_NORMAL)
cv2.resizeWindow('imgdiff', WIDTH, HEIGHT)
# diff_left = cv2.subtract(image1, image2)
# diff_right = cv2.subtract(image1, image2)
# diff = cv2.add(diff_left, diff_right)
# diff = cv2.subtract(image1, image2)
diff = cv2.absdiff(image1, image2)
# -------------------------------------------
# gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# ret, thresh = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)
# diff[thresh == 255] = 0
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
# diff = cv2.erode(diff, kernel, iterations = 1)
# -------------------------------------------
# cv2.imwrite('diff.png', difference)
concat_image = np.concatenate((image1, diff, image2), axis=1)
# cv2.imshow(image1_name, image1)
# cv2.imshow(image2_name, image2)
while True:
        cv2.imshow('imgdiff', resize(concat_image, SCALE))
k = cv2.waitKey(0)
if k == 27: # Esc
break
cv2.destroyAllWindows()
``` |
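The script settles on `cv2.absdiff` because `cv2.subtract` saturates negative differences to zero, so a single one-sided subtract silently drops half of the change; a tiny demonstration on toy pixels:

```python
import numpy as np
import cv2

a = np.array([[10, 200]], dtype=np.uint8)
b = np.array([[50, 100]], dtype=np.uint8)

print(cv2.subtract(a, b))  # [[  0 100]] - negative diff clipped to 0
print(cv2.subtract(b, a))  # [[ 40   0]] - the other half of the change
print(cv2.absdiff(a, b))   # [[ 40 100]] - captures both directions at once
```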
{
"source": "2much4u/PS4-GTA-V-Native-Updater",
"score": 2
} |
#### File: 2much4u/PS4-GTA-V-Native-Updater/nativeUpdater.py
```python
# IDAPython script: run inside IDA, where AskFile, GetMnem, XrefsTo and the
# other helpers below are provided by IDA's idc/idautils script globals.
systemNatives = [
0x4EDE34FBADD967A6,
0xE81651AD79516E48,
0xB8BA7F44DF1575E1,
0xEB1C67C3A5333A92,
0xC4BB298BD441BE78,
0x83666F9FB8FEBD4B,
0xC9D9444186B5A374,
0xC1B1E9A034A63A62,
0x5AE11BC36633DE4E,
0x0000000050597EE2,
0x0BADBFA3B172435F,
0xD0FFB162F40A139C,
0x71D93B57D07F9804,
0xE3621CC40F31FE2E,
0xE816E655DE37FE20,
0x652D2EEEF1D3E62C,
0xA8CEACB4F35AE058,
0x2A488C176D52CCA5,
0xB7A628320EFF8E47,
0xEDD95A39E5544DE8,
0x97EF1E5BCE9DC075,
0xF34EE736CF047844,
0x11E019C8F43ACC8A,
0xF2DB717A73826179,
0xBBDA792448DB5A89,
0x42B65DEEF2EDF2A1
]
# Read crossmap file into dictionary
def parseCrossmap():
fileName = AskFile(False, "", "Choose a crossmap file")
file = open(fileName, "r")
raw_lines = file.readlines()
crossmap = dict()
nativeCount = 0
for line in raw_lines:
values = line.replace('\n', '').split(',')
crossmap[int(values[1], 16)] = int(values[0], 16)
nativeCount += 1
file.close()
print("Found " + str(nativeCount) + " natives in crossmap")
return crossmap
# Read native hashes and functions into dictionary from binary
def findNativeFunctions():
global registerNative
registerNative = choose_func("Select registerNative function")
functionMap = dict()
nativeCount = 0
for xref in XrefsTo(registerNative):
addr = xref.frm
# Start at xref to function and work backwards
while True:
addr = PrevHead(addr)
if GetMnem(addr) == "mov":
opnd = GetOpnd(addr, 0)
if "edi" in opnd or "rdi" in opnd:
break
# Function address is always the line before the hash
hash = GetOperandValue(addr, 1)
func = GetOperandValue(PrevHead(addr), 1)
functionMap[hash] = func
nativeCount += 1
print("Found " + str(nativeCount) + " natives in EBOOT")
return functionMap
# Merge the crossmap and function map dictionaries
def mergeMaps(crossmap, functionMap):
mergedMap = dict()
nativeCount = 0
for new_hash in functionMap:
old_hash = crossmap.get(new_hash)
if old_hash:
mergedMap[old_hash] = functionMap[new_hash]
nativeCount += 1
#else:
# print("Failed to find hash " + hex(new_hash) + " in crossmap")
print("Merged " + str(nativeCount) + " natives in the maps")
return mergedMap
# Find system natives (usually excluded from crossmap)
def findSystemNatives(mergedMap):
# Find registerNativeInTable address from registerNative address
addr = registerNative
registerNativeInTable = 0
while True:
if GetMnem(addr) == "jmp":
registerNativeInTable = GetOperandValue(addr, 0)
break
addr = NextHead(addr)
# Search xrefs to registerNativeInTable
for xref in XrefsTo(registerNativeInTable):
addr = xref.frm
# Start at xref to function and work backwards
hash = 0
function = 0
while True:
addr = PrevHead(addr)
if hash != 0 and function != 0:
break
if GetMnem(addr) == "mov" and hash == 0:
opnd = GetOpnd(addr, 0)
if "esi" in opnd or "rsi" in opnd:
hash = GetOperandValue(addr, 1)
if hash not in systemNatives:
hash = 0
break
if GetMnem(addr) == "lea" and function == 0:
opnd = GetOpnd(addr, 0)
if "rdx" in opnd:
function = GetOperandValue(addr, 1)
if hash != 0:
mergedMap[hash] = function
return mergedMap
# Overwrite native hashes in clean header with function addresses
def createHeader(mergedMap):
fileName = AskFile(False, "", "Choose a native header file")
file = open(fileName, "r")
raw_lines = file.readlines()
missedNatives = 0
for i in range(0, len(raw_lines)):
line = raw_lines[i]
if "invoke" in line:
openParenthSplit = line.split(">(")
closeParenthSplit = openParenthSplit[1].split(")")
hash = None
commaSplit = None
if len(closeParenthSplit[0]) > 10:
commaSplit = closeParenthSplit[0].split(",")
hash = commaSplit[0]
else:
hash = closeParenthSplit[0]
function = mergedMap.get(int(hash, 16))
newLine = openParenthSplit[0] + ">("
if function:
formFunc = hex(function).split("0x")
formFunc = formFunc[1].replace('L', '').upper()
newLine += "0x" + formFunc
else:
# Write DEADBEEF for hashes not found
newLine += "0xDEADBEEF"
missedNatives += 1
print("Failed to find address for " + hex(int(hash, 16)))
newLine += closeParenthSplit[0].split(hash)[1] + ")"
newLine += closeParenthSplit[1]
raw_lines[i] = newLine
file.close()
newFile = open("newHeader.h", "w")
newFile.writelines(raw_lines)
newFile.close()
print("newHeader.h created")
print("Unable to replace " + str(missedNatives) + " natives")
crossmap = parseCrossmap()
functionMap = findNativeFunctions()
mergedMap = mergeMaps(crossmap, functionMap)
# Feel free to comment out this line if your crossmap includes system natives
mergedMap = findSystemNatives(mergedMap)
createHeader(mergedMap)
``` |
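For reference, `parseCrossmap` expects one `old_hash,new_hash` pair per line (hex, with or without a `0x` prefix) and keys the dictionary by the *new* hash, so merging against the hashes found in the EBOOT is a direct lookup; a small sketch with a hypothetical pair:

```python
# Hypothetical crossmap line: "old_hash,new_hash"
line = "4EDE34FBADD967A6,D9D1B8F3B85C4D23"
old_str, new_str = line.strip().split(',')

crossmap = {int(new_str, 16): int(old_str, 16)}  # new -> old, as in parseCrossmap
print(hex(crossmap[0xD9D1B8F3B85C4D23]))         # -> 0x4ede34fbadd967a6
```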
{
"source": "2mv/raapija",
"score": 3
} |
#### File: 2mv/raapija/last_transactions_parser.py
```python
import csv
import tempfile
import os
from transaction import Transaction
class LastTransactionsParser:
LAST_TRANSACTIONS_FILENAME = os.path.join(tempfile.gettempdir(), 'raapija_transactions_last.csv')
@staticmethod
def read():
try:
with open(LastTransactionsParser.LAST_TRANSACTIONS_FILENAME, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
return [Transaction(**transaction_dict) for transaction_dict in reader]
except FileNotFoundError:
return None
@staticmethod
def write(transactions):
with open(LastTransactionsParser.LAST_TRANSACTIONS_FILENAME, 'w', encoding='utf-8') as csvfile:
csv_fieldnames = transactions[0].__dict__.keys()
writer = csv.DictWriter(csvfile, csv_fieldnames)
writer.writeheader()
for transaction in transactions:
writer.writerow(transaction.__dict__)
```
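A round-trip sketch for the parser above; the `Transaction` field names here are assumptions, since the CSV header is derived from `__dict__.keys()` of whatever the real `Transaction` class stores:

```python
from last_transactions_parser import LastTransactionsParser
from transaction import Transaction

# Hypothetical fields; read() rebuilds instances via Transaction(**row).
txs = [Transaction(date='2017-01-02', amount='-12.34', recipient='Grocery')]
LastTransactionsParser.write(txs)

restored = LastTransactionsParser.read()
print(restored[0].__dict__)
```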
#### File: 2mv/raapija/message.py
```python
from marrow.mailer import Message as MarrowMessage
class Message(MarrowMessage):
def __bytes__(self):
return bytes(str(self), self.encoding)
``` |
{
"source": "2mv/seuraaja",
"score": 2
} |
#### File: 2mv/seuraaja/mailer.py
```python
from marrow.mailer import Mailer as MarrowMailer
from marrow.mailer import Message
import sys
import os
import pwd
import socket
class Mailer:
MAILER = MarrowMailer(dict(manager=dict(use='immediate'), transport=dict(use='sendmail')))
DEFAULT_USER = pwd.getpwuid(os.getuid()).pw_name
DEFAULT_AUTHOR = DEFAULT_USER + '@' + socket.getfqdn()
@staticmethod
def send(message):
Mailer.MAILER.send(message)
@staticmethod
def start():
Mailer.MAILER.start()
@staticmethod
def stop():
Mailer.MAILER.stop()
@staticmethod
def send_recommendations(changed_recommendations, new_recommendations, to_addr):
Mailer.start()
message = Message(
author=Mailer.DEFAULT_AUTHOR,
to=to_addr,
subject='New/changed recommendations',
plain=Mailer.get_recommendations_str(changed_recommendations, new_recommendations)
)
message.sendmail_f = Mailer.DEFAULT_USER
Mailer.send(message)
Mailer.stop()
@staticmethod
def get_recommendations_str(changed_recommendations, new_recommendations):
message_str = ""
if len(new_recommendations) > 0:
message_str = "New recommendations:\n"
message_str += "\n".join([str(r) for r in new_recommendations])
if len(changed_recommendations) > 0:
message_str = "Changed recommendations:\n"
message_str += "\n".join([str(r) for r in changed_recommendations])
return message_str
```
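A minimal driving sketch for the class above; since `get_recommendations_str` just calls `str()` on each recommendation, plain dicts are enough, and the sendmail transport assumes a working local MTA (the address is illustrative):

```python
from mailer import Mailer

changed = [{'name': 'ACME', 'recommendation': 'Buy', 'potential': '12%'}]
new = []

Mailer.send_recommendations(changed, new, 'user@example.com')
```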
#### File: 2mv/seuraaja/recommendations.py
```python
import unicodecsv as csv
import tempfile
import os
import errno
from datetime import date
class Recommendations:
STORED_RECOMMENDATIONS_FILENAME = os.path.join(tempfile.gettempdir(), 'seuraaja_recommendations_last.csv')
CSV_FIELD_NAMES = ['name', 'recommendation', 'potential', 'timestamp']
@staticmethod
def company_summary_to_recommendation(company_summary):
return {
'name': company_summary['name'],
'recommendation': company_summary['recommendation'],
'potential': company_summary['potential'],
'timestamp': date.today()
}
@staticmethod
def get_current_recommendations(company_summaries):
return map(Recommendations.company_summary_to_recommendation, company_summaries)
@staticmethod
def read_stored_recommendations():
try:
with open(Recommendations.STORED_RECOMMENDATIONS_FILENAME, 'r') as csvfile:
reader = csv.DictReader(csvfile)
return list(reader)
except IOError as e:
if e.errno == errno.ENOENT:
return None
raise e
@staticmethod
def get_changed_recommendations(current_recommendations, last_recommendations):
last_company_names = map(lambda r: r['name'], last_recommendations)
last_recommendations_by_name = dict(zip(last_company_names, last_recommendations))
return filter(lambda r: r['name'] in last_recommendations_by_name and
last_recommendations_by_name[r['name']]['recommendation'] != r['recommendation'],
current_recommendations)
@staticmethod
def get_new_recommendations(current_recommendations, last_recommendations):
current_company_names = map(lambda r: r['name'], current_recommendations)
last_company_names = map(lambda r: r['name'], last_recommendations)
new_company_names = set(current_company_names) - set(last_company_names)
return filter(lambda r: r['name'] in new_company_names, current_recommendations)
@staticmethod
def persist(recommendations):
with open(Recommendations.STORED_RECOMMENDATIONS_FILENAME, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=Recommendations.CSV_FIELD_NAMES)
writer.writeheader()
for recommendation in recommendations:
writer.writerow(recommendation)
``` |
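A worked example of the diffing helpers above; the module targets Python 2 (`unicodecsv`, list-returning `map`/`filter`), so under Python 3 wrap the results in `list(...)` as done here:

```python
from recommendations import Recommendations

last = [{'name': 'ACME', 'recommendation': 'Hold', 'potential': '5%'}]
current = [
    {'name': 'ACME', 'recommendation': 'Buy', 'potential': '12%'},
    {'name': 'Initech', 'recommendation': 'Sell', 'potential': '-3%'},
]

# ACME changed Hold -> Buy; Initech was not in the last snapshot.
print(list(Recommendations.get_changed_recommendations(current, last)))
print(list(Recommendations.get_new_recommendations(current, last)))
```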
{
"source": "2n3906/wordprexit",
"score": 2
} |
#### File: wordprexit/wordprexit/cli.py
```python
import click
import time
from .wxrfile import WXRFile
from .autop import wpautop
from .wp_shortcodes import parse_shortcodes
import html2text
from .hugo_shortcodes import shortcodify
from urllib.parse import urlparse, urljoin
import json
import requests
import dateutil.parser
import os
import datetime
import hashlib
import re
import sys
from ruamel.yaml import YAML # import yaml
yaml = YAML()
yaml.default_flow_style = False
def contains_wp_shortcodes(body, *sc):
"""Search body for one or more shortcodes named sc"""
tagre = '|'.join(sc)
# terrible regex taken from Wordpress source
pattern = re.compile(
'\\[(\\[?)(' + tagre +
')\\b([^\\]\\/]*(?:\\/(?!\\])[^\\]\\/]*)*?)(?:(\\/)\\]|\\](?:([^\\[]*(?:\\[(?!\\/\\2\\])[^\\[]*)*)\\[\\/\\2\\])?)(\\]?)'
)
if body:
return pattern.findall(body)
else:
return False
def check_post_attachments(post: dict, allattach: dict):
# Scan HTML body for <img> tags, presuming we'll download these
if re.search(r'<img\s', post.get('body', '')):
post['hugo_has_attachments'] = True
# Also check for attachments known to Wordpress
if [p for p in allattach if p.get('post_parent') == post.get('post_id')]:
post['hugo_has_attachments'] = True
return
def make_post_destinations(post: dict):
post_date = post.get('post_date', datetime.datetime(1970, 1, 1, 0, 0, 0))
fn = '{}-{}'.format(
post_date.strftime('%Y-%m-%d'), post.get('post_name', 'UNTITLED'))
if post.get('hugo_has_attachments'):
filepath = os.path.join('posts', fn, 'index.md')
bundlepath = os.path.join('posts', fn)
else:
filepath = os.path.join('posts', fn + '.md')
bundlepath = None
post['hugo_filepath'] = filepath
post['hugo_bundlepath'] = bundlepath
post['hugo_uniqueid'] = hashlib.md5(filepath.encode('utf-8')).hexdigest()
return
def make_post_frontmatter(post):
front_matter = {
'title': post.get('title'),
'date': post.get('post_date').isoformat(),
'lastmod': post.get('post_date').isoformat(),
'slug': post.get('post_name', 'UNTITLED'),
'type': 'posts',
}
if post.get('excerpt'):
front_matter['summary'] = post.get('excerpt')
if post.get('author'):
front_matter['author'] = post.get('author')
if post.get('categories'):
front_matter['categories'] = post.get('categories')
if post.get('tags'):
front_matter['tags'] = post.get('tags')
if post.get('status') == 'draft':
front_matter['draft'] = True
post['hugo_front_matter'] = front_matter
return
def add_resources_to_frontmatter(post: dict, allattach: dict):
attachments = [
p for p in allattach if p.get('post_parent') == post.get('post_id')
]
if attachments:
post['hugo_has_attachments'] = True # redundant
post['hugo_front_matter']['resources'] = [{
'src':
os.path.basename(urlparse(a.get('attachment_url')).path),
'title':
a.get('title')
} for a in attachments]
post['hugo_attachments_src'] = [
a.get('attachment_url') for a in attachments
]
return
def convert_post(post: dict):
body = post.get('body')
# post is HTML, so run fake wpautop on it
body = wpautop(body)
# Turn Wordpress shortcodes into HTML
body = parse_shortcodes(body)
# Parse HTML, replacing HTML attributes with Hugo shortcodes
body, detectedhtmlimages = shortcodify(body)
if detectedhtmlimages:
post['hugo_has_attachments'] = True
# add detected images to our list
# but first, remove any that look like IMAGENAME-WWWxHHH.jpg because we probably have the original
detectedhtmlimages = [
a for a in detectedhtmlimages
if not re.match(r'(.*)\-(\d+x\d+)\.(jpg|png)$', a)
]
if 'hugo_attachments_src' in post:
post['hugo_attachments_src'].extend(detectedhtmlimages)
else:
post['hugo_attachments_src'] = detectedhtmlimages
# Make body into Markdown
h = html2text.HTML2Text()
h.images_as_html = True
h.wrap_links = 0
h.inline_links = 0
body = h.handle(body).strip()
# Un-wrap Hugo shortcodes that got line-wrapped by html2text
body = re.sub(r'(?s)({{[\<\%].*?[\>\%]}})', lambda match: match.group(1).
replace('\n', ' '), body)
parentdir, tail = os.path.split(post['hugo_filepath'])
if not os.path.exists(parentdir):
os.makedirs(parentdir)
with open(post['hugo_filepath'], 'w') as f:
f.write('---\n')
yaml.dump(post.get('hugo_front_matter'), f)
f.write('---\n')
f.write(body)
return
def download_attachments(post, blog_url):
if post.get('hugo_bundlepath'):
if not os.path.exists(post.get('hugo_bundlepath')):
os.makedirs(post.get('hugo_bundlepath'))
for u in post.get('hugo_attachments_src', []):
fn = os.path.basename(urlparse(u).path)
fullpath = os.path.join(post.get('hugo_bundlepath'), fn)
# resolve relative URLs, when needed:
u = urljoin(blog_url, u)
r = requests.get(u, stream=True, timeout=15)
if r.status_code == 200:
with open(fullpath, 'wb') as f:
for chunk in r:
f.write(chunk)
if 'last-modified' in r.headers:
# set file mtime to time provided by web server
ts = dateutil.parser.parse(
r.headers['last-modified']).timestamp()
os.utime(fullpath, (ts, ts))
else:
click.echo('ERROR {} on {}'.format(r.status_code, u))
def convert_comments(post):
comments = post.get('comments')
if comments:
for c in comments:
if c.get('comment_approved'):
comment_dir = os.path.join('data', 'comments',
post['hugo_uniqueid'])
comment_fn = 'wordpress-{0:08d}.json'.format(
c.get('comment_id', 0))
comment_filepath = os.path.join(comment_dir, comment_fn)
comment_out = {}
h = html2text.HTML2Text()
h.wrap_links = 0
comment_out['_id'] = hashlib.md5(
str(c.get('comment_id')).encode('utf-8')).hexdigest()
if 'comment_parent' in c:
if c['comment_parent'] != 0:
comment_out['_parent'] = hashlib.md5(
str(c.get('comment_parent')).encode(
'utf-8')).hexdigest()
else:
comment_out['_parent'] = post.get('post_name')
if post.get('post_name'):
comment_out['slug'] = post.get('post_name')
comment_out['date'] = c.get(
'comment_date_gmt',
datetime.datetime(
1970, 1, 1, 0, 0, 0,
tzinfo=datetime.timezone.utc)).isoformat()
if 'comment_author' in c:
comment_out['name'] = c.get('comment_author')
if 'comment_author_email' in c:
comment_out['email'] = hashlib.md5(
c.get('comment_author_email').encode(
'utf-8')).hexdigest()
if 'comment_author_url' in c:
comment_out['url'] = c.get('comment_author_url')
if 'comment_content' in c:
# run fake wpautop on it
comment_body = wpautop(c['comment_content'])
# then convert to markdown
comment_out['message'] = h.handle(comment_body).strip()
if not os.path.exists(comment_dir):
os.makedirs(comment_dir)
with open(comment_filepath, 'w') as f:
f.write(json.dumps(comment_out, indent=2))
return
@click.command(context_settings={'help_option_names':['-h','--help']})
@click.argument('wxr_file', type=click.Path(exists=True))
def main(wxr_file):
"""Convert a Wordpress WXR export to a Hugo site."""
click.echo('Reading file {}...'.format(wxr_file))
w = WXRFile(wxr_file)
all_posts = w.get_posts()
all_attachments = w.get_attachments()
with click.progressbar(
all_posts, label='Munching metadata.....', show_pos=True) as bar:
for post in bar:
check_post_attachments(post, all_attachments)
make_post_destinations(post)
make_post_frontmatter(post)
add_resources_to_frontmatter(post, all_attachments)
with click.progressbar(
all_posts, label='Processing posts......', show_pos=True) as bar:
for post in bar:
convert_post(post)
with click.progressbar(
all_posts, label='Adding attachments....', show_pos=True) as bar:
for post in bar:
download_attachments(post, w.blog_url)
with click.progressbar(
all_posts, label='Converting comments...', show_pos=True) as bar:
for post in bar:
convert_comments(post)
click.echo('Done.')
``` |
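A quick sketch of the bundle-path logic in `make_post_destinations`, using an illustrative post dict and assuming the module is importable as shown:

```python
import datetime
from wordprexit.cli import make_post_destinations

post = {
    'post_name': 'hello-world',
    'post_date': datetime.datetime(2019, 5, 4, 12, 0, 0),
    'hugo_has_attachments': True,  # forces a page bundle with index.md
}
make_post_destinations(post)

print(post['hugo_filepath'])    # posts/2019-05-04-hello-world/index.md
print(post['hugo_bundlepath'])  # posts/2019-05-04-hello-world
```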
{
"source": "2naive/jasmin",
"score": 2
} |
#### File: protocols/cli/configs.py
```python
from jasmin.config.tools import ConfigFile
import os
import logging
# Related to travis-ci builds
ROOT_PATH = os.getenv('ROOT_PATH', '/')
class JCliConfig(ConfigFile):
"""Config handler for 'jcli' section"""
def __init__(self, config_file=None):
ConfigFile.__init__(self, config_file)
self.bind = self._get('jcli', 'bind', '127.0.0.1')
self.port = self._getint('jcli', 'port', 8990)
self.authentication = self._getbool('jcli', 'authentication', True)
self.admin_username = self._get('jcli', 'admin_username', 'jcliadmin')
self.admin_password = self._get(
'jcli', 'admin_password', '<PASSWORD>').decode('hex')
self.log_level = logging.getLevelName(self._get('jcli', 'log_level', 'INFO'))
self.log_file = self._get('jcli', 'log_file', '%s/var/log/jasmin/jcli.log' % ROOT_PATH)
self.log_rotate = self._get('jcli', 'log_rotate', 'W6')
self.log_format = self._get(
'jcli', 'log_format', '%(asctime)s %(levelname)-8s %(process)d %(message)s')
self.log_date_format = self._get('jcli', 'log_date_format', '%Y-%m-%d %H:%M:%S')
```
#### File: cli/test/test_smppccm.py
```python
import mock
from twisted.internet import defer, reactor
from test_jcli import jCliWithoutAuthTestCases
from jasmin.protocols.smpp.test.smsc_simulator import *
@defer.inlineCallbacks
def waitFor(seconds):
# Wait seconds
waitDeferred = defer.Deferred()
reactor.callLater(seconds, waitDeferred.callback, None)
yield waitDeferred
class SmppccmTestCases(jCliWithoutAuthTestCases):
    # Wait delay (seconds) for jcli commands to settle before asserting on output
    wait = 0.6
def add_connector(self, finalPrompt, extraCommands=[]):
sessionTerminated = False
commands = []
commands.append({'command': 'smppccm -a', 'expect': r'Adding a new connector\: \(ok\: save, ko\: exit\)'})
for extraCommand in extraCommands:
commands.append(extraCommand)
if extraCommand['command'] in ['ok', 'ko']:
sessionTerminated = True
if not sessionTerminated:
commands.append({'command': 'ok',
'expect': r'Successfully added connector \[',
'wait': self.wait})
return self._test(finalPrompt, commands)
class LastClientFactory(Factory):
lastClient = None
def buildProtocol(self, addr):
self.lastClient = Factory.buildProtocol(self, addr)
return self.lastClient
class HappySMSCTestCase(SmppccmTestCases):
protocol = HappySMSCRecorder
@defer.inlineCallbacks
def setUp(self):
yield SmppccmTestCases.setUp(self)
self.smsc_f = LastClientFactory()
self.smsc_f.protocol = self.protocol
self.SMSCPort = reactor.listenTCP(0, self.smsc_f)
@defer.inlineCallbacks
def tearDown(self):
SmppccmTestCases.tearDown(self)
yield self.SMSCPort.stopListening()
class BasicTestCases(HappySMSCTestCase):
def test_list(self):
commands = [{'command': 'smppccm -l', 'expect': r'Total connectors: 0'}]
return self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_add_with_minimum_args(self):
extraCommands = [{'command': 'cid operator_1'}]
yield self.add_connector(r'jcli : ', extraCommands)
@defer.inlineCallbacks
def test_add_without_minimum_args(self):
extraCommands = [{'command': 'ok', 'expect': r'You must set at least connector id \(cid\) before saving !',
'wait': self.wait}]
yield self.add_connector(r'> ', extraCommands)
@defer.inlineCallbacks
def test_add_invalid_configkey(self):
extraCommands = [{'command': 'cid operator_2'},
{'command': 'anykey anyvalue', 'expect': r'Unknown SMPPClientConfig key: anykey'}]
yield self.add_connector(r'jcli : ', extraCommands)
@defer.inlineCallbacks
def test_add_invalid_configkey_value(self):
extraCommands = [{'command': 'cid operator_3'},
{'command': 'port 22e'},
{'command': 'ok', 'expect': r'Error\: port must be an integer', 'wait': self.wait}]
yield self.add_connector(r'> ', extraCommands)
@defer.inlineCallbacks
def test_add_long_username(self):
extraCommands = [{'command': 'cid operator_3'},
{'command': 'username 1234567890123456'},
{'command': 'ok', 'expect': r'Error\: username is longer than allowed size \(15\)',
'wait': self.wait}]
yield self.add_connector(r'> ', extraCommands)
@defer.inlineCallbacks
def test_add_long_password(self):
extraCommands = [{'command': 'cid operator_3'},
{'command': 'password <PASSWORD>'},
{'command': 'ok', 'expect': r'Error\: password is longer than allowed size \(8\)',
'wait': self.wait}]
yield self.add_connector(r'> ', extraCommands)
@defer.inlineCallbacks
def test_cancel_add(self):
extraCommands = [{'command': 'cid operator_3'},
{'command': 'ko'}, ]
yield self.add_connector(r'jcli : ', extraCommands)
@defer.inlineCallbacks
def test_add_and_list(self):
extraCommands = [{'command': 'cid operator_4'}]
yield self.add_connector('jcli : ', extraCommands)
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_4 stopped None 0 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_add_cancel_and_list(self):
extraCommands = [{'command': 'cid operator_5'},
{'command': 'ko'}, ]
yield self.add_connector(r'jcli : ', extraCommands)
commands = [{'command': 'smppccm -l', 'expect': r'Total connectors: 0'}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_add_and_show(self):
cid = 'operator_6'
extraCommands = [{'command': 'cid %s' % cid}]
yield self.add_connector('jcli : ', extraCommands)
expectedList = ['ripf 0',
'con_fail_delay 10',
'dlr_expiry 86400', 'coding 0',
'logrotate midnight',
'submit_throughput 1',
'elink_interval 30',
'bind_to 30',
'port 2775',
'con_fail_retry yes',
'password password',
'src_addr None',
'bind_npi 0',
'addr_range None',
'dst_ton 1',
'res_to 120',
'def_msg_id 0',
'priority 0',
'con_loss_retry yes',
'username smppclient',
'dst_npi 1',
'validity None',
'requeue_delay 120',
'host 127.0.0.1',
'src_npi 1',
'trx_to 300',
'logfile .*var/log/jasmin/default-%s.log' % cid,
'systype ',
'ssl no',
'cid %s' % cid,
'loglevel 20',
'bind transceiver',
'proto_id None',
'dlr_msgid 0',
'con_loss_delay 10',
'logprivacy no',
'bind_ton 0',
'pdu_red_to 10',
'src_ton 2']
commands = [{'command': 'smppccm -s %s' % cid, 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_show_invalid_cid(self):
commands = [{'command': 'smppccm -s invalid_cid', 'expect': r'Unknown connector\: invalid_cid'}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_update_cid(self):
cid = 'operator_7'
extraCommands = [{'command': 'cid %s' % cid}]
yield self.add_connector(r'jcli : ', extraCommands)
commands = [{'command': 'smppccm -u operator_7',
'expect': r'Updating connector id \[%s\]\: \(ok\: save, ko\: exit\)' % cid},
{'command': 'cid 2222', 'expect': r'Connector id can not be modified !'}]
yield self._test(r'> ', commands)
@defer.inlineCallbacks
def test_update(self):
cid = 'operator_8'
extraCommands = [{'command': 'cid %s' % cid}]
yield self.add_connector(r'jcli : ', extraCommands)
commands = [{'command': 'smppccm -u operator_8',
'expect': r'Updating connector id \[%s\]\: \(ok\: save, ko\: exit\)' % cid},
{'command': 'port 2222'},
{'command': 'ok', 'expect': r'Successfully updated connector \[%s\]' % cid}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_update_and_show(self):
cid = 'operator_9'
extraCommands = [{'command': 'cid %s' % cid}]
yield self.add_connector(r'jcli : ', extraCommands)
commands = [{'command': 'smppccm -u %s' % cid,
'expect': r'Updating connector id \[%s\]\: \(ok\: save, ko\: exit\)' % cid},
{'command': 'port 122223'},
{'command': 'ok', 'expect': r'Successfully updated connector \[%s\]' % cid}]
yield self._test(r'jcli : ', commands)
expectedList = ['ripf 0',
'con_fail_delay 10',
'dlr_expiry 86400', 'coding 0',
'logrotate midnight',
'submit_throughput 1',
'elink_interval 30',
'bind_to 30',
'port 122223',
'con_fail_retry yes',
'password password',
'src_addr None',
'bind_npi 0',
'addr_range None',
'dst_ton 1',
'res_to 120',
'def_msg_id 0',
'priority 0',
'con_loss_retry yes',
'username smppclient',
'dst_npi 1',
'validity None',
'requeue_delay 120',
'host 127.0.0.1',
'src_npi 1',
'trx_to 300',
'logfile .*var/log/jasmin/default-%s.log' % cid,
'systype ',
'ssl no',
'cid %s' % cid,
'loglevel 20',
'bind transceiver',
'proto_id None',
'dlr_msgid 0',
'con_loss_delay 10',
'logprivacy no',
'bind_ton 0',
'pdu_red_to 10',
'src_ton 2']
commands = [{'command': 'smppccm -s %s' % cid, 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_remove_invalid_cid(self):
commands = [{'command': 'smppccm -r invalid_cid', 'expect': r'Unknown connector\: invalid_cid'}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_remove(self):
cid = 'operator_10'
extraCommands = [{'command': 'cid %s' % cid}]
yield self.add_connector(r'jcli : ', extraCommands)
commands = [{'command': 'smppccm -r %s' % cid, 'expect': r'Successfully removed connector id\:%s' % cid}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_remove_started_connector(self):
# Add
cid = 'operator_11'
extraCommands = [{'command': 'cid %s' % cid},
{'command': 'con_fail_retry 0'}]
yield self.add_connector(r'jcli : ', extraCommands)
# Start
commands = [{'command': 'smppccm -1 %s' % cid,
'expect': r'Successfully started connector id\:%s' % cid,
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
# Remove
commands = [{'command': 'smppccm -r %s' % cid,
'expect': r'Successfully removed connector id\:%s' % cid,
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_remove_and_list(self):
# Add
cid = 'operator_12'
extraCommands = [{'command': 'cid %s' % cid}]
yield self.add_connector(r'jcli : ', extraCommands)
# List
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_12 stopped None 0 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
# Remove
commands = [{'command': 'smppccm -r %s' % cid, 'expect': r'Successfully removed connector id\:%s' % cid}]
yield self._test(r'jcli : ', commands)
# List again
commands = [{'command': 'smppccm -l', 'expect': r'Total connectors: 0'}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_start(self):
# Add
cid = 'operator_13'
extraCommands = [{'command': 'cid %s' % cid},
{'command': 'con_fail_retry no', 'wait': 0.6}]
yield self.add_connector(r'jcli : ', extraCommands)
# Start
commands = [{'command': 'smppccm -1 %s' % cid,
'expect': r'Successfully started connector id\:%s' % cid,
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_start_and_list(self):
# Add
cid = 'operator_14'
extraCommands = [{'command': 'cid %s' % cid},
{'command': 'port %s' % self.SMSCPort.getHost().port}]
yield self.add_connector(r'jcli : ', extraCommands)
# Start
commands = [{'command': 'smppccm -1 %s' % cid,
'expect': r'Successfully started connector id\:%s' % cid,
'wait': 1}]
yield self._test(r'jcli : ', commands)
# List
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_14 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
# Stop
commands = [{'command': 'smppccm -0 %s' % cid,
'expect': r'Successfully stopped connector id',
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_start_invalid_cid(self):
commands = [{'command': 'smppccm -1 invalid_cid', 'expect': r'Unknown connector\: invalid_cid'}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_stop(self):
# Add
cid = 'operator_15'
extraCommands = [{'command': 'cid %s' % cid},
{'command': 'port %s' % self.SMSCPort.getHost().port}]
yield self.add_connector(r'jcli : ', extraCommands)
# Stop
commands = [{'command': 'smppccm -0 %s' % cid, 'expect': r'Failed stopping connector, check log for details'}]
yield self._test(r'jcli : ', commands)
# Start
commands = [{'command': 'smppccm -1 %s' % cid,
'expect': r'Successfully started connector id\:%s' % cid,
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
# Stop
commands = [{'command': 'smppccm -0 %s' % cid,
'expect': r'Successfully stopped connector id\:%s' % cid,
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_stop_invalid_cid(self):
commands = [{'command': 'smppccm -0 invalid_cid', 'expect': r'Unknown connector\: invalid_cid'}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_start_stop_and_list(self):
# Add
cid = 'operator_16'
extraCommands = [{'command': 'cid %s' % cid},
{'command': 'port %s' % self.SMSCPort.getHost().port}]
yield self.add_connector(r'jcli : ', extraCommands)
# Start
commands = [{'command': 'smppccm -1 %s' % cid,
'expect': r'Successfully started connector id\:%s' % cid,
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
# Stop
commands = [{'command': 'smppccm -0 %s' % cid,
'expect': r'Successfully stopped connector id\:%s' % cid,
'wait': 0.6}]
yield self._test(r'jcli : ', commands)
# List
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_16 stopped NONE 1 1 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
class ParameterValuesTestCases(SmppccmTestCases):
@defer.inlineCallbacks
def test_add_connector(self):
"""Will test for value validation for a set of command keys with smppccm -a
everything is built through the assert_battery"""
assert_battery = [
{'key': 'src_ton', 'default_value': '2', 'set_value': '3', 'isValid': True},
{'key': 'src_ton', 'default_value': '2', 'set_value': '300', 'isValid': False},
{'key': 'src_ton', 'default_value': '2', 'set_value': '-1', 'isValid': False},
{'key': 'src_ton', 'default_value': '2', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '1', 'isValid': True},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '-1', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '300', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '6', 'isValid': True},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '1', 'isValid': True},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'bind_ton', 'default_value': '0', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '300', 'isValid': False},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '6', 'isValid': True},
{'key': 'src_npi', 'default_value': '1', 'set_value': '3', 'isValid': True},
{'key': 'src_npi', 'default_value': '1', 'set_value': '300', 'isValid': False},
{'key': 'src_npi', 'default_value': '1', 'set_value': '-1', 'isValid': False},
{'key': 'src_npi', 'default_value': '1', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '1', 'isValid': True},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '-1', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '300', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '6', 'isValid': True},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '1', 'isValid': True},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'bind_npi', 'default_value': '0', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '300', 'isValid': False},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '6', 'isValid': True},
{'key': 'priority', 'default_value': '0', 'set_value': '0', 'isValid': True},
{'key': 'priority', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'priority', 'default_value': '0', 'set_value': 'LEVEL_1', 'isValid': False},
{'key': 'priority', 'default_value': '0', 'set_value': '300', 'isValid': False},
{'key': 'priority', 'default_value': '0', 'set_value': '3', 'isValid': True},
{'key': 'ripf', 'default_value': '0', 'set_value': '0', 'isValid': True},
{'key': 'ripf', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'ripf', 'default_value': '0', 'set_value': 'xx', 'isValid': False},
{'key': 'ripf', 'default_value': '0', 'set_value': 'REPLCACE', 'isValid': False},
{'key': 'ripf', 'default_value': '0', 'set_value': '1', 'isValid': True},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'yes', 'isValid': True},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': '1', 'isValid': False},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'xx', 'isValid': False},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'NON', 'isValid': False},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'no', 'isValid': True},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'yes', 'isValid': True},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': '1', 'isValid': False},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'xx', 'isValid': False},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'NON', 'isValid': False},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'no', 'isValid': True},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'yes', 'isValid': True},
{'key': 'ssl', 'default_value': 'no', 'set_value': '1', 'isValid': False},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'xx', 'isValid': False},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'NON', 'isValid': False},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'no', 'isValid': True},
{'key': 'loglevel', 'default_value': '20', 'set_value': '10', 'isValid': True},
{'key': 'loglevel', 'default_value': '20', 'set_value': '1', 'isValid': False},
{'key': 'loglevel', 'default_value': '20', 'set_value': 'xx', 'isValid': False},
{'key': 'loglevel', 'default_value': '20', 'set_value': 'DEBUG', 'isValid': False},
{'key': 'loglevel', 'default_value': '20', 'set_value': '50', 'isValid': True},
]
cid = 0
for value in assert_battery:
if value['isValid']:
add_expect = '%s %s> ' % (value['key'], value['set_value'])
show_expect = '%s %s' % (value['key'], value['set_value'])
else:
add_expect = 'Unknown value for key %s' % value['key']
show_expect = '%s %s' % (value['key'], value['default_value'])
# Add and assertions
extraCommands = [{'command': 'cid operator_%s' % cid},
{'command': '%s %s' % (value['key'], value['set_value']), 'expect': add_expect},
{'command': 'ok', 'wait': 0.8}]
yield self.add_connector(r'jcli : ', extraCommands)
        # Assert the value was taken (or not, depending on whether it is valid)
commands = [{'command': 'smppccm -s operator_%s' % cid, 'expect': show_expect}]
yield self._test(r'jcli : ', commands)
cid += 1
@defer.inlineCallbacks
def test_update_connector(self):
"""Will test for value validation for a set of command keys with smppccm -u
everything is built through the assert_battery"""
assert_battery = [
{'key': 'src_ton', 'default_value': '2', 'set_value': '3', 'isValid': True},
{'key': 'src_ton', 'default_value': '2', 'set_value': '300', 'isValid': False},
{'key': 'src_ton', 'default_value': '2', 'set_value': '-1', 'isValid': False},
{'key': 'src_ton', 'default_value': '2', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '1', 'isValid': True},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '-1', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '300', 'isValid': False},
{'key': 'dst_ton', 'default_value': '1', 'set_value': '6', 'isValid': True},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '1', 'isValid': True},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'bind_ton', 'default_value': '0', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '300', 'isValid': False},
{'key': 'bind_ton', 'default_value': '0', 'set_value': '6', 'isValid': True},
{'key': 'src_npi', 'default_value': '1', 'set_value': '3', 'isValid': True},
{'key': 'src_npi', 'default_value': '1', 'set_value': '300', 'isValid': False},
{'key': 'src_npi', 'default_value': '1', 'set_value': '-1', 'isValid': False},
{'key': 'src_npi', 'default_value': '1', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '1', 'isValid': True},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '-1', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '300', 'isValid': False},
{'key': 'dst_npi', 'default_value': '1', 'set_value': '6', 'isValid': True},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '1', 'isValid': True},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'bind_npi', 'default_value': '0', 'set_value': 'NATIONAL', 'isValid': False},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '300', 'isValid': False},
{'key': 'bind_npi', 'default_value': '0', 'set_value': '6', 'isValid': True},
{'key': 'priority', 'default_value': '0', 'set_value': '0', 'isValid': True},
{'key': 'priority', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'priority', 'default_value': '0', 'set_value': 'LEVEL_1', 'isValid': False},
{'key': 'priority', 'default_value': '0', 'set_value': '300', 'isValid': False},
{'key': 'priority', 'default_value': '0', 'set_value': '3', 'isValid': True},
{'key': 'ripf', 'default_value': '0', 'set_value': '0', 'isValid': True},
{'key': 'ripf', 'default_value': '0', 'set_value': '-1', 'isValid': False},
{'key': 'ripf', 'default_value': '0', 'set_value': 'xx', 'isValid': False},
{'key': 'ripf', 'default_value': '0', 'set_value': 'REPLCACE', 'isValid': False},
{'key': 'ripf', 'default_value': '0', 'set_value': '1', 'isValid': True},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'yes', 'isValid': True},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': '1', 'isValid': False},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'xx', 'isValid': False},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'NON', 'isValid': False},
{'key': 'con_fail_retry', 'default_value': 'yes', 'set_value': 'no', 'isValid': True},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'yes', 'isValid': True},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': '1', 'isValid': False},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'xx', 'isValid': False},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'NON', 'isValid': False},
{'key': 'con_loss_retry', 'default_value': 'yes', 'set_value': 'no', 'isValid': True},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'yes', 'isValid': True},
{'key': 'ssl', 'default_value': 'no', 'set_value': '1', 'isValid': False},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'xx', 'isValid': False},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'NON', 'isValid': False},
{'key': 'ssl', 'default_value': 'no', 'set_value': 'no', 'isValid': True},
{'key': 'loglevel', 'default_value': '20', 'set_value': '10', 'isValid': True},
{'key': 'loglevel', 'default_value': '20', 'set_value': '1', 'isValid': False},
{'key': 'loglevel', 'default_value': '20', 'set_value': 'xx', 'isValid': False},
{'key': 'loglevel', 'default_value': '20', 'set_value': 'DEBUG', 'isValid': False},
{'key': 'loglevel', 'default_value': '20', 'set_value': '50', 'isValid': True},
]
cid = 0
for value in assert_battery:
if value['isValid']:
add_expect = '%s %s> ' % (value['key'], value['set_value'])
show_expect = '%s %s' % (value['key'], value['set_value'])
else:
add_expect = 'Unknown value for key %s' % value['key']
show_expect = '%s %s' % (value['key'], value['default_value'])
# Add connector with defaults
extraCommands = [{'command': 'cid operator_%s' % cid}]
yield self.add_connector(r'jcli : ', extraCommands)
# Update and assert
commands = [{'command': 'smppccm -u operator_%s' % cid},
{'command': 'password password'},
{'command': '%s %s' % (value['key'], value['set_value']), 'expect': add_expect},
{'command': 'ok', 'wait': 0.8}]
yield self._test(r'jcli : ', commands)
        # Assert the value was taken (or not, depending on whether it is valid)
commands = [{'command': 'smppccm -s operator_%s' % cid, 'expect': show_expect}]
yield self._test(r'jcli : ', commands)
cid += 1
class SMSCTestCases(HappySMSCTestCase):
@defer.inlineCallbacks
def setUp(self):
yield HappySMSCTestCase.setUp(self)
# A connector list to be stopped on tearDown
self.startedConnectors = []
@defer.inlineCallbacks
def tearDown(self):
# Stop all started connectors
for startedConnector in self.startedConnectors:
yield self.stop_connector(startedConnector)
yield HappySMSCTestCase.tearDown(self)
@defer.inlineCallbacks
def start_connector(self, cid, finalPrompt=r'jcli : ', wait=0.6, expect=None):
commands = [{'command': 'smppccm -1 %s' % cid, 'wait': wait, 'expect': expect}]
yield self._test(finalPrompt, commands)
# Add cid to the connector list to be stopped in tearDown
self.startedConnectors.append(cid)
@defer.inlineCallbacks
def stop_connector(self, cid, finalPrompt=r'jcli : ', wait=0.6, expect=None):
commands = [{'command': 'smppccm -0 %s' % cid, 'wait': wait, 'expect': expect}]
yield self._test(finalPrompt, commands)
@defer.inlineCallbacks
def test_systype(self):
"""Testing for #64, will set systype key to any value and start the connector to ensure
it is correctly encoded in bind pdu"""
# Add a connector, set systype and start it
extraCommands = [{'command': 'cid operator_1'},
{'command': 'systype 999999'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
yield self.start_connector('operator_1')
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_update_systype(self):
"""Testing for #64, will set systype key to any value and start the connector to ensure
it is correctly encoded in bind pdu"""
        # Add a connector (systype will be set via update below)
extraCommands = [{'command': 'cid operator_1'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
# Update the connector to set systype and start it
commands = [{'command': 'smppccm -u operator_1'},
{'command': 'systype 999999'},
{'command': 'ok'}]
yield self._test(r'jcli : ', commands)
yield self.start_connector('operator_1')
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_username(self):
"""Testing for #105, will set username to an int value and start the connector to ensure
it is correctly encoded in bind pdu"""
        # Add a connector, set username and start it
extraCommands = [{'command': 'cid operator_1'},
{'command': 'username 999999'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
yield self.start_connector('operator_1')
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_update_username(self):
"""Testing for #105, will set username to an int value and start the connector to ensure
it is correctly encoded in bind pdu"""
        # Add a connector (username will be set via update below)
extraCommands = [{'command': 'cid operator_1'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
        # Update the connector to set username, then start it
commands = [{'command': 'smppccm -u operator_1'},
{'command': 'username 999999'},
{'command': 'ok'}]
yield self._test(r'jcli : ', commands)
yield self.start_connector('operator_1')
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_password(self):
"""Testing for #105, will set password to an int value and start the connector to ensure
it is correctly encoded in bind pdu"""
        # Add a connector, set password and start it
extraCommands = [{'command': 'cid operator_1'},
{'command': 'password <PASSWORD>'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
yield self.start_connector('operator_1')
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_update_password(self):
"""Testing for #105, will set password to an int value and start the connector to ensure
it is correctly encoded in bind pdu"""
        # Add a connector (password will be set via update below)
extraCommands = [{'command': 'cid operator_1'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
        # Update the connector to set password, then start it
commands = [{'command': 'smppccm -u operator_1'},
{'command': 'password <PASSWORD>'},
{'command': 'ok'}]
yield self._test(r'jcli : ', commands)
yield self.start_connector('operator_1')
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_quick_restart(self):
"Testing for #68, restarting quickly a connector will loose its session state"
# Add a connector and start it
extraCommands = [{'command': 'cid operator_1'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
yield self.start_connector('operator_1', wait=8)
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
        # Stopping and then starting very quickly leads to an error starting the connector
        # because there was not enough time for the unbind to complete
yield self.stop_connector('operator_1', finalPrompt=None, wait=0)
yield self.start_connector('operator_1', finalPrompt=None,
wait=20,
expect='Failed starting connector, check log for details')
# List and assert it is stopped (start command errored)
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 stopped NONE 1 1 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_restart_on_update(self):
"Testing for #68, updating a config key from RequireRestartKeys will lead to a quick restart"
# Add a connector and start it
extraCommands = [{'command': 'cid operator_1'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
yield self.start_connector('operator_1')
# List and assert it is BOUND
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 1 0 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
        # Update systype, which is in RequireRestartKeys and will lead to a connector restart
commands = [{'command': 'smppccm -u operator_1'},
{'command': 'systype ANY'},
{'command': 'ok', 'wait': 7,
'expect': ['Restarting connector \[operator_1\] for updates to take effect ...',
'Failed starting connector, will retry in 5 seconds',
'Successfully updated connector \[operator_1\]']}, ]
yield self._test(r'jcli : ', commands)
        # List and assert it is started (restart was successful)
expectedList = ['#Connector id Service Session Starts Stops',
'#operator_1 started BOUND_TRX 2 1 ',
'Total connectors: 1']
commands = [{'command': 'smppccm -l', 'expect': expectedList}]
yield self._test(r'jcli : ', commands)
@defer.inlineCallbacks
def test_update_bind_ton_npi_and_address_range(self):
"""Testing for #104, updating bind_ton & bind_npi through jcli must take effect"""
        # Add a connector; set bind_ton, bind_npi and addr_range
extraCommands = [{'command': 'cid operator_1'},
{'command': 'bind_ton 1'},
{'command': 'bind_npi 0'},
{'command': 'addr_range ^32.*{6}$'},
{'command': 'port %s' % self.SMSCPort.getHost().port}, ]
yield self.add_connector(r'jcli : ', extraCommands)
# Update connector and start it
commands = [{'command': 'smppccm -u operator_1'},
{'command': 'bind_ton 5'}, # ALPHANUMERIC
{'command': 'bind_npi 8'}, # NATIONAL
                    {'command': 'addr_range ^34.*{6}$'}, # new address range
{'command': 'ok'}]
yield self._test(r'jcli : ', commands)
yield self.start_connector('operator_1')
        # Assert bind_ton, bind_npi and addr_range values
self.assertEqual(1, len(self.SMSCPort.factory.lastClient.pduRecords))
self.assertEqual('ALPHANUMERIC', str(self.SMSCPort.factory.lastClient.pduRecords[0].params['addr_ton']))
self.assertEqual('NATIONAL', str(self.SMSCPort.factory.lastClient.pduRecords[0].params['addr_npi']))
self.assertEqual('^34.*{6}$', str(self.SMSCPort.factory.lastClient.pduRecords[0].params['address_range']))
```
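The batteries above all follow one driving convention: each step is a dict with a `command` typed at the jcli prompt, an optional `expect` regex (or list of regexes) matched against the output, and an optional `wait` delay, consumed by `self._test(finalPrompt, commands)`. A minimal sketch of that shape, using only values seen in the tests above (the cid is illustrative, and the in-order matching of a list of regexes is an assumption):

```python
# Sketch (not a real test) of the command-battery convention used above
cid = 'operator_99'  # illustrative connector id
commands = [
    # Plain command, no assertion on the output
    {'command': 'smppccm -u %s' % cid},
    # Command whose output must match a regex, after a settling delay
    {'command': 'smppccm -1 %s' % cid,
     'expect': r'Successfully started connector id\:%s' % cid,
     'wait': 0.6},
    # 'expect' may also be a list of regexes (assumed to be matched in order)
    {'command': 'smppccm -l', 'expect': ['#Connector id', 'Total connectors: 1']},
]
# Driven as: yield self._test(r'jcli : ', commands)
```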
#### File: smpp/test/smsc_simulator.py
```python
import random
from datetime import datetime, timedelta
from jasmin.vendor.smpp.twisted.tests.smsc_simulator import *
LOG_CATEGORY = "jasmin.smpp.tests.smsc_simulator"
message_state_map = {
'ACCEPTD': MessageState.ACCEPTED,
'UNDELIV': MessageState.UNDELIVERABLE,
'REJECTD': MessageState.REJECTED,
'DELIVRD': MessageState.DELIVERED,
'EXPIRED': MessageState.EXPIRED,
'DELETED': MessageState.DELETED,
'UNKNOWN': MessageState.UNKNOWN,
}
class NoSubmitSmWhenReceiverIsBoundSMSC(HappySMSC):
def handleSubmit(self, reqPDU):
self.sendResponse(reqPDU, CommandStatus.ESME_RINVBNDSTS)
class NoResponseOnSubmitSMSCRecorder(HappySMSC):
submitRecords = []
def handleSubmit(self, reqPDU):
self.submitRecords.append(reqPDU)
class HappySMSCRecorder(HappySMSC):
def __init__(self):
HappySMSC.__init__(self)
self.pduRecords = []
self.submitRecords = []
def PDUReceived(self, pdu):
HappySMSC.PDUReceived(self, pdu)
self.pduRecords.append(pdu)
def handleSubmit(self, reqPDU):
self.submitRecords.append(reqPDU)
self.sendSubmitSmResponse(reqPDU)
def sendSubmitSmResponse(self, reqPDU):
if reqPDU.params['short_message'] == 'test_error: ESME_RTHROTTLED':
status = CommandStatus.ESME_RTHROTTLED
elif reqPDU.params['short_message'] == 'test_error: ESME_RSYSERR':
status = CommandStatus.ESME_RSYSERR
elif reqPDU.params['short_message'] == 'test_error: ESME_RREPLACEFAIL':
status = CommandStatus.ESME_RREPLACEFAIL
else:
status = CommandStatus.ESME_ROK
        # Send back a submit_sm_resp pdu with the chosen status
self.lastSubmitSmRestPDU = reqPDU.requireAck(reqPDU.seqNum, status=status,
message_id=str(random.randint(10000000, 9999999999)))
self.sendPDU(self.lastSubmitSmRestPDU)
def handleData(self, reqPDU):
self.sendSuccessResponse(reqPDU)
class DeliverSmSMSC(HappySMSC):
def trigger_deliver_sm(self, pdu):
self.sendPDU(pdu)
class DeliveryReceiptSMSC(HappySMSC):
"""Will send a deliver_sm on bind request
"""
def __init__(self):
HappySMSC.__init__(self)
self.responseMap[BindReceiver] = self.sendDeliverSM
self.responseMap[BindTransceiver] = self.sendDeliverSM
def sendDeliverSM(self, reqPDU):
self.sendSuccessResponse(reqPDU)
message_id = '1891273321'
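        # The short_message below follows the conventional SMSC receipt text
        # (SMPP v3.4 Appendix B): message id, sub/dlvrd counters, submit/done
        # dates, final stat, err code and a text excerpt.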
pdu = DeliverSM(
source_addr='1234',
destination_addr='4567',
short_message='id:%s sub:001 dlvrd:001 submit date:1305050826 done date:1305050826 stat:DELIVRD err:000 text:DLVRD TO MOBILE' % message_id,
message_state=MessageState.DELIVERED,
receipted_message_id=message_id,
)
self.sendPDU(pdu)
class ManualDeliveryReceiptHappySMSC(HappySMSC):
"""Will send a deliver_sm through trigger_DLR() method
A submit_sm must be sent to this SMSC before requesting sendDeliverSM !
"""
submitRecords = []
lastSubmitSmRestPDU = None
lastSubmitSmPDU = None
def __init__(self):
HappySMSC.__init__(self)
self.nextResponseMsgId = None
self.pduRecords = []
def PDUReceived(self, pdu):
HappySMSC.PDUReceived(self, pdu)
self.pduRecords.append(pdu)
def sendSuccessResponse(self, reqPDU):
if str(reqPDU.commandId)[:5] == 'bind_':
self.submitRecords = []
HappySMSC.sendSuccessResponse(self, reqPDU)
def sendSubmitSmResponse(self, reqPDU):
if self.nextResponseMsgId is None:
msgid = str(random.randint(10000000, 9999999999))
else:
msgid = str(self.nextResponseMsgId)
self.nextResponseMsgId = None
self.lastSubmitSmRestPDU = reqPDU.requireAck(reqPDU.seqNum,
status=CommandStatus.ESME_ROK,
message_id=msgid,
)
return self.sendPDU(self.lastSubmitSmRestPDU)
def handleSubmit(self, reqPDU):
# Send back a submit_sm_resp
self.sendSubmitSmResponse(reqPDU)
self.lastSubmitSmPDU = reqPDU
self.submitRecords.append(reqPDU)
def trigger_deliver_sm(self, pdu):
return self.sendPDU(pdu)
def trigger_data_sm(self, pdu):
return self.sendPDU(pdu)
def trigger_DLR(self, _id=None, pdu_type='deliver_sm', stat='DELIVRD'):
if self.lastSubmitSmRestPDU is None:
raise Exception('A submit_sm must be sent to this SMSC before requesting sendDeliverSM !')
# Pick the last submit_sm
submitsm_pdu = self.lastSubmitSmPDU
# Pick the last submit_sm_resp
submitsm_resp_pdu = self.lastSubmitSmRestPDU
if _id is None:
_id = submitsm_resp_pdu.params['message_id']
if pdu_type == 'deliver_sm':
# Send back a deliver_sm with containing a DLR
pdu = DeliverSM(
source_addr=submitsm_pdu.params['source_addr'],
destination_addr=submitsm_pdu.params['destination_addr'],
short_message='id:%s sub:001 dlvrd:001 submit date:1305050826 done date:1305050826 stat:%s err:000 text:%s' % (
str(_id),
stat,
submitsm_pdu.params['short_message'][:20]
),
message_state=message_state_map[stat],
receipted_message_id=str(_id),
)
return self.trigger_deliver_sm(pdu)
elif pdu_type == 'data_sm':
# Send back a data_sm with containing a DLR
pdu = DataSM(
source_addr=submitsm_pdu.params['source_addr'],
destination_addr=submitsm_pdu.params['destination_addr'],
message_state=message_state_map[stat],
receipted_message_id=str(_id),
)
return self.trigger_data_sm(pdu)
else:
raise Exception('Unknown pdu_type (%s) when calling trigger_DLR()' % pdu_type)
class QoSSMSC_2MPS(HappySMSC):
"""A throttled SMSC that only accept 2 Messages per second"""
last_submit_at = None
def handleSubmit(self, reqPDU):
        # Enforce the 2 MPS limit: require at least permitted_delay between submits
permitted_throughput = 1 / 2.0
permitted_delay = timedelta(microseconds=permitted_throughput * 1000000)
if self.last_submit_at is not None:
delay = datetime.now() - self.last_submit_at
if self.last_submit_at is not None and delay < permitted_delay:
self.sendResponse(reqPDU, CommandStatus.ESME_RTHROTTLED)
else:
self.last_submit_at = datetime.now()
self.sendResponse(reqPDU, CommandStatus.ESME_ROK)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
factory = Factory()
factory.protocol = BlackHoleSMSC
reactor.listenTCP(8007, factory)
reactor.run()
```
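The throttling arithmetic in `QoSSMSC_2MPS` is worth spelling out: 2 messages per second means a minimum gap of 0.5 s between accepted submits. A standalone sketch of the same computation, independent of the simulator classes:

```python
from datetime import datetime, timedelta

# Same arithmetic as QoSSMSC_2MPS.handleSubmit: 2 MPS <=> one submit per 0.5s
permitted_throughput = 1 / 2.0
permitted_delay = timedelta(microseconds=permitted_throughput * 1000000)
assert permitted_delay == timedelta(milliseconds=500)

# A submit arriving sooner than permitted_delay after the previous one is
# answered with ESME_RTHROTTLED by the simulator; later ones get ESME_ROK.
last_submit_at = datetime.now()
delay = datetime.now() - last_submit_at
print('throttled' if delay < permitted_delay else 'accepted')
```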
#### File: smpp/test/test_configuration.py
```python
from twisted.trial.unittest import TestCase
from jasmin.protocols.smpp.configs import ConfigUndefinedIdError, ConfigInvalidIdError
from jasmin.protocols.smpp.configs import SMPPClientConfig
class SMPPClientConfigCases(TestCase):
def test_id_is_mandatory(self):
self.assertRaises(ConfigUndefinedIdError, SMPPClientConfig)
def test_id_syntax_validation(self):
invalidValues = ['zzz s', '', 'a,', 'r#r', '9a', '&"()=+~#{[|\`\^@]}', 'a123456789012345678901234-', 'aa']
for invalidValue in invalidValues:
self.assertRaises(ConfigInvalidIdError, SMPPClientConfig, id=invalidValue)
```
#### File: jasmin/protocols/validation.py
```python
class AbstractCredentialValidator(object):
"""An abstract CredentialValidator, when inherited it must validate self.user credentials
agains self.action"""
def __init__(self, action, user):
self.action = action
self.user = user
def updatePDUWithUserDefaults(self, PDU):
"""Must update PDU.params from User credential defaults whenever a
PDU.params item is None"""
raise NotImplementedError()
def validate(self):
"Must validate requests through Authorizations and ValueFilters credential check"
raise NotImplementedError()
```
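Since `AbstractCredentialValidator` fixes only the constructor and the two abstract hooks, a concrete validator just fills them in. A minimal hypothetical subclass, purely illustrative (the `default_source_addr` attribute and the permissive `validate` are assumptions, not part of the Jasmin API):

```python
class AlwaysAllowValidator(AbstractCredentialValidator):
    """Hypothetical validator: fills a PDU default and accepts every action."""
    def updatePDUWithUserDefaults(self, PDU):
        # Populate a missing param from an (assumed) user default
        if PDU.params.get('source_addr') is None:
            PDU.params['source_addr'] = getattr(self.user, 'default_source_addr', None)
        return PDU
    def validate(self):
        # A real validator would check self.user's Authorizations and
        # ValueFilters against self.action; this sketch allows everything
        return True
```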
#### File: routing/test/test_encoding.py
```python
import random
import urllib
from twisted.internet import defer
from twisted.internet import reactor
from twisted.web import server
from twisted.web.client import getPage
from jasmin.protocols.http.configs import HTTPApiConfig
from jasmin.protocols.http.server import HTTPApi
from jasmin.routing.proxies import RouterPBProxy
from jasmin.routing.test.codepages import (IA5_ASCII, ISO8859_1,
CYRILLIC, ISO_8859_8)
from jasmin.routing.test.test_router import HappySMSCTestCase, SubmitSmTestCaseTools
from jasmin.vendor.smpp.pdu.pdu_types import EsmClassGsmFeatures
def composeMessage(characters, length):
if length <= len(characters):
return ''.join(random.sample(characters, length))
else:
s = ''
while len(s) < length:
s += ''.join(random.sample(characters, len(characters)))
return s[:length]
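# Illustrative note (added): composeMessage draws `length` characters from
# `characters`; when length exceeds the alphabet size, whole shuffled copies
# of the alphabet are concatenated then truncated, e.g. composeMessage('abc', 5)
# could return 'bacab'.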
class CodingTestCases(RouterPBProxy, HappySMSCTestCase, SubmitSmTestCaseTools):
@defer.inlineCallbacks
def run_test(self, content, datacoding=None, port=1401):
yield self.connect('127.0.0.1', self.pbPort)
yield self.prepareRoutingsAndStartConnector()
# Set content
self.params['content'] = content
# Set datacoding
if datacoding is None and 'coding' in self.params:
del self.params['coding']
if datacoding is not None:
self.params['coding'] = datacoding
# Prepare baseurl
baseurl = 'http://127.0.0.1:%s/send?%s' % (port, urllib.urlencode(self.params))
# Send a MT
# We should receive a msg id
c = yield getPage(baseurl, method=self.method, postdata=self.postdata)
msgStatus = c[:7]
# Wait 2 seconds before stopping SmppClientConnectors
exitDeferred = defer.Deferred()
reactor.callLater(2, exitDeferred.callback, None)
yield exitDeferred
yield self.stopSmppClientConnectors()
# Run tests
self.assertEqual(msgStatus, 'Success')
if datacoding is None:
datacoding = 0
datacoding_matrix = {}
datacoding_matrix[0] = {'schemeData': 'SMSC_DEFAULT_ALPHABET'}
datacoding_matrix[1] = {'schemeData': 'IA5_ASCII'}
datacoding_matrix[2] = {'schemeData': 'OCTET_UNSPECIFIED'}
datacoding_matrix[3] = {'schemeData': 'LATIN_1'}
datacoding_matrix[4] = {'schemeData': 'OCTET_UNSPECIFIED_COMMON'}
datacoding_matrix[5] = {'schemeData': 'JIS'}
datacoding_matrix[6] = {'schemeData': 'CYRILLIC'}
datacoding_matrix[7] = {'schemeData': 'ISO_8859_8'}
datacoding_matrix[8] = {'schemeData': 'UCS2'}
datacoding_matrix[9] = {'schemeData': 'PICTOGRAM'}
datacoding_matrix[10] = {'schemeData': 'ISO_2022_JP'}
datacoding_matrix[13] = {'schemeData': 'EXTENDED_KANJI_JIS'}
datacoding_matrix[14] = {'schemeData': 'KS_C_5601'}
# Check for content encoding
receivedContent = ''
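        # Note (added for clarity, per the standard SMPP UDH layout): a
        # short_message starting with '\x05\x00\x03' carries a 6-byte
        # concatenation UDH (UDHL=5, IEI=0x00 concatenated SM, IEDL=3, then
        # ref/total/seqnum), hence the [6:] slice strips it before reassembly.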
for submitSm in self.SMSCPort.factory.lastClient.submitRecords:
            if (EsmClassGsmFeatures.UDHI_INDICATOR_SET in submitSm.params['esm_class'].gsmFeatures
                    and submitSm.params['short_message'][:3] == '\x05\x00\x03'):
                receivedContent += submitSm.params['short_message'][6:]
else:
receivedContent += submitSm.params['short_message']
if datacoding != 0:
self.assertEqual(content, receivedContent)
else:
# Assuming gsm0338 tests are done with the @ char only
self.assertEqual(content, receivedContent.replace("\x00", '@'))
# Check for schemeData
sentDataCoding = datacoding_matrix[datacoding]['schemeData']
for submitSm in self.SMSCPort.factory.lastClient.submitRecords:
self.assertEqual(str(submitSm.params['data_coding'].schemeData), sentDataCoding)
class SubmitSmCodingTestCases(CodingTestCases):
@defer.inlineCallbacks
def test_gsm0338_at(self):
"""Testing gsm338 encoding for the @ char"""
_gsm0338_str = composeMessage('@', 160)
yield self.run_test(content=_gsm0338_str)
@defer.inlineCallbacks
def test_IA5_ASCII(self):
_ia5ascii_str = composeMessage(IA5_ASCII, 160)
yield self.run_test(content=_ia5ascii_str, datacoding=1)
def test_OCTET_UNSPECIFIED(self):
# datacoding = 2
pass
test_OCTET_UNSPECIFIED.skip = 'TODO: What kind of data should we send using this DC ?'
@defer.inlineCallbacks
def test_LATIN_1(self):
_latin1_str = composeMessage(ISO8859_1, 140)
yield self.run_test(content=_latin1_str, datacoding=3)
def test_OCTET_UNSPECIFIED_COMMON(self):
# datacoding = 4
pass
test_OCTET_UNSPECIFIED_COMMON.skip = 'TODO: What kind of data should we send using this DC ?'
@defer.inlineCallbacks
def test_JIS(self):
# c.f. http://unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/JIS/SHIFTJIS.TXT
_jisx201_str = composeMessage({'\x8140', '\x96BA', '\xE062', '\xEAA2'}, 70)
yield self.run_test(content=''.join(_jisx201_str), datacoding=5)
@defer.inlineCallbacks
def test_CYRILLIC(self):
_cyrillic_str = composeMessage(CYRILLIC, 140)
yield self.run_test(content=_cyrillic_str, datacoding=6)
@defer.inlineCallbacks
def test_ISO_8859_8(self):
_iso8859_8_str = composeMessage(ISO_8859_8, 140)
yield self.run_test(content=_iso8859_8_str, datacoding=7)
@defer.inlineCallbacks
def test_UCS2(self):
_rabbit_arabic = composeMessage({'\x0623', '\x0631', '\x0646', '\x0628'}, 70)
yield self.run_test(content=''.join(_rabbit_arabic), datacoding=8)
@defer.inlineCallbacks
def test_PICTOGRAM(self):
# c.f. http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT
_cp932_str = composeMessage({'\x8B89', '\x8B90', '\x8BC9', '\xFC4B'}, 70)
yield self.run_test(content=_cp932_str, datacoding=9)
def test_ISO_2022_JP(self):
# datacoding = 10
pass
    test_ISO_2022_JP.skip = "TODO: Didn't find a unicode codepage for ISO2022-JP"
@defer.inlineCallbacks
def test_EXTENDED_KANJI_JIS(self):
# c.f. http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT
_cp932_str = composeMessage({'\x8B89', '\x8B90', '\x8BC9', '\xFC4B'}, 70)
yield self.run_test(content=_cp932_str, datacoding=13)
@defer.inlineCallbacks
def test_KS_C_5601(self):
# c.f. ftp://ftp.unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/KSC/KSC5601.TXT
_ks_c_5601_str = composeMessage({'\x8141', '\xA496', '\xE1D7', '\xFDFE'}, 70)
yield self.run_test(content=_ks_c_5601_str, datacoding=14)
class LongSubmitSmCodingUsingSARTestCases(CodingTestCases):
@defer.inlineCallbacks
def test_gsm0338_at(self):
_gsm0338_str = composeMessage('@', 612) # 612 = 153 * 4
yield self.run_test(content=_gsm0338_str)
@defer.inlineCallbacks
def test_IA5_ASCII(self):
_ia5ascii_str = composeMessage(IA5_ASCII, 612) # 612 = 153 * 4
yield self.run_test(content=_ia5ascii_str, datacoding=1)
def test_OCTET_UNSPECIFIED(self):
# datacoding = 2
pass
test_OCTET_UNSPECIFIED.skip = 'TODO: What kind of data should we send using this DC ?'
@defer.inlineCallbacks
def test_LATIN_1(self):
_latin1_str = composeMessage(ISO8859_1, 670) # 670 = 134 * 5
yield self.run_test(content=_latin1_str, datacoding=3)
def test_OCTET_UNSPECIFIED_COMMON(self):
# datacoding = 4
pass
test_OCTET_UNSPECIFIED_COMMON.skip = 'TODO: What kind of data should we send using this DC ?'
@defer.inlineCallbacks
def test_JIS(self):
# c.f. http://unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/JIS/SHIFTJIS.TXT
_jisx201_str = composeMessage({'\x8140', '\x96BA', '\xE062', '\xEAA2'}, 335) # 335 = 67 * 5
yield self.run_test(content=''.join(_jisx201_str), datacoding=5)
@defer.inlineCallbacks
def test_CYRILLIC(self):
_cyrillic_str = composeMessage(CYRILLIC, 670) # 670 = 134 * 5
yield self.run_test(content=_cyrillic_str, datacoding=6)
@defer.inlineCallbacks
def test_ISO_8859_8(self):
_iso8859_8_str = composeMessage(ISO_8859_8, 670) # 670 = 134 * 5
yield self.run_test(content=_iso8859_8_str, datacoding=7)
@defer.inlineCallbacks
def test_UCS2(self):
_rabbit_arabic = composeMessage({'\x0623', '\x0631', '\x0646', '\x0628'}, 335) # 335 = 67 * 5
yield self.run_test(content=''.join(_rabbit_arabic), datacoding=8)
@defer.inlineCallbacks
def test_PICTOGRAM(self):
# c.f. http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT
_cp932_str = composeMessage({'\x8B89', '\x8B90', '\x8BC9', '\xFC4B'}, 335) # 335 = 67 * 5
yield self.run_test(content=_cp932_str, datacoding=9)
def test_ISO_2022_JP(self):
# datacoding = 10
pass
    test_ISO_2022_JP.skip = "TODO: Didn't find a unicode codepage for ISO2022-JP"
@defer.inlineCallbacks
def test_EXTENDED_KANJI_JIS(self):
# c.f. http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT
_cp932_str = composeMessage({'\x8B89', '\x8B90', '\x8BC9', '\xFC4B'}, 335) # 335 = 67 * 5
yield self.run_test(content=_cp932_str, datacoding=13)
@defer.inlineCallbacks
def test_KS_C_5601(self):
# c.f. ftp://ftp.unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/KSC/KSC5601.TXT
_ks_c_5601_str = composeMessage({'\x8141', '\xA496', '\xE1D7', '\xFDFE'}, 335) # 335 = 67 * 5
yield self.run_test(content=_ks_c_5601_str, datacoding=14)
class LongSubmitSmCodingUsingUDHTestCases(CodingTestCases):
@defer.inlineCallbacks
def setUp(self):
yield CodingTestCases.setUp(self)
# Start a new http server with long_content_split = 'udh'
httpApiConfigInstance = HTTPApiConfig()
httpApiConfigInstance.port = 1402
httpApiConfigInstance.long_content_split = 'udh'
# Launch the http server
httpApi = HTTPApi(self.pbRoot_f, self.clientManager_f, httpApiConfigInstance)
self.httpServer_udh = reactor.listenTCP(httpApiConfigInstance.port, server.Site(httpApi))
self.httpPort_udh = httpApiConfigInstance.port
@defer.inlineCallbacks
def tearDown(self):
yield CodingTestCases.tearDown(self)
self.httpServer_udh.stopListening()
@defer.inlineCallbacks
def test_gsm0338_at(self):
_gsm0338_str = composeMessage({'@'}, 612)
yield self.run_test(content=_gsm0338_str, port=self.httpPort_udh)
@defer.inlineCallbacks
def test_IA5_ASCII(self):
_ia5ascii_str = composeMessage(IA5_ASCII, 612) # 612 = 153 * 4
yield self.run_test(content=_ia5ascii_str, datacoding=1, port=self.httpPort_udh)
def test_OCTET_UNSPECIFIED(self):
# datacoding = 2
pass
test_OCTET_UNSPECIFIED.skip = 'TODO: What kind of data should we send using this DC ?'
@defer.inlineCallbacks
def test_LATIN_1(self):
_latin1_str = composeMessage(ISO8859_1, 670) # 670 = 134 * 5
yield self.run_test(content=_latin1_str, datacoding=3, port=self.httpPort_udh)
def test_OCTET_UNSPECIFIED_COMMON(self):
# datacoding = 4
pass
test_OCTET_UNSPECIFIED_COMMON.skip = 'TODO: What kind of data should we send using this DC ?'
@defer.inlineCallbacks
def test_JIS(self):
# c.f. http://unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/JIS/SHIFTJIS.TXT
_jisx201_str = composeMessage({'\x8140', '\x96BA', '\xE062', '\xEAA2'}, 335) # 335 = 67 * 5
yield self.run_test(content=''.join(_jisx201_str), datacoding=5, port=self.httpPort_udh)
@defer.inlineCallbacks
def test_CYRILLIC(self):
_cyrillic_str = composeMessage(CYRILLIC, 670) # 670 = 134 * 5
yield self.run_test(content=_cyrillic_str, datacoding=6, port=self.httpPort_udh)
@defer.inlineCallbacks
def test_ISO_8859_8(self):
_iso8859_8_str = composeMessage(ISO_8859_8, 670) # 670 = 134 * 5
yield self.run_test(content=_iso8859_8_str, datacoding=7, port=self.httpPort_udh)
@defer.inlineCallbacks
def test_UCS2(self):
_rabbit_arabic = composeMessage({'\x0623', '\x0631', '\x0646', '\x0628'}, 335) # 335 = 67 * 5
yield self.run_test(content=''.join(_rabbit_arabic), datacoding=8, port=self.httpPort_udh)
@defer.inlineCallbacks
def test_PICTOGRAM(self):
# c.f. http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT
_cp932_str = composeMessage({'\x8B89', '\x8B90', '\x8BC9', '\xFC4B'}, 335) # 335 = 67 * 5
yield self.run_test(content=_cp932_str, datacoding=9, port=self.httpPort_udh)
def test_ISO_2022_JP(self):
# datacoding = 10
pass
    test_ISO_2022_JP.skip = "TODO: Didn't find a unicode codepage for ISO2022-JP"
@defer.inlineCallbacks
def test_EXTENDED_KANJI_JIS(self):
# c.f. http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP932.TXT
_cp932_str = composeMessage({'\x8B89', '\x8B90', '\x8BC9', '\xFC4B'}, 335) # 335 = 67 * 5
yield self.run_test(content=_cp932_str, datacoding=13, port=self.httpPort_udh)
@defer.inlineCallbacks
def test_KS_C_5601(self):
# c.f. ftp://ftp.unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/KSC/KSC5601.TXT
_ks_c_5601_str = composeMessage({'\x8141', '\xA496', '\xE1D7', '\xFDFE'}, 335) # 335 = 67 * 5
yield self.run_test(content=_ks_c_5601_str, datacoding=14, port=self.httpPort_udh)
```
#### File: jasmin/tools/eval.py
```python
from jasmin.tools.singleton import Singleton
class CompiledNode(object):
"""A compiled code holder singleton"""
__metaclass__ = Singleton
nodes = {}
def get(self, pyCode):
"""Return a compiled pyCode object or instanciate a new one"""
pyCodeHash = pyCode.encode('hex')
if pyCodeHash not in self.nodes:
self.nodes[pyCodeHash] = compile(pyCode, '', 'exec')
return self.nodes[pyCodeHash]
```
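Because `CompiledNode` is a Singleton keyed by the hex-encoded source, two calls with the same snippet compile once and share the code object. A small usage sketch (the snippet body is illustrative):

```python
node_store = CompiledNode()
py_code = "result = 1 + 1"  # illustrative snippet
code_a = node_store.get(py_code)
code_b = CompiledNode().get(py_code)  # Singleton: same instance, same cache
assert code_a is code_b
# The cached object is an ordinary code object, usable with exec:
namespace = {}
exec(code_a, namespace)
assert namespace['result'] == 2
```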
#### File: tools/migrations/migration.py
```python
from jasmin.routing.Filters import TagFilter
from jasmin.routing.jasminApi import User, Group
def user_status(data, context=None):
"""Changes impacted by #306
Will migrate users to enable newly applied changes for enable/disable"""
# Create new users, they will have the enable/disable methods
new_data = []
for old_user in data:
user = User(
uid=old_user.uid,
group=Group(old_user.group.gid),
username=old_user.username,
password=<PASSWORD>,
password_crypted=True,
mt_credential=old_user.mt_credential,
smpps_credential=old_user.smpps_credential)
new_data.append(user)
return new_data
def group_status(data, context=None):
"""Changes impacted by #306
Will migrate groups to enable newly applied changes for enable/disable"""
# Create new groups, they will have the enable/disable methods
new_data = []
for old_group in data:
group = Group(gid=old_group.gid)
new_data.append(group)
return new_data
def tagfilters_casting(data, context=None):
"""Changes impacted by #516
Will cast tag filters to string (from integer) in filters and routes having tagfilters"""
if context == 'filters':
for fid, tagfilter in data.iteritems():
if isinstance(tagfilter, TagFilter):
tagfilter.tag = str(tagfilter.tag)
elif context == 'mtroutes':
for routes in data.getAll():
route = routes[routes.keys()[0]]
for filter in route.filters:
if isinstance(filter, TagFilter):
# Cast tags to str
filter.tag = str(filter.tag)
return data
def fix_users_and_smppccs_09rc23(data, context=None):
"""Adding the new authorization 'set_hex_content' and fix smppccs with proto_id having a None string
value"""
if context == 'users':
# Create new users and modify the mt_credential to include the new authorization
new_data = []
for old_user in data:
user = User(
uid=old_user.uid,
group=Group(old_user.group.gid),
username=old_user.username,
password=<PASSWORD>,
password_crypted=True,
mt_credential=old_user.mt_credential,
smpps_credential=old_user.smpps_credential)
user.mt_credential.authorizations['set_hex_content'] = True
user.mt_credential.authorizations['set_schedule_delivery_time'] = True
new_data.append(user)
return new_data
elif context == 'smppccs':
# Fix smppccs proto_id value
for smppcc in data:
if isinstance(smppcc['config'].protocol_id, str) and smppcc['config'].protocol_id.lower() == 'none':
smppcc['config'].protocol_id = None
return data
def fix_users_09rc24(data, context=None):
"""Adding the new authorization 'set_schedule_delivery_time'
value"""
if context == 'users':
# Create new users and modify the mt_credential to include the new authorization
new_data = []
for old_user in data:
user = User(
uid=old_user.uid,
group=Group(old_user.group.gid),
username=old_user.username,
password=<PASSWORD>,
            password_crypted=True,
mt_credential=old_user.mt_credential,
smpps_credential=old_user.smpps_credential)
user.mt_credential.authorizations['set_schedule_delivery_time'] = True
new_data.append(user)
return new_data
"""This is the main map for orchestrating config migrations.
The map is based on 3 elements:
1. conditions: binary conditions on Jasmin version, the patch version is zfilled(3), this means 0.8rc2 (or
0.8.2) will be represented as 0.8002.
2. contexts: configuration context (users, groups, smppccs ...)
3. operations: functions to call to migrate the config
"""
MAP = [
{'conditions': ['<0.8008'],
'contexts': {'groups'},
'operations': [group_status]},
{'conditions': ['<0.8008'],
'contexts': {'users'},
'operations': [user_status]},
{'conditions': ['<=0.9015'],
'contexts': {'filters', 'mtroutes'},
'operations': [tagfilters_casting]},
{'conditions': ['<=0.9022'],
'contexts': {'users', 'smppccs'},
'operations': [fix_users_and_smppccs_09rc23]},
{'conditions': ['<=0.9023'],
'contexts': {'users'},
'operations': [fix_users_09rc24]},
]
```
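The MAP docstring above describes how versions are packed for the binary conditions: the patch component is zero-filled to 3 digits, so 0.8rc2 / 0.8.2 compares as 0.8002. A sketch of that packing under that stated convention (the helper name is hypothetical):

```python
def pack_version(major_minor, patch):
    """Hypothetical helper mirroring the MAP docstring: patch is zfilled(3)"""
    return '%s%s' % (major_minor, str(patch).zfill(3))

assert pack_version('0.8', 2) == '0.8002'   # 0.8rc2 / 0.8.2
assert pack_version('0.9', 15) == '0.9015'  # matches the '<=0.9015' condition
# Plain string comparison then implements the '<' / '<=' conditions in MAP:
assert '0.8002' < '0.8008'
```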
#### File: falcon/routing/compiled.py
```python
import keyword
import re
_FIELD_REGEX = re.compile('{([^}]*)}')
_TAB_STR = ' ' * 4
class CompiledRouter(object):
"""Fast URI router which compiles its routing logic to Python code.
Generally you do not need to use this router class directly, as an
instance is created by default when the falcon.API class is initialized.
The router treats URI paths as a tree of URI segments and searches by
checking the URI one segment at a time. Instead of interpreting the route
tree for each look-up, it generates inlined, bespoke Python code to
perform the search, then compiles that code. This makes the route
processing quite fast.
"""
    def __init__(self):
        self._roots = []
        self._code_lines = None
        self._src = None
        self._expressions = None
        self._return_values = None
        # NOTE: _compile() populates the attributes above; it must run last so
        # they are not reset to None after compilation.
        self._find = self._compile()
def add_route(self, uri_template, method_map, resource):
"""Adds a route between a URI path template and a resource.
Args:
uri_template (str): A URI template to use for the route
method_map (dict): A mapping of HTTP methods (e.g., 'GET',
'POST') to methods of a resource object.
resource (object): The resource instance to associate with
the URI template.
"""
        if re.search(r'\s', uri_template):
raise ValueError('URI templates may not include whitespace.')
# NOTE(kgriffs): Ensure fields are valid Python identifiers,
# since they will be passed as kwargs to responders. Also
# ensure there are no duplicate names, since that causes the
# following problems:
#
# 1. For simple nodes, values from deeper nodes overwrite
# values from more shallow nodes.
# 2. For complex nodes, re.compile() raises a nasty error
#
fields = _FIELD_REGEX.findall(uri_template)
used_names = set()
for name in fields:
is_identifier = re.match('[A-Za-z_][A-Za-z0-9_]*$', name)
if not is_identifier or name in keyword.kwlist:
raise ValueError('Field names must be valid identifiers '
"('{}' is not valid).".format(name))
if name in used_names:
raise ValueError('Field names may not be duplicated '
"('{}' was used more than once)".format(name))
used_names.add(name)
path = uri_template.strip('/').split('/')
def insert(nodes, path_index=0):
for node in nodes:
segment = path[path_index]
if node.matches(segment):
path_index += 1
if path_index == len(path):
# NOTE(kgriffs): Override previous node
node.method_map = method_map
node.resource = resource
node.uri_template = uri_template
else:
insert(node.children, path_index)
return
if node.conflicts_with(segment):
msg = (
'The URI template for this route conflicts with another'
"route's template. This is usually caused by using "
'different field names at the same level in the path. '
'For example, given the route paths '
"'/parents/{id}' and '/parents/{parent_id}/children', "
'the conflict can be resolved by renaming one of the '
'fields to match the other, i.e.: '
"'/parents/{parent_id}' and '/parents/{parent_id}/children'."
)
raise ValueError(msg)
# NOTE(richardolsson): If we got this far, the node doesn't already
# exist and needs to be created. This builds a new branch of the
# routing tree recursively until it reaches the new node leaf.
new_node = CompiledRouterNode(path[path_index])
nodes.append(new_node)
if path_index == len(path) - 1:
new_node.method_map = method_map
new_node.resource = resource
new_node.uri_template = uri_template
else:
insert(new_node.children, path_index + 1)
insert(self._roots)
self._find = self._compile()
def find(self, uri, req=None):
"""Search for a route that matches the given partial URI.
Args:
uri(str): The requested path to route.
Keyword Args:
req(Request): The Request object that will be passed to
the routed responder. Currently the value of this
argument is ignored by :class:`~.CompiledRouter`.
Routing is based solely on the path.
Returns:
tuple: A 4-member tuple composed of (resource, method_map,
params, uri_template), or ``None`` if no route matches
the requested path.
"""
path = uri.lstrip('/').split('/')
params = {}
node = self._find(path, self._return_values, self._expressions, params)
if node is not None:
return node.resource, node.method_map, params, node.uri_template
else:
return None
def _compile_tree(self, nodes, indent=1, level=0, fast_return=True):
"""Generates Python code for a routing tree or subtree."""
def line(text, indent_offset=0):
pad = _TAB_STR * (indent + indent_offset)
self._code_lines.append(pad + text)
# NOTE(kgriffs): Base case
if not nodes:
return
line('if path_len > %d:' % level)
indent += 1
level_indent = indent
found_simple = False
# NOTE(kgriffs & philiptzou): Sort nodes in this sequence:
# static nodes(0), complex var nodes(1) and simple var nodes(2).
# so that none of them get masked.
nodes = sorted(
nodes, key=lambda node: node.is_var + (node.is_var and
not node.is_complex))
# NOTE(kgriffs): Down to this branch in the tree, we can do a
# fast 'return None'. See if the nodes at this branch are
# all still simple, meaning there is only one possible path.
if fast_return:
if len(nodes) > 1:
# NOTE(kgriffs): There's the possibility of more than
# one path.
var_nodes = [node for node in nodes if node.is_var]
found_var_nodes = bool(var_nodes)
fast_return = not found_var_nodes
for node in nodes:
if node.is_var:
if node.is_complex:
# NOTE(richardolsson): Complex nodes are nodes which
# contain anything more than a single literal or variable,
# and they need to be checked using a pre-compiled regular
# expression.
expression_idx = len(self._expressions)
self._expressions.append(node.var_regex)
line('match = expressions[%d].match(path[%d]) # %s' % (
expression_idx, level, node.var_regex.pattern))
line('if match is not None:')
indent += 1
line('params.update(match.groupdict())')
else:
# NOTE(kgriffs): Simple nodes just capture the entire path
# segment as the value for the param.
line('params["%s"] = path[%d]' % (node.var_name, level))
# NOTE(kgriffs): We don't allow multiple simple var nodes
# to exist at the same level, e.g.:
#
# /foo/{id}/bar
# /foo/{name}/bar
#
assert len([_node for _node in nodes
if _node.is_var and not _node.is_complex]) == 1
found_simple = True
else:
# NOTE(kgriffs): Not a param, so must match exactly
line('if path[%d] == "%s":' % (level, node.raw_segment))
indent += 1
if node.resource is not None:
# NOTE(kgriffs): This is a valid route, so we will want to
# return the relevant information.
resource_idx = len(self._return_values)
self._return_values.append(node)
self._compile_tree(node.children, indent, level + 1, fast_return)
if node.resource is None:
if fast_return:
line('return None')
else:
# NOTE(kgriffs): Make sure that we have consumed all of
# the segments for the requested route; otherwise we could
# mistakenly match "/foo/23/bar" against "/foo/{id}".
line('if path_len == %d:' % (level + 1))
line('return return_values[%d]' % resource_idx, 1)
if fast_return:
line('return None')
indent = level_indent
if not found_simple and fast_return:
line('return None')
def _compile(self):
"""Generates Python code for entire routing tree.
The generated code is compiled and the resulting Python method is
returned.
"""
self._return_values = []
self._expressions = []
self._code_lines = [
'def find(path, return_values, expressions, params):',
_TAB_STR + 'path_len = len(path)',
]
self._compile_tree(self._roots)
self._code_lines.append(
# PERF(kgriffs): Explicit return of None is faster than implicit
_TAB_STR + 'return None'
)
self._src = '\n'.join(self._code_lines)
scope = {}
exec(compile(self._src, '<string>', 'exec'), scope)
return scope['find']
class CompiledRouterNode(object):
"""Represents a single URI segment in a URI."""
def __init__(self, raw_segment,
method_map=None, resource=None, uri_template=None):
self.children = []
self.raw_segment = raw_segment
self.method_map = method_map
self.resource = resource
self.uri_template = uri_template
self.is_var = False
self.is_complex = False
self.var_name = None
# NOTE(kgriffs): CompiledRouter.add_route validates field names,
# so here we can just assume they are OK and use the simple
# _FIELD_REGEX to match them.
matches = list(_FIELD_REGEX.finditer(raw_segment))
if not matches:
self.is_var = False
else:
self.is_var = True
if len(matches) == 1 and matches[0].span() == (0, len(raw_segment)):
# NOTE(richardolsson): if there is a single variable and
# it spans the entire segment, the segment is not
# complex and the variable name is simply the string
# contained within curly braces.
self.is_complex = False
self.var_name = raw_segment[1:-1]
else:
# NOTE(richardolsson): Complex segments need to be
# converted into regular expressions in order to match
# and extract variable values. The regular expressions
# contain both literal spans and named group expressions
# for the variables.
# NOTE(kgriffs): Don't use re.escape() since we do not
# want to escape '{' or '}', and we don't want to
# introduce any unexpected side-effects by escaping
# non-ASCII characters (it is probably safe, but let's
# not take that chance in a minor point release).
#
# NOTE(kgriffs): The substitution template parser in the
# re library does not look ahead when collapsing '\\':
# therefore in the case of r'\\g<0>' the first r'\\'
# would be consumed and collapsed to r'\', and then the
# parser would examine 'g<0>' and not realize it is a
# group-escape sequence. So we add an extra backslash to
# trick the parser into doing the right thing.
escaped_segment = re.sub(r'[\.\(\)\[\]\?\$\*\+\^\|]', r'\\\g<0>', raw_segment)
seg_pattern = _FIELD_REGEX.sub(r'(?P<\1>.+)', escaped_segment)
seg_pattern = '^' + seg_pattern + '$'
self.is_complex = True
self.var_regex = re.compile(seg_pattern)
def matches(self, segment):
"""Returns True if this node matches the supplied template segment."""
return segment == self.raw_segment
def conflicts_with(self, segment):
"""Returns True if this node conflicts with a given template segment."""
# NOTE(kgriffs): This method assumes that the caller has already
# checked if the segment matches. By definition, only unmatched
# segments may conflict, so there isn't any sense in calling
# conflicts_with in that case.
assert not self.matches(segment)
# NOTE(kgriffs): Possible combinations are as follows.
#
# simple, simple ==> True
# simple, complex ==> False
# simple, string ==> False
# complex, simple ==> False
# complex, complex ==> (Depend)
# complex, string ==> False
# string, simple ==> False
# string, complex ==> False
# string, string ==> False
#
other = CompiledRouterNode(segment)
if self.is_var:
# NOTE(kgriffs & philiptzou): Falcon does not accept multiple
# simple var nodes exist at the same level as following:
#
# /foo/{thing1}
# /foo/{thing2}
#
# Nor two complex nodes like this:
#
# /foo/{thing1}.{ext}
# /foo/{thing2}.{ext}
#
# On the other hand, those are all OK:
#
# /foo/{thing1}
# /foo/all
# /foo/{thing1}.{ext}
# /foo/{thing2}.detail.{ext}
#
if self.is_complex:
if other.is_complex:
return (_FIELD_REGEX.sub('v', self.raw_segment) ==
_FIELD_REGEX.sub('v', segment))
return False
else:
return other.is_var and not other.is_complex
# NOTE(kgriffs): If self is a static string match, then all the cases
# for other are False, so no need to check.
return False
```
#### File: pdu/tests/test_sm_encoding.py
```python
import unittest, binascii, StringIO
from jasmin.vendor.smpp.pdu.sm_encoding import SMStringEncoder
from jasmin.vendor.smpp.pdu.pdu_types import *
from jasmin.vendor.smpp.pdu.gsm_types import *
from jasmin.vendor.smpp.pdu.pdu_encoding import PDUEncoder
class SMDecoderTest(unittest.TestCase):
def getPDU(self, hexStr):
return PDUEncoder().decode(StringIO.StringIO(binascii.a2b_hex(hexStr)))
def test_decode_UCS2(self):
pduHex = '000000480000000500000000dfd03a56415753424400010131353535313233343536370001013137373338323834303730000000000000000008000c00f10075014400ed00fc0073'
pdu = self.getPDU(pduHex)
smStr = SMStringEncoder().decodeSM(pdu)
self.assertEquals('\x00\xf1\x00u\x01D\x00\xed\x00\xfc\x00s', smStr.bytes)
self.assertEquals(u'\xf1u\u0144\xed\xfcs', smStr.unicode)
self.assertEquals(None, smStr.udh)
def test_decode_default_alphabet(self):
#'T- Mobile flip phone \xa7 \xa8 N random special charcters'
pduHex = '0000006f00000005000000005d3fe724544d4f4249000101313535353132333435363700010131373733383238343037300000000000000000000033542d204d6f62696c6520666c69702070686f6e6520a720a8204e2072616e646f6d207370656369616c20636861726374657273'
pdu = self.getPDU(pduHex)
self.assertRaises(UnicodeDecodeError, SMStringEncoder().decodeSM, pdu)
def test_decode_latin1(self):
pduHex = '0000004200000005000000002a603d56415753424400010131353535313233343536370001013137373338323834303730000000000000000003000645737061f161'
pdu = self.getPDU(pduHex)
smStr = SMStringEncoder().decodeSM(pdu)
self.assertEquals('Espa\xf1a', smStr.bytes)
self.assertEquals(u'Espa\xf1a', smStr.unicode)
self.assertEquals(None, smStr.udh)
def test_decode_ascii(self):
pduHex = '00000054000000050000000008c72a4154454c4550000101313535353535353535353500010131343034363635333431300000ff010000000001000e49732074686973206a757374696e0201000100020d000101'
pdu = self.getPDU(pduHex)
smStr = SMStringEncoder().decodeSM(pdu)
self.assertEquals('Is this justin', smStr.bytes)
self.assertEquals('Is this justin', smStr.unicode)
self.assertEquals(None, smStr.udh)
def test_decode_octet_unspecified_common(self):
pduHex = '000000a900000005000000003cf78935415753424400010131353535313233343536370001013134303436363533343130004000000000000004006d06050423f40000424547494e3a56434152440d0a56455253494f4e3a322e310d0a4e3b434841525345543d5554462d383a4269656265723b4a757374696e0d0a54454c3b564f4943453b434841525345543d5554462d383a343034363635333431300d0a454e443a5643415244'
pdu = self.getPDU(pduHex)
self.assertRaises(NotImplementedError, SMStringEncoder().decodeSM, pdu)
def test_decode_default_alphabet_with_udh(self):
pduHex = '000000da0000000500000000da4b62474652414e4300010131353535313233343536370001013134303436363533343130004000000000000000009e0500032403016869206a757374696e20686f772061726520796f753f204d79206e616d6520697320706570652069276d206672656e636820616e6420692077616e74656420746f2074656c6c20796f7520686f77206d7563682069206c6f766520796f752c20796f75206b6e6f7720796f75207361766564206d79206c69666520616e642069207265616c6c79207468616e6b20796f7520666f72207468'
pdu = self.getPDU(pduHex)
smStr = SMStringEncoder().decodeSM(pdu)
self.assertEquals("\x05\x00\x03$\x03\x01hi justin how are you? My name is pepe i'm french and i wanted to tell you how much i love you, you know you saved my life and i really thank you for th", smStr.bytes)
self.assertEquals("hi justin how are you? My name is pepe i'm french and i wanted to tell you how much i love you, you know you saved my life and i really thank you for th", smStr.unicode)
self.assertEquals([InformationElement(InformationElementIdentifier.CONCATENATED_SM_8BIT_REF_NUM, IEConcatenatedSM(0x24, 0x03, 0x01))], smStr.udh)
def test_isConcatenatedSM_true(self):
pduHex = '000000da0000000500000000da4b62474652414e4300010131353535313233343536370001013134303436363533343130004000000000000000009e0500032403016869206a757374696e20686f772061726520796f753f204d79206e616d6520697320706570652069276d206672656e636820616e6420692077616e74656420746f2074656c6c20796f7520686f77206d7563682069206c6f766520796f752c20796f75206b6e6f7720796f75207361766564206d79206c69666520616e642069207265616c6c79207468616e6b20796f7520666f72207468'
pdu = self.getPDU(pduHex)
self.assertTrue(SMStringEncoder().isConcatenatedSM(pdu))
iElem = SMStringEncoder().getConcatenatedSMInfoElement(pdu)
self.assertEquals(InformationElement(InformationElementIdentifier.CONCATENATED_SM_8BIT_REF_NUM, IEConcatenatedSM(0x24, 0x03, 0x01)), iElem)
def test_isConcatenatedSM_false(self):
pduHex = '000000490000000500000000b9b7e456544d4f424900010131353535313233343536370001013134303436363533343130000000000000000000000d49206c7576206a757374696e21'
pdu = self.getPDU(pduHex)
self.assertFalse(SMStringEncoder().isConcatenatedSM(pdu))
iElem = SMStringEncoder().getConcatenatedSMInfoElement(pdu)
self.assertEquals(None, iElem)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "2nd47/stat-txt",
"score": 3
} |
#### File: 2nd47/stat-txt/info-txt.py
```python
import xml.etree.ElementTree as ET
# NLP data analysis
import nltk
nltk.download('stopwords')  # fetch only the corpus we need instead of opening the interactive downloader
from nltk.corpus import stopwords
# Statistics for summarizing the gathered data
from statistics import mean, median_grouped, mode, stdev
# Interact with user machine
from enum import Enum
import re
import sys
from sys import argv
import os
# INCLUDE HOW MANY WISHES WERE MADE
# INCLUDE LONGEST STREAK OF TEXT MESSAGES
# Time In Milliseconds
class TIM(Enum):
''' Time in milliseconds'''
    SECOND = 1000
    MINUTE = SECOND * 60
    HOUR = MINUTE * 60
    DAY = HOUR * 24
    WEEK = DAY * 7
    # 30d/mo; 365d/yr
    MONTH = WEEK * 4 + DAY * 2
    YEAR = MONTH * 12 + DAY * 5
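    # Sanity check (illustrative): TIM.WEEK.value == 604800000, i.e. 7 * 24 * 60 * 60 * 1000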
class NLPAnalyze:
    # REWORK IMMEDIATELY
    '''all NLP analysis starts here'''
    def __init__(self):
        # Some basic regex; character classes cannot nest, so the repeated
        # groups use (?:...) rather than the original [...] wrappers
        self.laugh = re.compile(r'(?:h[ea]){2,}')
        self.love = re.compile(r'l+[uo]+v+e*')
        self.you = re.compile(r'y+o+u+|u+')
        self.swear = re.compile(r'f+[au]+[ck]+|s+h+i+t+|c+u+n+t+')
        self.babey = re.compile(r'b+a+b+(?:e+|y+)')
        self.heart = re.compile(r'<+3+')
    def remove_stopwords(self, words):
        '''Filter NLTK English stop words out of a token list.'''
        cached_stop_words = stopwords.words('english')
        return [word for word in words if word not in cached_stop_words]
    # text4.dispersion_plot(["citizens", "democracy", "freedom", "duties", "America"])
    # Really cool feature to examine changes through time
class ConvParty:
"""Store data for a conversation participant and their associated data
"""
def __init__(self, partyName):
self.name = partyName
self.dataSet = {
'sms' : ConvData('sms'),
'length' : ConvData('length'),
'responseTime' : ConvData('responseTime'),
'timeToNext' : ConvData('timeToNext')
}
    def __getitem__(self, key):
        '''index over the dataSet'''
        if key == 'name':
            return self.name
        if key in self.dataSet:
            return self.dataSet[key]
        return None
    def __setitem__(self, key, value):
        '''index over the dataSet'''
        if key in self.dataSet:
            self.dataSet[key] = value
def __str__(self):
returnStr = 'DATA FOR ' + self.name
return returnStr
def addSMS(self, sms):
self['sms'] += sms
self['length'] += sms['length']
if sms['responseTime'] > 0:
self['responseTime'] += sms['responseTime']
if sms['timeToNext'] > 0:
self['timeToNext'] += sms['timeToNext']
def analyze(self):
print('Analyzing data for ' + self.name + '...')
        for key, data in self.dataSet.items():
            if key == 'sms':
                continue  # raw SMS objects can't be summarized numerically
            data.analyze()
# Do NLTK analysis here
class ConvData:
"""Store conversation data associated with one particpant and one data type
"""
def __init__(self, title):
# e.g. 'responseTime', 'message', 'length', etc.
self.title = title
self.data = []
self.count = 0
self.stats = {}
    def __add__(self, other):
        self.data.append(other)  # append keeps single values (ints, SMS objects) intact
        self.count += 1
        return self
    def __iadd__(self, other):
        self.data.append(other)
        self.count += 1
        return self
    def __str__(self):
        returnStr = self.title + ' WITH ' + str(self.count)
        return returnStr
def analyze(self, other=None):
if not self.stats:
self.stats['average'] = mean(self.data)
self.stats['median'] = median_grouped(self.data)
self.stats['mode'] = mode(self.data)
self.stats['stdev'] = stdev(self.data)
class Conversation:
"""Store data for a conversation given two participants.
This will also store values for both participants combined
"""
    def __init__(self, party1, party2):
        self.parties = {
            'party1' : ConvParty(party1),
            'party2' : ConvParty(party2),
            'total' : ConvParty('total')}
def __getitem__(self, key):
if key in self.parties:
return self.parties[key]
return None
    def __str__(self):
        returnStr = 'Conversation between ' + self.parties['party1'].name \
                    + ' and ' + self.parties['party2'].name
        return returnStr
    def addSMS(self, sms):
        # route the SMS to whichever participant sent it, plus the combined totals
        for key in ('party1', 'party2'):
            if self[key]['name'] == sms['party']:
                self[key].addSMS(sms)
        self['total'].addSMS(sms)
    def analyze(self):
        for party in self.parties.values():
            party.analyze()
class SMS:
"""Store data for a single SMS
"""
def __init__(self, date, party, message):
self.data = {
'date' : date,
'message' : message,
'length' : len(message),
'party' : party,
'responseTime' : 0,
'timeToNext' : 0,
'wish' : False
}
self._checkWish()
def __getitem__(self, key):
if key in self.data:
return self.data[key]
return None
def __setitem__(self, idx, value):
self.data[idx] = value
    def __str__(self):
        returnStr = '[' + str(self['message']) + '] '
        returnStr += 'FROM [' + str(self['party']) + '] '
        returnStr += 'AT [' + str(self['date']) + '] '
        returnStr += 'IN [' + str(self['responseTime']) + ']'
        return returnStr
    def _checkWish(self):
        '''check if a wish was made around 11:11/23:11 with this SMS'''
pass
def transcribe(root, conversation):
"""Parse ElementTree XML and fill conversation object with relevant data
"""
print('Parsing messages from XML file...')
for sms in root.findall('sms'):
# Input time as milliseconds
date = int(sms.attrib['date'])
# Determine which party sent the message
if (sms.attrib['type'] == '2'):
party = conversation['party1']['name']
elif (sms.attrib['type'] == '1'):
party = conversation['party2']['name']
# Include UTF-8 and Emoji support in later revisions
message = str(sms.attrib['body']).encode('ascii', 'ignore')
newSMS = SMS(date, party, message)
        # Traverse the list backwards, get most recent SMS from both parties
        reversedSMSs = reversed(conversation['total']['sms'].data)
        for previousSMS in reversedSMSs:
            if previousSMS['party'] == newSMS['party']:
                # Set the time between responses for one party
                if not previousSMS['timeToNext']:
                    previousSMS['timeToNext'] = newSMS['date'] - previousSMS['date']
                else:
                    break
            # Set the time it took to respond to the other party
            else:
                newSMS['responseTime'] = newSMS['date'] - previousSMS['date']
        conversation.addSMS(newSMS)
    print('Successfully parsed ' + str(conversation['total']['sms'].count) + ' messages!')
def main(party1, party2):
'''main function that executes program function'''
    # Initialize conversation (the participants are created inside Conversation)
    convo = Conversation(party1, party2)
    # Parse messages into conversation from ET-parsed XML file
    transcribe(ET.parse('sms.xml').getroot(), convo)
# Perform analysis on the gathered SMS data
convo.analyze()
# Initialize graphics output
if __name__ == '__main__':
if (len(argv) < 3):
raise Exception('Please enter your name and then your contact\'s name')
main(argv[1], argv[2])
``` |
{
"source": "2ndhukurou/discordpy-startup",
"score": 3
} |
#### File: 2ndhukurou/discordpy-startup/discordbot.py
```python
import discord
from discord.ext import commands
import ffmpeg
import os
# Replace TOKEN below with your own bot's access token
TOKEN = '<PASSWORD>'
sound_path = "./sound"
# Create the objects needed for the connection
client = commands.Bot(command_prefix='+')
voice_client = None
# Runs once the bot has started up
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.command()
async def join(ctx):
    print('# fetching the voice channel')
    vc = ctx.author.voice.channel
    print('# connecting to the voice channel')
await vc.connect()
@client.command()
async def bye(ctx):
    print('# disconnecting')
await ctx.voice_client.disconnect()
@client.event
async def on_message(message):
if message.content.startswith('+'):
origin_name = message.content.lstrip('+')
print(message.content)
name = 'sound/' + origin_name + '.ogg'
if os.path.exists(name):
source = discord.FFmpegPCMAudio(name)
message.guild.voice_client.play(source)
else:
pass
else:
pass
await client.process_commands(message)
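# Example: a chat message of "+meow" plays ./sound/meow.ogg in the joined
# voice channel, if that file exists ("meow" is a hypothetical file name).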
# Start the bot and connect to the Discord server
client.run(TOKEN)
``` |
{
"source": "2ndspace/mobilenet_v2_keras",
"score": 3
} |
#### File: 2ndspace/mobilenet_v2_keras/hub_weight_loading.py
```python
import numpy as np
import tensorflow as tf
import tensornets as nets
import tensorflow_hub as hub
# from mobilenetv2 import MobileNetV2
from keras.models import Model
from keras.applications.mobilenetv2 import MobileNetV2
# from mobilenetv2 import MobileNetV2
def map_alpha_to_slim(alpha):
alpha_map = {
1.4: '140',
1.3: '130',
1.0: '100',
0.75: '075',
0.5: '050',
0.35: '035'
}
return alpha_map[alpha]
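# e.g. map_alpha_to_slim(0.35) -> '035', the suffix used in the TF-Hub module path below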
alpha = 0.35
rows = 192
# rows = 224
# rows = 160
# rows = 128
# rows = 96
print('ALPHA: ', alpha)
print('rows:', rows)
WEIGHTS_SAVE_PATH_INCLUDE_TOP = '/home/jon/Documents/keras_mobilenetV2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + \
str(alpha) + '_' + str(rows) + '.h5'
WEIGHTS_SAVE_PATH_NO_TOP = '/home/jon/Documents/keras_mobilenetV2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + \
str(alpha) + '_' + str(rows) + '_no_top' + '.h5'
img = nets.utils.load_img('cat.png', target_size=256, crop_size=rows)
img = (img / 128.0) - 1.0
inputs = tf.placeholder(tf.float32, [None, rows, rows, 3])
model = hub.Module(
"https://tfhub.dev/google/imagenet/mobilenet_v2_" + map_alpha_to_slim(alpha) + "_" + str(rows) + "/classification/1")
features = model(inputs, signature="image_classification", as_dict=True)
probs = tf.nn.softmax(features['default'])
# with tf.variable_scope('keras'):
print('for ALPHA: ', alpha)
model2 = MobileNetV2(weights='imagenet', alpha = alpha, input_shape = (rows, rows, 3))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
preds = sess.run(probs, {inputs: img})
preds2 = model2.predict(img)
print('TFHUB: ', nets.utils.decode_predictions(preds[:, 1:]))
print('MOBLV2 local bn new: ',nets.utils.decode_predictions(preds2))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
weights = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='module/MobilenetV2')
values = sess.run(weights)
values[-2] = np.delete(np.squeeze(values[-2]), 0, axis=-1)
values[-1] = np.delete(values[-1], 0, axis=-1)
model2.set_weights(values)
# Save weights no top and model
model2.save_weights(WEIGHTS_SAVE_PATH_INCLUDE_TOP)
model2_no_top = Model(inputs=model2.input,
                      outputs=model2.get_layer('out_relu').output)
model2_no_top.save_weights(WEIGHTS_SAVE_PATH_NO_TOP)
preds3 = model2.predict(img)
print('MOBLV2 local bn new weights new: ', nets.utils.decode_predictions(preds3))
# Now try to load new model locally and get the same weight score.
``` |
{
"source": "2ni/aprico",
"score": 2
} |
#### File: 2ni/aprico/listen.py
```python
import paho.mqtt.client as mqtt
import json
from dateutil.parser import parse
from datetime import timezone
import sys
import re
try:
from credentials import appid, key
except ModuleNotFoundError:
print("missing credentials.py with definition of apid='' and key=''")
sys.exit(1)
def on_connect(mqttc, mosq, obj, rc):
# subscribe to specific device in a specific app
mqttc.subscribe('+/devices/+/up')
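    # '+' is an MQTT single-level wildcard, so this matches uplink topics of
    # the form '<app_id>/devices/<dev_id>/up' on the TTN broker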
def on_subscribe(mosq, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_message(mqttc, obj, msg):
try:
x = json.loads(msg.payload.decode("utf-8"))
devid = x["dev_id"]
counter = x["counter"]
gateways = x["metadata"]["gateways"]
        gws_with_pos = [(x['rssi'], x['latitude'], x['longitude'], x.get('time')) for x in gateways if 'latitude' in x]
        gws_with_pos_time = [(x['rssi'], x['latitude'], x['longitude'], x['time']) for x in gateways if 'latitude' in x and x.get('time')]
timestamp = parse(x["metadata"]["time"]).replace(tzinfo=timezone.utc).astimezone(tz=None)
timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
data = str("%s" % x["payload_fields"])
my_rssi = 0
for gw in gateways:
if gw["gtw_id"] == "eui-58a0cbfffe01bc78":
my_rssi = gw["rssi"]
print("{timestamp}: ({devid}:{counter},{datarate}|{frq}|{my_rssi}) {data} | gws ({no_gws_pos_time}/{no_gws_pos}/{no_gws}): {gws_with_pos}".format(
timestamp=timestamp_str,
devid=devid,
counter=counter,
datarate=re.findall("^(.*)BW", x["metadata"]["data_rate"])[0],
frq=x["metadata"]["frequency"],
data=data,
no_gws=len(gateways),
no_gws_pos=len(gws_with_pos),
no_gws_pos_time=len(gws_with_pos_time),
gws_with_pos=gws_with_pos,
my_rssi=my_rssi
))
sys.stdout.flush()
except Exception as e:
print(e)
pass
def on_publish(mosq, obj, mid):
print("mid: " + str(mid))
mqttc = mqtt.Client()
# Assign event callbacks
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.username_pw_set(appid, key)
mqttc.connect("eu.thethings.network", 1883, 60)
try:
mqttc.loop_forever()
except KeyboardInterrupt:
mqttc.disconnect()
sys.exit(0)
``` |
{
"source": "2Nice2U/HammerBotPython",
"score": 2
} |
#### File: 2Nice2U/HammerBotPython/bot.py
```python
import discord
from discord.ext import commands, tasks
from dotenv import load_dotenv
import os
import asyncio
from other.role import role_giver
from other.task import task_list
from other.voting import vote_handler
import help_command.help_data as hd
from help_command.helping import helper
from bug.fetcher import mc_bug
import bug.fixed as bug_fix
import bug.versions as mc_version
import utilities.data as data
from fun_zone.games.games import Games
from fun_zone.games.chess import ForbiddenChessMove
from cogs.dummy_commands import Dummy
from cogs.status import Status
# discord token is stored in a .env file in the same directory as the bot
load_dotenv() # load the .env file containing id's that have to be kept secret for security
TOKEN = os.getenv('DISCORD_TOKEN')
prefix = "/"
bot = commands.Bot(command_prefix=prefix)
bot.remove_command('help')
bot.latest_new_person = ""
bot.enabled = False
bot.debug = False
# Print a message if the bot is online and change it's status.
@bot.event
async def on_ready():
print('bot connected')
mc_version.get_versions(bot)
bot.enabled = True
await bot.change_presence(activity=discord.Game('Technical Minecraft on HammerSMP'))
@bot.event
async def on_message(message):
# Make sure the bot doesn't respond to itself.
if message.author == bot.user:
return
if bot.enabled:
        # If a new message is sent in the application forms channel, the bot will automatically add reactions.
if message.channel.id == data.application_channel:
for e in data.vote_emotes:
await message.add_reaction(bot.get_emoji(e))
if '{}bug_vote'.format(prefix) not in message.content:
await mc_bug(message)
    # We need this since overriding the default on_message prevents any other commands from running.
await bot.process_commands(message)
# Check which user was the latest to join and store this in a global variable.
@bot.event
async def on_member_join(member):
if bot.enabled:
bot.latest_new_person = member
# When the newest member leaves, a notification is sent in the system channel.
@bot.event
async def on_member_remove(member):
if bot.latest_new_person == member and bot.enabled:
response = 'Sadly, `{}` left us already.'.format(member.name)
await bot.get_guild(data.hammer_guild).system_channel.send(response)
# Checking for new reactions being added.
# on_raw_reaction_add is used since it is called regardless of the state of the internal message cache.
@bot.event
async def on_raw_reaction_add(payload):
if payload.channel_id == data.vote_channel_id:
pass
# This will handle some errors and suppress raising them. It will also report to the user what the error was.
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, discord.ext.commands.errors.CommandNotFound):
await ctx.send("This command doesn't exist", delete_after=15)
elif isinstance(error, discord.ext.commands.MissingPermissions):
await ctx.send("You don't have permission to do that!", delete_after=15)
elif isinstance(error, discord.ext.commands.MissingRole):
await ctx.send("You don't have the correct role to use that command!", delete_after=15)
elif isinstance(error, discord.ext.commands.CheckFailure):
print('Check failed')
elif isinstance(error, discord.ext.commands.errors.CommandInvokeError):
if isinstance(error.original, ForbiddenChessMove):
await ctx.send("This is not a valid move!", delete_after=15)
else:
print('unknown error: {} of type {}'.format(error, type(error)))
await ctx.channel.send(error)
if bot.debug:
raise error
@bot.event
async def on_error(event_method, *args, **kwargs):
print(event_method)
print(*args)
    print(kwargs)  # note: print(**kwargs) would unpack these as print() options and fail
# This is a command purely for testing purposes during development.
@bot.command(name='testing', help=hd.testing_help, usage=hd.testing_usage)
@commands.has_role(data.member_role_id)
async def testing(ctx, *args):
pass
@bot.command(name='help', help=hd.help_help, usage=hd.help_usage)
async def helping(ctx, command=''):
try:
await ctx.send(embed=helper(ctx, bot, command))
except KeyError:
await ctx.send("Help Error: This command doesn't exist.", delete_after=10)
# This command will be used so members can give themselves some roles with a command
@bot.command(name='role', help=hd.role_help, usage=hd.role_usage)
@commands.has_role(data.member_role_id)
async def role(ctx, action, *args):
await role_giver(ctx, action, args, bot)
# Tell someone to stop being lazy
@bot.command(name='stop_lazy', help=hd.stop_lazy_help, usage=hd.stop_lazy_usage)
@commands.has_role(data.member_role_id)
async def stop_lazy(ctx, mention='jerk'):
await ctx.message.delete()
response = 'Stop Lazy {}'.format(mention)
await ctx.send(response)
await ctx.send(file=discord.File('stop_lazy.png'))
@bot.command(name='CMP', help=hd.CMP_help, usage=hd.CMP_usage)
@commands.has_any_role(data.member_role_id, data.cmp_role_id)
async def cmp(ctx):
CMP_IP = os.getenv('CMP_IP')
response = "Check your DM's"
await ctx.author.send(CMP_IP)
await ctx.send(response)
# Command that will handle voting, see voting.py.
@bot.command(name='vote', help=hd.vote_help, usage=hd.vote_usage)
@commands.has_role(data.member_role_id)
async def vote(ctx, vote_type='', *args):
await ctx.message.delete()
await vote_handler(ctx, vote_type, args, bot)
# Command to create, add, remove and delete bulletins in the bulletin board.
@bot.command(name='bulletin', help=hd.bulletin_help, usage=hd.bulletin_usage)
@commands.has_role(data.member_role_id)
async def bulletin(ctx, action, *args):
await ctx.message.delete()
await task_list(ctx=ctx, action=action, args=args, use='bulletin')
# Command to add a to do list to a project channel and pin it.
@bot.command(name='todo', help=hd.todo_help, usage=hd.todo_usage)
@commands.has_role(data.member_role_id)
async def todo(ctx, action, *args):
await ctx.message.delete()
await task_list(ctx=ctx, action=action, args=args, use='todo')
# Command to handle the coordinate list. There is one embed per dimension
@bot.command(name='coordinates', help=hd.coordinates_help, usage=hd.coordinates_usage)
@commands.has_role(data.member_role_id)
async def coordinates(ctx, action, *args):
await ctx.message.delete()
if ctx.channel.id == data.coordinate_channel:
await task_list(ctx=ctx, action=action, args=args, use="bulletin")
"""@bot.command(name="bug_vote", help=hd.bug_vote_help, usage=hd.bug_vote_usage)
@commands.has_any_role("members", "comrades")
async def bug_vote(ctx, bug):
embed = await bug_utils.vote(bug)
print(embed)
await ctx.send(embed=embed)"""
# An admin-only command to mass delete messages in case of a bad discord discussion.
@bot.command(name='mass_delete', help=hd.mass_delete_help, usage=hd.mass_delete_usage)
@commands.has_role(data.admin_role_id)
async def mass_delete(ctx, number_of_messages: int):
await ctx.message.delete()
if number_of_messages > 250:
response = "You want to delete too many messages at once, I'm sorry."
await ctx.send(response)
return
else:
await ctx.channel.purge(limit=number_of_messages)
# this loop is used to check for new updates on the bug tracker every 10 seconds
@tasks.loop(seconds=10, reconnect=True)
async def fixed_bug_loop():
print("loop")
try:
        # on startup this is run once before the bot is online, so get_channel would return nothing;
        # we check for this to make sure the loop doesn't break
channel = bot.get_channel(data.fixed_bug_channel_id)
if channel:
await bug_fix.fixes_handler(bot)
# exceptions need to be handled, otherwise the loop might break
except Exception as e:
print(e)
raise e
@tasks.loop(seconds=25, reconnect=True)
async def version_update_loop():
try:
        # on startup this is run once before the bot is online, so get_channel would return nothing;
        # we check for this to make sure the loop doesn't break
channel = bot.get_channel(data.fixed_bug_channel_id)
if channel:
await mc_version.version_update_handler(bot, channel)
# exceptions need to be handled, otherwise the loop might break
except Exception as e:
print(e)
@tasks.loop(seconds=1, reconnect=True)
async def test():
pass
try:
bot.add_cog(Games(bot))
bot.add_cog(Dummy(bot))
bot.add_cog(Status(bot))
version_update_loop.start() # start the loop to check for new versions
fixed_bug_loop.start() # start the loop to check for bugs
bot.loop.run_until_complete(bot.start(TOKEN))
except KeyboardInterrupt:
pass
finally:
bot.loop.run_until_complete(bot.logout())
    bot.loop.run_until_complete(asyncio.sleep(3))  # a bare asyncio.sleep(3) only creates a coroutine and never runs
print("done")
```
#### File: HammerBotPython/bug/utils.py
```python
import discord
from dotenv import load_dotenv
from jira import JIRA
import os
from bug.fetcher import limited_bug
# Get the login details to login to the bug tracker.
load_dotenv()
mojira_username = os.getenv('mojira_username')
mojira_password = os.getenv('<PASSWORD>')
# This method will be used to vote for an open issue.
async def vote(bug):
jira_access = JIRA(
server='https://bugs.mojang.com',
basic_auth=(mojira_username, mojira_password),
)
try:
        # jira's add_vote() is a plain blocking call, not a coroutine, so no await
        jira_access.add_vote(bug)
except Exception as e:
print(e)
issue = jira_access.issue(bug)
votes = issue.fields.votes
embed = limited_bug(bug)
embed.color = discord.Colour.teal()
embed.description = f'Issue {issue} has been voted on.\n' \
f'The issue now has a total of {votes} votes.'
jira_access.close()
return embed
```
#### File: fun_zone/games/chess.py
```python
import chess.svg
import chess
from svglib.svglib import svg2rlg
from reportlab.graphics import renderPM
import fun_zone.games.utils as utils
turn_mapping = {"white": chess.WHITE, "black": chess.BLACK}
class ForbiddenChessMove(Exception):
"""Exception raised when using a forbidden chess move.
Attributes:
message -- explanation of the error
"""
def __init__(self, message="This is not a valid chess move!"):
self.message = message
super().__init__(self.message)
class Chess:
"""Handles all chess game related actions like generating the board, moving pieces and promoting pawns.
Attributes:
board -- a chess.board object containing the current board
turn -- a string containing the colour of the player who's turn it is
"""
def __init__(self):
self.board = None
self.turn = "white"
def generate_board(self):
"""Generates the board and converts the .svg file to a .env file"""
self.board = chess.Board()
self.board.turn = chess.WHITE
self.turn = "white"
utils.gen_png_from_svg(self.board)
def move_piece(self, move):
"""
Move a piece. It checks for illegal moves first. Then it'll move the piece, generate the board
and change turns. If there is an illegal move a ForbiddenChessMove error will be raised.
"""
chess_move = chess.Move.from_uci(move)
if chess_move in self.board.legal_moves:
self.board.push(chess_move)
utils.gen_png_from_svg(self.board)
self.turn = "white" if self.turn == "black" else "black"
self.board.turn = turn_mapping[self.turn]
else:
raise ForbiddenChessMove()
def check_finished(self, arg):
"""
Checks if the conditions are met to end a game.
:param arg: can be 'checkmate', 'stalemate' or 'draw'. It will return either True or False depending on if the
condition was met.
"""
if arg == "checkmate":
return self.board.is_checkmate()
elif arg == "stalemate":
            return self.board.is_stalemate()
elif arg == "draw":
return self.board.has_insufficient_material(self.board.turn) or self.board.can_claim_draw()
def promote_pawn(self, move, piece):
"""
Promote a piece. It checks for illegal moves. Then it'll move the piece and promote it correctly then
change turns. If there is an illegal move a ForbiddenChessMove error will be raised.
:param move: the chess move in uci format
:param piece: the piece in number format. See pieces_mappings in the Games class in games.py
It can be found within th chess command in the section for the promote action.
"""
pseudo_move = chess.Move.from_uci(move)
chess_move = chess.Move(from_square=pseudo_move.from_square,
to_square=pseudo_move.to_square,
promotion=piece
)
if chess_move in self.board.legal_moves:
self.board.push(chess_move)
utils.gen_png_from_svg(self.board)
self.turn = "white" if self.turn == "black" else "black"
self.board.turn = turn_mapping[self.turn]
else:
raise ForbiddenChessMove()
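# Minimal usage sketch (assumed flow; not part of the original module):
#
#   game = Chess()
#   game.generate_board()      # writes the starting position image
#   game.move_piece('e2e4')    # white moves, UCI notation; turn flips to black
#   game.move_piece('e7e5')    # black replies
#   if game.check_finished('draw'):
#       print('game drawn')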
``` |
{
"source": "2nOrderEDO/Tuenti_Challenge_8",
"score": 3
} |
#### File: Tuenti_Challenge_8/Challenge_3/Script.py
```python
import os
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
rst = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
song = ['C','G','A']
dic = {'Ab': 'G#','Bb': 'A#','B#': 'C','Cb': 'B','Db': 'C#','Eb': 'D#','E#': 'F','Fb': 'E','Gb': 'F#'}
W = 2
H = 1
s2 = set(song)
major = [W, W, H, W, W, W, H]
minor = [W, H, W, W, H, W, W]
vector_major = [sum(major[0:x]) for x in range(0,len(major))]
vector_minor = [sum(minor[0:x]) for x in range(0,len(minor))]
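# Cumulative semitone offsets from the root:
#   vector_major == [0, 2, 4, 5, 7, 9, 11]
#   vector_minor == [0, 2, 3, 5, 7, 8, 10]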
def rotate(l, n):
return l[n:] + l[:n]
with open(os.path.join(__location__, 'submitinput.txt'),"r") as f:
data = f.readlines()
with open(os.path.join(__location__, 'Output.txt'),'w') as output:
n = 1
for song in data:
song = song.strip('\n')
if song == '0':
output.write('Case #'+str(n)+ ': '+'MA MA# MB MC MC# MD MD# ME MF MF# MG MG# mA mA# mB mC mC# mD mD# mE mF mF# mG mG#\n')
n +=1
continue
song = song.split()
        song = [dic.get(c, c) for c in song]  # Translate with the dictionary, leaving not-found items unchanged
song = set(song)
if not song.issubset(rst):
continue #if the line does not contain musical notes, skip
result_ma = []
result_mi = []
lst = rst
for i in lst: #Generate A major. This could have been generated just once
heptatonic = [lst[x] for x in vector_major]
s1 = set(heptatonic)
if song.issubset(s1):
result_ma.append('M'+str(heptatonic[0]))
lst = rotate(lst,1)
lst = rst
for i in lst:
heptatonic = [lst[x] for x in vector_minor]
s1 = set(heptatonic)
if song.issubset(s1):
result_mi.append('m'+str(heptatonic[0]))
lst = rotate(lst,1)
result_ma = sorted(result_ma)
result_mi = sorted(result_mi)
if len(result_ma) == 0 and len(result_mi) == 0:
output.write('Case #'+str(n)+ ': '+'None\n')
else:
output.write('Case #' + str(n) + ': '+ ' '.join(result_ma) +' '+' '.join(result_mi) + '\n')
n += 1
``` |
{
"source": "2nPlusOne/pygame-platformer",
"score": 4
} |
#### File: pygame-platformer/source/player.py
```python
import pygame
from settings import *
import utils
class Player(pygame.sprite.Sprite):
def __init__(self, pos, groups, collision_sprites):
super().__init__(groups)
self.image = pygame.Surface((TILE_SIZE / 2, TILE_SIZE))
self.image.fill(PLAYER_COLOR)
self.rect = self.image.get_rect(topleft=pos)
self.collision_sprites = collision_sprites
# Player movement
self.direction_x = 0 # -1 = left, 1 = right, 0 = none
self.velocity = pygame.math.Vector2()
self.speed = MAX_PLAYER_SPEED
# Jumping
self.jumps_remaining = MAX_JUMPS
self.is_grounded = False # Is the player on the ground?
self.was_grounded = False # Used to determine if the player has left the ground this frame
self.is_jumping = False # Is the player jumping?
self.jump_pressed = False # Is the jump key currently pressed?
self.jumping_locked = False # Used to lock the player from jumping again until they release the jump key
self.current_gravity = 0 # The current gravity affecting the player
self.jump_gravity = (2 * MAX_JUMP_HEIGHT) / (TIME_TO_JUMP_APEX ** 2)
self.fall_gravity = self.jump_gravity * FALL_GRAVITY_MULTIPLIER
self.jump_velocity = ((-2 * MAX_JUMP_HEIGHT) / TIME_TO_JUMP_APEX) - self.fall_gravity
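        # Kinematics sketch (a reading of the constants above, assumed from
        # standard jump tuning): with the apex reached at t = TIME_TO_JUMP_APEX
        # and h = g*t^2/2, per-tick gravity is g = 2h/t^2 and launch speed is
        # v0 = -2h/t (negative because y grows downward in screen space);
        # subtracting fall_gravity pre-compensates for the gravity applied on
        # the same tick as the jump.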
# Time
self.coyote_timer = COYOTE_TIME # Time the player has to jump after leaving the ground
self.jump_buffer_timer = JUMP_BUFFER_TIME # Registers jump input as long as this is less than JUMP_BUFFER_TIME
self.last_frame_ticks = 0 # Not used if using estimated delta_time (1/FPS)
def process_input(self, events):
"""Process input events. This method is called by Level, which passes in the events from the main game loop."""
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT: # Move left
self.direction_x = -1
if event.key == pygame.K_RIGHT: # Move right
self.direction_x = 1
if event.key == pygame.K_UP: # Jump
self.jump_pressed = True
if event.key == pygame.K_g: # Invert gravity just for fun
self.fall_gravity = -self.fall_gravity
self.current_gravity = -self.current_gravity
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT and self.direction_x < 0:
self.direction_x = 0
if event.key == pygame.K_RIGHT and self.direction_x > 0:
self.direction_x = 0
if event.key == pygame.K_UP:
self.jump_pressed = False
self.jumping_locked = False
def check_jump_buffer(self):
"""Conditionally applies jumping force to the player."""
self.update_jump_buffer_timer()
# jump_allowed = not (self.jumps_remaining > 0 and
# (self.is_grounded or self.is_jumping or
# self.coyote_timer < COYOTE_TIME))
jump_input = self.jump_buffer_timer < JUMP_BUFFER_TIME
can_jump = not self.jumping_locked and self.jumps_remaining > 0 and (
self.is_jumping or self.coyote_timer < COYOTE_TIME)
self.jumping_locked = self.jump_pressed
if jump_input and can_jump:
self.jump()
def jump(self):
self.coyote_timer = COYOTE_TIME
self.jump_buffer_timer = JUMP_BUFFER_TIME
self.is_jumping = True
self.jumps_remaining -= 1
self.current_gravity = self.jump_gravity
self.velocity.y = self.jump_velocity
def update_air_timer(self):
"""Resets air timer if grounded, otherwise increments by delta time."""
self.coyote_timer = 0 if self.is_grounded else round(self.coyote_timer + EST_DELTA_TIME, 2)
def update_jump_buffer_timer(self):
"""Resets jump buffer timer if jump key pressed, otherwise increments by delta time."""
self.jump_buffer_timer = 0 if self.jump_pressed and not self.jumping_locked else round(self.jump_buffer_timer + EST_DELTA_TIME, 2)
def move(self):
"""Move the player and apply collisions."""
self.velocity.y += self.current_gravity
self.check_jump_buffer() # Check if the player should jump this frame
target_velocity = pygame.math.Vector2(self.direction_x * self.speed, self.velocity.y)
self.velocity = utils.pygame_vector2_smooth_damp(self.velocity, target_velocity, SMOOTH_TIME, EST_DELTA_TIME)
self.velocity.x = 0 if abs(self.velocity.x) < 2*SMOOTH_TIME else self.velocity.x
# Horizontal movement and collisions
self.rect.x += self.velocity.x
for sprite in self.collision_sprites.sprites():
if not sprite.rect.colliderect(self.rect): continue
# Right collision
elif abs(self.rect.right - sprite.rect.left) < COLLISION_TOLERANCE and self.velocity.x > 0:
self.rect.right = sprite.rect.left
# Left collision
elif abs(self.rect.left - sprite.rect.right) < COLLISION_TOLERANCE and self.velocity.x < 0:
self.rect.left = sprite.rect.right
self.velocity.x = 0
break
# Vertical movement and collisions
# Since vertical movement can be potentially a lot faster than horizontal due to gravity,
# we need to check for collisions as we go each frame, instead of after moving by the velocity.
for i in range(abs(int(self.velocity.y))):
collided = False
self.rect.y += abs(self.velocity.y) / self.velocity.y
for sprite in self.collision_sprites.sprites():
if not sprite.rect.colliderect(self.rect): continue
# Bottom collision
elif abs(self.rect.bottom - sprite.rect.top) < COLLISION_TOLERANCE and self.velocity.y > 0:
self.rect.bottom = sprite.rect.top
# Top collision
elif abs(self.rect.top - sprite.rect.bottom) < COLLISION_TOLERANCE and self.velocity.y < 0:
self.rect.top = sprite.rect.bottom
self.velocity.y = 0
collided = True
break
if collided: break
# Set gravity to fall gravity scale if we're falling or not holding jump
if (not self.is_grounded and (not self.jump_pressed or self.velocity.y > 0)):
self.current_gravity = self.fall_gravity
def set_grounded(self):
"""Moves the player down 1 pixel and checks for a collision."""
self.rect.y += 1
for sprite in self.collision_sprites.sprites():
if sprite.rect.colliderect(self.rect):
if not abs(self.rect.bottom - sprite.rect.top) < COLLISION_TOLERANCE: continue
self.is_grounded = True
self.was_grounded = True
self.is_jumping = False
self.jumps_remaining = MAX_JUMPS
break
        else:
            # for/else: no sprite collided, so the player is airborne
            # (a bare `continue` is not valid inside a for/else clause)
            self.is_grounded = False
            left_ground_this_frame = self.was_grounded and not self.is_grounded
            if left_ground_this_frame:
                self.air_time_start = pygame.time.get_ticks()
                self.was_grounded = False
self.rect.y -= 1
def update(self):
"""Update the player."""
self.update_air_timer()
self.move()
self.set_grounded()
print(f"jumps_remaining: {self.jumps_remaining}")
print(f"jump_locked: {self.jumping_locked}")
# Zombie method, only used if I decide I need perfect delta time (should probably remove this...)
def update_delta_time(self):
"""Update the delta time."""
self.delta_time = (pygame.time.get_ticks() - self.last_frame_ticks) / 1000
self.last_frame_ticks = pygame.time.get_ticks()
```
#### File: pygame-platformer/source/utils.py
```python
import pygame
from math import inf, sqrt
def clamp(n, smallest, largest): return max(smallest, min(n, largest))
def pygame_vector2_smooth_damp(current: pygame.math.Vector2,
target: pygame.math.Vector2,
smooth_time: float, delta_time: float,
current_velocity=pygame.math.Vector2(0, 0)) -> pygame.math.Vector2:
"""
Gradually changes a vector towards a desired goal over time.
:param current: Current position.
:param target: Position we're trying to reach.
:param smooth_time: Approximately the time it will take to reach the target.
:param delta_time: The realtime since the last call to this function.
:param current_velocity: Current velocity, this value is modified by the function every time you call it.
:return: New position.
"""
max_speed = inf
smoothTime = max(0.0001, smooth_time)
omega = 2 / smoothTime
x = omega * delta_time
exp = 1 / (1 + x + 0.48 * x * x + 0.235 * x * x * x)
change_x = current.x - target.x
change_y = current.y - target.y
original_to = target
max_change = max_speed * smoothTime
max_change_sq = max_change * max_change
sq_dist = change_x * change_x + change_y * change_y
if sq_dist > max_change_sq:
mag = float(sqrt(sq_dist))
change_x = change_x / mag * max_change
change_y = change_y / mag * max_change
target.x = current.x - change_x
target.y = current.y - change_y
temp_x = (current_velocity.x + omega * change_x) * delta_time
temp_y = (current_velocity.y + omega * change_y) * delta_time
current_velocity.x = (current_velocity.x - omega * temp_x) * exp
current_velocity.y = (current_velocity.y - omega * temp_y) * exp
output_x = target.x + (change_x + temp_x) * exp
output_y = target.y + (change_y + temp_y) * exp
# Prevent overshooting the target position
orig_minus_cur_x = original_to.x - current.x
orig_minus_cur_y = original_to.y - current.y
out_minus_orig_x = output_x - original_to.x
out_minus_orig_y = output_y - original_to.y
if orig_minus_cur_x * out_minus_orig_x + orig_minus_cur_y * out_minus_orig_y > 0:
output_x = original_to.x
output_y = original_to.y
current_velocity.x = (output_x - original_to.x) / delta_time
current_velocity.y = (output_y - original_to.y) / delta_time
return pygame.math.Vector2(output_x, output_y)
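# Usage sketch (hypothetical values): ease a velocity toward a target once per frame.
#
#   vel = pygame.math.Vector2(0, 0)
#   goal = pygame.math.Vector2(100, 0)
#   for _ in range(60):
#       vel = pygame_vector2_smooth_damp(vel, goal, smooth_time=0.2, delta_time=1/60)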
``` |
{
"source": "2over12/archr",
"score": 2
} |
#### File: archr/analyzers/core.py
```python
import contextlib
import tempfile
import logging
import os
l = logging.getLogger("archr.analyzers.core_analyzer")
from . import ContextAnalyzer
class CoreResults:
local_core_path = None
target_core_path = None
_super_core_cmd = "echo core | docker run --rm --privileged -i ubuntu tee /proc/sys/kernel/core_pattern"
class CoreAnalyzer(ContextAnalyzer):
"""
Runs the target and retrieves a core file. Assumes a /proc/sys/kernel/core_pattern is "core".
"""
def __init__(self, *args, **kwargs):
with open("/proc/sys/kernel/core_pattern", 'rb') as c:
if c.read().strip() != b"core":
l.warning("/proc/sys/kernel/core_pattern needs to be 'core'. I am setting this system-wide.")
os.system(_super_core_cmd)
super().__init__(*args, **kwargs)
if type(self.target) is not targets.DockerImageTarget:
l.warning("When using a LocalTarget, this Analyzer will chmod 777 your CWD!!! Be careful.")
@contextlib.contextmanager
def fire_context(self, **kwargs): #pylint:disable=arguments-differ
if self.target.run_command(["chmod","777",os.path.dirname(self.target.target_path)], user="root").wait() != 0:
raise ArchrError("failed to chmod CWD. core will *not* drop")
r = CoreResults()
r.target_core_path = os.path.join(os.path.dirname(self.target.target_path), "core")
r.local_core_path = tempfile.mktemp(prefix="arch_core_")
try:
with self.target.flight_context(result=r, **kwargs) as flight:
yield flight
finally:
with open(r.local_core_path, 'wb') as c:
c.write(self.target.retrieve_contents(r.target_core_path))
from ..errors import ArchrError
from .. import targets
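# Hypothetical usage sketch (image name assumed):
#
#   with targets.DockerImageTarget('archr-test:crasher').build().start() as t:
#       with CoreAnalyzer(t).fire_context() as flight:
#           pass  # drive the target to a crash here
#   # flight.result.local_core_path then points at the retrieved core dump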
```
#### File: archr/analyzers/gdb.py
```python
from ..errors import ArchrError
from . import ContextAnalyzer
import contextlib
import subprocess
import tempfile
import logging
import signal
import shutil
import os
import time
l = logging.getLogger("archr.analyzers.gdb")
class FakeTempdir:
def __init__(self, path):
self.name = path
def cleanup(self):
return
class GDBResult:
returncode = None
signal = None
crashed = False
timed_out = False
def __init__(self, trace_dir=None):
if trace_dir is None:
self.trace_dir = tempfile.TemporaryDirectory(prefix='gdb_trace_dir_')
else:
self.trace_dir = FakeTempdir(trace_dir)
class GDBAnalyzer(ContextAnalyzer):
REQUIRED_IMPLANT = "gdb"
def __init__(self, target, local_trace_dir=None, timeout=10):
super().__init__(target)
self.timeout = timeout
self.local_trace_dir = local_trace_dir
@contextlib.contextmanager
def fire_context(self, prefix_args=None, gdb_args=None, gdb_script=None, sleep_time=0.1):
"""Run the target with gdb.
Keyword arguments:
prefix_args -- additional commands BEFORE the gdb command (default None)
gdb_args -- addition args for gdb (default None)
gdb_script -- Path of an optional gdb_script file (default None)
"""
if self.local_trace_dir:
if os.path.exists(self.local_trace_dir):
shutil.rmtree(self.local_trace_dir)
os.mkdir(self.local_trace_dir)
else:
self.local_trace_dir = tempfile.mkdtemp(prefix="/tmp/gdb_tracer_")
fire_path = os.path.join(self.target.tmpwd, "gdb", "fire")
gdb_command = []
if prefix_args:
gdb_command += prefix_args
gdb_command += [fire_path]
if gdb_args:
gdb_command += gdb_args
if gdb_script:
paths = {}
d_src = os.path.dirname(gdb_script)
d_dst = os.path.dirname(fire_path)
paths[d_dst] = d_src
self.target.inject_paths(paths)
script_remote_path = os.path.join(
d_dst, os.path.basename(gdb_script))
gdb_command += ["-x", script_remote_path]
gdb_command += ["--args"]
gdb_command += self.target.target_args
r = GDBResult(trace_dir=self.local_trace_dir)
try:
with self.target.flight_context(gdb_command, timeout=self.timeout, result=r) as flight:
# TODO: we need a better way of dealing with this, dnsmasq is too slow at initializing
time.sleep(sleep_time)
yield flight
except subprocess.TimeoutExpired:
r.timed_out = True
else:
r.timed_out = False
r.returncode = flight.process.returncode
assert r.returncode is not None
# did a crash occur?
if r.returncode in [139, -11]:
r.crashed = True
r.signal = signal.SIGSEGV
            elif r.returncode in [132, -9]:
r.crashed = True
r.signal = signal.SIGILL
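# Hypothetical usage sketch (script path assumed):
#
#   with GDBAnalyzer(target, timeout=30).fire_context(gdb_script='cmds.gdb') as flight:
#       pass  # interact with the process via the flight while gdb runs it
#   # flight.result then reports returncode / crashed / signal / timed_out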
```
#### File: archr/analyzers/ltrace.py
```python
import os
import logging
from contextlib import contextmanager
from . import ContextAnalyzer
from .strace import super_yama
l = logging.getLogger("archr.analyzers.ltrace")
class LTraceAnalyzer(ContextAnalyzer):
"""
Returns an ltrace instance which has launched a fresh instance of the process
"""
REQUIRED_IMPLANT = "ltrace"
@contextmanager
def fire_context(self, args_prefix=None, trace_args=None, **kwargs): #pylint:disable=arguments-differ
"""
Starts ltrace with a fresh process.
:param trace_args: Options for ltrace
:return: Target instance returned by run_command
"""
fire_path = os.path.join(self.target.tmpwd, "ltrace", "fire")
args_prefix = (args_prefix or []) + [fire_path] + (trace_args or []) + ["--"]
with self.target.flight_context(args_prefix=args_prefix, **kwargs) as flight:
yield flight
flight.result = flight.process.stderr.read() # illegal, technically
class LTraceAttachAnalyzer(ContextAnalyzer):
"""
Returns an ltrace instance attached to a running instance of the target.
"""
REQUIRED_IMPLANT = "ltrace"
@contextmanager
def fire_context(self, pid=None, trace_args=None, **kwargs): #pylint:disable=arguments-differ
"""
Attaches ltrace to an already existing process.
:param pid: PID of target process
:param trace_args: Options for ltrace
:param kwargs: Additional arguments
:return:
"""
super_yama()
fire_path = os.path.join(self.target.tmpwd, "ltrace", "fire")
cmd_args = [fire_path] + (trace_args or []) + ["-p", "%d" % pid]
with self.target.flight_context(args=cmd_args, **kwargs) as flight:
yield flight
flight.result = flight.process.stderr.read() # illegal, technically
```
#### File: archr/tests/test_analyzer_angr.py
```python
import tempfile
import claripy
import shutil
import archr
import os
import unittest
from common import build_container
class TestangrAnalyzer(unittest.TestCase):
@classmethod
def setUpClass(cls):
build_container("entrypoint-env")
build_container("cat-flag")
build_container("fauxware")
build_container("syscall_test")
def angr_checks(self, t):
dsb = archr.analyzers.DataScoutAnalyzer(t)
apb = archr.analyzers.angrProjectAnalyzer(t, dsb)
asb = archr.analyzers.angrStateAnalyzer(t, apb)
project = apb.fire()
self.assertTrue(all(obj.binary.startswith("/tmp") for obj in project.loader.all_elf_objects[1:]))
state = asb.fire()
initial_stack = state.solver.eval(state.memory.load(state.regs.rsp, 200), cast_to=bytes)
self.assertIn(b"ARCHR=YES", initial_stack)
self.assertTrue(state.solver.eval_one(state.posix.brk == apb._mem_mapping['[heap]']))
self.assertTrue(state.solver.eval_one((state.regs.sp + 0xfff) & ~claripy.BVV(0xfff, project.arch.bits) == apb._mem_mapping['[stack-end]']))
# now screw with the memory map
apb._mem_mapping['[stack-end]'] = 0x1337000
state = asb.fire()
self.assertTrue(state.solver.eval_one((state.regs.sp + 0xfff) & ~claripy.BVV(0xfff, project.arch.bits) == apb._mem_mapping['[stack-end]']))
# now check the filesystem resolution
fd = state.posix.open('/etc/passwd', 0)
stat = state.posix.fstat(fd)
self.assertIsNotNone(stat)
self.assertFalse(state.solver.symbolic(stat.st_size))
self.assertNotEqual(state.solver.eval(stat.st_size), 0)
# done
project.loader.close()
@unittest.skipUnless(archr._angr_available, "angr required")
def test_env_angr(self):
with archr.targets.DockerImageTarget('archr-test:entrypoint-env').build().start() as t:
self.angr_checks(t)
@unittest.skipUnless(archr._angr_available, "angr required")
def test_env_angr_local(self):
tf = tempfile.mktemp()
shutil.copy("/usr/bin/env", tf)
with archr.targets.LocalTarget([tf], target_env=["ARCHR=YES"]).build().start() as t:
self.angr_checks(t)
os.unlink(tf)
@unittest.skipUnless(archr._angr_available, "angr required")
def test_angr_catflag(self):
with archr.targets.DockerImageTarget('archr-test:cat-flag').build().start() as t:
dsb = archr.analyzers.DataScoutAnalyzer(t)
apb = archr.analyzers.angrProjectAnalyzer(t, dsb)
asb = archr.analyzers.angrStateAnalyzer(t, apb)
project = apb.fire()
state = asb.fire()
simgr = project.factory.simulation_manager(state)
simgr.run()
self.assertEqual(len(simgr.errored), 0)
self.assertEqual(len(simgr.deadended), 1)
self.assertEqual(simgr.one_deadended.posix.dumps(1), b"archr-flag\n")
def _default_fauxware_checks(self, simgr):
num_authed, num_rejected, num_bypassed = 0, 0, 0
for s in simgr.deadended:
if b'Go away' in s.posix.dumps(1):
num_rejected += 1
if b'Welcome to the admin console, trusted user!' in s.posix.dumps(1):
num_authed += 1
if b'SOSNEAKY' in s.posix.dumps(0):
num_bypassed += 1
self.assertEqual(num_authed, 2)
self.assertEqual(num_bypassed, 1)
self.assertEqual(num_rejected, 1)
@unittest.skipUnless(archr._angr_available, "angr required")
def test_angr_fauxware(self):
with archr.targets.DockerImageTarget('archr-test:fauxware').build().start() as t:
dsb = archr.analyzers.DataScoutAnalyzer(t)
apb = archr.analyzers.angrProjectAnalyzer(t, dsb)
asb = archr.analyzers.angrStateAnalyzer(t, apb)
project = apb.fire()
state = asb.fire()
simgr = project.factory.simulation_manager(state)
simgr.run()
self._default_fauxware_checks(simgr)
@unittest.skipUnless(archr._angr_available, "angr required")
def test_angr_fauxware_custom_plt_hooks(self):
import angr # pylint:disable=import-outside-toplevel
original_puts = angr.SIM_PROCEDURES['libc']['puts']
original_read = angr.SIM_PROCEDURES['posix']['read']
class new_puts(angr.SimProcedure):
def run(self, s): # pylint:disable=arguments-differ
self.state.globals['num_puts'] = self.state.globals.get('num_puts', 0) + 1
return self.inline_call(original_puts, s).ret_expr
class new_read(angr.SimProcedure):
def run(self, fd, buf, _len): # pylint:disable=arguments-differ
self.state.globals['num_read'] = self.state.globals.get('num_read', 0) + 1
return self.inline_call(original_read, fd, buf, _len).ret_expr
with archr.targets.DockerImageTarget('archr-test:fauxware').build().start() as t:
dsb = archr.analyzers.DataScoutAnalyzer(t)
apb = archr.analyzers.angrProjectAnalyzer(t, dsb, custom_hooks={'puts': new_puts(), 'read': new_read()})
asb = archr.analyzers.angrStateAnalyzer(t, apb)
project = apb.fire()
state = asb.fire()
simgr = project.factory.simulation_manager(state)
simgr.run()
num_authed, num_rejected, num_bypassed = 0, 0, 0
for s in simgr.deadended:
if b'Go away' in s.posix.dumps(1):
num_rejected += 1
self.assertEqual(s.globals['num_puts'], 2)
self.assertEqual(s.globals['num_read'], 5)
if b'Welcome to the admin console, trusted user!' in s.posix.dumps(1):
num_authed += 1
if b'SOSNEAKY' in s.posix.dumps(0):
num_bypassed += 1
self.assertEqual(s.globals['num_puts'], 3)
self.assertEqual(s.globals['num_read'], 4)
else:
self.assertEqual(s.globals['num_puts'], 3)
self.assertEqual(s.globals['num_read'], 5)
self.assertEqual(num_authed, 2)
self.assertEqual(num_bypassed, 1)
self.assertEqual(num_rejected, 1)
@unittest.skipUnless(archr._angr_available, "angr required")
def test_angr_fauxware_custom_binary_function_hooks(self):
import angr # pylint:disable=import-outside-toplevel
class rejected(angr.SimProcedure):
def run(self): # pylint:disable=arguments-differ
self.state.posix.stdout.write(None, b"Get outta here!")
self.exit(1)
class authorized(angr.SimProcedure):
def run(self): # pylint:disable=arguments-differ
self.state.posix.stdout.write(None, b"Good on ya, mate! Get in 'ere, ya bloody admin.")
hooks = {'accepted': authorized(), 'rejected': rejected()}
with archr.targets.DockerImageTarget('archr-test:fauxware').build().start() as t:
dsb = archr.analyzers.DataScoutAnalyzer(t)
apb = archr.analyzers.angrProjectAnalyzer(t, dsb, custom_hooks=hooks)
asb = archr.analyzers.angrStateAnalyzer(t, apb)
project = apb.fire()
state = asb.fire()
simgr = project.factory.simulation_manager(state)
simgr.run()
num_authed, num_rejected, num_bypassed = 0, 0, 0
for s in simgr.deadended:
if b"Get outta here!" in s.posix.dumps(1):
num_rejected += 1
if b"Good on ya, mate! Get in 'ere, ya bloody admin." in s.posix.dumps(1):
num_authed += 1
if b'SOSNEAKY' in s.posix.dumps(0):
num_bypassed += 1
self.assertEqual(num_authed, 2)
self.assertEqual(num_bypassed, 1)
self.assertEqual(num_rejected, 1)
@unittest.skipUnless(archr._angr_available, "angr required")
def test_angr_syscall_test(self):
with archr.targets.DockerImageTarget('archr-test:syscall_test').build().start() as t:
dsb = archr.analyzers.DataScoutAnalyzer(t)
apb = archr.analyzers.angrProjectAnalyzer(t, dsb)
asb = archr.analyzers.angrStateAnalyzer(t, apb)
project = apb.fire()
state = asb.fire()
simgr = project.factory.simulation_manager(state)
simgr.run()
self.assertEqual(len(simgr.deadended), 1)
exit_code, = [e.objects['exit_code'] for e in simgr.one_deadended.history.events if e.type == 'terminate']
self.assertEqual(simgr.one_deadended.posix.dumps(1), b'Hello, world!\n')
self.assertEqual(simgr.one_deadended.solver.eval_one(exit_code), 42)
@unittest.skipUnless(archr._angr_available, "angr required")
def test_angr_syscall_test_hooks(self):
import angr # pylint:disable=import-outside-toplevel
original_write = angr.SIM_PROCEDURES['posix']['write']
class new_puts(angr.SimProcedure):
def run(self, code): # pylint:disable=arguments-differ
new_exit = self.state.solver.eval_one(code) + 27
self.exit(new_exit)
class new_write(angr.SimProcedure):
def run(self, fd, buf, _): # pylint:disable=arguments-differ
self.state.globals['num_write'] = self.state.globals.get('num_read', 0) + 1
return self.inline_call(original_write, fd, buf, 5).ret_expr
        syscalls = dict(exit=new_puts(), write=new_write())
with archr.targets.DockerImageTarget('archr-test:syscall_test').build().start() as t:
dsb = archr.analyzers.DataScoutAnalyzer(t)
apb = archr.analyzers.angrProjectAnalyzer(t, dsb, custom_systemcalls=syscalls)
asb = archr.analyzers.angrStateAnalyzer(t, apb)
project = apb.fire()
state = asb.fire()
simgr = project.factory.simulation_manager(state)
simgr.run()
self.assertEqual(len(simgr.deadended), 1)
exit_code, = [e.objects['exit_code'] for e in simgr.one_deadended.history.events if e.type == 'terminate']
self.assertEqual(simgr.one_deadended.posix.dumps(1), b'Hello')
self.assertEqual(simgr.one_deadended.solver.eval_one(exit_code), 69)
if __name__ == '__main__':
unittest.main()
```
#### File: archr/tests/test_analyzer_gdbserver.py
```python
import pygdbmi.gdbcontroller
import archr
import unittest
from common import build_container
class TestGdbServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
build_container("entrypoint-false")
build_container("entrypoint-env")
def gdb_do(self, t):
with archr.analyzers.GDBServerAnalyzer(t).fire_context(port=31337) as gbf:
gc = pygdbmi.gdbcontroller.GdbController()
gc.write("target remote %s:%d" % (t.ipv4_address, 31337))
gc.write("continue")
gc.exit()
return gbf.process
def check_gdb_cat(self, t):
p = self.gdb_do(t)
assert b"Child exited with status 1" in p.stderr.read()
@unittest.skip("broken")
def test_cat_docker(self):
with archr.targets.DockerImageTarget('archr-test:entrypoint-false').build().start() as t:
self.check_gdb_cat(t)
@unittest.skip("broken")
def test_env_order(self):
with archr.targets.DockerImageTarget('archr-test:entrypoint-env').build().start() as t:
a = self.gdb_do(t).stdout.read()
b = self.gdb_do(t).stdout.read()
c = self.gdb_do(t).stdout.read()
assert a == b
assert b == c
@unittest.skip("broken")
def test_cat_local(self):
with archr.targets.LocalTarget(["/bin/false"]).build().start() as t:
self.check_gdb_cat(t)
if __name__ == '__main__':
unittest.main()
```
#### File: archr/tests/test_analyzer_ltrace.py
```python
from time import sleep
import archr
import unittest
from common import build_container
BIN_CAT = "/bin/cat"
CAT_ARGS = ["/etc/passwd"]
LTRACE_ARGS = "-f -e [email protected]* -n 2".split()
class TestAnalyzerLtrace(unittest.TestCase):
@classmethod
def setUpClass(cls):
build_container("socat")
build_container("cat")
def ltrace_proc(self, t, **kwargs):
b = archr.analyzers.LTraceAnalyzer(t)
with b.fire_context(trace_args=LTRACE_ARGS, **kwargs) as flight:
sleep(1)
flight.process.terminate()
return flight.result
def ltrace_attach(self, t, p, **kwargs):
b = archr.analyzers.LTraceAttachAnalyzer(t)
pid = p.pid if isinstance(t, archr.targets.LocalTarget) else t.get_proc_pid('socat')
with b.fire_context(pid=pid, trace_args=LTRACE_ARGS, **kwargs) as flight:
sleep(0.1)
nc = flight.get_channel('tcp:0') # misuse of flight
nc.send(b'ahoi!')
assert nc.readuntil(b'ahoi!', timeout=5) == b'ahoi!'
return flight.result
def check_ltrace_proc(self, t, **kwargs):
output = self.ltrace_proc(t, **kwargs)
assert b'cat->open' in output
assert b'cat->malloc' in output
assert b'cat->read' in output
def check_ltrace_attach(self, t, **kwargs):
target = t.run_command() # start target
output = self.ltrace_attach(t, target, **kwargs)
target.terminate()
assert b'exe->accept' in output
assert b'exe->malloc' in output
assert b'exe->free' in output
assert b'exe->read' in output
assert b'exe->write' in output
@unittest.skip("broken")
def test_ltrace_proc_local(self):
with archr.targets.LocalTarget(["/bin/cat", "/etc/passwd"]).build().start() as t:
self.check_ltrace_proc(t)
@unittest.skip("broken")
def test_ltrace_proc_docker(self):
with archr.targets.DockerImageTarget('archr-test:cat', target_args=['/bin/cat', '/etc/passwd']).build().start() as t:
self.check_ltrace_proc(t)
@unittest.skip("broken")
def test_ltrace_attach_local(self):
with archr.targets.LocalTarget("socat tcp-l:7573,reuseaddr exec:cat".split(), tcp_ports=[7573]).build().start() as t:
self.check_ltrace_attach(t)
@unittest.skip("broken")
def test_ltrace_attach_docker(self):
with archr.targets.DockerImageTarget('archr-test:socat').build().start() as t:
self.check_ltrace_attach(t)
if __name__ == '__main__':
unittest.main()
```
#### File: archr/tests/test_localtarget_simple.py
```python
import socket
import archr
import time
import os
import unittest
class TestLocalTarget(unittest.TestCase):
def test_local_cat(self):
with archr.targets.LocalTarget(["/bin/cat"]).build().start() as t:
p = t.run_command()
p.stdin.write(b"Hello!\n")
assert p.stdout.read(7) == b"Hello!\n"
def test_local_true(self):
with archr.targets.LocalTarget(["/bin/true"]).build().start() as t:
p = t.run_command()
p.wait()
assert p.returncode == 0
def test_local_false(self):
with archr.targets.LocalTarget(["/bin/false"]).build().start() as t:
p = t.run_command()
p.wait()
assert p.returncode == 1
def test_local_crasher(self):
with archr.targets.LocalTarget([os.path.join(os.path.dirname(__file__), "dockers", "crasher", "crasher")]).build().start() as t:
p = t.run_command()
p.wait()
assert p.returncode == -11
def test_local_nccat(self):
with archr.targets.LocalTarget("socat tcp-l:40001,reuseaddr exec:cat".split(), tcp_ports=[40001], ipv4_address="127.0.0.1").build().start() as t:
t.run_command()
assert t.tcp_ports == [ 40001 ]
try:
s = socket.create_connection((t.ipv4_address, 40001))
except ConnectionRefusedError:
time.sleep(5)
s = socket.create_connection((t.ipv4_address, 40001))
s.send(b"Hello\n")
assert s.recv(6) == b"Hello\n"
def test_local_env_context(self):
with archr.targets.LocalTarget(["/usr/bin/env"], target_env=["ARCHR=HAHA"]).build().start() as t:
with t.run_command() as p:
stdout,_ = p.communicate()
assert b"ARCHR=HAHA" in stdout.split(b'\n')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "2PacIsAlive/deepnet.works",
"score": 3
} |
#### File: deep_networks/data/csv.py
```python
import logging
import pandas as pd
class Parser(object):
"""Parse that b
Attributes:
log (logging.Logger): The logger for this module.
"""
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def parse(self, csvfile):
"""
Generates:
list of tuples: First item is index, second item is pandas.dataframe.
"""
return pd.read_csv(csvfile).iterrows()
```
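A minimal usage sketch for the `Parser` above; the CSV file name is a hypothetical example:
```python
# Hypothetical usage; 'scans.csv' is an assumed file name.
from deep_networks.data.csv import Parser

parser = Parser()
for index, row in parser.parse('scans.csv'):
    # iterrows() yields (index, pandas.Series) pairs
    print(index, row.to_dict())
```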
#### File: deep_networks/data/np.py
```python
import logging
import numpy as np
class Saver(object):
"""Save that b
Attributes:
log (logging.Logger): The logger for this module.
"""
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def save(self, filename, image):
"""
Args:
filename (str): The path/name of the file being saved.
image (numpy.ndarray): The image to save.
"""
np.save(filename, image)
class Loader(object):
"""Load that b
Attributes:
log (logging.Logger): The logger for this module.
"""
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def load(self, filename):
"""
Args:
filename (str): The path/name of the file to load.
Returns:
numpy.ndarray: The numpy array from the specified file.
"""
return np.load(filename)
```
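A quick round-trip sketch for the `Saver` and `Loader` above (the file name is an assumed example):
```python
# Hypothetical round trip; 'image.npy' is an assumed file name.
import numpy as np
from deep_networks.data.np import Saver, Loader

image = np.zeros((64, 64), dtype=np.int16)
Saver().save('image.npy', image)
restored = Loader().load('image.npy')
assert np.array_equal(image, restored)
```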
#### File: data/preprocessing/segment_lungs.py
```python
import logging
import numpy as np
from skimage import measure
class LungSegmenter(object):
"""Extract the lungs from a scan.
Attributes:
log (logging.Logger): The logger for this module.
"""
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def largest_label_volume(self, im, bg=-1):
"""
Adapted from:
https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial
Args:
im:
bg:
Returns:
"""
vals, counts = np.unique(im, return_counts=True)
counts = counts[vals != bg]
vals = vals[vals != bg]
        if len(counts) > 0:
            biggest = vals[np.argmax(counts)]
        else:
            biggest = None  # callers check for None when no labels remain
        return biggest
def mask(self, image, fill_lungs=True):
"""
Adapted from:
https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial
Args:
image:
fill_lungs:
Returns:
"""
# not actually binary, but 1 and 2.
# 0 is treated as background, which we do not want
binary_image = np.array(image > -320, dtype=np.int8)+1
labels = measure.label(binary_image)
# Pick the pixel in the very corner to determine which label is air.
# Improvement: Pick multiple background labels from around the patient
# More resistant to "trays" on which the patient lays cutting the air
# around the person in half
background_label = labels[0,0,0]
#Fill the air around the person
binary_image[background_label == labels] = 2
# Method of filling the lung structures (that is superior to something like
# morphological closing)
if fill_lungs:
# For every slice we determine the largest solid structure
for i, axial_slice in enumerate(binary_image):
axial_slice = axial_slice - 1
labeling = measure.label(axial_slice)
l_max = self.largest_label_volume(labeling, bg=0)
if l_max is not None: #This slice contains some lung
binary_image[i][labeling != l_max] = 1
        binary_image -= 1  # Make the image actually binary
        binary_image = 1 - binary_image  # Invert it, lungs are now 1
        # Remove other air pockets inside the body
labels = measure.label(binary_image, background=0)
l_max = self.largest_label_volume(labels, bg=0)
if l_max is not None: # There are air pockets
binary_image[labels != l_max] = 0
return binary_image
```
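A minimal sketch of applying the segmenter above; the scan is a hypothetical 3D volume on the Hounsfield-like scale that the -320 threshold in `mask()` assumes:
```python
# Hypothetical usage; 'scan' stands in for a real 3D CT volume.
import numpy as np
from deep_networks.data.preprocessing.segment_lungs import LungSegmenter

scan = np.full((10, 128, 128), -1000, dtype=np.int16)  # dummy air-filled volume
lung_mask = LungSegmenter().mask(scan, fill_lungs=True)
segmented = scan * lung_mask  # zero out everything outside the lungs
```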
#### File: deepnet.works/www/views.py
```python
from www import app
from flask import render_template, request
@app.route('/')
def index():
return render_template('index.html')
@app.route('/build')
def builder():
return render_template('build.html')
@app.route('/train')
def trainer():
return render_template('train.html', network_name=request.args.get("network") or None)
``` |
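The routes above can be smoke-tested with Flask's built-in test client; this sketch assumes the `www` package is importable and its templates are present:
```python
# Hypothetical smoke test for the routes above.
from www import app

with app.test_client() as client:
    assert client.get('/').status_code == 200
    assert client.get('/build').status_code == 200
    # 'lungnet' is an assumed network name passed through to the template
    client.get('/train?network=lungnet')
```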
{
"source": "2pees/remi",
"score": 2
} |
#### File: remi/editor/editor_widgets.py
```python
import remi.gui as gui
import html_helper
import inspect
import re
class InstancesTree(gui.TreeView, gui.EventSource):
def __init__(self, **kwargs):
super(InstancesTree, self).__init__(**kwargs)
gui.EventSource.__init__(self)
def append_instance(self, instance, parent):
item = gui.TreeItem(instance.attributes['editor_varname'])
if parent==None:
parent = self
item.instance = instance
item.onclick.connect(self.on_tree_item_selected)
parent.append(item)
return item
    def item_by_instance(self, node, instance):
        if not hasattr(node, 'attributes'):
            return None
        if node.identifier != self.identifier:
            if not hasattr(node, 'instance'):
                return None
        for item in node.children.values():
            # match the tree item that wraps the searched instance
            if hasattr(item, 'instance') and item.instance.identifier == instance.identifier:
                return [node, item]
            found = self.item_by_instance(item, instance)
            if found is not None:
                return found
        return None
def remove_instance(self, instance):
node, item = self.item_by_instance(self, instance)
node.remove_child(item)
def select_instance(self, node, instance):
if not hasattr(node, 'attributes'):
return
if node.identifier!=self.identifier:
if hasattr(node,'instance'):
if node.instance.identifier == instance.identifier:
node.style['background-color'] = 'lightblue'
else:
node.style['background-color'] = 'white'
node.attributes['treeopen'] = 'true'
for item in node.children.values():
self.select_instance(item, instance)
@gui.decorate_event
def on_tree_item_selected(self, emitter):
self.select_instance(self, emitter.instance)
return (emitter.instance,)
def append_instances_from_tree(self, node, parent=None):
if not hasattr(node, 'attributes'):
return
if not 'editor_varname' in node.attributes.keys():
return
nodeTreeItem = self.append_instance(node, parent)
for child in node.children.values():
self.append_instances_from_tree(child, nodeTreeItem)
class InstancesWidget(gui.VBox):
def __init__(self, **kwargs):
super(InstancesWidget, self).__init__(**kwargs)
self.titleLabel = gui.Label('Instances list', width='100%')
self.titleLabel.add_class("DialogTitle")
self.style['align-items'] = 'flex-start'
self.treeView = InstancesTree()
self.append([self.titleLabel, self.treeView])
self.titleLabel.style['order'] = '-1'
self.titleLabel.style['-webkit-order'] = '-1'
self.treeView.style['order'] = '0'
self.treeView.style['-webkit-order'] = '0'
def update(self, editorProject, selectedNode):
self.treeView.empty()
if 'root' in editorProject.children.keys():
self.treeView.append_instances_from_tree(editorProject.children['root'])
self.treeView.select_instance(self.treeView, selectedNode)
def select(self, selectedNode):
self.treeView.select_instance(self.treeView, selectedNode)
class ToolBar(gui.Widget):
def __init__(self, **kwargs):
super(ToolBar, self).__init__(**kwargs)
self.set_layout_orientation(gui.Widget.LAYOUT_HORIZONTAL)
self.style['background-color'] = 'white'
def add_command(self, imagePath, callback, title):
icon = gui.Image(imagePath, height='90%', margin='0px 1px')
icon.style['outline'] = '1px solid lightgray'
icon.onclick.connect(callback)
icon.attributes['title'] = title
self.append(icon)
class SignalConnection(gui.Widget):
def __init__(self, widget, listenersList, eventConnectionFuncName, eventConnectionFunc, **kwargs):
super(SignalConnection, self).__init__(**kwargs)
self.set_layout_orientation(gui.Widget.LAYOUT_HORIZONTAL)
self.style.update({'overflow':'visible', 'height':'24px', 'display':'block', 'outline':'1px solid lightgray'})
self.label = gui.Label(eventConnectionFuncName, width='49%')
self.label.style.update({'float':'left', 'font-size':'10px', 'overflow':'hidden', 'outline':'1px solid lightgray'})
self.dropdown = gui.DropDown(width='49%', height='100%')
self.dropdown.onchange.connect(self.on_connection)
self.append([self.label, self.dropdown])
self.dropdown.style['float'] = 'right'
self.eventConnectionFunc = eventConnectionFunc
self.eventConnectionFuncName = eventConnectionFuncName
self.refWidget = widget
self.listenersList = listenersList
self.dropdown.append(gui.DropDownItem("None"))
for w in listenersList:
ddi = gui.DropDownItem(w.attributes['editor_varname'])
ddi.listenerInstance = w
self.dropdown.append(ddi)
if hasattr(self.eventConnectionFunc, 'callback_copy'): #if the callback is not None, and so there is a listener
connectedListenerName = eventConnectionFunc.callback_copy.__self__.attributes['editor_varname']
self.dropdown.set_value( connectedListenerName )
def on_connection(self, widget, dropDownValue):
if self.dropdown.get_value()=='None':
self.eventConnectionFunc.connect(None)
else:
listener = self.dropdown._selected_item.listenerInstance
listener.attributes['editor_newclass'] = "True"
print("Event: " + self.eventConnectionFuncName + " signal connection to: " + listener.attributes['editor_varname'] + " from:" + self.refWidget.attributes['editor_varname'])
back_callback = getattr(self.refWidget, self.eventConnectionFuncName).callback
listener.__class__.fakeListenerFunc = fakeListenerFunc
getattr(self.refWidget, self.eventConnectionFuncName).connect(listener.fakeListenerFunc)
getattr(self.refWidget, self.eventConnectionFuncName).callback_copy = getattr(self.refWidget, self.eventConnectionFuncName).callback
getattr(self.refWidget, self.eventConnectionFuncName).callback = back_callback
def fakeListenerFunc(self,*args):
print('event trap')
class SignalConnectionManager(gui.Widget):
""" This class allows to interconnect event signals """
def __init__(self, **kwargs):
super(SignalConnectionManager, self).__init__(**kwargs)
self.label = gui.Label('Signal connections', width='100%')
self.label.add_class("DialogTitle")
self.append(self.label)
self.container = gui.VBox(width='100%', height='90%')
self.container.style['justify-content'] = 'flex-start'
self.container.style['overflow-y'] = 'scroll'
self.listeners_list = []
def build_widget_list_from_tree(self, node):
if not hasattr(node, 'attributes'):
return
if not 'editor_varname' in node.attributes.keys():
return
self.listeners_list.append(node)
for child in node.children.values():
self.build_widget_list_from_tree(child)
def update(self, widget, widget_tree):
""" for the selected widget are listed the relative signals
for each signal there is a dropdown containing all the widgets
the user will select the widget that have to listen a specific event
"""
self.listeners_list = []
self.build_widget_list_from_tree(widget_tree)
self.label.set_text('Signal connections: ' + widget.attributes['editor_varname'])
#del self.container
self.container = gui.VBox(width='100%', height='90%')
self.container.style['justify-content'] = 'flex-start'
self.container.style['overflow-y'] = 'scroll'
self.append(self.container, 'container')
##for all the events of this widget
#isclass instead of ismethod because event methods are replaced with ClassEventConnector
for (setOnEventListenerFuncname,setOnEventListenerFunc) in inspect.getmembers(widget):
#if the member is decorated by decorate_set_on_listener and the function is referred to this event
if hasattr(setOnEventListenerFunc, '_event_info'):
self.container.append( SignalConnection(widget,
self.listeners_list,
setOnEventListenerFuncname,
setOnEventListenerFunc,
width='100%') )
class ProjectConfigurationDialog(gui.GenericDialog, gui.EventSource):
KEY_PRJ_NAME = 'config_project_name'
KEY_ADDRESS = 'config_address'
KEY_PORT = 'config_port'
KEY_MULTIPLE_INSTANCE = 'config_multiple_instance'
KEY_ENABLE_CACHE = 'config_enable_file_cache'
KEY_START_BROWSER = 'config_start_browser'
KEY_RESOURCEPATH = 'config_resourcepath'
def __init__(self, title='', message=''):
super(ProjectConfigurationDialog, self).__init__('Project Configuration', 'Here are the configuration options of the project.', width=500)
gui.EventSource.__init__(self)
#standard configuration
self.configDict = {}
self.configDict[self.KEY_PRJ_NAME] = 'untitled'
self.configDict[self.KEY_ADDRESS] = '0.0.0.0'
self.configDict[self.KEY_PORT] = 8081
self.configDict[self.KEY_MULTIPLE_INSTANCE] = True
self.configDict[self.KEY_ENABLE_CACHE] = True
self.configDict[self.KEY_START_BROWSER] = True
self.configDict[self.KEY_RESOURCEPATH] = "./res/"
self.add_field_with_label( self.KEY_PRJ_NAME, 'Project Name', gui.TextInput() )
self.add_field_with_label( self.KEY_ADDRESS, 'IP address', gui.TextInput() )
self.add_field_with_label( self.KEY_PORT, 'Listen port', gui.SpinBox(8082, 1025, 65535) )
self.add_field_with_label( self.KEY_MULTIPLE_INSTANCE, 'Use single App instance for multiple users', gui.CheckBox(True) )
self.add_field_with_label( self.KEY_ENABLE_CACHE, 'Enable file caching', gui.CheckBox(True) )
self.add_field_with_label( self.KEY_START_BROWSER, 'Start browser automatically', gui.CheckBox(True) )
self.add_field_with_label( self.KEY_RESOURCEPATH, 'Additional resource path', gui.TextInput() )
self.from_dict_to_fields(self.configDict)
def from_dict_to_fields(self, dictionary):
for key in self.inputs.keys():
if key in dictionary.keys():
self.get_field(key).set_value( str( dictionary[key] ) )
def from_fields_to_dict(self):
self.configDict[self.KEY_PRJ_NAME] = self.get_field(self.KEY_PRJ_NAME).get_value()
self.configDict[self.KEY_ADDRESS] = self.get_field(self.KEY_ADDRESS).get_value()
self.configDict[self.KEY_PORT] = int( self.get_field(self.KEY_PORT).get_value() )
self.configDict[self.KEY_MULTIPLE_INSTANCE] = self.get_field(self.KEY_MULTIPLE_INSTANCE).get_value()
self.configDict[self.KEY_ENABLE_CACHE] = self.get_field(self.KEY_ENABLE_CACHE).get_value()
self.configDict[self.KEY_START_BROWSER] = self.get_field(self.KEY_START_BROWSER).get_value()
self.configDict[self.KEY_RESOURCEPATH] = self.get_field(self.KEY_RESOURCEPATH).get_value()
@gui.decorate_event
def confirm_dialog(self, emitter):
"""event called pressing on OK button.
"""
#here the user input is transferred to the dict, ready to use
self.from_fields_to_dict()
return super(ProjectConfigurationDialog,self).confirm_dialog(self)
def show(self, baseAppInstance):
"""Allows to show the widget as root window"""
self.from_dict_to_fields(self.configDict)
super(ProjectConfigurationDialog, self).show(baseAppInstance)
class EditorFileSelectionDialog(gui.FileSelectionDialog):
def __init__(self, title='File dialog', message='Select files and folders',
multiple_selection=True, selection_folder='.', allow_file_selection=True,
allow_folder_selection=True, baseAppInstance = None):
super(EditorFileSelectionDialog, self).__init__( title,
message, multiple_selection, selection_folder,
allow_file_selection, allow_folder_selection)
self.baseAppInstance = baseAppInstance
def show(self, *args):
super(EditorFileSelectionDialog, self).show(self.baseAppInstance)
class EditorFileSaveDialog(gui.FileSelectionDialog, gui.EventSource):
def __init__(self, title='File dialog', message='Select files and folders',
multiple_selection=True, selection_folder='.',
allow_file_selection=True, allow_folder_selection=True, baseAppInstance = None):
super(EditorFileSaveDialog, self).__init__( title, message, multiple_selection, selection_folder,
allow_file_selection, allow_folder_selection)
gui.EventSource.__init__(self)
self.baseAppInstance = baseAppInstance
def show(self, *args):
super(EditorFileSaveDialog, self).show(self.baseAppInstance)
def add_fileinput_field(self, defaultname='untitled'):
self.txtFilename = gui.TextInput()
self.txtFilename.onkeydown.connect(self.on_enter_key_pressed)
self.txtFilename.set_text(defaultname)
self.add_field_with_label("filename","Filename",self.txtFilename)
def get_fileinput_value(self):
return self.get_field('filename').get_value()
def on_enter_key_pressed(self, widget, value, keycode):
if keycode=="13":
self.confirm_value(None)
@gui.decorate_event
def confirm_value(self, widget):
"""event called pressing on OK button.
propagates the string content of the input field
"""
self.hide()
params = (self.fileFolderNavigator.pathEditor.get_text(),)
return params
class WidgetHelper(gui.HBox):
""" Allocates the Widget to which it refers,
interfacing to the user in order to obtain the necessary attribute values
obtains the constructor parameters, asks for them in a dialog
puts the values in an attribute called constructor
"""
def __init__(self, appInstance, widgetClass, **kwargs_to_widget):
self.kwargs_to_widget = kwargs_to_widget
self.appInstance = appInstance
self.widgetClass = widgetClass
super(WidgetHelper, self).__init__()
self.style['display'] = 'block'
self.style['background-color'] = 'white'
self.icon = gui.Image('/editor_resources:widget_%s.png'%self.widgetClass.__name__, width='auto', margin='2px')
self.icon.style['max-width'] = '100%'
self.icon.style['image-rendering'] = 'auto'
self.icon.attributes['draggable'] = 'false'
self.icon.attributes['ondragstart'] = "event.preventDefault();"
self.append(self.icon)
self.attributes.update({'draggable':'true',
'ondragstart':"this.style.cursor='move'; event.dataTransfer.dropEffect = 'move'; event.dataTransfer.setData('application/json', JSON.stringify(['add',event.target.id,(event.clientX),(event.clientY)]));",
'ondragover':"event.preventDefault();",
'ondrop':"event.preventDefault();return false;"})
self.optional_style_dict = {} #this dictionary will contain optional style attributes that have to be added to the widget once created
self.onclick.connect(self.prompt_new_widget)
def build_widget_name_list_from_tree(self, node):
if not hasattr(node, 'attributes'):
return
if not 'editor_varname' in node.attributes.keys():
return
self.varname_list.append(node.attributes['editor_varname'])
for child in node.children.values():
self.build_widget_name_list_from_tree(child)
def prompt_new_widget(self, widget):
self.varname_list = list()
self.build_widget_name_list_from_tree(self.appInstance.project)
self.constructor_parameters_list = self.widgetClass.__init__.__code__.co_varnames[1:] #[1:] removes the self
param_annotation_dict = ''#self.widgetClass.__init__.__annotations__
self.dialog = gui.GenericDialog(title=self.widgetClass.__name__, message='Fill the following parameters list', width='40%')
varNameTextInput = gui.TextInput()
varNameTextInput.attributes['tabindex'] = '1'
varNameTextInput.attributes['autofocus'] = 'autofocus'
self.dialog.add_field_with_label('name', 'Variable name', varNameTextInput)
#for param in self.constructor_parameters_list:
for index in range(0,len(self.widgetClass.__init__._constructor_types)):
param = self.constructor_parameters_list[index]
_typ = self.widgetClass.__init__._constructor_types[index]
note = ' (%s)'%_typ.__name__
editWidget = None
if _typ==int:
editWidget = gui.SpinBox('0',-65536,65535)
elif _typ==bool:
editWidget = gui.CheckBox()
else:
editWidget = gui.TextInput()
editWidget.attributes['tabindex'] = str(index+2)
self.dialog.add_field_with_label(param, param + note, editWidget)
self.dialog.add_field_with_label("editor_newclass", "Overload base class", gui.CheckBox())
self.dialog.confirm_dialog.connect(self.on_dialog_confirm)
self.dialog.show(self.appInstance)
def on_dropped(self, left, top):
self.optional_style_dict['left'] = gui.to_pix(left)
self.optional_style_dict['top'] = gui.to_pix(top)
self.prompt_new_widget(None)
def on_dialog_confirm(self, widget):
""" Here the widget is allocated
"""
variableName = str(self.dialog.get_field("name").get_value())
if re.match('(^[a-zA-Z][a-zA-Z0-9_]*)|(^[_][a-zA-Z0-9_]+)', variableName) == None:
self.errorDialog = gui.GenericDialog("Error", "Please type a valid variable name.", width=350,height=120)
self.errorDialog.show(self.appInstance)
return
if variableName in self.varname_list:
self.errorDialog = gui.GenericDialog("Error", "The typed variable name is already used. Please specify a new name.", width=350,height=150)
self.errorDialog.show(self.appInstance)
return
param_annotation_dict = ''#self.widgetClass.__init__.__annotations__
param_values = []
param_for_constructor = []
for index in range(0,len(self.widgetClass.__init__._constructor_types)):
param = self.constructor_parameters_list[index]
_typ = self.widgetClass.__init__._constructor_types[index]
if _typ==int:
param_for_constructor.append(self.dialog.get_field(param).get_value())
param_values.append(int(self.dialog.get_field(param).get_value()))
elif _typ==bool:
param_for_constructor.append(self.dialog.get_field(param).get_value())
param_values.append(bool(self.dialog.get_field(param).get_value()))
else:#if _typ==str:
param_for_constructor.append("""\'%s\'"""%self.dialog.get_field(param).get_value())
param_values.append(self.dialog.get_field(param).get_value())
#else:
# param_for_constructor.append("""%s"""%self.dialog.get_field(param).get_value())
print(self.constructor_parameters_list)
print(param_values)
#constructor = '%s(%s)'%(self.widgetClass.__name__, ','.join(map(lambda v: str(v), param_values)))
constructor = '(%s)'%(','.join(map(lambda v: str(v), param_for_constructor)))
#here we create and decorate the widget
widget = self.widgetClass(*param_values, **self.kwargs_to_widget)
widget.attributes.update({'editor_constructor':constructor,
'editor_varname':variableName,
'editor_tag_type':'widget',
'editor_newclass':'True' if self.dialog.get_field("editor_newclass").get_value() else 'False',
'editor_baseclass':widget.__class__.__name__}) #__class__.__bases__[0].__name__
#"this.style.cursor='default';this.style['left']=(event.screenX) + 'px'; this.style['top']=(event.screenY) + 'px'; event.preventDefault();return true;"
#if not 'position' in widget.style:
# widget.style['position'] = 'absolute'
#if not 'display' in widget.style:
# widget.style['display'] = 'block'
for key in self.optional_style_dict:
widget.style[key] = self.optional_style_dict[key]
self.optional_style_dict = {}
self.appInstance.add_widget_to_editor(widget)
class WidgetCollection(gui.Widget):
def __init__(self, appInstance, **kwargs):
self.appInstance = appInstance
super(WidgetCollection, self).__init__(**kwargs)
self.lblTitle = gui.Label("Widgets Toolbox")
self.lblTitle.add_class("DialogTitle")
self.widgetsContainer = gui.HBox(width='100%', height='85%')
self.widgetsContainer.style.update({'overflow-y':'scroll',
'overflow-x':'hidden',
'flex-wrap':'wrap',
'background-color':'white'})
self.append([self.lblTitle, self.widgetsContainer])
#load all widgets
self.add_widget_to_collection(gui.HBox, width='250px', height='250px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.VBox, width='250px', height='250px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Widget, width='250px', height='250px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.GridBox, width='250px', height='250px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Button, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.TextInput, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Label, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.ListView, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute', 'border':'1px solid lightgray'})
self.add_widget_to_collection(gui.ListItem)
self.add_widget_to_collection(gui.DropDown, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.DropDownItem)
self.add_widget_to_collection(gui.Image, width='100px', height='100px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.CheckBoxLabel, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.CheckBox, width='30px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.SpinBox, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Slider, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.ColorPicker, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Date, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Link, width='100px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Progress, width='130px', height='30px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.VideoPlayer, width='100px', height='100px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.TableWidget, width='100px', height='100px', style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.Svg, style={'top':'20px', 'left':'20px', 'position':'absolute'})
self.add_widget_to_collection(gui.SvgLine, attributes={'stroke':'black', 'stroke-width':'1'})
self.add_widget_to_collection(gui.SvgCircle)
self.add_widget_to_collection(gui.SvgRectangle)
self.add_widget_to_collection(gui.SvgText)
self.add_widget_to_collection(gui.SvgPath, attributes={'stroke':'black', 'stroke-width':'1'})
def add_widget_to_collection(self, widgetClass, **kwargs_to_widget):
        # create a helper that instantiates the widget on click;
        # the helper has to search for functions that carry the 'return' annotation 'event_listener_setter'
helper = WidgetHelper(self.appInstance, widgetClass, **kwargs_to_widget)
helper.attributes['title'] = widgetClass.__doc__
self.widgetsContainer.append( helper )
class EditorAttributesGroup(gui.Widget):
""" Contains a title and widgets. When the title is clicked, the contained widgets are hidden.
Its scope is to provide a foldable group
"""
def __init__(self, title, **kwargs):
super(EditorAttributesGroup, self).__init__(**kwargs)
self.add_class('.RaisedFrame')
self.style['display'] = 'block'
self.style['overflow'] = 'visible'
self.opened = True
self.title = gui.Label(title)
self.title.add_class("Title")
self.title.style.update({'padding-left':'32px',
'background-image':"url('/editor_resources:minus.png')",
'background-repeat':'no-repeat',
'background-position':'5px',
'border-top':'3px solid lightgray'})
self.title.onclick.connect(self.openClose)
self.append(self.title, '0')
def openClose(self, widget):
self.opened = not self.opened
backgroundImage = "url('/editor_resources:minus.png')" if self.opened else "url('/editor_resources:plus.png')"
self.title.style['background-image'] = backgroundImage
display = 'block' if self.opened else 'none'
for widget in self.children.values():
if widget!=self.title and type(widget)!=str:
widget.style['display'] = display
class EditorAttributes(gui.VBox, gui.EventSource):
""" Contains EditorAttributeInput each one of which notify a new value with an event
"""
def __init__(self, appInstance, **kwargs):
super(EditorAttributes, self).__init__(**kwargs)
gui.EventSource.__init__(self)
self.EVENT_ATTRIB_ONCHANGE = 'on_attribute_changed'
#self.style['overflow-y'] = 'scroll'
self.style['justify-content'] = 'flex-start'
self.style['-webkit-justify-content'] = 'flex-start'
self.titleLabel = gui.Label('Attributes editor', width='100%')
self.titleLabel.add_class("DialogTitle")
self.infoLabel = gui.Label('Selected widget: None')
self.infoLabel.style['font-weight'] = 'bold'
self.append([self.titleLabel, self.infoLabel])
self.titleLabel.style['order'] = '-1'
self.titleLabel.style['-webkit-order'] = '-1'
self.infoLabel.style['order'] = '0'
self.infoLabel.style['-webkit-order'] = '0'
self.attributesInputs = list()
#load editable attributes
self.append(self.titleLabel)
self.attributeGroups = {}
for attribute in html_helper.editorAttributeList:
attributeName = attribute[0]
attributeValue = attribute[1]
attributeEditor = EditorAttributeInput(attributeName, attributeValue, appInstance)
attributeEditor.on_attribute_changed.connect(self.onattribute_changed)
attributeEditor.on_attribute_remove.connect(self.onattribute_remove)
#attributeEditor.style['display'] = 'none'
if not attributeValue['group'] in self.attributeGroups.keys():
groupContainer = EditorAttributesGroup(attributeValue['group'], width='100%')
self.attributeGroups[attributeValue['group']] = groupContainer
self.append(groupContainer)
groupContainer.style['order'] = str(html_helper.editorAttributesGroupOrdering[attributeValue['group']])
groupContainer.style['-webkit-order'] = str(html_helper.editorAttributesGroupOrdering[attributeValue['group']])
self.attributeGroups[attributeValue['group']].append(attributeEditor)
self.attributesInputs.append(attributeEditor)
#this function is called by an EditorAttributeInput change event and propagates to the listeners
#adding as first parameter the tag to which it refers
#widgetAttributeMember can be 'style' or 'attributes'
@gui.decorate_event
def onattribute_changed(self, widget, widgetAttributeMember, attributeName, newValue):
print("setting attribute name: %s value: %s attributeMember: %s"%(attributeName, newValue, widgetAttributeMember))
getattr(self.targetWidget, widgetAttributeMember)[attributeName] = str(newValue)
return (widgetAttributeMember, attributeName, newValue)
@gui.decorate_event
def onattribute_remove(self, widget, widgetAttributeMember, attributeName):
if attributeName in getattr(self.targetWidget, widgetAttributeMember):
del getattr(self.targetWidget, widgetAttributeMember)[attributeName]
return (widgetAttributeMember, attributeName)
def set_widget(self, widget):
self.infoLabel.set_text("Selected widget: %s"%widget.attributes['editor_varname'])
self.attributes['selected_widget_id'] = widget.identifier
self.targetWidget = widget
for w in self.attributesInputs:
w.style['display'] = 'block'
#w.style['visibility'] = 'visible'
if w.attributeDict['additional_data'].get('applies_to', None):
if not type(widget) in w.attributeDict['additional_data'].get('applies_to', None):
w.style['display'] = 'none'
w.set_from_dict(getattr(widget, w.attributeDict['affected_widget_attribute']))
class CssSizeInput(gui.Widget, gui.EventSource):
def __init__(self, appInstance, **kwargs):
super(CssSizeInput, self).__init__(**kwargs)
gui.EventSource.__init__(self)
self.appInstance = appInstance
self.set_layout_orientation(gui.Widget.LAYOUT_HORIZONTAL)
self.style['display'] = 'block'
self.style['overflow'] = 'hidden'
self.numInput = gui.SpinBox('0',-999999999, 999999999, 1, width='60%', height='100%')
self.numInput.onchange.connect(self.onchange)
self.numInput.style['text-align'] = 'right'
self.append(self.numInput)
self.dropMeasureUnit = gui.DropDown(width='40%', height='100%')
self.dropMeasureUnit.append( gui.DropDownItem('px'), 'px' )
self.dropMeasureUnit.append( gui.DropDownItem('%'), '%' )
self.dropMeasureUnit.select_by_key('px')
self.dropMeasureUnit.onchange.connect(self.onchange)
self.append(self.dropMeasureUnit)
@gui.decorate_event
def onchange(self, widget, new_value):
new_size = str(self.numInput.get_value()) + str(self.dropMeasureUnit.get_value())
return (new_size,)
def set_value(self, value):
"""The value have to be in the form '10px' or '10%', so numeric value plus measure unit
"""
v = 0
measure_unit = 'px'
try:
v = int(float(value.replace('px', '')))
except ValueError:
try:
v = int(float(value.replace('%', '')))
measure_unit = '%'
except ValueError:
pass
self.numInput.set_value(v)
self.dropMeasureUnit.set_value(measure_unit)
class UrlPathInput(gui.Widget, gui.EventSource):
def __init__(self, appInstance, **kwargs):
super(UrlPathInput, self).__init__(**kwargs)
gui.EventSource.__init__(self)
self.appInstance = appInstance
self.set_layout_orientation(gui.Widget.LAYOUT_HORIZONTAL)
self.style['display'] = 'block'
self.style['overflow'] = 'hidden'
self.txtInput = gui.TextInput(width='80%', height='100%')
self.txtInput.style['float'] = 'left'
self.txtInput.onchange.connect(self.on_txt_changed)
self.append(self.txtInput)
self.btFileFolderSelection = gui.Widget(width='20%', height='100%')
self.btFileFolderSelection.style.update({'background-repeat':'round',
'background-image':"url('/res:folder.png')",
'background-color':'transparent'})
self.append(self.btFileFolderSelection)
self.btFileFolderSelection.onclick.connect(self.on_file_selection_bt_pressed)
self.selectionDialog = gui.FileSelectionDialog('Select a file', '', False, './', True, False)
self.selectionDialog.confirm_value.connect(self.file_dialog_confirmed)
@gui.decorate_event
def onchange(self, widget, value):
return (value,)
def on_txt_changed(self, widget, value):
return self.onchange(None, value)
def on_file_selection_bt_pressed(self, widget):
self.selectionDialog.show(self.appInstance)
def file_dialog_confirmed(self, widget, fileList):
if len(fileList)>0:
self.txtInput.set_value("url('/editor_resources:" + fileList[0].split('/')[-1].split('\\')[-1] + "')")
return self.onchange(None, self.txtInput.get_value())
def set_value(self, value):
self.txtInput.set_value(value)
#widget that allows to edit a specific html and css attributes
# it has a descriptive label, an edit widget (TextInput, SpinBox..) based on the 'type' and a title
class EditorAttributeInput(gui.Widget, gui.EventSource):
def __init__(self, attributeName, attributeDict, appInstance=None):
super(EditorAttributeInput, self).__init__()
gui.EventSource.__init__(self)
self.set_layout_orientation(gui.Widget.LAYOUT_HORIZONTAL)
self.style.update({'display':'block',
'overflow':'auto',
'margin':'2px',
'outline':'1px solid lightgray'})
self.attributeName = attributeName
self.attributeDict = attributeDict
self.EVENT_ATTRIB_ONCHANGE = 'on_attribute_changed'
self.EVENT_ATTRIB_ONREMOVE = 'onremove_attribute'
self.removeAttribute = gui.Image('/editor_resources:delete.png', width='5%')
self.removeAttribute.attributes['title'] = 'Remove attribute from this widget.'
self.removeAttribute.onclick.connect(self.on_attribute_remove)
self.append(self.removeAttribute)
self.label = gui.Label(attributeName, width='45%', height=22, margin='0px')
self.label.style['overflow'] = 'hidden'
self.label.style['font-size'] = '13px'
self.label.style['outline'] = '1px solid lightgray'
self.append(self.label)
self.inputWidget = None
#'background-repeat':{'type':str, 'description':'The repeat behaviour of an optional background image', ,'additional_data':{'affected_widget_attribute':'style', 'possible_values':'repeat | repeat-x | repeat-y | no-repeat | inherit'}},
if attributeDict['type'] in (bool,int,float,gui.ColorPicker,gui.DropDown,'url_editor','css_size'):
if attributeDict['type'] == bool:
self.inputWidget = gui.CheckBox('checked')
if attributeDict['type'] == int or attributeDict['type'] == float:
self.inputWidget = gui.SpinBox(attributeDict['additional_data']['default'], attributeDict['additional_data']['min'], attributeDict['additional_data']['max'], attributeDict['additional_data']['step'])
if attributeDict['type'] == gui.ColorPicker:
self.inputWidget = gui.ColorPicker()
if attributeDict['type'] == gui.DropDown:
self.inputWidget = gui.DropDown()
for value in attributeDict['additional_data']['possible_values']:
self.inputWidget.append(gui.DropDownItem(value),value)
if attributeDict['type'] == 'url_editor':
self.inputWidget = UrlPathInput(appInstance)
if attributeDict['type'] == 'css_size':
self.inputWidget = CssSizeInput(appInstance)
else: #default editor is string
self.inputWidget = gui.TextInput()
self.inputWidget.onchange.connect(self.on_attribute_changed)
self.inputWidget.set_size('50%','22px')
self.inputWidget.attributes['title'] = attributeDict['description']
self.label.attributes['title'] = attributeDict['description']
self.append(self.inputWidget)
self.inputWidget.style['float'] = 'right'
self.style['display'] = 'block'
self.set_valid(False)
def set_valid(self, valid=True):
self.label.style['opacity'] = '1.0'
if 'display' in self.removeAttribute.style:
del self.removeAttribute.style['display']
if not valid:
self.label.style['opacity'] = '0.5'
self.removeAttribute.style['display'] = 'none'
@gui.decorate_event
def on_attribute_remove(self, widget):
self.set_valid(False)
return (self.attributeDict['affected_widget_attribute'], self.attributeName)
def set_from_dict(self, dictionary):
self.inputWidget.set_value('')
self.set_valid(False)
if self.attributeName in dictionary:
self.set_valid()
self.inputWidget.set_value(dictionary[self.attributeName])
def set_value(self, value):
self.set_valid()
self.inputWidget.set_value(value)
@gui.decorate_event
def on_attribute_changed(self, widget, value):
self.set_valid()
return (self.attributeDict['affected_widget_attribute'], self.attributeName, value)
```
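The editor widgets above lean on remi's signal/listener mechanism (`onclick.connect`, `@gui.decorate_event`). A minimal standalone sketch of that pattern, separate from the editor itself:
```python
# Minimal remi app illustrating the signal/listener pattern used above.
import remi.gui as gui
from remi import App, start

class ConnectDemo(App):
    def main(self):
        container = gui.VBox(width=300)
        button = gui.Button('Press me')
        label = gui.Label('waiting...')
        # connect the button's onclick signal to a listener, which is what
        # SignalConnection does programmatically via dropdown selection
        button.onclick.connect(lambda emitter: label.set_text('clicked'))
        container.append([button, label])
        return container

if __name__ == '__main__':
    start(ConnectDemo, address='127.0.0.1', port=8082)
```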
#### File: remi/examples/widgets_overview_app.py
```python
import remi.gui as gui
from remi import start, App
from threading import Timer
class MyApp(App):
def __init__(self, *args):
super(MyApp, self).__init__(*args)
def idle(self):
self.counter.set_text('Running Time: ' + str(self.count))
self.progress.set_value(self.count%100)
def main(self):
# the margin 0px auto centers the main container
verticalContainer = gui.Widget(width=540, margin='0px auto', style={'display': 'block', 'overflow': 'hidden'})
horizontalContainer = gui.Widget(width='100%', layout_orientation=gui.Widget.LAYOUT_HORIZONTAL, margin='0px', style={'display': 'block', 'overflow': 'auto'})
subContainerLeft = gui.Widget(width=320, style={'display': 'block', 'overflow': 'auto', 'text-align': 'center'})
self.img = gui.Image('/res:logo.png', height=100, margin='10px')
self.img.onclick.connect(self.on_img_clicked)
self.table = gui.Table.new_from_list([('ID', 'First Name', 'Last Name'),
('101', 'Danny', 'Young'),
('102', 'Christine', 'Holand'),
('103', 'Lars', 'Gordon'),
('104', 'Roberto', 'Robitaille'),
('105', 'Maria', 'Papadopoulos')], width=300, height=200, margin='10px')
self.table.on_table_row_click.connect(self.on_table_row_click)
        # the arguments are width - height - layoutOrientationHorizontal
subContainerRight = gui.Widget(style={'width': '220px', 'display': 'block', 'overflow': 'auto', 'text-align': 'center'})
self.count = 0
self.counter = gui.Label('', width=200, height=30, margin='10px')
self.lbl = gui.Label('This is a LABEL!', width=200, height=30, margin='10px')
self.bt = gui.Button('Press me!', width=200, height=30, margin='10px')
# setting the listener for the onclick event of the button
self.bt.onclick.connect(self.on_button_pressed)
self.txt = gui.TextInput(width=200, height=30, margin='10px')
self.txt.set_text('This is a TEXTAREA')
self.txt.onchange.connect(self.on_text_area_change)
self.spin = gui.SpinBox(1, 0, 100, width=200, height=30, margin='10px')
self.spin.onchange.connect(self.on_spin_change)
self.progress = gui.Progress(1, 100, width=200, height=5)
self.check = gui.CheckBoxLabel('Label checkbox', True, width=200, height=30, margin='10px')
self.check.onchange.connect(self.on_check_change)
self.btInputDiag = gui.Button('Open InputDialog', width=200, height=30, margin='10px')
self.btInputDiag.onclick.connect(self.open_input_dialog)
self.btFileDiag = gui.Button('File Selection Dialog', width=200, height=30, margin='10px')
self.btFileDiag.onclick.connect(self.open_fileselection_dialog)
self.btUploadFile = gui.FileUploader('./', width=200, height=30, margin='10px')
self.btUploadFile.onsuccess.connect(self.fileupload_on_success)
self.btUploadFile.onfailed.connect(self.fileupload_on_failed)
items = ('<NAME>','<NAME>','<NAME>','<NAME>')
self.listView = gui.ListView.new_from_list(items, width=300, height=120, margin='10px')
self.listView.onselection.connect(self.list_view_on_selected)
self.link = gui.Link("http://localhost:8081", "A link to here", width=200, height=30, margin='10px')
self.dropDown = gui.DropDown.new_from_list(('DropDownItem 0', 'DropDownItem 1'),
width=200, height=20, margin='10px')
self.dropDown.onchange.connect(self.drop_down_changed)
self.dropDown.select_by_value('DropDownItem 0')
self.slider = gui.Slider(10, 0, 100, 5, width=200, height=20, margin='10px')
self.slider.onchange.connect(self.slider_changed)
self.colorPicker = gui.ColorPicker('#ffbb00', width=200, height=20, margin='10px')
self.colorPicker.onchange.connect(self.color_picker_changed)
self.date = gui.Date('2015-04-13', width=200, height=20, margin='10px')
self.date.onchange.connect(self.date_changed)
self.video = gui.Widget( _type='iframe', width=290, height=200, margin='10px')
self.video.attributes['src'] = "https://drive.google.com/file/d/0B0J9Lq_MRyn4UFRsblR3UTBZRHc/preview"
self.video.attributes['width'] = '100%'
self.video.attributes['height'] = '100%'
self.video.attributes['controls'] = 'true'
self.video.style['border'] = 'none'
self.tree = gui.TreeView(width='100%', height=300)
ti1 = gui.TreeItem("Item1")
ti2 = gui.TreeItem("Item2")
ti3 = gui.TreeItem("Item3")
subti1 = gui.TreeItem("Sub Item1")
subti2 = gui.TreeItem("Sub Item2")
subti3 = gui.TreeItem("Sub Item3")
subti4 = gui.TreeItem("Sub Item4")
subsubti1 = gui.TreeItem("Sub Sub Item1")
subsubti2 = gui.TreeItem("Sub Sub Item2")
subsubti3 = gui.TreeItem("Sub Sub Item3")
self.tree.append([ti1, ti2, ti3])
ti2.append([subti1, subti2, subti3, subti4])
subti4.append([subsubti1, subsubti2, subsubti3])
        # appending widgets to a container; a whole list of widgets can be appended at once
subContainerRight.append([self.counter, self.lbl, self.bt, self.txt, self.spin, self.progress, self.check, self.btInputDiag, self.btFileDiag])
# use a defined key as we replace this widget later
fdownloader = gui.FileDownloader('download test', '../remi/res/logo.png', width=200, height=30, margin='10px')
subContainerRight.append(fdownloader, key='file_downloader')
subContainerRight.append([self.btUploadFile, self.dropDown, self.slider, self.colorPicker, self.date, self.tree])
self.subContainerRight = subContainerRight
subContainerLeft.append([self.img, self.table, self.listView, self.link, self.video])
horizontalContainer.append([subContainerLeft, subContainerRight])
menu = gui.Menu(width='100%', height='30px')
m1 = gui.MenuItem('File', width=100, height=30)
m2 = gui.MenuItem('View', width=100, height=30)
m2.onclick.connect(self.menu_view_clicked)
m11 = gui.MenuItem('Save', width=100, height=30)
m12 = gui.MenuItem('Open', width=100, height=30)
m12.onclick.connect(self.menu_open_clicked)
m111 = gui.MenuItem('Save', width=100, height=30)
m111.onclick.connect(self.menu_save_clicked)
m112 = gui.MenuItem('Save as', width=100, height=30)
m112.onclick.connect(self.menu_saveas_clicked)
m3 = gui.MenuItem('Dialog', width=100, height=30)
m3.onclick.connect(self.menu_dialog_clicked)
menu.append([m1, m2, m3])
m1.append([m11, m12])
m11.append([m111, m112])
menubar = gui.MenuBar(width='100%', height='30px')
menubar.append(menu)
verticalContainer.append([menubar, horizontalContainer])
#this flag will be used to stop the display_counter Timer
self.stop_flag = False
# kick of regular display of counter
self.display_counter()
# returning the root widget
return verticalContainer
def display_counter(self):
self.count += 1
if not self.stop_flag:
Timer(1, self.display_counter).start()
def menu_dialog_clicked(self, widget):
self.dialog = gui.GenericDialog(title='Dialog Box', message='Click Ok to transfer content to main page', width='500px')
self.dtextinput = gui.TextInput(width=200, height=30)
self.dtextinput.set_value('Initial Text')
self.dialog.add_field_with_label('dtextinput', 'Text Input', self.dtextinput)
self.dcheck = gui.CheckBox(False, width=200, height=30)
self.dialog.add_field_with_label('dcheck', 'Label Checkbox', self.dcheck)
values = ('<NAME>', '<NAME>', '<NAME>', '<NAME>')
self.dlistView = gui.ListView.new_from_list(values, width=200, height=120)
self.dialog.add_field_with_label('dlistView', 'Listview', self.dlistView)
self.ddropdown = gui.DropDown.new_from_list(('DropDownItem 0', 'DropDownItem 1'),
width=200, height=20)
self.dialog.add_field_with_label('ddropdown', 'Dropdown', self.ddropdown)
self.dspinbox = gui.SpinBox(min=0, max=5000, width=200, height=20)
self.dspinbox.set_value(50)
self.dialog.add_field_with_label('dspinbox', 'Spinbox', self.dspinbox)
self.dslider = gui.Slider(10, 0, 100, 5, width=200, height=20)
self.dspinbox.set_value(50)
self.dialog.add_field_with_label('dslider', 'Slider', self.dslider)
self.dcolor = gui.ColorPicker(width=200, height=20)
self.dcolor.set_value('#ffff00')
self.dialog.add_field_with_label('dcolor', 'Colour Picker', self.dcolor)
self.ddate = gui.Date(width=200, height=20)
self.ddate.set_value('2000-01-01')
self.dialog.add_field_with_label('ddate', 'Date', self.ddate)
self.dialog.confirm_dialog.connect(self.dialog_confirm)
self.dialog.show(self)
def dialog_confirm(self, widget):
result = self.dialog.get_field('dtextinput').get_value()
self.txt.set_value(result)
result = self.dialog.get_field('dcheck').get_value()
self.check.set_value(result)
result = self.dialog.get_field('ddropdown').get_value()
self.dropDown.select_by_value(result)
result = self.dialog.get_field('dspinbox').get_value()
self.spin.set_value(result)
result = self.dialog.get_field('dslider').get_value()
self.slider.set_value(result)
result = self.dialog.get_field('dcolor').get_value()
self.colorPicker.set_value(result)
result = self.dialog.get_field('ddate').get_value()
self.date.set_value(result)
result = self.dialog.get_field('dlistView').get_value()
self.listView.select_by_value(result)
# listener function
def on_img_clicked(self, widget):
self.lbl.set_text('Image clicked!')
def on_table_row_click(self, table, row, item):
self.lbl.set_text('Table Item clicked: ' + item.get_text())
def on_button_pressed(self, widget):
self.lbl.set_text('Button pressed! ')
self.bt.set_text('Hi!')
def on_text_area_change(self, widget, newValue):
self.lbl.set_text('Text Area value changed!')
def on_spin_change(self, widget, newValue):
self.lbl.set_text('SpinBox changed, new value: ' + str(newValue))
def on_check_change(self, widget, newValue):
self.lbl.set_text('CheckBox changed, new value: ' + str(newValue))
def open_input_dialog(self, widget):
self.inputDialog = gui.InputDialog('Input Dialog', 'Your name?',
initial_value='type here',
width=500, height=160)
self.inputDialog.confirm_value.connect(
self.on_input_dialog_confirm)
        # the Input Dialog widget is shown here
self.inputDialog.show(self)
def on_input_dialog_confirm(self, widget, value):
self.lbl.set_text('Hello ' + value)
def open_fileselection_dialog(self, widget):
self.fileselectionDialog = gui.FileSelectionDialog('File Selection Dialog', 'Select files and folders', False,
'.')
self.fileselectionDialog.confirm_value.connect(
self.on_fileselection_dialog_confirm)
        # the File Selection Dialog widget is shown here
self.fileselectionDialog.show(self)
def on_fileselection_dialog_confirm(self, widget, filelist):
# a list() of filenames and folders is returned
self.lbl.set_text('Selected files: %s' % ','.join(filelist))
if len(filelist):
f = filelist[0]
# replace the last download link
fdownloader = gui.FileDownloader("download selected", f, width=200, height=30)
self.subContainerRight.append(fdownloader, key='file_downloader')
def list_view_on_selected(self, widget, selected_item_key):
""" The selection event of the listView, returns a key of the clicked event.
You can retrieve the item rapidly
"""
self.lbl.set_text('List selection: ' + self.listView.children[selected_item_key].get_text())
def drop_down_changed(self, widget, value):
self.lbl.set_text('New Combo value: ' + value)
def slider_changed(self, widget, value):
self.lbl.set_text('New slider value: ' + str(value))
def color_picker_changed(self, widget, value):
self.lbl.set_text('New color value: ' + value)
def date_changed(self, widget, value):
self.lbl.set_text('New date value: ' + value)
def menu_save_clicked(self, widget):
self.lbl.set_text('Menu clicked: Save')
def menu_saveas_clicked(self, widget):
self.lbl.set_text('Menu clicked: Save As')
def menu_open_clicked(self, widget):
self.lbl.set_text('Menu clicked: Open')
def menu_view_clicked(self, widget):
self.lbl.set_text('Menu clicked: View')
def fileupload_on_success(self, widget, filename):
self.lbl.set_text('File upload success: ' + filename)
def fileupload_on_failed(self, widget, filename):
self.lbl.set_text('File upload failed: ' + filename)
def on_close(self):
""" Overloading App.on_close event to stop the Timer.
"""
self.stop_flag = True
super(MyApp, self).on_close()
if __name__ == "__main__":
# starts the webserver
# optional parameters
# start(MyApp,address='127.0.0.1', port=8081, multiple_instance=False,enable_file_cache=True, update_interval=0.1, start_browser=True)
import ssl
start(MyApp, debug=True, address='0.0.0.0', port=8081, start_browser=True, multiple_instance=True)
``` |
{
"source": "2percentmilk/dataclass_to_graphql_queries",
"score": 3
} |
#### File: 2percentmilk/dataclass_to_graphql_queries/query_builder.py
```python
import inspect
from dataclasses import dataclass, fields, field
from typing import Optional, Type, List, Any, Union
from enums import RequestType
type_map = {
'str': 'String',
'bool': 'Boolean',
'Id': "ID",
'datetime': 'ISO8601DateTime',
'int': 'Int'
}
@dataclass
class AttrVal:
val: Optional[str]
attr: str
col: Optional[Any] = None
@dataclass
class QueryVars:
query: str
var: dict
@dataclass
class MOD:
type: RequestType
name: str
variables: Optional[dict] = None
exclude_type: Optional[List[Type]] = None
exclude_name: Optional[List[str]] = None
modifiers: Optional[dict] = None
@dataclass
class SimpleBuilder:
"""
Builds Query(Query) for a GraphQl Api
"""
data_returns: list
state: dict = field(default_factory=dict)
vars: list = field(default_factory=list)
query: list = field(default_factory=list)
def add_cls_value(self, cls, q: Union[List[AttrVal], AttrVal]):
self.add_variable(cls, q)
if not self.state.get(cls.__name__):
self.state[cls.__name__] = {'cls': {}, 'attr': {}}
self.state[cls.__name__]['cls'] = {q.val: q.attr}
def add_attr_value(self, cls, q: Union[List[AttrVal], AttrVal]):
self.add_variable(cls, q)
if not self.state.get(cls.__name__):
self.state[cls.__name__] = {'cls': {}, 'attr': {}}
if not self.state[cls.__name__]['attr'].get(q.col):
self.state[cls.__name__]['attr'][q.col] = {}
self.state[cls.__name__]['attr'][q.col][q.val] = q.attr
def print_vars(self):
return f"({', '.join(self.vars)})"
def add_variable(self, cls, q: Union[List[AttrVal], AttrVal]):
for attr, t in inspect.get_annotations(cls).items():
if attr == q.val:
self.vars.append(f'{q.attr}: {type_map.get(t.__name__)}')
def define_pagination(self, page: int, page_size: int) -> str:
return f'page: {page} page_size: {page_size}'
def variables_on_cls(self, vars, dc_name):
if dc_name not in vars:
pass
else:
var_string = []
if cls_vars := vars.get(dc_name, {}).get('cls'):
for k, v in cls_vars.items():
var_string.append(f'{k}: ${v}')
return f"{dc_name.lower()}({', '.join(var_string)})"
def variables_on_cls_val(self, vars, dc_name, field, level):
if dc_name not in vars:
pass
else:
var_string = []
if cls_vars := vars.get(dc_name)['attr']:
for k, v in cls_vars.items():
if k == field.name:
for i, j in v.items():
if i not in ['pagination', 'order_by', 'order_by_direction']:
var_string.append(f'{i}: ${j}')
elif i == 'pagination':
var_string.append(f'{i}: {{{j}}}')
                            elif i in ['order_by', 'order_by_direction']:
var_string.append(f'{i}: {j}')
return f'''{" " * level}{field.name.lower()}({', '.join(var_string)})'''
def pprint(self, field: field, level: int, overwrite: str):
if overwrite:
return overwrite
else:
return f'{" " * level}{field.name.lower()}'
def exclude(self, mods: MOD, dc: dataclass, field, level: int):
_type = field.type
if isinstance(field.type, list):
_type = field.type[0]
overwrite_value = self.variables_on_cls_val(mods.variables, dc.__name__, field, level)
        if not mods.exclude_type and not mods.exclude_name:
            self.query.append(self.pprint(field, level, overwrite_value))
            return True
        elif mods.exclude_type and any(_type == _t for _t in mods.exclude_type):
            return False
        elif mods.exclude_name and field.name in mods.exclude_name:
            return False
else:
self.query.append(self.pprint(field, level, overwrite_value))
return True
def r_fields(self, dc: dataclass, level: Optional[int] = 2, mod: Optional[MOD] = None, count: int = 0):
if count == 0:
self.query.append(f'{mod.type:s} {mod.name:s}')
self.query.append(f'{" " * (level - 2)}{{')
self.query.append(self.variables_on_cls(mod.variables, dc.__name__))
self.query.append(f'{" " * (level - 2)}{{')
for field in fields(dc):
if isinstance(field.type, list):
if self.exclude(mod, dc, field, level):
self.r_fields(field.type[0], level=level + 2, mod=mod, count=count + 1)
elif any([field.type == _t for _t in self.data_returns]):
if self.exclude(mod, dc, field, level):
self.r_fields(field.type, level=level + 2, mod=mod, count=count + 1)
elif isinstance(field.type, dict):
pass # Unable to convert dict to graphql schema
else:
self.exclude(mod, dc, field, level)
self.query.append(f'{" " * (level - 2)}}}')
def print_query(self):
print(' '.join([l for l in self.query if l]))
def print_query_readable(self):
for l in self.query:
if l:
print(l)
``` |
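A hypothetical usage sketch for the builder above, with the classes in scope; the `User` and `Post` dataclasses are illustrative assumptions, and `RequestType.QUERY` is assumed to be a string-valued enum member that formats as the literal 'query':
```python
# Hypothetical usage; User/Post and the enum value are assumptions.
from dataclasses import dataclass

@dataclass
class Post:
    id: int
    title: str

@dataclass
class User:
    id: int
    name: str
    posts: [Post]  # list-style annotation, which r_fields() recurses into

builder = SimpleBuilder(data_returns=[Post])
mod = MOD(type=RequestType.QUERY, name='getUser',
          variables={}, exclude_type=[], exclude_name=[])
builder.r_fields(User, mod=mod)
builder.print_query_readable()
```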
{
"source": "2phi/waec",
"score": 2
} |
#### File: waec/weac/eigensystem.py
```python
import numpy as np
# Project imports
from weac.tools import gerling, calc_center_of_gravity, load_dummy_profile
class Eigensystem:
"""
Base class for a layered beam on an elastic foundation.
Provides geometry, material and loading attributes, and methods
for the assembly of a fundamental system.
Attributes
----------
g : float
        Gravitational acceleration (mm/s^2). Default is 9810.
lski : float
        Effective out-of-plane length of skis (mm). Default is 1000.
tol : float
        Relative Romberg integration tolerance. Default is 1e-3.
system : str
Type of boundary value problem. Default is 'pst-'.
weak : dict
        Dictionary that holds the weak-layer properties Young's
        modulus (MPa) and Poisson's ratio. Defaults are 0.25
        and 0.25, respectively.
t : float
Weak-layer thickness (mm). Default is 30.
kn : float
Compressive foundation (weak-layer) stiffness (N/mm^3).
kt : float
Shear foundation (weak-layer) stiffness (N/mm^3).
slab : ndarray
Matrix that holds the elastic properties of all slab layers.
        Columns are density (kg/m^3), layer height (mm), Young's
modulus (MPa), shear modulus (MPa), and Poisson's ratio.
k : float
Shear correction factor of the slab. Default is 5/6.
h : float
Slab thickness (mm). Default is 300.
zs : float
Z-coordinate of the center of gravity of the slab (mm).
A11 : float
Extensional stiffness of the slab (N/mm).
B11 : float
Bending-extension coupling stiffness of the slab (N).
D11 : float
Bending stiffness of the slab (Nmm).
kA55 : float
Shear stiffness of the slab (N/mm).
E0 : float
Characteristic stiffness value (N).
ewC : ndarray
List of complex eigenvalues.
ewR : ndarray
List of real eigenvalues.
evC : ndarray
Matrix with eigenvectors corresponding to complex
eigenvalues as columns.
evR : ndarray
Matrix with eigenvectors corresponding to real
eigenvalues as columns.
sC : float
X-coordinate shift (mm) of complex parts of the solution.
Used for numerical stability.
sR : float
X-coordinate shift (mm) of real parts of the solution.
Used for numerical stability.
sysmat : ndarray
System matrix.
"""
def __init__(self, system='pst-'):
"""
Initialize eigensystem with user input.
Arguments
---------
system : {'pst-', '-pst', 'skier', 'skiers'}, optional
Type of system to analyse: PST cut from the right (pst-),
            PST cut from the left (-pst), one skier on an infinite
            slab (skier) or multiple skiers on an infinite slab (skiers).
Default is 'pst-'.
"""
# Assign global attributes
        self.g = 9810 # Gravitation (mm/s^2)
self.lski = 1000 # Effective out-of-plane length of skis (mm)
self.tol = 1e-3 # Relative Romberg integration tolerance
self.system = system # 'pst-', '-pst', 'skier', 'skiers'
# Initialize weak-layer attributes that will be filled later
self.weak = False # Weak-layer properties dictionary
self.t = False # Weak-layer thickness (mm)
self.kn = False # Weak-layer compressive stiffness
self.kt = False # Weak-layer shear stiffness
# Initialize slab attributes
self.p = 0 # Surface line load (N/mm)
        self.slab = False # Slab properties matrix
self.k = False # Slab shear correction factor
self.h = False # Total slab height (mm)
self.zs = False # Z-coordinate of slab center of gravity (mm)
self.A11 = False # Slab extensional stiffness
self.B11 = False # Slab bending-extension coupling stiffness
self.D11 = False # Slab bending stiffness
self.kA55 = False # Slab shear stiffness
self.E0 = False # Stiffness determinant
        # Initialize eigensystem attributes
self.ewC = False # Complex eigenvalues
self.ewR = False # Real eigenvalues
self.evC = False # Complex eigenvectors
self.evR = False # Real eigenvectors
self.sC = False # Stability shift of complex eigenvalues
self.sR = False # Stability shift of real eigenvalues
self.sysmat = False # System matrix
def set_foundation_properties(self, t=30, E=0.25, nu=0.25, update=False):
"""
Set material properties and geometry of foundation (weak layer).
Arguments
---------
t : float, optional
Weak-layer thickness (mm). Default is 30.
E : float, optional
Weak-layer Young modulus (MPa). Default is 0.25.
nu : float, optional
Weak-layer Poisson ratio. Default is 0.25.
update : bool, optional
If true, recalculate the fundamental system after
foundation properties have changed.
"""
# Geometry
self.t = t # Weak-layer thickness (mm)
# Material properties
self.weak = {
'nu': nu, # Poisson's ratio (-)
'E': E # Young's modulus (MPa)
}
# Recalculate the fundamental system after properties have changed
if update:
self.calc_fundamental_system()
def set_beam_properties(self, layers, C0=6.0, C1=4.60,
nu=0.25, update=False):
"""
        Set material properties and geometry of beam (slab).
Arguments
---------
layers : list or str
2D list of top-to-bottom layer densities and thicknesses.
Columns are density (kg/m^3) and thickness (mm). One row
corresponds to one layer. If entered as str, last split
must be available in database.
C0 : float, optional
Multiplicative constant of Young modulus parametrization
according to Gerling et al. (2017). Default is 6.0.
C1 : float, optional
Exponent of Young modulus parameterization according to
Gerling et al. (2017). Default is 4.6.
nu : float, optional
            Poisson's ratio. Default is 0.25.
update : bool, optional
If true, recalculate the fundamental system after
foundation properties have changed.
"""
if isinstance(layers, str):
# Read layering and Young's modulus from database
layers, E = load_dummy_profile(layers.split()[-1])
else:
# Compute Young's modulus from density parametrization
layers = np.array(layers)
E = gerling(layers[:, 0], C0=C0, C1=C1) # Young's modulus
# Derive other elastic properties
        nu = nu*np.ones(layers.shape[0]) # Global Poisson's ratio
G = E/(2*(1 + nu)) # Shear modulus
self.k = 5/6 # Shear correction factor
# Compute total slab thickness and center of gravity
self.h, self.zs = calc_center_of_gravity(layers)
# Assemble layering into matrix (top to bottom)
# Columns are density (kg/m^3), layer thickness (mm)
# Young's modulus (MPa), shear modulus (MPa), and
# Poisson's ratio
self.slab = np.vstack([layers.T, E, G, nu]).T
# Recalculate the fundamental system after properties have changed
if update:
self.calc_fundamental_system()
def set_surface_load(self, p):
"""
Set surface line load.
Define a distributed surface load (N/mm) that acts
in vertical (gravity) direction on the top surface
of the slab.
Arguments
---------
p : float
Surface line load (N/mm) that acts in vertical
            (gravity) direction on the top surface of the
slab.
"""
self.p = p
def calc_foundation_stiffness(self):
"""Compute foundation normal and shear stiffness."""
# Elastic moduli (MPa) under plane-strain conditions
G = self.weak['E']/(2*(1 + self.weak['nu'])) # Shear modulus
E = self.weak['E']/(1 - self.weak['nu']**2) # Young's modulus
# Foundation (weak layer) stiffnesses (N/mm^3)
self.kn = E/self.t # Normal stiffness
self.kt = G/self.t # Shear stiffness
def calc_laminate_stiffness_matrix(self):
"""
Provide ABD matrix.
Return plane-strain laminate stiffness matrix (ABD matrix).
"""
# Number of plies and ply thicknesses (top to bottom)
n = self.slab.shape[0]
t = self.slab[:, 1]
# Calculate ply coordinates (top to bottom) in coordinate system
# with downward pointing z-axis (z-list will be negative to positive)
z = np.zeros(n + 1)
for j in range(n + 1):
z[j] = -self.h/2 + sum(t[0:j])
# Initialize stiffness components
A11, B11, D11, kA55 = 0, 0, 0, 0
# Add layerwise contributions
for i in range(n):
E, G, nu = self.slab[i, 2:5]
A11 = A11 + E/(1 - nu**2)*(z[i+1] - z[i])
B11 = B11 + 1/2*E/(1 - nu**2)*(z[i+1]**2 - z[i]**2)
D11 = D11 + 1/3*E/(1 - nu**2)*(z[i+1]**3 - z[i]**3)
kA55 = kA55 + self.k*G*(z[i+1] - z[i])
self.A11 = A11
self.B11 = B11
self.D11 = D11
self.kA55 = kA55
self.E0 = B11**2 - A11*D11
def calc_system_matrix(self):
"""
Assemble first-order ODE system matrix.
Using the solution vector z = [u, u', w, w', psi, psi']
the ODE system is written in the form Az' + Bz = d
and rearranged to z' = -(A ^ -1)Bz + (A ^ -1)d = Ez + F
"""
kn = self.kn
kt = self.kt
        # Abbreviations (with t/2 in the equilibrium, with w' in the kinematics)
E21 = kt*(-2*self.D11 + self.B11*(self.h + self.t))/(2*self.E0)
E24 = (2*self.D11*kt*self.t
- self.B11*kt*self.t*(self.h + self.t)
+ 4*self.B11*self.kA55)/(4*self.E0)
E25 = (-2*self.D11*self.h*kt
+ self.B11*self.h*kt*(self.h + self.t)
+ 4*self.B11*self.kA55)/(4*self.E0)
E43 = kn/self.kA55
E61 = kt*(2*self.B11 - self.A11*(self.h + self.t))/(2*self.E0)
E64 = (-2*self.B11*kt*self.t
+ self.A11*kt*self.t*(self.h+self.t)
- 4*self.A11*self.kA55)/(4*self.E0)
E65 = (2*self.B11*self.h*kt
- self.A11*self.h*kt*(self.h+self.t)
- 4*self.A11*self.kA55)/(4*self.E0)
# System matrix
E = [[0, 1, 0, 0, 0, 0],
[E21, 0, 0, E24, E25, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, E43, 0, 0, -1],
[0, 0, 0, 0, 0, 1],
[E61, 0, 0, E64, E65, 0]]
self.sysmat = np.array(E)
def calc_eigensystem(self):
"""Calculate eigenvalues and eigenvectors of the system matrix."""
# Calculate eigenvalues (ew) and eigenvectors (ev)
ew, ev = np.linalg.eig(self.sysmat)
# Classify real and complex eigenvalues
real = (ew.imag == 0) & (ew.real != 0) # real eigenvalues
cmplx = ew.imag > 0 # positive complex conjugates
# Eigenvalues
self.ewC = ew[cmplx]
self.ewR = ew[real].real
# Eigenvectors
self.evC = ev[:, cmplx]
self.evR = ev[:, real].real
# Prepare positive eigenvalue shifts for numerical robustness
self.sR, self.sC = np.zeros(self.ewR.shape), np.zeros(self.ewC.shape)
self.sR[self.ewR > 0], self.sC[self.ewC > 0] = -1, -1
def calc_fundamental_system(self):
"""Calculate the fundamental system of the problem."""
self.calc_foundation_stiffness()
self.calc_laminate_stiffness_matrix()
self.calc_system_matrix()
self.calc_eigensystem()
def get_weight_load(self, phi):
"""
Calculate line loads from slab mass.
Arguments
---------
phi : float
Inclination (degrees). Counterclockwise positive.
Returns
-------
qn : float
Line load (N/mm) at center of gravity in normal direction.
qt : float
Line load (N/mm) at center of gravity in tangential direction.
"""
# Convert units
phi = np.deg2rad(phi) # Convert inclination to rad
rho = self.slab[:, 0]*1e-12 # Convert density to t/mm^3
# Sum up layer weight loads
q = sum(rho*self.g*self.slab[:, 1]) # Line load (N/mm)
# Split into components
qn = q*np.cos(phi) # Normal direction
qt = -q*np.sin(phi) # Tangential direction
return qn, qt
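    # Plausibility check (illustrative, not from the original source): a
    # single layer with density 240 kg/m^3 and thickness 200 mm gives
    # q = 240e-12 t/mm^3 * 9810 mm/s^2 * 200 mm ~ 4.7e-4 N/mm; at
    # phi = 30 deg this splits into qn = q*cos(30deg) and qt = -q*sin(30deg).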
def get_surface_load(self, phi):
"""
Calculate surface line loads.
Arguments
---------
phi : float
Inclination (degrees). Counterclockwise positive.
Returns
-------
pn : float
Surface line load (N/mm) in normal direction.
pt : float
Surface line load (N/mm) in tangential direction.
"""
# Convert units
phi = np.deg2rad(phi) # Convert inclination to rad
# Split into components
pn = self.p*np.cos(phi) # Normal direction
pt = -self.p*np.sin(phi) # Tangential direction
return pn, pt
def get_skier_load(self, m, phi):
"""
Calculate skier point load.
Arguments
---------
m : float
Skier weight (kg).
phi : float
Inclination (degrees). Counterclockwise positive.
Returns
-------
Fn : float
Skier load (N) in normal direction.
Ft : float
Skier load (N) in tangential direction.
"""
phi = np.deg2rad(phi) # Convert inclination to rad
F = 1e-3*np.array(m)*self.g/self.lski # Total skier load (N)
Fn = F*np.cos(phi) # Normal skier load (N)
Ft = -F*np.sin(phi) # Tangential skier load (N)
return Fn, Ft
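    # Example (illustrative, not from the original source): m = 80 kg with
    # lski = 1000 mm gives F = 1e-3*80*9810/1000 ~ 0.785 N/mm; on a
    # 30-degree slope Fn ~ 0.68 N/mm and Ft ~ -0.39 N/mm.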
def zh(self, x, l=0, bed=True):
"""
Compute bedded or free complementary solution at position x.
Arguments
---------
x : float
Horizontal coordinate (mm).
l : float, optional
Segment length (mm). Default is 0.
bed : bool
Indicates whether segment has foundation or not. Default
is True.
Returns
-------
zh : ndarray
Complementary solution matrix (6x6) at position x.
"""
if bed:
zh = np.concatenate([
# Real
self.evR*np.exp(self.ewR*(x + l*self.sR)),
# Complex
np.exp(self.ewC.real*(x + l*self.sC))*(
self.evC.real*np.cos(self.ewC.imag*x)
- self.evC.imag*np.sin(self.ewC.imag*x)),
# Complex
np.exp(self.ewC.real*(x + l*self.sC))*(
self.evC.imag*np.cos(self.ewC.imag*x)
+ self.evC.real*np.sin(self.ewC.imag*x))], axis=1)
else:
# Abbreviations
H14 = 3*self.B11/self.A11*x**2
H24 = 6*self.B11/self.A11*x
H54 = -3*x**2 + 6*self.E0/(self.A11*self.kA55)
# Complementary solution matrix of free segments
zh = np.array(
[[0, 0, 0, H14, 1, x],
[0, 0, 0, H24, 0, 1],
[1, x, x**2, x**3, 0, 0],
[0, 1, 2*x, 3*x**2, 0, 0],
[0, -1, -2*x, H54, 0, 0],
[0, 0, -2, -6*x, 0, 0]])
return zh
def zp(self, x, phi, bed=True):
"""
Compute bedded or free particular integrals at position x.
Arguments
---------
x : float
Horizontal coordinate (mm).
phi : float
Inclination (degrees).
bed : bool
Indicates whether segment has foundation (True) or not
(False). Default is True.
Returns
-------
zp : ndarray
Particular integral vector (6x1) at position x.
"""
# Get weight and surface loads
qn, qt = self.get_weight_load(phi)
pn, pt = self.get_surface_load(phi)
# Set foundation stiffnesses
kn = self.kn
kt = self.kt
# Unpack laminate stiffnesses
A11 = self.A11
B11 = self.B11
kA55 = self.kA55
E0 = self.E0
# Unpack geometric properties
h = self.h
t = self.t
zs = self.zs
# Assemble particular integral vectors
if bed:
zp = np.array([
[(qt + pt)/kt + h*qt*(h + t - 2*zs)/(4*kA55)
+ h*pt*(2*h + t)/(4*kA55)],
[0],
[(qn + pn)/kn],
[0],
[-(qt*(h + t - 2*zs) + pt*(2*h + t))/(2*kA55)],
[0]])
else:
zp = np.array([
[(-3*(qt + pt)/A11 - B11*(qn + pn)*x/E0)/6*x**2],
[(-2*(qt + pt)/A11 - B11*(qn + pn)*x/E0)/2*x],
[-A11*(qn + pn)*x**4/(24*E0)],
[-A11*(qn + pn)*x**3/(6*E0)],
[A11*(qn + pn)*x**3/(6*E0)
+ ((zs - B11/A11)*qt - h*pt/2 - (qn + pn)*x)/kA55],
[(qn + pn)*(A11*x**2/(2*E0) - 1/kA55)]])
return zp
def z(self, x, C, l, phi, bed=True):
"""
Assemble solution vector at positions x.
Arguments
---------
        x : float or sequence
Horizontal coordinate (mm). Can be sequence of length N.
C : ndarray
Vector of constants (6xN) at positions x.
l : float
Segment length (mm).
phi : float
Inclination (degrees).
bed : bool
Indicates whether segment has foundation (True) or not
(False). Default is True.
Returns
-------
z : ndarray
Solution vector (6xN) at position x.
"""
if isinstance(x, (list, tuple, np.ndarray)):
z = np.concatenate([
np.dot(self.zh(xi, l, bed), C)
+ self.zp(xi, phi, bed) for xi in x], axis=1)
else:
z = np.dot(self.zh(x, l, bed), C) + self.zp(x, phi, bed)
return z
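# A minimal usage sketch (illustrative; the layer values are made up):
#
#   eig = Eigensystem(system='pst-')
#   eig.set_beam_properties(layers=[[240, 200], [180, 100]])  # density, thickness
#   eig.set_foundation_properties(t=30, E=0.25, nu=0.25)
#   eig.calc_fundamental_system()
#   zh0 = eig.zh(x=0)          # complementary solution matrix at x = 0
#   zp0 = eig.zp(x=0, phi=30)  # particular integral on a 30-degree slope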
``` |
{
"source": "2piruben/BFM_multistate",
"score": 2
} |
#### File: 2piruben/BFM_multistate/data_analysis.py
```python
import cProfile
import re
import pickle
import integration_ME as ME
import numpy as np
import cProfile,pstats
from scipy import sparse
import matplotlib.pyplot as plt
from random import choices
Nmax = 13 # Maximum number of stators
ME.Nmax = Nmax # overriding Nmax in the ME module for consistency across files
thin = 1000 # number of datapoints to ignore between measurements
timeunits = 1 # timestep between measurements in the data
FPS = 1000 # frames per second
factor_seconds = thin/FPS # seconds between timepoints
threshold_aver_stall = int(475 * FPS / thin) # number of points to include in the average trajectories (for plotting)
threshold_aver_release = int(825 * FPS / thin) # number of points to include in the average trajectories (for plotting)
threshold_aver_res = int(660 * FPS / thin) # number of points to include in the average trajectories (for plotting)
file_dict = {300: "data/NilsD300_V2.p", 500: "data/NilsD500_V2.p", 1300: "data/NilsD1300_V2.p"} # files with the numeric traces
def _nonzero_mean(trajs, threshold = 3, excludezeros = True):
    # given a set of aligned trajectories (rows) return the average of strictly positive entries only if there are at least
    # a threshold number of valid entries
    # if excludezeros is True, zeros are excluded from the average; otherwise they are taken into account
if excludezeros:
accepted_entries = trajs>0
mean = np.nansum(trajs,axis = 0)
countnans = np.isnan(trajs).sum(axis = 0)
countnonzero = np.count_nonzero(trajs, axis = 0)
countaccepted = countnonzero - countnans
mean = mean/countaccepted
else:
mean = np.nanmean(trajs,axis = 0)
countaccepted = (~np.isnan(trajs)).sum(axis = 0)
mean[countaccepted<threshold] = np.nan
return mean
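# Example (illustrative): for trajs = np.array([[0., 2., np.nan], [4., 6., 8.]])
# _nonzero_mean(trajs, threshold=1) averages only strictly positive, non-NaN
# entries per column and returns [4., 4., 8.].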
##################################################################################################
## The data is processed to be stored in a lighter format into the data_light dictionary
## by thinning the data and calculating statistics
data_light = {300: [],500: [],1300: []}
data_light_stats = {300: {},500: {},1300: {}} # summary of some stats of data_light
for gamma in file_dict:
max_len_stall = 0
max_len_res_nozeros = 0
max_len_res_withzeros = 0
max_len_release = 0
init_stall = np.zeros([])
    # these cumulative containers will be used temporarily to compute averages and standard deviations of all trajectories in a condition
cumulative_trajectories_stall = np.array([],dtype = float).reshape(0,threshold_aver_stall)
cumulative_trajectories_release = np.array([],dtype = float).reshape(0,threshold_aver_release)
cumulative_trajectories_res_nozeros = np.array([],dtype = float).reshape(0,threshold_aver_res)
cumulative_trajectories_res_withzeros = np.array([],dtype = float).reshape(0,threshold_aver_res)
dataset = pickle.load(open(file_dict[gamma],'rb'))
data_light[gamma] = []
data_light_stats[gamma]['resurrection_dwell'] = []
for exp in dataset:
dict_temp = {}
if 'statnum_before_stall' in dataset[exp]:
init_stall = np.append(init_stall,dataset[exp]['statnum_before_stall'][0])
dd = np.array(dataset[exp]['statnum_before_stall'][::thin],dtype = float)
dd[dd>Nmax] = Nmax # correcting upper limit to Nmax
dd[dd<0] = 0 # correcting bottom limit to 0
            non_zero_idx = np.argmax(dd>0) # index of the first non-zero stator count (argmax stops at the first True)
dd = dd[non_zero_idx:] # removing the first part of the transient where the number of stators is zero to synchronize all the resurrections
dict_temp['statnum_before_stall'] = dd
npoints = len(dict_temp['statnum_before_stall'])
max_len_stall = max([max_len_stall, npoints])
            # to compute an average trajectory for the condition, the array is padded with NaNs so that nanmean/nanstd can be used at the end
cumulative_trajectories_stall = np.vstack((cumulative_trajectories_stall,
np.pad(dict_temp['statnum_before_stall'][:threshold_aver_stall],(0,max(0,threshold_aver_stall-npoints)),'constant',constant_values = np.nan)))
else:
print("No data found for load {}, replicate {}, condition statnum_before_stall".format(gamma,exp))
if 'statnum_after_release' in dataset[exp]:
dd = dataset[exp]['statnum_after_release'][::thin]
dd[dd>Nmax] = Nmax # correcting limit to Nmax
dd[dd<0] = 0 # correcting bottom limit to 0
            non_zero_idx = np.argmax(dd>0) # index of the first non-zero stator count (argmax stops at the first True)
dd = dd[non_zero_idx:] # removing the first part of the transient where the number of stators is zero to synchronize all the resurrections
dict_temp['statnum_after_release'] = dd
npoints = len(dict_temp['statnum_after_release'])
max_len_release = max([max_len_release, len(dict_temp['statnum_after_release'])])
cumulative_trajectories_release = np.vstack((cumulative_trajectories_release,
np.pad(dict_temp['statnum_after_release'][:threshold_aver_release],(0,max(0,threshold_aver_release-npoints)),'constant',constant_values = np.nan)))
else:
print("No data found for load {}, replicate {},condition statnum_after_release".format(gamma,exp))
if 'statnum_resurrection' in dataset[exp]:
dd = dataset[exp]['statnum_resurrection'][::thin]
dd[dd>Nmax] = Nmax # correcting limit to Nmax
            dd[dd<0] = 0 # correcting bottom limit to 0
            non_zero_idx = np.argmax(dd>0) # index of the first non-zero stator count (argmax stops at the first True)
if dd[non_zero_idx]<6:
data_light_stats[gamma]['resurrection_dwell'].append(non_zero_idx)
dict_temp['statnum_resurrection_withzeros'] = np.copy(dd)
dd = dd[non_zero_idx:] # removing the first part of the transient where the number of stators is zero to synchronize all the resurrections
dict_temp['statnum_resurrection_nozeros'] = dd
npoints_nozeros = len(dict_temp['statnum_resurrection_nozeros'])
npoints_withzeros = len(dict_temp['statnum_resurrection_withzeros'])
max_len_res_nozeros = max([max_len_res_nozeros, npoints_nozeros])
max_len_res_withzeros = max([max_len_res_withzeros, npoints_withzeros])
cumulative_trajectories_res_nozeros = np.vstack((cumulative_trajectories_res_nozeros,
np.pad(dict_temp['statnum_resurrection_nozeros'][:threshold_aver_res],(0,max(0,threshold_aver_res-npoints_nozeros)),
'constant',constant_values = np.nan)))
cumulative_trajectories_res_withzeros = np.vstack((cumulative_trajectories_res_withzeros,
np.pad(dict_temp['statnum_resurrection_withzeros'][:threshold_aver_res],(0,max(0,threshold_aver_res-npoints_withzeros)),
'constant',constant_values = np.nan)))
else:
print("No data found for load {}, replicate {},condition statnum_resurrection".format(gamma,exp))
data_light[gamma].append(dict(dict_temp))
    print('mean stall for gamma = {}: {}'.format(gamma, np.mean(init_stall)))
# Storing trajectory stats per condition (mostly for plotting)
data_light_stats[gamma]['longest_time_vector_stall'] = np.arange(max_len_stall)*factor_seconds
data_light_stats[gamma]['longest_time_vector_resurrection_nozeros'] = np.arange(max_len_res_nozeros)*factor_seconds
data_light_stats[gamma]['longest_time_vector_resurrection_withzeros'] = np.arange(max_len_res_withzeros)*factor_seconds
data_light_stats[gamma]['longest_time_vector_release'] = np.arange(max_len_release)*factor_seconds
data_light_stats[gamma]['common_time_vector_stall'] = np.arange(threshold_aver_stall)*factor_seconds
data_light_stats[gamma]['common_time_vector_resurrection_nozeros'] = np.arange(threshold_aver_res)*factor_seconds
data_light_stats[gamma]['common_time_vector_resurrection_withzeros'] = np.arange(threshold_aver_res)*factor_seconds
data_light_stats[gamma]['common_time_vector_release'] = np.arange(threshold_aver_release)*factor_seconds
cumulative_trajectories_stall[cumulative_trajectories_stall == 0] = np.nan
cumulative_trajectories_release[cumulative_trajectories_release == 0] = np.nan
data_light_stats[gamma]['mean_stall'] = np.nanmean(cumulative_trajectories_stall,axis = 0)
data_light_stats[gamma]['mean_resurrection_nozeros'] = _nonzero_mean(cumulative_trajectories_res_nozeros)
#print('cumulative_trajectories_res_nozeros',cumulative_trajectories_res_nozeros)
data_light_stats[gamma]['mean_resurrection_withzeros'] = _nonzero_mean(cumulative_trajectories_res_withzeros)
data_light_stats[gamma]['mean_resurrection_withzeros'][0] = 0 # the first zero is the only believable one
print('mean_nozeros', gamma,data_light_stats[gamma]['mean_resurrection_nozeros'])
print('mean_withzeros', gamma,data_light_stats[gamma]['mean_resurrection_withzeros'])
data_light_stats[gamma]['mean_release'] = np.nanmean(cumulative_trajectories_release,axis = 0)
data_light_stats[gamma]['median_stall'] = np.nanmedian(cumulative_trajectories_stall,axis = 0)
data_light_stats[gamma]['median_resurrection_nozeros'] = np.nanmedian(cumulative_trajectories_res_nozeros,axis = 0)
data_light_stats[gamma]['median_resurrection_withzeros'] = np.nanmedian(cumulative_trajectories_res_withzeros,axis = 0)
data_light_stats[gamma]['median_release'] = np.nanmedian(cumulative_trajectories_release,axis = 0)
data_light_stats[gamma]['std_stall'] = np.nanstd(cumulative_trajectories_stall,axis = 0)
data_light_stats[gamma]['std_resurrection_nozeros'] = np.nanstd(cumulative_trajectories_res_nozeros,axis = 0)
data_light_stats[gamma]['std_resurrection_withzeros'] = np.nanstd(cumulative_trajectories_res_withzeros,axis = 0)
data_light_stats[gamma]['std_release'] = np.nanstd(cumulative_trajectories_release,axis = 0)
data_light_stats[gamma]['mean_stall_distance'] = np.sum(data_light_stats[gamma]['std_stall'])
data_light_stats[gamma]['mean_resurrection_nozeros_distance'] = np.sum(data_light_stats[gamma]['std_resurrection_nozeros'])
data_light_stats[gamma]['mean_resurrection_withzeros_distance'] = np.sum(data_light_stats[gamma]['std_resurrection_withzeros'])
data_light_stats[gamma]['mean_release_distance'] = np.sum(data_light_stats[gamma]['std_release'])
print('mean distances gamma:',data_light_stats[gamma]['mean_stall_distance'],data_light_stats[gamma]['mean_release_distance'],data_light_stats[gamma]['mean_resurrection_nozeros_distance'])
#dataset = None # empty this monster if there are memory issues
cumulative_trajectories_release = None
cumulative_trajectories_stall = None
cumulative_trajectories_res_nozeros = None
cumulative_trajectories_res_withzeros = None
def DistanceLikelihood(gamma = 300, model = 'WeakStrong', params = [], return_trajs = False, sto_trajs = False, resurrectionzeros = 'nozeros',
likelihoodcondition = 'all'):
# for each model and parameters, create a compatible set of average trajectories and compare them to
# the average trajectories measured
# if sto_trajs is True the likelihood corresponds to one trajectory, otherwise the mean is used
    # if the mean is used then WeakStrong is integrated using the 2-D analytical solution rather than solving the N^2 ME
    # resurrectionzeros can be 'nozeros' or 'withzeros' to select the alignment of the resurrections:
    # 'withzeros': the resurrection starts at t=0
    # 'nozeros': the starting time and occupancy are those of the first stator recruitment in the resurrection trace
    # likelihoodcondition can be 'all', or a list with the selected conditions 'stall', 'release', 'resurrection'
# models can be 'Berg' for the speed-rate model proposed in Wadhwa et al 2019 or 'WeakStrong' for the two-state model
if resurrectionzeros == 'nozeros':
str_res = 'statnum_resurrection_nozeros'
str_common_vector_res = 'common_time_vector_resurrection_nozeros'
str_mean_res = 'mean_resurrection_nozeros'
elif resurrectionzeros == 'withzeros' :
str_res = 'statnum_resurrection_withzeros'
str_common_vector_res = 'common_time_vector_resurrection_withzeros'
str_mean_res = 'mean_resurrection_withzeros'
if likelihoodcondition == 'all':
likelihoodcondition = ['stall','release','resurrection']
if model == 'Berg':
A = ME.Berg_Matrix(*params, N=Nmax)
Aext = ME.GetEigenElements(A)
P_eq = ME.Normalize(ME.Equilibrium(Aext,eigen_given = True))
elif model == 'WeakStrong':
if sto_trajs:
# full ME is only required when comparing stochastic trajectories
A = ME.WeakStrong_Matrix(*params, N=Nmax)
Aext = ME.GetEigenElements(A)
P_eq = ME.Normalize(ME.Equilibrium(Aext,eigen_given = True))
params_stall = params[:]
        params_stall[1] = 0 # forbid detachment
A_stall = ME.WeakStrong_Matrix(*params_stall, N=Nmax)
Aext_stall = ME.GetEigenElements(A_stall)
P_eq_stall = ME.Normalize(ME.Equilibrium(Aext_stall,eigen_given = True))
if model == 'Berg':
cumulative_trajectories_stall = np.zeros_like(data_light_stats[gamma]['common_time_vector_stall'])
cumulative_trajectories_release = np.zeros_like(data_light_stats[gamma]['common_time_vector_release'])
cumulative_trajectories_res = np.zeros_like(data_light_stats[gamma][str_common_vector_res])
elif model == 'WeakStrong':
cumulative_trajectories_stall = np.zeros((len(data_light_stats[gamma]['common_time_vector_stall']),2))
cumulative_trajectories_release = np.zeros((len(data_light_stats[gamma]['common_time_vector_release']),2))
cumulative_trajectories_res = np.zeros((len(data_light_stats[gamma][str_common_vector_res]),2))
    N_stall = 0 # number of trajectories accepted
N_release = 0
N_res = 0
for exp in data_light[gamma]: # for each experimental trajectory, the initial condition is used to integrate the equations
# afterwards the summary statistics are calculated with the ensemble of trajectories and compared to the experiments
#### BEFORE STALL (STEADY STATE)
if 'stall' in likelihoodcondition:
            N0_stall = exp['statnum_before_stall'][0].astype(int)
if model == 'Berg':
if sto_trajs is True:
cumulative_trajectories_stall += ME.Get_Berg_Trajectory(N0_stall, *params,
data_light_stats[gamma]['common_time_vector_stall'])
else:
P0_before_stall = np.zeros_like(P_eq)
P0_before_stall[N0_stall] = 1 # initial condition
Pt_before_stall = ME.Integrate(Aext, P0_before_stall, data_light_stats[gamma]['common_time_vector_stall'], eigen_given= True)
mean_stall, std_stall = ME.stats(Pt_before_stall,model,Nmax)
cumulative_trajectories_stall += mean_stall
elif model == 'WeakStrong':
if sto_trajs is True:
W0_stall,S0_stall = ME.WeakStrong_indextovalues(choices(range(len(P_eq)),P_eq)[0])
cumulative_trajectories_stall += ME.Get_WeakStrong_Trajectory(W0_stall, S0_stall,
*params, data_light_stats[gamma]['common_time_vector_stall'])
else:
                    # These lines solve the ME for the initial condition. Not required at the moment, just using the mean numbers
#P0_before_stall = np.copy(P_eq)
#ME.ConditionPwstoN(P0_before_stall, N0_stall)
#Pt_before_stall = ME.Integrate(Aext, P0_before_stall, data_light_stats[gamma]['common_time_vector_stall'], eigen_given= True)
#mean_stall, std_stall, w_stall, s_stall = ME.stats(Pt_before_stall,model,Nmax, weakstrong_output = True)
w_stall, s_stall = ME.MeanSteadyStateWeakStrong(*params)
cumulative_trajectories_stall[:,0] += w_stall
cumulative_trajectories_stall[:,1] += s_stall
N_stall += 1
#### RELEASE
# The release initial points are taken from each individual trajectory
if 'release' in likelihoodcondition:
N0_release = exp['statnum_after_release'][0].astype(int)
if model == 'Berg':
if sto_trajs is True:
cumulative_trajectories_release += ME.Get_Berg_Trajectory(N0_release, *params,
data_light_stats[gamma]['common_time_vector_release'])
else:
P0_after_release = np.zeros_like(P_eq)
P0_after_release[N0_release] = 1
Pt_after_release = ME.Integrate(Aext, P0_after_release, data_light_stats[gamma]['common_time_vector_release'], eigen_given= True)
mean_rel, std_rel = ME.stats(Pt_after_release,model,Nmax)
cumulative_trajectories_release += mean_rel
elif model == 'WeakStrong':
                # In the weak-strong model the initial weak/strong stators are drawn randomly
                # from the stall steady-state distribution (kwu=0) conditioned on W+S = N0
if sto_trajs is True:
P_rel = np.copy(P_eq_stall)
ME.ConditionPwstoN(P_rel,N0_release)
W0_release,S0_release = ME.WeakStrong_indextovalues(choices(range(len(P_rel)),P_rel)[0])
cumulative_trajectories_release += ME.Get_WeakStrong_Trajectory(W0_release, S0_release,
*params, data_light_stats[gamma]['common_time_vector_release'])
else:
P0_after_release = np.copy(P_eq_stall)
ME.ConditionPwstoN(P0_after_release, N0_release)
### These two lines would integrate the equation numerically (keeping for future comparisons with std)
# Pt_after_release = ME.Integrate(Aext, P0_after_release, data_light_stats[gamma]['common_time_vector_release'], eigen_given= True)
# mean_rel_old, std_rel_old, mean_rel_weak_old, mean_rel_strong_old = ME.stats(Pt_after_release,model,Nmax, weakstrong_output = True)
N0, std0, w0, s0 = ME.stats(P0_after_release,model,Nmax,weakstrong_output = True)
mean_rel_weak, mean_rel_strong = ME.TrajMeanFieldWeakStrong(*params, w0, s0, data_light_stats[gamma]['common_time_vector_release'],
Nmax=Nmax, method = 'analytical')
mean_rel = mean_rel_weak + mean_rel_strong
cumulative_trajectories_release[:,0] += mean_rel_weak
cumulative_trajectories_release[:,1] += mean_rel_strong
N_release += 1
if 'resurrection' in likelihoodcondition:
if str_res in exp: ## Not all the datasets have resurrection traces
N0_resurrection = exp[str_res][0].astype(int)
if model == 'Berg':
if sto_trajs is True:
cumulative_trajectories_res += ME.Get_Berg_Trajectory(N0_resurrection, *params,
data_light_stats[gamma][str_common_vector_res])
else:
P0_resurrection = np.zeros_like(P_eq)
P0_resurrection[N0_resurrection] = 1
Pt_resurrection = ME.Integrate(Aext, P0_resurrection, data_light_stats[gamma][str_common_vector_res], eigen_given= True)
mean_res, std_res = ME.stats(Pt_resurrection,model,Nmax)
cumulative_trajectories_res += mean_res
elif model == 'WeakStrong':
                # In the weak-strong model the initial stators are assumed to be weakly bound
if sto_trajs is True:
cumulative_trajectories_res += ME.Get_WeakStrong_Trajectory(N0_resurrection, 0, *params,
data_light_stats[gamma][str_common_vector_res])
else:
P0_resurrection = np.zeros_like(P_eq_stall)
P0_resurrection[ME.WeakStrong_valuestoindex(N0_resurrection,0)] = 1
### These two lines would integrate the equation numerically (keeping for future comparisons with std)
# Pt_resurrection = ME.Integrate(Aext, P0_resurrection, data_light_stats[gamma][str_common_vector_res], eigen_given= True)
# mean_res, std_res, mean_res_weak, mean_res_strong = ME.stats(Pt_resurrection,model,Nmax, weakstrong_output = True)
mean_res_weak, mean_res_strong = ME.TrajMeanFieldWeakStrong(*params, 0, 0, data_light_stats[gamma][str_common_vector_res],
Nmax=Nmax, method = 'analytical')
cumulative_trajectories_res[:,0] += mean_res_weak
cumulative_trajectories_res[:,1] += mean_res_strong
N_res += 1
if model == 'WeakStrong':
# mean number of stators in the two-state model is the sum of weak and strongly bound stators
cumulative_trajectories_stall = cumulative_trajectories_stall[:,0]+cumulative_trajectories_stall[:,1]
cumulative_trajectories_release = cumulative_trajectories_release[:,0]+cumulative_trajectories_release[:,1]
cumulative_trajectories_res = cumulative_trajectories_res[:,0]+cumulative_trajectories_res[:,1]
distance = 0
if 'stall' in likelihoodcondition:
cumulative_trajectories_stall/= N_stall
distance_stall = cumulative_trajectories_stall - data_light_stats[gamma]['mean_stall']
distance_stall = np.sum(distance_stall*distance_stall)
distance += distance_stall
if 'resurrection' in likelihoodcondition:
cumulative_trajectories_res/= N_res
distance_res = cumulative_trajectories_res - data_light_stats[gamma][str_mean_res]
distance_res = np.nansum(distance_res*distance_res)
distance += distance_res
if 'release' in likelihoodcondition:
cumulative_trajectories_release/= N_release
distance_release = cumulative_trajectories_release - data_light_stats[gamma]['mean_release']
distance_release = np.sum(distance_release*distance_release)
distance += distance_release
if return_trajs:
return(cumulative_trajectories_stall,cumulative_trajectories_release,cumulative_trajectories_res,distance)
else:
return(distance)
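# A minimal usage sketch (illustrative; the rate values are made up and must
# match the parameter order expected by the chosen model in integration_ME):
#
#   params = [0.01, 0.001, 0.05, 0.002]  # hypothetical WeakStrong rates
#   d = DistanceLikelihood(gamma=300, model='WeakStrong', params=params)
#   stall, release, res, d = DistanceLikelihood(
#       gamma=300, model='WeakStrong', params=params, return_trajs=True)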
``` |
{
"source": "2press/sc2monitor",
"score": 3
} |
#### File: sc2monitor/sc2monitor/controller.py
```python
import asyncio
import logging
import math
import time
from datetime import datetime, timedelta
from operator import itemgetter
import aiohttp
import sc2monitor.model as model
from sc2monitor.handlers import SQLAlchemyHandler
from sc2monitor.sc2api import SC2API
logger = logging.getLogger(__name__)
sql_logger = logging.getLogger()
class Controller:
"""Control the sc2monitor."""
def __init__(self, **kwargs):
"""Init the sc2monitor."""
self.kwargs = kwargs
self.sc2api = None
self.db_session = None
self.current_season = {}
async def __aenter__(self):
"""Create a aiohttp and db session that will later be closed."""
headers = {'Accept-Encoding': 'gzip, deflate'}
self.http_session = aiohttp.ClientSession(headers=headers)
self.create_db_session()
return self
def create_db_session(self):
"""Create sqlalchemy database session."""
self.db_session = model.create_db_session(
db=self.kwargs.pop('db', ''),
encoding=self.kwargs.pop('encoding', ''))
self.handler = SQLAlchemyHandler(self.db_session)
self.handler.setLevel(logging.INFO)
sql_logger.setLevel(logging.INFO)
sql_logger.addHandler(self.handler)
if len(self.kwargs) > 0:
self.setup(**self.kwargs)
self.sc2api = SC2API(self)
self.cache_matches = self.get_config(
'cache_matches',
default_value=1000)
self.cache_logs = self.get_config(
'cache_logs',
default_value=500)
self.cache_runs = self.get_config(
'cache_runs',
default_value=500)
self.analyze_matches = self.get_config(
'analyze_matches',
default_value=100)
async def __aexit__(self, exc_type, exc, tb):
"""Close all aiohtto and database session."""
await self.http_session.close()
self.db_session.commit()
self.db_session.close()
self.db_session = None
def get_config(self, key, default_value=None,
raise_key_error=True,
return_object=False):
"""Read a config value from database."""
if default_value is not None:
raise_key_error = False
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
if raise_key_error:
raise ValueError(f'Unknown config key "{key}"')
else:
if return_object:
return None
else:
return '' if default_value is None else default_value
else:
if return_object:
return entry
else:
return entry.value
def set_config(self, key, value, commit=True):
"""Save a config value to the database."""
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
self.db_session.add(model.Config(key=key, value=value))
else:
entry.value = value
if commit:
self.db_session.commit()
def setup(self, **kwargs):
"""Set up the sc2monitor with api-key and api-secret."""
        valid_keys = ['api_key', 'api_secret', 'cache_matches',
                      'cache_logs', 'cache_runs', 'analyze_matches']
for key, value in kwargs.items():
if key not in valid_keys:
raise ValueError(
f"Invalid configuration key '{key}'"
f" (valid keys: {', '.join(valid_keys)})")
self.set_config(key, value, commit=False)
self.db_session.commit()
if self.sc2api:
self.sc2api.read_config()
def add_player(self, url, race=model.Race['Random']):
"""Add a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
count = self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).count()
if count == 0:
new_player = model.Player(
realm=realm,
player_id=player_id,
server=server,
race=race)
self.db_session.add(new_player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
def remove_player(self, url):
"""Remove a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
for player in self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).all():
self.db_session.delete(player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
async def update_season(self, server: model.Server):
"""Update info about the current season in the database."""
current_season = await self.sc2api.get_season(server)
season = self.db_session.query(model.Season).\
filter(model.Season.server == server).\
order_by(model.Season.season_id.desc()).\
limit(1).scalar()
if not season or current_season.season_id != season.season_id:
self.db_session.add(current_season)
self.db_session.commit()
self.db_session.refresh(current_season)
logger.info(f'Found a new ladder season: {current_season}')
return current_season
else:
season.start = current_season.start
season.end = current_season.end
season.year = current_season.year
season.number = current_season.number
self.db_session.commit()
return season
async def update_seasons(self):
"""Update seasons info for all servers."""
servers = [server[0] for server in self.db_session.query(
model.Player.server).distinct()]
tasks = []
for server in servers:
tasks.append(asyncio.create_task(self.update_season(server)))
for season in await asyncio.gather(*tasks, return_exceptions=True):
try:
if isinstance(season, model.Season):
self.current_season[season.server.id()] = season
else:
raise season
except Exception:
logger.exception(
('The following exception was'
' raised while updating seasons:'))
async def query_player(self, player: model.Player):
"""Collect api data of a player."""
complete_data = []
for ladder in await self.sc2api.get_ladders(player):
async for data in self.sc2api.get_ladder_data(player, ladder):
current_player = await self.get_player_with_race(player, data)
missing_games, new = self.count_missing_games(
current_player, data)
if missing_games['Total'] > 0:
complete_data.append({'player': current_player,
'new_data': data,
'missing': missing_games,
'Win': 0,
'Loss': 0})
if len(complete_data) > 0:
await self.process_player(complete_data, new)
elif (not player.name
or not isinstance(player.refreshed, datetime)
or player.refreshed <= datetime.now() - timedelta(days=1)):
await self.update_player_name(player)
async def update_player_name(self, player: model.Player, name=''):
"""Update the name of a player from api data."""
if not name:
metadata = await self.sc2api.get_metadata(player)
name = metadata['name']
for tmp_player in self.db_session.query(model.Player).filter(
model.Player.player_id == player.player_id,
model.Player.realm == player.realm,
model.Player.server == player.server,
model.Player.name != name).all():
logger.info(f"{tmp_player.id}: Updating name to '{name}'")
tmp_player.name = name
self.db_session.commit()
async def check_match_history(self, complete_data):
"""Check matches in match history and assign them to races."""
match_history = await self.sc2api.get_match_history(
complete_data[0]['player'])
for match in match_history:
positive = []
for data_key, data in enumerate(complete_data):
needed = data['missing'].get(match['result'].describe(), 0) > 0
try:
datetime_check = (match['datetime']
- data['player'].last_played
> timedelta(seconds=0))
except TypeError:
datetime_check = True
if (needed and datetime_check):
positive.append(data_key)
if len(positive) == 0:
continue
elif len(positive) >= 1:
# Choose the race with most missing results.
max_missing = 0
for key in positive:
tmp_missing = complete_data[key][
'missing'][match['result'].describe()]
if tmp_missing > max_missing:
data_key = key
max_missing = tmp_missing
complete_data[data_key][
'missing'][match['result'].describe()] -= 1
complete_data[data_key][match['result'].describe()] += 1
try:
complete_data[data_key]['games'].insert(0, match)
except KeyError:
complete_data[data_key]['games'] = [match]
try:
last_played = match['datetime']
except Exception:
last_played = datetime.now()
return last_played, len(match_history)
async def process_player(self, complete_data, new=False):
"""Process the api data of a player."""
last_played, len_history \
= await self.check_match_history(complete_data)
for race_player in complete_data:
race_player['missing']['Total'] = race_player['missing']['Win'] + \
race_player['missing']['Loss']
if race_player['missing']['Total'] > 0:
if new:
logger.info(
f"{race_player['player'].id}: Ignoring "
f"{race_player['missing']['Total']} games missing in"
f" match history ({len_history}) "
"of new player.")
else:
self.guess_games(race_player, last_played)
self.guess_mmr_changes(race_player)
await self.update_player(race_player)
self.calc_statistics(race_player['player'])
async def update_player(self, complete_data):
"""Update database with new data of a player."""
player = complete_data['player']
new_data = complete_data['new_data']
player.mmr = new_data['mmr']
player.ladder_id = new_data['ladder_id']
player.league = new_data['league']
player.ladder_joined = new_data['joined']
player.wins = new_data['wins']
player.losses = new_data['losses']
player.last_active_season = self.get_season_id(player.server)
if player.name != new_data['name']:
await self.update_player_name(
player,
new_data['name'])
if (not player.last_played
or player.ladder_joined
> player.last_played):
player.last_played = player.ladder_joined
self.db_session.commit()
def calc_statistics(self, player: model.Player):
"""Recalculate player statistics."""
self.db_session.refresh(player)
if not player.statistics:
stats = model.Statistics(player=player)
self.db_session.add(stats)
self.db_session.commit()
self.db_session.refresh(stats)
else:
stats = player.statistics
matches = self.db_session.query(model.Match).filter(
model.Match.player_id == player.id).order_by(
model.Match.datetime.desc()).limit(self.analyze_matches).all()
stats.games_available = len(matches)
wma_mmr_denominator = stats.games_available * \
(stats.games_available + 1.0) / 2.0
stats.max_mmr = player.mmr
stats.min_mmr = player.mmr
stats.current_mmr = player.mmr
wma_mmr = 0.0
expected_mmr_value = 0.0
expected_mmr_value2 = 0.0
current_wining_streak = 0
current_losing_streak = 0
for idx, match in enumerate(matches):
if match.result == model.Result.Win:
stats.wins += 1
current_wining_streak += 1
current_losing_streak = 0
if current_wining_streak > stats.longest_wining_streak:
stats.longest_wining_streak = current_wining_streak
elif match.result == model.Result.Loss:
stats.losses += 1
current_losing_streak += 1
current_wining_streak = 0
if current_losing_streak > stats.longest_losing_streak:
stats.longest_losing_streak = current_losing_streak
if match.max_length <= 120:
stats.instant_left_games += 1
if match.guess:
stats.guessed_games += 1
mmr = match.mmr
wma_mmr += mmr * \
(stats.games_available - idx) / wma_mmr_denominator
if stats.max_mmr < mmr:
stats.max_mmr = mmr
if stats.min_mmr > mmr:
stats.min_mmr = mmr
expected_mmr_value += mmr / stats.games_available
expected_mmr_value2 += mmr * (mmr / stats.games_available)
if stats.games_available <= 1:
stats.lr_mmr_slope = 0.0
stats.lr_mmr_intercept = expected_mmr_value
else:
ybar = expected_mmr_value
xbar = -0.5 * (stats.games_available - 1)
numerator = 0
denominator = 0
for x, match in enumerate(matches):
x = -x
y = match.mmr
numerator += (x - xbar) * (y - ybar)
denominator += (x - xbar) * (x - xbar)
stats.lr_mmr_slope = numerator / denominator
stats.lr_mmr_intercept = ybar - stats.lr_mmr_slope * xbar
stats.sd_mmr = round(
math.sqrt(expected_mmr_value2
- expected_mmr_value
* expected_mmr_value))
# critical_idx = min(self.controller.config['no_critical_games'],
# stats.games_available) - 1
# stats.critical_game_played = matches[critical_idx]["played"]
stats.avg_mmr = expected_mmr_value
stats.wma_mmr = wma_mmr
self.db_session.commit()
@classmethod
def guess_games(cls, complete_data, last_played):
"""Guess games of a player if missing in match history."""
# If a player isn't new in the database and has played more
# than 25 games since the last refresh or the match
# history is not available for this player, there are
# missing games in the match history. These are guessed to be very
# close to the last game of the match history and in alternating
# order.
player = complete_data['player']
if 'games' not in complete_data:
complete_data['games'] = []
logger.info((
"{}: {} missing games in match "
+ "history - more guessing!").format(
player.id, complete_data['missing']['Total']))
try:
delta = (last_played - player.last_played) / \
complete_data['missing']['Total']
except Exception:
delta = timedelta(minutes=3)
if delta > timedelta(minutes=3):
delta = timedelta(minutes=3)
if delta.total_seconds() <= 0:
last_played = datetime.now()
delta = timedelta(minutes=3)
while (complete_data['missing']['Win'] > 0
or complete_data['missing']['Loss'] > 0):
if complete_data['missing']['Win'] > 0:
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Win})
complete_data['missing']['Win'] -= 1
complete_data['Win'] += 1
if (complete_data['missing']['Win'] > 0
and complete_data['missing']['Win']
> complete_data['missing']['Loss']):
# If there are more wins than losses add
# a second win before the next loss.
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Win})
complete_data['missing']['Win'] -= 1
complete_data['Win'] += 1
if complete_data['missing']['Loss'] > 0:
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Loss})
complete_data['missing']['Loss'] -= 1
complete_data['Loss'] += 1
if (complete_data['missing']['Loss'] > 0
and complete_data['missing']['Win']
< complete_data['missing']['Loss']):
                # If there are more losses than wins add a second loss before
                # the next win.
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Loss})
complete_data['missing']['Loss'] -= 1
complete_data['Loss'] += 1
def guess_mmr_changes(self, complete_data):
"""Guess MMR change of matches."""
MMR = complete_data['player'].mmr
if MMR is None:
MMR = 0
totalMMRchange = complete_data['new_data']['mmr'] - MMR
wins = complete_data['Win']
losses = complete_data['Loss']
complete_data['games'] = sorted(
complete_data.get('games', []), key=itemgetter('datetime'))
logger.info('{}: Adding {} wins and {} losses!'.format(
complete_data['player'].id, wins, losses))
if wins + losses <= 0:
# No games to guess
return
        # Estimate MMR change to be +/-21 for a win and a loss, each adjusted
        # by the average deviation to achieve the most recent MMR value.
        # Is 21 accurate? Yes, as the empirical average MMR change is 20.9016
        # according to data gathered by this tool.
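        # Worked example (illustrative, not from the original source): with
        # 3 wins, 1 loss and totalMMRchange = +50, the loop below keeps
        # MMRchange = 21 and yields avgMMRadjustment = (50 - 21*(3-1))/(3+1)
        # = 2, so each win is booked as +23 and each loss as -19
        # (3*23 - 19 = 50).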
if wins + losses == 1 and MMR != 0:
MMRchange = abs(totalMMRchange)
else:
MMRchange = 21
if MMR == 0:
totalMMRchange = MMRchange * (wins - losses)
MMR = complete_data['new_data']['mmr'] - totalMMRchange
while True:
avgMMRadjustment = (totalMMRchange - MMRchange
* (wins - losses)) / (wins + losses)
# Make sure that sign of MMR change is correct
if abs(avgMMRadjustment) >= MMRchange and MMRchange <= 50:
MMRchange += 1
logger.info(f"{complete_data['player'].id}:"
f" Adjusting avg. MMR change to {MMRchange}")
else:
break
last_played = complete_data['player'].last_played
previous_match = self.db_session.query(model.Match).\
filter(model.Match.player_id
== complete_data['player'].id).\
order_by(model.Match.datetime.desc()).limit(1).scalar()
# Warning breaks Travis CI
# if not previous_match:
# logger.warning('{}: No previous match found.'.format(
# complete_data['player'].id))
for idx, match in enumerate(complete_data['games']):
estMMRchange = round(
MMRchange * match['result'].change() + avgMMRadjustment)
MMR = MMR + estMMRchange
try:
delta = match['datetime'] - last_played
except Exception:
delta = timedelta(minutes=3)
last_played = match['datetime']
max_length = delta.total_seconds()
# Don't mark the most recent game as guess, as time and mmr value
# should be accurate (but not mmr change).
guess = not (idx + 1 == len(complete_data['games']))
alpha = 2.0 / (100.0 + 1.0)
if previous_match and previous_match.ema_mmr > 0.0:
delta = MMR - previous_match.ema_mmr
ema_mmr = previous_match.ema_mmr + alpha * delta
emvar_mmr = (1.0 - alpha) * \
(previous_match.emvar_mmr + alpha * delta * delta)
else:
ema_mmr = MMR
emvar_mmr = 0.0
new_match = model.Match(
player=complete_data['player'],
result=match['result'],
datetime=match['datetime'],
mmr=MMR,
mmr_change=estMMRchange,
guess=guess,
ema_mmr=ema_mmr,
emvar_mmr=emvar_mmr,
max_length=max_length)
complete_data['player'].last_played = match['datetime']
self.db_session.add(new_match)
previous_match = new_match
self.db_session.commit()
# Delete old matches:
deletions = 0
for match in self.db_session.query(model.Match).\
filter(model.Match.player_id == complete_data['player'].id).\
order_by(model.Match.datetime.desc()).\
offset(self.cache_matches).all():
self.db_session.delete(match)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{complete_data['player'].id}: "
f"{deletions} matches deleted!")
def update_ema_mmr(self, player: model.Player):
"""Update the exponential moving avarage MMR of a player."""
matches = self.db_session.query(model.Match).\
filter(model.Match.player == player).\
order_by(model.Match.datetime.asc()).all()
previous_match = None
for match in matches:
alpha = 2.0 / (100.0 + 1.0)
if previous_match and previous_match.ema_mmr > 0.0:
delta = match.mmr - previous_match.ema_mmr
ema_mmr = previous_match.ema_mmr + alpha * delta
emvar_mmr = (1.0 - alpha) * \
(previous_match.emvar_mmr + alpha * delta * delta)
else:
ema_mmr = match.mmr
emvar_mmr = 0.0
match.ema_mmr = ema_mmr
match.emvar_mmr = emvar_mmr
previous_match = match
self.db_session.commit()
def get_season_id(self, server: model.Server):
"""Get the current season id on a server."""
return self.current_season[server.id()].season_id
def count_missing_games(self, player: model.Player, data):
"""Count games of the api data that are not yet in the database."""
missing = {}
missing['Win'] = data['wins']
missing['Loss'] = data['losses']
if player.last_active_season == 0 or player.mmr == 0:
new = True
elif (player.last_active_season < self.get_season_id(player.server)):
# New Season!
# TODO: Check if last season endpoint can be requested!
# Only the legacy endpoints give the option to query the
# previous season's data (given that the ladder ID is
# known), e.g.:
# https://eu.api.blizzard.com/sc2/legacy/ladder/2/209966
new = False
elif (player.ladder_id != data['ladder_id']
or not player.ladder_joined
or player.ladder_joined < data['joined']
or data['wins'] < player.wins
or data['losses'] < player.losses):
# Old season, but new ladder or same ladder, but rejoined
if (data['wins'] < player.wins
or data['losses'] < player.losses):
# Forced ladder reset!
logger.info('{}: Manual ladder reset to {}!'.format(
player.id, data['ladder_id']))
new = True
else:
# Promotion?!
missing['Win'] -= player.wins
missing['Loss'] -= player.losses
new = player.mmr == 0
if missing['Win'] + missing['Loss'] == 0:
# Player was promoted/demoted to/from GM!
promotion = data['league'] == model.League.Grandmaster
demotion = player.league == model.League.Grandmaster
if promotion == demotion:
logger.warning(
'Logical error in GM promotion/'
'demotion detection.')
player.ladder_joined = data['joined']
player.ladder_id = data['ladder_id']
player.league = data['league']
self.db_session.commit()
logger.info(f"{player.id}: GM promotion/demotion.")
else:
if data['league'] < player.league:
                    logger.warning('Logical error in promotion detection.')
else:
logger.info(f"{player.id}: Promotion "
f"to ladder {data['ladder_id']}!")
else:
missing['Win'] -= player.wins
missing['Loss'] -= player.losses
new = player.mmr == 0
missing['Total'] = missing['Win'] + missing['Loss']
if (missing['Total']) > 0:
logger.info(
'{player}: {Total} new matches found!'.format(
player=player.id, **missing))
return missing, new
async def get_player_with_race(self, player, ladder_data):
"""Get the player with the race present in the ladder data."""
if player.ladder_id == 0:
player.race = ladder_data['race']
correct_player = player
elif player.race != ladder_data['race']:
correct_player = self.db_session.query(model.Player).filter(
model.Player.player_id == player.player_id,
model.Player.realm == player.realm,
model.Player.server == player.server,
model.Player.race == ladder_data['race']).scalar()
if not correct_player:
correct_player = model.Player(
player_id=player.player_id,
realm=player.realm,
server=player.server,
race=ladder_data['race'],
ladder_id=0)
self.db_session.add(correct_player)
self.db_session.commit()
self.db_session.refresh(correct_player)
else:
correct_player = player
return correct_player
def delete_old_logs_and_runs(self):
""" Delete old logs and runs from database."""
deletions = 0
for log_entry in self.db_session.query(model.Log).\
order_by(model.Log.datetime.desc()).\
offset(self.cache_logs).all():
self.db_session.delete(log_entry)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{deletions} old log entries were deleted!")
deletions = 0
for run in self.db_session.query(model.Run).\
order_by(model.Run.datetime.desc()).\
offset(self.cache_runs).all():
self.db_session.delete(run)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{deletions} old run logs were deleted!")
async def run(self):
"""Run the sc2monitor."""
start_time = time.time()
logger.debug("Starting job...")
await self.update_seasons()
unique_group = (model.Player.player_id,
model.Player.realm, model.Player.server)
tasks = []
players = self.db_session.query(model.Player).distinct(
*unique_group).group_by(*unique_group).all()
for player in players:
tasks.append(asyncio.create_task(self.query_player(player)))
results = await asyncio.gather(*tasks, return_exceptions=True)
for key, result in enumerate(results):
try:
if result is not None:
raise result
except Exception:
logger.exception(
'The following exception was'
                    f' raised while querying player {players[key].id}:')
self.delete_old_logs_and_runs()
duration = time.time() - start_time
self.db_session.add(
model.Run(duration=duration,
api_requests=self.sc2api.request_count,
api_retries=self.sc2api.retry_count,
warnings=self.handler.warnings,
errors=self.handler.errors))
self.db_session.commit()
logger.debug(f"Finished job performing {self.sc2api.request_count}"
f" api requests ({self.sc2api.retry_count} retries)"
f" in {duration:.2f} seconds.")
```
#### File: sc2monitor/sc2monitor/sc2api.py
```python
import asyncio
import logging
import re
from datetime import datetime, timedelta
from aiohttp import BasicAuth
from aiohttp.client_exceptions import ClientResponseError, ContentTypeError
import sc2monitor.model as model
logger = logging.getLogger(__name__)
class SC2API:
"""Wrapper for the SC2 api."""
def __init__(self, controller):
"""Init the sc2 api."""
self._controller = controller
try:
self._session = self._controller.http_session
except AttributeError:
self._session = None
self._key = ''
self._secret = ''
self._access_token = ''
self._access_token_checked = False
self.read_config()
try:
self._access_token_lock = asyncio.Lock()
except RuntimeError:
self._access_token_lock = None
self.request_count = 0
self.retry_count = 0
self._precompile()
def _precompile(self):
"""Precompile regular expression for bnet urls."""
self._p1 = re.compile(
r'^https?:\/\/starcraft2.com\/(?:\w+-\w+\/)?'
r'profile\/([1-5])\/([1-2])\/(\d+)\/?',
re.IGNORECASE)
self._p2 = re.compile(
r'^https?:\/\/(eu|us).battle.net\/sc2\/\w+\/'
r'(?:\w+\/)*profile\/(\d+)\/([1-2])\/\w+\/?',
re.IGNORECASE)
def read_config(self):
"""Read the api key and secret from the config."""
self._key = self._controller.get_config(
'api_key', raise_key_error=False)
self._secret = self._controller.get_config(
'api_secret', raise_key_error=False)
new_token = self._controller.get_config(
'access_token', raise_key_error=False)
if self._access_token != new_token:
self._access_token = new_token
self._access_token_checked = False
async def check_access_token(self, token):
"""Check if the access token is valid for at least an hour."""
async with self._session.get(
'https://eu.battle.net/oauth/check_token',
params={'token': token}) as resp:
self.request_count += 1
valid = resp.status == 200
if valid:
json = await resp.json()
exp = datetime.fromtimestamp(json['exp'])
valid = valid and exp - datetime.now() >= timedelta(hours=1)
self._access_token_checked = valid
return self._access_token_checked
async def get_access_token(self):
"""Get an valid access token."""
async with self._access_token_lock:
if (not self._access_token
or (not self._access_token_checked
and not await self.check_access_token(
self._access_token))):
await self.receive_new_access_token()
return self._access_token
async def receive_new_access_token(self):
"""Receive a new acces token vai oauth."""
data, status = await self._perform_api_post_request(
'https://eu.battle.net/oauth/token',
auth=BasicAuth(
self._key, self._secret),
params={'grant_type': 'client_credentials'})
if status != 200:
raise InvalidApiResponse(status)
self._access_token = data.get('access_token')
self._access_token_checked = True
self._controller.set_config('access_token', self._access_token)
logger.info('New access token received.')
def parse_profile_url(self, url):
"""Parse a profile URL for the server, the realm and the profile ID."""
m = self._p1.match(url)
if m:
server = model.Server(int(m.group(1)))
realmID = int(m.group(2))
profileID = int(m.group(3))
else:
m = self._p2.match(url)
if m:
server = model.Server(2 if m.group(1).lower() == 'eu' else 1)
profileID = int(m.group(2))
realmID = int(m.group(3))
else:
raise ValueError('Invalid profile url {}'.format(url))
return server, realmID, profileID
async def get_season(self, server: model.Server):
"""Collect the current season info."""
api_url = ('https://eu.api.blizzard.com/sc2/'
f'ladder/season/{server.id()}')
payload = {'locale': 'en_US',
'access_token': await self.get_access_token()}
data, status = await self._perform_api_request(api_url, params=payload)
if status != 200:
raise InvalidApiResponse(f'{status}: {api_url}')
return model.Season(
season_id=data.get('seasonId'),
number=data.get('number'),
year=data.get('year'),
server=server,
start=datetime.fromtimestamp(int(data.get('startDate'))),
end=datetime.fromtimestamp(int(data.get('endDate')))
)
async def get_metadata(self, player: model.Player):
"""Collect meta data for a player."""
return await self._get_metadata(
player.server, player.realm, player.player_id)
async def get_ladders(self, player: model.Player):
"""Collect all 1v1 ladders where a player is ranked."""
return await self._get_ladders(
player.server, player.realm, player.player_id)
async def get_ladder_data(self, player: model.Player, ladder_id):
"""Collect data about a player's ladder."""
async for data in self._get_ladder_data(
player.server, player.realm, player.player_id, ladder_id):
yield data
async def get_match_history(self, player: model.Player):
"""Collect match history of a player."""
return await self._get_match_history(
player.server, player.realm, player.player_id)
async def _get_ladders(self, server: model.Server,
realmID, profileID, scope='1v1'):
"""Collect all ladder of a scope where a player is ranked."""
api_url = ('https://eu.api.blizzard.com/sc2/'
f'profile/{server.id()}/{realmID}/{profileID}/'
'ladder/summary')
payload = {'locale': 'en_US',
'access_token': await self.get_access_token()}
data, status = await self._perform_api_request(api_url, params=payload)
if status != 200:
raise InvalidApiResponse(f'{status}: {api_url}')
data = data.get('allLadderMemberships', [])
ladders = set()
for ladder in data:
if ladder.get('localizedGameMode', '').find('1v1') != -1:
ladder_id = ladder.get('ladderId')
if ladder_id not in ladders:
ladders.add(ladder_id)
return ladders
async def _get_metadata(self, server: model.Server,
realmID, profileID):
"""Collect a player's meta data."""
api_url = ('https://eu.api.blizzard.com/sc2/'
f'metadata/profile/{server.id()}/{realmID}/{profileID}')
payload = {'locale': 'en_US',
'access_token': await self.get_access_token()}
data, status = await self._perform_api_request(api_url, params=payload)
if status != 200:
raise InvalidApiResponse(f'{status}: {api_url}')
return data
async def _get_ladder_data(self, server: model.Server,
realmID, profileID, ladderID):
"""Collect data of a specific player's ladder."""
api_url = ('https://eu.api.blizzard.com/sc2/profile/'
f'{server.id()}/{realmID}/{profileID}/ladder/{ladderID}')
payload = {'locale': 'en_US',
'access_token': await self.get_access_token()}
data, status = await self._perform_api_request(api_url, params=payload)
if status != 200:
raise InvalidApiResponse(f'{status}: {api_url}')
league = model.League.get(data.get('league'))
found_idx = -1
found = 0
used = set()
for meta_data in data.get('ranksAndPools'):
mmr = meta_data.get('mmr')
try:
idx = meta_data.get('rank') - 1
team = data.get('ladderTeams')[idx]
player = team.get('teamMembers')[0]
used.add(idx)
if (int(player.get('id')) != profileID
or int(player.get('realm')) != realmID):
raise InvalidApiResponse(api_url)
except (IndexError, InvalidApiResponse):
found = False
for team_idx in range(
found_idx + 1, len(data.get('ladderTeams'))):
team = data.get('ladderTeams')[team_idx]
player = team.get('teamMembers')[0]
if (team_idx not in used):
used.add(team_idx)
if (int(player.get('id')) == profileID
and int(player.get('realm')) == realmID):
found_idx = team_idx
found = True
break
if not found:
raise InvalidApiResponse(api_url)
if mmr != team.get('mmr'):
logger.debug(
f'{api_url}: MMR in ladder request'
f" does not match {mmr} vs {team.get('mmr')}.")
mmr = team.get('mmr', mmr)
race = player.get('favoriteRace')
games = int(team.get('wins')) + int(team.get('losses'))
if mmr is None:
raise InvalidApiResponse(api_url)
yield {
'mmr': int(mmr),
'race': model.Race.get(race),
'games': games,
'wins': int(team.get('wins')),
'losses': int(team.get('losses')),
'name': player.get('displayName'),
'joined': datetime.fromtimestamp(team.get('joinTimestamp')),
'ladder_id': int(ladderID),
'league': league}
async def _get_match_history(self, server: model.Server,
realmID, profileID, scope='1v1'):
"""Collect matches of a specific scope from the match history."""
api_url = ('https://eu.api.blizzard.com/sc2/legacy/profile/'
f'{server.id()}/{realmID}/{profileID}/matches')
payload = {'locale': 'en_US',
'access_token': await self.get_access_token()}
data, status = await self._perform_api_request(api_url, params=payload)
if status != 200:
raise InvalidApiResponse(f'{status}: {api_url}')
match_history = []
for match in data.get('matches', []):
if match['type'] == scope:
match_data = {
'result': model.Result.get(match['decision']),
'datetime': datetime.fromtimestamp(match['date'])}
match_history.append(match_data)
return match_history
async def _perform_api_post_request(self, url, **kwargs):
"""Perform a generic api request (including retries)."""
error = ''
json = {}
max_retries = 5
for retries in range(max_retries):
async with self._session.post(url, **kwargs) as resp:
self.request_count += 1
status = resp.status
if resp.status == 504:
error = 'API timeout'
self.retry_count += 1
continue
try:
resp.raise_for_status()
except ClientResponseError:
error = f'{resp.status}: {resp.reason}'
continue
try:
json = await resp.json()
except ContentTypeError:
error = 'Unable to decode JSON'
self.retry_count += 1
status = 0
continue
json['request_datetime'] = datetime.now()
break
if retries == max_retries - 1 and error:
logger.warning(error)
return json, status
async def _perform_api_request(self, url, **kwargs):
"""Perform a generic api request (including retries)."""
error = ''
json = {}
max_retries = 5
for retries in range(max_retries):
async with self._session.get(url, **kwargs) as resp:
self.request_count += 1
status = resp.status
if resp.status == 504:
error = 'API timeout'
self.retry_count += 1
continue
try:
resp.raise_for_status()
except ClientResponseError:
error = f'{resp.status}: {resp.reason}'
continue
try:
json = await resp.json()
except ContentTypeError:
error = 'Unable to decode JSON'
self.retry_count += 1
status = 0
continue
json['request_datetime'] = datetime.now()
break
if retries == max_retries - 1 and error:
logger.warning(error)
return json, status
class InvalidApiResponse(Exception):
"""Invalid API Response exception."""
def __init__(self, api_url):
"""Init the InvalidApiResponse exception."""
self.api_url = api_url
def __str__(self):
"""Return URL of invalid api request."""
return repr(self.api_url)
``` |
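A hedged usage sketch for `SC2API` (not from the repository): the class only needs a controller exposing `http_session`, `get_config`, and `set_config`, so a minimal stub is enough to exercise it; the credentials and profile URL below are placeholders.
```python
import asyncio
import aiohttp
from sc2monitor.sc2api import SC2API  # assumed import path

class StubController:
    """Minimal controller satisfying SC2API's assumed interface."""
    def __init__(self, session, config):
        self.http_session = session
        self._config = config

    def get_config(self, key, raise_key_error=True):
        if raise_key_error and key not in self._config:
            raise KeyError(key)
        return self._config.get(key, '')

    def set_config(self, key, value):
        self._config[key] = value

async def main():
    async with aiohttp.ClientSession() as session:
        config = {'api_key': 'KEY', 'api_secret': 'SECRET', 'access_token': ''}
        api = SC2API(StubController(session, config))
        # URL parsing needs no network access:
        server, realm, profile = api.parse_profile_url(
            'https://starcraft2.com/en-us/profile/2/1/1234567')
        print(server, realm, profile)

asyncio.run(main())
```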
{
"source": "2-propanol/aries_controller",
"score": 4
} |
#### File: 2-propanol/aries_controller/example.py
```python
from time import sleep
from aries import Aries
def main():
    # Try to connect to ARIES (retry up to 3 times, waiting 2 seconds between attempts)
for i in range(3):
try:
print("Trying 192.168.1.20:12321.")
stage = Aries()
except ConnectionError as err:
            # A ConnectionError is raised when the connection fails
print(err)
sleep(2)
else:
print(f"connected to 192.168.1.20:12321.")
break
else:
        # All 3 connection attempts failed (the loop ended without a break)
print("connection failed.")
return 1
    stage.speed = (5, 5, 5, 5)  # drive at speed 5
    stage.position = (0, 90, 0, 20)
    stage.sleep_until_stop()  # wait until the stage stops
x, y, _, _ = stage.position
def direction(int_val):
if int_val % 2 == 0:
return 1
else:
return -1
    # Specify positions by pulse value
for i in range(4):
x = 90 * direction(i)
stage.position = (x, y, 0, 20)
stage.sleep_until_stop()
for _ in range(10):
print(f"shot {x},{y}")
x += -20 * direction(i)
stage.position = (x, y, 0, 20)
stage.sleep_until_stop()
y -= 30
stage.position = (x, y, 0, 20)
print("reseting stage position")
stage.reset()
stage.sleep_until_stop()
return 0
if __name__ == "__main__":
main()
``` |
{
"source": "2-propanol/BTF_extractor",
"score": 2
} |
#### File: 2-propanol/BTF_extractor/build.py
```python
import platform
from setuptools import Extension
import numpy
from Cython.Build import cythonize
compile_args = []
link_args = []
pf = platform.system()
if pf == "Windows":
# for MSVC
compile_args = ["/std:c++14", "/DNOMINMAX", "/O2", "/openmp"]
elif pf == "Darwin":
# for clang
compile_args = ["-std=c++14", "-O2", "-march=native", "-Xpreprocessor", "-fopenmp"]
link_args = ["-lomp"]
elif pf == "Linux":
# for gcc
compile_args = ["-std=c++14", "-Ofast", "-march=native", "-fopenmp"]
link_args = ["-fopenmp"]
ext_modules = [
Extension(
name="ubo2014_cy",
sources=["btf_extractor/ubo2014.pyx"],
include_dirs=[numpy.get_include(), "btf_extractor/c_ext"],
define_macros=[("BTF_IMPLEMENTATION", "1"), ("NPY_NO_DEPRECATED_API", "1")],
extra_compile_args=compile_args,
extra_link_args=link_args,
language="c++",
)
]
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{"ext_modules": cythonize(ext_modules)}
)
return setup_kwargs
if __name__ == "__main__":
build({})
``` |
{
"source": "2-propanol/btfnpz-helper",
"score": 3
} |
#### File: btfnpz-helper/btf_helper/btfzip.py
```python
from collections import Counter
from decimal import Decimal
from sys import stderr
from typing import Tuple
from zipfile import ZipFile
import cv2
import numpy as np
from simplejpeg import decode_jpeg
# PEP484 -- Type Hints:
# Type Definition Syntax:
# The numeric tower:
# when an argument is annotated as having type `float`,
# an argument of type `int` is acceptable
class Btfzip:
"""画像ファイルを格納したzipファイルから角度と画像を取り出す(小数点角度と画像拡張子指定対応)
角度は全て度数法(degree)を用いている。
zipファイルに含まれる角度情報の順番は保証せず、並べ替えもしない。
`angles_set`には`list`ではなく、順序の無い`set`を用いている。
画像の実体はopencvと互換性のあるndarray形式(BGR, channels-last)で出力する。
zipファイル要件:
f"tl{float}{angle_sep}pl{float}{angle_sep}tv{float}{angle_sep}pv{float}.{file_ext}"
を格納している。
例) "tl20.25_pl10_tv11.5_pv0.exr"
Attributes:
zip_filepath (str): コンストラクタに指定したzipファイルパス。
angles_set (set[tuple[float,float,float,float]]):
zipファイルに含まれる画像の角度条件の集合。
Example:
>>> btf = Btfzip("Colorchecker.zip")
>>> angles_list = list(btf.angles_set)
>>> image = btf.angles_to_image(*angles_list[0])
>>> print(image.shape)
(256, 256, 3)
>>> print(angles_list[0])
(0, 0, 0, 0)
"""
def __init__(
self, zip_filepath: str, file_ext: str = ".exr", angle_sep: str = " "
) -> None:
"""使用するzipファイルを指定する
指定したzipファイルに角度条件の重複がある場合、
何が重複しているか表示し、`RuntimeError`を投げる。
"""
self.zip_filepath = zip_filepath
self.__z = ZipFile(zip_filepath)
        # NOTE: the ARIES 4-axis stage has a resolution of 0.001 degrees
        self.DECIMAL_PRECISION = Decimal("1E-3")
        # File paths are unique, so a plain set is fine for `filepath_set`
filepath_set = {path for path in self.__z.namelist() if path.endswith(file_ext)}
self.__angles_vs_filepath_dict = {
self._filename_to_angles(path, angle_sep): path for path in filepath_set
}
self.angles_set = frozenset(self.__angles_vs_filepath_dict.keys())
        # If there are duplicated angle conditions, find out which ones
        if len(filepath_set) != len(self.angles_set):
            angles_list = [
                self._filename_to_angles(path, angle_sep)
                for path in filepath_set
            ]
angle_collection = Counter(angles_list)
for angles, counter in angle_collection.items():
if counter > 1:
print(
f"[BTF-Helper] '{self.zip_filepath}' has"
+ f"{counter} files with condition {angles}.",
file=stderr,
)
raise RuntimeError(f"'{self.zip_filepath}' has duplicated conditions.")
if file_ext == ".jpg" or file_ext == ".jpeg":
self.angles_to_image = self._angles_to_image_simplejpeg
else:
self.angles_to_image = self._angles_to_image_cv2
def _filename_to_angles(
self, filename: str, sep: str
) -> Tuple[Decimal, Decimal, Decimal, Decimal]:
"""ファイル名(orパス)から角度(`Decimal`)のタプル(`tl`, `pl`, `tv`, `pv`)を取得する"""
angles = filename.split("/")[-1][:-4].split(sep)
try:
tl = Decimal(angles[0][2:]).quantize(self.DECIMAL_PRECISION)
pl = Decimal(angles[1][2:]).quantize(self.DECIMAL_PRECISION)
tv = Decimal(angles[2][2:]).quantize(self.DECIMAL_PRECISION)
pv = Decimal(angles[3][2:]).quantize(self.DECIMAL_PRECISION)
except ValueError as e:
raise ValueError("invalid angle:", angles) from e
return (tl, pl, tv, pv)
def _angles_to_image_cv2(
self, tl: float, pl: float, tv: float, pv: float
) -> np.ndarray:
"""`tl`, `pl`, `tv`, `pv`の角度条件の画像をndarray形式で返す
`filename`が含まれるファイルが存在しない場合は`ValueError`を投げる。
"""
key = (
Decimal(tl).quantize(self.DECIMAL_PRECISION),
Decimal(pl).quantize(self.DECIMAL_PRECISION),
Decimal(tv).quantize(self.DECIMAL_PRECISION),
Decimal(pv).quantize(self.DECIMAL_PRECISION),
)
filepath = self.__angles_vs_filepath_dict.get(key)
if not filepath:
raise ValueError(
f"Condition {key} does not exist in '{self.zip_filepath}'."
)
with self.__z.open(filepath) as f:
return cv2.imdecode(
np.frombuffer(f.read(), np.uint8),
cv2.IMREAD_ANYDEPTH + cv2.IMREAD_ANYCOLOR,
)
def _angles_to_image_simplejpeg(
self, tl: float, pl: float, tv: float, pv: float
) -> np.ndarray:
"""`tl`, `pl`, `tv`, `pv`の角度条件の画像をndarray形式で返す
`filename`が含まれるファイルが存在しない場合は`ValueError`を投げる。
"""
key = (
Decimal(tl).quantize(self.DECIMAL_PRECISION),
Decimal(pl).quantize(self.DECIMAL_PRECISION),
Decimal(tv).quantize(self.DECIMAL_PRECISION),
Decimal(pv).quantize(self.DECIMAL_PRECISION),
)
filepath = self.__angles_vs_filepath_dict.get(key)
if not filepath:
raise ValueError(
f"Condition {key} does not exist in '{self.zip_filepath}'."
)
with self.__z.open(filepath) as f:
return decode_jpeg(f.read(), colorspace="BGR")
``` |
{
"source": "2ps/djenga",
"score": 3
} |
#### File: djenga/animal_pairs/utils.py
```python
import random
from .dictionary import ADJECTIVES, ANIMALS
__all__ = [ 'animal_pair' ]
def animal_pair():
return u'%s-%s' % (
random.choice(ADJECTIVES),
random.choice(ANIMALS),
)
```
#### File: djenga/currency/rounding.py
```python
from decimal import Decimal
from decimal import ROUND_FLOOR
from decimal import ROUND_DOWN
from decimal import ROUND_UP
from decimal import ROUND_HALF_UP
from typing import TypeVar
__all__ = [
'currency_round_down',
'currency_round_up',
'currency_round_up4',
'currency_round_half_up',
'currency_round_half_up4',
'round_up',
'round_down',
'round_half_up',
'round_floor',
]
def currency_round_down(amount):
"""
Useful helper function that takes a numerical amount, converts
it to a decimal.Decimal object and rounds it down to the nearest
cent.
"""
amount = amount or 0.00
amount = Decimal(amount)
amount = amount.quantize(Decimal(u'.0001'), rounding=ROUND_FLOOR)
amount = amount.quantize(Decimal(u'.01'), rounding=ROUND_DOWN)
return amount
def currency_round_up(amount):
"""
Useful helper function that takes a numerical amount, converts
it to a decimal.Decimal object and rounds it up to the nearest
cent.
"""
amount = amount or 0.00
amount = Decimal(amount)
amount = amount.quantize(Decimal(u'.0001'), rounding=ROUND_FLOOR)
amount = amount.quantize(Decimal(u'.01'), rounding=ROUND_UP)
return amount
def currency_round_up4(amount):
"""
Useful helper function that takes a numerical amount, converts
it to a decimal.Decimal object and rounds it up to the nearest
one-hundredth of a cent.
"""
amount = amount or 0.00
amount = Decimal(amount)
amount = amount.quantize(Decimal(u'.0001'), rounding=ROUND_UP)
return amount
def currency_round_half_up(amount):
"""
Useful helper function that takes a numerical amount, converts
it to a decimal.Decimal object and rounds it up to the nearest
cent using half-adjust rounding. Unlike `currency_round_up`, this
function will use half-adjust rounding for the cents place after
floor rounding the ten thousandths place.
"""
amount = amount or 0.00
amount = Decimal(amount)
amount = amount.quantize(Decimal(u'.0001'), rounding=ROUND_FLOOR)
amount = amount.quantize(Decimal(u'.01'), rounding=ROUND_HALF_UP)
return amount
def currency_round_half_up4(amount):
"""
Useful helper function that takes a numerical amount, converts
it to a decimal.Decimal object and rounds it up to the nearest
one-hundredth of a cent using half-adjust rounding. Unlike
`currency_round_up4`, this function will use half-adjust rounding
for the ten thousandths place.
"""
amount = amount or 0.00
amount = Decimal(amount)
amount = amount.quantize(Decimal(u'.0001'), rounding=ROUND_HALF_UP)
return amount
DecimalLike = TypeVar('DecimalLike', str, float, Decimal)
def q_round(
amount: DecimalLike,
places: int = 2,
rounding=ROUND_HALF_UP) -> Decimal:
"""
Useful helper function that takes a numerical amount, converts
it to a decimal.Decimal object and rounds it up to the nearest
one-hundredth of a cent using half-adjust rounding. Unlike
`currency_round_up4`, this function will use half-adjust rounding
for the ten thousandths place.
"""
amount = amount or Decimal(0.00)
if not isinstance(amount, Decimal):
amount = Decimal(amount)
# This version is slower according to timeit
# q = Decimal('1') / Decimal(10 ** places)
# This version, using strings, is actually faster
if places > 0:
q = '.%s1' % ('0' * (places - 1),)
else:
q = '1'
q = Decimal(q)
amount = amount.quantize(q, rounding=rounding)
return amount
def round_up(amount: DecimalLike, places: int = 2) -> Decimal:
"""
Rounds amount up, to a specified number of places
:type amount: float | str | decimal.Decimal
:type places: int
:rtype: Decimal
>>> round_up(1.241, 1)
Decimal('1.3')
"""
return q_round(amount, places, ROUND_UP)
def round_down(amount, places=2):
"""
Rounds amount down, to a specified number of places
:type amount: float | str | decimal.Decimal
:type places: int
:rtype: Decimal
>>> round_down(1.995555, 4)
Decimal('1.9955')
>>> round_down('58.12', 0)
Decimal('58')
"""
return q_round(amount, places, ROUND_DOWN)
def round_floor(amount: DecimalLike, places: int = 2) -> Decimal:
"""
Floor rounds amount to a specified number of places
:type amount: float | str | decimal.Decimal
:type places: int
:rtype: Decimal
>>> round_floor(1.995555, 4)
Decimal('1.9955')
>>> round_floor('58.12', 0)
Decimal('58')
"""
return q_round(amount, places, ROUND_FLOOR)
def round_half_up(amount: DecimalLike, places: int = 2) -> Decimal:
"""
Rounds amount, to a specified number of places, using half-rounding
:type amount: float | str | decimal.Decimal
:type places: int
:rtype: Decimal
    >>> round_half_up(1.995555, 4)
    Decimal('1.9956')
>>> round_half_up(1.9911, 3)
Decimal('1.991')
>>> round_half_up(1.995, 2)
Decimal('2.00')
"""
return q_round(amount, places, ROUND_HALF_UP)
```
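To make the string-built quantum in `q_round` concrete, here is a short demonstration of how `places` maps to a `Decimal` quantum (using a string literal for the input to avoid float noise):
```python
from decimal import Decimal, ROUND_HALF_UP

value = Decimal('19.955555')
for places in (0, 2, 4):
    # Same construction as q_round: '.01' for 2 places, '1' for 0 places.
    q = '.%s1' % ('0' * (places - 1),) if places > 0 else '1'
    print(places, q, value.quantize(Decimal(q), rounding=ROUND_HALF_UP))
# 0 1 20
# 2 .01 19.96
# 4 .0001 19.9556
```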
#### File: djenga/db/uuidfield.py
```python
from uuid import UUID
from django.db.backends.mysql.base import django_conversions
from django.db import models
def prep_uuid(
o,
*args # pylint: disable=unused-argument
):
return '0x%s' % o.hex
django_conversions.update({
UUID: prep_uuid
})
class UuidField(models.fields.Field):
"""
A Uuid field for MySQL (and MySQL only!) that converts to
a `binary(16)` on the DB backend. Unlike many of the implementations
of binary fields out there (I'm looking at you django 1.6), this
field does allow the user to do a lookup based on the UUID.
The related value type of a Uuid field is the `uuid.UUID` class
from python.
"""
def to_python(self, value):
if isinstance(value, UUID):
return value
if value is None:
return None
return UUID(bytes=value)
def get_prep_value(self, value):
if value is None:
return 'null'
if isinstance(value, UUID):
return value
try:
p = UUID(value)
return p
except ValueError:
raise TypeError(
'A %s cannot be used in a query involving a UUID' % (
value.__class__.__name__,
)
)
def get_prep_lookup(self, lookup_type, value):
if lookup_type not in { 'exact', 'iexact', 'isnull', 'in'}:
raise TypeError('Binary Fields do not support %s' % lookup_type)
return self.get_prep_value(value)
def db_type(self, connection):
supported_engines = {
'django.db.backends.mysql',
'django_mysqlpool.backends.mysqlpool' }
if connection.settings_dict['ENGINE'] in supported_engines:
return 'binary(16)'
raise Exception('MySql is the only defined engine for UuidField.')
```
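A hypothetical model using `UuidField` (not from the repository; the import path is assumed). Note that `db_type` restricts the field to the MySQL backends listed above:
```python
from uuid import uuid4
from django.db import models
from djenga.db.uuidfield import UuidField  # assumed import path

class ApiToken(models.Model):
    """Hypothetical model: stored as binary(16), read back as uuid.UUID."""
    token = UuidField(default=uuid4, unique=True)
    label = models.CharField(max_length=64)

# Lookups accept UUID objects or UUID strings (see get_prep_value):
# ApiToken.objects.get(token=some_uuid); unsupported lookups raise TypeError.
```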
#### File: djenga/encryption/kms.py
```python
from .helpers import _as_bytes
from .helpers import b64_str
from .helpers import from_b64_str
from .helpers import _get_client
from .helpers import _prefix_alias
def encrypt_bytes(
plain_text: bytes
, alias: str
, region: str = None
, profile: str = None) -> bytes:
client = _get_client(region, profile)
alias = _prefix_alias(alias)
data = client.encrypt(KeyId=alias, Plaintext=plain_text)
return data['CiphertextBlob']
def decrypt_bytes(
cipher_text: bytes
, region: str = None
, profile: str = None) -> bytes:
client = _get_client(region, profile)
data = client.decrypt(CiphertextBlob=cipher_text)
return data['Plaintext']
def encrypt(plain_text, alias, region: str = None, profile: str = None) -> str:
plain_text = _as_bytes(plain_text)
data = encrypt_bytes(plain_text, alias, region, profile)
return b64_str(data)
def decrypt(cipher_text: str, region: str = None, profile: str = None):
cipher_text = from_b64_str(cipher_text)
data = decrypt_bytes(cipher_text, region, profile)
return data.decode('utf-8')
```
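A hedged round-trip example (requires AWS credentials and an existing KMS key; the alias and region below are placeholders):
```python
from djenga.encryption.kms import encrypt, decrypt  # assumed import path

# 'my-app' is a placeholder alias; _prefix_alias (defined in helpers, not
# shown here) is assumed to normalize it into the 'alias/...' form KMS expects.
cipher_text = encrypt('s3cret-value', alias='my-app', region='us-east-1')
print(cipher_text)  # base64-encoded ciphertext, safe to store as text
print(decrypt(cipher_text, region='us-east-1'))  # -> 's3cret-value'
```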
#### File: djenga/encryption/kms_wrap.py
```python
from argparse import ArgumentParser
import os
import sys
from .kms_wrapped import encrypt
def get_parser():
parser = ArgumentParser()
parser.add_argument(
'-r', '--region',
dest='region',
metavar='region_name',
help='aws region name, e.g., us-east-2',
default=None,
)
parser.add_argument(
'-p', '--profile',
dest='profile',
metavar='profile_name',
help='the name of the profile to use when connecting to aws',
default=None,
)
parser.add_argument(
'-k', '--key',
dest='key',
metavar='<id or alias>',
help='the name of the key to use for encryption',
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
# do not print prompt if input is being piped
if sys.stdin.isatty():
print('Enter plaintext: ', end='', file=sys.stderr)
sys.stderr.flush()
stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0)
plain_text = stdin.readline()
plain_text = plain_text.decode('utf-8').rstrip()
value = encrypt(
plain_text,
alias=args.key,
profile=args.profile,
region=args.region)
print(f'{value}')
if __name__ == "__main__":
main()
```
#### File: djenga/loggers/filters.py
```python
import logging
__all__ = [
'ZeepHttpsFilter',
'CeleryRestoringFilter',
]
class ZeepHttpsFilter(logging.Filter):
def filter(self, record):
"""
:type record: logging.LogRecord
:rtype: bool
"""
result = record.name.startswith('zeep.wsdl.bindings')
result &= record.levelno == logging.WARNING
result &= record.msg.startswith('Forcing soap:address location')
return not result
class CeleryRestoringFilter(logging.Filter):
def filter(self, record):
"""
:type record: logging.LogRecord
:rtype: bool
"""
result = record.name.startswith('celery.redirected')
result &= record.msg.startswith('Restoring')
result &= record.msg.endswith('unacknowledged message(s)')
return not result
```
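These filters are installed through standard `logging` configuration. A sketch of a Django `LOGGING` dict wiring both onto a console handler (the import path follows the file header above):
```python
# settings.py (sketch)
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'zeep_https': {'()': 'djenga.loggers.filters.ZeepHttpsFilter'},
        'celery_restoring': {'()': 'djenga.loggers.filters.CeleryRestoringFilter'},
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'filters': ['zeep_https', 'celery_restoring'],
        },
    },
    'root': {'handlers': ['console'], 'level': 'INFO'},
}
```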
#### File: management/commands/debug_celery.py
```python
import importlib
from django.core.management.base import BaseCommand
from django.conf import settings
class Command(BaseCommand):
@staticmethod
def queues():
routes = getattr(settings, 'CELERY_TASK_ROUTES', None)
routes = routes or {}
queues = { 'celery' }
if isinstance(routes, dict):
for x in routes.values():
queues.add(x['queue'])
else:
if isinstance(routes, (list, tuple)):
for route_set in routes:
for _, x in route_set:
queues.add(x['queue'])
return list(queues)
def handle(self, *args, **options):
if settings.DEBUG:
from celery import platforms
# print('root: %s' % (C_FORCE_ROOT,))
from celery.bin import worker
module = importlib.import_module(settings.DJENGA_CELERY_MODULE)
app = module.app
platforms.C_FORCE_ROOT = True
w = worker.worker(app)
w.run(loglevel='info', concurrency=1, queues=Command.queues())
else:
return "Sorry, I shouldn't be run in production mode (DEBUG=False)"
return 'Done.'
```
#### File: management/commands/emailcommand.py
```python
import logging
import socket
from django.core.management.base import BaseCommand
from ...mixins.loggingmixin import LoggingMixin
from ...email.helpers import send_html_email
from .commandlogginglevels import VerbosityLevels as V
class EmailCommand(BaseCommand, LoggingMixin):
help = u'%s%s%s%s%s' % (
u'djenga.management.command.EmailCommand is a ',
u'base class that provides useful functionality for ',
u'django management commands such as logging and ',
u'e-mail. Subclasses should override the _handle method ',
u'to implement actual commands.',
)
def _handle(self, *args, **options):
return 0
def handle(self, *args, **options):
self.verbosity = options.get(u'verbosity', 1)
self.logging_level = V.to_logging_level(self.verbosity)
return_value = self._handle(*args, **options)
self._send_email()
return return_value
def _send_email(self):
pass
def _send_html_email(self, html_template, context, text_template=None):
context[u'hostname'] = socket.gethostname()
context[u'summary_messages'] = self.log_map.get(logging.INFO, [])
context[u'info_messages'] = self.log_map.get(logging.DEBUG, [])
context[u'warning_messages'] = self.log_map.get(logging.WARNING, [])
context[u'error_messages'] = self.log_map.get(
logging.ERROR, []
) + self.log_map.get(
logging.CRITICAL, []
)
send_html_email(
subject=u'%s' % (self.__class__.__module__,),
template=html_template,
context=context,
text_template=text_template,
)
```
#### File: management/commands/statuscommand.py
```python
from abc import abstractmethod
import logging
import codecs
import sys
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections
from django.utils import timezone
from ...models import ManagementCommand
from ...models import CommandOutput
class StatusCommand(BaseCommand):
def set_verbosity(self, verbosity):
LEVELS = {
0: logging.CRITICAL,
1: logging.ERROR,
2: logging.WARNING,
3: logging.DEBUG,
}
self.verbosity = verbosity
self.logging_level = LEVELS[verbosity]
def create_parser(self, prog_name, subcommand, **kwargs):
parser = super().create_parser(prog_name, subcommand, **kwargs)
parser.add_argument(
'--last',
action='store_true',
dest='b_show_last',
default=False
)
return parser
def start_run(self):
ManagementCommand.objects.update_or_create(
name=self.command_name,
defaults={
'last_run': datetime.now(timezone.utc),
'status': 'running',
}
)
@property
def command_name(self):
name = self.__module__
return name.replace('.management.commands', '')
def end_run(self, success=True):
if self.current_line:
self.plain_log('\n')
connection = connections['default']
if connection.connection and not connection.is_usable():
connection.close()
q = ManagementCommand.objects.get(
name=self.command_name,
)
q.status = 'success' if success else 'error'
if success:
q.last_success = datetime.now(timezone.utc)
q.save()
CommandOutput.objects.create(
command=q,
output='\n'.join(self.output)
)
def __init__(self):
super(StatusCommand, self).__init__()
self.verbosity = 3 if settings.DEBUG else 1
"""@type: int"""
self.indent = 0
"""@type: int"""
self.logging_level = logging.DEBUG if settings.DEBUG else 1
self.output = []
self.print_level = True
self.stdout.ending = ''
self.stdout = codecs.getwriter('utf8')(self.stdout)
self.current_line = ''
def color_format(self, level, message):
level_colors = {
# Level and a pair of colors: first for the label,
# the rest for the text;
# the bolder color label can make
# them easier to spot in the console log.
logging.DEBUG: (33, 39),
# logging.TRACE: (147, 153),
logging.INFO: (43, 49),
logging.WARNING: (214, 226),
logging.ERROR: (196, 197),
logging.CRITICAL: (196, 197),
}.get(level, (33, 39))
# 256-color to give wider spectrum than just ANSI
color = "\033[38;5;{:d}m"
reset = "\033[0m"
# Pass any simple messages from internal things, like Django's
# runserver, without special formatting.
mp_levels = {
logging.INFO: u'INF',
logging.WARNING: u'WRN',
logging.ERROR: u'ERR',
logging.DEBUG: u'DBG',
logging.CRITICAL: u'CRT'
}
st_level = mp_levels[level]
level_prefix = '%s[%s] ' % (color.format(level_colors[0]), st_level)
return u'{level_prefix}{color_normal}{message}{reset}'.format(
level_prefix=level_prefix if self.print_level else '',
message=message,
color_normal=color.format(level_colors[1]),
reset=reset
)
def llog(self, logging_level, format_string, *args):
"""
@param logging_level:
50 = summary/critical
40 = error
30 = warning
20 = info
10 = debug
@return:
"""
LEVELS = {
logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG
}
if logging_level not in LEVELS:
logging_level = logging.DEBUG
message = format_string % args
if logging_level >= self.logging_level:
if self.stdout.isatty():
message = self.color_format(logging_level, message)
self.stdout.write(message)
else:
self.stdout.write(message)
self.stdout.write('\n')
self.add_message(message)
def add_message(self, message):
if self.current_line:
self.output.append((self.current_line + message).strip())
self.current_line = ''
elif message is not None:
self.output.append(message.strip())
def log(self, format_string, *args):
message = format_string % args
self.stdout.write(message)
self.stdout.write('\n')
self.add_message(message)
def color_log(self, fn, format_string, *args):
message = format_string % args
if message[-1] == '\n':
close_line = True
message = message[:-1]
else:
close_line = False
message = fn(message)
self.stdout.write(message)
if close_line:
self.stdout.write('\n')
self.add_message(message)
elif self.current_line:
self.current_line += message
else:
self.current_line = message
def plain_log(self, format_string, *args):
self.color_log(lambda x: x, format_string, *args)
def critical(self, format_string, *args):
self.llog(logging.CRITICAL, format_string, *args)
def debug(self, format_string, *args):
self.llog(logging.DEBUG, format_string, *args)
def info(self, format_string, *args):
self.llog(logging.INFO, format_string, *args)
def warning(self, format_string, *args):
self.llog(logging.WARNING, format_string, *args)
def error(self, format_string, *args):
self.llog(logging.ERROR, format_string, *args)
def exception(self, format_string, *args):
        p_type, p_exception, _ = sys.exc_info()
        self.llog(logging.ERROR, format_string, *args)
        self.llog(logging.ERROR, u'Exception message: %s', p_exception)
        self.llog(logging.ERROR, u'Exception type : %s', p_type)
        self.llog(logging.ERROR, u'Traceback\n%s', format_exc())
def show_last(self):
try:
p = CommandOutput.objects.filter(
command__name=self.command_name
).latest('id')
self.log('%s', p.output)
except CommandOutput.DoesNotExist:
self.log('This is the first run of %s', self.command_name)
def execute(self, *args, **options):
if options.get('b_show_last', False):
self.show_last()
return
success = False
try:
self.start_run()
super(StatusCommand, self).execute(*args, **options)
success = True
except: # noqa: 216
success = False
raise
finally:
self.end_run(success)
@abstractmethod
def handle(self, *args, **options):
pass
```
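A hypothetical subclass (not from the repository) showing the intended usage: `execute` wraps `handle` so each run is recorded in `ManagementCommand`/`CommandOutput`, and `--last` replays the previous run's captured output.
```python
# myapp/management/commands/sync_invoices.py (hypothetical)
from djenga.management.commands.statuscommand import StatusCommand  # assumed path

class Command(StatusCommand):
    help = 'Sync invoices from the billing provider'

    def handle(self, *args, **options):
        self.info('starting sync')
        self.debug('verbosity=%s', self.verbosity)
        # Any exception raised here makes end_run() store status='error'.
        self.log('done')

# manage.py sync_invoices         -> runs and records the output
# manage.py sync_invoices --last  -> prints the previous run's output
```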
#### File: djenga/mixins/loggingmixin.py
```python
import logging
import sys
from traceback import format_exc
from django.conf import settings
from django.core.management.base import OutputWrapper
class LoggingMixin:
verbosity = 3 if settings.DEBUG else 1
"""@type: int"""
indent = 0
"""@type: int"""
logging_level = logging.DEBUG if settings.DEBUG else 1
log_map = dict()
logging_initialized = False
print_level = True
def set_verbosity(self, verbosity):
LEVELS = {
0: logging.CRITICAL,
1: logging.ERROR,
2: logging.WARNING,
3: logging.DEBUG,
}
self.verbosity = verbosity
self.logging_level = LEVELS[verbosity]
def initialize_logging(self):
if not self.logging_initialized:
try:
self.stdout = OutputWrapper(self.stdout._out, ending='')
except AttributeError:
self.stdout = OutputWrapper(sys.stdout, ending='')
# self.stdout = codecs.getwriter('utf8')(self.stdout)
self.logging_initialized = True
def color_format(self, level, message):
level_colors = {
# Level and a pair of colors: first for the label,
# the rest for the text;
# the bolder color label can make them easier to spot
# in the console log.
logging.DEBUG: (33, 39),
# logging.TRACE: (147, 153),
logging.INFO: (43, 49),
logging.WARNING: (214, 226),
logging.ERROR: (196, 197),
logging.CRITICAL: (196, 197),
}.get(level, (33, 39))
# 256-color to give wider spectrum than just ANSI
color = "\033[38;5;{:d}m"
reset = "\033[0m"
# Pass any simple messages from internal things, like Django's
# runserver, without special formatting.
mp_levels = {
logging.INFO: u'INF',
logging.WARNING: u'WRN',
logging.ERROR: u'ERR',
logging.DEBUG: u'DBG',
logging.CRITICAL: u'CRT'
}
st_level = mp_levels[level]
level_prefix = '%s[%s] ' % (color.format(level_colors[0]), st_level)
return u'{level_prefix}{color_normal}{message}{reset}'.format(
level_prefix=level_prefix if self.print_level else '',
message=message,
color_normal=color.format(level_colors[1]),
reset=reset
)
def llog(self, logging_level, format_string, *args):
"""
@param logging_level:
50 = summary/critical
40 = error
30 = warning
20 = info
10 = debug
@return:
"""
LEVELS = {
logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG
}
if logging_level not in LEVELS:
logging_level = logging.DEBUG
message = format_string % args
if logging_level >= self.logging_level:
if hasattr(self, 'stdout'):
self.initialize_logging()
self.stdout.write(u' ' * self.indent)
if self.stdout.isatty():
self.stdout.write(self.color_format(
logging_level, message))
else:
self.stdout.write(message)
self.stdout.write('\n')
self.log_map.setdefault(logging_level, []).append(message)
def log(self, format_string, *args):
message = format_string % args
if hasattr(self, 'stdout'):
self.initialize_logging()
self.stdout.write(u' ' * self.indent)
self.stdout.write(message)
self.stdout.write('\n')
def critical(self, format_string, *args):
self.llog(logging.CRITICAL, format_string, *args)
def debug(self, format_string, *args):
self.llog(logging.DEBUG, format_string, *args)
def info(self, format_string, *args):
self.llog(logging.INFO, format_string, *args)
def warning(self, format_string, *args):
self.llog(logging.WARNING, format_string, *args)
def error(self, format_string, *args):
self.llog(logging.ERROR, format_string, *args)
def exception(self, format_string, *args):
p_type, p_exception, _ = sys.exc_info()
self.llog(logging.ERROR, format_string, *args)
self.llog(logging.ERROR, u'Exception message: %s', p_exception)
self.llog(logging.ERROR, u'Exception type : %s', p_type)
self.llog(logging.ERROR, u'Traceback\n%s', format_exc())
```
#### File: djenga/djenga_tests/celery.py
```python
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from celery.signals import after_setup_logger
from celery.signals import worker_process_init
from djenga.celery.tasks import DetailTask
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djenga_tests.settings')
class DjengaCelery(Celery):
def __init__(self, main=None, loader=None, backend=None,
amqp=None, events=None, log=None, control=None,
set_as_current=True, tasks=None, broker=None, include=None,
changes=None, config_source=None, fixups=None, task_cls=None,
autofinalize=True, namespace=None, strict_typing=True,
**kwargs):
from djenga.celery.tasks import DetailTask
from djenga.celery.backends import patch_aliases
patch_aliases()
super().__init__(main, loader, backend, amqp, events, log, control,
set_as_current, tasks, broker, include, changes,
config_source, fixups, task_cls=DetailTask,
autofinalize=autofinalize, namespace=namespace,
strict_typing=strict_typing, **kwargs)
def now(self):
"""Return the current time and date as a datetime."""
from datetime import datetime
return datetime.now(self.timezone)
app = DjengaCelery('djenga_tests')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
app.conf.broker_transport_options = {'visibility_timeout': 86400}
n = 0
@app.task(bind=True, steps=[
( 1, 'get logger',),
( 2, 'increment n',),
( 3, 'output debugging',), ]
)
def debug_task(self):
""":type self: djenga.celery.tasks.DetailTask"""
import logging
import time
self.start_step(1)
logger = logging.getLogger(__name__)
time.sleep(30)
self.end_step()
self.start_step(2)
global n
n += 1
time.sleep(45)
self.end_step()
self.start_step(3)
logger.info('request type: %s', type(self.request))
print('{0} Request: {1!r}'.format(n, self.request))
time.sleep(60)
self.end_step()
logger.info('details: ')
for x in self.request.details.values():
logger.info(' - %s [%s]', x, x.done)
@app.task(bind=True, base=DetailTask)
def error_task(self):
import logging
logger = logging.getLogger(__name__)
global n
n += 1
logger.info('request type: %s', type(self.request))
logger.error('{0} Request: {1!r}'.format(n, self.request))
def update_loglevel(*args, **kwargs):
app.log.redirect_stdouts(loglevel='INFO')
# it's not at all clear to me why these
# two signals work, or the correct timing at
# which to call the function to redirect the
# stdouts, but this worked, so I felt it
# was wise to just go with it . . .
after_setup_logger.connect(update_loglevel)
worker_process_init.connect(update_loglevel)
from djenga.celery.utils import auto_step
@auto_step(key=1)
def fly_to_the_moon(self):
pass
@auto_step(key=2)
def shrink_the_moon(self):
pass
@auto_step(key=3)
def grab_the_moon(self):
pass
@auto_step(key=4)
def sit_on_the_toilet(self):
pass
@app.task(bind=True, base=DetailTask, steps=[
(1, 'Fly to the moon'), (2, 'Shrink the moon'),
(3, 'Grab the moon'), (4, 'Sit on the toilet'),])
def steal_the_moon(self):
fly_to_the_moon(self)
shrink_the_moon(self)
grab_the_moon(self)
sit_on_the_toilet(self)
```
#### File: djenga_tests/integration_tests/env_test.py
```python
import os
from djenga.test import IntegrationTest
from djenga.core import ConfigBunch
class EnvTest(IntegrationTest):
def test_01(self):
config = ConfigBunch(
'env_test_01.yml',
'env_test_02.yml',
)
self.assert_equal(config.dbs.default.username, 'user')
self.assert_equal(config.dbs.default.host, 'localhost')
self.assert_equal(config.dbs.default.port, 3306)
self.assert_equal(config.dbs.default.password, '<PASSWORD>')
os.environ['DBS_DEFAULT_PASSWORD'] = 'overridden secret value'
env = config.env()
self.assert_equal(env('DBS_DEFAULT_PASSWORD'), 'overridden secret value')
self.assert_equal(env('DBS_DEFAULT_HOST'), 'localhost')
value = config.get('dbs.default.not_there', 'default')
self.assert_equal(value, 'default')
value = config.setdefault('dbs.default.not_there', 'new value')
self.assert_equal(value, 'new value')
config['dbs.default.second'] = 'old value'
self.assert_equal(config.dbs.default.second, 'old value')
value = config.setdefault('dbs.default.second', 'new value')
self.assert_equal(value, 'old value')
```
#### File: djenga/tasks/setup.py
```python
import os
import sys
from raft import task
__all__ = [
'deploy',
'setup',
'build',
]
@task
def deploy(ctx):
"""
Deploys djenga to pypi
Performs the following steps:
* bumps the version using `bumpversion`
* builds the djenga tarball and bdist_wheel
* pushes the build to pypi
"""
ctx.run('bumpversion patch')
build(ctx)
ctx.run('twine upload dist/*')
@task
def build(ctx):
ctx.run('rm -rf build djenga.egg-info')
ctx.run('rm -rf build dist')
ctx.run('python setup.py bdist_wheel')
ctx.run('python setup.py sdist')
def detect_ec2(ctx):
if os.path.exists('/sys/hypervisor/uuid'):
result = ctx.run('head -c 3 /sys/hypervisor/uuid')
return result.stdout.startswith('ec2')
return False
@task
def setup(ctx):
"""
Creates a virtual environment to begin working on djenga
"""
python = None
codebuild = os.environ.get('CODEBUILD_BUILD_ARN')
ec2 = detect_ec2(ctx)
if sys.platform in ('linux', 'darwin'):
result = ctx.run('which python3.6')
if result.ok:
x = '/env' if codebuild else '.'
python = os.path.join(x, 'bin/python')
ctx.run(f'python3.6 -m venv {x}')
elif sys.platform == 'win32':
if os.path.exists(r'c:\python36\python.exe'):
name = os.path.basename(os.getcwd())
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
python = os.path.join(home, 'Envs', name, 'Scripts', 'python.exe')
if not os.path.exists(python):
ctx.run(
f'powershell.exe mkvirtualenv {name} '
f'-python c:/python36/python.exe'
)
if python:
pip = f'{python} -m pip install'
if codebuild or ec2:
pip = f'{pip} --cache-dir ./.pip -q'
ctx.run(f'{pip} -U pip setuptools wheel invoke')
ctx.run(f'{pip} -r requirements.txt')
else:
print('no suitable python version found')
``` |
{
"source": "2qU24Tlb/PerfKitBenchmarker",
"score": 2
} |
#### File: perfkitbenchmarker/scripts/wait_for_command.py
```python
import errno
import fcntl
import optparse
import os
import shutil
import signal
import sys
import threading
import time
WAIT_TIMEOUT_IN_SEC = 120.0
WAIT_SLEEP_IN_SEC = 5.0
RETRYABLE_SSH_RETCODE = 255
def signal_handler(signum, frame):
# Pre python3.5 the interruption of a system call would automatically raise
# an InterruptedError exception, but since PEP 475 was implemented for fcntl
# interruptions are automatically retried; this implementation depends on
# interrupting the attempt to acquire a lock on the status file, so we can
# ensure this in all python3 versions by raising it explicitly in the signal
# handler.
raise InterruptedError()
def main():
p = optparse.OptionParser()
p.add_option('-o', '--stdout', dest='stdout',
help="""Read stdout from FILE.""", metavar='FILE')
p.add_option('-e', '--stderr', dest='stderr',
help="""Read stderr from FILE.""", metavar='FILE')
p.add_option('-s', '--status', dest='status', metavar='FILE',
help='Get process exit status from FILE. '
'Will block until a shared lock is acquired on FILE.')
p.add_option('-d', '--delete', dest='delete', action='store_true',
help='Delete stdout, stderr, and status files when finished.')
p.add_option(
'-x',
'--exclusive',
dest='exclusive',
help='Will block until FILE exists to ensure that status is ready to be '
'read. Required.',
metavar='FILE')
options, args = p.parse_args()
if args:
sys.stderr.write('Unexpected arguments: {0}\n'.format(args))
return 1
missing = []
for option in ('status', 'exclusive'):
if getattr(options, option) is None:
missing.append(option)
if missing:
p.print_usage()
msg = 'Missing required flag(s): {0}\n'.format(
', '.join('--' + i for i in missing))
sys.stderr.write(msg)
return 1
start = time.time()
return_code_str = None
while time.time() < WAIT_TIMEOUT_IN_SEC + start:
try:
with open(options.exclusive, 'r'):
with open(options.status, 'r'):
break
except IOError as e:
print('WARNING: file doesn\'t exist, retrying: %s' % e, file=sys.stderr)
time.sleep(WAIT_SLEEP_IN_SEC)
# Set a signal handler to raise an InterruptedError on SIGALRM (this is no
# longer done automatically after PEP 475).
signal.signal(signal.SIGALRM, signal_handler)
# Send a SIGALRM signal after WAIT_TIMEOUT_IN_SEC seconds
signal.alarm(int(WAIT_TIMEOUT_IN_SEC))
with open(options.status, 'r') as status:
try:
# If we can acquire the lock on status, the command we're waiting on is
# done; if we can't acquire it for the next WAIT_TIMEOUT_IN_SEC seconds
# this attempt will be interrupted and we'll catch an InterruptedError.
fcntl.lockf(status, fcntl.LOCK_SH)
except InterruptedError:
print('Wait timed out. This will be retried with a subsequent wait.')
return 0
# OSError and IOError have similar interfaces, and later versions of fcntl
# will raise OSError where earlier versions raised IOError--we catch both
# here for compatibility.
except (OSError, IOError) as e:
if e.errno == errno.ECONNREFUSED:
print('Connection refused during wait. '
'This will be retried with a subsequent wait.')
return 0
elif e.errno in (errno.EAGAIN, errno.EACCES):
print('Status currently being modified and cannot be read right now. '
'This will be retried with a subsequent wait.')
return 0
raise e
signal.alarm(0)
return_code_str = status.read()
if not (options.stdout and options.stderr):
print('Command finished.')
return 0
with open(options.stdout, 'r') as stdout:
with open(options.stderr, 'r') as stderr:
if return_code_str:
return_code = int(return_code_str)
else:
print('WARNING: wrapper script interrupted.', file=sys.stderr)
return_code = 1
# RemoteCommand retries 255 as temporary SSH failure. In this case,
# long running command actually returned 255 and should not be retried.
if return_code == RETRYABLE_SSH_RETCODE:
print('WARNING: command returned 255.', file=sys.stderr)
return_code = 1
stderr_copier = threading.Thread(target=shutil.copyfileobj,
args=[stderr, sys.stderr],
name='stderr-copier')
stderr_copier.daemon = True
stderr_copier.start()
try:
shutil.copyfileobj(stdout, sys.stdout)
finally:
stderr_copier.join()
if options.delete:
for f in [options.stdout, options.stderr, options.status]:
os.unlink(f)
return return_code
if __name__ == '__main__':
sys.exit(main())
``` |
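The waiter assumes a writer that holds an exclusive lock on the status file for the lifetime of the long-running command and creates the `--exclusive` marker once everything is in place. A sketch of that counterpart under those assumptions (not part of PerfKitBenchmarker's scripts):
```python
import fcntl
import subprocess

def run_and_record(cmd, stdout_path, stderr_path, status_path, exclusive_path):
    """Assumed writer-side protocol for wait_for_command.py."""
    with open(stdout_path, 'wb') as out, \
            open(stderr_path, 'wb') as err, \
            open(status_path, 'w') as status:
        # Hold the exclusive lock while the command runs; the waiter's
        # LOCK_SH request blocks until the file is closed here.
        fcntl.lockf(status, fcntl.LOCK_EX)
        retcode = subprocess.call(cmd, stdout=out, stderr=err)
        status.write(str(retcode))
    # Creating the marker last signals that the status file can be read.
    open(exclusive_path, 'w').close()
```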
{
"source": "2quarius/rplidar_ros",
"score": 2
} |
#### File: rplidar_ros/launch/test_rplidar.launch.py
```python
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
return LaunchDescription([
Node(
node_name='rplidar_composition',
package='rplidar_ros',
node_executable='rplidar_composition',
output='screen',
parameters=[{
'serial_port': '/dev/ttyUSB0',
'serial_baudrate': 115200,
'frame_id': 'laser',
'inverted': False,
'angle_compensate': True,
}],
),
Node(
node_name='rplidarNodeClient',
package='rplidar_ros',
node_executable='rplidarNodeClient',
output='screen',
),
])
``` |
{
"source": "2read-online/pydantic-mongodb",
"score": 3
} |
#### File: pkg/pydantic_mongo/mongo_model.py
```python
from datetime import datetime
from typing import Optional
from bson import ObjectId
from bson.errors import InvalidId
from pydantic import BaseModel, BaseConfig
class OID(str):
"""Wrapper around ObjectId"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
"""Validate ID
"""
try:
return ObjectId(str(v))
except InvalidId as err:
raise ValueError("Not a valid ObjectId") from err
class MongoModel(BaseModel):
"""Base mongo document with ID"""
id: Optional[OID]
class Config(BaseConfig):
"""Configrequirements
"""
allow_population_by_field_name = True # << Added
json_encoders = {
datetime: lambda dt: dt.isoformat(), # pylint: disable=unnecessary-lambda
ObjectId: lambda oid: str(oid), # pylint: disable=unnecessary-lambda
}
@classmethod
def from_db(cls, obj: dict):
"""Load model from DB document
"""
if obj is None:
return None
return cls(id=obj['_id'], **obj)
def db(self) -> dict:
"""Export to mongo document"""
data: dict = self.dict(exclude_none=True)
if 'id' in data:
data['_id'] = data.pop('id')
return data
``` |
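A short round-trip with a hypothetical subclass (assuming pydantic v1, which this `Config`/`json_encoders` style targets; the import path is assumed):
```python
from typing import Optional
from bson import ObjectId
from pydantic_mongo.mongo_model import MongoModel  # assumed import path

class Book(MongoModel):
    """Hypothetical document model."""
    title: str
    pages: Optional[int]

doc = {'_id': ObjectId(), 'title': 'Dune', 'pages': 412}
book = Book.from_db(doc)       # '_id' is mapped onto the 'id' field
assert book.db() == doc        # db() maps 'id' back to '_id'
print(book.json())             # ObjectId serialized via json_encoders
```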
{
"source": "2-REC-forks/OpenPype",
"score": 2
} |
#### File: plugins/publish/closeAE.py
```python
import pyblish.api
from avalon import aftereffects
class CloseAE(pyblish.api.ContextPlugin):
"""Close AE after publish. For Webpublishing only.
"""
order = pyblish.api.IntegratorOrder + 14
label = "Close AE"
optional = True
active = True
hosts = ["aftereffects"]
targets = ["remotepublish"]
def process(self, context):
self.log.info("CloseAE")
stub = aftereffects.stub()
self.log.info("Shutting down AE")
stub.save()
stub.close()
self.log.info("AE closed")
```
#### File: plugins/publish/collect_extension_version.py
```python
import os
import re
import pyblish.api
from avalon import aftereffects
class CollectExtensionVersion(pyblish.api.ContextPlugin):
""" Pulls and compares version of installed extension.
It is recommended to use same extension as in provided Openpype code.
Please use Anastasiy’s Extension Manager or ZXPInstaller to update
extension in case of an error.
You can locate extension.zxp in your installed Openpype code in
`repos/avalon-core/avalon/aftereffects`
"""
# This technically should be a validator, but other collectors might be
# impacted with usage of obsolete extension, so collector that runs first
# was chosen
order = pyblish.api.CollectorOrder - 0.5
label = "Collect extension version"
hosts = ["aftereffects"]
optional = True
active = True
def process(self, context):
installed_version = aftereffects.stub().get_extension_version()
if not installed_version:
raise ValueError("Unknown version, probably old extension")
manifest_url = os.path.join(os.path.dirname(aftereffects.__file__),
"extension", "CSXS", "manifest.xml")
if not os.path.exists(manifest_url):
self.log.debug("Unable to locate extension manifest, not checking")
return
expected_version = None
with open(manifest_url) as fp:
content = fp.read()
found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
content)
if found:
expected_version = found[0][1]
if expected_version != installed_version:
msg = (
"Expected version '{}' found '{}'\n Please update"
" your installed extension, it might not work properly."
).format(expected_version, installed_version)
raise ValueError(msg)
```
#### File: plugins/publish/validate_naming.py
```python
import re
import pyblish.api
import openpype.api
from avalon import photoshop
class ValidateNamingRepair(pyblish.api.Action):
"""Repair the instance asset."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
invalid_chars, replace_char = plugin.get_replace_chars()
self.log.info("{} --- {}".format(invalid_chars, replace_char))
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
self.log.info("validate_naming instance {}".format(instance))
metadata = stub.read(instance[0])
self.log.info("metadata instance {}".format(metadata))
layer_name = None
if metadata.get("uuid"):
layer_data = stub.get_layer(metadata["uuid"])
self.log.info("layer_data {}".format(layer_data))
if layer_data:
layer_name = re.sub(invalid_chars,
replace_char,
layer_data.name)
stub.rename_layer(instance.data["uuid"], layer_name)
subset_name = re.sub(invalid_chars, replace_char,
instance.data["name"])
instance[0].Name = layer_name or subset_name
metadata["subset"] = subset_name
stub.imprint(instance[0], metadata)
return True
class ValidateNaming(pyblish.api.InstancePlugin):
"""Validate the instance name.
Spaces in names are not allowed. Will be replace with underscores.
"""
label = "Validate Naming"
hosts = ["photoshop"]
order = openpype.api.ValidateContentsOrder
families = ["image"]
actions = [ValidateNamingRepair]
# configured by Settings
invalid_chars = ''
replace_char = ''
def process(self, instance):
help_msg = ' Use Repair action (A) in Pyblish to fix it.'
msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["name"]), msg
msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"],
help_msg)
assert not re.search(self.invalid_chars, instance.data["subset"]), msg
@classmethod
def get_replace_chars(cls):
"""Pass values configured in Settings for Repair."""
return cls.invalid_chars, cls.replace_char
``` |
{
"source": "2-REC/python-qt-resizable-messagebox",
"score": 2
} |
#### File: python-qt-resizable-messagebox/extras/usage.py
```python
import sys
from Qt.QtWidgets import (
QApplication
)
import dialogs
def run(*args):
if not QApplication.instance():
app = QApplication(*args)
else:
app = QApplication.instance()
long_text = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
"eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut "
"enim ad minim veniam, quis nostrud exercitation ullamco laboris"
" nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor"
" in reprehenderit in voluptate velit esse cillum dolore eu "
"fugiat nulla pariatur. Excepteur sint occaecat cupidatat non "
"proident, sunt in culpa qui officia deserunt mollit anim id est"
" laborum. Lorem ipsum dolor sit amet, consectetur adipiscing "
"elit, sed do eiusmod tempor incididunt ut labore et dolore magna"
" aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
"ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis "
"aute irure dolor in reprehenderit in voluptate velit esse cillum"
" dolore eu fugiat nulla pariatur. Excepteur sint occaecat "
"cupidatat non proident, sunt in culpa qui officia deserunt "
"mollit anim id est laborum."
)
dialogs.success(
None,
"Success message",
details=long_text
)
dialogs.warning(
None,
"Warning message",
details=long_text
)
dialogs.alert(
None,
"Alert message",
details=long_text
)
dialogs.question(
None,
"Question?",
details=long_text
)
dialogs.questionWarning(
None,
"Question as warning?",
details=long_text
)
return 0
if __name__ == "__main__":
return_code = run(sys.argv)
sys.exit(return_code)
```
#### File: 2-REC/python-qt-resizable-messagebox/resizable_messagebox.py
```python
__version__ = "1.0"
__author__ = "2-REC"
import logging
logger = logging.getLogger(__name__)
from Qt.QtCore import Qt as qt
from Qt.QtWidgets import (
QMessageBox,
QTextEdit,
QDialogButtonBox,
QSizePolicy
)
class ResizableMessageBox(QMessageBox):
_max_width = 4096
_max_height = 2048
def __init__(self, *args, **kwargs):
super(ResizableMessageBox, self).__init__(*args, **kwargs)
self.clearDetailBox()
def setDetailedText(self, text):
super(ResizableMessageBox, self).setDetailedText(text)
if not text:
self.clearDetailBox()
return
details_box = self.findChild(QTextEdit)
if not details_box:
logger.error("No 'QTextEdit' found in 'QDialogButtonBox'")
return
self.details_box = details_box
dialog_button_box = self.findChild(QDialogButtonBox)
if dialog_button_box:
for button in dialog_button_box.buttons():
if (
dialog_button_box.buttonRole(button)
== QDialogButtonBox.ButtonRole.ActionRole
):
button.released.connect(self.detailsToggle)
break
else:
logger.error("No 'ActionRole' button in 'QDialogButtonBox'")
def resizeEvent(self, event):
result = super(ResizableMessageBox, self).resizeEvent(event)
if self.details_visible:
self.setSizing()
return result
def clearDetailBox(self):
self.details_box = None
self.details_visible = False
self.setSizeGripEnabled(False)
def detailsToggle(self):
self.details_visible = not self.details_visible
self.setSizeGripEnabled(self.details_visible)
if self.details_visible:
self.setSizing()
def setSizing(self):
self.setWidgetSizing(self)
self.setWidgetSizing(self.details_box)
@classmethod
def setWidgetSizing(cls, widget):
        widget.setMaximumHeight(cls._max_height)
        widget.setMaximumWidth(cls._max_width)
widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
``` |
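A minimal direct usage sketch (not from the repository): the dialog only becomes resizable once the user expands the details section, which is when the size grip is enabled.
```python
import sys
from Qt.QtWidgets import QApplication
from resizable_messagebox import ResizableMessageBox

app = QApplication(sys.argv)
box = ResizableMessageBox()
box.setWindowTitle("Export finished")
box.setText("Export finished with warnings.")
box.setDetailedText("\n".join("warning %d" % i for i in range(200)))
box.exec_()  # click "Show Details..." and the dialog can be resized
```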
{
"source": "2rei/flask-btce",
"score": 3
} |
#### File: flask-btce/helpers/format_helper.py
```python
import decimal
from console.configs.btce import BtceConfig
decimal.getcontext().rounding = decimal.ROUND_DOWN
exps = [decimal.Decimal("1e-%d" % i) for i in range(16)]
def truncateAmountDigits(value, digits):
quantum = exps[digits]
return decimal.Decimal(value).quantize(quantum)
def truncateAmount(value, pair):
return truncateAmountDigits(value, BtceConfig.MAX_DIGITS[pair])
def formatCurrencyDigits(value, digits):
s = str(truncateAmountDigits(value, digits))
dot = s.index(".")
while s[-1] == "0" and len(s) > dot + 2:
s = s[:-1]
return s
def formatCurrency(value, pair):
return formatCurrencyDigits(value, BtceConfig.MAX_DIGITS[pair])
```
#### File: flask-btce/libs/btce_api.py
```python
import hmac
import hashlib
import string
import json
import time
import urllib
from grab import Grab
from console import app
from helpers.format_helper import *
class BtceApi(object):
def __init__(self, const):
self.grab = None
self.result_count = 100
self.result_start = 0
self.result_active = 1
self.nonce = int(time.time())
self.const = const
self.pair = self.const.PAIR
self.public_method = self.const.PUBLIC_METHOD
self.private_method = self.const.PRIVATE_METHOD
def set_request(self, url, post=None, headers=None):
return_data = False
if not self.grab:
self.grab = Grab()
if post:
self.grab.setup(post=post)
if headers:
self.grab.setup(headers=headers)
try:
self.grab.go(url)
except Exception as e:
app.logger.error(e)
else:
return_data = self.grab
return return_data
def _sign(self, post):
post = urllib.urlencode(post)
H = hmac.new(self.const.SECRET, digestmod=hashlib.sha512)
H.update(post)
return H.hexdigest()
def _private_post(self, params):
self.nonce += 1  # the private API expects a strictly increasing nonce per request
params['nonce'] = self.nonce
headers = {
'Key': self.const.KEY,
'Sign': self._sign(params)
}
return self.set_request(self.const.PRIVATE_URL, params, headers)
def _private_info(self, params):
return_data = False
result = self._private_post(params)
if result:
data = json.loads(result.response.body)
if int(data['success']) == 1:
return_data = data['return']
return return_data
def get_public_url(self, method):
pair = '-'.join(self.pair)
return "%s/%s/%s" % (self.const.PUBLIC_URL, method, pair)
def _public_info(self, method):
data = {}
result = self.set_request(self.get_public_url(method))
if result:
data = json.loads(result.response.body)
if len(data) == 0:
data = False
return data
def get_info(self):
return self._public_info(self.public_method['info'])
def get_ticker(self):
return self._public_info(self.public_method['ticker'])
def get_trades(self):
return self._public_info(self.public_method['trades'])
def get_depth(self):
return self._public_info(self.public_method['depth'])
def get_user_info(self):
params = {
'method': self.private_method['user_info']
}
return self._private_info(params)
def get_trans_history(self):
params = {
'method': self.private_method['trans_history'],
'count': self.result_count,
'from_id': self.result_start,
}
return self._private_info(params)
def get_trade_history(self, pair=None):
params = {
'method': self.private_method['trade_history'],
'count': self.result_count,
'from_id': self.result_start,
}
if pair:
params['pair'] = pair
return self._private_info(params)
def get_active_order(self, pair=None):
params = {
'method': self.private_method['active_order'],
}
if pair:
params['pair'] = pair
return self._private_info(params)
def cancel_order(self, order_id):
params = {
'method': self.private_method['cancel_order'],
'order_id': order_id,
}
return self._private_info(params)
def trade(self, pair, trade_type, rate, amount):
params = {
'method': self.private_method['trade'],
'pair': pair,
'type': trade_type,
'rate': rate,
'amount': amount,
}
return self._private_info(params)
``` |
{
"source": "2rintf/ai-search-model",
"score": 3
} |
#### File: app/FD/face_detection_func.py
```python
import face_recognition
import numpy as np
import time
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy import BaseQuery
from app.Database.db_orm import TblModel,TblBusiness
list_business_name=["nike","uniqlo",
"muji","longines",
"adidas","tiffany",
"zara","chanel","hm"]
def calFaceDistance(face_encodings,face_to_compare):
'''
Compute the Euclidean distances between faces.
:param face_encodings: face encodings stored in the database.
:param face_to_compare: the input face encoding to compare against.
:return: distance
'''
print(face_encodings.shape)
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
def faceEncodingPipeline(real_path):
'''
Standard face detection and encoding.
:param real_path: path to the image
:return: encoding: 128-dimensional vector
'''
start = time.time()
image = face_recognition.load_image_file(real_path)
face_locations = face_recognition.face_locations(image,model="cnn",number_of_times_to_upsample=1)
if len(face_locations)==0:
face_locations = face_recognition.face_locations(image,
model="cnn",
number_of_times_to_upsample=2)
if len(face_locations)==0:
print("{%s}. No face detected."%(real_path))
elif len(face_locations)>1:
print("{%s}. More than 1 face detected." % (real_path))
encoding = face_recognition.face_encodings(image,face_locations,model="large")
print("[TIME COST] faceEncodingPipeline cost : "+str(time.time()-start))
return encoding
def getTop6FaceComparision(upload_encoding):
'''
[for model-feature, not for business-feature]
Retrieve the six most similar faces.
:param upload_encoding: encoding of the uploaded image
:return: dict of top 6 models
'''
print("------- [Face Comparision Info] ---------")
total_encoding = TblModel.query.all()
# print(total_encoding[0].encoding)
print("nums of encoding in db: "+str(len(total_encoding)))
encodings = []
distances = []
ids = []
for i in range(len(total_encoding)):
encodings.append(total_encoding[i].face_encoding)
ids.append(total_encoding[i].id_model)
distances = calFaceDistance(np.array(encodings),np.array(upload_encoding))
# print(ids)
# print(distances.tolist())
# print(len(distances))
# temp = [zip(ids,distances)]
# Return the index of the list of sorted distances.
index_sorted = np.argsort(distances)
# print(ind)
top6Distances=[]
top6Id = []
temp =[]
for i in range(6):
# get top-6's index.
temp.append(index_sorted[i])
# save top-6's distances.
top6Distances.append(distances[index_sorted[i]])
top6Id = [ids[i] for i in temp]
print(top6Id)
print(top6Distances)
top6Models = []
# # !Note: this query does not return rows in the given id-list order; it re-sorts by ascending id.
# top6Models = EncodingTable.query.filter(EncodingTable.id.in_(top6Index)).all()
for i in top6Id:
top6Models.append(TblModel.query.get(i))
print(top6Models)
print([i.name for i in top6Models])
print("------- [Face Comparision Info] ---------")
top6ModelsInfo = {}
count = 0
for i in top6Models:
# temp_dict = {
# 'name':i.name,
# 'id':i.id,
# 'pic_path':i.pic_path,
# 'attr_encoding':i.attr_encoding,
# 'img_stream':"",
# }
top6ModelsInfo[count] = i.get_dict().copy()
count+=1
return top6ModelsInfo
def getStyleFromFaceComparision_mean(upload_encoding):
'''
[for business-feature] Ranks businesses by the mean similarity over all models of each business.
:param upload_encoding:
:return: list of the top-3 business_names.
'''
distances_of_business = {}
for b_name in list_business_name:
business_model_data = TblBusiness.query.filter_by(business_name=b_name).all()
nums_of_model = len(business_model_data)
encodings=[]
ids=[]
for i in range(nums_of_model):
encodings.append(business_model_data[i].face_encodings)
# ids.append(business_model_data[i].id_business)
distances = calFaceDistance(np.array(encodings), np.array(upload_encoding))
print(distances)
mean_distances = np.mean(distances)
print(mean_distances)
distances_of_business[b_name] = mean_distances.copy()
print(distances_of_business)
# Sort the dict by value, ascending here, to find the smallest distances.
list_sorted_distances = sorted(distances_of_business.items(),key=lambda a:a[1])
print(list_sorted_distances)
return [list_sorted_distances[i][0] for i in range(3)]
def getStyleFromFaceComparision_minimum(upload_encoding):
'''
[for business-feature] Ranks by the single most similar model.
:param upload_encoding:
:return: dict with the info (business_name, pic_path) of the six most similar models
'''
business_model_data = TblBusiness.query.all()
nums_of_model = len(business_model_data)
encodings=[]
ids=[]
print(nums_of_model)
for i in range(nums_of_model):
encodings.append(business_model_data[i].face_encodings)
ids.append(business_model_data[i].id_business)
distances = calFaceDistance(np.array(encodings), np.array(upload_encoding))
dict_distances_of_business = dict(zip(distances,ids))
list_sorted_distances = sorted(dict_distances_of_business)
# print(dict_distances_of_business)
print(list_sorted_distances)
# business_name, pic_path
top6_model_info={}
for n in range(6):
id_temp = int(dict_distances_of_business[list_sorted_distances[n]])
print(list_sorted_distances[n])
result = TblBusiness.query.filter_by(id_business=id_temp).first()
top6_model_info[n] = result.get_showing_dict().copy()
print(top6_model_info)
return top6_model_info
``` |
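A toy check of calFaceDistance, using 3-dimensional stand-ins for the 128-dimensional encodings produced above:
```python
import numpy as np

db_encodings = np.array([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 3.0, 4.0]])
query = np.array([0.0, 0.0, 0.0])
# Row-wise Euclidean distance, exactly what calFaceDistance computes
print(np.linalg.norm(db_encodings - query, axis=1))  # [0. 1. 5.]
```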
{
"source": "2runo/Curse-detection",
"score": 3
} |
#### File: Curse-detection/src/embedding.py
```python
import numpy as np
import joblib
def char2vec(char):
# Embed one character using the precomputed dictionary
if char == '~':
# empty slot -> zero vector [0., 0., ..., 0.]
return np.array([0.] * len(vecdict['ㄱ']))
return vecdict[char]
def embedding(x):
# Embed every character of every sample
return np.array([[char2vec(e) for e in ele] for ele in x])
def padding(x, length=256, pad=None):
# Pad each sample to a fixed length
result = []
for n, ele in enumerate(x):
if len(ele) == length:
result.append(ele)
continue
if pad is None:
pad = [0.] * len(ele[0])
a, b = np.array(ele), np.array([pad] * (length - len(ele)))
try:
mid = np.concatenate((a, b))
except:
continue
result.append(mid)
return np.array(result)
def padding_x(x, length=256, pad=None):
# Pad a single input sample
if len(x) > length:
return None
if len(x) == length:
return x
if pad is None:
pad = [0.] * len(x[0])
a, b = np.array(x), np.array([pad] * (length - len(x)))
try:
mid = np.concatenate((a, b))
except:
return None
return mid
vecdict = joblib.load('models/char2vec.dic')  # dictionary mapping each character to its embedding vector
```
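A small sketch of padding() on toy 2-dimensional vectors (the real inputs are the char2vec embeddings loaded above):
```python
# one sample of length 2, padded out to length 4 with zero vectors
x = [[[1.0, 1.0], [2.0, 2.0]]]
padded = padding(x, length=4)
print(padded.shape)  # (1, 4, 2); rows 3-4 are zero padding
```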
#### File: Curse-detection/src/text_preprocessing.py
```python
import re
import itertools
BASE_CODE, CHOSUNG, JUNGSUNG = 44032, 588, 28
CHOSUNG_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
JUNGSUNG_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ',
'ㅣ']
JONGSUNG_LIST = ['~', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ',
'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
GYUP2CHO = {'ㄳ': 'ㄱㅅ', 'ㄵ': 'ㄴㅈ', 'ㄶ': 'ㄴㅎ', 'ㄺ': 'ㄹㄱ', 'ㄻ': 'ㄹㅁ', 'ㄽ': 'ㄹㅅ', 'ㄾ': 'ㄹㅌ', 'ㄿ': 'ㄹㅍ',
'ㅄ': 'ㅂㅅ'} # map compound consonants to their component consonants
def remain_char(x):
# Keep only Hangul characters (drop spaces, digits, punctuation, English, etc.)
return [''.join(re.findall(r'[ㄱ-ㅎㅏ-ㅣ가-힣]', i)) for i in x] # digits are dropped too (append '0-9' to the pattern to keep them)
def long2short(x):
# Collapse long runs of repeated characters
# e.g. long2short(['ㅋㅋㅋㅋㅋㅋㅋ앜ㅋㅋㅋ']) -> ['ㅋ앜ㅋ']
result = []
keep = True
for ele in x:
while True:
candidates = set(re.findall(r'(\w)\1', ele))
repeats = itertools.chain(*[re.findall(r"({0}{0}+)".format(c), ele) for c in candidates])
keep = False
for org in [i for i in repeats if len(i) >= 2]:
ele = ele.replace(org, org[0])
keep = True
if not keep:
break
result.append(ele)
return result
def analchar(test_keyword):
# Split each character into initial/medial/final jamo (non-Hangul is returned as-is)
# ex) analchar('아녕ㅕㄴ') -> 'ㅇㅏ~ㄴㅕㅇ~ㅕ~ㄴ~~'
split_keyword_list = list(test_keyword)
result = []
for keyword in split_keyword_list:
# If the character is a complete Hangul syllable, decompose it into jamo
if re.match(r'.*[가-힣]+.*', keyword) is not None:
char_code = ord(keyword) - BASE_CODE
char1 = int(char_code / CHOSUNG)
result.append(CHOSUNG_LIST[char1])
char2 = int((char_code - (CHOSUNG * char1)) / JUNGSUNG)
result.append(JUNGSUNG_LIST[char2])
char3 = int((char_code - (CHOSUNG * char1) - (JUNGSUNG * char2))) # no final consonant -> char3 = 0 = '~'
result.append(JONGSUNG_LIST[char3])
elif re.match(r'[ㄱ-ㅎ]', keyword) is not None:
result.append(keyword + '~~')
elif re.match(r'[ㅏ-ㅣ]', keyword) is not None:
result.append('~' + keyword + '~')
else:
result.append(keyword)
return ''.join(result)
def data2anal(x):
# Apply analchar to every sample (jamo decomposition; non-Hangul passes through)
return [analchar(i) for i in x]
def replace_gyup(x):
# Convert compound consonants into their component consonants.
# ex) 'ㅄ새끼' -> 'ㅂㅅ새끼'
result = []
for ele in x:
for gyup, cho in GYUP2CHO.items():
ele = ele.replace(gyup, cho)
result.append(ele)
return result
def preprocess(texts):
texts = remain_char(texts) # strip punctuation, English, etc.
texts = long2short(texts) # collapse repeated characters (ㅋㅋㅋㅋ -> ㅋ)
texts = data2anal(texts) # split syllables into jamo
return texts
``` |
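An end-to-end sketch of the pipeline above on one string: non-Hangul characters are stripped, repeats are collapsed, and syllables are decomposed into jamo:
```python
print(preprocess(['ㅋㅋㅋㅋ안녕!!']))  # ['ㅋ~~ㅇㅏㄴㄴㅕㅇ']
```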
{
"source": "2runo/dl_numpy",
"score": 3
} |
#### File: dl_numpy/core/math.py
```python
import numpy as np
from .decorators import for_all_methods
from .base import check_tensor_decorator, Tensor, Add, Mul, Div, Pow
from .calc import Log, Sqrt, Sum, Matmul, Max, Min, Erf
@for_all_methods(check_tensor_decorator())
class Math:
def add(self, x, y):
return Add(x, y)
def mul(self, x, y):
return Mul(x, y)
def div(self, x, y):
return Div(x, y)
def pow(self, x, y):
return Pow(x, y)
def log(self, x):
return Log(x)
def exp(self, x):
return Pow(Tensor(np.e, var=False), x)
def sqrt(self, x):
return Sqrt(x)
def sum(self, x, axis=None, keepdim=False):
return Sum(x, axis=axis, keepdim=keepdim)
def mean(self, x, axis=None, keepdim=False):
n = x.value.shape[0 if axis is None else axis]
return Sum(x, axis=axis, keepdim=keepdim) / n
def matmul(self, x, y):
return Matmul(x, y)
def max(self, x, axis=None, keepdim=False):
return Max(x, axis=axis, keepdim=keepdim)
def min(self, x, axis=None):
return Min(x, axis=axis)
def erf(self, x):
return Erf(x)
```
#### File: dl_numpy/utils/layer.py
```python
import numpy as np
def init_weights(size: tuple) -> np.ndarray:
return np.random.uniform(-0.1, 0.1, size=size)
``` |
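Usage sketch for init_weights: a weight matrix for a 3-in / 4-out linear layer, drawn uniformly from [-0.1, 0.1):
```python
w = init_weights((3, 4))
print(w.shape)                         # (3, 4)
print(w.min() >= -0.1, w.max() < 0.1)  # True True
```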
{
"source": "2sang/oneshot-tfgpu",
"score": 2
} |
#### File: oneshot-tfgpu/tfgpu/app.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import logging
import os
import sys
import docker
import fire
from PyInquirer import prompt
import tfgpu.cli
import tfgpu.utils as utils
from tfgpu.prompts import custom_style_1, custom_style_2, custom_style_3
__version__ = '0.0.1'
PY3 = sys.version_info[0] >= 3
def check_initialized():
conf = utils.load_conf()
return int(conf['general']['num_images']) != 0
def _run(image_name='default'):
if not check_initialized():
return _init()
docker_client = docker.from_env()
tfgpu.cli.run.run_container(image_name, docker_client)
return True
def _ls():
pass
def _commit():
pass
def _init():
tfgpu.cli.init.create_new_image_prompt()
return True
def _ps():
pass
def _set():
pass
def main():
if len(sys.argv) == 1:
utils.print_usage()
return False
if not os.path.exists('conf.yaml'):
utils.recreate_conf()
available_commands = ['run', 'ls', 'ps', 'set', 'init', 'commit']
fire.Fire({command: globals()['_' + command] for command in available_commands})  # look handlers up by name instead of eval
if __name__ == "__main__":
main()
```
#### File: tfgpu/cli/init.py
```python
from PyInquirer import prompt
from tfgpu.prompts import custom_style_1, custom_style_2, custom_style_3
import tfgpu.utils as utils
import names
def ask_tag():
print("We're loading available tags from Dockerhub, please wait...")
available_tags_by_version = utils.load_available_tags_by_version()
select_version = [{
'type': 'list',
'name': 'version',
'message': 'Choose tensorflow image version you want to fetch:',
'choices': sorted(available_tags_by_version.keys(), reverse=True)
}]
answer = prompt(select_version, style=custom_style_1)
version = answer['version']
select_tag = [{
'type': 'list',
'name': 'tag',
'message': 'Choose specific tag:',
'choices': sorted(available_tags_by_version[version], reverse=True)
}]
answer = prompt(select_tag, style=custom_style_1)
return answer['tag']
def ask_others():
image_name = names.get_first_name()
while utils.image_name_duplicated(image_name):
image_name = names.get_first_name(gender='female')
questions = [
{
'type': 'input',
'name': 'image_name',
'message': 'image name to use:',
'default': image_name
},
{
'type': 'input',
'name': 'host_mountpath',
'message': 'Mount path of Docker Volume in your local filesystem:',
'default': '~/tfgpu/'
},
{
'type': 'input',
'name': 'container_mountpath',
'message': 'Mount path on the container that will synchronize with the local mount path:',
'default': '/notebooks/'
},
{
'type': 'input',
'name': 'volume_name',
'message': 'name for the docker volume:',
'default': 'tfgpu_volume'
},
{
'type': 'input',
'name': 'local_port',
'message': 'local port number for accessing jupyter notebook:',
'default': '9999'
},
]
answer = prompt(questions, style=custom_style_1)
image_name = answer['image_name']
del answer['image_name']
return image_name, answer
def ask_questions():
tag = ask_tag()
image_name, image_conf = ask_others()
image_conf['tag'] = tag
return image_name, image_conf
def create_new_image_prompt():
image_name, image_conf = ask_questions()
utils.add_image_to_conf(image_name, image_conf)
``` |
{
"source": "2series/Artificial-Intelligence",
"score": 4
} |
#### File: 1 - Python/3.functions/lesson02.py
```python
# print e to the power of 3 using the math module
import math
print(math.exp(3))
# Use an import statement at the top
import random
word_file = "words.txt"
word_list = []
#fill up the word_list
with open(word_file,'r') as words:
for line in words:
# remove white space and make everything lowercase
word = line.strip().lower()
# don't include words that are too long or too short
if 3 < len(word) < 8:
word_list.append(word)
# Add your function generate_password here
# It should return a string consisting of three random words
# concatenated together without spaces
def generate_password():
random_list = ''
for x in range(3):
random_word = random.choice(word_list)
random_list += random_word
return random_list
# test your function
print(generate_password())
``` |
{
"source": "2series/Autonomous-Flight-Engineer",
"score": 3
} |
#### File: 2series/Autonomous-Flight-Engineer/hash256.py
```python
import hashlib
def hash256(string):
hash_object = hashlib.sha256(string.encode('utf-8'))  # sha256 takes bytes, so encode the str first
hex_dig = hash_object.hexdigest()
return hex_dig
print(hash256("Hello World"))
``` |
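For comparison, the same digest computed with hashlib directly; sha256 operates on bytes, hence the explicit encode:
```python
import hashlib
print(hashlib.sha256("Hello World".encode("utf-8")).hexdigest())
```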
{
"source": "2series/DataScience-Courses",
"score": 4
} |
#### File: Week 6 - Database Basics: SQL/Week 6 Assignment/portfolio_pricing.py
```python
import mysql.connector as conn
User = "YourUserID"
Name = "YourDBName"
Host = "YourHost"
password = "<PASSWORD>"
"""
Do Not Change!!
"""
db = conn.connect(host = Host, user = User, password = password, database = Name )
cursor = db.cursor()
file = "portfolio.txt"
with open(file,'r') as f:
line_count = 0
stocks_set = set()
for line in f:
line = line.strip()
if line_count == 0:
headers = line.split(':')
headers = [x.replace(' ','_') for x in headers]
query1 = "DROP TABLE IF EXISTS stocks;"
query2 = "DROP TABLE IF EXISTS holdings"
cursor.execute(query1)
cursor.execute(query2)
query1 = "CREATE TABLE IF NOT EXISTS stocks ("
query1 += headers[0] + " VARCHAR(10),"
query1 += headers[1] + " VARCHAR(30));"
query2 = "CREATE TABLE IF NOT EXISTS holdings ("
query2 += headers[0] + " VARCHAR(10),"
query2 += headers[2] + " DECIMAL(10,2),"
query2 += headers[3] + " INT,"
query2 += headers[4] + " DATE);"
cursor.execute(query1)
cursor.execute(query2)
line_count += 1
continue
data = line.split(':')
stock_info = (data[0],data[1])
stocks_set.add(stock_info)
holdings_query = 'INSERT INTO holdings VALUES ("'
holdings_query +=data[0] + '",'
holdings_query +=data[2] + ','
holdings_query +=data[3] + ',"'
holdings_query +=data[4] + '");'
cursor.execute(holdings_query)
for s_info in stocks_set:
stock_query = 'INSERT INTO stocks VALUES ("'
stock_query += s_info[0] + '","'
stock_query += s_info[1] +'");'
cursor.execute(stock_query)
db.commit()
db.close()
"""
Change get_pnl and get_price
"""
def get_pnl():
import mysql.connector as conn
gain_dict = dict()
# TODO (assignment stub): query holdings/stocks and compute per-ticker gains
return gain_dict
def get_price(ticker):
import requests
from bs4 import BeautifulSoup
price = None  # TODO (assignment stub): fetch the quote page and parse the price
return price
#Test
#get_price('AAPL')
``` |
{
"source": "2series/Data-Scientist-Portfolio",
"score": 3
} |
#### File: Project 2 - Image Classifier Application/Application/model_spec.py
```python
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from workspace_utils import active_session
from PIL import Image
import json
import argparse
from collections import OrderedDict # a dict that preserves insertion order
import matplotlib.pyplot as plt
# ================= Train Model Functions =====================
def train_model(hyperparameters, data_dir, save_dir, device):
"""Train the neural network, called in main function utilize the following helper functions,
"""
model_init, resize_aspect = get_model(hyperparameters['architecture'])
image_dataset = loadImageData(resize_aspect, data_dir)
model_spec = buildNeuralNetwork(model_init, hyperparameters, data_dir, device)
for e in range(hyperparameters['epochs']):
model_spec['model'].train()
running_loss = 0 # the loss for every batch
for i, train_batch in enumerate(image_dataset['trainloader']): # minibatch training
# send the inputs labels to the tensors that uses the specified devices
inputs, labels = tuple(map(lambda x: x.to(device), train_batch))
model_spec['optimizer'].zero_grad() # clear out previous gradients, avoids accumulations
# Forward and backward passes
try:
# inception_v3 returns (logits, aux_logits) in training mode
predictions,_ = model_spec['model'].forward(inputs)
except:
predictions = model_spec['model'].forward(inputs)
loss = model_spec['criterion'](predictions, labels)
loss.backward()
model_spec['optimizer'].step()
# calculate the total loss for 1 epoch of training
running_loss += loss.item()
# report loss and accuracy every print_every batches
if i % hyperparameters['print_every'] == 0:
model_spec['model'].eval() # set to evaluation mode
train_accuracy = evaluate_performance(model_spec['model'],
image_dataset['trainloader'],
model_spec['criterion']) # see evaluate function below
validate_accuracy = evaluate_performance(model_spec['model'],
image_dataset['validloader'],
model_spec['criterion'])
print("Epoch: {}/{}... :".format(e+1, hyperparameters['epochs']),
"Loss: {:.4f},".format(running_loss/hyperparameters['print_every']),
"Training Accuracy:{: .4f} %,".format(train_accuracy * 100),
"Validation Accuracy:{: .4f} %".format(validate_accuracy * 100)
)
running_loss = 0
model_spec['model'].train()
saveModel(image_dataset, model_spec['model'], model_spec['classifier'], save_dir)
return model_spec['model']
def get_model(architecture):
# set model architecture
if architecture == 'inception_v3':
model_init = models.inception_v3(pretrained=True)
model_init.arch = 'inception_v3'
resize_aspect = [320, 299]
elif architecture == 'densenet161':
model_init = models.densenet161(pretrained=True)
model_init.arch = 'densenet161'
resize_aspect = [256, 224]
elif architecture == 'vgg19':
model_init = models.vgg19(pretrained=True)
model_init.arch = 'vgg19'
resize_aspect = [256, 224]
return model_init, resize_aspect
def loadImageData(resize_aspect, data_dir):
"""Input:
resize_aspect - depends on the architecture
data_dir - directory of all image data"""
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Define transforms for the training, validation, and testing sets, using data augumentations on training set,
# Inception_v3 has input size 299x299
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(resize_aspect[1]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
validation_transforms = transforms.Compose([transforms.Resize(resize_aspect[1]),
transforms.CenterCrop(resize_aspect[1]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(resize_aspect[1]),
transforms.CenterCrop(resize_aspect[1]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
validation_data = datasets.ImageFolder(valid_dir, transform=validation_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
# Using the image datasets and the trainforms, define the dataloaders
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validloader = torch.utils.data.DataLoader(validation_data, batch_size= 32)
testloader = torch.utils.data.DataLoader(test_data, batch_size= 32)
# label mapping
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
image_dataset = {'train': train_data, 'test': test_data, 'validate': validation_data,
'trainloader':trainloader, 'validloader':validloader, 'testloader': testloader,
'mapping': cat_to_name}
return image_dataset
def buildNeuralNetwork(model, hyperparameters, data_dir, device = 'cuda'):
"""Builds the transfer learning network according to the given architecture
"""
# turns off gradient
for param in model.parameters():
param.requires_grad = False
# input units mapping:
input_units = {'inception_v3': 2048, 'densenet161': 2208, 'vgg19': 25088}
# rebuild last layer
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_units[model.arch],
hyperparameters['hidden_units'])),
('relu1', nn.ReLU()),
('dropout1', nn.Dropout(hyperparameters['dropout_prob'])),
('fc2', nn.Linear(hyperparameters['hidden_units'],
102)),
('output', nn.LogSoftmax(dim=1))
]))
# Attach the feedforward classifier, adjusting for each architecture's naming conventions
# Define criteria and loss
criterion = nn.NLLLoss()
if model.arch == 'inception_v3':
model.fc = classifier
optimizer = optim.Adam(model.fc.parameters(), lr = hyperparameters['learning_rate'])
else:
model.classifier = classifier
optimizer = optim.Adam(model.classifier.parameters(), lr = hyperparameters['learning_rate'])
# Important: Send model to use gpu cuda
model = model.to(device)
model_spec = {'model': model, 'criterion': criterion,
'optimizer': optimizer, 'classifier':classifier}
return model_spec
def evaluate_performance(model, dataloader,criterion, device = 'cuda'):
# Evaluate performance for all batches in an epoch
performance = [evaluate_performance_batch(model, i, criterion) for i in iter(dataloader)]
correct, total = list(map(sum, zip(*performance)))
return correct/total
def evaluate_performance_batch(model,batch, criterion, device = 'cuda'):
"""Evaluate performance for a single batch"""
with torch.no_grad():
images, labels = tuple(map(lambda x: x.to(device), batch))
predictions = model.forward(images)
_, predict = torch.max(predictions, 1)
correct = (predict == labels).sum().item()
total = len(labels)
return correct, total
def saveModel(image_dataset, model, classifier, save_dir):
# Saves the pretrained model
with active_session():
check_point_file = save_dir + model.arch + '_checkpoint.pth'
model.class_to_idx = image_dataset['train'].class_to_idx
checkpoint_dict = {
'architecture': model.arch,  # save the actual architecture rather than a hard-coded one
'class_to_idx': model.class_to_idx,
'state_dict': model.state_dict(),
'classifier': classifier
}
torch.save(checkpoint_dict, check_point_file)
print("Model saved")
return None
# ================= Predict Functions =====================
def predict(image_path, checkpoint_path, category_names, device, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# Implement the code to predict the class from an image file
model, resize_aspect = load_model_checkpoint(checkpoint_path)
model.eval()
image = process_image(image_path, resize_aspect)
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
# use forward propagation to obtain the class probabilities
image = torch.tensor(image, dtype= torch.float).unsqueeze(0).to(device)
predict_prob_tensor = torch.exp(model.forward(image)) # convert log probabilities to real probabilities
predict_prob = predict_prob_tensor.cpu().detach().numpy()[0] # change into numpy array
# Find the corresponding top k classes
top_k_idx = predict_prob.argsort()[-topk:][::-1]
probs = predict_prob[top_k_idx]
classes = np.array(list(range(1, 103)))[top_k_idx]  # 102 flower classes, labels 1..102
visualize_pred(image_path, model, probs, classes, cat_to_name, topk, resize_aspect)
return probs, classes
def load_model_checkpoint(path):
"""Load model checkpoint given path"""
checkpoint = torch.load(path, map_location={'cuda:0': 'cpu'})
model, resize_aspect = get_model(checkpoint['architecture'])
if model.arch == 'inception_v3':
model.fc = checkpoint['classifier']
else:
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
return model, resize_aspect
def process_image(image, resize_aspect):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# Process a PIL image for use in a PyTorch model
im = Image.open(image)
# resize so the shortest side matches resize_aspect[0]
size = (resize_aspect[0], resize_aspect[0])
im.thumbnail(size)
# crop out 299 portion in the center
width, height = im.size
left = (width - resize_aspect[1])/2
top = (height - resize_aspect[1])/2
right = (width + resize_aspect[1])/2
bottom = (height + resize_aspect[1])/2
im = im.crop((left, top, right, bottom))
# normalize image
np_image = np.array(im)
im_mean = np.array([0.485, 0.456, 0.406])
im_sd = np.array([0.229, 0.224, 0.225])
np_image = (np_image/255 - im_mean)/im_sd
# transpose the image
np_image = np_image.T
return np_image
def imshow2(image, ax=None, title=None):
"""Returns the original image after preprocessing"""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
#plt.suptitle(title)
ax.imshow(image)
return ax
# Display an image along with the top 5 classes
def visualize_pred(image_path, model, probs, classes, cat_to_name, topk, resize_aspect):
""" Visualize the top k probabilities an image is predicted as"""
im = process_image(image_path, resize_aspect)
flower_names = [cat_to_name[str(x)] for x in classes]
# Build subplots above
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10,10))
# set axis settings top
imshow2(im, ax =ax1)
ax1.set_title(cat_to_name[image_path.split('/')[2]])
# set axis settings bottom
ax2.barh(np.arange(1, topk + 1), probs)
ax2.set_yticks(np.arange(1, topk + 1))
ax2.set_yticklabels(flower_names)
ax2.set_aspect(0.187)
ax2.set_xlim(0,1)
return None
#=================== get input args train / predict ======================
def get_input_args_train():
parser = argparse.ArgumentParser()
parser.add_argument('data_directory', type=str, default = None,
help="data directory")
parser.add_argument('--save_dir', type=str, default='checkpoints/',
help='save checkpoints to directory')
parser.add_argument('--arch', type=str, default='inception_v3',
help='model architecture')
parser.add_argument('--learning_rate', type=float, default=0.001,
help='learning rate, default 0.001')
parser.add_argument('--hidden_units', type=int, default=500,
help='hidden units, default 500')
parser.add_argument('--print_every', type=int, default=20,
help='print every iterations')
parser.add_argument('--dropout_prob', type=float, default=0.1,
help='dropout probability, default 0.1')
parser.add_argument('--epochs', type=int, default=15,
help='epochs, default 15')
parser.add_argument('--gpu', action='store_true',
default= 'cuda', help='to cuda gpu')
return parser.parse_args()
def get_input_args_predict():
parser = argparse.ArgumentParser()
parser.add_argument('path_to_image', type=str, default=None,
help='image file to predict')
parser.add_argument('checkpoint', type=str, default='checkpoints/inception_v3_checkpoint.pth',
help='path to checkpoint')
parser.add_argument('--topk', type=int, default=5,
help='return top k most likely classes the image belongs to')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='class names mapping')
parser.add_argument('--gpu', default='cuda',
help='use cuda')
return parser.parse_args()
``` |
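A hypothetical invocation of the prediction pipeline above; the image and checkpoint paths are placeholders, not files known to ship with the repo:
```python
probs, classes = predict(
    image_path='flowers/test/1/image_06743.jpg',  # placeholder path
    checkpoint_path='checkpoints/inception_v3_checkpoint.pth',
    category_names='cat_to_name.json',
    device='cpu',
    topk=5,
)
print(list(zip(classes, probs)))
```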
{
"source": "2series/Data-Structures-and-Algorithms",
"score": 4
} |
#### File: Data-Structures-and-Algorithms/Lesson 1/lesson-1-quiz.py
```python
def example1(manatees):
for manatee in manatees:
print manatee['name']
# Efficiency: O(n) -- visits every manatee once
def example2(manatees):
print manatees[0]['name']
print manatees[0]['age']
# Efficiency: O(1) -- two constant-time lookups
def example3(manatees):
for manatee in manatees:
for manatee_property in manatee:
print manatee_property, ": ", manatee[manatee_property]
# Efficiency: O(n*m) -- n manatees, m properties each
def example4(manatees):
oldest_manatee = "No manatees here!"
for manatee1 in manatees:
for manatee2 in manatees:
if manatee1['age'] < manatee2['age']:
oldest_manatee = manatee2['name']
else:
oldest_manatee = manatee1['name']
print oldest_manatee
# Efficiency: O(n^2)
``` |
{
"source": "2series/Introduction-to-Computational-Thinking-and-Data-Science",
"score": 3
} |
#### File: Introduction-to-Computational-Thinking-and-Data-Science/Lecture 10 - Experimental Data Part 1/lecture10-segment1.py
```python
import random, pylab, numpy
#set line width
pylab.rcParams['lines.linewidth'] = 4
#set font size for titles
pylab.rcParams['axes.titlesize'] = 20
#set font size for labels on axes
pylab.rcParams['axes.labelsize'] = 20
#set size of numbers on x-axis
pylab.rcParams['xtick.labelsize'] = 16
#set size of numbers on y-axis
pylab.rcParams['ytick.labelsize'] = 16
#set size of ticks on x-axis
pylab.rcParams['xtick.major.size'] = 7
#set size of ticks on y-axis
pylab.rcParams['ytick.major.size'] = 7
#set size of markers
pylab.rcParams['lines.markersize'] = 10
#set number of examples shown in legends
pylab.rcParams['legend.numpoints'] = 1
def getData(fileName):
dataFile = open(fileName, 'r')
distances = []
masses = []
dataFile.readline() #discard header
for line in dataFile:
d, m = line.split()
distances.append(float(d))
masses.append(float(m))
dataFile.close()
return (masses, distances)
def labelPlot():
pylab.title('Measured Displacement of Spring')
pylab.xlabel('|Force| (Newtons)')
pylab.ylabel('Distance (meters)')
def plotData(fileName):
xVals, yVals = getData(fileName)
xVals = pylab.array(xVals)
yVals = pylab.array(yVals)
xVals = xVals*9.81 #acc. due to gravity
pylab.plot(xVals, yVals, 'bo',
label = 'Measured displacements')
labelPlot()
plotData('springData.txt')
```
#### File: Introduction-to-Computational-Thinking-and-Data-Science/Lecture 12 - Machine Learning/lecture12-segment1.py
```python
import pylab
##set line width
#pylab.rcParams['lines.linewidth'] = 4
##set font size for titles
#pylab.rcParams['axes.titlesize'] = 20
##set font size for labels on axes
#pylab.rcParams['axes.labelsize'] = 20
##set size of numbers on x-axis
#pylab.rcParams['xtick.labelsize'] = 16
##set size of numbers on y-axis
#pylab.rcParams['ytick.labelsize'] = 16
##set size of ticks on x-axis
#pylab.rcParams['xtick.major.size'] = 7
##set size of ticks on y-axis
#pylab.rcParams['ytick.major.size'] = 7
##set size of markers
#pylab.rcParams['lines.markersize'] = 10
##set number of examples shown in legends
#pylab.rcParams['legend.numpoints'] = 1
def minkowskiDist(v1, v2, p):
"""Assumes v1 and v2 are equal-length arrays of numbers
Returns Minkowski distance of order p between v1 and v2"""
dist = 0.0
for i in range(len(v1)):
dist += abs(v1[i] - v2[i])**p
return dist**(1/p)
class Animal(object):
def __init__(self, name, features):
"""Assumes name a string; features a list of numbers"""
self.name = name
self.features = pylab.array(features)
def getName(self):
return self.name
def getFeatures(self):
return self.features
def distance(self, other):
"""Assumes other an Animal
Returns the Euclidean distance between feature vectors
of self and other"""
return minkowskiDist(self.getFeatures(),
other.getFeatures(), 2)
def __str__(self):
return self.name
cobra = Animal('cobra', [1,1,1,1,0])
rattlesnake = Animal('rattlesnake', [1,1,1,1,0])
boa = Animal('boa constrictor', [0,1,0,1,0])
chicken = Animal('chicken', [1,1,0,1,2])
alligator = Animal('alligator', [1,1,0,1,4])
dartFrog = Animal('dart frog', [1,0,1,0,4])
salmon = Animal('salmon', [1,1,0,1,0])
python = Animal('python', [1,1,0,1,0])
animals = [cobra, rattlesnake, boa, chicken,
alligator, dartFrog, salmon, python]
#def compareAnimals(animals, precision):
# """Assumes animals is a list of animals, precision an int >= 0
# Builds a table of Euclidean distance between each animal"""
# #Get labels for columns and rows
# columnLabels = []
# for a in animals:
# columnLabels.append(a.getName())
# rowLabels = columnLabels[:]
# tableVals = []
# #Get distances between pairs of animals
# #For each row
# for a1 in animals:
# row = []
# #For each column
# for a2 in animals:
# if a1 == a2:
# row.append('--')
# else:
# distance = a1.distance(a2)
# row.append(str(round(distance, precision)))
# tableVals.append(row)
# #Produce table
# table = pylab.table(rowLabels = rowLabels,
# colLabels = columnLabels,
# cellText = tableVals,
# cellLoc = 'center',
# loc = 'center',
# colWidths = [0.2]*len(animals))
# table.scale(1, 2.5)
# pylab.savefig('distances')
#
#compareAnimals(animals, 3)
for i in range(len(animals)):
for j in range(len(animals)):
if i < j:
a = animals[i]
b = animals[j]
dist = round(a.distance(b), 3)
print('Distance between', a, 'and', b,
'is', dist)
```
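A worked check of minkowskiDist: for v1 = (0, 0) and v2 = (3, 4), order p = 1 gives the Manhattan distance 7 and p = 2 the Euclidean distance 5:
```python
print(minkowskiDist([0, 0], [3, 4], 1))  # 7.0
print(minkowskiDist([0, 0], [3, 4], 2))  # 5.0
```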
#### File: Introduction-to-Computational-Thinking-and-Data-Science/remove-Nodes-from-linked-list/removeDuplicates.py
```python
class Node:
def __init__(self,data):
self.data = data
self.next = None
class Solution:
def insert(self,head,data):
p = Node(data)
if head==None:
head=p
elif head.next==None:
head.next=p
else:
start=head
while(start.next!=None):
start=start.next
start.next=p
return head
def display(self,head):
current = head
while current:
print(current.data,end=' ')
current = current.next
def removeDuplicates(self, head):
current = head
while current.next:
if current.data == current.next.data:
current.next = current.next.next
else:
current = current.next
return head
# myList = []
# while head:
# if head.data not in myList:
# myList.append(head.data)
# head = head.next
# print(*myList, sep=' ')
mylist= Solution()
T= [1,2,2,3,3,4]
head=None
for i in T:
data=int(i)
head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head)
``` |
{
"source": "2series/Introduction-To-Computer-Science",
"score": 4
} |
#### File: pset6/crack/crack.py
```python
import crypt
import itertools
import sys
#characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" # CS50 charset
def main():
if len(sys.argv) == 2:
argv = sys.argv[1]
else:
print("Usage: python3 crack.py hash")
return
salt = argv[:2]
wordlist = create_wordlist(characters, 4)
for word in wordlist:
if crypt.crypt(word, salt) == argv:
print(word)
return word
def create_wordlist(charset, wordlen):
if str(wordlen).isdigit() != True:
return None
words = []
for i in range(1, wordlen + 1):
words += list(map("".join, itertools.product(str(charset), repeat=i)))
return words
main()
```
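A size check for create_wordlist: with a 2-letter charset and maximum length 3, the candidate list holds 2 + 4 + 8 = 14 words:
```python
print(create_wordlist("ab", 2))       # ['a', 'b', 'aa', 'ab', 'ba', 'bb']
print(len(create_wordlist("ab", 3)))  # 14
```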
#### File: pset6/credit/credit.py
```python
def checkcc(nums):
def cctype(cc):
cc = str(cc)
cc_range = {"visa": [13, 16], "mc": [16], "amex":[15]}
digits = len(str(nums))
start = int(cc[:2])
if int(cc[:1]) == 4 and digits in cc_range["visa"]:
return "VISA"
elif start in [34, 37] and digits in cc_range["amex"]:
return "AMEX"
elif start in list(range(51, 56)) and digits in cc_range["mc"]:
return "MASTERCARD"
else:
return None
def luhn(cc):
cc = str(cc)[::-1] # Reverse string
seclast_digits = list(map(lambda x: int(x) * 2, cc[1::2])) # Extract all second digits to a list multiplied by 2
rem_digits = sum(list(map(lambda x: int(x), cc[::2]))) # Sum up remaining digits
for i in range(len(seclast_digits)):
if seclast_digits[i] > 9:
seclast_digits[i] -= 9
seclast_digits = sum(seclast_digits)
if (seclast_digits + rem_digits) % 10 == 0:
return True
else:
return False
cardtype = cctype(nums)
if cardtype is not None and luhn(nums) == True:
return cardtype
else:
return "INVALID"
def get_int(s, pos):
try:
i = int(input(s))
except:
i = get_int("Try again: ", pos)
finally:
if pos == True and i < 0:
i = get_int("Try again: ", pos)
return i
def main():
ccdigits = get_int("Number: ", True)
print(checkcc(ccdigits))
main()
```
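A worked Luhn example using the well-known Visa test number 4111 1111 1111 1111: doubling every second digit from the right gives 2+2+2+2+2+2+2+8 = 22, the remaining digits sum to 8, and 22 + 8 = 30 is divisible by 10, so the number validates:
```python
print(checkcc(4111111111111111))  # VISA
```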
#### File: pset6/sentiments/analyzer.py
```python
import re
class Analyzer():
"""Implements sentiment analysis."""
def __init__(self, positives, negatives):
"""Initialize Analyzer."""
# Load each lexicon as a word list, skipping blank lines and the
# ';'-prefixed comment header so no word is lost.
with open(positives) as f:
self.positives = [w.strip() for w in f if w.strip() and not w.startswith(";")]
with open(negatives) as f:
self.negatives = [w.strip() for w in f if w.strip() and not w.startswith(";")]
def analyze(self, text):
"""Analyze text for sentiment, returning its score."""
# Convert to lower case and split words into list
text = text.lower().split()
# Strip off extraneous characters
pattern = r"(\w+)[+-]*(\w+)*"
for i in range(len(text)):
match = re.search(pattern, text[i])
if match:
text[i] = match.group()
# Initialize the score once, outside the loop, so every word counts
score = 0
for word in text:
if word in self.positives:
score += 1
elif word in self.negatives:
score -= 1
return score
``` |
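A usage sketch with throwaway lexicon files; the ';' header line mimics the comment headers the real CS50 lexicons carry:
```python
import os
import tempfile

def make_lexicon(words):  # helper for this sketch only
    f = tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False)
    f.write(";; header\n" + "\n".join(words) + "\n")
    f.close()
    return f.name

pos, neg = make_lexicon(["good", "great"]), make_lexicon(["bad", "awful"])
analyzer = Analyzer(pos, neg)
print(analyzer.analyze("A good, good day"))    # 2
print(analyzer.analyze("what an awful mess"))  # -1
os.remove(pos)
os.remove(neg)
```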
{
"source": "2series/Professional-In-Artificial-Intelligence",
"score": 3
} |
#### File: 3 - Data Science Research Methods/lab5/PlotOutliers.py
```python
def id_outliers(df):
df['outlier'] = [0] * df.shape[0]
df.ix[df['enginesize'] > 190, 'outlier'] = 1
df.ix[df['weight'] > 3500, 'outlier'] = 1
df.ix[df['citympg'] > 40, 'outlier'] = 1
return df
def auto_scatter_outliers(df, plot_cols):
import matplotlib.pyplot as plt
outlier = [0, 0, 1, 1]
fuel = ['gas', 'diesel', 'gas', 'diesel']
color = ['DarkBlue', 'DarkBlue', 'Red', 'Red']
marker = ['x','o','o','x'] # vector of shape choices for plot
for col in plot_cols:
fig = plt.figure(figsize = (6, 6))
ax = fig.gca()
for o, f, c, m in zip(outlier, fuel, color, marker):
temp = df.ix[(df['outlier'] == o) & (df['fueltype'] == f), :]
if temp.shape[0] > 0:
temp.plot(kind = 'scatter', x = col, y = 'lnprice', marker = m, color = c, ax = ax)
ax.set_title('Scatter plot of lnprice vs. ' + col)
fig.savefig('scatter_' + col + '.png')
#return plot_cols
def azureml_main(df):
import matplotlib
matplotlib.use('agg')
plot_cols = ["weight", "enginesize", "citympg"]
df = id_outliers(df)
auto_scatter_outliers(df, plot_cols)
df = df[df.outlier == 1]
return df
```
#### File: 4 - Principles of Machine Learning/lab2/BikeEvaluate.py
```python
import pandas as pd
def ts_bikes(df, times):
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
for tm in times:
fig = plt.figure(figsize = (8, 6))
fig.clf()
ax = fig.gca()
df_hr = df[df.hr == tm]
df_hr.plot(kind = 'line', ax = ax, x = 'days', y = 'cnt', color = 'blue')
df_hr.plot(kind = 'line', ax = ax, x = 'days', y = 'predicted', color = 'red')
ax.set_xlabel('Days from start')
ax.set_ylabel('Number of bikes rented')
ax.set_title('Bikes rented for hour ' + str(tm))
plt.savefig('ts_' + str(tm) + '.png')
def box_resids(df):
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
df['resids'] = df['predicted'] - df['cnt']
fig = plt.figure(figsize = (12, 6))
ax = fig.gca()
df.boxplot(column = 'resids', by = ['hr'], ax = ax)
ax.set_xlabel('')
ax.set_ylabel('Residuals')
fig.savefig('resids.png')
return df
def ts_resids_hist(df, times):
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
for time in times:
df_time = df.ix[df.hr == time, ['resids']]
fig = plt.figure(figsize = (8, 6))
ax = fig.gca()
df_time.hist(bins = 30, ax = ax)
ax.set_xlabel('Residuals')
ax.set_ylabel('Density')
ax.set_title('Histogram of residuals by hour for hour ' + str(time))
plt.savefig('hist_' + str(time) + '.png')
def azureml_main(df, dataframe2 = None):
times = [6, 8, 10, 12, 14, 16, 18, 20, 22]
df = df.sort(['days', 'hr'], axis = 0, ascending = True)
ts_bikes(df, times)
df = box_resids(df)
ts_resids_hist(df, times)
return df
``` |