# -*- coding: utf-8 -*-
from zomboid.java import ArrayList
from .base import BaseScriptObject
from ..parser import ScriptParser
Model = None #TODO: replace with class
Vector3f = None #TODO: replace with class
class ModelAttachment:
id : str = None
offset : Vector3f = None
rotate : Vector3f = None
bone : str = None
def __init__(self, id : str):
self.id = id
self.offset = None #TODO: replace with class initialization
self.rotate = None #TODO: replace with class initialization
def getId(self) -> str:
return self.id
def setId(self, value : str) -> None:
if not value:
return #TODO: raise exception
self.id = value
def getOffset(self) -> Vector3f:
return self.offset
def getRotate(self) -> Vector3f:
return self.rotate
def getBone(self) -> str:
return self.bone
def setBone(self, value : str) -> None:
if not value:
value = None # no empty strings
self.bone = value
class ModelScript(BaseScriptObject):
DEFAULT_SHADER_NAME : str = "basicEffect"
fileName : str = None
name : str = None
scale : float = 1.0
meshName : str = None
textureName : str = None
shaderName : str = None
bStatic : bool = None
m_attachments : ArrayList = None
invertX : bool = False
loadedModel : Model = None
def __init__(self):
self.m_attachments = ArrayList()
def Load(self, name : str, data : str) -> None:
from ..manager import instance
self.fileName = instance.currentFileName
self.name = name
block = ScriptParser.parse(data)
block = block.children[0]
for sub in block.children:
if sub.type == 'attachment':
self.LoadAttachment(sub)
for value in block.values:
key, value = value.string.split(' = ')
key = key.lower()
if key == 'mesh':
self.meshName = value
elif key == 'scale':
self.scale = float(value)
elif key == 'shader':
self.shaderName = value
elif key == 'static':
self.bStatic = value.lower() == 'true'
elif key == 'texture':
self.textureName = value
elif key == 'invertx':  # keys were lower-cased above
self.invertX = value.lower() == 'true'
def LoadAttachment(self, block) -> ModelAttachment:
attach = self.getAttachmentById(block.id)
if not attach:
attach = ModelAttachment(block.id)
self.m_attachments.add(attach)
for value in block.values:
key, value = value.string.split(' = ')
if key == 'bone':
attach.setBone(value)
elif key == 'offset':
self.LoadVector3f(value, attach.offset)
elif key == 'rotate':
self.LoadVector3f(value, attach.rotate)
return attach
def LoadVector3f(self, data : str, vector : Vector3f) -> None:
data = [float(x) for x in data.split()]
# TODO: set the vector here
def getName(self) -> str:
return self.name
def getFullType(self) -> str:
return f"{self.module.name}.{self.name}"
def getMeshName(self) -> str:
return self.meshName
def getTextureName(self) -> str:
if not self.textureName:
return self.meshName
return self.textureName
def getShaderName(self) -> str:
if not self.shaderName:
return self.DEFAULT_SHADER_NAME
return self.shaderName
def getFileName(self) -> str:
return self.fileName
def getAttachmentCount(self) -> int:
return len(self.m_attachments)
def getAttachment(self, index : int) -> ModelAttachment:
return self.m_attachments[index]
def getAttachmentById(self, id : str) -> ModelAttachment:
for attach in self.m_attachments:
if attach.id == id:
return attach
return None
def addAttachment(self, attach : ModelAttachment) -> ModelAttachment:
self.m_attachments.add(attach)
return attach
def removeAttachment(self, attach : ModelAttachment) -> ModelAttachment:
if isinstance(attach, int):
attach = self.m_attachments[attach] #TODO: beware exceptions
self.m_attachments.remove(attach)
return attach
def addAttachmentAt(self, index : int, attach : ModelAttachment) -> ModelAttachment:
self.m_attachments.add(index, attach)
return attach
def reset(self) -> None:
self.name = None
self.meshName = None
self.textureName = None
self.shaderName = None
self.bStatic = False
self.scale = 1.0
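# Illustrative sketch of the script text Load() expects. The key names come from
# the handlers above; the block syntax is an assumption about what
# ScriptParser.parse() consumes, and the values are made up:
#
#   model Crowbar
#   {
#       mesh = Weapons/Crowbar
#       texture = Weapons/Crowbar
#       scale = 0.8
#       attachment Bip01_Prop1
#       {
#           bone = Bip01_R_Hand
#           offset = 0.0 0.05 0.0
#           rotate = 0.0 0.0 0.0
#       }
#   }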
|
#!/usr/bin/env python
"""Tests for `dmx` package."""
import unittest
import time
import os
import numpy as np
import dmx
class TestDMX(unittest.TestCase):
"""Tests for `dmx` package."""
@classmethod
def setUpClass(cls):
"""
setting up everything
:return:
"""
def setUp(self):
"""Set up test fixtures, if any."""
@classmethod
def tearDownClass(cls):
"""Tear down test fixtures, if any."""
cls.disconnect()
def test_DMX_(self):
"""Test dmx with numpy array"""
pass
@staticmethod
def disconnect():
pass
|
__all__ = ["MMSegmentationCallback"]
from icevision.imports import *
from icevision.utils import *
from icevision.core import *
from icevision.data import *
from icevision.engines.fastai import *
from icevision.models.mmseg.utils import *
from icevision.models.mmseg.common.prediction import convert_raw_predictions
class _ModelWrap(nn.Module):
def __init__(self, model: nn.Module):
super().__init__()
self.model = model
def forward(self, xb):
return self.model.train_step(data_batch=xb, optimizer=None)
def forward_test(self, xb):
imgs = xb[0]["img"]
img_metas = xb[0]["img_metas"]
return self.model.forward_test(imgs=[imgs], img_metas=[img_metas])
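# _ModelWrap routes fastai's forward pass to mmseg's own entry points:
# train_step during training (which, in mmseg, returns a dict of losses/log_vars
# rather than raw predictions) and forward_test for inference. That is why the
# callback below swaps xb/yb and fetches raw predictions separately in after_loss.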
class MMSegmentationCallback(fastai.Callback):
def after_create(self):
self.learn.model = _ModelWrap(self.model)
self.model.param_groups = self.model.model.param_groups
def before_batch(self):
self.learn.records = self.yb[0]
self.learn.yb = self.xb
def convert_raw_predictions(self, batch, raw_preds, records):
return convert_raw_predictions(
batch=batch, raw_preds=raw_preds, records=records
)
# TODO: Understand why this is needed - why can't we get the preds directly from mmseg?
def after_loss(self):
if not self.training:
self.model.eval()
with torch.no_grad():
self.learn.raw_preds = self.model.forward_test(self.xb)
self.model.train()
# TODO: implement abstract function
self.learn.converted_preds = self.convert_raw_predictions(
batch=self.xb[0], raw_preds=self.raw_preds, records=self.records
)
|
import unittest
import os
import bcrypt
from flask import session
from app import create_app as create_app_test
from utils.database import Database
from user.models import User
"""
NB: To run these tests safely, comment out the email-sending operations in `views.py`.
"""
class UserTest(unittest.TestCase):
def create_app(self):
return create_app_test(
'testing'
)
def setUp(self) -> None:
self.app_factory = self.create_app()
self.db_name = os.environ['MONGODB_NAME']
self.app = self.app_factory.test_client()
def tearDown(self) -> None:
db = Database.CLIENT
db.drop_database(self.db_name)
def getUser(self):
return dict(
first_name="Mohamed",
last_name="El Rahali",
username="mdrahali",
email="[email protected]",
password="12345",
confirm="12345"
)
def test_register_user(self):
"""
Basic Registration Test
"""
self.app.post("/register", data=self.getUser(), follow_redirects=True)
user = User.getByName("mdrahali")
user.email_confirmation = True
user.change_configuration = {}
user.update_record()
assert Database.count_record('users', {'username': "mdrahali"}) == 1
"""
Invalid Username
"""
invalid_user = self.getUser()
invalid_user['username'] = "wrong wrong"
response = self.app.post("/register", data=invalid_user, follow_redirects=True)
assert "Username must contain only letters numbers or underscore" in str(response.data)
def test_login_user(self):
self.app.post("/register", data=self.getUser(), follow_redirects=True)
user = User.getByName(username=self.getUser()['username'])
code = user.change_configuration.get('confirmation_code')
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert "Your email has been confirmed" in str(rv.data)
# try again to confirm
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert rv.status_code == 404
response = self.app.post('/login', data=dict(
username=user.username,
password=self.getUser()['password']
))
with self.app as c: # use the test client as a context manager whenever you need to read the session
c.get('/')
assert response.status_code == 302
assert session['username'] == user.username
def test_edit_profile(self):
self.app.post("/register", data=self.getUser(), follow_redirects=True)
user = User.getByName(username=self.getUser()['username'])
code = user.change_configuration.get('confirmation_code')
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert "Your email has been confirmed" in str(rv.data)
self.app.post('/login', data=dict(
username=user.username,
password=self.getUser()['password']
))
response = self.app.get('/edit')
assert response.status_code == 200
# Edit First Name
user = self.getUser()
user['first_name'] = "Test First Name"
response = self.app.post('/edit', data=user, follow_redirects=True)
assert "Your info has been updated succefully ..!" in str(response.data)
assert "Test First Name" == User.getByName(user['username']).first_name
# Edit username & email
user = self.getUser()
user['username'] = "kaka123"
user['email'] = "[email protected]"
response = self.app.post('/edit', data=user, follow_redirects=True)
user = User.getByName(username=user['username'])
code = user.change_configuration.get('confirmation_code')
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert "Your email has been confirmed" in str(rv.data)
self.app.post('/login', data=dict(
username=user.username,
password=self.getUser()['password']
))
assert "kaka123" == User.getByName(user.username).username
assert "[email protected]" == User.getByName(user.username).email
# Second User
self.app.post("/register", data=self.getUser(), follow_redirects=True)
user = User.getByName(username=self.getUser()['username'])
code = user.change_configuration.get('confirmation_code')
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert "Your email has been confirmed" in str(rv.data)
response = self.app.post('/login', data=dict(
username=user.username,
password=self.getUser()['password']
))
assert response.status_code == 302
# use a username already used
user = self.getUser()
user['username'] = "kaka123"
response = self.app.post('/edit', data=user, follow_redirects=True)
assert "This username is already in use." in str(response.data)
# use an email already in use
user = self.getUser()
user['email'] = "[email protected]"
response = self.app.post('/edit', data=user, follow_redirects=True)
assert "This email is already in use." in str(response.data)
def test_get_profile(self):
user = self.getUser()
self.app.post("/register", data=user, follow_redirects=True)
user = User.getByName(username=self.getUser()['username'])
code = user.change_configuration.get('confirmation_code')
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert "Your email has been confirmed" in str(rv.data)
self.app.post('/login', data=dict(
username=user.username,
password=self.getUser()['password']
))
response = self.app.get('/profile/' + user.username)
assert "@" + user.username in str(response.data)
def test_forget_password(self):
# create user
user = self.getUser()
self.app.post("/register", data=user, follow_redirects=True)
user = User.getByName(username=self.getUser()['username'])
code = user.change_configuration.get('confirmation_code')
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert "Your email has been confirmed" in str(rv.data)
user = self.getUser()
response = self.app.post('/forgot', data=user)
assert "You will receive a password reset email if we find that email in our system" in str(response.data)
user = User.getByName(username=self.getUser()['username'])
assert user.change_configuration != {}
user_passwords = {
"password": "12346",
"confirm": "12346"
}
self.app.post(
'/password_reset/' + user.username + '/'
+ user.change_configuration['password_reset_code'],
data=user_passwords
)
user = User.getByName(username=self.getUser()['username'])
assert bcrypt.hashpw(user_passwords['password'], user.password) == user.password
assert bcrypt.checkpw(user_passwords['password'], user.password)
response = self.app.get('/password_reset_complete')
assert "Your password has been updated" in str(response.data)
# logging with new password
response = self.app.post('/login', data=dict(
username=user.username,
password=user_passwords['password']
))
assert response.status_code == 302
with self.app as c:
c.get('/')
assert session['username'] == user.username
def test_change_password(self):
# create user
user = self.getUser()
self.app.post("/register", data=user, follow_redirects=True)
user = User.getByName(username=self.getUser()['username'])
code = user.change_configuration.get('confirmation_code')
rv = self.app.get('/confirm/' + user.username + '/' + code)
assert "Your email has been confirmed" in str(rv.data)
self.app.post('/login', data=dict(
username=user.username,
password=self.getUser()['password']
))
user_passwords = {
"current_password": self.getUser()['password'],
"password": "12346",
"confirm": "12346"
}
response = self.app.post('/change_password', data=user_passwords, follow_redirects=True)
assert "Your password has been updated." in str(response.data)
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List
class Mediator(ABC):
@abstractmethod
def broadcast(self, person: Colleague, message: str) -> None:
pass
@abstractmethod
def direct(self, sender: Colleague, receiver: str, message: str) -> None:
pass
class Colleague(ABC):
def __init__(self) -> None:
self.name: str
@abstractmethod
def broadcast(self, message: str) -> None:
pass
@abstractmethod
def direct(self, message: str) -> None:
pass
class Person(Colleague):
def __init__(self, name: str, mediator: Mediator) -> None:
self.name: str = name
self.mediator = mediator
def broadcast(self, message: str) -> None:
self.mediator.broadcast(self, message)
def direct(self, message: str) -> None:
print(message)
def send_direct(self, receiver: str, message: str) -> None:
self.mediator.direct(self, receiver, message)
class Chatroom(Mediator):
def __init__(self) -> None:
self.colleagues: List[Colleague] = []
def is_colleague(self, colleague: Colleague) -> bool:
return colleague in self.colleagues
def add(self, colleague: Colleague) -> None:
if not self.is_colleague(colleague):
self.colleagues.append(colleague)
def remove(self, colleague: Colleague) -> None:
if self.is_colleague(colleague):
self.colleagues.remove(colleague)
def broadcast(self, colleague: Colleague, message: str) -> None:
if not self.is_colleague(colleague):
return
print(f'{colleague.name} says {message}')
def direct(self, sender: Colleague, receiver: str, message: str) -> None:
if not self.is_colleague(sender):
return
receiver_obj: List[Colleague] = [
colleague for colleague in self.colleagues
if colleague.name == receiver
]
if not receiver_obj:
return
receiver_obj[0].direct(
f'{sender.name} for {receiver_obj[0].name}: {message}')
if __name__ == '__main__':
chat = Chatroom()
john = Person('John', chat)
mary = Person('Mary', chat)
janet = Person('janet', chat)
kant = Person('kant', chat)
chat.add(john)
chat.add(mary)
chat.add(janet)
# chat.add(kant)
john.broadcast('Hello people')
janet.broadcast('Hi')
# kant.broadcast('I was not added to chat')
print()
john.send_direct('Mary', 'Hi Mary, are you okay?')
mary.send_direct('John', 'I am pretty good. And how about you ?')
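# Expected console output (roughly), given the print formats in Chatroom and Person:
#   John says Hello people
#   janet says Hi
#
#   John for Mary: Hi Mary, are you okay?
#   Mary for John: I am pretty good. And how about you ?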
|
import pathmagic # noqa isort:skip
import datetime
import os
import unittest
from database import Database
class TestDB(unittest.TestCase):
def test_run(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
self.db = Database()
EVENT_COUNT = 4
ARTIST_COUNT = 3
# Check if initially empty
self.db_empty_test("events")
self.db_empty_test("artists")
# Check if not empty after insert from file
self.db.insert_events(TEST_DIR + "/events.json")
self.db_not_empty_test("events", EVENT_COUNT)
# Check if empty after clean
self.db.clean_events()
self.db_empty_test("events")
# Check if not empty after insert from file
self.db.insert_artists(TEST_DIR + "/artists.json")
self.db_not_empty_test("artists", ARTIST_COUNT)
# Check if empty after clean
self.db.clean_artists()
self.db_empty_test("artists")
# Check if not empty after insert one
start_date = datetime.date(2020, 12, 1)
end_date = datetime.date(2020, 12, 31)
self.db.insert_event_from_date("test_title", start_date, end_date)
self.db_not_empty_test("events", 1)
# Check if get elem returns correct elem
res = self.db.get_event(2020, 12, 15)
self.assertTrue(res[0][0] == "test_title")
# Check if file exists after save
self.db.save_events(TEST_DIR + "/events2.json")
print(TEST_DIR)
print(TEST_DIR + "/events2.json")
self.assertTrue(os.path.exists(TEST_DIR + "/events2.json"))
# Check if empty after clean
self.db.delete_event("test_title")
self.db_empty_test("events")
# Check if not empty after insert one
self.db.insert_artist("test_name", "test_make", "test_model")
self.db_not_empty_test("artists", 1)
# Check if get elem returns correct elem
res = self.db.get_artist("test_make", "test_model")
self.assertTrue(res[0][0] == "test_name")
# Check if file exists after save
self.db.save_artists(TEST_DIR + "/artists2.json")
self.assertTrue(os.path.exists(TEST_DIR + "/artists2.json"))
# Check if empty after clean
self.db.delete_artist("test_name")
self.db_empty_test("artists")
# Check if not empty after insert from saved file
self.db.insert_events(TEST_DIR + "/events2.json")
self.db_not_empty_test("events", 1)
# Check if not empty after insert from saved file
self.db.insert_artists(TEST_DIR + "/artists2.json")
self.db_not_empty_test("artists", 1)
def db_not_empty_test(self, table: str, size):
res = self.db.get_all_from_table(table)
self.assertTrue(len(res) == size)
def db_empty_test(self, table: str):
res = self.db.get_all_from_table(table)
self.assertTrue(len(res) == 0)
|
import types
import sys
import os
import simplejson
from kascfg import model as model
"""
This module can be used for loading data into your models, for example when setting up default application data,
unit tests, JSON export/import and importing/exporting legacy data. Data is serialized to and from the JSON format.
"""
VALID_FIXTURE_FILE_EXTENSIONS = ['.json']
def load_data(model, filename=None, base_dir=None):
"""Installs provided fixture files into given model. Filename may be directory, file or list of dirs or files. If filename is
None, assumes that source file is located in fixtures/model_module_name/model_tablename.yaml of your application directory,
for example MyProject/fixtures/news/newsitems.yaml. The base_dir argument is the top package of the application unless
specified. You can also pass the name of a table instead of a model class."""
if type(model) is types.StringType:
return load_data_to_table(model, filename, base_dir)
else:
if filename is None:
filename = _default_fixture_path_for_model(model, base_dir)
return _load_data_from_file(model, filename)
def load_data_to_table(table, filename=None, base_dir=None):
"""Installs data directly into a table. Useful if table does not have a corresponding model, for example a many-to-many join table.
"""
if filename is None:
filename = _default_fixture_path_for_table(table, base_dir)
_load_data_to_table(table, filename)
def dump_data(model, filename=None, **params):
"""Dumps data to given destination. Params are optional arguments for selecting data. If filename is None, assumes that destination
file is located in fixtures/model_module_name/model_name_lowercase.yaml of your application directory, for example
MyProject/fixtures/news/newsitem.yaml.
"""
if filename is None:
filename = _default_fixture_path_for_model(model)
_dump_data_to_file(model, filename, **params)
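# Illustrative usage (NewsItem and the filter kwarg are hypothetical):
#   load_data(NewsItem)                 # reads fixtures/news/newsitems.json
#   load_data('news.newsitems')         # loads straight into the table
#   dump_data(NewsItem, approved=True)  # selects rows by **params and writes them back as JSON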
_base_dir = os.path.dirname(os.path.dirname(__file__))
def _default_fixture_path_for_model(model, base_dir=None):
if base_dir is None:
base_dir = _base_dir
path = os.path.join(base_dir, 'fixtures')
module_dirs = model.__module__.split('.', 2)[-1].split('.')
for dir in module_dirs:
path = os.path.join(path, dir)
return os.path.join(path, model.table.name + '.json')
def _default_fixture_path_for_table(table, base_dir=None):
if base_dir is None:
base_dir = _base_dir
module_dirs = table.split('.')
path = os.path.join(base_dir, 'fixtures')
for name in module_dirs:
path = os.path.join(path, name)
return path + ".json"
def _is_fixture_file(filename):
basename, ext = os.path.splitext(filename)
return (ext.lower() in VALID_FIXTURE_FILE_EXTENSIONS)
def _load_data_from_dir(model, dirname):
for dirpath, dirnames, filenames in os.walk(dirname):
for filename in filenames:
_load_data_from_file(model, os.path.join(dirpath, filename))
def _load_data_from_file(model, filename):
if not _is_fixture_file(filename):
return
fp = file(filename, 'r')
data = simplejson.load(fp)
fp.close()
retval = None
if type(data) is types.ListType:
retval = []
for item in data:
retval.append(_load_instance_from_dict(model, item))
elif type(data) is types.DictType:
retval = {}
for key, item in data.iteritems():
retval[key] = _load_instance_from_dict(model, item)
return retval
def _load_data_to_table(tablename, filename):
if not _is_fixture_file(filename):
return
fp = file(filename, 'r')
data = simplejson.load(fp)
fp.close()
tablename = tablename.split(".")[-1]
table = model.context.metadata.tables[tablename]
if type(data) is types.ListType:
for item in data:
table.insert(item).execute()
elif type(data) is types.DictType:
for key, item in data.iteritems():
table.insert(item).execute()
return data
def _dump_data_to_file(model, filename, **params):
if params:
queryset = model.select_by(**params)
else:
queryset = model.select()
data = []
for instance in queryset:
data.append(_dump_instance_to_dict(instance))
fp = file(filename, 'w')
simplejson.dump(data, fp)
fp.close()
def _load_instance_from_dict(model, dict):
if not dict: return
instance = model()
fields = model._descriptor.fields.keys()
for k, v in dict.iteritems():
if k in fields:
setattr(instance, k, v)
instance.flush()
return instance
def _dump_instance_to_dict(instance):
if hasattr(instance, 'to_json'):
return instance.to_json()
d = {}
fields = instance._descriptor.fields.keys()
for field in fields:
d[field] = getattr(instance, field)
return d
__all__ = ['load_data', 'dump_data']
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Everything except the (de)serialization utils has been removed to keep the
# number of requirements down.
"""Utility methods for working with WSGI servers."""
import datetime
import errno
import os
import signal
import eventlet
from eventlet import wsgi
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import sslutils
from sahara import exceptions
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
LOG = logging.getLogger(__name__)
wsgi_opts = [
cfg.IntOpt('max_header_line',
default=16384,
help="Maximum line size of message headers to be accepted. "
"max_header_line may need to be increased when using "
"large tokens (typically those generated by the "
"Keystone v3 API with big service catalogs)."),
]
CONF = cfg.CONF
CONF.register_opts(wsgi_opts)
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
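# Illustrative behaviour of the dispatcher (the `create` method named here is
# hypothetical): a subclass defining create(self, data) gets
# dispatch(data, action='create') routed to it, while any action without a
# matching method falls back to default(data).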
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
def sanitizer(obj):
if isinstance(obj, datetime.datetime):
_dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
return _dtime.isoformat()
return unicode(obj)
return jsonutils.dumps(data, default=sanitizer)
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exceptions.MalformedRequestBody(msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=500):
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.threads = threads
self.children = []
self.running = True
def start(self, application):
"""Run a WSGI server with the given application.
:param application: The application to run in the WSGI server
"""
def kill_children(*args):
"""Kills the entire process group."""
LOG.error(_LE('SIGTERM received'))
signal.signal(signal.SIGTERM, signal.SIG_IGN)
self.running = False
os.killpg(0, signal.SIGTERM)
def hup(*args):
"""Shuts down the server(s).
Shuts down the server(s), but allows running requests to complete
"""
LOG.error(_LE('SIGHUP received'))
signal.signal(signal.SIGHUP, signal.SIG_IGN)
os.killpg(0, signal.SIGHUP)
signal.signal(signal.SIGHUP, hup)
self.application = application
self.sock = eventlet.listen((CONF.host, CONF.port), backlog=500)
if sslutils.is_enabled(CONF):
LOG.info(_LI("Using HTTPS for port %s"), CONF.port)
self.sock = sslutils.wrap(CONF, self.sock)
if CONF.api_workers == 0:
# Useful for profiling, test, debug etc.
self.pool = eventlet.GreenPool(size=self.threads)
self.pool.spawn_n(self._single_run, application, self.sock)
return
LOG.debug("Starting %d workers", CONF.api_workers)
signal.signal(signal.SIGTERM, kill_children)
signal.signal(signal.SIGHUP, hup)
while len(self.children) < CONF.api_workers:
self.run_child()
def wait_on_children(self):
while self.running:
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
if pid in self.children:
LOG.error(_LE('Removing dead child %s'), pid)
self.children.remove(pid)
self.run_child()
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
except KeyboardInterrupt:
LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
os.killpg(0, signal.SIGTERM)
break
eventlet.greenio.shutdown_safe(self.sock)
self.sock.close()
LOG.debug('Server exited')
def wait(self):
"""Wait until all servers have completed running."""
try:
if self.children:
self.wait_on_children()
else:
self.pool.waitall()
except KeyboardInterrupt:
pass
def run_child(self):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.run_server()
LOG.debug('Child %d exiting normally', os.getpid())
return
else:
LOG.info(_LI('Started child %s'), pid)
self.children.append(pid)
def run_server(self):
"""Run a WSGI server."""
self.pool = eventlet.GreenPool(size=self.threads)
wsgi.server(self.sock,
self.application,
custom_pool=self.pool,
log=LOG,
debug=False)
self.pool.waitall()
def _single_run(self, application, sock):
"""Start a WSGI server in a new green thread."""
LOG.info(_LI("Starting single process server"))
eventlet.wsgi.server(sock, application,
custom_pool=self.pool,
log=LOG,
debug=False)
|
#!/usr/bin/python3
from aos.util.trapezoid_profile import TrapezoidProfile
from frc971.control_loops.python import control_loop
from frc971.control_loops.python import controls
import numpy
import sys
from matplotlib import pylab
import gflags
import glog
FLAGS = gflags.FLAGS
try:
gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
except gflags.DuplicateFlagError:
pass
class Turret(control_loop.ControlLoop):
def __init__(self, name='Turret'):
super(Turret, self).__init__(name)
# Stall Torque in N m
self.stall_torque = 0.71
# Stall Current in Amps
self.stall_current = 134
self.free_speed_rpm = 18730.0
# Free Speed in rotations/second.
self.free_speed = self.free_speed_rpm / 60.0
# Free Current in Amps
self.free_current = 0.7
# Resistance of the motor
self.resistance = 12.0 / self.stall_current
# Motor velocity constant
self.Kv = ((self.free_speed * 2.0 * numpy.pi) /
(12.0 - self.resistance * self.free_current))
# Torque constant
self.Kt = self.stall_torque / self.stall_current
# Gear ratio
self.G = (12.0 / 60.0) * (11.0 / 94.0)
# Motor inertia in kg * m^2
self.motor_inertia = 0.00001187
# Moment of inertia, measured in CAD.
# Extra mass to compensate for friction is added on.
self.J = 0.06 + self.motor_inertia * ((1.0 / self.G) ** 2.0)
glog.debug('Turret J is: %f', self.J)
# Control loop time step
self.dt = 0.005
# State is [position, velocity]
# Input is [Voltage]
C1 = self.Kt / (self.resistance * self.J * self.Kv * self.G * self.G)
C2 = self.Kt / (self.J * self.resistance * self.G)
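# The two constants above define the continuous-time plant used below: with
# state x = [position, velocity] and input u = applied voltage,
#   d(position)/dt = velocity
#   d(velocity)/dt = -C1 * velocity + C2 * u
# which is exactly the A_continuous / B_continuous pair constructed next.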
self.A_continuous = numpy.matrix(
[[0, 1],
[0, -C1]])
# Start with the unmodified input
self.B_continuous = numpy.matrix(
[[0],
[C2]])
self.C = numpy.matrix([[1, 0]])
self.D = numpy.matrix([[0]])
self.A, self.B = self.ContinuousToDiscrete(
self.A_continuous, self.B_continuous, self.dt)
controllability = controls.ctrb(self.A, self.B)
glog.debug('Free speed is %f',
-self.B_continuous[1, 0] / self.A_continuous[1, 1] * 12.0)
# Calculate the LQR controller gain
q_pos = 0.20
q_vel = 5.0
self.Q = numpy.matrix([[(1.0 / (q_pos ** 2.0)), 0.0],
[0.0, (1.0 / (q_vel ** 2.0))]])
self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0))]])
self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
# Calculate the feed forwards gain.
q_pos_ff = 0.005
q_vel_ff = 1.0
self.Qff = numpy.matrix([[(1.0 / (q_pos_ff ** 2.0)), 0.0],
[0.0, (1.0 / (q_vel_ff ** 2.0))]])
self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)
q_pos = 0.10
q_vel = 1.65
self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0],
[0.0, (q_vel ** 2.0)]])
r_volts = 0.025
self.R = numpy.matrix([[(r_volts ** 2.0)]])
self.KalmanGain, self.Q_steady = controls.kalman(
A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
self.L = self.A * self.KalmanGain
# The box formed by U_min and U_max must encompass all possible values,
# or else Austin's code gets angry.
self.U_max = numpy.matrix([[12.0]])
self.U_min = numpy.matrix([[-12.0]])
self.InitializeState()
class IntegralTurret(Turret):
def __init__(self, name='IntegralTurret'):
super(IntegralTurret, self).__init__(name=name)
self.A_continuous_unaugmented = self.A_continuous
self.B_continuous_unaugmented = self.B_continuous
self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))
self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented
self.A_continuous[0:2, 2] = self.B_continuous_unaugmented
self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))
self.B_continuous[0:2, 0] = self.B_continuous_unaugmented
self.C_unaugmented = self.C
self.C = numpy.matrix(numpy.zeros((1, 3)))
self.C[0:1, 0:2] = self.C_unaugmented
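# The augmentation above adds a third state that behaves like a voltage offset
# on the input: it feeds the dynamics through the same column as B
# (A_continuous[0:2, 2]), is never measured directly (its column of C is zero),
# and is estimated by the Kalman filter so the controller can cancel it
# (it is the 'voltage_offset' trace in ScenarioPlotter.Plot).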
self.A, self.B = self.ContinuousToDiscrete(
self.A_continuous, self.B_continuous, self.dt)
q_pos = 0.12
q_vel = 2.00
q_voltage = 3.0
self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0, 0.0],
[0.0, (q_vel ** 2.0), 0.0],
[0.0, 0.0, (q_voltage ** 2.0)]])
r_pos = 0.05
self.R = numpy.matrix([[(r_pos ** 2.0)]])
self.KalmanGain, self.Q_steady = controls.kalman(
A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
self.L = self.A * self.KalmanGain
self.K_unaugmented = self.K
self.K = numpy.matrix(numpy.zeros((1, 3)))
self.K[0, 0:2] = self.K_unaugmented
self.K[0, 2] = 1
self.Kff = numpy.concatenate((self.Kff, numpy.matrix(numpy.zeros((1, 1)))), axis=1)
self.InitializeState()
class ScenarioPlotter(object):
def __init__(self):
# Various lists for graphing things.
self.t = []
self.x = []
self.v = []
self.a = []
self.x_hat = []
self.u = []
self.offset = []
def run_test(self, turret, end_goal,
controller_turret,
observer_turret=None,
iterations=200):
"""Runs the turret plant with an initial condition and goal.
Args:
turret: turret object to use.
end_goal: end_goal state.
controller_turret: Turret object to get K from, or None if we should
use turret.
observer_turret: Turret object to use for the observer, or None if we should
use the actual state.
iterations: Number of timesteps to run the model for.
"""
if controller_turret is None:
controller_turret = turret
vbat = 12.0
if self.t:
initial_t = self.t[-1] + turret.dt
else:
initial_t = 0
goal = numpy.concatenate((turret.X, numpy.matrix(numpy.zeros((1, 1)))), axis=0)
profile = TrapezoidProfile(turret.dt)
profile.set_maximum_acceleration(100.0)
profile.set_maximum_velocity(7.0)
profile.SetGoal(goal[0, 0])
U_last = numpy.matrix(numpy.zeros((1, 1)))
for i in range(iterations):
observer_turret.Y = turret.Y
observer_turret.CorrectObserver(U_last)
self.offset.append(observer_turret.X_hat[2, 0])
self.x_hat.append(observer_turret.X_hat[0, 0])
next_goal = numpy.concatenate(
(profile.Update(end_goal[0, 0], end_goal[1, 0]),
numpy.matrix(numpy.zeros((1, 1)))),
axis=0)
ff_U = controller_turret.Kff * (next_goal - observer_turret.A * goal)
U_uncapped = controller_turret.K * (goal - observer_turret.X_hat) + ff_U
U_uncapped = controller_turret.K * (end_goal - observer_turret.X_hat)
U = U_uncapped.copy()
U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
self.x.append(turret.X[0, 0])
if self.v:
last_v = self.v[-1]
else:
last_v = 0
self.v.append(turret.X[1, 0])
self.a.append((self.v[-1] - last_v) / turret.dt)
offset = 0.0
if i > 100:
offset = 2.0
turret.Update(U + offset)
observer_turret.PredictObserver(U)
self.t.append(initial_t + i * turret.dt)
self.u.append(U[0, 0])
ff_U -= U_uncapped - U
goal = controller_turret.A * goal + controller_turret.B * ff_U
if U[0, 0] != U_uncapped[0, 0]:
profile.MoveCurrentState(
numpy.matrix([[goal[0, 0]], [goal[1, 0]]]))
glog.debug('Time: %f', self.t[-1])
glog.debug('goal_error %s', repr(end_goal - goal))
glog.debug('error %s', repr(observer_turret.X_hat - end_goal))
def Plot(self):
pylab.subplot(3, 1, 1)
pylab.plot(self.t, self.x, label='x')
pylab.plot(self.t, self.x_hat, label='x_hat')
pylab.legend()
pylab.subplot(3, 1, 2)
pylab.plot(self.t, self.u, label='u')
pylab.plot(self.t, self.offset, label='voltage_offset')
pylab.legend()
pylab.subplot(3, 1, 3)
pylab.plot(self.t, self.a, label='a')
pylab.legend()
pylab.show()
def main(argv):
argv = FLAGS(argv)
glog.init()
scenario_plotter = ScenarioPlotter()
turret = Turret()
turret_controller = IntegralTurret()
observer_turret = IntegralTurret()
# Test moving the turret with constant separation.
initial_X = numpy.matrix([[0.0], [0.0]])
R = numpy.matrix([[numpy.pi/2.0], [0.0], [0.0]])
scenario_plotter.run_test(turret, end_goal=R,
controller_turret=turret_controller,
observer_turret=observer_turret, iterations=200)
if FLAGS.plot:
scenario_plotter.Plot()
# Write the generated constants out to a file.
if len(argv) != 5:
glog.fatal('Expected .h file name and .cc file name for the turret and integral turret.')
else:
namespaces = ['y2017', 'control_loops', 'superstructure', 'turret']
turret = Turret('Turret')
loop_writer = control_loop.ControlLoopWriter('Turret', [turret],
namespaces=namespaces)
loop_writer.AddConstant(control_loop.Constant(
'kFreeSpeed', '%f', turret.free_speed))
loop_writer.AddConstant(control_loop.Constant(
'kOutputRatio', '%f', turret.G))
loop_writer.Write(argv[1], argv[2])
integral_turret = IntegralTurret('IntegralTurret')
integral_loop_writer = control_loop.ControlLoopWriter(
'IntegralTurret', [integral_turret],
namespaces=namespaces)
integral_loop_writer.Write(argv[3], argv[4])
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
from time import time
from parameterized import parameterized
import torch
from torch.optim import SGD, Adadelta, Adam # type: ignore
from fairscale.nn import FullyShardedDataParallel
from fairscale.optim.utils import recursive_copy_to_device
from fairscale.utils.testing import objects_are_equal
from .test_fsdp import (
DistributedTest,
DummyProcessGroup,
NestedWrappedModule,
TransformerWithSharedParams,
rename_test,
spawn_and_init,
)
def first_tensor_numel(dct):
for k, v in dct.items():
if torch.is_tensor(v):
return v.numel()
return 0
def assert_equal(a, b):
assert a == b, f"{a} != {b}"
class TestOptimizerUtils(DistributedTest):
@parameterized.expand(
[[functools.partial(SGD, momentum=0.9), True], [SGD, False], [Adam, False], [Adadelta, True]],
name_func=rename_test,
)
def test_consolidate_optimizer(self, optim_fn, transformer):
config = {"mixed_precision": True, "flatten_parameters": True}
test_fn = functools.partial(
self._test_consolidated_optimizer, config, optim_fn=optim_fn, transformer=transformer
)
spawn_and_init(test_fn, world_sizes=[min(torch.cuda.device_count(), 4)])
@classmethod
def _test_consolidated_optimizer(self, config, rank, group, optim_fn=torch.optim.SGD, transformer=False):
"""FSDP.gather_full_optim_state_dict() should return something very similar to optimizer.state_dict()"""
# Establish reference behavior.
if transformer:
fsdp = self.get_wrapped_model(group, config=config).cuda()
unwrapped_model = TransformerWithSharedParams(group).cuda()
else:
fsdp = FullyShardedDataParallel(NestedWrappedModule(group, wrapper_config=config), group, **config).cuda()
unwrapped_model = NestedWrappedModule(group, wrapper_config=None).cuda()
try:
fsdp_optim = optim_fn(fsdp.parameters(), lr=0.01,)
optim_unwrapped = optim_fn(unwrapped_model.parameters(), lr=0.01)
except TypeError: # Adadelta
fsdp_optim = optim_fn(fsdp.parameters())
optim_unwrapped = optim_fn(unwrapped_model.parameters())
fsdp_optim.zero_grad()
optim_unwrapped.zero_grad()
x = fsdp.module.get_input(torch.device("cuda"))
output = fsdp(*x)
loss = fsdp.module.get_loss(x, output).to("cuda")
fsdp.module.run_backward(loss)
fsdp_optim.step()
output = unwrapped_model(*x)
loss = unwrapped_model.get_loss(x, output)
unwrapped_model.run_backward(loss)
optim_unwrapped.step()
unwrapped_sd = optim_unwrapped.state_dict()
tstart = time()
sd = fsdp.gather_full_optim_state_dict(fsdp_optim, recipient_rank=0)
duration = time() - tstart
# Switching from fairscale.optim.utils.broadcast_object to torch.broadcast_object_list will cause this to raise
assert duration < fsdp.world_size, f"gather optim state took {duration} seconds, suspect change in _consolidate"
if fsdp.rank > 0:
return
assert_equal(len(sd["state"]), len(unwrapped_sd["state"]))
assert_equal(len(sd["param_groups"][0]["params"]), len(unwrapped_sd["param_groups"][0]["params"]))
assert_equal(
sum([first_tensor_numel(v) for k, v in sd["state"].items()]),
sum([first_tensor_numel(v) for k, v in unwrapped_sd["state"].items()]),
)
shard_sd = fsdp.get_shard_from_optim_state_dict(sd)
original_shard_sd = fsdp_optim.state_dict()
assert_equal(len(shard_sd["state"]), len(original_shard_sd["state"]))
assert_equal(shard_sd.keys(), original_shard_sd.keys())
original_shard_sd = recursive_copy_to_device(original_shard_sd, non_blocking=False, device="cpu")
assert_equal(
sum([first_tensor_numel(v) for k, v in shard_sd["state"].items()]),
sum([first_tensor_numel(v) for k, v in original_shard_sd["state"].items()]),
)
assert objects_are_equal(shard_sd, original_shard_sd)
def test_named_params_ordering(self):
"""Test assumption of consolidate_optimizer_state_dict"""
group = DummyProcessGroup(0, 1)
model = TransformerWithSharedParams(group)
named_pars = [p for n, p in model.named_parameters()]
for i, p in enumerate(model.parameters()):
assert objects_are_equal(p, named_pars[i])
|
# Generated by Django 2.2.13 on 2020-11-23 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('per', '0034_auto_20201119_1502'),
]
operations = [
migrations.AddField(
model_name='overview',
name='assessment_number',
field=models.IntegerField(default=1, verbose_name='assessment number'),
),
]
|
#!/bin/python3
'''
https://www.hackerrank.com/contests/101hack44/challenges/picking-numbers
'''
import sys
n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
countArr = [0] * (max(a)+1)
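# The linked problem asks for the longest subarray whose values differ by at
# most 1, so an optimal pick uses at most two adjacent values v and v+1:
# count occurrences of each value and take the largest adjacent-pair sum.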
for i in a:
countArr[i]+=1
maxRepeatedNum = countArr.index(max(countArr))
ans = 2
for i in range(len(countArr)-1):
adjacentSum = countArr[i]+countArr[i+1]
if ans < adjacentSum:
ans = adjacentSum
print(ans)
|
from django.urls import path
from .friends import MyFriendsAPI
from .request import RequestFriendsAPI, AcceptRequestAPI
urlpatterns = [
path('', MyFriendsAPI.as_view()),
path('request/', RequestFriendsAPI.as_view()),
path('accept-request/', AcceptRequestAPI.as_view()),
] |
import os
import pytest
import requests
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session
from carbonserver.config import settings
# Get the API URL to use from an env variable if it exists
URL = os.getenv("CODECARBON_API_URL")
if URL is None:
pytest.exit("CODECARBON_API_URL is not defined")
experiment_id = project_id = user_id = api_key = org_id = team_id = None
org_name = org_description = org_new_id = None
team_name = team_description = team_new_id = emission_id = run_id = None
USER_PASSWORD = "Secret1!îstring"
USER_EMAIL = "[email protected]"
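# Note: these tests share module-level globals (user_id, org_new_id, run_id, ...)
# and depend on pytest's default top-to-bottom collection order within this file.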
# @pytest.fixture()
def del_test_user():
"""Fixture to destroy user"""
engine = create_engine(settings.db_url) #
stmt = text("DELETE FROM users WHERE email=:email").bindparams(email=USER_EMAIL)
with Session(engine) as session:
session.execute(stmt)
session.commit()
# Clean up user before ending test execution by pytest
# delete(SqlModelUser).where(SqlModelUser.email == USER_EMAIL)
def is_key_value_exist(list_of_dict, key, value):
"""
Check if at least one value of a key is equal to the specified value.
"""
for d in list_of_dict:
if d[key] == value:
return True
return False
def is_key_all_values_equal(list_of_dict, key, value):
"""
Check if all values of a key are equal to the specified value.
"""
for d in list_of_dict:
if d[key] != value:
return False
return True
def test_api_user_create():
assert URL is not None
# we delete it if exist
del_test_user()
payload = {"email": USER_EMAIL, "name": "toto", "password": USER_PASSWORD}
r = requests.post(url=URL + "/user", json=payload, timeout=2)
assert r.status_code == 201
assert r.json()["email"] == USER_EMAIL
assert r.json()["is_active"] == True # noqa
def test_api_user_signup():
global user_id, api_key, org_id, team_id
# signup is creating a user, we delete it if exist
del_test_user()
payload = {"email": USER_EMAIL, "name": "toto", "password": USER_PASSWORD}
r = requests.post(url=URL + "/user/signup/", json=payload, timeout=2)
assert r.status_code == 201
assert r.json()["email"] == USER_EMAIL
assert r.json()["is_active"] == True # noqa
user_id = r.json()["id"]
api_key = r.json()["api_key"]
org_id = r.json()["organizations"][0]
team_id = r.json()["teams"][0]
def test_api_users_list():
r = requests.get(url=URL + "/users", timeout=2)
assert r.status_code == 200
assert is_key_value_exist(r.json(), "id", user_id)
def test_api_get_user():
r = requests.get(url=URL + "/user/" + user_id, timeout=2)
assert r.status_code == 200
assert r.json()["id"] == user_id
assert r.json()["email"] == USER_EMAIL
def test_api_auth_success():
payload = {"email": USER_EMAIL, "password": USER_PASSWORD}
r = requests.post(url=URL + "/authenticate/", json=payload, timeout=2)
assert r.status_code == 200
assert r.json()["access_token"] == "a"
assert r.json()["token_type"] == "access"
def test_api_auth_wrong_email():
payload = {
"email": "[email protected]",
"password": USER_PASSWORD,
}
r = requests.post(url=URL + "/authenticate/", json=payload, timeout=2)
assert r.status_code == 401
def test_api_auth_wrong_password():
payload = {"email": USER_EMAIL, "password": "wrong-password"}
r = requests.post(url=URL + "/authenticate/", json=payload, timeout=2)
assert r.status_code == 401
def test_api_user_deleted():
del_test_user()
payload = {"email": USER_EMAIL, "password": USER_PASSWORD}
r = requests.post(url=URL + "/authenticate/", json=payload, timeout=2)
assert r.status_code == 401
def test_api_organization_create():
global org_name, org_description, org_new_id
org_name = "test_to_delete"
org_description = "test to delete"
payload = {"name": org_name, "description": org_description}
r = requests.post(url=URL + "/organization", json=payload, timeout=2)
assert r.status_code == 201
assert r.json()["name"] == org_name
assert r.json()["description"] == org_description
org_new_id = r.json()["id"]
def test_api_organization_read():
r = requests.get(url=URL + "/organization/" + org_new_id, timeout=2)
assert r.status_code == 200
assert r.json()["name"] == org_name
assert r.json()["description"] == org_description
def test_api_organization_list():
r = requests.get(url=URL + "/organizations", timeout=2)
assert r.status_code == 200
assert r.json()[-1]["id"] == org_new_id
def test_api_team_create():
global team_name, team_description, team_new_id
team_name = "test_to_delete"
team_description = "test to delete"
payload = {
"name": team_name,
"description": team_description,
"organization_id": org_new_id,
"api_key": api_key,
}
r = requests.post(url=URL + "/team", json=payload, timeout=2)
assert r.status_code == 201
assert r.json()["name"] == team_name
assert r.json()["description"] == team_description
team_new_id = r.json()["id"]
def test_api_team_read():
r = requests.get(url=URL + "/team/" + team_new_id, timeout=2)
assert r.status_code == 200
assert r.json()["name"] == team_name
assert r.json()["description"] == team_description
def test_api_teams_list():
r = requests.get(url=URL + "/teams", timeout=2)
assert r.status_code == 200
assert is_key_value_exist(r.json(), "id", team_new_id)
def test_api_teams_for_organization_list():
r = requests.get(url=URL + "/teams/organization/" + org_new_id, timeout=2)
assert r.status_code == 200
assert is_key_value_exist(r.json(), "id", team_new_id)
assert is_key_all_values_equal(r.json(), "organization_id", org_new_id)
def test_api_project_create():
global project_id
payload = {
"name": "test_to_delete",
"description": "Test to delete",
"team_id": team_new_id,
}
r = requests.post(url=URL + "/project/", json=payload, timeout=2)
assert r.status_code == 201
assert r.json()["team_id"] == team_new_id
project_id = r.json()["id"]
def test_api_projects_for_team_list():
r = requests.get(url=URL + "/projects/team/" + team_new_id, timeout=2)
assert r.status_code == 200
assert is_key_value_exist(r.json(), "id", project_id)
assert is_key_all_values_equal(r.json(), "team_id", team_new_id)
def test_api_experiment_create():
global experiment_id
payload = {
"name": "Run on Premise",
"description": "Premise API for Code Carbon",
"timestamp": "2021-04-04T08:43:00+02:00",
"country_name": "France",
"country_iso_code": "FRA",
"region": "france",
"on_cloud": True,
"cloud_provider": "Premise",
"cloud_region": "eu-west-1a",
"project_id": project_id,
}
r = requests.post(url=URL + "/experiment", json=payload, timeout=2)
assert r.status_code == 201
assert r.json()["project_id"] == project_id
experiment_id = r.json()["id"]
def test_api_experiment_read():
r = requests.get(url=URL + "/experiment/" + experiment_id, timeout=2)
assert r.status_code == 200
assert r.json()["id"] == experiment_id
def test_api_experiment_list():
r = requests.get(url=URL + "/experiments/project/" + project_id, timeout=2)
assert r.status_code == 200
assert is_key_value_exist(r.json(), "id", experiment_id)
def test_api_run_create():
global run_id
payload = {"timestamp": "2021-04-04T08:43:00+02:00", "experiment_id": experiment_id}
r = requests.post(url=URL + "/run/", json=payload, timeout=2)
assert r.status_code == 201
run_id = r.json()["id"]
def test_api_run_read():
r = requests.get(url=URL + "/run/" + run_id, timeout=2)
assert r.status_code == 200
assert r.json()["id"] == run_id
def test_api_run_list():
r = requests.get(url=URL + "/runs", timeout=2)
assert r.status_code == 200
assert is_key_value_exist(r.json(), "id", run_id)
def test_api_runs_for_team_list():
r = requests.get(url=URL + "/runs/experiment/" + experiment_id, timeout=2)
assert r.status_code == 200
assert is_key_value_exist(r.json(), "id", run_id)
assert is_key_all_values_equal(r.json(), "experiment_id", experiment_id)
def test_api_emission_create():
payload = {
"timestamp": "2021-04-04T08:43:00+02:00",
"run_id": run_id,
"duration": 98745,
"emissions_sum": 1544.54,
"emissions_rate": 1.548444,
"cpu_power": 0.3,
"gpu_power": 10.65,
"ram_power": 1.15,
"cpu_energy": 55.21874,
"gpu_energy": 106540.65484,
"ram_energy": 64.654688,
"energy_consumed": 57.21874,
}
r = requests.post(url=URL + "/emission/", json=payload, timeout=2)
assert r.status_code == 201
def test_api_emission_list():
global emission_id
r = requests.get(url=URL + "/emissions/run/" + run_id, timeout=2)
assert r.status_code == 200
assert is_key_all_values_equal(r.json(), "run_id", run_id)
emission_id = r.json()[-1]["id"]
def test_api_emission_read():
r = requests.get(url=URL + "/emission/" + emission_id, timeout=2)
assert r.status_code == 200
assert r.json()["id"] == emission_id
assert r.json()["run_id"] == run_id
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\event_testing\statistic_tests.py
# Compiled at: 2020-08-18 22:50:49
# Size of source mod 2**32: 59464 bytes
from event_testing.results import TestResult, TestResultNumeric
from event_testing.test_events import TestEvent, cached_test
from interactions import ParticipantType
from objects import ALL_HIDDEN_REASONS
from objects.object_tests import TunableObjectStateValueThreshold
from sims4.localization import TunableLocalizedStringFactory
from sims4.math import Operator
from sims4.tuning.tunable import TunableFactory, TunableEnumEntry, Tunable, TunableList, TunableThreshold, TunableVariant, HasTunableSingletonFactory, AutoFactoryInit, TunableOperator, TunableReference, TunablePackSafeReference, TunableSet, OptionalTunable, TunableTuple, TunableRange
from tag import TunableTags
import algos, event_testing.test_base, event_testing.test_constants, services, sims4.log, sims4.resources, sims4.tuning.tunable, statistics.statistic
logger = sims4.log.Logger('Tests', default_owner='mkartika')
class SpecifiedStatThresholdMixin:
@TunableFactory.factory_option
def participant_type_override(participant_type_enum, participant_type_default):
return {'who': TunableEnumEntry(participant_type_enum, participant_type_default, description='Who or what to apply this test to')}
def __init__(self, *args, **kwargs):
super().__init__(*args, safe_to_skip=True, **kwargs)
def _get_make_true_value(self):
if self.stat is not None:
for value in algos.binary_walk_gen(list(range(int(self.stat.min_value), int(self.stat.max_value) + 1))):
if self.threshold.compare(value):
return (
TestResult.TRUE, value)
operator_symbol = Operator.from_function(self.threshold.comparison).symbol
return (
TestResult(False, 'Could not find value to satisfy operation: {} {} {}', self.value.state, operator_symbol, self.value), None)
def goal_value(self):
return self.threshold.value
class _PointsValue(HasTunableSingletonFactory):
def get_value(self, sim, stat):
tracker = sim.get_tracker(stat)
return tracker.get_value(stat)
def validate(self, instance_class, stat):
pass
class _UserValue(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'highest_level_reached_instead': Tunable(description="\n If checked this will test against the highest level reached. This\n currently only works with Ranked Statistics. Other statistics do\n not have a notion of highest level reached. If we are using\n something that doesn't support highest level reached it will \n test against the current level instead.\n ",
tunable_type=bool,
default=False)}
def get_value(self, sim, stat):
tracker = sim.get_tracker(stat)
if self.highest_level_reached_instead:
from statistics.ranked_statistic import RankedStatistic
if issubclass(stat, (RankedStatistic,)):
stat = tracker.get_statistic(stat)
if stat is not None:
return stat.highest_level
return tracker.get_user_value(stat)
def validate(self, instance_class, stat):
pass
class _RankValue(HasTunableSingletonFactory):
def get_value(self, sim, stat):
tracker = sim.get_tracker(stat)
stat_inst = tracker.get_statistic(stat)
if stat_inst is not None:
return stat_inst.rank_level
return stat.initial_rank
def validate(self, instance_class, stat):
from statistics.ranked_statistic import RankedStatistic
if issubclass(stat, (RankedStatistic,)):
return
return 'Trying to do a Relative Stat Threshold Test using Rank instead of Value in {} but the stat {} is not a Ranked Statistic.'.format(instance_class, stat)
class StatThresholdTest(SpecifiedStatThresholdMixin, HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
test_events = (
TestEvent.SkillLevelChange, TestEvent.StatValueUpdate)
@staticmethod
def _verify_tunable_callback(instance_class, tunable_name, source, value):
if value.who == ParticipantType.Invalid or value.threshold is None:
logger.error('Missing or invalid argument at {}: {}', instance_class, tunable_name)
stat = value.stat
if stat is not None:
if 'Types.INTERACTION' in str(source):
if stat.is_skill:
threshold = value.threshold
if threshold.value == 1.0:
if threshold.comparison is sims4.math.Operator.GREATER_OR_EQUAL.function:
logger.error('StatThresholdTest for skill ({}) >= 1 is invalid in instance({}). Please remove the test.', stat, instance_class)
error_str = value.score_to_use.validate(instance_class, stat)
if error_str is not None:
logger.error(error_str)
@TunableFactory.factory_option
def stat_class_restriction_override(class_restrictions):
return {'stat': TunablePackSafeReference(description='\n The stat we are operating on.\n ',
manager=(services.statistic_manager()),
class_restrictions=class_restrictions)}
FACTORY_TUNABLES = {'verify_tunable_callback':_verify_tunable_callback,
'who':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor),
'stat':TunablePackSafeReference(description='\n The stat we are operating on.\n ',
manager=services.statistic_manager()),
'threshold':TunableVariant(description='\n The value or state threshold to test against.\n ',
state_value_threshold=TunableObjectStateValueThreshold(description='\n The state threshold for this test.\n '),
value_threshold=TunableThreshold(description="\n The threshold to control availability based on the statistic's\n value.\n "),
default='value_threshold'),
'must_have_stat':Tunable(description='\n Setting this to True (checked) will ensure that this test only\n passes if the tested Sim actually has the statistic referenced. If\n left False (unchecked), this test will evaluate as if the Sim had\n the statistic at the value of 0\n ',
tunable_type=bool,
default=False),
'score_to_use':TunableVariant(description='\n Depending on the choice, this decides what value to use for the \n threshold comparison.\n ',
points=_PointsValue.TunableFactory(description='\n Use the raw points for the comparison in the test.\n '),
user_value=_UserValue.TunableFactory(description='\n Use the user value for the comparison in the test.\n '),
rank=_RankValue.TunableFactory(description='\n Use the rank value for the comparison in the test.\n '),
default='user_value')}
__slots__ = ('who', 'stat', 'threshold', 'must_have_stat')
def get_expected_args(self):
return {'test_targets':self.who,
'statistic':event_testing.test_constants.FROM_EVENT_DATA}
def get_test_events_to_register(self):
return ()
def get_custom_event_registration_keys(self):
keys = [
(
TestEvent.SkillLevelChange, self.stat),
(
TestEvent.StatValueUpdate, self.stat)]
return keys
@cached_test
def __call__(self, test_targets=(), statistic=None):
if statistic is not None:
if self.stat is not statistic:
return TestResult(False, 'Stat being looked for is not the stat that changed.')
for target in test_targets:
if target is None:
logger.error('Trying to call StatThresholdTest on {} which is None', target)
return TestResult(False, 'Target({}) does not exist', self.who)
curr_value = 0
if self.stat is not None:
tracker = target.get_tracker(self.stat)
if tracker is None:
logger.error('{} failed to get tracker for {} on {}', self, self.stat, target)
stat_inst = None
else:
stat_inst = tracker.get_statistic(self.stat)
if stat_inst is not None:
if not (self.stat.is_skill and stat_inst.is_initial_value):
    curr_value = self.score_to_use.get_value(target, self.stat)
else:
stat_inst = None
if stat_inst is None:
if self.must_have_stat:
return TestResultNumeric(False,
'{} Does not have stat: {}.',
(self.who.name),
(self.stat),
current_value=curr_value,
goal_value=(self.threshold.value),
is_money=False,
tooltip=(self.tooltip))
if self.threshold.compare(curr_value):
    continue
operator_symbol = Operator.from_function(self.threshold.comparison).symbol
return TestResultNumeric(False,
'{} failed stat check: {}.{} {} {} (current value: {})',
(self.who.name),
(target.__class__.__name__),
(self.stat),
operator_symbol,
(self.threshold.value),
curr_value,
current_value=curr_value,
goal_value=(self.threshold.value),
is_money=False,
tooltip=(self.tooltip))
return TestResult.TRUE
def __repr__(self):
return 'Stat: {}, Threshold: {} on Subject {}'.format(self.stat, self.threshold, self.who)
def validate_tuning_for_objective(self, objective):
if self.stat is not None:
if not self.stat.valid_for_stat_testing:
logger.error('Stat {} is not valid for testing in objective {}.', self.stat, objective)
class RelativeStatTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
@staticmethod
def _verify_tunable_callback(instance_class, tunable_name, source, value):
stat = value.stat
if stat is None:
return
target_stats = value.target_stats
error_str = value.score_to_use.validate(instance_class, stat)
if error_str is not None:
logger.error(error_str)
for target_stat in target_stats:
if target_stat is None:
continue
error_str = value.score_to_use.validate(instance_class, target_stat)
if error_str is not None:
logger.error(error_str)
FACTORY_TUNABLES = {'verify_tunable_callback':_verify_tunable_callback, 'source':TunableEnumEntry(description='\n Who or what to apply this test to\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor,
invalid_enums=(
ParticipantType.Invalid,)),
'target':TunableEnumEntry(description='\n Who or what to use for the comparison\n ',
tunable_type=ParticipantType,
default=ParticipantType.TargetSim),
'stat':TunablePackSafeReference(description='\n The stat we are using for the comparison\n ',
manager=services.get_instance_manager(sims4.resources.Types.STATISTIC)),
'target_stats':TunableList(description='\n The stat on the target we want to compare against.\n If there is more than one, all must pass the comparison.\n If there is none, it compares the same stat.\n ',
tunable=TunablePackSafeReference(manager=(services.get_instance_manager(sims4.resources.Types.STATISTIC)))),
'comparison':TunableOperator(description='\n The comparison to perform against the value. The test passes if (source_stat comparison target)\n ',
default=sims4.math.Operator.GREATER_OR_EQUAL),
'score_to_use':TunableVariant(description='\n Depending on the choice, this decides what value to use for the \n threshold comparison.\n ',
points=_PointsValue.TunableFactory(description='\n Use the raw points for the comparison in the test.\n '),
user_value=_UserValue.TunableFactory(description='\n Use the user value for the comparison in the test.\n '),
rank=_RankValue.TunableFactory(description='\n Use the rank value for the comparison in the test.\n '),
default='user_value'),
'difference':Tunable(description='\n The difference between the source and target stat in order to pass \n the threshold. This value is added to the source stat value and the \n threshold is checked against the resulting value.\n ',
tunable_type=int,
default=0)}
def __init__(self, *args, **kwargs):
        super().__init__(*args, safe_to_skip=True, **kwargs)
def get_expected_args(self):
return {'source_objects':self.source,
'target_objects':self.target}
@cached_test
def __call__(self, source_objects=None, target_objects=None):
if self.stat is None:
return TestResult(False, 'Stat failed to load.')
for source_obj in source_objects:
if source_obj is None:
                logger.error('Trying to call RelativeStatThresholdTest on {} which is None for {}', source_obj, self.source)
return TestResult(False, 'Target({}) does not exist', self.source)
source_curr_value = self.score_to_use.get_value(source_obj, self.stat)
source_curr_value += self.difference
for target_obj in target_objects:
if target_obj is None:
                    logger.error('Trying to call RelativeStatThresholdTest on {} which is None for {}', target_obj, self.target)
return TestResult(False, 'Target({}) does not exist', self.target)
if self.target_stats:
for target_stat in self.target_stats:
target_curr_value = self.score_to_use.get_value(target_obj, target_stat)
threshold = sims4.math.Threshold(target_curr_value, self.comparison)
if not threshold.compare(source_curr_value):
operator_symbol = Operator.from_function(self.comparison).symbol
return TestResult(False, '{} failed relative stat check: {}.{} {} {} (current value: {})', self.source.name, target_obj.__class__.__name__, target_stat.__name__, operator_symbol, target_curr_value, source_curr_value)
else:
target_curr_value = self.score_to_use.get_value(target_obj, self.stat)
threshold = sims4.math.Threshold(target_curr_value, self.comparison)
                    if not threshold.compare(source_curr_value):
                        operator_symbol = Operator.from_function(self.comparison).symbol
                        return TestResult(False, '{} failed relative stat check: {}.{} {} {} (current value: {})',
                            self.source.name, target_obj.__class__.__name__,
                            self.stat.__name__, operator_symbol,
                            target_curr_value, source_curr_value, tooltip=self.tooltip)
return TestResult.TRUE
class RankedStatThresholdTest(SpecifiedStatThresholdMixin, HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
test_events = (
TestEvent.RankedStatisticChange,)
@staticmethod
def _verify_tunable_callback(instance_class, tunable_name, source, value):
if value.who == ParticipantType.Invalid or value.threshold is None:
logger.error('Missing or invalid argument at {}: {}', instance_class, tunable_name)
ranked_stat = value.ranked_stat
if ranked_stat is not None:
from statistics.ranked_statistic import RankedStatistic
if not issubclass(ranked_stat, (RankedStatistic,)):
logger.error('Trying to Do a Ranked Stat Threshold Test in {} but the ranked_stat {} is not a Ranked Statistic.', instance_class, ranked_stat)
FACTORY_TUNABLES = {'verify_tunable_callback':_verify_tunable_callback, 'who':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor),
'ranked_stat':TunablePackSafeReference(description='\n The ranked stat we are operating on.\n ',
manager=services.statistic_manager()),
'threshold':TunableVariant(description='\n The value or state threshold to test against.\n ',
state_value_threshold=TunableObjectStateValueThreshold(description='\n The state threshold for this test.\n '),
value_threshold=TunableThreshold(description="\n The threshold to control availability based on the ranked\n statistic's value.\n "),
default='value_threshold'),
'must_have_ranked_stat':Tunable(description='\n Setting this to True (checked) will ensure that this test only\n passes if the tested Sim actually has the ranked statistic \n referenced. If left False (unchecked), this test will evaluate \n as if the Sim had the ranked statistic at the value of 0\n ',
tunable_type=bool,
default=False),
    'test_against_highest_rank':Tunable(description='\n When checked this test will only return True if the highest rank\n achieved is in the threshold specified, and not the current rank.\n ',
tunable_type=bool,
default=False),
'num_participants':OptionalTunable(description='\n If disabled, all participants must pass this stat test.\n If enabled, we test against this number for the number of participants\n that need this value of stat to pass. \n ',
tunable=TunableThreshold(description='\n The threshold of the number of participants who must meet the \n criteria individually.\n '),
disabled_name='all_participants')}
__slots__ = ('who', 'ranked_stat', 'threshold', 'must_have_ranked_stat')
def get_expected_args(self):
return {'test_targets':self.who,
'ranked_statistic':event_testing.test_constants.FROM_EVENT_DATA}
@cached_test
def __call__(self, test_targets=(), ranked_statistic=None):
if ranked_statistic is not None:
if self.ranked_stat is not ranked_statistic:
return TestResult(False, 'Ranked Stat being looked for is not the ranked_stat that changed.')
num_passed = 0
for target in test_targets:
if target is None:
logger.error('Trying to call RankedStatThresholdTest on {} which is None', target)
return TestResult(False, 'Target({}) does not exist', self.who)
value = 0
if self.ranked_stat is not None:
tracker = target.get_tracker(self.ranked_stat)
if tracker is None:
logger.error('Trying to call RankedStatThresholdTest on {} which has no rank tracker, test:{}', target, self)
return TestResult(False, 'Target({}) has no rank tracker', self.who)
ranked_stat_inst = tracker.get_statistic(self.ranked_stat)
if ranked_stat_inst is not None:
if self.ranked_stat.is_skill:
if ranked_stat_inst.is_initial_value or self.test_against_highest_rank:
value = ranked_stat_inst.highest_rank_achieved
else:
value = ranked_stat_inst.rank_level
else:
ranked_stat_inst = None
if ranked_stat_inst is None:
if self.must_have_ranked_stat:
if self.num_participants is None:
return TestResultNumeric(False,
'{} Does not have ranked stat: {}.',
(self.who.name),
(self.ranked_stat),
current_value=value,
goal_value=(self.threshold.value),
is_money=False,
tooltip=(self.tooltip))
if not self.threshold.compare(value):
operator_symbol = Operator.from_function(self.threshold.comparison).symbol
if self.num_participants is None:
return TestResultNumeric(False,
'{} failed ranked stat check: {}.{} {} {} (current value: {})',
(self.who.name),
(target.__class__.__name__),
(self.ranked_stat),
operator_symbol,
(self.threshold.value),
value,
current_value=value,
goal_value=(self.threshold.value),
is_money=False,
tooltip=(self.tooltip))
else:
num_passed += 1
if self.num_participants is not None:
if not self.num_participants.compare(num_passed):
return TestResult(False,
'Failed num participants needed for {}. Required {} {} but has {}.',
(self.ranked_stat),
(Operator.from_function(self.num_participants.comparison).symbol),
(self.num_participants.value),
num_passed,
tooltip=(self.tooltip))
return TestResult.TRUE
@property
def stat(self):
return self.ranked_stat
def __repr__(self):
return 'Ranked Stat: {}, Threshold: {} on Subject {}'.format(self.ranked_stat, self.threshold, self.who)
class MotiveThresholdTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
test_events = (
TestEvent.MotiveLevelChange,)
@TunableFactory.factory_option
def participant_type_override(participant_type_enum, participant_type_default):
return {'who': TunableEnumEntry(participant_type_enum, participant_type_default, description='Who or what to apply this test to')}
FACTORY_TUNABLES = {'who':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor),
'stats':TunableList(description='\n The stat we are operating on.\n ',
tunable=TunableReference(manager=(services.get_instance_manager(sims4.resources.Types.STATISTIC)),
pack_safe=True)),
'threshold':TunableThreshold(description="\n The threshold to control availability based on the statistic's value.")}
def __init__(self, *args, **kwargs):
        super().__init__(*args, safe_to_skip=True, **kwargs)
def get_expected_args(self):
return {'test_targets': self.who}
@cached_test
def __call__(self, test_targets=()):
for target in test_targets:
if target is None:
logger.error('Trying to call MotiveThresholdTest on {} which is None', target)
return TestResult(False, 'Target({}) does not exist', self.who)
for stat in self.stats:
tracker = target.get_tracker(stat)
curr_value = tracker.get_user_value(stat)
if not self.threshold.compare(curr_value):
operator_symbol = Operator.from_function(self.threshold.comparison).symbol
return TestResult(False, '{} failed stat check: {}.{} {} {} (current value: {})', (self.who.name), (target.__class__.__name__), (stat.__name__), operator_symbol, (self.threshold.value), curr_value, tooltip=(self.tooltip))
return TestResult.TRUE
class StatInMotionTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
@TunableFactory.factory_option
def participant_type_override(participant_type_enum, participant_type_default):
return {'who': TunableEnumEntry(description='\n Who or what to apply this test to\n ',
tunable_type=participant_type_enum,
default=participant_type_default,
invalid_enums=(
ParticipantType.Invalid,))}
FACTORY_TUNABLES = {'who':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor,
invalid_enums=(
ParticipantType.Invalid,)),
'stat':TunableReference(description='\n The stat we are operating on.\n ',
manager=services.get_instance_manager(sims4.resources.Types.STATISTIC)),
'threshold':TunableThreshold(description='\n The threshold of loss or gain rate for this statistic in order to pass.\n ')}
def __init__(self, *args, **kwargs):
        super().__init__(*args, safe_to_skip=True, **kwargs)
def get_expected_args(self):
return {'test_targets': self.who}
@cached_test
def __call__(self, test_targets=()):
for target in test_targets:
if target is None:
logger.error('Trying to call StatInMotionTest on {} which is None', target)
return TestResult(False, 'Target({}) does not exist', self.who)
curr_value = target.get_statistic(self.stat).get_change_rate_without_decay()
            if not self.threshold.compare(curr_value):
                return TestResult(False, 'Failed stat motion check')
return TestResult.TRUE
class TunableStatOfCategoryTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
@TunableFactory.factory_option
def participant_type_override(participant_type_enum, participant_type_default):
return {'who': TunableEnumEntry(participant_type_enum, participant_type_default, description='Who or what to apply this test to')}
FACTORY_TUNABLES = {'who':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor),
'statistic_category':TunableEnumEntry(description='\n The category to check for.\n ',
tunable_type=statistics.statistic_categories.StatisticCategory,
default=statistics.statistic_categories.StatisticCategory.INVALID,
pack_safe=True),
'check_for_existence':Tunable(description='\n If checked, this test will succeed if any statistic of the category\n exists. If unchecked, this test will succeed only if no statistics\n of the category exist.\n ',
tunable_type=bool,
default=True)}
def __init__(self, *args, **kwargs):
        super().__init__(*args, safe_to_skip=True, **kwargs)
def get_expected_args(self):
return {'test_targets': self.who}
@cached_test
def __call__(self, test_targets=()):
category = self.statistic_category
check_exist = self.check_for_existence
for target in test_targets:
found_category_on_sim = False
for commodity in target.commodity_tracker.get_all_commodities():
if category in commodity.get_categories():
if commodity.is_at_convergence() or check_exist:
found_category_on_sim = True
continue
else:
return TestResult(False, 'Sim has a commodity disallowed by StatOfCategoryTest')
            if check_exist and not found_category_on_sim:
                return TestResult(False, 'Sim does not have a commodity required by StatOfCategoryTest')
return TestResult.TRUE
class _AllObjectCommodityAdvertised(HasTunableSingletonFactory):
def get_objects_gen(self):
yield from services.object_manager().get_valid_objects_gen()
class _LaundryObjectCommodityAdvertised(HasTunableSingletonFactory):
def get_objects_gen(self):
laundry_service = services.get_laundry_service()
if laundry_service is not None:
yield from laundry_service.laundry_hero_objects
class _TaggedObjectCommodityAdvertised(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'object_tags': TunableTags(description='\n Find all of the objects that have at least one of the tuned \n tags.\n ',
filter_prefixes=('func', ))}
def get_objects_gen(self):
        yield from services.object_manager().get_objects_with_tags_gen(*self.object_tags)
class TunableObjectCommodityAdvertisedVariant(TunableVariant):
def __init__(self, *args, **kwargs):
        super().__init__(*args, all_objects=_AllObjectCommodityAdvertised.TunableFactory(),
tagged_objects=_TaggedObjectCommodityAdvertised.TunableFactory(),
laundry_objects=_LaundryObjectCommodityAdvertised.TunableFactory(),
default='all_objects', **kwargs)
class CommodityAdvertisedTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
REQUIRE_ANY = 0
REQUIRE_ALL = 1
REQUIRE_NONE = 2
FACTORY_TUNABLES = {'commodities':TunableSet(description='\n A list of commodities that must be advertised by some interaction\n on the current lot.\n ',
tunable=TunableReference(description='\n The type of commodity to search for.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.STATISTIC)))),
'static_commodities':TunableSet(description='\n A list of static commodities that must be advertised by some\n interaction on the current lot.\n ',
tunable=TunableReference(description='\n The type of static commodity to search for.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.STATIC_COMMODITY)))),
'requirements':TunableVariant(description='\n A variant specifying the terms of this test with regards to the\n tuned commodities.\n \n * Require Any: The test will pass if any of the tuned commodities \n are found on an object.\n * Require All: The test will only pass if all of the tuned\n commodities are found on a single object.\n * Require None: The test will only pass if none of the tuned\n commodities are found on any object on the lot.\n ',
locked_args={'require_any':REQUIRE_ANY,
'require_all':REQUIRE_ALL,
'require_none':REQUIRE_NONE},
default='require_any'),
'require_reservable_by_participant':OptionalTunable(description='\n If enabled, the object that advertises the commodity must by reservable\n by the specified participant type.\n ',
tunable=TunableEnumEntry(description='\n The participant that must be able to reserve the object.\n ',
tunable_type=ParticipantType,
default=(ParticipantType.Actor))),
'tested_objects':TunableObjectCommodityAdvertisedVariant(description='\n The test will only check these objects for tuned advertised \n commodities.\n \n EX: to improve performance, when we know that tuned commodities \n will only be found on laundry objects, set this to Laundry Objects \n instead of All Objects.\n '),
'test_aops':Tunable(description='\n If checked, the obj that is advertising the tuned commodities must\n also have the aops that grant that commodity be able to run.\n \n EX: check if any dishes on the lot can be eaten. Even if the\n dishes advertise the eat static commodity, the individual dish themselves might\n not be able to be eaten because they are spoiled, empty, etc.\n ',
tunable_type=bool,
default=False),
'check_affordance_suppression':Tunable(description='\n If checked, suppressed affordances will not be considered.\n ',
tunable_type=bool,
default=False),
'test_connectivity_to_target':Tunable(description='\n If checked, this test will ensure the target Sim can pass a pt to\n pt connectivity check to the advertising object.\n ',
tunable_type=bool,
default=True),
'allow_targeted_objects':Tunable(description="\n If enabled, objects targeted (ParticipantType.Object) by the\n interaction are allowed to pass this test. Typically, for cleaning\n up dishes, we disallow targeted objects because we don't want you\n to run the affordance on dishes you are carrying.\n ",
tunable_type=bool,
default=False),
'test_autonomous_availability':Tunable(description='\n If enabled, this test will consider advertising objects that the\n Sim can use autonomously. This should be specifically disabled if\n we want to bypass on lot and off lot autonomy rules for the purpose\n of this test.\n ',
tunable_type=bool,
default=True),
'test_reservations':Tunable(description="\n If enabled, this test will consider advertising objects that the\n Sim can currently reserve. This should be specifically disabled if\n we don't care about object reservations.\n ",
tunable_type=bool,
default=True)}
def get_expected_args(self):
expected_args = {'target_objects':ParticipantType.Object,
'context':ParticipantType.InteractionContext,
'actor_set':ParticipantType.Actor}
if self.require_reservable_by_participant is not None:
expected_args['reserve_participants'] = self.require_reservable_by_participant
return expected_args
@property
def allow_failfast_tests(self):
return False
def _has_valid_aop(self, obj, motives, context, test_aops, check_suppression):
for affordance in obj.super_affordances(context):
if not affordance.commodity_flags & motives:
continue
for aop in affordance.potential_interactions(obj, context):
if check_suppression:
if obj.check_affordance_for_suppression(context.sim, aop, False):
continue
if test_aops:
test_result = aop.test(context)
if not test_result:
continue
return True
return False
@cached_test
def __call__(self, target_objects=None, reserve_participants=None, context=None, actor_set=None):
actor_info = next(iter(actor_set))
actor = actor_info.get_sim_instance()
if actor is None:
return TestResult(False, 'The actor Sim is not instantiated.')
reference_object = actor
targets = set()
if target_objects:
targets = set(target_objects)
for obj in target_objects:
if obj.is_sim:
sim_instance = obj.get_sim_instance()
if sim_instance is None:
continue
reference_object = sim_instance
break
if not obj.is_in_inventory():
reference_object = obj
break
motives = self.static_commodities.union(self.commodities)
autonomy_rule = actor.get_off_lot_autonomy_rule()
for obj in self.tested_objects.get_objects_gen():
            if not self.allow_targeted_objects and obj in targets:
                continue
            motive_intersection = obj.commodity_flags & motives
            if not motive_intersection:
                continue
            if self.test_autonomous_availability and not actor.autonomy_component.get_autonomous_availability_of_object(obj, autonomy_rule, reference_object=reference_object):
                continue
            if self.test_reservations and reserve_participants is not None:
                for sim in reserve_participants:
                    sim_instance = sim.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
                    if sim_instance is not None and obj.may_reserve(sim_instance):
                        break
                else:
                    continue
            if self.test_aops or self.check_affordance_suppression:
                if not self._has_valid_aop(obj, motives, context, self.test_aops, self.check_affordance_suppression):
                    continue
if self.test_connectivity_to_target and not obj.is_connected(actor):
continue
if self.requirements == self.REQUIRE_NONE:
return TestResult(False, 'A specified commodity was found, but we are requiring that no specified commodities are found.', tooltip=(self.tooltip))
if self.requirements == self.REQUIRE_ANY:
return TestResult.TRUE
if self.requirements == self.REQUIRE_ALL and motive_intersection == motives:
return TestResult.TRUE
if self.requirements == self.REQUIRE_NONE:
return TestResult.TRUE
if reserve_participants is not None:
return TestResult(False, 'No required commodities or static commodities are advertising where the object is reservable by participant type {}.', (self.require_reservable_by_participant), tooltip=(self.tooltip))
return TestResult(False, 'No required commodities or static commodities are advertising.', tooltip=(self.tooltip))
class CommodityDesiredByOtherSims(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
FACTORY_TUNABLES = {'commodity':TunableTuple(commodity=TunableReference(description='\n The type of commodity to test.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.STATISTIC))),
        threshold=TunableThreshold(description='\n The threshold to test for.\n ')),
'only_other_sims':Tunable(description='\n If checked, the sim running this test is not counted.',
tunable_type=bool,
default=True),
'only_household_sims':Tunable(description='\n If checked, only sims in the same household as the testing sim \n are considered.',
tunable_type=bool,
default=True),
'count':Tunable(description='\n The number of sims that must desire the commodity for this test\n to pass.',
tunable_type=int,
default=1),
'invert':Tunable(description='\n If checked, the test will be inverted. In other words, the test \n will fail if any sim desires the tuned commodity.',
tunable_type=bool,
default=False)}
def get_expected_args(self):
expected_args = {'context': ParticipantType.InteractionContext}
return expected_args
@cached_test
def __call__(self, context=None):
logger.assert_log((context is not None), 'Context is None in CommodityDesiredByOtherSims test.', owner='rez')
total_passed = 0
for sim in services.sim_info_manager().instanced_sims_gen():
if self.only_other_sims:
if context is not None:
if context.sim is sim:
continue
elif self.only_household_sims:
if context is not None and context.sim.household_id != sim.household_id:
continue
commodity_inst = sim.get_stat_instance(self.commodity.commodity)
if commodity_inst is not None and self.commodity.threshold.compare(commodity_inst.get_value()):
total_passed += 1
if total_passed >= self.count:
if not self.invert:
return TestResult.TRUE
return TestResult(False, 'Too many sims desire this commodity.', tooltip=(self.tooltip))
if not self.invert:
return TestResult(False, 'Not enough sims desire this commodity.', tooltip=(self.tooltip))
return TestResult.TRUE
class StatisticEquivalencyTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
FACTORY_TUNABLES = {'stat': TunableReference(description='\n The stat we are operating on.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.STATISTIC)))}
def get_expected_args(self):
return {'participant_stat': ParticipantType.PickedStatistic}
@cached_test
def __call__(self, participant_stat):
for tested_stat in participant_stat:
if tested_stat is not None and self.stat is tested_stat:
return TestResult.TRUE
return TestResult(False, 'Specified Stat({}) not the same as Participant Stat({})', self.stat, participant_stat)
class StatFromParticipantThresholdTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
FACTORY_TUNABLES = {'who':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
tunable_type=ParticipantType,
default=ParticipantType.Actor),
'threshold':TunableVariant(description='\n The value or state threshold to test against.\n ',
state_value_threshold=TunableObjectStateValueThreshold(description='\n The state threshold for this test.\n '),
value_threshold=TunableThreshold(description="\n The threshold to control availability based on the statistic's\n value.\n "),
default='value_threshold'),
'must_have_stat':Tunable(description='\n Setting this to True (checked) will ensure that this test only\n passes if the tested Sim actually has the statistic referenced. If\n left False (unchecked), this test will evaluate as if the Sim had\n the statistic at the value of 0\n ',
tunable_type=bool,
default=False),
'score_to_use':TunableVariant(description='\n Depending on the choice, this decides what value to use for the \n threshold comparison.\n ',
points=_PointsValue.TunableFactory(description='\n Use the raw points for the comparison in the test.\n '),
user_value=_UserValue.TunableFactory(description='\n Use the user value for the comparison in the test.\n '),
rank=_RankValue.TunableFactory(description='\n Use the rank value for the comparison in the test.\n '),
default='user_value')}
def get_expected_args(self):
return {'test_targets':self.who,
'picked_statistics':ParticipantType.PickedStatistic}
@cached_test
def __call__(self, test_targets=(), picked_statistics=()):
for target in test_targets:
if target is None:
logger.error('Trying to call StatFromParticipantThresholdTest on {} which is None', target)
return TestResult(False, 'Target({}) does not exist', self.who)
if not picked_statistics:
return TestResult(False, 'No picked statistic(s)')
for stat in picked_statistics:
curr_value = 0
if stat is not None:
tracker = target.get_tracker(stat)
if tracker is None:
return TestResult(False, 'Stat({}) does not have a tracker', stat)
stat_inst = tracker.get_statistic(stat)
if stat_inst is not None:
                        if not (stat.is_skill and stat_inst.is_initial_value):
                            curr_value = self.score_to_use.get_value(target, stat)
else:
stat_inst = None
if stat_inst is None:
if self.must_have_stat:
return TestResultNumeric(False,
'{} Does not have stat: {}.',
(self.who.name),
stat,
current_value=curr_value,
goal_value=(self.threshold.value),
is_money=False,
tooltip=(self.tooltip))
                if not self.threshold.compare(curr_value):
                    operator_symbol = Operator.from_function(self.threshold.comparison).symbol
                    return TestResultNumeric(False,
                        '{} failed stat check: {}.{} {} {} (current value: {})',
                        self.who.name,
                        target.__class__.__name__,
                        stat,
                        operator_symbol,
                        self.threshold.value,
                        curr_value,
                        current_value=curr_value,
                        goal_value=self.threshold.value,
                        is_money=False,
                        tooltip=self.tooltip)
return TestResult.TRUE |
from external_services import ExternalServices
from k8s import K8sResource
class K8sIngress(K8sResource):
def __init__(self, data: dict, svc: ExternalServices = ExternalServices()) -> None:
super().__init__(data=data, svc=svc)
def is_available(self, state: dict) -> bool:
if 'status' not in state:
return False
status = state['status']
if 'loadBalancer' not in status:
return False
load_balancer_status = status['loadBalancer']
if 'ingress' not in load_balancer_status:
return False
if [ing for ing in load_balancer_status['ingress'] if 'hostname' in ing or 'ip' in ing]:
return True
else:
return False
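if __name__ == "__main__":
    # Minimal sketch (not part of the original module): is_available() only inspects the
    # 'status.loadBalancer.ingress' entries of the state dict, so bare dicts are enough to
    # exercise it. The empty `data={}` passed to the constructor is a placeholder.
    ready_state = {"status": {"loadBalancer": {"ingress": [{"ip": "203.0.113.10"}]}}}
    pending_state = {"status": {"loadBalancer": {}}}
    ingress = K8sIngress(data={})
    print(ingress.is_available(ready_state))    # expected: True
    print(ingress.is_available(pending_state))  # expected: False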
|
#HOST = '192.168.0.199'
#CONNECTION_INFO = f"host='{HOST}' dbname='{DBNAME}' user='{USERNAME}' password='{PASSWORD}'"
DBNAME = 'words'
file_name = 'words.db' |
# Copyright 2021 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import crayons
import logging
import yaml
LOG = logging.getLogger(__name__)
class Config(object):
global_config_file = '/etc/cinfo/cinfo.yaml'
def __init__(self, file=global_config_file):
self.file = file
def load(self):
LOG.info("{}: {}".format(
crayons.yellow("loading conf"), self.file))
with open(self.file, 'r') as stream:
self.data = yaml.safe_load(stream)
return self.data
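if __name__ == "__main__":
    # Tiny usage sketch (not part of the original module): load the default global config.
    # The path /etc/cinfo/cinfo.yaml is the class default and must exist for this to run.
    logging.basicConfig(level=logging.INFO)
    print(Config().load())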
|
# Created by MechAviv
# Masque's Puzzle Damage Skin | (2435954)
if sm.addDamageSkin(2435954):
sm.chat("'Masque's Puzzle Damage Skin' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem() |
import base64
import logging
import json
from calendar import monthrange
import datetime
from httplib2 import Http
from json import dumps
def handle_notification(event, context):
"""Triggered from a message on a Cloud Pub/Sub topic.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
pubsub_message = base64.b64decode(event['data']).decode('utf-8')
logging.info('Budget information: {}'.format(pubsub_message))
jsonPayload = json.loads(pubsub_message)
costAmount = jsonPayload['costAmount']
budgetAmount = jsonPayload['budgetAmount']
percentOfBudget = round((costAmount/budgetAmount) * 100,2)
budgetDisplayName = jsonPayload['budgetDisplayName']
costIntervalStart = jsonPayload['costIntervalStart']
percentOfMonth = calcMonthPercent(costIntervalStart)
trendingPercent = round(percentOfBudget - percentOfMonth,2)
#logging.info('costAmount: {}'.format(costAmount))
#logging.info('budgetAmount: {}'.format(budgetAmount))
#logging.info('percentOfBudget: {}'.format(percentOfBudget))
#logging.info('budgetDisplayName: {}'.format(budgetDisplayName))
if trendingPercent >= 1:
message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% higher than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
elif trendingPercent < 1 and trendingPercent > -1:
message_text = "{}".format(budgetDisplayName) + ": On target (+/- 1%) (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
else:
message_text = "{}".format(budgetDisplayName) + ": {}".format(trendingPercent) + "% lower than last month (${:.2f}".format(costAmount) + "/${:.2f}".format(budgetAmount) + ")"
logging.info('message_text: {}'.format(message_text))
timeToSend = chatLimiter(percentOfBudget, percentOfMonth)
if timeToSend == True:
sendChatMessage(message_text)
def calcMonthPercent(costIntervalStart):
#Convert the interval timestamp to a DateTime object
intervalStart = datetime.datetime.strptime(costIntervalStart,"%Y-%m-%dT%H:%M:%SZ")
#Get a DateTime object for the date and time right now
timeNow = datetime.datetime.now()
#Calculate the difference between the start of the billing period and now
toNowCalc = timeNow - intervalStart
toNowDifference = toNowCalc.days * 86400 + toNowCalc.seconds
#logging.info('toNow: {}'.format(toNowDifference))
#Get a DateTime object for the end of the billing period
intervalMonth = intervalStart.month
intervalYear = intervalStart.year
daysInIntervalMonth = monthrange(intervalYear, intervalMonth)[1]
intervalEndTimestamp = str(intervalYear) + "-" + str(intervalMonth) + "-" + str(daysInIntervalMonth) + " 23:59:59"
intervalEndTime = datetime.datetime.strptime(intervalEndTimestamp, "%Y-%m-%d %H:%M:%S")
#Calculate the difference between the start and end of the billing period
toMonthEndCalc = intervalEndTime - intervalStart
toMonthEndDifference = toMonthEndCalc.days * 86400 + toMonthEndCalc.seconds
#logging.info('toMonthEnd: {}'.format(toMonthEndDifference))
#Calculate position in the billing period expressed as a percent
intervalPercent = round(toNowDifference/toMonthEndDifference * 100, 2)
#logging.info('intervalPercent: {}'.format(intervalPercent))
return intervalPercent
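# Worked example (illustrative; the dates are placeholders, not from the source): for a
# billing period starting 2021-03-01T00:00:00Z and "now" at 2021-03-11T00:00:00Z,
# toNowDifference is 10 days = 864000 s and toMonthEndDifference is 30 days 23:59:59
# = 2678399 s, so calcMonthPercent() returns round(864000 / 2678399 * 100, 2) == 32.26,
# i.e. roughly a third of the billing month has elapsed.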
def chatLimiter(budgetPercent, intervalPercent):
#Get a DateTime object for the date and time right now
timeNow = datetime.datetime.now()
logging.info('timeNow: {}'.format(timeNow))
dayNow = timeNow.day
hourNow = timeNow.hour
minuteNow = timeNow.minute
overUnder = budgetPercent - intervalPercent
if overUnder > 1: #if over budget by more than 1%
if minuteNow >= 0 and minuteNow < 30: #PubSub notifications should arrive every 20-30 minutes
return True
else:
return False
else: #if not over budget by more than 1%
if minuteNow >= 0 and minuteNow < 30 and hourNow == 14: #send notifications for the 7AM Mountain time hour only (offset for GMT)
return True
else:
return False
def sendChatMessage(message_text):
url = 'https://chat.googleapis.com/v1/spaces/...' #insert your Google Chat Webhook URL here
bot_message = {'text' : '{}'.format(message_text)}
message_headers = { 'Content-Type': 'application/json; charset=UTF-8'}
http_obj = Http()
response = http_obj.request(
uri=url,
method='POST',
headers=message_headers,
body=dumps(bot_message),
)
logging.info('Message sent')
logging.info('Response: {}'.format(response))
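if __name__ == "__main__":
    # Local smoke test (a sketch, not part of the deployed function): build the same kind of
    # payload the Pub/Sub trigger delivers and exercise the pacing helpers without posting
    # to Chat. All values below are made up.
    sample = {
        "costAmount": 55.0,
        "budgetAmount": 100.0,
        "budgetDisplayName": "demo-budget",
        "costIntervalStart": "2021-03-01T00:00:00Z",
    }
    event = {"data": base64.b64encode(json.dumps(sample).encode("utf-8"))}
    # handle_notification(event, None) would run the full path, including the Chat webhook.
    month_percent = calcMonthPercent(sample["costIntervalStart"])
    print("percent of month elapsed:", month_percent)
    print("would send a message now:", chatLimiter(55.0, month_percent))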
|
import eHive
import os
from VCF.VcfNormalize import VcfNormalize
class VcfAllelicPrim(eHive.BaseRunnable):
"""Run vcfallelicprimitives on a VCF file"""
def run(self):
filepath = self.param_required('filepath')
self.warning('Analysing file: %s'% filepath)
file = os.path.split(filepath)[1]
work_dir = None
if self.param_is_defined('work_dir'):
work_dir = self.param_required('work_dir')
else:
work_dir = os.path.split(filepath)[0]
vcfNorm = VcfNormalize(vcf=filepath, vcflib_folder=self.param_required('vcflib_folder'),
bgzip_folder=self.param_required('bgzip_folder'))
downstream_pipe = None
if self.param_is_defined('downstream_pipe'):
downstream_pipe = self.param('downstream_pipe')
vcf_file = ""
if self.param_is_defined('compress'):
vcf_file = vcfNorm.run_vcfallelicprimitives(outprefix=file,
compress=True,
downstream_pipe=downstream_pipe,
outdir=work_dir)
else:
vcf_file = vcfNorm.run_vcfallelicprimitives(outprefix=file,
compress=False,
downstream_pipe=downstream_pipe,
outdir=work_dir)
self.param('out_vcf', vcf_file)
def write_output(self):
self.warning('Work is done!')
self.dataflow({'out_vcf': self.param('out_vcf')}, 1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Produces an ATOM XML feed file
Definition: https://tools.ietf.org/html/rfc4287
Validation tool: https://validator.w3.org/feed/
"""
__author__ = 'Frederic Laurent'
__version__ = "1.0"
__copyright__ = 'Copyright 2017, Frederic Laurent'
__license__ = "MIT"
import codecs
import datetime
import logging
import os.path
import lxml.etree
from lxml.etree import SubElement
from easy_atom import helpers, content
class Feed:
"""
    Creates an RSS feed file in the ATOM format
"""
ATOM_FEED_DIR = "feeds"
ATOM_CONFIG_DIR = "conf"
FEED_ENCODING = 'utf-8'
def __init__(self, domain, selfhref='', config_filename=None):
"""
        Generator constructor
        Link about the feed self-reference: https://www.feedvalidator.org/docs/warning/MissingAtomSelfLink.html
        :param domain: a domain describing the feed. Used to identify the configuration file feed_config_<domain>.json
        :param selfhref: HREF of the XML feed once deployed, used for its self-reference
"""
self.logger = logging.getLogger('feed')
self.domain = domain
self.selfhref = selfhref
self.feed_config = {}
self.feed_filename = None
self.rss2_filename = None
self.update_date = None
self.load_config(config_filename)
def get_config_filename(self):
return os.path.join(Feed.ATOM_CONFIG_DIR, "feed_config_{}.json".format(self.domain))
def load_config(self, config_filename=None):
"""
        Loads the feed configuration.
        By convention, the feed configuration lives in the file feed_config_<domain>.json
:return: -
"""
filename = config_filename
if not filename:
filename = self.get_config_filename()
self.logger.debug("Load config file : {}".format(filename))
self.feed_config = helpers.load_json(filename)
output = self.feed_config["output_dir"] if "output_dir" in self.feed_config else Feed.ATOM_FEED_DIR
self.logger.debug("Config Init - Output = {}".format(output))
if 'header' in self.feed_config and 'atom_feedname' in self.feed_config['header']:
self.feed_filename = os.path.join(output, self.feed_config["header"]["atom_feedname"])
if 'header' in self.feed_config and 'rss2_feedname' in self.feed_config['header']:
self.rss2_filename = os.path.join(output, self.feed_config["header"]["rss2_feedname"])
self.update_date = datetime.datetime.now(datetime.timezone.utc).isoformat(sep='T')
self.logger.debug("Feed : %s (RSS2 %s)" % (self.feed_filename, self.rss2_filename))
self.logger.debug("Config loaded : {}".format(self.feed_config))
def feed_url(self, feed_type='atom'):
"""
        Returns the URL of the Atom (or RSS2) feed
        The URL is built from the feed self-reference URL
        :param feed_type: feed type: rss2 or atom
        :return: feed URL
"""
_url = []
if self.selfhref:
if self.selfhref.endswith('/'):
_url.append(self.selfhref[:-1])
else:
_url.append(self.selfhref)
if feed_type == 'atom':
_url.append(self.feed_config["header"]["atom_feedname"])
else:
_url.append(self.feed_config["header"]["rss2_feedname"])
return '/'.join(_url)
def make_entry_id(self, id):
return "{}{}".format(self.feed_config["entry"]["urn_mask"], id)
def generate(self, entries):
"""
        Generates the Atom XML file
        :param entries: list of the file entries
        :return: XML node of the Atom XML document
"""
self.logger.debug("Feed to XML : {} entries".format(len(entries)))
root = content.xmlelt(None, "feed", None, {"xmlns": "http://www.w3.org/2005/Atom"})
content.xmlelt(root, "id", self.feed_config["header"]["id"])
content.xmlelt(root, "title", self.feed_config["header"]["title"])
content.xmlelt(root, "subtitle", self.feed_config["header"]["subtitle"])
content.xmlelt(root, "link", None,
{"href": self.feed_config["header"]["link"],
"rel": "related"})
content.xmlelt(root, "link", None,
{"href": self.feed_url(),
"rel": "self"})
content.xmlelt(root, "updated", self.update_date)
author = SubElement(root, "author")
content.xmlelt(author, "name", self.feed_config["header"]["author"]["name"])
content.xmlelt(author, "email", self.feed_config["header"]["author"]["email"])
content.xmlelt(root, "category", None,
{"term": self.feed_config["header"]["category"]})
content.xmlelt(root, "generator", "python program - atom.py",
{"uri": "https://github.com/flrt/atom_gen",
"version": "1.0"})
content.xmlelt(root, "rights", "CC BY-SA 3.0 FR")
for entry in entries:
self.logger.debug("current entry : %s" % entry)
self.make_entry(root, entry)
return root
def make_entry(self, xml_parent, entry):
"""
        Builds an Atom entry
        Uses the files_props property, if defined, which contains the links to the
        remote files along with their size and availability information.
        If files_props is not available, falls back to the files property, which only
        contains the list of remote files.
        :param xml_parent: XML node the entry is attached to
        :param entry: entry data
        :return: -
"""
xml_entry = content.xmlelt(xml_parent, "entry")
content.xmlelt(xml_entry, "title", entry["title"])
        # generate the links with the detailed information
if 'files_props' in entry:
self.add_entry_links(xml_entry,
list(map(lambda x: x['url'], entry['files_props'])))
        # otherwise, only use the links to the files
elif 'files' in entry:
self.add_entry_links(xml_entry, entry['files'])
content.xmlelt(xml_entry, "id", entry["id"])
        # build the content
content.make_xhtml(xml_entry, entry)
content.xmlelt(xml_entry, "updated", entry["date"])
content.xmlelt(xml_entry, "summary", entry["summary"])
def add_entry_links(self, xml_parent, link_list):
"""
        Creates the atom links from a list of links
        :param xml_parent: XML node the links are attached to
        :param link_list: list of links
:return: -
"""
for link in link_list:
content.xmlelt(xml_parent, "link", None,
{"href": link, "rel": "related", "type": self.mimetype(link)})
@staticmethod
def mimetype(link):
"""
        Detects the MIME type from the URL extension
        :param link: file URL
        :return: associated MIME type. Type: str
"""
mimetypes = {".zip": "application/zip",
".dbf": "application/dbase",
".csv": "text/csv",
".json": "application/json",
".xml": "text/xml",
".txt": "text/plain",
".sha": "text/vnd.digest",
".diff": "text/vnd.diff"}
ext = link[link.rfind('.'):]
if ext in mimetypes:
return mimetypes[ext]
else:
return "application/octet-stream"
def save(self, root):
"""
        Saves the data locally
        :param root: XML node
:return: -
"""
if self.feed_filename:
self.logger.info("Save Atom {0}".format(self.feed_filename))
with codecs.open(self.feed_filename, "w", Feed.FEED_ENCODING) as fout:
fout.write(content.xml2text(root, Feed.FEED_ENCODING))
else:
self.logger.warning("Can't Save feed - no filename defined")
def rss2(self, feed=None):
"""
        Converts the atom document into an rss2 file
        :param feed: XML tree of the Atom feed
:return: -
"""
if not self.feed_filename:
self.logger.warning("No source filename (Atom MXL")
return
try:
import atomtorss2
import atomtorss2.xslt_ext
self.logger.info("Save RSS2 {0}".format(self.rss2_filename))
# XSL
filedir = os.path.dirname(os.path.abspath(atomtorss2.__file__))
xslt_filename = os.path.join(filedir, atomtorss2.DEFAULT_XSLT_FILE)
proc = atomtorss2.xslt_ext.DateFormatterProcessor()
proc.load_xslt(xslt_filename)
            # RSS2 conversion
if feed is not None:
result_xml = proc.transform(feed)
else:
result_xml = proc.transform(lxml.etree.parse(self.feed_filename))
with codecs.open(self.rss2_filename, "w", Feed.FEED_ENCODING) as fout:
fout.write(content.xml2text(result_xml, Feed.FEED_ENCODING))
except ImportError:
            self.logger.warning("No transformation library found : atom -> rss2")
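# Illustrative usage (a sketch; "hospital" and the href below are placeholders and require a
# matching conf/feed_config_hospital.json to exist):
#
#   feed = Feed("hospital", selfhref="https://example.org/feeds/")
#   root = feed.generate(entries)  # entries: list of dicts with title, id, date, summary, files...
#   feed.save(root)
#   feed.rss2(root)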
|
'''
Created on 25 Jan 2018
@author: Slaporter
'''
import platform
def get_platform_info():
return (platform.platform())
if __name__ == '__main__':
    print(get_platform_info())
from typing import Any, Iterable, Sequence, Optional, Union, overload, TypeVar
import pandas as pd
import numpy as np
from amstramdam.game.geo import Point
from amstramdam.datasets.types import (
Filter,
Mask,
Rank,
PointCreationRecord,
PointUpdateRecord,
GuessRecord,
)
T = TypeVar("T")
def create_mask(df: pd.DataFrame, filter: Filter) -> Mask:
# Column to use for filtering
col = filter["column"]
assert col in df.columns, f"Unknown column '{col}'. Valid columns are {df.columns}"
# Values to use for filtering
values = filter.get("values", None)
if filter.get("method", None) is not None:
method = filter["method"]
elif isinstance(values, (list, tuple, set)):
method = "isin"
else:
method = "eq"
return getattr(df[col], method)(values)
def create_masks(df: pd.DataFrame, filters: Iterable[Filter]) -> Mask:
masks = None
for filter in filters:
# TODO: support OR and AND masking with recursivity
mask = create_mask(df, filter)
if masks is None:
masks = mask
else:
masks = masks & mask
return masks
def mask_df(df: pd.DataFrame, filters: Iterable[Filter]) -> pd.DataFrame:
masks = create_masks(df, filters)
return df if masks is None else df.loc[masks]
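# Illustrative use of mask_df (a sketch; the column names "country" and "population" are
# placeholders and are not guaranteed to exist in any shipped dataset):
#
#   mask_df(df, [
#       {"column": "country", "values": ["FR", "DE"]},                   # Series.isin, since values is a list
#       {"column": "population", "method": "ge", "values": 1_000_000},   # explicit comparison method
#   ])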
def autorank(
df: pd.DataFrame, column: str, ranks: Sequence[Rank], reverse: bool = False
) -> tuple[pd.DataFrame, dict[str, int]]:
"""Create G0, G1, Gi groups on-the-fly, from a list of group sizes"""
if len(ranks) == 0:
df.loc[:, "group"] = 0
params = dict(available_levels=1)
return df, params
if ranks[-1] == "all":
ranks[-1] = len(df)
def get_rank(r: int) -> int:
for i, threshold in enumerate(ranks):
if r < threshold: # type: ignore
return i
return len(ranks)
df.loc[:, "local_rank"] = np.argsort(-df[column].fillna(0))
df.loc[:, "group"] = df["local_rank"].map(get_rank)
params = dict(available_levels=len(ranks))
return df, params
class DataFrameLoader(object):
# List here the file that should be kept in memory
persistent: set[str] = {"data/places.world.csv"}
def __init__(self, dataframes: Optional[dict[str, pd.DataFrame]] = None) -> None:
if dataframes is None:
dataframes = dict()
self._dataframes = dataframes
def __contains__(self, item: str) -> bool:
return item in self._dataframes
def __len__(self) -> int:
return len(self._dataframes)
def load(self, filename: str, persist: bool = False, **kwargs: Any) -> pd.DataFrame:
df = pd.read_csv(filename, **kwargs)
df = df.fillna(0)
if "pid" not in df.columns:
df["pid"] = df.index
if persist:
self.persistent.add(filename)
if filename in self.persistent:
self._dataframes[filename] = df
return df
def __getitem__(self, filename: str) -> pd.DataFrame:
if filename in self._dataframes:
return self._dataframes[filename]
return self.load(filename)
def __delitem__(self, filename: str) -> None:
del self._dataframes[filename]
def edit(
self,
filename: str,
created: Iterable[PointCreationRecord],
updated: dict[str, PointUpdateRecord],
) -> pd.DataFrame:
"""
filename: original DF filename
inserted: list of inserted records. Each record is a dict whose keys are the
columns of DF, and values are the corresponding values
changed: a dictionnary from pids to changes. For each pid, changed[pid] is a
dictionnary mapping changed columns to their new values
"""
df = self.load(filename, persist=False)
types = {col: df[col].dtype for col in df.columns}
if created:
added = pd.DataFrame.from_records(
created, index=[o["pid"] for o in created]
)
added = added.astype(types)
df = df.append(added, verify_integrity=True)
for pid, changes in updated.items():
parsed_pid = int(pid)
for col, value in changes.items():
casted_value = np.array([value], dtype=types.get(col))
df.loc[parsed_pid, col] = casted_value
df = df.drop(columns=["pid"])
return df
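# Illustrative usage of DataFrameLoader (a sketch, not part of the original module): the
# world dataset path below is the one listed in `persistent`, so repeated lookups reuse the
# cached frame instead of re-reading the CSV.
#
#   loader = DataFrameLoader()
#   world = loader["data/places.world.csv"]        # read from disk and cached
#   world_again = loader["data/places.world.csv"]  # served from the in-memory cache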
class UnifiedDataFrame:
def __init__(
self,
df: pd.DataFrame,
mask: Mask = None,
col_place: str = "city",
col_hint: str = "admin",
col_lon: str = "lng",
col_lat: str = "lat",
col_group: str = "group",
col_rank: str = "population",
use_hint: bool = True,
single_group: bool = False,
special_char: str = "!",
) -> None:
self.df = df
self.mask = mask if mask is not None else pd.Series(True, index=self.df.index)
self.converter = {
k: v
for k, v in zip(
["place", "hint", "lon", "lat", "group", "rank"],
[col_place, col_hint, col_lon, col_lat, col_group, col_rank],
)
}
self.use_hint = use_hint and col_hint in self.df.columns
self.single_group = single_group or col_group not in self.df.columns
self.SPE = special_char
def unify_df(self, df: pd.DataFrame) -> "UnifiedDataFrame":
new_mask = self.mask.reindex_like(df) # & pd.Series(True, index=self.df.index)
return UnifiedDataFrame(
df,
mask=new_mask,
col_place=self.col("place"),
col_hint=self.col("hint"),
col_lon=self.col("lon"),
col_lat=self.col("lat"),
col_group=self.col("group"),
col_rank=self.col("rank"),
use_hint=self.use_hint,
single_group=self.single_group,
special_char=self.SPE,
)
def _col(self, key: str) -> str:
if key.startswith(self.SPE):
return key[len(self.SPE) :]
return self.converter.get(key, key)
@overload
def col(self, key: str) -> str:
pass
@overload
def col(self, key1: str, key2: str, *keys: str) -> list[str]:
pass
def col(self, *keys: str) -> Union[str, list[str]]:
converted = [self._col(key) for key in keys]
if len(keys) == 1:
return converted[0]
return converted
@property
def reversed_converter(self) -> dict[str, str]:
return {v: k for k, v in self.converter.items()}
def __len__(self) -> int:
return len(self.df.loc[self.mask])
def __getitem__(self, key: Union[list[str], pd.Series, str]) -> "UnifiedDataFrame":
if isinstance(key, list):
keys = self.col(*key)
return self.df.loc[self.mask, keys] # type: ignore
elif isinstance(key, pd.Series):
return self.unify_df(self.df.loc[self.mask & key])
        return self.df.loc[self.mask, self.col(key)]  # type: ignore
def __getattr__(self, attr: str) -> Any:
own_attr = set(dir(self))
df_attr = set(dir(self.df))
if attr in df_attr - own_attr:
return getattr(self.df.loc[self.mask], attr)
# Raise an error
raise AttributeError(f"Unknown method '{attr}' for UnifiedGameMap")
def sample(self, *args: Any, **kwargs: Any) -> "UnifiedDataFrame":
sampled = self.df.loc[self.mask].sample(*args, **kwargs)
return self.unify_df(sampled)
def to_dict(
self, *args: Any, renamed: bool = True, **kwargs: Any
    ) -> Union[list[dict[str, Any]], dict[str, Any]]:
if not renamed:
return self.df.loc[self.mask].to_dict(*args, **kwargs) # type: ignore
renamed_df = self.df.loc[self.mask].rename(columns=self.reversed_converter)
if "hint" not in renamed_df.columns or not self.use_hint:
renamed_df["hint"] = ""
if "group" not in renamed_df.columns:
renamed_df["group"] = 0
return renamed_df.to_dict(*args, **kwargs) # type: ignore
@property
def place(self) -> pd.Series:
return self.df.loc[self.mask, self.col("place")]
@property
def hint(self) -> pd.Series:
if not self.use_hint:
return pd.Series("", index=self.df.index)
return self.df.loc[self.mask, self.col("hint")]
@property
def group(self) -> pd.Series:
if self.single_group:
return pd.Series(0, index=self.df.index)
return self.df.loc[self.mask, self.col("group")]
@property
def lon(self) -> pd.Series:
return self.df.loc[self.mask, self.col("lon")]
@property
def lat(self) -> pd.Series:
return self.df.loc[self.mask, self.col("lat")]
@property
def rank(self) -> pd.Series:
return self.df.loc[self.mask, self.col("rank")]
def jsonify_record(self, record: dict[str, T]) -> dict[str, T]:
return {self.col(k): v for k, v in record.items()}
def guessify_record(self, record: dict[str, Any]) -> GuessRecord:
if not self.use_hint:
hint = ""
else:
hint = record[self.col("hint")]
place, lon, lat = (record[key] for key in self.col("place", "lon", "lat"))
return ((place, hint), Point(lon, lat))
|
# General-purpose speech utility class
import cmath
import wave
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
class Speech:
def audiorecorder(self, path, len=2, formater=pyaudio.paInt16, rate=16000, frames_per_buffer=1024, channels=2):
p = pyaudio.PyAudio()
stream = p.open(format=formater, channels=channels, rate=rate, input=True, frames_per_buffer=frames_per_buffer)
print("start recording......")
frames = []
for i in range(0, int(rate / frames_per_buffer * len)):
data = stream.read(frames_per_buffer)
frames.append(data)
print("stop recording......")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(path, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(formater))
wf.setframerate(rate)
wf.writeframes(b''.join(frames))
wf.close()
def audioplayer(self, path, frames_per_buffer=1024):
        wf = wave.open(path, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
data = wf.readframes(frames_per_buffer)
while data != b'':
stream.write(data)
data = wf.readframes(frames_per_buffer)
stream.stop_stream()
stream.close()
p.terminate()
def audiowrite(self):
pass
def audioread(self, path, sr = None):
data, sample_rate = librosa.load(path, sr = sr)
return data, sample_rate
def soundplot(self, data=[], sr=22050, size=(14, 5)):
if len(data) == 0:
data, _ = self.audioread()
plt.figure(figsize=size)
librosa.display.waveplot(data, sr=sr)
plt.show()
def enframe(self, x, win, inc=None):
# print(x)
nx = len(x)
if isinstance(win, list):
nwin = len(win)
            nlen = nwin  # frame length = window length
elif isinstance(win, int):
nwin = 1
            nlen = win  # treat the integer as the frame length
if inc is None:
inc = nlen
        nf = (nx - nlen + inc) // inc  # number of frames
        frameout = np.zeros((nf, nlen))  # initialize the output frames
        indf = np.multiply(inc, np.array([i for i in range(nf)]))  # offset of each frame within x
for i in range(nf):
            frameout[i, :] = x[indf[i]:indf[i] + nlen]  # split into frames
if isinstance(win, list):
            frameout = np.multiply(frameout, np.array(win))  # multiply each frame by the window values
return frameout
def FrameTime(self, frameNum, frameLen, inc, fs):
"""
        Compute the centre time of each frame after framing
"""
l = np.array([i for i in range(frameNum)])
return (l * inc + frameLen / 2) / fs
def SNR_singlech(self, I, In):
"""
calculate SNR of noisy speech signal
:param I: clean speech siganl
:param In: noisy speech siganl
:return snr:
"""
Ps = np.sum((I - np.mean(I)) ** 2) # signal power
Pn = np.sum((I - In) ** 2) # noise power
snr = 10 * np.log10(Ps / Pn)
return snr
def OverlapAdd2(self, X, A=None, W=None, S=None):
"""
reconstruction signal form spectrogram
:param X: FFT spectrum matrix (each column: frame fft)
:param A: phase angle (dimension = X), default = 0
:param W: window length (default: 2 * fft length)
:param S: shift length (default: W/2)
:return Y: reconstructed signal from its spectrogram
"""
if A is None:
A = np.angle(X)
if W is None:
W = X.shape[0] * 2
if S is None:
S = int(W / 2)
if int(S) != S: # frame shift not an integer
S = int(S)
print('The shift length have to be an integer as it is the number of samples.\n')
print('shift length is fixed to {}'.format(S))
FreqRes, FrameNum = X.shape # frame number, fft number
Spec = X * np.exp(A * cmath.sqrt(-1)) # complex spectrum
if np.mod(W, 2):
Spec = np.concatenate((Spec, np.flipud(np.conj(Spec[1::-1, :])))) # negative frequency
else:
Spec = np.concatenate((Spec, np.flipud(np.conj(Spec[1:(len(Spec) - 1), :])))) # negative frequency
sig = np.zeros((FrameNum - 1) * S + W) # initialization
weight = sig
for i in range(FrameNum): # overlap
start = i * S # start sample point of ith frame
spec = Spec[:, i] # ith frame spectrum
sig[start: (start + W)] = sig[start: (start + W)] + np.real(np.fft.ifft(spec, W, axis=0))
Y = sig
return Y
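if __name__ == "__main__":
    # Quick self-check of enframe (illustrative values only, not from the source): a
    # 16-sample signal cut into 8-sample frames with a hop of 4 samples yields 3 frames.
    frames = Speech().enframe(np.arange(16), 8, 4)
    print(frames.shape)  # expected: (3, 8)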
|
import json
import os.path
import re
import sys
import requests
from requests.cookies import RequestsCookieJar
from . import api
from . import errors
class CloudMail:
def __init__(self, login: str, password: str):
self.login = login
self.password = password
self.session = requests.Session()
self.api = api.API(self)
def is_cookies_valid(self) -> bool:
return self.api.tokens.csrf(True)["body"] != "user"
def auth(self) -> bool:
response = self.session.post(
self.api.config["endpoints"]["MAILRU_AUTH_ENDPOINT"],
params={"Login": self.login, "Password": self.password}
)
if ("fail=1" in response.url) and ("https://e.mail.ru/login" in response.url):
raise errors.CloudMailWrongAuthData("Wrong login/password data.")
if response.url == self.api.config["endpoints"]["TWO_FACTOR_AUTH_ENDPOINT"]:
self.session.post(
self.api.config["endpoints"]["TWO_FACTOR_AUTH_ENDPOINT"],
data={
"csrf": re.findall(r'"csrf":"(.+)","device"', response.text)[0],
"Login": self.login,
"AuthCode": int(input("Enter AuthCode: ")),
"Permanent": "1"
}
)
return self.is_cookies_valid()
def save_cookies_to_file(self, file_path="cookies.json") -> RequestsCookieJar:
with open(file_path, "w") as file:
json.dump(
requests.utils.dict_from_cookiejar(self.session.cookies), file, indent=4
)
return self.session.cookies
def load_cookies_from_file(self, file_path="cookies.json") -> RequestsCookieJar:
with open(file_path, "r") as file:
self.session.cookies = requests.utils.cookiejar_from_dict(json.load(file))
return self.session.cookies
    def update_cookies_from_dict(self, dict_=None, **kwargs) -> RequestsCookieJar:
        dict_ = dict(dict_ or {})  # avoid mutating a shared default dict
        dict_.update(kwargs)
for k, v in dict_.items():
self.session.cookies[k] = v
return self.session.cookies
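# Illustrative usage (a sketch; the credentials are placeholders and auth() performs real
# network requests against Mail.ru):
#
#   cm = CloudMail("[email protected]", "password")
#   if os.path.exists("cookies.json"):
#       cm.load_cookies_from_file()
#   if not cm.is_cookies_valid():
#       cm.auth()
#       cm.save_cookies_to_file()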
|
#!/usr/bin/env python3
'''
Authors: Patrick Kelly ([email protected]) and Garbo Loo
Last Updated: 3-18-2020
'''
import binascii
import os
import random
import argparse
import ctypes
import ctypes.util
from PIL import Image
from bitstring import BitArray
delimiter = 256 * '1'
#Convert the executable payload to a bitstream
def get_bitstream(filename):
    with open(filename, 'rb') as fh:
        binary_file = fh.read()
    return BitArray(binary_file).bin
#Append a delimiter of N 1's to the end of bitstring
def add_delimiter(bitstring, N=256):
for i in range(N):
bitstring += '1'
return bitstring
#Get the image dimensions (the total number of LSBs is x_size * y_size * 3)
def image_bits(image):
x_size = image.size[0]
y_size = image.size[1]
return x_size, y_size
#Balance the proportion of 1s and 0s in the payload (to prevent a skewed RGB histogram)
def smoothing(bitstream, total_size):
zeros = bitstream.count('0')
ones = bitstream.count('1')
unfilled = total_size - len(bitstream)
zeros_weight = ((total_size/2) - zeros)/unfilled
ones_weight = ((total_size/2) - ones)/unfilled
for index in range(unfilled):
bitstream += str(random.choices(population=[0,1], weights=[zeros_weight,ones_weight])[0])
return bitstream
#Generate a pseudorandom list of tuples (x, y, rgb_value) with each tuple uniquely
#designating a random rgb value at a random x,y coordinate in the image. The list
#has length <streamlength> and is reproducible with seed <rand_seed>
def pix_list(image, rand_seed, x_size, y_size):
random.seed(rand_seed)
colors = ['r', 'b', 'g']
output_list = []
totalsize = x_size * y_size * 3 #Multiply by 3 to account for each of the rgb channels
pix_list = list(range(totalsize))
random.shuffle(pix_list) #Shuffle to generate nonrepeating list of <totalsize> integers
for index in range(totalsize): #Each integer corresponds to a unique (x,y,color) tuple
color = colors[pix_list[index] % 3]
x = (pix_list[index] // 3) % x_size
y = (pix_list[index] // 3) // x_size
newpix = (x, y, color)
output_list.append(newpix)
return output_list
#Replace the red, green, or blue LSB of each successive pixel in code_list with the corresponding
#bitstream bit
def encode(image, bitstream, pixel_list, output):
rgbs = image.convert('RGB')
pixels = image.load()
#Iterate over the nonduplicate, randomly-ordered tuples in pixel_list
for index in range(len(bitstream)):
pixel_tuple = pixel_list[index]
x = pixel_tuple[0]
y = pixel_tuple[1]
r,g,b = pixels[x,y]
#Insert current bitstream bit into the indicated rgb value
if pixel_tuple[2] == 'r':
r_zero = (r & 0xFE) #Zero the LSB of the r value
new_bit = int(bitstream[index]) #Get the next bitstream bit
pixels[x,y] = (r_zero ^ new_bit, g, b) #XOR it with the LSB-zeroed r value
elif pixel_tuple[2] == 'g':
g_zero = (g & 0xFE)
new_bit = int(bitstream[index])
pixels[x,y] = (r, g_zero ^ new_bit, b)
elif pixel_tuple[2] == 'b':
b_zero = (b & 0xFE)
new_bit = int(bitstream[index])
pixels[x,y] = (r, g, b_zero ^ new_bit)
image.save(output)
return image
def extract(image, code_list, delimiter_length=256):
rgbs = image.convert('RGB')
pixels = image.load()
message = ''
ones_counter = 0 #To be used to check for the delimiter string
for index in range(len(code_list)):
pixel_tuple = code_list[index]
x = pixel_tuple[0]
y = pixel_tuple[1]
color = pixel_tuple[2]
r,g,b = rgbs.getpixel((x,y))
if color == 'r':
LSB = str(r & 1) #If color = 'r', use bitmask to retrieve the LSB
elif color == 'g':
LSB = str(g & 1)
else:
LSB = str(b & 1)
message += LSB #Add the LSB to the message
#Now check for the delimiter
if LSB == '1':
ones_counter += 1
else:
ones_counter = 0
if ones_counter == delimiter_length:
return message
#If there's no delimiter, there's no message. Exit the script.
exit(0)
#Execute the extracted binary in RAM
def execute(file_bytes):
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
fd = libc.syscall(319, b' ', 1) #Use memfd_create syscall to create a file descriptor in memory
assert fd >= 0
#Write the binary to the fd and execute it
f1 = file_bytes
f2 = open('/proc/self/fd/' + str(fd), mode='wb')
f2.write(f1)
f2.close()
os.execv('/proc/self/fd/' + str(fd), ['test'])
#Conceal the executable payload in the image
def concealer(image, payload, seed, output_name):
im = Image.open(image)
bitstream = add_delimiter(get_bitstream(payload))
x_size, y_size = image_bits(im)
totalsize = x_size * y_size * 3
bitstream = smoothing(bitstream, totalsize)
encoding_list = pix_list(im, seed, x_size, y_size)
image = encode(im, bitstream, encoding_list, output_name)
#Extract the payload from the stego image
def deployer(image, seed):
im = Image.open(image)
x_size, y_size = image_bits(im)
encoding_list = pix_list(im, seed, x_size, y_size)
message = extract(im, encoding_list)
delimit_index = message.find(delimiter)
output = message[:delimit_index]
return output
def main():
#Command line options (-c,-d,-i,-p,-o,-s,-g) with argparse module:
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-c", "--conceal", action="store_true", help="conceal ELF file in image; use with -i, -p, -o")
group.add_argument("-d", "--deploy", action="store_true", help="extract ELF file from image and execute in memory; use with -i")
parser.add_argument("-i", "--image", type=str, help="image input required as argument", metavar='<image file>')
parser.add_argument("-p", "--payload", type=str, help="elf executable file required as argument", metavar='<elf file>')
parser.add_argument("-o", "--output", type=str, help="save your output file in PNG format", metavar='<output file name.png>')
parser.add_argument("-s", "--seed", type=str, help="seed for reproducible initiation of the PRNG that determines the pixel sequence.")
parser.add_argument("-g", "--histogram", action="store_true", help="display histogram of superimposed input and output PNGs")
args = parser.parse_args()
#Set variables with command-line inputs
conceal = args.conceal
deploy = args.deploy
in_image = args.image
in_payload = args.payload
outfile = args.output
rand_seed = args.seed
histogram = args.histogram
#Runtime
if conceal:
concealer(in_image, in_payload, rand_seed, outfile)
#if -g, display a histogram of input and output images superimposed
if histogram:
import histogram
h = histogram.Histogram()
h.images = [in_image, outfile]
h.plotter()
elif deploy:
result = deployer(in_image, rand_seed)
hex_chars = hex(int(result, 2))
file_bytes = binascii.a2b_hex(hex_chars[2:])
execute(file_bytes)
if __name__ == '__main__':
main()
|
import scipy
from SloppyCell.ReactionNetworks import *
from Nets import *
alb_net.resetDynamicVariables()
alb_times = scipy.logspace(-6, -2, 1000)
alb_traj = Dynamics.integrate(alb_net, alb_times)
heme_net.resetDynamicVariables()
heme_times = scipy.logspace(-1, 3, 1000)
heme_traj = Dynamics.integrate(heme_net, heme_times)
Plotting.figure(1, figsize=(8,10))
Plotting.subplot(2,1,1)
Plotting.plot_trajectory(alb_traj, ['ligand_per_protein'], logx=True)
Plotting.axis([1e-6, 1e-2, 0, 6])
Plotting.title('Albumin best fit')
Plotting.subplot(2,1,2)
Plotting.plot_trajectory(heme_traj, ['ligand_per_protein'], logx=True)
Plotting.axis([1e-1, 1e3, 0, 4])
Plotting.title('Hemoglobin best fit')
|
from celery import Celery
celery = Celery()
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python2
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
next_line_tag = '\n'
navigation = [u"关于我",u"Java",u"数据库",u"设计模式",u"集成框架",u"Linux",u"Go",u"Python",u"Docker",u"大前端",u"工具",u"解决方案",u"管理相关",u"面试题"]
navigationName = 'navigation.md'
def walk_dir_get_data(dir,fileinfo):
for nav in navigation:
        # Top-level navigation section
fileinfo.write(next_line_tag)
fileinfo.write('[' + nav + ']()' + next_line_tag)
fileinfo.write(next_line_tag)
dir2 = os.path.join(dir, nav)
if os.path.isdir(dir2):
            # Second-level directory
for file_name in os.listdir(dir2):
dir3 = os.path.join(dir2, file_name)
if os.path.isdir(dir3):
                    # Third-level directory
fileinfo.write('* # '+file_name+next_line_tag)
for file_name_2 in os.listdir(dir3):
if os.path.isdir(os.path.join(dir3,file_name_2)):
continue
else:
if file_name_2.find('.md') != -1:
fileinfo.write('* [' + file_name_2.replace('.md','') + '](' + nav + '/' + file_name + '/' + file_name_2 + ')' + next_line_tag)
fileinfo.write('- - - -'+next_line_tag)
else:
if file_name.find('.md') != -1:
fileinfo.write('* [' + file_name.replace('.md', '') + '](' + nav + '/' + file_name + ')' + next_line_tag)
# Get the directory containing the current script file
def cur_file_dir():
    # Get the script path
path = sys.path[0]
    # Determine whether this is a plain script or a py2exe-compiled file: for a script, return the script's directory; for a py2exe build, return the directory of the compiled file
if os.path.isdir(path):
return path
elif os.path.isfile(path):
return os.path.dirname(path)
def main():
# dir = raw_input('please input the path:')
dir = cur_file_dir()
    if os.path.exists(navigationName):
        os.remove(navigationName)
fileinfo = open(navigationName,'w')
fileinfo.write(u'# 非专业Java程序员博客'+next_line_tag)
fileinfo.write(u'[gimmick:theme](cerulean)'+next_line_tag)
    walk_dir_get_data(dir, fileinfo)
    fileinfo.close()
if __name__ == '__main__':
main()
print 'init navigation success!'
|
#!/home/ec2-user/anaconda3/bin/python
# Import data from a sim into the database.
import logging
import os
import sys
import silk
import psycopg2
from netdata.run_collect import SimProgram, silk_str, epoch_utc
def log_info(msg):
"""
Print info log message.
:params msg: message text.
"""
logging.info(' ' + msg)
def log_debug(msg):
"""
Print debug log message.
:params msg: message text.
"""
logging.debug(msg)
def in_time_frame(sim_start_epoch, sim_end_epoch,
rec_start_epoch, rec_end_epoch):
"""
    Check whether record is inside the simulation time frame.
    :param sim_start_epoch: simulation start.
:param sim_end_epoch: simulation end.
:param rec_start_epoch: record start.
:param rec_end_epoch: record end.
:return: True if record overlaps with the simulation time frame.
"""
outside = (rec_end_epoch <= sim_start_epoch or
sim_end_epoch <= rec_start_epoch)
return not outside
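# Example (illustrative): a record spanning [5, 15] overlaps a simulation window [10, 20],
# while a record that ends exactly at the window start does not:
#     in_time_frame(10, 20, 5, 15)  -> True
#     in_time_frame(10, 20, 0, 10)  -> False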
def main():
if silk.get_configuration('TIMEZONE_SUPPORT') != 'UTC':
print('silk must be configured for UTC')
quit()
if len(sys.argv) < 2:
print('Parameters: <simulation file> [any string for debug]')
quit()
if len(sys.argv) > 2:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level)
sim_file = sys.argv[1]
if not os.path.isfile(sim_file):
sim_file = 'sims/' + sim_file + '.json'
sim = SimProgram(sim_file)
log_info('Sim: {}'.format(sim.short_name))
silk.site.init_site()
log_debug('start: {}'.format(silk_str(sim.start_utc)))
log_debug(' end: {}'.format(silk_str(sim.end_utc)))
log_info('Sim {} epoch: {} - {}'.
format(sim.short_name,
sim.start_epoch, sim.end_epoch))
log_info('Sim {} utc: {} - {}'.
format(sim.short_name,
sim.start_utc, sim.end_utc))
it = silk.site.repository_iter(start=silk_str(sim.start_utc),
end=silk_str(sim.end_utc),
classtypes=('all', 'ext2ext'))
conn = psycopg2.connect("host=db0.chgate.net user=analyzer dbname=graphs")
cur = conn.cursor()
min_stime = 0
max_etime = 0
for file in it:
log_info('data file: {}'.format(file))
fh = silk.silkfile_open(file, silk.READ)
count = 0
dport_eq_sink_port = 0
for rec in fh:
count += 1
if rec.dport == sim.sink_port:
dport_eq_sink_port += 1
log_info('records processed: {0}, dport == sink_port: {1}'.
format(count, dport_eq_sink_port))
if in_time_frame(sim.start_epoch, sim.end_epoch,
rec.stime_epoch_secs, rec.etime_epoch_secs):
log_info('valid record')
record_start = rec.stime_epoch_secs \
if sim.start_epoch <= rec.stime_epoch_secs \
else sim.start_epoch
if rec.stime_epoch_secs < sim.start_epoch:
log_info('There is a record starting before sim.start: {}'.
format(count))
cur.execute('insert into edges \
(sip, sport, dip, dport, \
stime_epoch_secs, etime_epoch_secs)\
values (%s, %s, %s, %s, %s, %s);',
(str(rec.sip), rec.sport,
str(rec.dip), rec.dport,
record_start, rec.etime_epoch_secs))
print('etime: {}'.format(rec.etime_epoch_secs))
if min_stime == 0:
min_stime = record_start
else:
min_stime = min(min_stime, record_start)
max_etime = max(max_etime, rec.etime_epoch_secs)
conn.commit()
cur.execute('insert into frames (frame, start_epoch, end_epoch, sink_port) \
values (%s, %s, %s, %s);',
(sim.short_name, min_stime, max_etime,
sim.sink_port))
log_info('Recorded epoch: {} - {}'.
format(min_stime, max_etime))
log_info('Recorded utc: {} - {}'.
format(epoch_utc(min_stime), epoch_utc(max_etime)))
# log_info('Sim start == rec start: {}'.format(sim.start_epoch == min_stime))
conn.commit()
cur.close()
conn.close()
if __name__ == '__main__':
main()
|
#: E251 E251
def foo(bar = False):
'''Test function with an error in declaration'''
pass
#: E251
foo(bar= True)
#: E251
foo(bar =True)
#: E251 E251
foo(bar = True)
#: E251
y = bar(root= "sdasd")
#: E251:2:29
parser.add_argument('--long-option',
default=
"/rather/long/filesystem/path/here/blah/blah/blah")
#: E251:1:45
parser.add_argument('--long-option', default
="/rather/long/filesystem/path/here/blah/blah/blah")
#: Okay
foo(bar=(1 == 1))
foo(bar=(1 != 1))
foo(bar=(1 >= 1))
foo(bar=(1 <= 1))
(options, args) = parser.parse_args()
d[type(None)] = _deepcopy_atomic
|
import datetime
import logging
import os
import shutil
class Logger:
def __init__(self, context):
self.log_dir = context.workspace
self.path = self.log_dir / "log.txt"
self.logs_to_keep = context.config.get("logs_to_keep", 10)
def create_new_log_file(self):
"""Remove all handlers associated with the root logger object and create a NEW log file."""
self.previous_log_contents = self.get_contents() # used by `get_arcs_for_undoing()`
self.backup_current_log_file()
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(filename=self.path, filemode="w", level=logging.DEBUG)
def backup_current_log_file(self):
"""
Copy the current log file with a timestamp appended.
Clean up the directory by keeping only the last self.logs_to_keep copies.
"""
if self.logs_to_keep == 0:
return
try:
modified = os.path.getmtime(self.path)
except FileNotFoundError:
return
suffix = datetime.datetime.fromtimestamp(modified).strftime("%Y-%m-%d_%H-%M-%S.%f")
destination = f"{self.path.parent}/log_{suffix}.txt"
shutil.copy2(self.path, destination)
for file in sorted(self.log_dir.glob("log_*.txt"), reverse=True)[self.logs_to_keep :]:
file.unlink()
def get_contents(self): # pragma: no cover
if self.path.is_file():
return self.path.read_text().strip()
else:
return ""
def warning(self, *args, **kwargs):
logging.warning(*args, **kwargs)
def info(self, *args, **kwargs):
logging.info(*args, **kwargs)
def error(self, *args, **kwargs):
logging.error(*args, **kwargs)
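# Usage sketch (illustrative; assumes `context.workspace` is a pathlib.Path and
# `context.config` is a plain dict):
#     logger = Logger(context)
#     logger.create_new_log_file()
#     logger.info("pipeline started")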
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Retrieves information about a Cloud SQL instance operation."""
from googlecloudsdk.calliope import base
from googlecloudsdk.sql import util
class Get(base.Command):
"""Retrieves information about a Cloud SQL instance operation."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use it to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'operation',
help='An identifier that uniquely identifies the operation.')
@util.ReraiseHttpException
def Run(self, args):
"""Retrieves information about a Cloud SQL instance operation.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource if the api request was
successful.
Raises:
HttpException: A http error response was received while executing api
request.
      ToolException: An error other than an http error occurred while executing the
command.
"""
sql_client = self.context['sql_client']
resources = self.context['registry']
util.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
operation_ref = resources.Parse(
args.operation, collection='sql.operations',
params={'project': instance_ref.project,
'instance': instance_ref.instance})
result = sql_client.operations.Get(operation_ref.Request())
return result
def Display(self, unused_args, result):
"""Display prints information about what just happened to stdout.
Args:
unused_args: The same as the args in Run.
result: A dict object representing the operations resource if the api
request was successful.
"""
self.format(result)
|
### Import modules
from base import BaseClass
from utils import utils
from utils.variables import imagenet_map, imagenet_stats
import torchvision.transforms as transforms
import numpy as np
import os, json, cv2, torch, torchvision, PIL
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class Classifier(BaseClass):
def __init__(self, model, name='ImageNet_Classifier', device='cpu'):
super().__init__(name)
#Init name and metadata
self.name = name
self.device = device.lower()
# self.device = 'gpu' if torch.cuda.is_available() else 'cpu'
#Create net
self.predictor = torchvision.models.mobilenet_v2(pretrained=True)
self.predictor.to(self.device).eval()
#Init helper
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(imagenet_stats['mean'],
imagenet_stats['std'])])
def predict(self,image):
"""
Does classification on a single image. In order to perform batch classification,
you can either call this predict() function in a for-loop or alternatively (advanced)
try to modify this predict() function to perform batch-inferencing.
Input:
image: cv2 type object
Output:
predictions: torch.tensor object
"""
#Cast cv2 image to PIL format if needed
if type(image) != PIL.Image.Image:
image = utils.cv2_to_pil(image)
#Transform / preprocess as required by trained model
images_tf = self.transform(image).unsqueeze(0).to(self.device) #make batch dimension
#Predict / Inference
output = self.predictor(images_tf).squeeze(0) #remove batch dimension
return output
def visualize(self,image,output):
"""
Simple single plot visualizing function.
Input:
image: cv2 type object
outputs: torch.tensor object returned by the predict() function
Output:
None
"""
#Check
        assert len(output.shape) <= 3, "Error! The visualize() function only accepts a single image, NOT batches."
#Cast cv2 image to PIL format if needed
if type(image) != PIL.Image.Image:
image = utils.cv2_to_pil(image)
#Plot image
plt.imshow(image)
plt.show()
        #Print the top-1 predicted label
        print("This is a(n) {}".format(imagenet_map[int(output.argmax())]))
|
from unittest.mock import patch
from fastapi.testclient import TestClient
@patch('app.config.get_config')
@patch('app.config.config')
@patch('app.config.configure_logging')
def get_app(a, b, c):
from app.main import app
return TestClient(app)
client = get_app() |
import csv
from happytransformer.happy_text_to_text import HappyTextToText, TTTrainArgs
from datasets import load_dataset
def main():
happy_tt = HappyTextToText("T5", "t5-base")
input_text = "grammar: This sentences had bad grammars and spelling. "
before_text = happy_tt.generate_text(input_text).text
# There's no training split. Just eval and test. So, we'll use eval for train and test for eval.
# 755 cases, but each case has 4 corrections so there are really 3020
train_dataset = load_dataset("jfleg", split='validation[:]')
    # 748 cases, but again, each case has 4 corrections so there are really 2992
eval_dataset = load_dataset("jfleg", split='test[:]')
generate_csv("train.csv", train_dataset)
generate_csv("eval.csv", eval_dataset)
before_loss = happy_tt.eval("eval.csv").loss
happy_tt.train("train.csv")
after_text = happy_tt.generate_text(input_text).text
after_loss = happy_tt.eval("eval.csv").loss
print("before loss:", before_loss)
print("after loss:", after_loss)
print("------------------------------------")
print("input text:", input_text)
print("before text:", before_text)
print("after text:", after_text)
def generate_csv(csv_path, dataset):
with open(csv_path, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["input", "target"])
for case in dataset:
input_text = "grammar: " + case["sentence"]
for correction in case["corrections"]:
                # a few of the cases have None values; we'll skip them
                if input_text and correction:
                    writer.writerow([input_text, correction])
if __name__ == "__main__":
main()
|
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import rpm
import logging
import shlex
import os
import sys
import yum
import subprocess
# Mask output codes so we can report failure by type if needed
health_code = {
'OK': 0x0,
'GENERAL_PYTHON_FAILURE': 0x1,
'GENERAL_CHECK_FAILURE': 0x2,
'RPM_BAD_TRANSACTION_SET': 0x4,
'RPM_BAD_DB_ENTRY': 0x8,
'RPM_YUM_CHECK_DEPENDENCIES_FAILURE': 0x10,
'RPM_YUM_CHECK_OBSOLETED_FAILURE': 0x20,
'RPM_YUM_CHECK_PROVIDES_FAILURE': 0x40,
'RPM_YUM_CHECK_DUPLICATES_FAILURE': 0x80,
'YUM_BUILD_TRANSACTION_FAILURE': 0x100,
'RPM_GENERAL_FAILURE': 0x200,
'YUM_GENERAL_FAILURE': 0x400,
}
class RPMDBPackageSack(object):
def __init__(self):
self.sack = yum.rpmsack.RPMDBPackageSack()
def check_dependencies(self):
if self.sack.check_dependencies():
return health_code['RPM_YUM_CHECK_DEPENDENCIES_FAILURE']
return health_code['OK']
def check_obsoleted(self):
if self.sack.check_obsoleted():
return health_code['RPM_YUM_CHECK_OBSOLETED_FAILURE']
return health_code['OK']
def check_provides(self):
if self.sack.check_provides():
return health_code['RPM_YUM_CHECK_PROVIDES_FAILURE']
return health_code['OK']
def check_duplicates(self):
if self.sack.check_duplicates():
return health_code['RPM_YUM_CHECK_DUPLICATES_FAILURE']
return health_code['OK']
class RPMDB(object):
def verify_transaction_set(self):
try:
transaction_set = rpm.TransactionSet()
# Verify structure of db is valid
rc = transaction_set.verifyDB()
# Verify entries are intact
if rc != 0 or transaction_set.check() or transaction_set.problems():
return health_code['RPM_BAD_TRANSACTION_SET']
except rpm._rpm.error:
return health_code['RPM_BAD_TRANSACTION_SET']
return health_code['OK']
def verify_entries(self):
try:
transaction_set = rpm.TransactionSet()
# Ensure each entry is readable
transaction_set.dbMatch()
except rpm._rpm.error:
return health_code['RPM_BAD_DB_ENTRY']
return health_code['OK']
def check_rpm_stderr(self):
'''
Berkeley DB sometimes prints to stderr if there's a problem with the
rpmdb. Unfortunately, BDB reports 'all clear' to rpm even if
there's a DB issue. The easiest way to check for errors this way is to
look if anything was printed to stderr. If there is, then we know
something is wrong with the DB.
'''
cmd = shlex.split('rpm --query --all --verify --quiet')
with open(os.devnull, 'w') as devnull:
proc = subprocess.Popen(
cmd, stdout=devnull, stderr=subprocess.PIPE)
# Ignore stdout since we direct to /dev/null in Popen.
stdout, stderr = proc.communicate()
if stderr:
return health_code['RPM_GENERAL_FAILURE']
return health_code['OK']
class YumDB(object):
def __init__(self):
self.base = yum.YumBase()
def build_transaction(self):
result_code, _ = self.base.buildTransaction()
if result_code:
return health_code['YUM_BUILD_TRANSACTION_FAILURE']
return health_code['OK']
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-q',
'--quiet',
help='Only print status code',
default=False,
action='store_true',
)
parser.add_argument(
'-f',
'--fast',
help='Only run fast checks',
default=False,
action='store_true',
)
parser.add_argument(
'-s',
'--skip-check',
help='Skip check by method name',
action='append',
dest='skipped_checks',
default=[],
)
args = parser.parse_args()
log_format = ("[%(levelname)8s] %(message)s")
logging.basicConfig(format=log_format, level=logging.INFO)
# Disable logging if --quiet passed. Also disable stderr because Berkeley DB
# logs to stderr from many of the calls in this script.
if args.quiet:
logging.disable(logging.CRITICAL)
rpmdb = RPMDB()
yumdb = YumDB()
sack = RPMDBPackageSack()
slow_checks = [
rpmdb.check_rpm_stderr,
]
checks = [
rpmdb.verify_transaction_set,
rpmdb.verify_entries,
yumdb.build_transaction,
sack.check_dependencies,
sack.check_duplicates,
sack.check_obsoleted,
sack.check_provides,
]
checks = [check for check in checks
if check.__name__ not in args.skipped_checks]
if not args.fast:
checks.extend(slow_checks)
# Bitwise OR check return values
exit_code = 0
for check in checks:
return_code = 0
try:
return_code = check()
except Exception as e:
logging.error('check %s raised an exception: %s',
check.__name__, e)
return_code = health_code['GENERAL_CHECK_FAILURE']
rc_to_str = [key for key, value in health_code.items()
if value == return_code]
logging.info(
'%16s %25s: %10s', check.im_class.__name__, check.__name__,
rc_to_str[0]
)
exit_code |= return_code
return exit_code
if __name__ == '__main__':
exit_code = main()
print('{:016b}'.format(exit_code))
sys.exit(1 if exit_code else 0)
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'shape': [
# c, x, y, output
((3, 2, 4),) * 4,
((4,), (3, 1, 1), (2, 1), (3, 2, 4)),
],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestWhere(unittest.TestCase):
def setUp(self):
c_shape, x_shape, y_shape, out_shape = self.shape
self.c_data = numpy.random.uniform(-1, 1, c_shape) > 0
self.x_data = \
numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.y_data = \
numpy.random.uniform(-1, 1, y_shape).astype(self.dtype)
self.g_data = \
numpy.random.uniform(-1, 1, out_shape).astype(self.dtype)
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3,
})
def check_forward(self, c_data, x_data, y_data):
c = chainer.Variable(c_data)
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.where(c, x, y)
xp = c.xp
z_data_expected = xp.where(c_data, x_data, y_data)
testing.assert_allclose(z.array, z_data_expected)
def test_forward_cpu(self):
self.check_forward(self.c_data, self.x_data, self.y_data)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.c_data),
cuda.to_gpu(self.x_data),
cuda.to_gpu(self.y_data))
def check_backward(self, c_data, x_data, y_data, g_data):
gradient_check.check_backward(
functions.where, (c_data, x_data, y_data), g_data,
**self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.c_data, self.x_data, self.y_data, self.g_data)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.c_data),
cuda.to_gpu(self.x_data),
cuda.to_gpu(self.y_data),
cuda.to_gpu(self.g_data))
testing.run_module(__name__, __file__)
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import numpy as np
import dace
N = dace.symbol("N")
@dace.program
def add_one(A: dace.int64[N, N], result: dace.int64[N, N]):
result[:] = A + 1
def call_test():
@dace.program
def add_one_more(A: dace.int64[N, N]):
result = dace.define_local([N, N], dace.int64)
add_one(A, result)
return result + 1
A = np.random.randint(0, 10, size=(11, 11), dtype=np.int64)
result = add_one_more(A=A.copy())
assert np.allclose(result, A + 2)
def call_sdfg_test():
add_one_sdfg = add_one.to_sdfg()
@dace.program
def add_one_more(A: dace.int64[N, N]):
result = dace.define_local([N, N], dace.int64)
add_one_sdfg(A=A, result=result)
return result + 1
A = np.random.randint(0, 10, size=(11, 11), dtype=np.int64)
result = add_one_more(A=A.copy())
assert np.allclose(result, A + 2)
other_N = dace.symbol("N")
@dace.program
def add_one_other_n(A: dace.int64[other_N - 1, other_N - 1],
result: dace.int64[other_N - 1, other_N - 1]):
result[:] = A + 1
def call_sdfg_same_symbol_name_test():
add_one_sdfg = add_one_other_n.to_sdfg()
@dace.program
def add_one_more(A: dace.int64[N, N]):
result = dace.define_local([N, N], dace.int64)
add_one_sdfg(A=A, result=result)
return result + 1
A = np.random.randint(0, 10, size=(11, 11), dtype=np.int64)
result = add_one_more(A=A.copy())
assert np.allclose(result, A + 2)
if __name__ == "__main__":
call_test()
call_sdfg_test()
call_sdfg_same_symbol_name_test()
|
import csv
import collections
import subprocess
from io import StringIO
import base64
import re
from wtforms import form, fields, widgets
from actionform import ActionForm, webserver
def clean(x, subgraph=None):
x = x.strip().replace(" ", "_")
    x = re.sub(r"\W", "_", x)
if subgraph:
subgraph = clean(str(subgraph))
x = "_{subgraph}__{x}".format(**locals())
return x
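# Examples (illustrative): clean("foo bar!") -> "foo_bar_", clean("x", subgraph=2) -> "_2__x"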
def dot2img(dot, format="png", layout="dot"):
dotformat = 'png' if format == 'html' else format
cmd = ['dot', '-T', dotformat,'-K', layout]
result = subprocess.check_output(cmd, input=dot.encode("utf-8"))
if format == 'html':
data = base64.b64encode(result).decode("utf-8")
result = "<object type='image/png' data='data:image/png;base64,{data}'></object>".format(**locals())
return result
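# Example (illustrative; requires the Graphviz `dot` binary on PATH):
#     html_snippet = dot2img("digraph g { a -> b; }", format="html")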
COLUMNS = ["source", "subject", "object", "predicate", "quality", "weight"]
class Network(ActionForm):
default_template = "netviz.html"
class form_class(form.Form):
network = fields.StringField(widget=widgets.TextArea())
normalize = fields.BooleanField(label="Normalize weights")
qualabel = fields.BooleanField(label="Include quality in label")
predlabel = fields.BooleanField(label="Include predicate in label")
collapse = fields.BooleanField(label="Collapse arrows between nodes")
nosubgraphs = fields.BooleanField(label="Don't make subgraphs per source")
#blue = fields.BooleanField()
#bw = fields.BooleanField(label="Black & White")
delimiter = fields.SelectField(choices=[("","autodetect"), (";",";"), (",",","), ("\t","tab")])
def read_network(self, network, delimiter):
lines = [line.strip(' ') for line in network.split("\n")]
if not delimiter:
delimiters = {d : network.count(d) for d in ",;\t"}
delimiter = sorted(delimiters, key=delimiters.get)[-1]
return csv.reader(lines, delimiter=delimiter)
def normalize(self, network):
network = list(network)
header = [f.lower() for f in network[0]]
if not {"subject", "object"} - set(header):
# first row is header (so keep header but skip first row)
network = network[1:]
else:
# add default header, keep first row
header = COLUMNS
network = [dict(zip(header, row)) for row in network]
for i, edge in enumerate(network):
src, su, obj, pred, q, n = [edge.get(c) for c in COLUMNS]
if not su or not obj:
continue
if q: q = float(q)
if n: n = float(n)
yield src, su, obj, pred, q, n
def collapse(self, r):
edges = {} # src, su, obj: (totq, totn)
for src, su, obj, pred, q, n in r:
key = (src, su, obj)
if key not in edges:
edges[key] = [0,0, []]
if not n: n = 1
if not q: q = 0
edges[key][0] += q*n
edges[key][1] += n
if pred: edges[key][2] += [pred]
for (src, su, obj), (totq, totn, preds) in edges.items():
yield src, su, obj, "\\n".join(preds), totq/totn, totn
def get_graph(self, r, **options):
edges = collections.defaultdict(list)
maxweight = 0
for src, su, obj, pred, q, n in r:
if options.get('nosubgraphs'):
src = None
edges[src and src.strip()].append((su, obj, pred, q, n))
if n:
maxweight = max(maxweight, n)
dot = ["digraph g {"]
for i, src in enumerate(edges):
if src:
dot.append('subgraph cluster_%i {\nlabel="%s";' % (i, src))
nodes = {}
for node in set(node for (su, obj, pred, q, n) in edges[src] for node in (su,obj)):
id = clean(node, i if src else None)
nodes[node] = id
dot.append('{id} [label="{node}"];'.format(**locals()))
for su, obj, pred, q, n in edges[src]:
su = nodes[su]
obj = nodes[obj]
kargs = {}
lbl = []
if n:
if options.get('normalize'):
n = n * 5 / maxweight
kargs['style'] = 'setlinewidth(%1.3f)' % n
if q:
kargs['color'] = "%1.4f,%1.4f,%1.4f" % (.167 + .167 * q,1,1)
if options.get('predlabel') and pred:
lbl.append(pred)
if options.get('qualabel') and q is not None:
lbl.append("%+1.2f" % q)
if lbl:
kargs['label'] = "\\n".join(lbl)
kargs = ",".join('{k}="{v}"'.format(**locals()) for (k,v) in kargs.items())
dot.append('{su} -> {obj} [{kargs}];'.format(**locals()))
if src:
dot.append("}")
dot.append("}")
return "\n".join(dot)
def _run(self, network, delimiter, **options):
r = self.normalize(self.read_network(network, delimiter))
if options.get('collapse'):
r = self.collapse(r)
dot = self.get_graph(r, **options)
image = dot2img(dot, format='html')
return dict(dot=dot, image=image)
def render_result(self, result, template, context):
context.update(result)
return template.render(**context)
if __name__ == '__main__':
Network.run_webserver()
|
from sqlalchemy.exc import IntegrityError
from src.config import TAG_LOCAL_PICK_UP, TAG_LOCAL_DELIVERY, TAG_LOCAL_OPEN
from src.db.sqlalchemy import db_session
from src.model.local import Local
from src.helper import image as image_util, log
from src.service import category as category_service
from src.service import opening_hours_item as opening_hours_item_service
from src.service import review_local as review_local_service
from src.service import user as user_service
def add_dummy_data():
count = db_session().query(Local.id).count()
if count == 0:
log.info(f'Adding dummy data for {Local.__tablename__}...')
object_list = [
Local(
name='Bona Fruita Busquets', description='La fruiteria del teu barri.',
postal_address='Carrer de Sants, 258, 08028 Barcelona',
latitude=41.375647, longitude=2.127905, website=None, phone_number='933399118',
pick_up=True, delivery=True, image=image_util.decode_and_resize('test/mock/local_image_1.jpg'),
category_id=category_service.get_id_by_name('Fruiteria')
),
Local(
name='Farmacia Bassegoda', description='La farmacia del teu barri.',
postal_address='Carrer de Bassegoda, 11, 08028 Barcelona',
latitude=41.375191, longitude=2.125832, website=None, phone_number='934400955',
pick_up=True, delivery=False, image=image_util.decode_and_resize('test/mock/local_image_2.jpg'),
category_id=category_service.get_id_by_name('Farmacia')
),
Local(
name='Panet Olzinelles',
description='El millor forn de pa del barri de sants.',
postal_address='Carrer Olzinelles, 10, 08014 Barcelona',
latitude=41.374300,
longitude=2.136234,
website='panet.cat',
phone_number='672642565',
pick_up=True, delivery=False,
image=image_util.decode_and_resize('test/mock/panet.jpg'),
category_id=category_service.get_id_by_name('Fleca')
),
Local(
name='FORN DE PA TOÑI DEGUSTACIÓ',
description='Forn de pa de sants, de tota la vida',
postal_address='Baixos, Carrer Olzinelles, 103, 08014 Barcelona',
latitude=41.370550,
longitude=2.137626,
website=None,
phone_number='933536486',
pick_up=True, delivery=False,
image=image_util.decode_and_resize('test/mock/toni.jpg'),
category_id=category_service.get_id_by_name('Fleca')
),
Local(
name='El Primo',
description='Forn de pa el Primo, vina i esmorza',
postal_address='Carrer de Sants, 252, 08028 Barcelona',
latitude=41.370550,
longitude=2.137626,
website=None,
phone_number='931265329',
pick_up=True, delivery=False,
image=image_util.decode_and_resize('test/mock/primo.jpg'),
category_id=category_service.get_id_by_name('Fleca')
),
Local(
name='Ferreteria J. Valls',
description='Ferreteria valls, tot el que et pugui fer falta',
postal_address='Carrer de Sants, 172, 08028 Barcelona',
latitude=41.375467,
longitude=2.132898,
website='optimusweb.es',
phone_number='933396001',
pick_up=True, delivery=False,
image=image_util.decode_and_resize('test/mock/valls.jpg'),
category_id=category_service.get_id_by_name('Ferreteria')
),
Local(
name='Ferreteria Hijano',
description='Ferreteria de tota la vida',
postal_address='Carrer Progrés, 89, 08904 Hospitalet de Llobregat, Barcelona',
latitude=41.371972,
longitude=2.121046,
website='cadena88.com',
phone_number='934401879',
pick_up=True, delivery=False,
image=image_util.decode_and_resize('test/mock/hijano.jpg'),
category_id=category_service.get_id_by_name('Ferreteria')
),
Local(
name='Ferretería Rodríguez',
description='On trobaràs totes les eines que necessitis',
postal_address='Carrer de la Riera Blanca, 105, 08028 Hospitalet de Llobregat, Barcelona',
latitude=41.372091,
longitude=2.126644,
website=None,
phone_number='931626903',
pick_up=True, delivery=False,
image=image_util.decode_and_resize('test/mock/rodriguez.jpg'),
category_id=category_service.get_id_by_name('Ferreteria')
),
Local(
name='Farmàcia Valentines Gelabert',
description='La teva farmàcia de confiança',
postal_address='Carrer de Verdi, 7, 08012 Barcelona',
latitude=41.403156,
longitude=2.157599,
website=None,
phone_number='932136301',
pick_up=True, delivery=True,
image=image_util.decode_and_resize('test/mock/valentines.jpg'),
category_id=category_service.get_id_by_name('Farmacia')
),
Local(
name='Cal Juny',
description='Supermercat de tota la vida',
postal_address='Carrer de les Camèlies, 7, 08024 Barcelona',
latitude=41.410813,
longitude=2.158631,
website=None,
phone_number='932103020',
pick_up=True, delivery=False,
image=image_util.decode_and_resize('test/mock/juny.jpg'),
category_id=category_service.get_id_by_name('Supermercat')
),
]
db_session().bulk_save_objects(object_list)
db_session().commit()
else:
        log.info(f'Skipping dummy data for {Local.__tablename__} because the table is not empty.')
def get(local_id):
local = db_session().query(Local).filter_by(id=local_id).first()
return local if local else None
def get_all():
return db_session().query(Local).all()
def create(
name, postal_address, user_id, latitude, longitude,
description=None, website=None, phone_number=None, pick_up=True, delivery=False, category_id=None, image=None
):
try:
local = Local(
name=name,
description=description,
postal_address=postal_address,
latitude=latitude,
longitude=longitude,
website=website,
phone_number=phone_number,
pick_up=pick_up,
delivery=delivery,
image=image,
category_id=category_id
)
if image:
decoded_image = image_util.resize(image)
if decoded_image:
local.image = decoded_image
db_session().add(local)
db_session().commit()
# Set local to user
user = user_service.get(user_id)
if user:
user.local_id = local.id
db_session().commit()
return local.id, None
except IntegrityError as e:
return None, str(e.args[0]).replace('\n', ' ')
def get_id_by_name(name):
local = db_session().query(Local).filter_by(name=name).first()
return local.id
def get_all_coordinates():
local_dict = dict()
for local in db_session().query(Local).all():
local_dict[local.id] = dict(latitude=local.latitude, longitude=local.longitude)
return local_dict
def edit(
local_id,
name=None, description=None, postal_address=None, latitude=None, longitude=None,
website=None, phone_number=None, pick_up=None, delivery=None, category=None, image=None
):
local = get(local_id)
if local:
local.name = local.name if name is None else name
local.description = local.description if description is None else description
local.postal_address = local.postal_address if postal_address is None else postal_address
local.latitude = local.latitude if latitude is None else latitude
local.longitude = local.longitude if longitude is None else longitude
local.website = local.website if website is None else website
local.phone_number = local.phone_number if phone_number is None else phone_number
local.pick_up = local.pick_up if pick_up is None else pick_up
local.delivery = local.delivery if delivery is None else delivery
local.category = local.category if category is None else category
if image:
decoded_image = image_util.resize(image)
if decoded_image:
local.image = decoded_image
db_session().commit()
return True
else:
return False
def get_tags(local_id):
tags = []
local = db_session().query(Local).filter_by(id=local_id).first()
if local.pick_up:
tags.append(TAG_LOCAL_PICK_UP)
if local.delivery:
tags.append(TAG_LOCAL_DELIVERY)
if opening_hours_item_service.is_open(local_id):
tags.append(TAG_LOCAL_OPEN)
return tags
def get_from_id_list(local_id_list):
local_list = []
local_orm_list = db_session().query(Local).filter(Local.id.in_(local_id_list)).all()
for local_orm in local_orm_list:
local_list.append(dict(
id=local_orm.id,
name=local_orm.name,
description=local_orm.description,
category=None if not local_orm.category_id else local_orm.category.name,
punctuation=review_local_service.get_average(local_orm.id),
image=local_orm.image,
tags=get_tags(local_orm.id)
))
return local_list
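# Usage sketch (illustrative; the user id, address, and coordinates are placeholders):
#     local_id, error = create(
#         name='Example shop', postal_address='Somewhere 1, 08014 Barcelona',
#         user_id=1, latitude=41.37, longitude=2.13,
#     )
#     if error is None:
#         edit(local_id, description='Updated description')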
|
# -*- coding: utf-8 -*-
"""
Contains the implementation of the CSP algorithm. Developed for the training part of dataset IV-1-a of the BCI competition.
This version (V2) implements the algorithm for data with two classes.
@author: Alberto Zancanaro (Jesus)
@organization: University of Padua (Italy)
"""
#%%
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
import scipy.linalg as la
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
#%%
class FBCSP_V2():
def __init__(self, data_dict, fs, freqs_band = None, filter_order = 3):
self.fs = fs
self.train_sklearn = False
self.train_LDA = False
self.trials_dict = data_dict
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#Filter data section
# Filtered signal list
self.filtered_band_signal_list = []
# Frequencies band
        if freqs_band is None: self.freqs = np.linspace(4, 40, 10)
        else: self.freqs = freqs_band
self.filterBankFunction(filter_order)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# CSP filters evaluation
self.evaluateW()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def filterBankFunction(self, filter_order = 3):
"""
        Function that applies the filtering for each pair of frequencies in the list self.freqs.
        The results are saved in a list called self.filtered_band_signal_list. Each element of the list is a dictionary whose keys are the labels of the various classes.
Parameters
----------
filter_order : int, optional
The order of the filter. The default is 3.
"""
# Cycle for the frequency bands
for i in range(len(self.freqs) - 1):
# Dict for selected band that will contain the various filtered signals
filt_trial_dict = {}
# "Create" the band
band = [self.freqs[i], self.freqs[i+1]]
# Cycle for the classes
for key in self.trials_dict.keys():
# Filter the signal in each class for the selected frequency band
filt_trial_dict[key] = self.bandFilterTrials(self.trials_dict[key], band[0], band[1], filter_order = filter_order)
# Save the filtered signal in the list
self.filtered_band_signal_list.append(filt_trial_dict)
def bandFilterTrials(self, trials_matrix, low_f, high_f, filter_order = 3):
"""
        Apply a band-pass filter to the data. The filter implementation uses scipy.signal
Parameters
----------
trials_matrix : numpy matrix
Numpy matrix with the various EEG trials. The dimensions of the matrix must be n_trial x n_channel x n_samples
fs : int/double
Frequency sampling.
low_f : int/double
Low band of the pass band filter.
high_f : int/double
High band of the pass band filter..
filter_order : int, optional
Order of the filter. The default is 3.
Returns
-------
filter_trails_matrix : numpy matrix
Numpy matrix with the various filtered EEG trials. The dimensions of the matrix must be n_trial x n_channel x n_samples.
"""
        # Evaluate low bound and high bound in the normalized [0, 1] range
low_bound = low_f / (self.fs/2)
high_bound = high_f / (self.fs/2)
# Check input data
if(low_bound < 0): low_bound = 0
if(high_bound > 1): high_bound = 1
if(low_bound > high_bound): low_bound, high_bound = high_bound, low_bound
if(low_bound == high_bound): low_bound, high_bound = 0, 1
b, a = scipy.signal.butter(filter_order, [low_bound, high_bound], 'bandpass')
return scipy.signal.filtfilt(b, a, trials_matrix)
def evaluateW(self):
"""
Evaluate the spatial filter of the CSP algorithm for each filtered signal inside self.filtered_band_signal_list
Results are saved inside self.W_list_band.
        So for each filter band the algorithm creates n spatial filters, where n is the number of classes. Each filter is used to maximize the variance between one class and all other classes.
        If n = 2 only a single spatial filter is evaluated.
"""
self.W_list_band = []
for filt_trial_dict in self.filtered_band_signal_list:
# Retrieve the key (class)
keys = list(filt_trial_dict.keys())
# List for the filter for each class
W_list_class = []
for key in keys:
trials_1, trials_2 = self.retrieveBinaryTrials(filt_trial_dict, key)
# Evaluate covariance matrix for the two classes
cov_1 = self.trialCovariance(trials_1)
cov_2 = self.trialCovariance(trials_2)
R = cov_1 + cov_2
# Evaluate whitening matrix
P = self.whitening(R)
# The mean covariance matrices may now be transformed
cov_1_white = np.dot(P, np.dot(cov_1, np.transpose(P)))
cov_2_white = np.dot(P, np.dot(cov_2, np.transpose(P)))
# Since CSP requires the eigenvalues and eigenvector be sorted in descending order we find and sort the generalized eigenvalues and eigenvector
E, U = la.eig(cov_1_white, cov_2_white)
order = np.argsort(E)
order = order[::-1]
E = E[order]
U = U[:, order]
# The projection matrix (the spatial filter) may now be obtained
W = np.dot(np.transpose(U), P)
# Save the filter for each class
W_list_class.append(W)
if(len(keys) == 2): break
self.W_list_band.append(W_list_class)
def retrieveBinaryTrials(self, filt_trial_dict, key):
"""
        Function that returns all the trials of a given class in trials_1 and the trials of all other classes in trials_2
Parameters
----------
filt_trial_dict : dict
            Input dictionary. The keys must be the labels of the classes. Each item contains all the trials of the corresponding class
key : dictionary key
Key for trials_1.
Returns
-------
trials_1 : Numpy 3D matrix
            All the trials corresponding to the key passed.
trials_2 : Numpy 3D matrix
All other trials.
"""
# Retrieve trial associated with key
trials_1 = filt_trial_dict[key]
# Retrieve all other trials
dict_with_other_trials = {k:v for k,v in filt_trial_dict.items() if k not in [key]}
# Convert them in a numpy array
tmp_list = []
for key in dict_with_other_trials: tmp_list.append(dict_with_other_trials[key])
if(len(tmp_list) == 1):
tmp_key = list(dict_with_other_trials.keys())[0]
trials_2 = dict_with_other_trials[tmp_key]
else:
for i in range(len(tmp_list) - 1):
if(i == 0):
trials_2 = np.concatenate([tmp_list[0], tmp_list[1]], axis = 0)
else:
trials_2 = np.concatenate([trials_2, tmp_list[i + 1]], axis = 0)
return trials_1, trials_2
def trialCovariance(self, trials):
"""
Calculate the covariance for each trial and return their average
Parameters
----------
trials : numpy 3D-matrix
Trial matrix. The dimensions must be trials x channel x samples
Returns
-------
mean_cov : Numpy matrix
Mean of the covariance alongside channels.
"""
n_trials, n_channels, n_samples = trials.shape
covariance_matrix = np.zeros((n_trials, n_channels, n_channels))
for i in range(trials.shape[0]):
trial = trials[i, :, :]
covariance_matrix[i, :, :] = np.cov(trial)
mean_cov = np.mean(covariance_matrix, 0)
return mean_cov
def whitening(self, sigma, mode = 2):
"""
Calculate the whitening matrix for the input matrix sigma
Parameters
----------
sigma : Numpy square matrix
Input matrix.
mode : int, optional
            Select how to evaluate the whitening matrix. The default is 2.
Returns
-------
x : Numpy square matrix
Whitening matrix.
"""
[u, s, vh] = np.linalg.svd(sigma)
        if(mode != 1 and mode != 2): mode = 1  # fall back to mode 1 (ZCA whitening) for invalid values
if(mode == 1):
# Whitening constant: prevents division by zero
epsilon = 1e-5
# ZCA Whitening matrix: U * Lambda * U'
x = np.dot(u, np.dot(np.diag(1.0/np.sqrt(s + epsilon)), u.T))
else:
# eigenvalue decomposition of the covariance matrix
d, V = np.linalg.eigh(sigma)
fudge = 10E-18
# A fudge factor can be used so that eigenvectors associated with small eigenvalues do not get overamplified.
D = np.diag(1. / np.sqrt(d+fudge))
# whitening matrix
x = np.dot(np.dot(V, D), V.T)
return x
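# Usage sketch (illustrative; `trials_class_1` / `trials_class_2` are placeholder numpy
# arrays of shape n_trials x n_channels x n_samples):
#     data_dict = {'class_1': trials_class_1, 'class_2': trials_class_2}
#     fbcsp = FBCSP_V2(data_dict, fs=250, freqs_band=np.linspace(8, 30, 12))
#     spatial_filters_per_band = fbcsp.W_list_band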
|
#!@PYTHON_EXECUTABLE@
"""Example
@PYTHON_EXECUTABLE@ -m timemory.profiler -m 10 -- ./@FILENAME@
@PYTHON_EXECUTABLE@ -m timemory.line_profiler -v -- ./@FILENAME@
@PYTHON_EXECUTABLE@ -m timemory.trace -- ./@FILENAME@
"""
import sys
import numpy as np
def fib(n):
return n if n < 2 else (fib(n - 1) + fib(n - 2))
def inefficient(n):
print(f"inefficient: {n}")
a = 0
for i in range(n):
a += i
for j in range(n):
a += j
arr = np.random.rand(a * n * n * n)
sum = arr.sum()
print(f"sum: {sum}")
return sum
def run(nfib):
ret = 0
ret += fib(nfib)
ret += inefficient(nfib)
return ret
if __name__ == "__main__":
nfib = int(sys.argv[1]) if len(sys.argv) > 1 else 20
for i in range(5):
ans = run(nfib)
print(f"[{i}] fibonacci({nfib}) = {ans}")
|
#!/bin/python3
# author: Jan Hybs
import os
import subprocess
import sys
import threading
from time import monotonic as _time
def construct(start, *rest, shell=False):
args = start.split() if type(start) is str else start
args.extend(rest)
return ' '.join(args) if shell else args
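# Examples (illustrative):
#     construct('git clone', 'repo')             -> ['git', 'clone', 'repo']
#     construct('git clone', 'repo', shell=True) -> 'git clone repo'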
def create_execute_command(logger_method, stdout):
def execute(*args, **kwargs):
command = construct(*args, shell=kwargs.get('shell', False))
cwd = kwargs.pop('dir', '.')
logger_method('$> %s', construct(*args, shell=True))
sys.stdout.flush()
if os.path.exists(cwd):
return subprocess.Popen(command, **kwargs, cwd=cwd, stdout=stdout, stderr=subprocess.STDOUT)
else:
return subprocess.Popen(command, **kwargs, stdout=stdout, stderr=subprocess.STDOUT)
return execute
class ComplexSemaphore(object):
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = threading.Condition(threading.Lock())
self._value = value
self._limit = value
@property
def value(self):
return self._value
@property
def limit(self):
return self._limit
def acquire(self, blocking=True, timeout=None, value=1):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
The value parameter specifies how many units to take from the semaphore
default value is 1.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
if value > self._limit:
raise ValueError("can't aquire the lock because specified value is greater then max value")
rc = False
endtime = None
with self._cond:
while self._value < value:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value -= value
rc = True
return rc
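    # Usage sketch (illustrative): a semaphore with 4 units where one job takes 3;
    # `run_heavy_job` is a hypothetical work function.
    #     sem = ComplexSemaphore(4)
    #     if sem.acquire(value=3, timeout=5):
    #         try:
    #             run_heavy_job()
    #         finally:
    #             sem.release(value=3)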
__enter__ = acquire
def release(self, value=1):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self._cond:
self._value += value
self._cond.notify()
def __exit__(self, t, v, tb):
self.release() |
import pytest
from typing import Tuple, List
import trees
import players
import games
##### TREES #####
class TreesTest:
def test_contains(self):
self.tree.insert('jon', (250, 250))
assert 'jon' in self.tree
assert 'joe' not in self.tree
def test_tree_contains_point(self):
self.tree.insert('jon', (250, 250))
assert self.tree.contains_point((250, 250))
assert not self.tree.contains_point((250, 251))
def test_insert_out_of_bounds(self):
try:
self.tree.insert('jon', (501, 250))
except trees.OutOfBoundsError:
return
raise Exception('this should have raised an OutOfBoundsError')
def test_insert_collision(self):
self.tree.insert('jon', (250, 250))
try:
self.tree.insert('jon', (250, 250))
except trees.OutOfBoundsError:
return
raise Exception('this should have raised an OutOfBoundsError')
def test_remove(self):
self.tree.insert('jon', (250, 250))
self.tree.remove('buddy')
assert 'jon' in self.tree
self.tree.remove('jon')
assert 'jon' not in self.tree
def test_remove_point(self):
self.tree.insert('jon', (250, 250))
self.tree.remove_point((250, 251))
assert 'jon' in self.tree
self.tree.remove_point((250, 250))
assert 'jon' not in self.tree
def test_move(self):
self.tree.insert('jon', (250, 250))
self.tree.move('jon', 'N', 10)
assert self.tree.contains_point((250, 240))
def test_move_collision(self):
self.tree.insert('jon', (250, 250))
self.tree.insert('joe', (250, 240))
try:
self.tree.move('jon', 'N', 10)
except trees.OutOfBoundsError:
return
raise Exception('this should have raised an OutOfBoundsError')
def test_move_out_of_bounds(self):
self.tree.insert('jon', (250, 250))
try:
self.tree.move('jon', 'E', 251)
except trees.OutOfBoundsError:
return
raise Exception('this should have raised an OutOfBoundsError')
def test_move_point(self):
self.tree.insert('jon', (250, 250))
self.tree.move_point((250, 250), 'N', 10)
assert self.tree.contains_point((250, 240))
def test_move_point_collision(self):
self.tree.insert('jon', (250, 250))
self.tree.insert('joe', (250, 240))
try:
self.tree.move_point((250, 250), 'N', 10)
except trees.OutOfBoundsError:
return
raise Exception('this should have raised an OutOfBoundsError')
def test_move_point_out_of_bounds(self):
self.tree.insert('jon', (250, 250))
try:
self.tree.move_point((250, 250), 'E', 251)
except trees.OutOfBoundsError:
return
raise Exception('this should have raised an OutOfBoundsError')
def test_names_in_range(self):
self.tree.insert('jon', (250, 250))
self.tree.insert('joe', (300, 300))
assert set(self.tree.names_in_range((200, 200), 'SE', 150)) == {'jon', 'joe'}
assert set(self.tree.names_in_range((350, 350), 'NW', 150)) == {'jon', 'joe'}
assert set(self.tree.names_in_range((0, 500), 'NE', 1000)) == {'jon', 'joe'}
assert set(self.tree.names_in_range((200, 200), 'SE', 90)) == {'jon'}
assert set(self.tree.names_in_range((350, 350), 'NW', 90)) == {'joe'}
assert len(self.tree.names_in_range((350, 350), 'NW', 10)) == 0
def test_is_empty(self):
assert self.tree.is_empty()
self.tree.insert('jon', (250, 250))
assert not self.tree.is_empty()
def test_is_leaf(self):
assert self.tree.is_leaf()
self.tree.insert('jon', (250, 250))
assert self.tree.is_leaf()
self.tree.insert('joe', (300, 300))
assert not self.tree.is_leaf()
class TestQuadTree(TreesTest):
def setup_method(self):
self.tree = trees.QuadTree((250, 250))
def test_height(self):
assert self.tree.height() == 1
self.tree.insert('jon', (250, 250))
assert self.tree.height() == 1
self.tree.insert('joe', (300, 300))
assert self.tree.height() == 2
self.tree.insert('job', (50, 50))
assert self.tree.height() == 3
def test_depth(self):
self.tree.insert('jon', (250, 250))
self.tree.insert('joe', (300, 300))
self.tree.insert('job', (50, 50))
jon = self.tree._nw._se
joe = self.tree._se
job = self.tree._nw._nw
assert self.tree.depth(jon) == 2
assert self.tree.depth(joe) == 1
assert jon.depth(job) is None
assert self.tree.depth(self.tree) is None
class Test2DTree(TreesTest):
def setup_method(self):
self.tree = trees.TwoDTree((0, 0), (500, 500))
def test_height(self):
assert self.tree.height() == 1
self.tree.insert('jon', (250, 250))
assert self.tree.height() == 1
self.tree.insert('joe', (300, 300))
assert self.tree.height() == 2
self.tree.insert('job', (50, 50))
assert self.tree.height() == 2
def test_depth(self):
self.tree.insert('jon', (250, 250))
self.tree.insert('joe', (300, 300))
self.tree.insert('job', (50, 50))
self.tree.insert('minnie_mouse', (50, 100))
jon = self.tree
joe = self.tree._gt
job = self.tree._lt
minnie = job._gt
assert jon.depth(jon) is None
assert joe.depth(minnie) is None
assert jon.depth(minnie) == 2
assert job.depth(minnie) == 1
##### PLAYERS #####
class PlayersTest:
def test_init(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
assert player._name == 'eric'
assert player._vision == 1
assert player._speed == 2
assert player._game == self.game
assert player._colour == 'green'
assert player._location == (100, 100)
assert player._points == 0
assert player._targets == []
assert player._enemies == []
assert player._direction in 'NSEW'
def test_set_colour(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
player.set_colour('purple')
assert player._colour == 'purple'
def test_increase_points(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
points = player._points
player.increase_points(20)
assert points + 20 == player._points
def test_get_points(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
player._points = 33
assert player.get_points() == 33
def test_select_target(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
targets = set(player._targets)
player.select_target('morton')
assert set(player._targets) - targets == {'morton'}
def test_ignore_target(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
player._targets = ['gill', 'eoin']
player.ignore_target('gill')
assert player._targets == ['eoin']
def test_get_targets(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
player._targets = ['gill', 'eoin']
assert set(player.get_targets()) == {'gill', 'eoin'}
def test_select_enemy(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
enemies = set(player._enemies)
player.select_enemy('morton')
assert set(player._enemies) - enemies == {'morton'}
def test_ignore_enemy(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
player._enemies = ['gill', 'eoin']
player.ignore_enemy('gill')
assert player._enemies == ['eoin']
def test_get_enemies(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
player._enemies = ['gill', 'eoin']
assert set(player.get_enemies()) == {'gill', 'eoin'}
def test_reverse_direction(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
dirs = 'NSNEWE'
direction = player._direction
player.reverse_direction()
assert player._direction == dirs[dirs.index(direction) + 1]
def test_set_speed(self):
player = players.Player('eric', 1, 2, self.game, 'green', (100, 100))
player.set_speed(1)
assert player._speed == 1
def _reset_player(self, player: players.Player, loc: Tuple[int, int]):
player._location = loc
player._targets = []
player._enemies = []
player._vision = 100
self.game.field.remove(player._name)
self.game.field.insert(player._name, loc)
def _move_into_starting_position(self,
coords: List[Tuple[int, int]],
targets: List[int],
enemies: List[int]) -> Tuple[players.Player, List[players.Player]]:
player, *others = self.game._players.values()
self._reset_player(player, (250, 250))
for i, (coord, other) in enumerate(zip(coords, others)):
self._reset_player(other, coord)
if i in targets:
player._targets.append(other._name)
if i in enemies:
player._enemies.append(other._name)
return player, others
def test_next_direction_no_best(self):
coords = [(50, 50), (50, 450), (450, 450), (450, 50)]
targets = []
enemies = []
player, _ = self._move_into_starting_position(coords, targets, enemies)
assert player.next_direction() == set('NSEW')
assert player._direction in set('NSEW')
def test_move_no_collision(self):
coords = [(50, 50), (50, 450), (450, 450), (450, 50)]
targets = []
enemies = []
player, _ = self._move_into_starting_position(coords, targets, enemies)
player._speed = 10
player._direction = 'E'
player.move()
assert player._location == (260, 250)
def test_move_collision(self):
coords = [(260, 250), (50, 450), (450, 450), (450, 50)]
targets = []
enemies = []
player, others = self._move_into_starting_position(coords, targets, enemies)
player._speed = 10
player._direction = 'E'
player.move()
assert player._location == (250, 250)
assert player._direction == 'W'
def test_move_out_of_bounds(self):
coords = [(50, 50), (50, 450), (450, 450), (450, 50)]
targets = []
enemies = []
player, _ = self._move_into_starting_position(coords, targets, enemies)
player._speed = 500
player._direction = 'E'
player.move()
assert player._location == (250, 250)
assert player._direction == 'W'
class TestPlayersQuadTree(PlayersTest):
def setup_method(self):
self.game = games.Tag(5, trees.QuadTree((250, 250)), 5, 3, 4)
class TestPlayers2DTree(PlayersTest):
def setup_method(self):
self.game = games.Tag(5, trees.TwoDTree((0, 0), (500, 500)), 5, 3, 4)
##### GAMES #####
### TAG ###
class TagTests:
def test_init(self):
game = games.Tag(10, self.tree, 5, 3, 4)
assert len(game._players) == 10
assert all(name in game.field for name in game._players)
assert game._it in game._players
assert game._players[game._it]._colour == 'purple'
def test_handle_collision_reverse_direction(self):
game = games.Tag(10, self.tree, 5, 3, 4)
player1, player2 = list(game._players.values())[:2]
dir1, dir2 = player1._direction, player2._direction
game.handle_collision(player1._name, player2._name)
assert dir1 != player1._direction
assert dir2 != player2._direction
def test_handle_collision_one_is_it(self):
game = games.Tag(10, self.tree, 5, 3, 4)
it = game._it
it_points = game._players[it].get_points()
not_it = next(p for p in game._players if p != game._it)
game.handle_collision(it, not_it)
assert game._it == not_it
assert it_points + 1 == game._players[game._it].get_points()
def test_check_for_winner_no_winner(self):
game = games.Tag(10, self.tree, 5, 3, 4)
assert game.check_for_winner() is None
def test_check_for_winner_one_left(self):
game = games.Tag(1, self.tree, 5, 3, 4)
assert game.check_for_winner() == list(game._players)[0]
def test_check_for_winner_two_left(self):
game = games.Tag(2, self.tree, 5, 3, 4)
winner = next(p for p in game._players if p != game._it)
assert game.check_for_winner() == winner
class TestTagQuadTree(TagTests):
def setup_method(self):
self.tree = trees.QuadTree((250, 250))
class TestTag2dTree(TagTests):
def setup_method(self):
self.tree = trees.TwoDTree((0, 0), (500, 500))
### ZOMBIE TAG ###
class ZombieTagTests:
def test_init(self):
game = games.ZombieTag(10, self.tree, 5, 3, 4)
assert len(game._humans) == 10
assert all(name in game.field for name in game._zombies)
assert all(name in game.field for name in game._humans)
assert len(game._zombies.keys() & game._humans.keys()) == 0
assert all(player._colour == 'green' for _, player in game._humans.items())
assert len(game._zombies) == 1
assert game._zombies.popitem()[1]._colour == 'purple'
def test_handle_collision_reverse_direction(self):
game = games.ZombieTag(10, self.tree, 5, 3, 4)
player1, player2 = list(game._humans.values())[:2]
dir1, dir2 = player1._direction, player2._direction
game.handle_collision(player1._name, player2._name)
assert dir1 != player1._direction
assert dir2 != player2._direction
def test_handle_collision_zombie_attack(self):
game = games.ZombieTag(10, self.tree, 5, 3, 4)
human = list(game._humans.values())[0]
zombie = list(game._zombies.values())[0]
game.handle_collision(human._name, zombie._name)
assert zombie._name in game._zombies
assert human._name in game._zombies
assert human._name not in game._humans
def test_check_for_winner_humans_win(self):
game = games.ZombieTag(2, self.tree, 5, 3, 4)
assert game.check_for_winner() == 'humans'
def test_check_for_winner_zombies_win(self):
game = games.ZombieTag(1, self.tree, 5, 3, 4)
human = list(game._humans.values())[0]
zombie = list(game._zombies.values())[0]
game.handle_collision(human._name, zombie._name)
assert game.check_for_winner() == 'zombies'
class TestZombieTagQuadTree(ZombieTagTests):
def setup_method(self):
self.tree = trees.QuadTree((250, 250))
class TestZombieTag2dTree(ZombieTagTests):
def setup_method(self):
self.tree = trees.TwoDTree((0, 0), (500, 500))
### ELIMINATION TAG ###
class EliminationTagTests:
def test_init(self):
game = games.EliminationTag(10, self.tree, 3, 4)
assert len(game._players) == 10
assert all(name in game.field for name in game._players)
assert all(player._colour == 'random' for _, player in game._players.items())
player = list(game._players.values())[0]
players = set()
while player not in players:
players.add(player)
player = game._players[player.get_targets()[0]]
# check to make sure that all players are targeting each other correctly
assert len(players) == 10
def test_handle_collision_do_not_eliminate(self):
game = games.EliminationTag(10, self.tree, 3, 4)
player1 = list(game._players)[0]
player2 = next(name for name, p in game._players.items() if player1 not in p.get_targets())
dir1 = game._players[player1]._direction
dir2 = game._players[player2]._direction
game.handle_collision(player1, player2)
assert player1 in game._players
assert player2 in game._players
def test_handle_collision_one_is_target(self):
game = games.EliminationTag(10, self.tree, 3, 4)
player1 = list(game._players)[0]
player2 = game._players[player1].get_targets()[0]
p2targets = game._players[player2].get_targets()
points = game._players[player1].get_points()
game.handle_collision(player1, player2)
assert player1 in game._players
assert player2 not in game._players
assert game._players[player1].get_targets()[0] == p2targets[0]
assert game._players[player1].get_points() - 1 == points
def test_check_for_winner_no_winner(self):
game = games.EliminationTag(10, self.tree, 3, 4)
assert game.check_for_winner() is None
def test_check_for_winner_one_winner(self):
game = games.EliminationTag(10, self.tree, 3, 4)
player1 = list(game._players)[0]
game._players[player1].increase_points(1)
assert game.check_for_winner() == player1
class TestEliminationTagQuadTree(EliminationTagTests):
def setup_method(self):
self.tree = trees.QuadTree((250, 250))
class TestEliminationTag2dTree(EliminationTagTests):
def setup_method(self):
self.tree = trees.TwoDTree((0, 0), (500, 500))
if __name__ == '__main__':
pytest.main(['tests.py'])
|
#!/usr/bin/python
# Classification (U)
"""Program: get_disks.py
Description: Unit testing of get_disks in elastic_class class.
Usage:
test/unit/elastic_class/get_disks.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import elastic_class
import version
__version__ = version.__version__
class Repo(object):
"""Class: Repo
Description: Class representation of the cat class.
Methods:
__init__
allocation
"""
def __init__(self):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
"""
disk_tot = "disk.total"
disk_avl = "disk.avail"
disk_use = "disk.used"
disk_per = "disk.percent"
disk_ind = "disk.indices"
self.format = None
self.disks = [
{"node": "nodename", disk_tot: "100gb", "shards": "101",
disk_avl: "75gb", disk_use: "20gb", "host": "servername",
disk_per: "21", "ip": "ip.addr", disk_ind: "15gb"},
{"node": "nodename2", disk_tot: "110gb", "shards": "101",
disk_avl: "65gb", disk_use: "30gb", "host": "servername2",
disk_per: "31", "ip": "ip.addr2", disk_ind: "20gb"}]
def allocation(self, **kwargs):
"""Method: allocation
Description: Stub holder for cat.allocation method.
Arguments:
"""
self.format = kwargs.get("format", None)
return self.disks
class Elasticsearch(object):
"""Class: ElasticSearch
Description: Class representation of the Elasticsearch class.
Methods:
__init__
"""
def __init__(self, host_list, port=9200):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
"""
self.hosts = host_list
self.port = port
self.cat = Repo()
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_default
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
disk_tot = "disk.total"
disk_avl = "disk.avail"
disk_use = "disk.used"
disk_per = "disk.percent"
disk_ind = "disk.indices"
self.host_list = ["host1", "host2"]
self.repo = "reponame"
self.els = Elasticsearch(self.host_list)
self.results = [
{"node": "nodename", disk_tot: "100gb", "shards": "101",
disk_avl: "75gb", disk_use: "20gb", "host": "servername",
disk_per: "21", "ip": "ip.addr", disk_ind: "15gb"},
{"node": "nodename2", disk_tot: "110gb", "shards": "101",
disk_avl: "65gb", disk_use: "30gb", "host": "servername2",
disk_per: "31", "ip": "ip.addr2", disk_ind: "20gb"}]
def test_default(self):
"""Function: test_default
Description: Test with default settings.
Arguments:
"""
self.assertEqual(elastic_class.get_disks(self.els), self.results)
if __name__ == "__main__":
unittest.main()
|
'''
Created on Jun 5, 2021
@author: immanueltrummer
'''
from cp.fact import Fact
from cp.query import QueryEngine
from gym import spaces
from sentence_transformers import SentenceTransformer, util
import gym
import logging
import numpy as np
import torch
from cp.sum import SumGenerator, SumEvaluator
class EmbeddingGraph():
""" Graph connecting nodes with similar label embeddings. """
def __init__(self, labels, degree):
""" Generate graph with given labels.
Args:
labels: text labels for nodes
degree: number of neighbors per node
"""
model = SentenceTransformer('paraphrase-MiniLM-L12-v2')
self.embeddings = model.encode(labels, convert_to_tensor=True)
cosine_sims = util.pytorch_cos_sim(self.embeddings, self.embeddings)
self.neighbors = []
nr_labels = len(labels)
for i in range(nr_labels):
prefs = cosine_sims[i,i:nr_labels]
k = min(prefs.shape[0], degree)
_, indices = torch.topk(prefs, k)
l_indices = indices.tolist()
l_indices += [0] * (degree - k)
self.neighbors.append(l_indices)
def get_embedding(self, node_id):
""" Returns embedding of given node.
Args:
node_id: retrieve embedding for this node
Returns:
transformer embedding for given node
"""
return self.embeddings[node_id]
def get_neighbor(self, node, n_id):
""" Retrieve specific neighbor of node.
Args:
node: ID of node whose neighbors to examine
n_id: index of node neighbor to retrieve
Returns:
            the n_id-th neighbor of the given node
"""
return self.neighbors[node][n_id]
def get_reachable(self, start, steps):
""" Retrieve all nodes reachable within a given number of steps.
Args:
start: ID of start node
steps: maximal number of steps
Returns:
all nodes reachable within given number of steps
"""
reachable = set([start])
for _ in range(steps):
boundary = [nb for n in reachable for nb in self.neighbors[n]]
reachable.update(boundary)
return reachable
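# Illustrative sketch (hypothetical labels, not part of the original module): with
# degree=2 each node keeps its two most cosine-similar labels as neighbors, e.g.
#   graph = EmbeddingGraph(['average price', 'maximum price', 'count of sales'], degree=2)
#   graph.get_neighbor(0, 1)   # index of the second-closest neighbor of node 0
#   graph.get_reachable(0, 2)  # set of node IDs reachable from node 0 within 2 steps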
class PickingEnv(gym.Env):
""" Environment for selecting facts for a data summary. """
def __init__(self, connection, table, dim_cols,
agg_cols, cmp_pred, nr_facts, nr_preds,
degree, max_steps, preamble, dims_tmp,
aggs_txt, all_preds, cache):
""" Read database to initialize environment.
Args:
connection: connection to database
table: name of table to extract facts
dim_cols: names of all property columns
agg_cols: name of columns for aggregation
cmp_pred: compare data satisfying this predicate
nr_facts: at most that many facts in description
nr_preds: at most that many predicates per fact
degree: degree for all transition graphs
max_steps: number of steps per episode
preamble: starts each data summary
dims_tmp: assigns each dimension to text template
aggs_txt: assigns each aggregate to text snippet
all_preds: all possible predicates
cache: caches query results
"""
super(PickingEnv, self).__init__()
self.connection = connection
self.table = table
self.dim_cols = dim_cols
self.agg_cols = agg_cols
self.cmp_pred = cmp_pred
self.nr_facts = nr_facts
self.nr_preds = nr_preds
self.degree = degree
self.max_steps = max_steps
self.preamble = preamble
self.dims_tmp = dims_tmp
self.aggs_txt = aggs_txt
self.cache = cache
self.q_engine = QueryEngine(
connection, table, cmp_pred, cache)
self.s_gen = SumGenerator(
all_preds, preamble, dim_cols,
dims_tmp, agg_cols, aggs_txt,
self.q_engine)
self.s_eval = SumEvaluator()
self.agg_graph = EmbeddingGraph(agg_cols, degree)
self.all_preds = all_preds
pred_labels = [f'{p} is {v}' for p, v in self.all_preds]
self.pred_graph = EmbeddingGraph(pred_labels, degree)
self.cur_facts = []
for _ in range(nr_facts):
self.cur_facts.append(Fact(nr_preds))
self.props_per_fact = nr_preds + 1
action_dims = [nr_facts, self.props_per_fact, degree]
self.action_space = spaces.MultiDiscrete(action_dims)
self.nr_props = nr_facts * self.props_per_fact
self.observation_space = spaces.Box(
low=-10, high=10, shape=(self.nr_props, 384), dtype=np.float32)
        self.eval_s = 0
        self.nr_t_steps = 0
        self.text_to_reward = {}
        self.reset()
self._evaluate()
def best_summary(self):
""" Returns data summary with highest reward. """
return max(self.text_to_reward, key=self.text_to_reward.get)
def topk_summaries(self, k, best):
""" Returns the top-k data summaries.
Args:
k: retrieve that many summaries
best: return best (not worst) summaries
Returns:
returns up to k summaries with rewards
"""
sorted_sums = sorted(self.text_to_reward.items(),
key=lambda s: s[1], reverse=best)
if len(self.text_to_reward) < k:
return sorted_sums
else:
return sorted_sums[0:k]
def step(self, action):
""" Change fact or trigger evaluation. """
self.nr_ep_steps += 1
self.nr_t_steps += 1
self._expand_scope()
logging.debug(f'Step {self.nr_t_steps} (in episode: {self.nr_ep_steps})')
if self.nr_ep_steps >= self.max_steps:
done = True
reward = 0
else:
fact_idx = action[0]
prop_idx = action[1]
nb_idx = action[2]
fact = self.cur_facts[fact_idx]
cur_val = fact.get_prop(prop_idx)
if fact.is_agg(prop_idx):
new_val = self.agg_graph.get_neighbor(cur_val, nb_idx)
else:
new_val = self.pred_graph.get_neighbor(cur_val, nb_idx)
self.cur_facts[fact_idx].change(prop_idx, new_val)
done = False
reward = self._evaluate()
return self._observe(), reward, done, {}
def reset(self):
""" Reset data summary to default. """
self.nr_ep_steps = 0
for fact in self.cur_facts:
fact.reset()
return self._observe()
def statistics(self):
""" Returns performance statistics.
Returns:
Dictionary with performance statistics
"""
stats = self.q_engine.statistics().copy()
stats.update(self.s_gen.statistics())
stats.update(self.s_eval.statistics())
return stats
    def _evaluate(self):
        """ Evaluates the current summary and records its reward. """
        text = self.s_gen.generate(self.cur_facts)
        reward = self.s_eval.evaluate(text)
        self.text_to_reward[text] = reward
        return reward
def _expand_scope(self):
""" Expands scope for caching. """
pred_ids = []
for fact in self.cur_facts:
for pred_idx in fact.get_preds():
pred_ids += self.pred_graph.get_reachable(pred_idx, 1)
for pred_id in pred_ids:
pred = self.all_preds[pred_id]
self.cache.expand_scope(pred)
def _observe(self):
""" Returns observations for learning agent. """
components = []
for fact_ctr in range(self.nr_facts):
fact = self.cur_facts[fact_ctr]
preds = fact.get_preds()
for pred_idx in preds:
pred_emb = self.pred_graph.get_embedding(pred_idx)
components.append(pred_emb)
agg_idx = fact.get_agg()
agg_emb = self.agg_graph.get_embedding(agg_idx)
components.append(agg_emb)
return torch.stack(components, dim=0).to('cpu').numpy() |
"""
analyst.scale
Corey Rayburn Yung <[email protected]>
Copyright 2021, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
Contents:
"""
import abc
import dataclasses
from typing import (Any, Callable, ClassVar, Dict, Iterable, List, Mapping,
Optional, Sequence, Tuple, Type, Union)
import numpy as np
import pandas as pd
import sourdough
from . import base
import simplify
@dataclasses.dataclass
class Scale(sourdough.project.Step):
"""Wrapper for a Technique.
An instance will try to return attributes from 'contents' if the attribute
is not found in the Step instance.
Args:
name (str): designates the name of a class instance that is used for
internal referencing throughout sourdough. For example, if a
sourdough instance needs settings from a Configuration instance,
'name' should match the appropriate section name in a Configuration
instance. Defaults to None.
contents (Technique): stored Technique instance used by the 'implement'
method.
parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
when the 'implement' method is called. Defaults to an empty dict.
parallel (ClassVar[bool]): indicates whether this Component design is
meant to be at the end of a parallel workflow structure. Defaults to
True.
"""
name: str = 'scale'
contents: sourdough.project.Technique = None
parameters: Union[Mapping[str, Any], base.Parameters] = base.Parameters()
parallel: ClassVar[bool] = True
@dataclasses.dataclass
class MinMaxScale(simplify.components.SklearnTransformer):
"""Wrapper for a Technique.
Args:
name (str): designates the name of a class instance that is used for
internal referencing throughout sourdough. For example, if a
sourdough instance needs settings from a Configuration instance,
'name' should match the appropriate section name in a Configuration
instance. Defaults to None.
contents (Technique): stored Technique instance used by the 'implement'
method.
iterations (Union[int, str]): number of times the 'implement' method
should be called. If 'iterations' is 'infinite', the 'implement'
method will continue indefinitely unless the method stops further
iteration. Defaults to 1.
parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
when the 'implement' method is called. Defaults to an empty dict.
parallel (ClassVar[bool]): indicates whether this Component design is
meant to be at the end of a parallel workflow structure. Defaults to
False.
"""
name: str = 'min_max'
contents: Union[Callable, Type, object, str] = 'MinMaxScaler'
iterations: Union[int, str] = 1
parameters: Union[Mapping[str, Any],
base.Parameters] = base.Parameters(
name = 'min_max_scale',
default = {'copy': False},
selected = ['copy'])
module: str = 'sklearn.preprocessing'
parallel: ClassVar[bool] = False
scalers = sourdough.types.Library()
@dataclasses.dataclass
class MaxAbsoluteScale(simplify.components.SklearnTransformer):
"""Wrapper for a Technique.
Args:
name (str): designates the name of a class instance that is used for
internal referencing throughout sourdough. For example, if a
sourdough instance needs settings from a Configuration instance,
'name' should match the appropriate section name in a Configuration
instance. Defaults to None.
contents (Technique): stored Technique instance used by the 'implement'
method.
iterations (Union[int, str]): number of times the 'implement' method
should be called. If 'iterations' is 'infinite', the 'implement'
method will continue indefinitely unless the method stops further
iteration. Defaults to 1.
parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
when the 'implement' method is called. Defaults to an empty dict.
parallel (ClassVar[bool]): indicates whether this Component design is
meant to be at the end of a parallel workflow structure. Defaults to
False.
"""
name: str = 'max_absolute'
contents: Union[Callable, Type, object, str] = 'MaxAbsScaler'
iterations: Union[int, str] = 1
parameters: Union[Mapping[str, Any],
base.Parameters] = base.Parameters(
name = 'max_absolute_scale',
default = {'copy': False},
selected = ['copy'])
module: str = 'sklearn.preprocessing'
parallel: ClassVar[bool] = False
@dataclasses.dataclass
class NormalizeScale(simplify.components.SklearnTransformer):
"""Wrapper for a Technique.
Args:
name (str): designates the name of a class instance that is used for
internal referencing throughout sourdough. For example, if a
sourdough instance needs settings from a Configuration instance,
'name' should match the appropriate section name in a Configuration
instance. Defaults to None.
contents (Technique): stored Technique instance used by the 'implement'
method.
iterations (Union[int, str]): number of times the 'implement' method
should be called. If 'iterations' is 'infinite', the 'implement'
method will continue indefinitely unless the method stops further
iteration. Defaults to 1.
parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
when the 'implement' method is called. Defaults to an empty dict.
parallel (ClassVar[bool]): indicates whether this Component design is
meant to be at the end of a parallel workflow structure. Defaults to
False.
"""
name: str = 'normalize'
contents: Union[Callable, Type, object, str] = 'Normalizer'
iterations: Union[int, str] = 1
parameters: Union[Mapping[str, Any],
base.Parameters] = base.Parameters(
name = 'normalize_scale',
default = {'copy': False},
selected = ['copy'])
module: str = 'sklearn.preprocessing'
parallel: ClassVar[bool] = False
@dataclasses.dataclass
class QuantileScale(simplify.components.SklearnTransformer):
"""Wrapper for a Technique.
Args:
name (str): designates the name of a class instance that is used for
internal referencing throughout sourdough. For example, if a
sourdough instance needs settings from a Configuration instance,
'name' should match the appropriate section name in a Configuration
instance. Defaults to None.
contents (Technique): stored Technique instance used by the 'implement'
method.
iterations (Union[int, str]): number of times the 'implement' method
should be called. If 'iterations' is 'infinite', the 'implement'
method will continue indefinitely unless the method stops further
iteration. Defaults to 1.
parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
when the 'implement' method is called. Defaults to an empty dict.
parallel (ClassVar[bool]): indicates whether this Component design is
meant to be at the end of a parallel workflow structure. Defaults to
False.
"""
name: str = 'quantile'
contents: Union[Callable, Type, object, str] = 'QuantileTransformer'
iterations: Union[int, str] = 1
parameters: Union[Mapping[str, Any],
base.Parameters] = base.Parameters(
name = 'quantile_scale',
default = {'copy': False},
selected = ['copy'])
module: str = 'sklearn.preprocessing'
parallel: ClassVar[bool] = False
@dataclasses.dataclass
class RobustScale(simplify.components.SklearnTransformer):
"""Wrapper for a Technique.
Args:
name (str): designates the name of a class instance that is used for
internal referencing throughout sourdough. For example, if a
sourdough instance needs settings from a Configuration instance,
'name' should match the appropriate section name in a Configuration
instance. Defaults to None.
contents (Technique): stored Technique instance used by the 'implement'
method.
iterations (Union[int, str]): number of times the 'implement' method
should be called. If 'iterations' is 'infinite', the 'implement'
method will continue indefinitely unless the method stops further
iteration. Defaults to 1.
parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
when the 'implement' method is called. Defaults to an empty dict.
parallel (ClassVar[bool]): indicates whether this Component design is
meant to be at the end of a parallel workflow structure. Defaults to
False.
"""
name: str = 'robust'
contents: Union[Callable, Type, object, str] = 'RobustScaler'
iterations: Union[int, str] = 1
parameters: Union[Mapping[str, Any],
base.Parameters] = base.Parameters(
name = 'robust_scale',
default = {'copy': False},
selected = ['copy'])
module: str = 'sklearn.preprocessing'
parallel: ClassVar[bool] = False
@dataclasses.dataclass
class StandardScale(simplify.components.SklearnTransformer):
"""Wrapper for a Technique.
Args:
name (str): designates the name of a class instance that is used for
internal referencing throughout sourdough. For example, if a
sourdough instance needs settings from a Configuration instance,
'name' should match the appropriate section name in a Configuration
instance. Defaults to None.
contents (Technique): stored Technique instance used by the 'implement'
method.
iterations (Union[int, str]): number of times the 'implement' method
should be called. If 'iterations' is 'infinite', the 'implement'
method will continue indefinitely unless the method stops further
iteration. Defaults to 1.
parameters (Mapping[Any, Any]]): parameters to be attached to 'contents'
when the 'implement' method is called. Defaults to an empty dict.
parallel (ClassVar[bool]): indicates whether this Component design is
meant to be at the end of a parallel workflow structure. Defaults to
False.
"""
name: str = 'standard'
contents: Union[Callable, Type, object, str] = 'StandardScaler'
iterations: Union[int, str] = 1
parameters: Union[Mapping[str, Any],
base.Parameters] = base.Parameters(
name = 'standard_scale',
default = {'copy': False},
selected = ['copy'])
module: str = 'sklearn.preprocessing'
parallel: ClassVar[bool] = False
# @dataclasses.dataclass
# class GaussScale(simplify.components.SklearnTransformer):
# """Transforms data columns to more gaussian distribution.
# The particular method applied is chosen between 'box-cox' and 'yeo-johnson'
# based on whether the particular data column has values below zero.
# Args:
# step(str): name of step used.
# parameters(dict): dictionary of parameters to pass to selected
# algorithm.
# name(str): name of class for matching settings in the Idea instance
# and for labeling the columns in files exported by Critic.
# auto_draft(bool): whether 'finalize' method should be called when
# the class is instanced. This should generally be set to True.
# """
# name: str = 'box-cox & yeo-johnson'
# contents: str = None
# iterations: Union[int, str] = 1
# parameters: Union[Mapping[str, Any].
# base.Parameters] = base.Parameters(
# name = 'gauss_scale',
# default = {'rescaler': 'standard'})
# module: str = None
# parallel: ClassVar[bool] = False
# def __post_init__(self) -> None:
# self.idea_sections = ['analyst']
# super().__post_init__()
# return self
# def draft(self) -> None:
# self.rescaler = self.parameters['rescaler'](
# copy = self.parameters['copy'])
# del self.parameters['rescaler']
# self._publish_parameters()
# self.positive_tool = self.workers['box_cox'](
# method = 'box_cox', **self.parameters)
# self.negative_tool = self.workers['yeo_johnson'](
# method = 'yeo_johnson', **self.parameters)
# return self
# def publish(self, dataset, columns = None):
# if not columns:
# columns = dataset.numerics
# for column in columns:
# if dataset.x[column].min() >= 0:
# dataset.x[column] = self.positive_tool.fit_transform(
# dataset.x[column])
# else:
# dataset.x[column] = self.negative_tool.fit_transform(
# dataset.x[column])
# dataset.x[column] = self.rescaler.fit_transform(
# dataset.x[column])
# return dataset
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 22 13:06:45 2019
@author: nbaya
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns #; sns.set(color_codes=False)
import scipy.stats as stats
df = pd.read_csv('/Users/nbaya/Documents/lab/risk_gradients/gps_6dis_alex_4.25.19.csv.gz',compression='gzip')
df = df.rename(index=str,columns={'bmi1':'bmi'})
no_nan = df.loc[~df.gpsbmi.isna()].copy() #remove NaNs from gpsbmi to ensure bivariate plot for BMI works
"""
field descriptions:
eid (int): individual ID
age (int): age
sex (bool): 0 corresponds with female, 1 corresponds with male (based on assumptions from gpscad, breastcanc)
genotyping_array (str) : options={UKBB: 285370 of 288940 (~98.76%), UKBL: 3570 of 288940 (~1.236%)}
PCs (float): principal components 1 to 10
gpscad (float): genome-wide polygenic score for coronary artery disease
cad (bool): diagnosed with CAD?
gpsbreastcanc (float): genome-wide polygenic score for breast cancer
breastcanc (bool): diagnosed with breast cancer?
gpsaf (float): genome-wide polygenic score of atrial fibrillation
af (bool): diagnosed with atrial fibrillation
gpsdm (float): genome-wide polygenic score of diabetes mellitus
dm (bool): diagnosed with diabetes mellitus
gpsibd (float): genome-wide polygenic score of inflammatory bowel disease
ibd (bool): diagnosed with inflammatory bowel disease
gpsbmi (float): genome-wide polygenic score of body mass index
bmi (float): body mass index
sevobese (bool): is severely obese
"""
diseases = {'cad':'CAD','af':'atrial fibrillation','dm':'type 2 diabetes',
'ibd':'inflammatory bowel disease','breastcanc':'breast cancer',
'bmi':'body mass index'}
def risk_gradient(df, disease, x_axis='percentile',y_axis='avg_prev'):
"""
Options for x_axis: percentile, avg_gps
y_axis: avg_prev, logit, inv_cdf, avg
"""
xlabel_dict = {'percentile': 'Percentile of polygenic score',
'avg_gps': 'Avg. GPS of percentile bin'}
ylabel_dict = {'avg_prev':f'Prevalence of {diseases[disease]} (%)',
'logit': 'log(K/(1-K))',
'inv_cdf':'Phi^-1(K)',
'avg':f'Average {diseases[disease]}'}
rank = stats.rankdata(df['gps'+disease])
percentile = rank/len(df)*100
bins = list(range(0,100))
bin_idx = np.digitize(percentile, bins, right=True)
df['bin_idx'] = bin_idx
avg = np.asarray([np.mean(df[df.bin_idx==i][disease]) for i in range(1,101)]) #average of disease in percentile bin
avg_prev = 100*avg
avg_gps = [np.mean(df[df.bin_idx==i]['gps'+disease]) for i in range(1,101)]
    logit = [np.log(k/(1-k)) for k in avg] #use the fractional prevalence, not the percentage, for the logit
    inv_cdf = [stats.norm.ppf(k) for k in avg] #same for the probit (inverse normal CDF)
fig,ax=plt.subplots(figsize=(4.5,5))
if x_axis == 'percentile': #no transformation, x-axis is percentile of bins, y-axis is prevalence as %
if disease=='bmi':
y_axis='avg'
plt.scatter(range(1,101),avg,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20)
else:
plt.scatter(range(1,101),avg_prev,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20)
plt.xticks(np.linspace(0,100,11))
elif disease == 'bmi' and x_axis == 'avg_gps': #if disease is bmi and we transform x-axis to be the avg gps for each percentile bin
y_axis = 'avg'
        plt.scatter(avg_gps,avg,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20) #plot average BMI, not a percentage
elif x_axis=='avg_gps' and y_axis=='logit': #x-axis is avg gps for each percentile bin, y-axis is logit transformation of prevalence
plt.scatter(avg_gps,logit,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20)
elif x_axis=='avg_gps' and y_axis=='inv_cdf': #x-axis is avg gps for each percentile bin, y-axis is inverse cdf transformation of prevalence
plt.scatter(avg_gps,inv_cdf,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20)
else:
return None
plt.xlabel(xlabel_dict[x_axis])
plt.ylabel(ylabel_dict[y_axis])
plt.title(f'Risk gradient for {diseases[disease]}')
plt.tight_layout()
fig=plt.gcf()
fig.savefig(f'/Users/nbaya/Documents/lab/risk_gradients/plots/{disease}_riskgradient_xaxis{x_axis}_yaxis{y_axis}.png',dpi=600)
plt.close()
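# Quick reference for the two transforms used above (illustrative values; assumes a bin
# prevalence k expressed as a fraction in (0, 1), not as a percentage):
#   k = 0.01
#   np.log(k / (1 - k))   # logit,  ~ -4.595
#   stats.norm.ppf(k)     # probit, ~ -2.326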
for disease, fullname in diseases.items():
    x_axis, y_axis = 'percentile', 'avg_prev'
risk_gradient(df, disease,x_axis=x_axis,y_axis=y_axis)
x_axis='avg_gps'
for y_axis in ['avg_gps','logit','inv_cdf']:
risk_gradient(df, disease,x_axis=x_axis,y_axis=y_axis)
# Plotting for sevobese
tmp = no_nan.copy() #use version of dataset with no NaNs in gpsbmi
rank = stats.rankdata(tmp['gpsbmi'])
percentile = rank/len(tmp)*100
bins = list(range(0,100))
bin_idx = np.digitize(percentile, bins, right=True)
tmp['bin_idx'] = bin_idx
avg_prev = np.asarray([np.mean(tmp[tmp.bin_idx==i]['sevobese']) for i in range(1,101)]) #average prevalence of disease in percentile bin
avg_gps = [np.mean(tmp[tmp.bin_idx==i]['gpsbmi']) for i in range(1,101)]
logit = [np.log(k/(1-k)) for k in avg_prev]
inv_cdf = [stats.norm.ppf(k) for k in avg_prev]
fig,ax=plt.subplots(figsize=(4.5,5))
plt.scatter(range(1,101),avg_prev*100,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20)
plt.xlabel('Percentile of polygenic score for BMI')
plt.ylabel('Prevalence of severe obesity (%)')
plt.title(f'Risk gradient for severe obesity')
plt.tight_layout()
fig=plt.gcf()
fig.savefig(f'/Users/nbaya/Documents/lab/risk_gradients/plots/sevobese_riskgradient_xaxisavg_gps_yaxisavg_prev.png',dpi=600)
ylabel_dict = {'logit': 'log(K/(1-K))',
'inv_cdf':'Phi^-1(K)'}
for y_axis, ylabel in ylabel_dict.items():
fig,ax=plt.subplots(figsize=(4.5,5))
if y_axis=='logit':
plt.scatter(avg_gps,logit,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20)
else:
plt.scatter(avg_gps,inv_cdf,c=range(1,101),cmap='Blues',vmin=-130,vmax=100,s=20)
plt.ylabel(ylabel)
plt.xlabel('Avg. GPS of percentile bin for BMI')
plt.title(f'Risk gradient for severe obesity')
plt.tight_layout()
fig=plt.gcf()
fig.savefig(f'/Users/nbaya/Documents/lab/risk_gradients/plots/sevobese_riskgradient_xaxisavg_gps_yaxis{y_axis}.png',dpi=600)
# Plotting bivariate distribution for BMI
R,p = stats.pearsonr(no_nan.bmi,no_nan.gpsbmi)
fig,ax = plt.subplots(figsize=(6,4))
plt.plot(np.linspace(10,40,2),R*(np.linspace(10,40,2)-np.mean(no_nan.bmi)),alpha=0.5)
sns.kdeplot(no_nan.bmi, no_nan.gpsbmi,n_levels=10,cmap='Blues',ax=ax,shade=True,shade_lowest=False)
plt.tight_layout()
fig=plt.gcf()
fig.savefig(f'/Users/nbaya/Documents/lab/risk_gradients/plots/bmi_bivariatekde.png',dpi=600)
# Plotting histograms
for disease, fullname in diseases.items():
fig,ax = plt.subplots(figsize=(6,4))
if disease=='bmi':
plt.hist(no_nan['gps'+disease],50)
else:
plt.hist(df['gps'+disease],50)
plt.title(f'Distribution of PGS for {fullname}')
plt.xlabel('PGS')
plt.ylabel('Count')
plt.tight_layout()
plt.xlim([-5,5])
plt.ylim([0,24000])
fig=plt.gcf()
fig.savefig(f'/Users/nbaya/Documents/lab/risk_gradients/plots/{disease}_hist.png',dpi=600)
# Make p-p plot
for disease, fullname in diseases.items():
for magnify in [10,100]:
if disease=='bmi':
cdf = np.sort([stats.norm.cdf(x) for x in no_nan['gps'+disease]])
else:
cdf = np.sort([stats.norm.cdf(x) for x in df['gps'+disease]])
x = np.linspace(0,1,len(cdf))
fig,ax=plt.subplots(figsize=(6,4))
plt.plot(x,cdf,'.',ms=1)
plt.plot([0,1],[0,1],'k--',alpha=0.5)
# plt.plot(x,x+(cdf-x)*magnify,'r-',lw=1,alpha=0.5)
plt.xlabel('expected percentile')
plt.ylabel('observed percentile')
plt.title(f'P-P plot for GPS of {fullname}\n(||exp-obs||={round(np.linalg.norm(x-cdf),3)})')
# plt.legend(['P-P','y=x',f'exp + (obs - exp)*{magnify}'])
plt.legend(['P-P','y=x'])
plt.xlim([0,1])
plt.ylim([0,1])
fig=plt.gcf()
# fig.savefig(f'/Users/nbaya/Documents/lab/risk_gradients/plots/{disease}_ppplot_magnify{magnify}.png',dpi=600)
fig.savefig(f'/Users/nbaya/Documents/lab/risk_gradients/plots/{disease}_ppplot.png',dpi=600)
plt.close()
# summary stats for disease GPS
for disease, fullname in diseases.items():
print(f'\nGPS for {fullname}')
if disease=='bmi':
tmp = no_nan
else:
tmp = df
print(f'mean: {np.mean(tmp["gps"+disease])}')
print(f'variance: {np.var(tmp["gps"+disease])}')
print(f'skew: {stats.skew(tmp["gps"+disease])}')
print(f'kurtosis: {stats.kurtosis(tmp["gps"+disease])}')
|
lista = []
while True:
    num = int(input('Enter a number: '))
    lista.append(num)
    loop = ' '
    while loop not in 'YN':
        loop = str(input('Do you want to continue? [Y/N] ')).upper().strip()[0]
    if loop == 'N':
        break
print(sorted(lista, reverse=True))
print(f'{len(lista)} numbers were entered')
print('the value 5 was entered in the list' if 5 in lista else 'the value 5 was not entered') |
import sys
n, k, *x = map(int, sys.stdin.read().split())
def main():
    # Pick k consecutive points (coordinates in x are assumed sorted) and visit all
    # of them starting from 0: the cost of a window [x[i], x[j]] is the shorter side
    # travelled twice plus the longer side; take the minimum over all windows.
    res = float("inf")
    for i in range(n - k + 1):
        j = i + k - 1
        if x[i] < 0:
            # window straddles the origin (or lies entirely to its left)
            d = -x[i] if x[j] <= 0 else min(-x[i], x[j]) * 2 + max(-x[i], x[j])
        else:
            # window lies entirely to the right of the origin
            d = x[j]
        res = min(res, d)
    print(res)
if __name__ == "__main__":
main()
|
"""
CLI initialization command.
"""
import click
from rich.prompt import Prompt
from {{cookiecutter.project_slug}} import console
from {{cookiecutter.project_slug}}.constants import WELCOME_MESSAGE
@click.command()
def init():
"""
CLI Initialization demo.
"""
console.print(WELCOME_MESSAGE)
|
import os, sys
import pickle
sys.path.append("../../../")
from data.otb import *
from forecasters import load_forecaster
import torch as tc
if __name__ == "__main__":
dsld = loadOTB("../datasets/otb", 100, bb_format="xyxy")
ld_names = ['val1', 'val2', 'test']
lds = [dsld.val1, dsld.val2, dsld.test]
root = "../datasets/otb_precomp"
model = load_forecaster(None)
model.cuda()
model.eval()
# extract response
for ld_name, ld in zip(ld_names, lds):
subroot = os.path.join(root, ld_name)
os.makedirs(subroot, exist_ok=True)
i = 0
for xs, ys in ld:
xs = [x.cuda() for x in xs]
ys = ys.cuda()
yhs, yhs_var = model(xs)
ys = model.baseF.encode_bb(xs, ys, model.baseF.opts)
for y, yh, yh_var in zip(ys, yhs, yhs_var):
fn = os.path.join(subroot, "%d.pk"%(i))
print(fn)
yh = yh.detach().cpu()
yh_var = yh_var.detach().cpu()
y = y.detach().cpu()
pickle.dump((yh, yh_var, y), open(fn, "wb"))
i += 1
|
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
# from quantizer.utils import Parser
from quantizer.torch.convert import Converter
def main():
# Set up hyper parameters
# dataset = Dataset((40,40,1), 7)
# hparams = HParams(dataset, 0.997, 1e-05)
# Instantiate my specific model
# model = MyNet(hparams)
# model = models.__dict__["inception_v3"](pretrained = True)
# model = nn.Sequential(
# nn.Conv2d(3,20,5),
# nn.ReLU(),
# nn.Conv2d(20,64,5),
# nn.ReLU()
# )
model = models.__dict__["resnet18"](pretrained = True)
# input = torch.randn(1, 3, 600,600)
# trace, out = torch.jit.get_trace_graph(model, args=(input,))
# torch.onnx._optimize_trace(trace, torch.onnx.OperatorExportTypes.ONNX)
# torch_graph = trace.graph()
# print(out)
# sys.eixt()
# for torch_node in torch_graph.nodes():
# print(torch_node)
# op = torch_node.kind()
# print(op)
# params = {k: torch_node[k] for k in torch_node.attributeNames()}
# print(dir(torch_node))
# print(torch_node.output())
# sys.eixt()
# inputs = [i.unique() for i in torch_node.inputs()]
# outputs = [o.unique() for o in torch_node.outputs()]
# print(params)
# print("Inputs: {}".format(inputs))
# print("Outputs: {}".format(outputs))
    converter = Converter()
    converter.prepare(model)
    converter.convert()
# for name, module in model.named_modules():
# module.register_forward_hook(hk_fn)
# x = torch.randn(1, 3,224,224)
# y = model(x)
# print(model)
if __name__ == '__main__':
main() |
"""
Library Features:
Name: lib_rfarm_utils_mp
Author(s): Fabio Delogu ([email protected])
Date: '20170530'
Version: '3.5.0'
"""
#######################################################################################
# Logging
import logging
import os
import multiprocessing as mp
from src.hyde.algorithm.settings.model.rfarm.lib_rfarm_args import logger_name
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to get process id
def getProcessInfo(sTitle):
# Info
log_stream.info(' ------> Info: ' + str(sTitle) + ' ModuleName: ' + str(__name__))
if hasattr(os, 'getppid'): # only available on Unix
log_stream.info(' -------> Parent process id: ' + str(os.getppid()))
        log_stream.info(' -------> Process id: ' + str(os.getpid()))
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get process signal start
def getProcessSignalStart():
# Info
log_stream.info(' ------> Process: ' + str(mp.current_process().name) + ' ... START')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get process signal end
def getProcessSignalEnd(oP):
# Info
log_stream.info(' ------> Process: ' + str(oP.name) + ' ExitCode: ' + str(oP.exitcode) + ' ... CLOSED')
# -------------------------------------------------------------------------------------
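# -------------------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical worker, not part of the original module):
#   def worker():
#       getProcessSignalStart()
#       getProcessInfo('worker task')
#   oP = mp.Process(target=worker, name='rfarm_worker')
#   oP.start()
#   oP.join()
#   getProcessSignalEnd(oP)
# -------------------------------------------------------------------------------------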
|
class Timecode():
frames = 0
def __init__(self, string=None, fps=25, total_frames=0):
self.fps = fps
if string is None:
self.total_frames = int(total_frames)
else:
unpacked = string.split(':')
if len(unpacked) == 1:
hours = minutes = seconds = 0
frames = int(unpacked[0])
elif len(unpacked) == 2:
hours = minutes = 0
seconds, frames = (int(each) for each in unpacked)
elif len(unpacked) == 3:
hours = 0
minutes, seconds, frames = (int(each) for each in unpacked)
elif len(unpacked) == 4:
hours, minutes, seconds, frames = (int(each) for each in unpacked)
else:
raise ValueError('Invalid timecode %s' % string)
if hours > 99 or minutes > 59 or seconds > 59 or frames >= fps:
raise ValueError('Invalid timecode %s' % string)
self.total_frames = ((hours*60 + minutes)*60 + seconds)*fps + frames
def __repr__(self):
return "Timecode('%s', fps=%i)" % (str(self), self.fps)
def __str__(self):
return '%02i:%02i:%02i:%02i' % self.components()
    def __cmp__(self, other):  # Python 2 only; Python 3 ignores __cmp__ (and has no cmp builtin)
if not isinstance(other, Timecode):
raise TypeError
return cmp(self.total_frames, other.total_frames)
def __eq__(self, other):
return isinstance(other, Timecode) and self.total_frames == other.total_frames
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.total_frames)
def __int__(self):
return self.total_frames
    def __add__(self, other):
        self._assert_equal_fps(other)
        # preserve this timecode's frame rate instead of reverting to the default 25 fps
        return Timecode(total_frames=self.total_frames + int(other), fps=self.fps)
    def __sub__(self, other):
        self._assert_equal_fps(other)
        return Timecode(total_frames=self.total_frames - int(other), fps=self.fps)
def components(self):
frames_per_hour = self.fps * 60 * 60
frames_per_minute = self.fps * 60
hours, hours_remainder = divmod(self.total_frames, frames_per_hour)
minutes, minutes_remainder = divmod(hours_remainder, frames_per_minute)
seconds, frames = divmod(minutes_remainder, self.fps)
return (hours, minutes, seconds, frames)
def _assert_equal_fps(self, other):
if self.fps != other.fps:
raise self.FPSMismatch
@property
def hours(self):
return self.components()[0]
@property
def minutes(self):
return self.components()[1]
@property
def seconds(self):
return self.components()[2]
@property
def frames(self):
return self.components()[3]
class FPSMismatch(Exception):
pass
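# Example usage (illustrative sketch, not part of the original module): parsing,
# frame arithmetic and component access at the default 25 fps.
if __name__ == '__main__':
    tc = Timecode('01:00:10:05')        # 1 hour, 10 seconds, 5 frames
    offset = Timecode(total_frames=20)  # 20 frames
    print(tc + offset)                  # -> 01:00:11:00
    print(int(tc), tc.hours, tc.frames) # -> 90255 1 5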
|
# -*- coding: utf-8 -*-
from abc import ABC
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
class DeepQNetwork(nn.Module, ABC):
def __init__(self, lr, num_agents, action_size, input_size):
super(DeepQNetwork, self).__init__()
""" Set seed for reproducibility """
T.manual_seed(0)
self.num_agents = num_agents
""" Shared DNN - Convolutional """
self.conv1 = nn.Conv2d(4, 16, 3)
x_test = T.tensor(np.zeros(tuple([1]) + input_size)).float()
fc_input_size = self.size_of_conv_out(x_test)
""" Shared DNN - Dense """
self.fc1 = nn.Linear(fc_input_size, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 512)
""" Individual DNN """
self.ff1 = nn.Linear(512, action_size*num_agents)
if T.cuda.is_available():
self.device = T.device('cuda')
print('YOU ARE USING YOUR GPU. LETS HAVE SOME FUN!')
else:
self.device = T.device('cpu')
print('YOUR GPU IS MISSING. POOR CPU. ITS IN CHARGE OF EVERYTHING!')
self.to(self.device)
self.optimizer = optim.Adam(self.parameters(), lr=lr)
self.loss = nn.SmoothL1Loss()
self.loss2 = nn.SmoothL1Loss(reduction = 'none')
def forward(self, x):
""" Forward function. """
""" Shared DDN - Convolutional """
x = F.relu(self.conv1(x))
x = T.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
""" Paralel DDN - Linear """
Qf = F.relu(self.ff1(x)) # SI NO PONGO UNA CAPA DE ACTIVACIÓN DE SALIDA, EL RESULTADO ES PEOR. MISTERIO MISTERIO! #
return Qf
def size_of_conv_out(self, x):
"""
Function to extract the output size of the convolutional network.
:param x: Input of the convolutional network
:return: Integer with the size of the input of the next layer (FC)
"""
x = self.conv1(x)
x = T.flatten(x, start_dim=1)
return x.shape[1]
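# Illustrative usage sketch (hypothetical sizes, not part of the original module):
# a 4-channel 10x10 observation, 2 agents with 5 actions each -> 10 Q-values per sample.
if __name__ == '__main__':
    net = DeepQNetwork(lr=1e-4, num_agents=2, action_size=5, input_size=(4, 10, 10))
    obs = T.zeros((3, 4, 10, 10)).to(net.device)  # batch of 3 stacked observations
    print(net(obs).shape)                         # torch.Size([3, 10])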
|
# Bundles for JS/CSS Minification
MINIFY_BUNDLES = {
'css': {
'common': (
'css/normalize.css',
'less/main.less',
'less/search.less',
),
'community': (
'less/wiki-content.less',
'less/community.less',
'less/select.less',
),
'mobile/common': (
'css/normalize.css',
'less/mobile/main.less',
),
'print': (
'css/print.css',
),
# TODO: remove dependency on jquery ui CSS and use our own
'jqueryui/jqueryui': (
'css/jqueryui/jqueryui.css',
),
'forums': (
'less/forums.less',
'less/reportabuse.less',
),
'questions': (
'less/questions.less',
'css/cannedresponses.css',
'less/reportabuse.less',
),
'questions.metrics': (
'less/questions.metrics.less',
),
'mobile/questions': (
'less/mobile/questions.less',
),
'mobile/aaq': (
'less/mobile/aaq.less',
),
'rickshaw': (
'css/jqueryui/jqueryui.css',
'css/rickshaw.css',
'less/rickshaw.sumo.less',
),
'mobile/search': (
'less/mobile/search.less',
),
'wiki': (
'css/users.autocomplete.css',
'css/users.list.css',
'less/wiki.less',
'less/wiki-content.less',
'css/screencast.css',
),
'mobile/wiki': (
'less/mobile/wiki.less',
'less/wiki-content.less',
),
'home': (
'less/home.less',
),
'gallery': (
'less/gallery.less',
),
'ie': (
'css/ie.css',
'css/ie8.css',
),
'ie8': ( # IE 8 needs some specific help.
'css/ie8.css',
),
'customercare': (
'less/customercare.less',
),
'users': (
'less/users.less',
'less/reportabuse.less',
),
'mobile/users': (
'less/mobile/users.less',
),
'monitor': (
'css/monitor.css',
),
'messages': (
'css/users.autocomplete.css',
'less/messages.less',
),
'mobile/messages': (
'less/mobile/messages.less',
),
'products': (
'less/products.less',
),
'mobile/products': (
'less/mobile/products.less',
),
'groups': (
'css/users.autocomplete.css',
'css/users.list.css',
'css/groups.css',
'css/wiki_syntax.css',
),
'karma.dashboard': (
'less/karma.dashboard.less',
),
'kpi.dashboard': (
'less/kpi.dashboard.less',
),
'locale-switcher': (
'less/locale-switcher.less',
),
'mobile/locale-switcher': (
'less/mobile/locales.less',
),
'kbdashboards': (
'less/kbdashboards.less',
),
'landings/get-involved': (
'less/landings/get-involved.less',
),
'mobile/landings/get-involved': (
'less/mobile/landings/get-involved.less',
),
'badges': (
'less/badges.less',
),
},
'js': {
'common': (
'js/i18n.js',
'js/libs/underscore.js',
'js/libs/moment-2.8.3.js',
'js/libs/jquery-1.10.1.min.js',
'js/libs/jquery.migrate.js',
'js/libs/jquery.cookie.js',
'js/libs/jquery.placeholder.js',
'js/templates/macros.js',
'js/templates/search-results-list.js',
'js/templates/search-results.js',
'js/libs/nunjucks-slim.js',
'js/nunjucks.js',
'js/cached_xhr.js',
'js/search_utils.js',
'js/browserdetect.js',
'js/kbox.js',
'js/main.js',
'js/format.js',
'js/libs/modernizr-2.6.1.js',
'js/geoip-locale.js',
'js/ui.js',
'js/analytics.js',
'js/surveygizmo.js',
'js/instant_search.js',
),
'community': (
'js/libs/jquery-1.10.1.min.js',
'js/community.js',
'js/select.js',
),
'mobile/common': (
'js/i18n.js',
'js/libs/underscore.js',
'js/libs/jquery-1.10.1.min.js',
'js/libs/jquery.migrate.js',
'js/libs/modernizr-2.6.1.js',
'js/browserdetect.js',
'js/aaq.js',
'js/mobile/ui.js',
'js/analytics.js',
),
'ie6-8': (
'js/libs/nwmatcher-1.2.5.js',
'js/libs/selectivizr-1.0.2.js',
),
'libs/jqueryui': (
'js/libs/jqueryui.js',
),
'questions': (
'js/markup.js',
'js/ajaxvote.js',
'js/ajaxpreview.js',
'js/aaq.js',
'js/questions.js',
'js/libs/jquery.tokeninput.js',
'js/tags.filter.js',
'js/tags.js',
'js/reportabuse.js',
'js/questions.metrics.js',
'js/libs/jquery.ajaxupload.js',
'js/upload.js',
),
'questions.metrics': (
'js/questions.metrics-dashboard.js',
),
'mobile/questions': (
'js/mobile/questions.js',
'js/questions.metrics.js',
),
'mobile/aaq': (
'js/aaq.js',
'js/mobile/aaq.js',
),
'products': (
'js/products.js',
),
'search': (
'js/search.js',
),
'forums': (
'js/markup.js',
'js/ajaxpreview.js',
'js/forums.js',
'js/reportabuse.js',
),
'gallery': (
'js/libs/jquery.ajaxupload.js',
'js/gallery.js',
),
'wiki': (
'js/markup.js',
'js/libs/django/urlify.js',
'js/libs/django/prepopulate.js',
'js/libs/swfobject.js',
'js/libs/jquery.lazyload.js',
'js/libs/jquery.tokeninput.js',
'js/users.autocomplete.js',
'js/screencast.js',
'js/showfor.js',
'js/ajaxvote.js',
'js/ajaxpreview.js',
'js/wiki.js',
'js/tags.js',
'js/dashboards.js',
'js/editable.js',
'js/wiki.metrics.js',
),
'rickshaw': (
'js/libs/jqueryui.js',
'js/libs/d3.js',
'js/libs/d3.layout.min.js',
'js/libs/rickshaw.js',
'js/rickshaw_utils.js',
),
'mobile/wiki': (
'js/libs/underscore.js',
'js/libs/jquery.cookie.js',
'js/libs/jquery.lazyload.js',
'js/browserdetect.js',
'js/showfor.js',
'js/ajaxform.js',
'js/mobile/wiki.js',
'js/wiki.metrics.js',
),
'wiki.history': (
'js/historycharts.js',
),
'wiki.diff': (
'js/libs/diff_match_patch_uncompressed.js',
'js/diff.js',
),
'wiki.editor': (
'js/libs/ace/ace.js',
'js/libs/ace/ext-language_tools.js',
'js/ace.mode-sumo.js',
),
'wiki.dashboard': (
'js/wiki.dashboard.js',
),
'customercare': (
'js/libs/jquery.cookie.js',
'js/libs/jquery.bullseye-1.0.min.js',
'js/libs/twitter-text.js',
'js/customercare.js',
'js/users.js',
),
'users': (
'js/users.js',
'js/reportabuse.js',
),
'messages': (
'js/markup.js',
'js/libs/jquery.autoresize.js',
'js/libs/jquery.tokeninput.js',
'js/users.autocomplete.js',
'js/ajaxpreview.js',
'js/messages.js',
),
'mobile/messages': (
'js/libs/jquery.tokeninput.js',
'js/users.autocomplete.js',
),
'groups': (
'js/libs/jquery.tokeninput.js',
'js/users.autocomplete.js',
'js/markup.js',
'js/groups.js',
'js/editable.js',
),
'karma.dashboard': (
'js/libs/backbone.js',
'js/karma.dashboard.js',
),
'kpi.dashboard': (
'js/kpi.dashboard.js',
),
},
}
|
from asciimatics.widgets import Label, Divider, Text
from viewmodels.ethertoken.state import State
"""
Our subviews simply take a layout and inject their content into it
"""
def inject_ether_token_state(layout, col=0):
layout.add_widget(Label('Ether Token'), col)
layout.add_widget(Divider(line_char='-'), col)
layout.add_widget(Text('Address:', 'ether_token_address'), col)
layout.add_widget(Text('Owner:', 'ether_token_owner'), col)
layout.add_widget(Text('Total Supply:', 'ether_token_total_supply'), col)
layout.add_widget(Text('Balance:', 'ether_token_balance'), col)
layout.add_widget(Label('Allowances: {'), col)
layout.add_widget(Text(' Reserve:', 'ether_token_reserve_allowance'), col)
layout.add_widget(Text(' Datatrust:', 'ether_token_datatrust_allowance'), col)
layout.add_widget(Label('}'), col)
def hydrate_ether_token_state(data={}):
"""
Given a dictionary, allow the viewmodel to hydrate the data needed by this view
"""
vm = State()
return vm.hydrate(data)
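# Illustrative sketch (assumes an asciimatics Frame whose Layout has already been created):
#   layout = Layout([100])
#   frame.add_layout(layout)
#   inject_ether_token_state(layout)
#   frame.data = hydrate_ether_token_state({'ether_token_address': '0x0'})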
|
import redis
from django.contrib.auth import authenticate
from django.utils.encoding import smart_text
from rest_framework import serializers, status
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from rest_framework.response import Response
from rest_framework_jwt.compat import PasswordField, get_username_field, Serializer
from rest_framework_jwt.serializers import VerificationBaseSerializer
from rest_framework_jwt.views import JSONWebTokenAPIView
from PG_AdminREST.settings import REDIS_HOST, REDIS_PORT, REDIS_USER_TOKEN_DB_NAME, ENV_NAME
from restapi.models import WithdrawOrder, Deposit, CollectionRecords, Address, CollectionConfig, Project, \
CollectionFeeConfig, WithdrawConfig, UserTokenBalances, UserAddressBalances, Subaddress, AssetDailyReport, \
UserOperationLog, AddAddressOrder, DjangoAdminLog
from restapi.utils import jwt_response_payload_handler, jwt_response_payload_error_handler, \
jwt_response_payload_code_error_handler
from calendar import timegm
from datetime import datetime, timedelta
import jwt
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext as _
from rest_framework import exceptions
from rest_framework_jwt.settings import api_settings
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
jwt_get_username_from_payload = api_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
rds = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_USER_TOKEN_DB_NAME,
decode_responses=True)
class UsersSerializer(serializers.ModelSerializer):
"""
    User info serializer
"""
class Meta:
model = Project
fields = '__all__'
extra_kwargs = {
'api_key': {'required': False},
'bip44_account_index': {'required': False},
'create_time': {'required': False},
}
# Overridden, high-performance serialization helpers
def withdrawOrder_to_representation(obj):
return {
"serial_id": obj.serial_id,
"order_id": obj.order_id,
"pro_id": obj.pro_id_id,
"token_name": obj.token_name,
"from_addr": obj.from_addr,
"to_addr": obj.to_addr,
"memo": obj.memo,
"amount": obj.amount,
"block_height": obj.block_height,
"tx_hash": obj.tx_hash,
"callback_url": obj.callback_url,
"tx_confirmations": obj.tx_confirmations,
"order_status": obj.order_status,
"transaction_status": obj.transaction_status,
"notify_status": obj.notify_status,
"notify_times": obj.notify_times,
"block_time": obj.block_time,
"complete_time": obj.complete_time,
"remark" : obj.remark,
"pro_name": obj.pro_id.pro_name
}
def deposit_to_representation(obj):
return {
"amount": obj.amount,
"block_height": obj.block_height,
"block_time": obj.block_time,
"from_addr": obj.from_addr,
"memo": obj.memo,
"notify_status": obj.notify_status,
"pro_id": obj.pro_id_id,
"to_addr": obj.to_addr,
"token_name": obj.token_name,
"tx_confirmations": obj.tx_confirmations,
"tx_hash": obj.tx_hash,
"id": obj.id,
"pro_name": obj.pro_id.pro_name
}
def addaddressorder_to_representation(obj):
# pro_name = select_pro_name(obj)
    ## Known issue: newly added users do not show pro_name
return {
"order_id": obj.order_id,
"token_name": obj.token_name,
"apply_times": obj.apply_times,
"count": obj.count,
"start_addr_index": obj.start_addr_index,
"end_addr_index": obj.end_addr_index,
"audit_status": obj.audit_status,
"generate_status": obj.generate_status,
"order_create_time": obj.order_create_time,
"audit_complete_time": obj.audit_complete_time,
"order_complete_time": obj.order_complete_time,
"order_status": obj.order_status,
"remark": obj.remark,
"active_status": obj.active_status,
"pro_id": obj.pro_id_id,
"pro_name": obj.pro_id.pro_name
}
def address_to_representation(obj):
return {
"id": obj.id,
"token_name": obj.token_name,
"address_nums": obj.address_nums,
"uncharged_address_nums": obj.uncharged_address_nums,
"update_time": obj.update_time,
"pro_id": obj.pro_id_id,
"pro_name": obj.pro_id.pro_name
}
def usertokenbalances_to_representation(obj):
return {
"id": obj.id,
"pro_id": obj.pro_id_id,
"token_name": obj.token_name,
"all_balance": obj.all_balance,
"withdraw_address": obj.withdraw_address,
"withdraw_balance": obj.withdraw_balance,
"update_time": obj.update_time,
"pro_name": obj.pro_id.pro_name
}
def subaddress_to_representation(obj):
return {
"address": obj.address,
"token_name": obj.token_name,
"account_index": obj.account_index,
"address_index": obj.address_index,
"create_time": obj.create_time,
"pro_id": obj.pro_id_id,
}
def collectionrecords_to_representation(obj):
return {
"id": obj.id,
"pro_id": obj.pro_id_id,
"tx_hash": obj.tx_hash,
"complete_time": obj.complete_time,
"amount": obj.amount,
"token_name": obj.token_name,
"from_address": obj.from_address,
"to_address": obj.to_address,
"block_height": obj.block_height,
"block_time": obj.block_time,
"tx_confirmations": obj.tx_confirmations,
"transaction_status": obj.transaction_status,
"collection_type": obj.collection_type,
"pro_name": obj.pro_id.pro_name
}
def assetdailyreport_to_representation(obj):
return {
"pro_id": obj.pro_id_id,
"token_name": obj.token_name,
"deposit_amount": obj.deposit_amount,
"withdraw_amount": obj.withdraw_amount,
"collectionRecords_amount": obj.collectionRecords_amount,
"all_balance": obj.all_balance,
"update_time": obj.update_time,
"pro_name": obj.pro_id.pro_name
}
def useroperationlog_to_representation(obj):
return {
"pro_id": obj.pro_id_id,
"id": obj.id,
"operation_time": obj.operation_time,
"function_name": obj.function_name,
"operation_type": obj.operation_type,
"update_before_value": obj.update_before_value,
"last_after_value": obj.last_after_value,
"operation_status": obj.operation_status,
"pro_name": obj.pro_id.pro_name
}
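# Illustrative sketch (assumes this project's models; the filter field mirrors the
# WithdrawOrder foreign key used above): the plain-dict helpers skip DRF's per-field
# machinery, so a queryset can be rendered directly without instantiating a serializer.
def example_fast_withdraw_list(pro_id):
    orders = WithdrawOrder.objects.filter(pro_id=pro_id).select_related('pro_id')
    return [withdrawOrder_to_representation(order) for order in orders]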
class WithdrawOrderSerializer(serializers.ModelSerializer):
"""
Withdraw order view serializer
"""
def to_representation(self, obj):
return withdrawOrder_to_representation(obj)
class Meta:
model = WithdrawOrder
fields = '__all__'
class DepositSerializer(serializers.ModelSerializer):
"""
Deposit view serializer
"""
def to_representation(self, obj):
return deposit_to_representation(obj)
class Meta:
model = Deposit
fields = '__all__'
class CollectionRecordsSerializer(serializers.ModelSerializer):
"""
Collection records view serializer
"""
def to_representation(self, obj):
return collectionrecords_to_representation(obj)
class Meta:
model = CollectionRecords
fields = '__all__'
class AddressSerializer(serializers.ModelSerializer):
"""
Address management serializer
"""
def to_representation(self, obj):
return address_to_representation(obj)
class Meta:
model = Address
fields = '__all__'
class SubaddressSerializer(serializers.ModelSerializer):
"""
Subaddress serializer
"""
def to_representation(self, obj):
return subaddress_to_representation(obj)
class Meta:
model = Subaddress
fields = '__all__'
class CollectionConfigSerializer(serializers.ModelSerializer):
"""
Collection config serializer
"""
class Meta:
model = CollectionConfig
fields = '__all__'
class CollectionFeeConfigSerializer(serializers.ModelSerializer):
"""
Collection fee config serializer
"""
class Meta:
model = CollectionFeeConfig
fields = '__all__'
class WithdrawConfigSerializer(serializers.ModelSerializer):
"""
Withdraw config serializer
"""
class Meta:
model = WithdrawConfig
fields = '__all__'
class UserAddressBalancesSerializer(serializers.ModelSerializer):
"""
User address balances serializer
"""
class Meta:
model = UserAddressBalances
fields = ['token_name', 'address', 'balance', 'update_time']
class UserTokenBalancesSerializer(serializers.ModelSerializer):
"""
User token balances serializer
"""
def to_representation(self, obj):
return usertokenbalances_to_representation(obj)
class Meta:
model = UserTokenBalances
fields = '__all__'
class AssetDailyReportSerializer(serializers.ModelSerializer):
"""
Daily asset report serializer
"""
def to_representation(self, obj):
return assetdailyreport_to_representation(obj)
class Meta:
model = AssetDailyReport
fields = '__all__'
class UserOperationLogSerializer(serializers.ModelSerializer):
"""
User operation log serializer
"""
def to_representation(self, obj):
return useroperationlog_to_representation(obj)
class Meta:
model = UserOperationLog
fields = '__all__'
class AdminOperationLogSerializer(serializers.ModelSerializer):
"""
Admin operation log serializer
"""
class Meta:
model = DjangoAdminLog
fields = ['action_time', 'object_id', 'object_repr', 'action_flag', 'change_message', 'user']
class AddAddressOrderSerializer(serializers.ModelSerializer):
"""
Add-address audit serializer
"""
def to_representation(self, obj):
return addaddressorder_to_representation(obj)
class Meta:
model = AddAddressOrder
fields = '__all__'
# extra_kwargs = {
# 'order_id':{'required': False},
# 'token_name': {'required': False},
# 'apply_times': {'required': False},
# 'count': {'required': False},
# 'start_addr_index': {'required': False},
# 'end_addr_index': {'required': False},
# 'generate_status': {'required': False},
# 'order_status': {'required': False},
# 'active_status': {'required': False},
# 'pro_id': {'required': False},
# 'remark': {
# 'required': True,
# 'help_text': 'Remark'
# }
# }
class CustomJWTSerializer(JSONWebTokenAPIView):
def options(self, request, *args, **kwargs):
return Response('Invalid request method', status=status.HTTP_400_BAD_REQUEST)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
# Overridden
try:
if serializer.is_valid():
user = serializer.object.get('user') or request.user
token = serializer.object.get('token')
# Response for a genuinely successful login
DjangoAdminLog.objects.create(user=user, change_message='login',
action_time=datetime.now(), action_flag='200')
print("login success")
response_data = jwt_response_payload_handler(token, user, request)
response = Response(response_data)
if api_settings.JWT_AUTH_COOKIE:
expiration = (datetime.utcnow() +
api_settings.JWT_EXPIRATION_DELTA)
response.set_cookie(api_settings.JWT_AUTH_COOKIE,
token,
expires=expiration,
httponly=True)
return response
except Exception as e:
print(e)
if str(e) == "400":
response_data = jwt_response_payload_code_error_handler(request)
else:
response_data = jwt_response_payload_error_handler(request)
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
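# Illustrative client-side sketch (hypothetical URL path; the real routes live in the
# project's urls.py, which is not shown, and the 'token' field name comes from this
# project's custom jwt_response_payload_handler, so both are assumptions): obtain a
# token with username, password and Google authenticator code, then build the header
# expected by the authentication classes below.
def example_client_login(base_url, username, password, google_code):
    import requests  # any HTTP client works; requests is an assumption
    resp = requests.post(base_url + '/api/login/',
                         json={'username': username, 'password': password, 'code': google_code})
    token = resp.json().get('token')  # assumed payload field name
    prefix = api_settings.JWT_AUTH_HEADER_PREFIX  # typically 'JWT'
    return {'Authorization': '%s %s' % (prefix, token)}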
class MyJSONWebTokenSerializer(Serializer):
"""
Overridden JWT serializer validation.
Serializer class used to validate a username and password.
'username' is identified by the custom UserModel.USERNAME_FIELD.
Returns a JSON Web Token that can be used to authenticate later calls.
"""
def __init__(self, *args, **kwargs):
"""
Dynamically add the USERNAME_FIELD to self.fields.
"""
super(MyJSONWebTokenSerializer, self).__init__(*args, **kwargs)
self.fields[self.username_field] = serializers.CharField()
self.fields['password'] = PasswordField(write_only=True)
@property
def username_field(self):
return get_username_field()
def validate(self, attrs):
# Overridden: additionally require a Google authenticator code
try:
code = self.initial_data["code"]
except Exception as e:
print(f'google_code is None : {e}')
msg = 'google code error'
raise serializers.ValidationError(msg)
credentials = {
self.username_field: attrs.get(self.username_field),
'password': attrs.get('password'),
'code' : code
}
# End of override
if all(credentials.values()):
user = authenticate(**credentials)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise serializers.ValidationError(msg)
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
# user.username being None indicates a successful login state
url = f'{ENV_NAME}_admin_token_{user.id}'
rds.set(url, token)
rds.expire(url, 60*60)
return {
'token': token,
'user': user
}
else:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg)
else:
msg = _('Must include "{username_field}" and "password".')
msg = msg.format(username_field=self.username_field)
raise serializers.ValidationError(msg)
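# Illustrative sketch (same Redis client and key scheme as above): after a successful
# login the per-user key holds the newest token for one hour, so any previously issued
# token fails the comparison in MyBaseJSONWebTokenAuthentication below (single sign-on).
def example_single_sign_on_state(user_id, new_token):
    key = f'{ENV_NAME}_admin_token_{user_id}'
    rds.set(key, new_token)   # the newest token becomes the only valid one
    rds.expire(key, 60 * 60)  # one-hour lifetime, matching the serializer above
    return rds.get(key) == new_token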
class MyRefreshJSONWebTokenSerializer(VerificationBaseSerializer):
"""
Refresh an access token.
Overridden JWT token refresh ### to support single sign-on
"""
def validate(self, attrs):
token = attrs['token']
payload = self._check_payload(token=token)
user = self._check_user(payload=payload)
# Get and check 'orig_iat'
orig_iat = payload.get('orig_iat')
# Token refresh is rate-limited to once every 30 minutes
url = f'{ENV_NAME}_refresh_times_{user.id}'
is_exist = rds.get(url)
if is_exist:
msg = _('The token can only be refreshed once every 30 minutes')
raise serializers.ValidationError(msg)
if orig_iat:
# Verify expiration
refresh_limit = api_settings.JWT_REFRESH_EXPIRATION_DELTA
if isinstance(refresh_limit, timedelta):
refresh_limit = (refresh_limit.days * 24 * 3600 +
refresh_limit.seconds)
expiration_timestamp = orig_iat + int(refresh_limit)
now_timestamp = timegm(datetime.utcnow().utctimetuple())
if now_timestamp > expiration_timestamp:
msg = _('Refresh has expired.')
raise serializers.ValidationError(msg)
else:
msg = _('orig_iat field is required.')
raise serializers.ValidationError(msg)
new_payload = jwt_payload_handler(user)
new_payload['orig_iat'] = orig_iat
# Additional: refresh the single sign-on token state in Redis
url = f'{ENV_NAME}_admin_token_{user.id}'
rds.set(url, jwt_encode_handler(new_payload))
rds.expire(url, 60*60)
url = f'{ENV_NAME}_refresh_times_{user.id}'
rds.set(url, datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
rds.expire(url, 60 * 30)
return {
'token': jwt_encode_handler(new_payload),
'user': user
}
class MyBaseJSONWebTokenAuthentication(BaseAuthentication):
"""
Token based authentication using the JSON Web Token standard.
Overridden JWT to enforce single sign-on, tracking token state in Redis
"""
def authenticate(self, request):
"""
Returns a two-tuple of `User` and token if a valid signature has been
supplied using JWT-based authentication. Otherwise returns `None`.
"""
jwt_value = self.get_jwt_value(request)
if jwt_value is None:
return None
try:
payload = jwt_decode_handler(jwt_value)
except jwt.ExpiredSignature:
msg = _('Signature has expired.')
raise exceptions.AuthenticationFailed(msg)
except jwt.DecodeError:
msg = _('Error decoding signature.')
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError:
raise exceptions.AuthenticationFailed()
# Check the JWT token state in Redis
url = f'{ENV_NAME}_admin_token_{payload["user_id"]}'
is_exist = rds.get(url)
if not is_exist or is_exist != jwt_value.decode("utf8"):
raise exceptions.AuthenticationFailed('Signature has expired.')
user = self.authenticate_credentials(payload)
return (user, jwt_value)
def authenticate_credentials(self, payload):
"""
Returns an active user that matches the payload's user id and email.
"""
User = get_user_model()
username = jwt_get_username_from_payload(payload)
if not username:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get_by_natural_key(username)
except User.DoesNotExist:
msg = _('Invalid signature.')
raise exceptions.AuthenticationFailed(msg)
if not user.is_active:
msg = _('User account is disabled.')
raise exceptions.AuthenticationFailed(msg)
return user
# Adapter via inheritance
class MyJSONWebTokenAuthentication(MyBaseJSONWebTokenAuthentication):
"""
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string specified in the setting
`JWT_AUTH_HEADER_PREFIX`. For example:
Authorization: JWT eyJhbGciOiAiSFMyNTYiLCAidHlwIj
"""
www_authenticate_realm = 'api'
def get_jwt_value(self, request):
auth = get_authorization_header(request).split()
auth_header_prefix = api_settings.JWT_AUTH_HEADER_PREFIX.lower()
if not auth:
if api_settings.JWT_AUTH_COOKIE:
return request.COOKIES.get(api_settings.JWT_AUTH_COOKIE)
return None
if smart_text(auth[0].lower()) != auth_header_prefix:
return None
if len(auth) == 1:
msg = _('Invalid Authorization header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid Authorization header. Credentials string '
'should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
return auth[1]
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
return '{0} realm="{1}"'.format(api_settings.JWT_AUTH_HEADER_PREFIX, self.www_authenticate_realm) |
import gzip
import io
import random
from typing import List, Tuple
import numpy as np
from bsbolt.Impute.Imputation.GenomeImputation import GenomeImputation
from bsbolt.Impute.Impute_Utils.ImputationFunctions import get_bsb_matrix
class ImputeMissingValues:
"""
Launch a kNN imputation task. This wrapper imports data for imputation, splits data for batch imputation,
and combines data after imputation. Data is held in memory for access during imputation. If the closest
neighbors are null, a null imputed value is returned.
Params:
* *input_matrix_file (str)*: Path to bsbolt matrix
* *batch_size (int)*: Batch size for batch imputation
* *imputation_window_size (int)*: Size (bp) for imputation window, [3,000,000]
* *k (int)*: Nearest neighbors used for imputation, [5]
* *threads (int)*: Number of threads available for imputation, [4]
* *verbose (bool)*: Verbose imputation, [False]
* *sep (str)*: separator character used in methylation matrix, [\t]
* *output_path (str)*: output path
* *randomize_batch (bool)*: randomize batch, [False]
* *meth_matrix (np.ndarray)*: imputed methylation matrix
* *meth_site_order (list)*: ordered methylation sites, sorted by contig then position
* *sample_ids (list)*: sample names
"""
def __init__(self, input_matrix_file: str = None, batch_size: int = None, imputation_window_size: int = 3000000,
k: int = 5, threads: int = 4, verbose: bool = False, sep: str = '\t', output_path: str = None,
randomize_batch: bool = False, meth_matrix: np.ndarray = None, meth_site_order: list = None,
sample_ids: list = None):
self.input_matrix_file = input_matrix_file
self.meth_matrix = meth_matrix
self.meth_site_order = meth_site_order
self.sample_ids = sample_ids
self.batch_commands = [0, False]
if batch_size:
self.batch_commands = [batch_size, randomize_batch]
self.output_matrix = None
if output_path:
self.output_matrix = self.get_output_matrix(output_path)
self.tqdm_disable = False if verbose else True
self.sep = sep
self.imputation_kwargs = dict(imputation_window_size=imputation_window_size,
k=k,
threads=threads,
verbose=verbose)
def impute_values(self):
"""
Launch kNN imputation for each batch and set values in original matrix.
"""
imputation_order = [count for count in range(len(self.sample_ids[1]))]
if self.batch_commands[1]:
random.shuffle(imputation_order)
if not self.batch_commands[0]:
self.batch_commands[0] = len(self.sample_ids[1])
imputation_batches = self.process_batch(imputation_order)
for batch in imputation_batches:
batch_array, sample_labels = self.get_batch_data(batch)
batch_impute = self.launch_genome_imputation(batch_array, sample_labels)
for count, sample in enumerate(batch):
self.meth_matrix[:, sample] = batch_impute.genomic_array[:, count]
def process_batch(self, imputation_order: List[int]) -> List[List[int]]:
"""Generate sample batches"""
batches = []
if not self.batch_commands[0]:
self.batch_commands[0] = len(self.sample_ids)
batch_number = int(len(imputation_order) / self.batch_commands[0])
batch_remainder = len(imputation_order) % self.batch_commands[0]
if float(batch_remainder) / float(self.batch_commands[0]) <= .6 and batch_remainder != 0:
batch_addition = int(batch_remainder / batch_number)
self.batch_commands[0] += batch_addition + 1
print(f'Adjusting batch size, new batch size = {self.batch_commands[0]}')
start, end = 0, self.batch_commands[0]
while True:
batch_order = imputation_order[start: end]
if not batch_order:
break
batches.append(batch_order)
start += self.batch_commands[0]
end += self.batch_commands[0]
return batches
def get_batch_data(self, batch: List[int]) -> Tuple[np.ndarray, List[str]]:
"""Return methylation value for batch imputation
Returns:
* *batch_array (np.ndarry)*: array of methylation values
* *sample_labels (list)*: list of samples in batch
"""
batch_array = self.meth_matrix[:, batch]
sample_labels = [self.sample_ids[1][sample] for sample in batch]
return batch_array, sample_labels
def import_matrix(self):
self.meth_matrix, self.meth_site_order, self.sample_ids = get_bsb_matrix(self.input_matrix_file)
@staticmethod
def get_output_matrix(output_path: str):
"""Get output object"""
if output_path.endswith('.gz'):
out = io.TextIOWrapper(gzip.open(output_path, 'wb'))  # text wrapper so string rows can be written to the gzip stream
else:
out = open(output_path, 'w')
return out
def output_imputed_matrix(self):
"""Write imputed values"""
self.output_matrix.write('\t'.join([self.sample_ids[0]] + self.sample_ids[1]) + '\n')
for site_label, values in zip(self.meth_site_order, self.meth_matrix):
str_values = '\t'.join([str(value) for value in values])
self.output_matrix.write(f'{site_label}\t{str_values}\n')
self.output_matrix.close()
def launch_genome_imputation(self, meth_array: np.ndarray, sample_labels: List) -> object:
imputation_kwargs = dict(self.imputation_kwargs)
imputation_kwargs.update(dict(genomic_array=meth_array,
sample_labels=sample_labels,
row_labels=self.meth_site_order))
knn_imputation: GenomeImputation = GenomeImputation(**imputation_kwargs)
knn_imputation.impute_windows()
return knn_imputation
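# Minimal usage sketch (hypothetical file paths; assumes a bsbolt methylation matrix on
# disk): load the matrix, impute missing values in batches of 10, and write the result.
def example_imputation_run():
    imputer = ImputeMissingValues(input_matrix_file='meth_matrix.txt',
                                  batch_size=10,
                                  k=5,
                                  threads=4,
                                  output_path='imputed_matrix.txt')
    imputer.import_matrix()
    imputer.impute_values()
    imputer.output_imputed_matrix()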
|
from gunicorn_logging.formatters import GunicornJsonFormatter
from gunicorn_logging.handlers import LogstashHandler |
def findProfession(level, pos):
# Base case
if level == 1:
return "Engineer"
# Recursively find the parent's profession. If the parent
# is a doctor, this node will be a doctor if it is
# at an odd position and an engineer if at an even position.
if findProfession(level - 1, (pos + 1) // 2) == "Doctor":
if pos % 2:
return "Doctor"
else:
return "Engineer"
# If the parent is an engineer, then the current node will be
# an engineer if at an odd position and a doctor if at an even
# position.
if pos % 2:
return "Engineer"
else:
return "Doctor"
|
/home/runner/.cache/pip/pool/16/e0/c9/461291bf12aa9b8a7c5fac61fd22c2aeae992fed1971617a77197b07c2 |
# -*- coding: utf-8 -*-
# @Author: XP
import logging
import os
import torch
import utils.data_loaders
import utils.helpers
from datetime import datetime
from tqdm import tqdm
from time import time
from tensorboardX import SummaryWriter
from core.test_c3d import test_net
from utils.average_meter import AverageMeter
from models.model import PMPNet as Model
from Chamfer3D.dist_chamfer_3D import chamfer_3DDist
chamfer_dist = chamfer_3DDist()
def chamfer(p1, p2):
d1, d2, _, _ = chamfer_dist(p1, p2)
return torch.mean(d1) + torch.mean(d2)
def chamfer_sqrt(p1, p2):
d1, d2, _, _ = chamfer_dist(p1, p2)
d1 = torch.mean(torch.sqrt(d1))
d2 = torch.mean(torch.sqrt(d2))
return (d1 + d2) / 2
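# Minimal usage sketch (assumes a CUDA device and the compiled Chamfer3D extension
# imported above): Chamfer distance between two random point clouds of shape (B, N, 3).
def example_chamfer_usage(batch_size=2, num_points=1024):
    p1 = torch.rand(batch_size, num_points, 3).cuda()
    p2 = torch.rand(batch_size, num_points, 3).cuda()
    return chamfer(p1, p2).item(), chamfer_sqrt(p1, p2).item()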
def lr_lambda(epoch):
if 0 <= epoch <= 100:
return 1
elif 100 < epoch <= 150:
return 0.5
elif 150 < epoch <= 250:
return 0.1
else:
return 0.5
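# Sketch of the schedule defined above (assumes only torch, already imported): LambdaLR
# multiplies the base learning rate by lr_lambda(epoch), i.e. 1.0 until epoch 100, then
# 0.5, then 0.1 after epoch 150, falling back to 0.5 beyond epoch 250.
def example_lr_schedule(base_lr=1e-3, num_epochs=5):
    layer = torch.nn.Linear(1, 1)
    optim = torch.optim.Adam(layer.parameters(), lr=base_lr)
    sched = torch.optim.lr_scheduler.LambdaLR(optim, lr_lambda=lr_lambda)
    observed = []
    for _ in range(num_epochs):
        observed.append(optim.param_groups[0]['lr'])
        sched.step()
    return observed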
def train_net(cfg):
# Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
torch.backends.cudnn.benchmark = True
train_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TRAIN_DATASET](cfg)
test_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
train_data_loader = torch.utils.data.DataLoader(dataset=train_dataset_loader.get_dataset(
utils.data_loaders.DatasetSubset.TRAIN),
batch_size=cfg.TRAIN.BATCH_SIZE,
num_workers=cfg.CONST.NUM_WORKERS,
collate_fn=utils.data_loaders.collate_fn,
pin_memory=True,
shuffle=True,
drop_last=True)
val_data_loader = torch.utils.data.DataLoader(dataset=test_dataset_loader.get_dataset(
utils.data_loaders.DatasetSubset.VAL),
batch_size=cfg.TRAIN.BATCH_SIZE,
num_workers=cfg.CONST.NUM_WORKERS//2,
collate_fn=utils.data_loaders.collate_fn,
pin_memory=True,
shuffle=False)
# Set up folders for logs and checkpoints
output_dir = os.path.join(cfg.DIR.OUT_PATH, '%s', datetime.now().isoformat())
cfg.DIR.CHECKPOINTS = output_dir % 'checkpoints'
cfg.DIR.LOGS = output_dir % 'logs'
if not os.path.exists(cfg.DIR.CHECKPOINTS):
os.makedirs(cfg.DIR.CHECKPOINTS)
# Create tensorboard writers
train_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'train'))
val_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'test'))
model = Model(dataset=cfg.DATASET.TRAIN_DATASET)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda()
# Create the optimizers
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LEARNING_RATE,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
betas=cfg.TRAIN.BETAS)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
lr_lambda=lr_lambda)
init_epoch = 0
best_metrics = float('inf')
if 'WEIGHTS' in cfg.CONST and cfg.CONST.WEIGHTS:
logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
checkpoint = torch.load(cfg.CONST.WEIGHTS)
init_epoch = checkpoint.get('epoch_index', 0)  # resume from the saved epoch rather than restarting at 0
best_metrics = checkpoint['best_metrics']
model.load_state_dict(checkpoint['model'])
logging.info('Recover complete. Current epoch = #%d; best metrics = %s.' % (init_epoch, best_metrics))
# Training/Testing the network
for epoch_idx in range(init_epoch + 1, cfg.TRAIN.N_EPOCHS + 1):
epoch_start_time = time()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
total_cd1 = 0
total_cd2 = 0
total_cd3 = 0
total_pmd = 0
batch_end_time = time()
n_batches = len(train_data_loader)
with tqdm(train_data_loader) as t:
for batch_idx, (taxonomy_ids, model_ids, data) in enumerate(t):
data_time.update(time() - batch_end_time)
for k, v in data.items():
data[k] = utils.helpers.var_or_cuda(v)
partial = data['partial_cloud']
gt = data['gtcloud']
pcds, deltas = model(partial)
cd1 = chamfer(pcds[0], gt)
cd2 = chamfer(pcds[1], gt)
cd3 = chamfer(pcds[2], gt)
loss_cd = cd1 + cd2 + cd3
delta_losses = []
for delta in deltas:
delta_losses.append(torch.sum(delta ** 2))
loss_pmd = torch.sum(torch.stack(delta_losses)) / 3
loss = loss_cd * cfg.TRAIN.LAMBDA_CD + loss_pmd * cfg.TRAIN.LAMBDA_PMD
optimizer.zero_grad()
loss.backward()
optimizer.step()
cd1_item = cd1.item() * 1e3
total_cd1 += cd1_item
cd2_item = cd2.item() * 1e3
total_cd2 += cd2_item
cd3_item = cd3.item() * 1e3
total_cd3 += cd3_item
pmd_item = loss_pmd.item()
total_pmd += pmd_item
n_itr = (epoch_idx - 1) * n_batches + batch_idx
train_writer.add_scalar('Loss/Batch/cd1', cd1_item, n_itr)
train_writer.add_scalar('Loss/Batch/cd2', cd2_item, n_itr)
train_writer.add_scalar('Loss/Batch/cd3', cd3_item, n_itr)
train_writer.add_scalar('Loss/Batch/pmd', pmd_item, n_itr)
batch_time.update(time() - batch_end_time)
batch_end_time = time()
t.set_description('[Epoch %d/%d][Batch %d/%d]' % (epoch_idx, cfg.TRAIN.N_EPOCHS, batch_idx + 1, n_batches))
t.set_postfix(loss='%s' % ['%.4f' % l for l in [cd1_item, cd2_item, cd3_item, pmd_item]])
avg_cd1 = total_cd1 / n_batches
avg_cd2 = total_cd2 / n_batches
avg_cd3 = total_cd3 / n_batches
avg_pmd = total_pmd / n_batches
lr_scheduler.step()
epoch_end_time = time()
train_writer.add_scalar('Loss/Epoch/cd1', avg_cd1, epoch_idx)
train_writer.add_scalar('Loss/Epoch/cd2', avg_cd2, epoch_idx)
train_writer.add_scalar('Loss/Epoch/cd3', avg_cd3, epoch_idx)
train_writer.add_scalar('Loss/Epoch/pmd', avg_pmd, epoch_idx)
logging.info(
'[Epoch %d/%d] EpochTime = %.3f (s) Losses = %s' %
(epoch_idx, cfg.TRAIN.N_EPOCHS, epoch_end_time - epoch_start_time, ['%.4f' % l for l in [avg_cd1, avg_cd2, avg_cd3, avg_pmd]]))
# Validate the current model
cd_eval = test_net(cfg, epoch_idx, val_data_loader, val_writer, model)
# Save checkpoints
if epoch_idx % cfg.TRAIN.SAVE_FREQ == 0 or cd_eval < best_metrics:
file_name = 'ckpt-best.pth' if cd_eval < best_metrics else 'ckpt-epoch-%03d.pth' % epoch_idx
output_path = os.path.join(cfg.DIR.CHECKPOINTS, file_name)
torch.save({
'epoch_index': epoch_idx,
'best_metrics': best_metrics,
'model': model.state_dict()
}, output_path)
logging.info('Saved checkpoint to %s ...' % output_path)
if cd_eval < best_metrics:
best_metrics = cd_eval
train_writer.close()
val_writer.close()
|
# uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\clip_control.py
# Compiled at: 2019-04-23 16:19:13
from __future__ import absolute_import, print_function, unicode_literals
from itertools import chain
from contextlib import contextmanager
from MidiRemoteScript import MutableVector
from ableton.v2.base import listens, listens_group, liveobj_valid, listenable_property, task
from ableton.v2.control_surface import Component, WrappingParameter
from ableton.v2.control_surface.control import ButtonControl, EncoderControl, MappedSensitivitySettingControl, ToggleButtonControl
from ableton.v2.control_surface.mode import ModesComponent
from pushbase.clip_control_component import convert_beat_length_to_bars_beats_sixteenths, convert_beat_time_to_bars_beats_sixteenths, LoopSettingsControllerComponent as LoopSettingsControllerComponentBase, AudioClipSettingsControllerComponent as AudioClipSettingsControllerComponentBase, ONE_YEAR_AT_120BPM_IN_BEATS, WARP_MODE_NAMES
from pushbase.note_editor_component import DEFAULT_START_NOTE
from .clip_decoration import ClipDecoratorFactory
from .colors import COLOR_INDEX_TO_SCREEN_COLOR
from .decoration import find_decorated_object
from .drum_group_component import DrumPadColorNotifier
from .real_time_channel import RealTimeDataComponent
from .timeline_navigation import ObjectDescription
PARAMETERS_LOOPED = (
b'Loop position', b'Loop length', b'Start offset')
PARAMETERS_NOT_LOOPED = (b'Start', b'End')
PARAMETERS_AUDIO = (b'Warp', b'Transpose', b'Detune', b'Gain')
def make_color_vector(color_indices):
color_vector = MutableVector()
for color_index in color_indices:
color_vector.append(COLOR_INDEX_TO_SCREEN_COLOR[color_index].as_remote_script_color())
return color_vector
def make_vector(items):
vector = MutableVector()
for item in items:
vector.append(item)
return vector
class LoopSetting(WrappingParameter):
min = -ONE_YEAR_AT_120BPM_IN_BEATS
max = ONE_YEAR_AT_120BPM_IN_BEATS
def __init__(self, use_length_conversion=False, *a, **k):
super(LoopSetting, self).__init__(*a, **k)
assert self.canonical_parent is not None
self._conversion = convert_beat_length_to_bars_beats_sixteenths if use_length_conversion else convert_beat_time_to_bars_beats_sixteenths
self._recording = False
self.set_property_host(self._parent)
self.__on_clip_changed.subject = self.canonical_parent
self.__on_clip_changed()
return
@property
def recording(self):
return self._recording
@recording.setter
def recording(self, value):
self._recording = value
self.notify_value()
@listens(b'clip')
def __on_clip_changed(self):
self.__on_signature_numerator_changed.subject = self.canonical_parent.clip
self.__on_signature_denominator_changed.subject = self.canonical_parent.clip
@listens(b'signature_numerator')
def __on_signature_numerator_changed(self):
self.notify_value()
@listens(b'signature_denominator')
def __on_signature_denominator_changed(self):
self.notify_value()
@property
def display_value(self):
if not liveobj_valid(self.canonical_parent.clip):
return unicode(b'-')
if self.recording:
return unicode(b'...')
return unicode(self._conversion((
self.canonical_parent.clip.signature_numerator,
self.canonical_parent.clip.signature_denominator), self._get_property_value()))
class LoopSettingsControllerComponent(LoopSettingsControllerComponentBase):
__events__ = ('looping', 'loop_parameters', 'zoom', 'clip')
ZOOM_DEFAULT_SENSITIVITY = MappedSensitivitySettingControl.DEFAULT_SENSITIVITY
ZOOM_FINE_SENSITIVITY = MappedSensitivitySettingControl.FINE_SENSITIVITY
zoom_encoder = MappedSensitivitySettingControl()
zoom_touch_encoder = EncoderControl()
loop_button = ToggleButtonControl(toggled_color=b'Clip.Option', untoggled_color=b'Clip.OptionDisabled')
crop_button = ButtonControl(color=b'Clip.Action')
def __init__(self, *a, **k):
super(LoopSettingsControllerComponent, self).__init__(*a, **k)
self._looping_settings = [
LoopSetting(name=PARAMETERS_LOOPED[0], parent=self._loop_model, source_property=b'position'),
LoopSetting(name=PARAMETERS_LOOPED[1], parent=self._loop_model, use_length_conversion=True, source_property=b'loop_length'),
LoopSetting(name=PARAMETERS_LOOPED[2], parent=self._loop_model, source_property=b'start_marker')]
self._non_looping_settings = [
LoopSetting(name=PARAMETERS_NOT_LOOPED[0], parent=self._loop_model, source_property=b'loop_start'),
LoopSetting(name=PARAMETERS_NOT_LOOPED[1], parent=self._loop_model, source_property=b'loop_end')]
for setting in self._looping_settings + self._non_looping_settings:
self.register_disconnectable(setting)
self.__on_looping_changed.subject = self._loop_model
self.__on_looping_changed()
def update(self):
super(LoopSettingsControllerComponent, self).update()
if self.is_enabled():
self.notify_timeline_navigation()
self.notify_clip()
@loop_button.toggled
def loop_button(self, toggled, button):
self._loop_model.looping = toggled
@crop_button.pressed
def crop_button(self, button):
if liveobj_valid(self.clip):
self.clip.crop()
@property
def looping(self):
if self.clip:
return self._loop_model.looping
return False
@property
def loop_parameters(self):
if not liveobj_valid(self.clip):
return []
parameters = self._looping_settings if self.looping else self._non_looping_settings
if self.zoom:
return [self.zoom] + parameters
return parameters
@property
def zoom(self):
if liveobj_valid(self.clip):
return getattr(self.clip, b'zoom', None)
else:
return
@listenable_property
def timeline_navigation(self):
if liveobj_valid(self.clip):
return getattr(self.clip, b'timeline_navigation', None)
else:
return
@listens(b'is_recording')
def __on_is_recording_changed(self):
self._update_recording_state()
@listens(b'is_overdubbing')
def __on_is_overdubbing_changed(self):
self._update_recording_state()
def _update_recording_state(self):
clip = self._loop_model.clip
if liveobj_valid(clip):
recording = clip.is_recording and not clip.is_overdubbing
self._looping_settings[1].recording = recording
self._non_looping_settings[1].recording = recording
@listens(b'looping')
def __on_looping_changed(self):
self._update_and_notify()
def _update_loop_button(self):
self.loop_button.enabled = liveobj_valid(self.clip)
if liveobj_valid(self.clip):
self.loop_button.is_toggled = self._loop_model.looping
def _on_clip_changed(self):
if self.timeline_navigation is not None:
self.timeline_navigation.reset_focus_and_animation()
self._update_and_notify()
self.__on_is_recording_changed.subject = self._loop_model.clip
self.__on_is_overdubbing_changed.subject = self._loop_model.clip
self._update_recording_state()
self.crop_button.enabled = liveobj_valid(self.clip) and self.clip.is_midi_clip
self._connect_encoder()
if self.is_enabled():
self.notify_timeline_navigation()
return
def _on_clip_start_marker_touched(self):
if self.timeline_navigation is not None:
self.timeline_navigation.touch_object(self.timeline_navigation.start_marker_focus)
return
def _on_clip_position_touched(self):
if self.timeline_navigation is not None:
self.timeline_navigation.touch_object(self.timeline_navigation.loop_start_focus)
return
def _on_clip_end_touched(self):
if self.timeline_navigation is not None:
self.timeline_navigation.touch_object(self.timeline_navigation.loop_end_focus)
return
def _on_clip_start_marker_released(self):
if self.timeline_navigation is not None:
self.timeline_navigation.release_object(self.timeline_navigation.start_marker_focus)
return
def _on_clip_position_released(self):
if self.timeline_navigation is not None:
self.timeline_navigation.release_object(self.timeline_navigation.loop_start_focus)
return
def _on_clip_end_released(self):
if self.timeline_navigation is not None:
self.timeline_navigation.release_object(self.timeline_navigation.loop_end_focus)
return
@zoom_touch_encoder.touched
def zoom_touch_encoder(self, encoder):
if self.timeline_navigation is not None:
self.timeline_navigation.touch_object(self.timeline_navigation.zoom_focus)
return
@zoom_touch_encoder.released
def zoom_touch_encoder(self, encoder):
if self.timeline_navigation is not None:
self.timeline_navigation.release_object(self.timeline_navigation.zoom_focus)
return
def _update_and_notify(self):
self._update_loop_button()
self.notify_looping()
self.notify_loop_parameters()
self.notify_zoom()
def _connect_encoder(self):
self.zoom_encoder.mapped_parameter = self.zoom
self.zoom_encoder.update_sensitivities(self.ZOOM_DEFAULT_SENSITIVITY, self.ZOOM_FINE_SENSITIVITY)
def set_zoom_encoder(self, encoder):
self.zoom_encoder.set_control_element(encoder)
self.zoom_touch_encoder.set_control_element(encoder)
self._connect_encoder()
class GainSetting(WrappingParameter):
def __init__(self, *a, **k):
super(GainSetting, self).__init__(*a, **k)
self.set_property_host(self._parent)
@property
def display_value(self):
return unicode(self._property_host.clip.gain_display_string if self._property_host.clip else b'')
class PitchSetting(WrappingParameter):
def __init__(self, min_value, max_value, unit, *a, **k):
super(PitchSetting, self).__init__(*a, **k)
self._min = min_value
self._max = max_value
self._unit = unit
self.set_property_host(self._parent)
@property
def min(self):
return self._min
@property
def max(self):
return self._max
@property
def display_value(self):
value = int(round(float(self._get_property_value())))
positive_indicator = b'+' if value > 0 else b''
return positive_indicator + str(value) + self._unit
class WarpSetting(WrappingParameter):
def __init__(self, *a, **k):
super(WarpSetting, self).__init__(*a, **k)
self.set_property_host(self._parent)
@property
def max(self):
return len(self._property_host.available_warp_modes) - 1
@property
def is_quantized(self):
return True
@property
def value_items(self):
return map(lambda x: unicode(WARP_MODE_NAMES[x]), self._property_host.available_warp_modes)
def _get_property_value(self):
return self._property_host.available_warp_modes.index(getattr(self._property_host, self._source_property))
class AudioClipSettingsControllerComponent(AudioClipSettingsControllerComponentBase):
__events__ = ('audio_parameters', 'warping', 'gain')
def __init__(self, *a, **k):
super(AudioClipSettingsControllerComponent, self).__init__(*a, **k)
self._audio_clip_parameters = [
WarpSetting(name=PARAMETERS_AUDIO[0], parent=self._audio_clip_model, source_property=b'warp_mode'),
PitchSetting(name=PARAMETERS_AUDIO[1], parent=self._audio_clip_model, source_property=b'pitch_coarse', min_value=-49.0, max_value=49.0, unit=b'st'),
PitchSetting(name=PARAMETERS_AUDIO[2], parent=self._audio_clip_model, source_property=b'pitch_fine', min_value=-51.0, max_value=51.0, unit=b'ct'),
GainSetting(name=PARAMETERS_AUDIO[3], parent=self._audio_clip_model, source_property=b'gain')]
self._playhead_real_time_data = RealTimeDataComponent(channel_type=b'playhead', parent=self)
self._waveform_real_time_data = RealTimeDataComponent(channel_type=b'waveform', parent=self)
for parameter in self._audio_clip_parameters:
self.register_disconnectable(parameter)
self.__on_warping_changed.subject = self._audio_clip_model
self.__on_gain_changed.subject = self._audio_clip_model
self.__on_warping_changed()
self.__on_gain_changed()
def disconnect(self):
super(AudioClipSettingsControllerComponent, self).disconnect()
self._playhead_real_time_data.set_data(None)
self._waveform_real_time_data.set_data(None)
return
@property
def audio_parameters(self):
if liveobj_valid(self.clip):
return self._audio_clip_parameters
return []
@property
def warping(self):
if liveobj_valid(self.clip):
return self._audio_clip_model.warping
return False
@property
def gain(self):
if liveobj_valid(self.clip):
return self._audio_clip_model.gain
return 0.0
@property
def waveform_real_time_channel_id(self):
return self._waveform_real_time_data.channel_id
@property
def playhead_real_time_channel_id(self):
return self._playhead_real_time_data.channel_id
def _on_clip_changed(self):
self._playhead_real_time_data.set_data(self.clip)
self._waveform_real_time_data.set_data(self.clip)
self.__on_file_path_changed.subject = self.clip
self.notify_audio_parameters()
self.notify_warping()
self.notify_gain()
def _on_transpose_encoder_value(self, value):
self._audio_clip_model.set_clip_pitch_coarse(value, False)
def _on_detune_encoder_value(self, value):
self._audio_clip_model.set_clip_pitch_fine(value, False)
@listens(b'warping')
def __on_warping_changed(self):
self.notify_warping()
@listens(b'gain')
def __on_gain_changed(self):
self.notify_gain()
@listens(b'file_path')
def __on_file_path_changed(self):
self._waveform_real_time_data.invalidate()
def register_matrix_mode(matrix_map, name, modes_component=None, parent_path=None):
def find_leaf(tree, path):
key_name, rest = path[0], path[1:]
if key_name not in tree:
tree[key_name] = dict(modes_component=None, children={})
sub_tree = tree[key_name][b'children']
if len(rest) == 0:
return sub_tree
else:
return find_leaf(sub_tree, rest)
matrix_map_to_edit = matrix_map if parent_path is None else find_leaf(matrix_map, parent_path)
children_to_add_to_map = matrix_map_to_edit[name].get(b'children', {}) if name in matrix_map_to_edit else {}
matrix_map_to_edit[name] = dict(modes_component=modes_component, children=children_to_add_to_map)
return
class MatrixModeWatcherComponent(Component):
def __init__(self, matrix_mode_map=None, *a, **k):
super(MatrixModeWatcherComponent, self).__init__(*a, **k)
self._matrix_mode_map = matrix_mode_map
def connect_listeners(dct):
for key, value in dct.iteritems():
if key == b'modes_component':
self.register_slot(value, self.__on_matrix_mode_changed, b'selected_mode')
else:
connect_listeners(value)
connect_listeners(matrix_mode_map)
self._matrix_mode_path = None
self.matrix_mode_path = self.create_mode_path(matrix_mode_map)
return
@staticmethod
def create_mode_path(initial_mode_map):
mode_path = []
def create_mode_path_recursive(mode_map):
mode_entry = mode_map.keys()[0]
mode_path.append(mode_entry)
parent = mode_map[mode_entry]
children = parent[b'children']
modes_comp = parent[b'modes_component']
selected_mode = modes_comp.selected_mode
if selected_mode in children:
if len(children[selected_mode][b'children'].keys()) > 0:
return create_mode_path_recursive({selected_mode: children[selected_mode]})
mode_path.append(selected_mode)
return mode_path
return (b'.').join(create_mode_path_recursive(initial_mode_map))
@listenable_property
def matrix_mode_path(self):
return self._matrix_mode_path
@matrix_mode_path.setter
def matrix_mode_path(self, mode):
if self._matrix_mode_path != mode:
self._matrix_mode_path = mode
self.notify_matrix_mode_path()
def __on_matrix_mode_changed(self, mode):
if mode is not None:
self.matrix_mode_path = self.create_mode_path(self._matrix_mode_map)
return
_MATRIX_MODE_PATH_TO_DATA = {b'matrix_modes.note.instrument.play': {b'Fold': True, b'NumDisplayKeys': 0, b'ShowGridWindow': False, b'ShowScrollbarCursor': False, b'NumPitches': 128, b'PitchOffset': 0, b'ShowStepLengthGrid': False, b'ShowMultipleGridWindows': False},
b'matrix_modes.note.instrument.sequence': {b'Fold': False, b'NumDisplayKeys': 23,
b'ShowGridWindow': True, b'ShowScrollbarCursor': True, b'NumPitches': 128, b'PitchOffset': 0, b'ShowStepLengthGrid': True, b'ShowMultipleGridWindows': False},
b'matrix_modes.note.instrument.split_melodic_sequencer': {b'Fold': True, b'NumDisplayKeys': 32,
b'ShowGridWindow': True,
b'ShowScrollbarCursor': True,
b'NumPitches': 128,
b'PitchOffset': 0,
b'ShowStepLengthGrid': True,
b'ShowMultipleGridWindows': True},
b'matrix_modes.note.drums.64pads': {
b'Fold': True, b'NumDisplayKeys': 0,
b'ShowGridWindow': False,
b'ShowScrollbarCursor': False, b'NumPitches': 128,
b'PitchOffset': 0,
b'ShowStepLengthGrid': False,
b'ShowMultipleGridWindows': False},
b'matrix_modes.note.drums.sequencer_loop': {
b'Fold': False,
b'NumDisplayKeys': 17,
b'ShowGridWindow': True, b'ShowScrollbarCursor': True, b'NumPitches': 128, b'PitchOffset': 0, b'ShowStepLengthGrid': True, b'ShowMultipleGridWindows': False},
b'matrix_modes.note.drums.sequencer_velocity_levels': {b'Fold': False, b'NumDisplayKeys': 17, b'ShowGridWindow': True, b'ShowScrollbarCursor': True, b'NumPitches': 128, b'PitchOffset': 0, b'ShowStepLengthGrid': True, b'ShowMultipleGridWindows': False}, b'matrix_modes.note.slicing.64pads': {b'Fold': True, b'NumDisplayKeys': 0, b'ShowGridWindow': False, b'ShowScrollbarCursor': False, b'NumPitches': 64, b'PitchOffset': 36, b'ShowStepLengthGrid': False, b'ShowMultipleGridWindows': False}, b'matrix_modes.note.slicing.sequencer_loop': {b'Fold': False, b'NumDisplayKeys': 17, b'ShowGridWindow': True, b'ShowScrollbarCursor': True, b'NumPitches': 64, b'PitchOffset': 36, b'ShowStepLengthGrid': True, b'ShowMultipleGridWindows': False}, b'matrix_modes.note.slicing.sequencer_velocity_levels': {b'Fold': False, b'NumDisplayKeys': 17, b'ShowGridWindow': True, b'ShowScrollbarCursor': True, b'NumPitches': 64, b'PitchOffset': 36, b'ShowStepLengthGrid': True, b'ShowMultipleGridWindows': False}, b'matrix_modes.session': {b'Fold': True, b'NumDisplayKeys': 0, b'ShowGridWindow': False, b'ShowScrollbarCursor': False, b'NumPitches': 128, b'PitchOffset': 0, b'ShowStepLengthGrid': False, b'ShowMultipleGridWindows': False}}
_DEFAULT_VIEW_DATA = {b'Fold': True, b'NumDisplayKeys': 0, b'ShowGridWindow': False, b'ShowScrollbarCursor': False, b'MinPitch': DEFAULT_START_NOTE, b'MaxSequenceablePitch': DEFAULT_START_NOTE, b'MinSequenceablePitch': DEFAULT_START_NOTE, b'PageIndex': 0, b'PageLength': 1.0, b'MinGridWindowPitch': DEFAULT_START_NOTE, b'MaxGridWindowPitch': DEFAULT_START_NOTE, b'NumPitches': 128, b'PitchOffset': 0, b'ShowStepLengthGrid': False, b'IsRecording': False, b'ShowMultipleGridWindows': False}
def get_static_view_data(matrix_mode_path):
return _MATRIX_MODE_PATH_TO_DATA.get(matrix_mode_path, _DEFAULT_VIEW_DATA)
class MidiClipControllerComponent(Component):
grid_window_focus = b'grid_window_start'
def __init__(self, *a, **k):
super(MidiClipControllerComponent, self).__init__(*a, **k)
self._configure_vis_task = self._tasks.add(task.sequence(task.delay(1), task.run(self._configure_visualisation))).kill()
self._clip = None
self._matrix_mode_watcher = None
self._most_recent_base_note = DEFAULT_START_NOTE
self._most_recent_max_note = DEFAULT_START_NOTE
self._loose_follow_base_note = DEFAULT_START_NOTE
self._most_recent_editable_pitches = (DEFAULT_START_NOTE, DEFAULT_START_NOTE)
self._most_recent_row_start_times = []
self._most_recent_step_length = 1.0
self._most_recent_page_index = 0
self._most_recent_page_length = 1.0
self._most_recent_editing_note_regions = []
self._visualisation_real_time_data = RealTimeDataComponent(channel_type=b'visualisation', parent=self)
self.__on_visualisation_channel_changed.subject = self._visualisation_real_time_data
self.__on_visualisation_attached.subject = self._visualisation_real_time_data
self._instruments = []
self._sequencers = []
self._mute_during_track_change_components = []
self._note_settings_component = None
self._note_editor_settings_component = None
self._real_time_data_attached = False
self._drum_rack_finder = None
self._drum_pad_color_notifier = self.register_disconnectable(DrumPadColorNotifier())
self.__on_note_colors_changed.subject = self._drum_pad_color_notifier
return
@property
def clip(self):
return self._clip
@clip.setter
def clip(self, clip):
self._clip = clip
self._on_clip_changed()
@listenable_property
def visualisation_real_time_channel_id(self):
return self._visualisation_real_time_data.channel_id
def set_drum_rack_finder(self, finder_component):
self._drum_rack_finder = finder_component
self.__on_drum_rack_changed.subject = self._drum_rack_finder
self.__on_drum_rack_changed()
def set_matrix_mode_watcher(self, watcher):
self._matrix_mode_watcher = watcher
self.__on_matrix_mode_changed.subject = watcher
def external_regions_of_interest_creator(self, region_of_interest_creator):
def grid_start_time():
return self._most_recent_page_index * self._most_recent_page_length
return {b'grid_window': region_of_interest_creator(start_identifier=self.grid_window_focus, getter=lambda : (
grid_start_time(),
grid_start_time() + self._most_recent_page_length))}
@property
def external_focusable_object_descriptions(self):
return {self.grid_window_focus: ObjectDescription(('start_end', 'loop', 'grid_window'), self.grid_window_focus)}
def set_note_settings_component(self, note_settings_component):
self._note_settings_component = note_settings_component
self.__on_note_settings_enabled_changed.subject = note_settings_component
def set_note_editor_settings_component(self, note_editor_settings_component):
self._note_editor_settings_component = note_editor_settings_component
self.__on_note_editor_settings_touched_changed.subject = note_editor_settings_component
def add_instrument_component(self, instrument):
self.__on_instrument_position_changed.add_subject(instrument)
self._instruments.append(instrument)
def add_mute_during_track_change_component(self, component):
self._mute_during_track_change_components.append(component)
def add_paginator(self, paginator):
self.__on_paginator_page_index_changed.add_subject(paginator)
self.__on_paginator_page_length_changed.add_subject(paginator)
def add_sequencer(self, sequencer):
self.__on_editable_pitches_changed.add_subject(sequencer)
self.__on_row_start_times_changed.add_subject(sequencer)
self.__on_step_length_changed.add_subject(sequencer)
self.__on_selected_notes_changed.add_subject(sequencer)
self._sequencers.append(sequencer)
def disconnect(self):
super(MidiClipControllerComponent, self).disconnect()
self._visualisation_real_time_data.set_data(None)
return
def update(self):
super(MidiClipControllerComponent, self).update()
self._update_notification_mutes()
if self.is_enabled():
self.__on_matrix_mode_changed()
def _on_clip_changed(self):
self._visualisation_real_time_data.set_data(getattr(self.clip, b'proxied_object', self.clip))
self.__on_clip_color_changed.subject = self.clip
timeline_navigation = getattr(self.clip, b'timeline_navigation', None)
self.__on_visible_region_changed.subject = timeline_navigation
self.__on_focus_marker_changed.subject = timeline_navigation
self.__on_show_focus_changed.subject = timeline_navigation
return
def _focus_grid_window(self):
if liveobj_valid(self.clip) and self.get_static_view_data()[b'ShowGridWindow']:
self.clip.timeline_navigation.change_object(self.grid_window_focus)
def _configure_visualisation_delayed(self):
self._configure_vis_task.restart()
@listens(b'instrument')
def __on_drum_rack_changed(self):
self._drum_pad_color_notifier.set_drum_group(self._drum_rack_finder.drum_group)
@listens(b'enabled')
def __on_note_settings_enabled_changed(self, _):
self._configure_visualisation()
@listens(b'is_touched')
def __on_note_editor_settings_touched_changed(self):
self._configure_visualisation()
@listens_group(b'editing_note_regions')
def __on_selected_notes_changed(self, sequencer):
self._most_recent_editing_note_regions = sequencer.editing_note_regions
self._configure_visualisation()
@listens(b'channel_id')
def __on_visualisation_channel_changed(self):
self.notify_visualisation_real_time_channel_id()
@listens(b'attached')
def __on_visualisation_attached(self):
self._real_time_data_attached = True
self._configure_visualisation()
@listens(b'color_index')
def __on_clip_color_changed(self):
self._configure_visualisation()
@listens(b'visible_region')
def __on_visible_region_changed(self, *a):
self._configure_visualisation()
@listens(b'focus_marker')
def __on_focus_marker_changed(self, *a):
self._configure_visualisation()
@listens(b'show_focus')
def __on_show_focus_changed(self, *a):
self._configure_visualisation()
@listens(b'matrix_mode_path')
def __on_matrix_mode_changed(self):
if self.is_enabled() and self._matrix_mode_watcher:
static_view_data = self.get_static_view_data()
if self.matrix_mode_path() == b'matrix_modes.note.instrument.sequence':
num_visible_keys = static_view_data[b'NumDisplayKeys']
lower = self._most_recent_editable_pitches[0]
upper = self._most_recent_editable_pitches[(-1)]
self._loose_follow_base_note = (lower + upper) // 2 - num_visible_keys // 2
if static_view_data[b'ShowGridWindow']:
self._focus_grid_window()
elif liveobj_valid(self.clip):
nav = self.clip.timeline_navigation
nav.set_focus_marker_without_updating_visible_region(b'start_marker')
self._configure_visualisation()
self._update_notification_mutes()
@listens_group(b'position')
def __on_instrument_position_changed(self, instrument):
self._most_recent_base_note = instrument.min_pitch
self._most_recent_max_note = instrument.max_pitch
self._configure_visualisation()
@listens(b'note_colors')
def __on_note_colors_changed(self):
self._configure_visualisation()
@listens_group(b'editable_pitches')
def __on_editable_pitches_changed(self, sequencer):
self._most_recent_editable_pitches = sequencer.editable_pitches
if self.is_enabled():
self._configure_visualisation_delayed()
@listens_group(b'row_start_times')
def __on_row_start_times_changed(self, sequencer):
if sequencer.is_enabled():
self._most_recent_row_start_times = sequencer.row_start_times
self._configure_visualisation_delayed()
@listens_group(b'step_length')
def __on_step_length_changed(self, sequencer):
if sequencer.is_enabled():
self._most_recent_step_length = sequencer.step_length
self._configure_visualisation()
@listens_group(b'page_index')
def __on_paginator_page_index_changed(self, paginator):
self._most_recent_page_index = paginator.page_index
self._focus_grid_window()
self._configure_visualisation()
@listens_group(b'page_length')
def __on_paginator_page_length_changed(self, paginator):
self._most_recent_page_length = paginator.page_length
self._focus_grid_window()
self._configure_visualisation()
def get_static_view_data(self):
return get_static_view_data(self._matrix_mode_watcher.matrix_mode_path)
def _add_items_to_view_data(self, view_data):
for key, value in self.get_static_view_data().iteritems():
view_data[key] = value
def matrix_mode_path(self):
if self._matrix_mode_watcher is not None:
return self._matrix_mode_watcher.matrix_mode_path
else:
return
def _update_notification_mutes(self):
for component in chain(self._sequencers, self._instruments):
if hasattr(component, b'show_notifications'):
component.show_notifications = not (self.is_enabled() and self.get_static_view_data()[b'ShowScrollbarCursor'])
def mute_components_during_track_change(self, muted):
if self.is_enabled():
for component in self._mute_during_track_change_components:
component.muted = muted
@contextmanager
def changing_track(self):
self.mute_components_during_track_change(True)
self._real_time_data_attached = False
yield
self.mute_components_during_track_change(False)
def _update_minimum_pitch(self):
if self.matrix_mode_path() == b'matrix_modes.note.instrument.sequence':
num_visible_keys = self.get_static_view_data()[b'NumDisplayKeys']
lower = self._most_recent_editable_pitches[0]
upper = self._most_recent_editable_pitches[(-1)]
window_size = upper - lower
base_note = self._loose_follow_base_note
if window_size >= num_visible_keys / 3:
base_note = (lower + upper) / 2 - num_visible_keys / 2
elif lower - window_size < base_note:
base_note = lower - window_size
else:
if upper + window_size > base_note + num_visible_keys:
base_note = upper + window_size - num_visible_keys
self._loose_follow_base_note = max(0, min(127 - num_visible_keys, base_note))
return self._loose_follow_base_note
return self._most_recent_base_note
def _update_maximum_sequenceable_pitch(self):
if self.matrix_mode_path() == b'matrix_modes.note.instrument.sequence':
return self._most_recent_editable_pitches[(-1)]
return self._most_recent_max_note
def _update_minimum_sequenceable_pitch(self):
if self.matrix_mode_path() == b'matrix_modes.note.instrument.sequence':
return self._most_recent_editable_pitches[0]
return self._most_recent_base_note
def _update_note_colors(self):
matrix_mode = self.matrix_mode_path()
in_correct_mode = matrix_mode is not None and matrix_mode.startswith(b'matrix_modes.note.drums') or matrix_mode == b'matrix_modes.session'
note_colors = self._drum_pad_color_notifier.note_colors if in_correct_mode and self._drum_pad_color_notifier.has_drum_group else []
return make_color_vector(note_colors)
def _configure_visualisation(self):
visualisation = self._visualisation_real_time_data.device_visualisation()
if liveobj_valid(visualisation) and liveobj_valid(self.clip) and self._real_time_data_attached:
color = COLOR_INDEX_TO_SCREEN_COLOR[self.clip.color_index]
visible_region = self.clip.zoom.visible_region
focus_marker = self.clip.timeline_navigation.focus_marker
new_data = {b'ClipColor': color.as_remote_script_color(),
b'PageIndex': self._most_recent_page_index,
b'PageLength': float(self._most_recent_page_length),
b'RowStartTimes': make_vector(self._most_recent_row_start_times),
b'StepLength': float(self._most_recent_step_length),
b'MinGridWindowPitch': self._most_recent_editable_pitches[0],
b'MaxGridWindowPitch': self._most_recent_editable_pitches[(-1)],
b'GridWindowPitches': make_vector(self._most_recent_editable_pitches),
b'MinPitch': self._update_minimum_pitch(),
b'MaxSequenceablePitch': self._update_maximum_sequenceable_pitch(),
b'MinSequenceablePitch': self._update_minimum_sequenceable_pitch(),
b'NoteColors': self._update_note_colors(),
b'IsRecording': liveobj_valid(self.clip) and self.clip.is_recording and not self.clip.is_overdubbing,
b'NoteSettingsMode': self._note_settings_component is not None and self._note_settings_component.is_enabled(),
b'NoteSettingsTouched': self._note_editor_settings_component is not None and self._note_editor_settings_component.is_enabled() and self._note_editor_settings_component.is_touched,
b'EditingNotePitches': make_vector([ pitch for pitch, (start, end) in self._most_recent_editing_note_regions
]),
b'EditingNoteStarts': make_vector([ float(start) for pitch, (start, end) in self._most_recent_editing_note_regions
]),
b'EditingNoteEnds': make_vector([ float(end) for pitch, (start, end) in self._most_recent_editing_note_regions
]),
b'DisplayStartTime': float(visible_region.start),
b'DisplayEndTime': float(visible_region.end),
b'FocusMarkerName': focus_marker.name,
b'FocusMarkerPosition': focus_marker.position,
b'ShowFocus': self.clip.timeline_navigation.show_focus}
view_data = visualisation.get_view_data()
if self._matrix_mode_watcher is not None:
self._add_items_to_view_data(view_data)
for key, value in new_data.iteritems():
view_data[key] = value
visualisation.set_view_data(view_data)
return
class ClipControlComponent(Component):
__events__ = ('clip', )
def __init__(self, decorator_factory=None, *a, **k):
super(ClipControlComponent, self).__init__(*a, **k)
self._clip = None
self.midi_loop_controller = LoopSettingsControllerComponent(parent=self)
self.audio_loop_controller = LoopSettingsControllerComponent(parent=self)
self.audio_clip_controller = AudioClipSettingsControllerComponent(parent=self)
self.midi_clip_controller = MidiClipControllerComponent(parent=self)
self.mode_selector = ModesComponent(parent=self)
self._decorator_factory = decorator_factory or ClipDecoratorFactory()
self.__on_selected_scene_changed.subject = self.song.view
self.__on_selected_track_changed.subject = self.song.view
self.__on_selected_clip_changed.subject = self.song.view
self.__on_has_clip_changed.subject = self.song.view.highlighted_clip_slot
self._update_controller()
return
@listens(b'selected_scene')
def __on_selected_scene_changed(self):
self._update_controller()
@listens(b'selected_track')
def __on_selected_track_changed(self):
self._update_controller()
@listens(b'detail_clip')
def __on_selected_clip_changed(self):
self._update_controller()
def on_enabled_changed(self):
super(ClipControlComponent, self).on_enabled_changed()
self._update_controller()
def _decorate_clip(self, clip):
return find_decorated_object(clip, self._decorator_factory) or self._decorator_factory.decorate(clip)
@listens(b'has_clip')
def __on_has_clip_changed(self):
self._update_controller()
def _update_controller(self):
if self.is_enabled():
clip = self.song.view.detail_clip
track = None
if liveobj_valid(clip):
if clip.is_arrangement_clip:
track = clip.canonical_parent
else:
clip_slot = clip.canonical_parent
track = clip_slot.canonical_parent if clip_slot else None
if track != self.song.view.selected_track:
clip = None
self._update_selected_mode(clip)
audio_clip = None
midi_clip = None
if liveobj_valid(clip) and clip.is_audio_clip:
audio_clip = clip
else:
midi_clip = clip
self.audio_clip_controller.clip = audio_clip
self.audio_loop_controller.clip = self._decorate_clip(audio_clip)
decorated_midi_clip = self._decorate_clip(midi_clip)
self.midi_clip_controller.clip = decorated_midi_clip
self.midi_loop_controller.clip = decorated_midi_clip
self.__on_has_clip_changed.subject = self.song.view.highlighted_clip_slot
self._clip = clip
self.notify_clip()
return
def _update_selected_mode(self, clip):
if liveobj_valid(clip):
self.mode_selector.selected_mode = b'audio' if clip.is_audio_clip else b'midi'
else:
self.mode_selector.selected_mode = b'no_clip'
@property
def clip(self):
return self._clip |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ARB_sync'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_ARB_sync',False)
_p.unpack_constants( """GL_MAX_SERVER_WAIT_TIMEOUT 0x9111
GL_OBJECT_TYPE 0x9112
GL_SYNC_CONDITION 0x9113
GL_SYNC_STATUS 0x9114
GL_SYNC_FLAGS 0x9115
GL_SYNC_FENCE 0x9116
GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117
GL_UNSIGNALED 0x9118
GL_SIGNALED 0x9119
GL_ALREADY_SIGNALED 0x911A
GL_TIMEOUT_EXPIRED 0x911B
GL_CONDITION_SATISFIED 0x911C
GL_WAIT_FAILED 0x911D
GL_SYNC_FLUSH_COMMANDS_BIT 0x1
GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFF""", globals())
@_f
@_p.types(_cs.GLsync,_cs.GLenum,_cs.GLbitfield)
def glFenceSync( condition,flags ):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLsync)
def glIsSync( sync ):pass
@_f
@_p.types(None,_cs.GLsync)
def glDeleteSync( sync ):pass
@_f
@_p.types(_cs.GLenum,_cs.GLsync,_cs.GLbitfield,_cs.GLuint64)
def glClientWaitSync( sync,flags,timeout ):pass
@_f
@_p.types(None,_cs.GLsync,_cs.GLbitfield,_cs.GLuint64)
def glWaitSync( sync,flags,timeout ):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLint64Array)
def glGetInteger64v( pname,params ):pass
@_f
@_p.types(None,_cs.GLsync,_cs.GLenum,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLintArray)
def glGetSynciv( sync,pname,bufSize,length,values ):pass
def glInitSyncARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
|
from typing import Dict
import tensorflow as tf
__all__ = ["TargetLengthCrop1D"]
class TargetLengthCrop1D(tf.keras.layers.Layer):
def __init__(self, target_length: int, name="target_length_crop", **kwargs) -> None:
super(TargetLengthCrop1D, self).__init__(name=name, **kwargs)
self._target_length = target_length
def call(self, inputs: tf.Tensor) -> tf.Tensor:
trim = (inputs.shape[-2] - self._target_length) // 2
        if trim < 0:
            raise ValueError("inputs shorter than target length")
        if trim == 0:
            return inputs
        return inputs[..., trim:-trim, :]
def get_config(self) -> Dict:
config = {"target_length": self._target_length}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shape: tf.TensorShape) -> tf.TensorShape:
return input_shape
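# A minimal usage sketch (not part of the original module): crop a
# (batch, length, channels) tensor to an illustrative target length.
if __name__ == "__main__":
    crop = TargetLengthCrop1D(target_length=4)
    x = tf.zeros((2, 8, 16))   # length 8 -> trims 2 positions on each side
    print(crop(x).shape)       # expected: (2, 4, 16)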
|
from flask import Flask, jsonify, request
from random import randint
import psycopg2
conn = None
try:
conn = psycopg2.connect("dbname='rolldb' user='flaskadmin' host='localhost' password='flaskadmin1'")
except psycopg2.OperationalError as exc:
    print("I am unable to connect to the database:", exc)
app = Flask(__name__)
@app.route('/')
def handle_root():
return '<p>Hello World</p>'
@app.route('/data/<id>')
def handle_data(id):
data = {
'users': [
{
'id': id,
'first_name': 'Vjeko',
'last_name': 'Slav',
'age': 24
}
]
}
return jsonify(data)
@app.route('/roll', methods=['POST'])
def handle_roll():
data = request.get_json()
total = 0
rollResults = []
diceSides = data['diceSides']
numberOfRolls = data['numberOfRolls']
for _ in range(numberOfRolls):
roll = randint(1, diceSides)
rollResults.append(roll)
total += roll
response = {
'query': str(numberOfRolls) + 'd' + str(diceSides),
'rollResults': rollResults,
'total': total
}
return jsonify(response)
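# Illustrative request/response for the /roll endpoint (values are examples):
#   POST /roll with JSON body {"diceSides": 6, "numberOfRolls": 3}
#   returns something like
#   {"query": "3d6", "rollResults": [2, 5, 3], "total": 10}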
@app.route('/stuff', methods=['GET'])
def handle_stuff():
    # Guard against the connection having failed at import time.
    if conn is None:
        return jsonify({'error': 'database connection unavailable'}), 500
    cur = conn.cursor()
    cur.execute('select * from stuff;')
    result = cur.fetchall()
    cur.close()
return jsonify(result) |
def build(soup, payload):
build_link(soup, payload)
build_summary(soup, payload)
build_tags(soup, payload)
def build_link(soup, payload):
tag = soup.find('link', rel="canonical")
if tag:
payload['id'] = tag.get('href')
payload['url'] = tag.get('href')
def build_summary(soup, payload):
tag = soup.find('meta', attrs={'name':'description'})
if tag:
payload['summary'] = tag.get('content')
def build_tags(soup, payload):
tag = soup.find('meta', attrs={'name':'keywords'})
if tag:
payload['tags'] = tag.get('content')
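# Minimal usage sketch (assumption: BeautifulSoup is the parser these
# functions expect; it is not imported by this module itself).
if __name__ == '__main__':
    from bs4 import BeautifulSoup
    html = ('<head>'
            '<link rel="canonical" href="https://example.com/post"/>'
            '<meta name="description" content="A short summary."/>'
            '<meta name="keywords" content="python,scraping"/>'
            '</head>')
    payload = {}
    build(BeautifulSoup(html, 'html.parser'), payload)
    print(payload)  # expected keys: id, url, summary, tags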
|
"""Unit test package for arcliv."""
|
from __future__ import unicode_literals, print_function, absolute_import
from builtins import input
import bibtexparser
# from . import __version__
# from lxml.etree import ParserError
import re
from title2bib.crossref import get_bib_from_title
from scihub2pdf.scihub import SciHub
from scihub2pdf.libgen import LibGen
from scihub2pdf.arxiv import Arxiv
headers = {
# "Connection": "keep-alive",
# "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
}
# print("\n\t Checking state of scihub...")
# url_state = "https://raw.githubusercontent.com/bibcure/scihub_state/master/state.txt"
# try:
# r = requests.get(url_state, headers=headers)
# state_scihub = [i.split(">>")[1] for i in r.iter_lines()]
# url_captcha_scihub = state_scihub[0]
# url_libgen = state_scihub[1]
# url_scihub = state_scihub[2]
# xpath_libgen_a = state_scihub[3]
# xpath_scihub_captcha = state_scihub[4]
# xpath_scihub_iframe = state_scihub[5]
# xpath_scihub_pdf = state_scihub[6]
# has_update = state_scihub[7] != __version__
# if has_update:
# print("\n\t\tWill be better if you upgrade scihub2pdf.")
# print("\t\tFor that, just do:\n")
# print("\t\t\t sudo pip install scihub2pdf --upgrade\n")
# except:
# s = None
libgen_url = "http://libgen.io/scimag/ads.php"
libgen_xpath_pdf_url = "/html/body/table/tr/td[3]/a"
xpath_captcha = "//*[@id='captcha']"
xpath_pdf = "//*[@id='pdf']"
xpath_input = "/html/body/div/table/tbody/tr/td/form/input"
xpath_form = "/html/body/div/table/tbody/tr/td/form"
domain_scihub = "http://sci-hub.cc/"
ScrapSci = SciHub(headers,
xpath_captcha,
xpath_pdf,
xpath_input,
xpath_form,
domain_scihub
)
ScrapArx = Arxiv(headers)
ScrapLib = LibGen(headers=headers,
libgen_url=libgen_url,
xpath_pdf_url=libgen_xpath_pdf_url)
def start_scihub():
ScrapSci.start()
def start_libgen():
ScrapLib.start()
def start_arxiv():
ScrapArx.start()
def download_from_libgen(doi, pdf_file):
found, r = ScrapLib.navigate_to(doi, pdf_file)
if not found:
return False, r
success, tree = ScrapLib.generate_tree()
if not success:
return False, r
found, url = ScrapLib.get_pdf_url()
if not found:
return False, r
found, r = ScrapLib.download()
return found, r
def download_from_arxiv(value, location, field="id"):
pdf_file = location
if not location.endswith(".pdf"):
pdf_file = location+value+".pdf"
found, pdf_url = ScrapArx.navigate_to(value, pdf_file, field)
if found:
found, r = ScrapArx.download()
return found, pdf_url
def download_from_scihub(doi, pdf_file):
found, r = ScrapSci.navigate_to(doi, pdf_file)
if not found:
return False, r
has_captcha, has_iframe = ScrapSci.check_captcha()
while (has_captcha and has_iframe):
captcha_img = ScrapSci.get_captcha_img()
captcha_img.show()
captcha_text = input("\tPut captcha:\n\t")
has_captcha, has_iframe = ScrapSci.solve_captcha(captcha_text)
if has_iframe:
found, r = ScrapSci.download()
found = has_iframe
return has_iframe, r
def download_pdf_from_bibs(bibs, location="",
use_libgen=False):
def put_pdf_location(bib):
pdf_name = bib["ID"] if "ID" in bib else bib["doi"].replace("/", "_")
pdf_name += ".pdf"
bib["pdf_file"] = location+pdf_name
return bib
# bibs_with_doi = list(filter(lambda bib: "doi" in bib, bibs))
bibs_with_doi = []
# bibs_arxiv = []
for bib in bibs:
if "journal" in bib:
if bool(re.match("arxiv:", bib["journal"], re.I)):
pdf_file = location+bib["journal"]+".pdf"
download_from_arxiv(bib["journal"], pdf_file, "id")
elif "doi" in bib:
bibs_with_doi.append(bib)
elif "doi" in bib:
bibs_with_doi.append(bib)
# bibs_journal = list(filter(lambda bib: "journal" in bib, bibs))
# bibs_arxiv = list(
# filter(
# lambda bib: bool(re.match("arxiv:", bib["journal"], re.I)) in bib, bibs_journal
# )
# )
bibs_with_doi = list(map(put_pdf_location, bibs_with_doi))
# libgen has no captcha, try to use multiprocessing?
if use_libgen:
list(map(
lambda bib: download_from_libgen(bib["doi"], bib["pdf_file"]
),
bibs_with_doi))
else:
for bib in bibs_with_doi:
found, bib = download_from_scihub(bib["doi"], bib["pdf_file"])
def download_from_doi(doi, location="", use_libgen=False):
pdf_name = "{}.pdf".format(doi.replace("/", "_"))
pdf_file = location+pdf_name
if use_libgen:
download_from_libgen(doi, pdf_file)
else:
download_from_scihub(doi, pdf_file)
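# Illustrative call (the DOI and folder are placeholders, not from this module);
# start_scihub() is presumably required first to initialise the scraper session.
# start_scihub()
# download_from_doi("10.1000/placeholder", location="papers/")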
def download_from_title(title, location="", use_libgen=False):
found, bib_string = get_bib_from_title(title)
if found:
bib = bibtexparser.loads(bib_string).entries[0]
if "doi" in bib:
pdf_name = "{}.pdf".format(
bib["doi"].replace("/", "_")
)
bib["pdf_file"] = location+pdf_name
if use_libgen:
download_from_libgen(bib["doi"], bib["pdf_file"])
else:
found, bib = download_from_scihub(bib["doi"], bib["pdf_file"])
else:
print("\tAbsent DOI")
|
import sys
import argparse
from pathlib import Path
from src.utility import *
from src.nut_detector import NutDetector
# st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
sys.stdout = open("log.txt", '+w')
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Commandline application to detect Nuts in a video.')
parser.add_argument('--video_path', type=str, default='../Test/test_video.avi',
help='path of the video to run detection')
parser.add_argument('--result_path', type=str, default='../Results',
help='path to the folder to save evaluation results.')
parser.add_argument('--frozen_graph', type=str, default='../Dependencies/FRCNN_Tray.pb',
help='path to the tensorflow frozen graph.')
parser.add_argument('--pbtxt_path', type=str, default='../Dependencies/opencv_frcnn_tray.pbtxt',
help='path to the pbtxt path.')
args = parser.parse_args()
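    # Illustrative shell invocation (the script filename is an assumption):
    #   python detect_nuts.py --video_path ../Test/test_video.avi --result_path ../Results \
    #       --frozen_graph ../Dependencies/FRCNN_Tray.pb --pbtxt_path ../Dependencies/opencv_frcnn_tray.pbtxt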
# -------------------------------------------------------
    if not (args.video_path and args.result_path and args.frozen_graph and args.pbtxt_path):
        print("Valid '--video_path', '--result_path', '--frozen_graph' or '--pbtxt_path' not set.")
        sys.exit()
# ------
if Path(args.video_path).is_file():
print(INFO("Video found successfully."))
else:
print(ERROR("Video file not valid."))
sys.exit()
# ------
if Path(args.frozen_graph).is_file():
print(INFO("Frozen graph found successfully."))
else:
print(ERROR("Frozen graph file not found."))
sys.exit()
# ------
if Path(args.pbtxt_path).is_file():
print(INFO("Pbtxt found successfully."))
else:
print(ERROR("Pbtxt file not found."))
sys.exit()
# -------------------------------------------------------
nut_detector = NutDetector(args.video_path,args.result_path,args.frozen_graph,args.pbtxt_path)
nut_detector.extract_most_stable_frame()
nut_detector.run_detection()
nut_detector.get_results()
sys.stdout.close() |
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional
from core.common.drivers.base import BaseDialect
from core.common.drivers.column import Column
from . import MonitorConfiguration, MonitorDefinition
from .base import Monitor
from .metrics import MetricBase
from .schedule import Schedule
@dataclass
class SchemaMonitorConfigurationDefaults:
columns: List[Dict[str, str]] = field(default_factory=list)
@dataclass
class SchemaMonitorConfiguration:
table: str
@dataclass
class SchemaMonitorDefinition(MonitorConfiguration, SchemaMonitorConfigurationDefaults, MonitorDefinition, SchemaMonitorConfiguration):
def to_monitor(self, workspace):
return SchemaMonitor.from_definition(self, workspace)
def to_dict(self):
return {
"name": self.name,
"description": self.description,
"type": "schema",
"table": self.table,
"columns": self.columns,
# "schedule_minutes": self.schedule_minutes,
# "schedule_type": self.schedule_type,
# "schedule": Schedule(self.schedule_minutes).to_dict(),
}
def extract_or_default(obj, key, default):
return obj[key] if key in obj else default
class SchemaMetricType(Enum):
COLUMN_NAMES = 'name'
COLUMN_TYPES = 'data_type'
COLUMN_ORDERS = 'order'
@classmethod
def all(cls):
return [
cls.COLUMN_NAMES,
cls.COLUMN_TYPES,
cls.COLUMN_ORDERS,
]
@dataclass
class SchemaMetric(MetricBase):
type: SchemaMetricType
col_to_metric: Dict[str, Any]
@classmethod
def from_columns(cls, metric_type: SchemaMetricType, columns: List[Column]):
col_to_metric_map = {}
for col in columns:
col_to_metric_map[col.name] = getattr(col, metric_type._value_)
return cls(
type=metric_type,
col_to_metric=col_to_metric_map,
)
def compile(self, dialect: BaseDialect):
return ""
@dataclass
class SchemaMonitor(SchemaMonitorConfigurationDefaults, Monitor, SchemaMonitorConfiguration):
columns: List[Column] = field(default_factory=list)
metrics: List[SchemaMetric] = field(default_factory=list)
def retrieve_metrics(self):
return self.metrics
def info(self):
info_str = "Schema Monitor: {}".format(self.table)
if self.description:
info_str += ": {}".format(self.description)
return info_str
@classmethod
def validate(cls, monitor_dict):
if 'table' not in monitor_dict:
raise Exception("Table key was missing in the schema monitor definition.")
if 'columns' not in monitor_dict:
raise Exception("Columns key was missing in the schema monitor definition.")
if len(monitor_dict['table'].split('.')) < 3:
raise Exception("Database and schema not found.")
@classmethod
def _create_metrics(cls, columns: List[Column]):
metrics = []
for metric_type in SchemaMetricType.all():
metric = SchemaMetric.from_columns(metric_type, columns)
metrics.append(metric)
return metrics
@classmethod
def from_dict(cls, value: Dict[str, Any]) -> 'SchemaMonitor':
table = value['table'] # Required
columns = [Column.from_dict(col) for col in value['columns']]
metrics = cls._create_metrics(columns)
description = extract_or_default(value, 'description', None)
schedule = Schedule.from_dict(extract_or_default(value, 'schedule', {}))
return cls(
table=table,
columns=columns,
metrics=metrics,
description=description,
schedule=schedule,
)
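    # Illustrative input for from_dict (shape inferred from the code above; the
    # column entries follow whatever Column.from_dict expects, which is an
    # assumption here):
    #   {
    #       "table": "analytics_db.public.orders",
    #       "columns": [{"name": "id", "data_type": "integer", "order": 1}],
    #       "description": "orders table schema",
    #       "schedule": {},
    #   }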
def to_dict(self):
return {
'type': 'schema',
'table': self.table,
'columns': [col.to_dict() for col in self.columns],
}
def database_name(self):
table_parts = self.table.split('.')
if len(table_parts) < 3:
raise Exception("Table name not fully qualified with database")
return table_parts[0].lower()
def schema_name(self):
table_parts = self.table.split('.')
if len(table_parts) < 3:
raise Exception("Schema name not fully qualified with database")
return table_parts[1].lower()
def table_name(self):
table_parts = self.table.split('.')
if len(table_parts) < 3:
raise Exception("Table name not fully qualified with database")
return table_parts[2].lower()
def base_sql_statement(self, select_sql, dialect):
return dialect.metadata_query().format(
database_name=self.database_name(),
schema_name = self.schema_name(),
table_name=self.table_name())
@classmethod
def from_definition(cls, definition: SchemaMonitorDefinition, workspace):
# monitor_base = super().from_definition(definition, workspace)
driver_config = workspace.get_driver_config(definition.datasource)
columns = [Column.from_dict(col) for col in definition.columns]
metrics = cls._create_metrics(columns)
        # Treat "not specified" as enabled; only an explicit False disables the monitor.
        enabled = definition.enabled if definition.enabled is not None else True
return cls(
# name=monitor_base.name,
# description=monitor_base.description,
# enabled=monitor_base.enabled,
# driver_config=monitor_base.driver_config,
name=definition.name,
description=definition.description,
schedule=Schedule(definition.schedule_minutes),
enabled=enabled,
driver_config=driver_config,
table=definition.table,
columns=columns,
metrics=metrics,
)
|
import os
import discord
import numpy as np
from PIL import ImageFont, Image
from discord_slash.model import SlashCommandPermissionType
from discord_slash.utils.manage_commands import create_choice, create_permission
from dotenv import load_dotenv
load_dotenv()
def _int(name: str):
try:
return int(name)
except ValueError:
return name
DISCORD_TOKEN = os.getenv('DISCORD_TOKEN')
DISCORD_GUILD_ID = _int(os.getenv('DISCORD_GUILD_ID'))
DISCORD_VOICE_CHANNEL_DEFAULT_NAME = os.getenv("VOICE_CHANNEL_DEFAULT_NAME")
DISCORD_VOICE_CHANNEL_CATEGORY = _int(os.getenv('DISCORD_VOICE_CHANNEL_CATEGORY'))
DISCORD_LOG_CHANNEL = _int(os.getenv('DISCORD_LOG_CHANNEL'))
DISCORD_ROLEBOT_SETTINGS_CHANNEL = _int(os.getenv('DISCORD_ROLEBOT_SETTINGS_CHANNEL'))
DISCORD_COMMAND_PERMISSION_ROLE = _int(os.getenv('DISCORD_COMMAND_PERMISSION_ROLE'))
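# Illustrative .env layout consumed by load_dotenv() above (values are
# placeholders, not real credentials or IDs):
#   DISCORD_TOKEN=<bot token>
#   DISCORD_GUILD_ID=123456789012345678
#   VOICE_CHANNEL_DEFAULT_NAME=General
#   DISCORD_VOICE_CHANNEL_CATEGORY=123456789012345678
#   DISCORD_LOG_CHANNEL=123456789012345678
#   DISCORD_ROLEBOT_SETTINGS_CHANNEL=123456789012345678
#   DISCORD_COMMAND_PERMISSION_ROLE=123456789012345678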
DISCORD_GUILD_IDS = [DISCORD_GUILD_ID]
INTENTS = discord.Intents(members=True, presences=True, voice_states=True, guild_messages=True, guilds=True)
DISCORD_COGS = [
create_choice(name="rolebot", value="RoleBot"),
create_choice(name="commands", value="Commands"),
create_choice(name="voicechannelbot", value="VoiceChannelBot"),
create_choice(name="pollbot", value="PollBot"),
]
DISCORD_COMMAND_PERMISSIONS = {
DISCORD_GUILD_ID: [create_permission(DISCORD_COMMAND_PERMISSION_ROLE, SlashCommandPermissionType.ROLE, True),]
}
DISCORD_TTF_SCALE_FACTOR = 10
DISCORD_TTF_POLL_NORMAL = ImageFont.truetype("data/Helvetica.ttf", 15 * DISCORD_TTF_SCALE_FACTOR)
DISCORD_TTF_POLL_BOLD = ImageFont.truetype("data/Helvetica-Bold-Font.ttf", 15 * DISCORD_TTF_SCALE_FACTOR)
DISCORD_POLL_EMPTY, DISCORD_POLL_FULL, DISCORD_POLL_WIN = np.split(np.array(Image.open('data/basepollimages.png')), 3) |
"""
GraphQL output types and resolvers.
"""
import contextlib
import functools
import inspect
import operator
import types
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from typing import Callable, List, Optional
import pyarrow as pa
import pyarrow.compute as pc
import strawberry
from cached_property import cached_property
from strawberry.field import StrawberryField
from typing_extensions import Annotated
from .core import Column as C, ListChunk
from .inputs import BooleanQuery, IntQuery, LongQuery, FloatQuery, DecimalQuery, DateQuery
from .inputs import DateTimeQuery, TimeQuery, DurationQuery, BinaryQuery, StringQuery
from .scalars import Long, type_map
def selections(*fields) -> set:
"""Return set of field name selections from strawberry `SelectedField`."""
return {selection.name for field in fields for selection in field.selections}
def doc_field(func: Optional[Callable] = None, **kwargs: str) -> StrawberryField:
"""Return strawberry field with argument and docstring descriptions."""
if func is None:
return functools.partial(doc_field, **kwargs)
for name in kwargs:
argument = strawberry.argument(description=kwargs[name])
func.__annotations__[name] = Annotated[func.__annotations__[name], argument]
return strawberry.field(func, description=inspect.getdoc(func))
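# Hypothetical usage sketch of doc_field with a keyword description (the method
# name and description below are illustrative, not taken from this module):
#   @doc_field(name="name of the field to look up")
#   def field(self, name: str) -> str:
#       """Return the requested field."""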
@strawberry.interface(description="column interface")
class Column:
def __init__(self, array):
self.array = array
def __init_subclass__(cls):
cls.__init__ = Column.__init__
@doc_field
def type(self) -> str:
"""[arrow type](https://arrow.apache.org/docs/python/api/datatypes.html)"""
return str(self.array.type)
@doc_field
def length(self) -> Long:
"""number of rows"""
return len(self.array)
@classmethod
def cast(cls, array: pa.ChunkedArray) -> 'Column':
"""Return typed column based on array type."""
return cls.type_map[type_map[C.scalar_type(array).id]](array) # type: ignore
def map(self, func: Callable, **kwargs) -> 'Column':
return self.cast(C.map(self.array, func, **kwargs))
@classmethod
def fromscalar(cls, scalar: pa.ListScalar) -> Optional['Column']:
return None if scalar.values is None else cls.cast(pa.chunked_array([scalar.values]))
def unique(self, info):
"""unique values and counts"""
if 'counts' in selections(*info.selected_fields):
return self.Set(*self.array.value_counts().flatten())
return self.Set(self.array.unique())
def count(self, **query) -> Long:
"""Return number of matching values.
Optimized for `null`, and an empty query is equivalent to not `null`.
"""
if query == {'equal': None}:
return self.array.null_count
if query in ({}, {'not_equal': None}):
return len(self.array) - self.array.null_count
return C.count(C.mask(self.array, **query), True)
def index(self, value, start: Long = 0, end: Optional[Long] = None) -> Long:
"""Return first index of occurrence of value; -1 indicates not found.
May be faster than `count` for membership test.
"""
return C.index(self.array, value, start, end)
def values(self):
"""list of values"""
return self.array.to_pylist()
@cached_property
def min_max(self):
return C.min_max(self.array)
def min(self):
"""minimum value"""
return self.min_max['min']
def max(self):
"""maximum value"""
return self.min_max['max']
def sort(self, reverse: bool = False, length: Optional[Long] = None):
"""Return sorted values. Optimized for fixed length."""
return C.sort(self.array, reverse, length).to_pylist()
def drop_null(self):
"""remove missing values from an array"""
return type(self)(self.array.drop_null())
def fill_null(self, value):
"""Return values with null elements replaced."""
return type(self)(C.fill_null(self.array, value))
def min_element_wise(self, value, skip_nulls: bool = True):
"""Return element-wise minimum compared to scalar."""
return type(self)(pc.min_element_wise(self.array, value, skip_nulls=skip_nulls))
def max_element_wise(self, value, skip_nulls: bool = True):
"""Return element-wise maximum compared to scalar."""
return type(self)(pc.max_element_wise(self.array, value, skip_nulls=skip_nulls))
def between(self, unit: str, start=None, end=None) -> 'LongColumn':
"""Return duration between start and end."""
if [start, end].count(None) != 1:
raise ValueError("exactly one of `start` or `end` required")
convert = functools.partial(pa.scalar, type=self.array.type)
args = (self.array, convert(end)) if start is None else (convert(start), self.array)
return LongColumn(getattr(pc, f'{unit}_between')(*args))
@strawberry.interface(description="unique values")
class Set:
length = doc_field(Column.length)
counts: List[Long] = strawberry.field(description="list of counts")
def __init__(self, array, counts=pa.array([])):
self.array, self.counts = array, counts.to_pylist()
@classmethod
def subclass(base, cls, name, description):
namespace = {
'__init__': Set.__init__,
'values': annotate(Column.values, List[Optional[cls]]),
}
return strawberry.type(description=description)(type(name, (base,), namespace))
def annotate(func, return_type, **annotations):
"""Return field from an annotated clone of the function."""
clone = types.FunctionType(func.__code__, func.__globals__)
annotations['return'] = return_type
clone.__annotations__.update(func.__annotations__, **annotations)
clone.__defaults__ = func.__defaults__
return strawberry.field(clone, description=inspect.getdoc(func))
@strawberry.interface(description="numeric column interface")
class NumericColumn:
@doc_field
def any(self) -> Optional[bool]:
"""whether any values evaluate to true"""
return C.any(self.array) # type: ignore
@doc_field
def all(self) -> Optional[bool]:
"""whether all values evaluate to true"""
return C.all(self.array) # type: ignore
def sum(self):
"""sum of the values"""
return pc.sum(self.array).as_py()
def product(self):
"""product of the values"""
return pc.product(self.array).as_py()
@doc_field
def mean(self) -> Optional[float]:
"""mean of the values"""
return pc.mean(self.array).as_py() # type: ignore
@doc_field
def stddev(self) -> Optional[float]:
"""standard deviation of the values"""
return pc.stddev(self.array).as_py() # type: ignore
@doc_field
def variance(self) -> Optional[float]:
"""variance of the values"""
return pc.variance(self.array).as_py() # type: ignore
@doc_field
def quantile(self, q: List[float] = [0.5], interpolation: str = 'linear') -> List[float]:
"""Return list of quantiles for values, defaulting to the median."""
return pc.quantile(self.array, q=q, interpolation=interpolation).to_pylist() # type: ignore
@doc_field
def tdigest(
self, q: List[float] = [0.5], delta: int = 100, buffer_size: int = 500
) -> List[float]:
"""Return list of approximate quantiles for values, defaulting to the median."""
return pc.tdigest(self.array, q=q, delta=delta, buffer_size=buffer_size).to_pylist() # type: ignore
@doc_field
def logb(self, base: float) -> 'FloatColumn':
"""Return log of values to base."""
return FloatColumn(pc.logb(self.array, base)) # type: ignore
def mode(self, length: int = 1):
"""mode of the values"""
return self.Set(*pc.mode(self.array, length).flatten()) # type: ignore
def add(self, value):
"""Return values added to scalar."""
return type(self)(pc.add(value, self.array))
def subtract(self, value):
"""Return values subtracted *from* scalar."""
return type(self)(pc.subtract(value, self.array))
def multiply(self, value):
"""Return values multiplied by scalar."""
return type(self)(pc.multiply(value, self.array))
def divide(self, value):
"""Return values divided *into* scalar."""
return type(self)(pc.divide(value, self.array))
def power(self, base=None, exponent=None):
"""Return values raised to power."""
if [base, exponent].count(None) != 1:
raise ValueError("exactly one of `base` or `exponent` required")
args = (self.array, exponent) if base is None else (base, self.array)
return type(self)(pc.power(*args))
@strawberry.type(description="column of booleans")
class BooleanColumn(Column):
count = BooleanQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=bool)
any = doc_field(NumericColumn.any)
all = doc_field(NumericColumn.all)
values = annotate(Column.values, List[Optional[bool]])
Set = Set.subclass(bool, "BooleanSet", "unique booleans")
unique = annotate(Column.unique, Set)
@strawberry.type(description="column of ints")
class IntColumn(Column, NumericColumn):
count = IntQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=int)
values = annotate(Column.values, List[Optional[int]])
Set = Set.subclass(int, "IntSet", "unique ints")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[int]])
sum = annotate(NumericColumn.sum, Optional[int])
product = annotate(NumericColumn.product, Optional[int])
mode = annotate(NumericColumn.mode, Set)
min = annotate(Column.min, Optional[int])
max = annotate(Column.max, Optional[int])
drop_null = annotate(Column.drop_null, 'IntColumn')
fill_null = annotate(Column.fill_null, 'IntColumn', value=int)
add = annotate(NumericColumn.add, 'IntColumn', value=int)
subtract = annotate(NumericColumn.subtract, 'IntColumn', value=int)
multiply = annotate(NumericColumn.multiply, 'IntColumn', value=int)
divide = annotate(NumericColumn.divide, 'IntColumn', value=int)
power = annotate(NumericColumn.power, 'IntColumn', base=Optional[int], exponent=Optional[int])
min_element_wise = annotate(Column.min_element_wise, 'IntColumn', value=int)
max_element_wise = annotate(Column.max_element_wise, 'IntColumn', value=int)
@strawberry.type(description="column of longs")
class LongColumn(Column, NumericColumn):
count = LongQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=Long)
values = annotate(Column.values, List[Optional[Long]])
Set = Set.subclass(Long, "LongSet", "unique longs")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[Long]])
sum = annotate(NumericColumn.sum, Optional[Long])
product = annotate(NumericColumn.product, Optional[Long])
mode = annotate(NumericColumn.mode, Set)
min = annotate(Column.min, Optional[Long])
max = annotate(Column.max, Optional[Long])
drop_null = annotate(Column.drop_null, 'LongColumn')
fill_null = annotate(Column.fill_null, 'LongColumn', value=Long)
add = annotate(NumericColumn.add, 'LongColumn', value=Long)
subtract = annotate(NumericColumn.subtract, 'LongColumn', value=Long)
multiply = annotate(NumericColumn.multiply, 'LongColumn', value=Long)
divide = annotate(NumericColumn.divide, 'LongColumn', value=Long)
power = annotate(
NumericColumn.power, 'LongColumn', base=Optional[Long], exponent=Optional[Long]
)
min_element_wise = annotate(Column.min_element_wise, 'LongColumn', value=Long)
max_element_wise = annotate(Column.max_element_wise, 'LongColumn', value=Long)
@strawberry.type(description="column of floats")
class FloatColumn(Column, NumericColumn):
count = FloatQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=float)
values = annotate(Column.values, List[Optional[float]])
Set = Set.subclass(float, "FloatSet", "unique floats")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[float]])
sum = annotate(NumericColumn.sum, Optional[float])
product = annotate(NumericColumn.product, Optional[float])
mode = annotate(NumericColumn.mode, Set)
min = annotate(Column.min, Optional[float])
max = annotate(Column.max, Optional[float])
drop_null = annotate(Column.drop_null, 'FloatColumn')
fill_null = annotate(Column.fill_null, 'FloatColumn', value=float)
add = annotate(NumericColumn.add, 'FloatColumn', value=float)
subtract = annotate(NumericColumn.subtract, 'FloatColumn', value=float)
multiply = annotate(NumericColumn.multiply, 'FloatColumn', value=float)
divide = annotate(NumericColumn.divide, 'FloatColumn', value=float)
power = annotate(
NumericColumn.power, 'FloatColumn', base=Optional[float], exponent=Optional[float]
)
min_element_wise = annotate(Column.min_element_wise, 'FloatColumn', value=float)
max_element_wise = annotate(Column.max_element_wise, 'FloatColumn', value=float)
@doc_field
def round(
self, ndigits: int = 0, multiple: float = 1.0, round_mode: str = 'half_to_even'
) -> 'FloatColumn':
"""Return log of values to base."""
if ndigits != 0 and multiple != 1.0:
raise ValueError("only one of `ndigits` or `multiple` allowed")
if multiple == 1:
array = pc.round(self.array, ndigits=ndigits, round_mode=round_mode)
else:
array = pc.round_to_multiple(self.array, multiple=multiple, round_mode=round_mode)
return FloatColumn(array)
@strawberry.type(description="column of decimals")
class DecimalColumn(Column):
count = DecimalQuery.resolver(Column.count)
values = annotate(Column.values, List[Optional[Decimal]])
Set = Set.subclass(Decimal, "DecimalSet", "unique decimals")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[Decimal]])
min = annotate(Column.min, Optional[Decimal])
max = annotate(Column.max, Optional[Decimal])
@strawberry.type(description="column of dates")
class DateColumn(Column):
count = DateQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=date)
values = annotate(Column.values, List[Optional[date]])
Set = Set.subclass(date, "DateSet", "unique dates")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[date]])
min = annotate(Column.min, Optional[date])
max = annotate(Column.max, Optional[date])
drop_null = annotate(Column.drop_null, 'DateColumn')
fill_null = annotate(Column.fill_null, 'DateColumn', value=date)
min_element_wise = annotate(Column.min_element_wise, 'DateColumn', value=date)
max_element_wise = annotate(Column.max_element_wise, 'DateColumn', value=date)
between = annotate(Column.between, LongColumn, start=Optional[date], end=Optional[date])
@doc_field
def strftime(self, format: str = '%Y-%m-%dT%H:%M:%S', locale: str = 'C') -> 'StringColumn':
"""Return formatted temporal values according to a format string."""
return StringColumn(pc.strftime(self.array, format=format, locale=locale))
@strawberry.type(description="column of datetimes")
class DateTimeColumn(Column):
count = DateTimeQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=datetime)
values = annotate(Column.values, List[Optional[datetime]])
Set = Set.subclass(datetime, "DatetimeSet", "unique datetimes")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[datetime]])
min = annotate(Column.min, Optional[datetime])
max = annotate(Column.max, Optional[datetime])
drop_null = annotate(Column.drop_null, 'DateTimeColumn')
fill_null = annotate(Column.fill_null, 'DateTimeColumn', value=datetime)
min_element_wise = annotate(Column.min_element_wise, 'DateTimeColumn', value=datetime)
max_element_wise = annotate(Column.max_element_wise, 'DateTimeColumn', value=datetime)
between = annotate(Column.between, LongColumn, start=Optional[datetime], end=Optional[datetime])
strftime = doc_field(DateColumn.strftime)
@doc_field
def subtract(self, value: datetime) -> 'DurationColumn':
"""Return values subtracted *from* scalar."""
return DurationColumn(pc.subtract(value, self.array))
@strawberry.type(description="column of times")
class TimeColumn(Column):
count = TimeQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=time)
values = annotate(Column.values, List[Optional[time]])
Set = Set.subclass(time, "TimeSet", "unique times")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[time]])
min = annotate(Column.min, Optional[time])
max = annotate(Column.max, Optional[time])
drop_null = annotate(Column.drop_null, 'TimeColumn')
fill_null = annotate(Column.fill_null, 'TimeColumn', value=time)
min_element_wise = annotate(Column.min_element_wise, 'TimeColumn', value=time)
max_element_wise = annotate(Column.max_element_wise, 'TimeColumn', value=time)
between = annotate(Column.between, LongColumn, start=Optional[time], end=Optional[time])
@strawberry.type(description="column of durations")
class DurationColumn(Column):
count = DurationQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=timedelta)
values = annotate(Column.values, List[Optional[timedelta]])
@strawberry.type(description="column of binaries")
class BinaryColumn(Column):
count = BinaryQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=bytes)
any = doc_field(NumericColumn.any)
all = doc_field(NumericColumn.all)
values = annotate(Column.values, List[Optional[bytes]])
Set = Set.subclass(bytes, "BinarySet", "unique binaries")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[bytes]])
drop_null = annotate(Column.drop_null, 'BinaryColumn')
fill_null = annotate(Column.fill_null, 'BinaryColumn', value=bytes)
@doc_field
def binary_replace_slice(self, start: int, stop: int, replacement: str) -> 'BinaryColumn':
"""Replace a slice of a binary string with `replacement`."""
kwargs = dict(start=start, stop=stop, replacement=replacement)
return BinaryColumn(pc.binary_replace_slice(self.array, **kwargs))
@strawberry.type(description="column of strings")
class StringColumn(Column):
count = StringQuery.resolver(Column.count)
index = annotate(Column.index, Long, value=str)
any = doc_field(NumericColumn.any)
all = doc_field(NumericColumn.all)
values = annotate(Column.values, List[Optional[str]])
Set = Set.subclass(str, "StringSet", "unique strings")
unique = annotate(Column.unique, Set)
sort = annotate(Column.sort, List[Optional[str]])
min = annotate(Column.min, Optional[str])
max = annotate(Column.max, Optional[str])
drop_null = annotate(Column.drop_null, 'StringColumn')
fill_null = annotate(Column.fill_null, 'StringColumn', value=str)
@doc_field
def split(
self, pattern: str = '', max_splits: int = -1, reverse: bool = False, regex: bool = False
) -> 'ListColumn':
"""Return strings split on pattern, by default whitespace."""
kwargs = {'max_splits': max_splits, 'reverse': reverse}
if pattern:
func = pc.split_pattern_regex if regex else pc.split_pattern
return ListColumn(func(self.array, pattern=pattern, **kwargs))
return ListColumn(pc.utf8_split_whitespace(self.array, **kwargs))
@doc_field
def utf8_ltrim(self, characters: str = '') -> 'StringColumn':
"""Trim leading characters, by default whitespace."""
if characters:
return StringColumn(pc.utf8_ltrim(self.array, characters=characters))
return StringColumn(pc.utf8_ltrim_whitespace(self.array))
@doc_field
def utf8_rtrim(self, characters: str = '') -> 'StringColumn':
"""Trim trailing characters, by default whitespace."""
if characters:
return StringColumn(pc.utf8_rtrim(self.array, characters=characters))
return StringColumn(pc.utf8_rtrim_whitespace(self.array))
@doc_field
def utf8_trim(self, characters: str = '') -> 'StringColumn':
"""Trim trailing characters, by default whitespace."""
if characters:
return StringColumn(pc.utf8_trim(self.array, characters=characters))
return StringColumn(pc.utf8_trim_whitespace(self.array))
@doc_field
def utf8_lpad(self, width: int, padding: str = ' ') -> 'StringColumn':
"""Right-align strings by padding with a given character."""
return StringColumn(pc.utf8_lpad(self.array, width=width, padding=padding))
@doc_field
def utf8_rpad(self, width: int, padding: str = ' ') -> 'StringColumn':
"""Left-align strings by padding with a given character."""
return StringColumn(pc.utf8_rpad(self.array, width=width, padding=padding))
@doc_field
def utf8_center(self, width: int, padding: str = ' ') -> 'StringColumn':
"""Center strings by padding with a given character."""
return StringColumn(pc.utf8_center(self.array, width=width, padding=padding))
@doc_field
def utf8_replace_slice(self, start: int, stop: int, replacement: str) -> 'StringColumn':
"""Replace a slice of a string with `replacement`."""
kwargs = dict(start=start, stop=stop, replacement=replacement)
return StringColumn(pc.utf8_replace_slice(self.array, **kwargs))
@doc_field
def replace_substring(
self, pattern: str, replacement: str, max_replacements: int = -1
) -> 'StringColumn':
"""Replace non-overlapping substrings that match pattern."""
kwargs = dict(pattern=pattern, replacement=replacement, max_replacements=max_replacements)
return StringColumn(pc.replace_substring(self.array, **kwargs))
@doc_field
def strptime(self, format: str = '%Y-%m-%dT%H:%M:%S', unit: str = 'ms') -> DateTimeColumn:
"""Return parsed timestamps."""
return DateTimeColumn(pc.strptime(self.array, format=format, unit=unit))
@strawberry.type(description="column of lists")
class ListColumn(Column):
@doc_field
def values(self) -> List[Optional[Column]]:
"""list of columns"""
return list(map(self.fromscalar, self.array))
@doc_field
def count(self, mode: str = 'only_valid') -> LongColumn:
"""non-null count of each list scalar"""
return LongColumn(self.map(ListChunk.count, mode=mode).array)
@doc_field
def count_distinct(self, mode: str = 'only_valid') -> LongColumn:
"""non-null distinct count of each list scalar"""
return LongColumn(self.map(ListChunk.count_distinct, mode=mode).array)
@doc_field
def value_length(self) -> LongColumn:
"""length of each list scalar"""
return LongColumn(pc.list_value_length(self.array))
@doc_field
def flatten(self) -> Column:
"""concatenation of all sub-lists"""
return self.cast(pc.list_flatten(self.array))
@doc_field
def unique(self) -> 'ListColumn':
"""unique values within each scalar"""
return self.map(ListChunk.unique) # type: ignore
@doc_field
def distinct(self, mode: str = 'only_valid') -> 'ListColumn':
"""non-null distinct values within each scalar"""
return self.map(ListChunk.distinct, mode=mode) # type: ignore
@doc_field
def element(self, index: Long = 0) -> Column:
"""element at index of each list scalar; defaults to null"""
with contextlib.suppress(ValueError):
return self.cast(pc.list_element(self.array, index))
return self.map(ListChunk.element, index=index)
@doc_field
def min(self, skip_nulls: bool = True, min_count: int = 1) -> Column:
"""min value of each list scalar"""
return self.map(ListChunk.min, skip_nulls=skip_nulls, min_count=min_count)
@doc_field
def max(self, skip_nulls: bool = True, min_count: int = 1) -> Column:
"""max value of each list scalar"""
return self.map(ListChunk.max, skip_nulls=skip_nulls, min_count=min_count)
@doc_field
def sum(self, skip_nulls: bool = True, min_count: int = 1) -> Column:
"""sum of each list scalar"""
return self.map(ListChunk.sum, skip_nulls=skip_nulls, min_count=min_count)
@doc_field
def product(self, skip_nulls: bool = True, min_count: int = 1) -> Column:
"""product of each list scalar"""
return self.map(ListChunk.product, skip_nulls=skip_nulls, min_count=min_count)
@doc_field
def mean(self, skip_nulls: bool = True, min_count: int = 1) -> FloatColumn:
"""mean of each list scalar"""
return self.map(ListChunk.mean, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def mode(self, n: int = 1, skip_nulls: bool = True, min_count: int = 0) -> 'ListColumn':
"""mode of each list scalar"""
return self.map(ListChunk.mode, n=n, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def quantile(
self,
q: List[float] = [0.5],
interpolation: str = 'linear',
skip_nulls: bool = True,
min_count: int = 0,
) -> 'ListColumn':
"""quantile of each list scalar"""
return self.map(ListChunk.quantile, q=q, interpolation=interpolation, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def tdigest(
self,
q: List[float] = [0.5],
delta: int = 100,
buffer_size: int = 500,
skip_nulls: bool = True,
min_count: int = 0,
) -> 'ListColumn':
"""approximate quantile of each list scalar"""
return self.map(ListChunk.tdigest, q=q, delta=delta, buffer_size=buffer_size, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def stddev(self, ddof: int = 0, skip_nulls: bool = True, min_count: int = 0) -> FloatColumn:
"""stddev of each list scalar"""
return self.map(ListChunk.stddev, ddof=ddof, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def variance(self, ddof: int = 0, skip_nulls: bool = True, min_count: int = 0) -> FloatColumn:
"""variance of each list scalar"""
return self.map(ListChunk.variance, ddof=ddof, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def any(self, skip_nulls: bool = True, min_count: int = 1) -> BooleanColumn:
"""any true of each list scalar"""
return self.map(ListChunk.any, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def all(self, skip_nulls: bool = True, min_count: int = 1) -> BooleanColumn:
"""all true of each list scalar"""
return self.map(ListChunk.all, skip_nulls=skip_nulls, min_count=min_count) # type: ignore
@doc_field
def binary_join(self, separator: bytes) -> BinaryColumn:
"""Join a list of binary strings together with a `separator` to form a single string."""
return BinaryColumn(pc.binary_join(self.array, separator))
@doc_field
def string_join(self, separator: str) -> StringColumn:
"""Join a list of strings together with a `separator` to form a single string."""
return StringColumn(pc.binary_join(self.array, separator))
@strawberry.type(description="column of structs")
class StructColumn(Column):
@doc_field
def names(self) -> List[str]:
"""field names"""
return [field.name for field in self.array.type]
@doc_field
def column(self, name: str) -> Column:
"""Return struct field as a column."""
return self.map(operator.methodcaller('field', name))
Column.type_map = { # type: ignore
bool: BooleanColumn,
int: IntColumn,
Long: LongColumn,
float: FloatColumn,
Decimal: DecimalColumn,
date: DateColumn,
datetime: DateTimeColumn,
time: TimeColumn,
timedelta: DurationColumn,
bytes: BinaryColumn,
str: StringColumn,
list: ListColumn,
dict: StructColumn,
}
|
#!/usr/bin/env python3
from pathlib import PurePath, Path
from typing import List, Dict, Union, Iterator, NamedTuple, Any, Sequence, Optional, Set
import json
from datetime import datetime
import logging
import pytz
from .exporthelpers import dal_helper
from .exporthelpers.dal_helper import PathIsh, Json, Res
def get_logger():
    return logging.getLogger('8slp')
class DAL:
def __init__(self, sources: Sequence[PathIsh]) -> None:
self.sources = [p if isinstance(p, Path) else Path(p) for p in sources]
def raw(self):
for f in sorted(self.sources):
with f.open(encoding="utf-8") as fo:
yield f, json.load(fo)
def sessions(self) -> Iterator[Res[Json]]:
for src in self.sources:
try:
j = json.loads(src.read_text())
except Exception as e:
ex = RuntimeError(f'While processing {src}')
ex.__cause__ = e
yield ex
continue
# TODO Dedupe
for session in j['sessions']:
yield DAL._parseSession(session)
    @staticmethod
    def _parseSession(session):
if 'timeseries' not in session:
return session
session['tossAndTurns'] = list()
for tnt in session['timeseries']['tnt']:
session['tossAndTurns'].append({
'timestamp': tnt[0],
'value': tnt[1]
})
session['tempRoomC'] = list()
for roomC in session['timeseries']['tempRoomC']:
session['tempRoomC'].append({
'timestamp': roomC[0],
'value': roomC[1]
})
session['tempBedC'] = list()
for bedC in session['timeseries']['tempBedC']:
session['tempBedC'].append({
'timestamp': bedC[0],
'value': bedC[1]
})
session['respiratoryRate'] = list()
for rate in session['timeseries']['respiratoryRate']:
session['respiratoryRate'].append({
'timestamp': rate[0],
'value': rate[1]
})
session['heartRate'] = list()
for heartRate in session['timeseries']['heartRate']:
session['heartRate'].append({
'timestamp': heartRate[0],
'value': heartRate[1]
})
session['hrv'] = list()
for hrv in session['timeseries']['hrv']:
session['hrv'].append({
'timestamp': hrv[0],
'value': hrv[1]
})
session['rmssd'] = list()
for rmssd in session['timeseries']['rmssd']:
session['rmssd'].append({
'timestamp': rmssd[0],
'value': rmssd[1]
})
return session
if __name__ == '__main__':
dal_helper.main(DAL=DAL)
|
from flask import (render_template,
url_for,
flash,
redirect,
request,
abort,
Blueprint)
from flask_login import current_user, login_required
from MessageBoard import db
from MessageBoard.models import Post, Attachment
from MessageBoard.posts.forms import PostForm, UploadForm
from MessageBoard.posts.utils import save_attachment
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
formatter = logging.Formatter('%(levelname)s:%(message)s:%(asctime)s')
file_handler = logging.FileHandler('posts_routes.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
posts = Blueprint('posts', __name__)
# I have not added logging to many of these as it felt a little excessive for every update and
# to be honest, a bit invasive.
@posts.route("/post/new", methods = ['GET', 'POST'])
@login_required
def create_post():
form = PostForm()
if form.validate_on_submit():
# if form.attachment.data:
# attachment = save_attachment(form.attachment.data)
# attachment = attachment
post = Post(
title=form.title.data,
content=form.content.data,
# attachment=form.attachment.data,
author=current_user
)
db.session.add(post)
db.session.commit()
flash('Your message has been posted.', 'success')
return redirect(url_for('main.home'))
# attachment = url_for('static', filename='attachments/' + attachment)
return render_template('create_post.html', title='Create Post',
form=form, legend="Post an Update")
# @posts.route("/post/new", methods=['GET', "POST"])
# @login_required
# def upload_attachment():
# attachment = UploadForm()
# if attachment.validate_on_submit():
# attachment = save_attachment(attachment.attachments.data)
# db.session.add(attachment)
# db.session.commit()
# flash('Your attachment has been successfully uploaded', 'success')
# return redirect(url_for('main.home'))
# return render_template('create_post.html', title='Create Post',
# form=attachment, legend="Upload an Attachment")
@posts.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@posts.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
#attach_form = UploadForm()
    if post.author != current_user:
        logger.warning(f'{current_user} attempted to update a post made by {post.author}')
        abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
form.attachment = form.attachment.data
#logic for adding updating attachments via the UploadForm, only if user (via post) is authenticated.
#May need to put the attach logic outside that if statement though as it might be the wrong scope...or
# have if form.validate_on_submit() and or attach_form.validate_on_submit()??
db.session.commit()
flash('Your update has been successfully edited', 'success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
# attach_form.attachment= attach_form.attachment.data
# Not sure about keeping the above line as I am not sure I want to make GET requests
# to display attachments if they are super big. No need to view the attachement really,
# just update/delete them
return render_template('create_post.html', title='Update Post',
form=form, legend="Edit Your Update")
@posts.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
    if post.author != current_user:
        logger.warning(f'{current_user} attempted to delete a post made by {post.author}')
        abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.home'))
|
import os
import pytest
import requests
def just_test_if_mainnet_node() -> str:
mainnet_node_url = os.environ.get('ETHEREUM_MAINNET_NODE')
if not mainnet_node_url:
pytest.skip("Mainnet node not defined, cannot test oracles", allow_module_level=True)
elif requests.get(mainnet_node_url).status_code == 404:
pytest.skip("Cannot connect to mainnet node", allow_module_level=True)
return mainnet_node_url
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 07_autoencoder.ipynb (unless otherwise specified).
__all__ = ['get_pixel', 'change_image_background', 'create_augmentor_pipeline', 'load_data']
# Cell
import Augmentor
import os
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
# Cell
def get_pixel(image, i, j):
""" Returns a pixel at coordinate (`i`, `j`). """
return image.getpixel((i,j))
def change_image_background(orig_dir_path, converted_path):
""" Changes the image background from white to black and foreground from black to white,
for all the images at folder `orig_dir_path` and place them into folder `converted_path`."""
    files = os.listdir(orig_dir_path)
num_files = len(files)
data = []
counter = 1
for f in tqdm(files, total=num_files):
        img = Image.open(os.path.join(orig_dir_path, f))
out_img = Image.new('RGB',img.size,color=1)
width, height = img.size
for w in range(width):
for h in range(height):
r, g, b = get_pixel(img, w,h)
if r > 128 or g > 128 or b > 128:
r = g = b = 0
else:
r = g = b = 255
out_img.putpixel((w,h),(r,g,b))
file_name = os.path.join(converted_path, str(counter) + '.png')
out_img.save(file_name)
counter += 1
return data
def create_augmentor_pipeline(dir_path):
""" Creates a pipeline for generating extra images from images at folder `dir_path`."""
p = Augmentor.Pipeline(dir_path)
p.resize(probability=1,width=64,height=64)
p.rotate90(probability=0.1)
p.rotate(probability=0.2, max_left_rotation=5, max_right_rotation=10)
p.skew_left_right(probability=0.1)
p.greyscale(probability=1)
return p
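# Illustrative end-to-end usage (folder names are placeholders, not from the
# original notebook); Augmentor's sample() writes generated images to an
# "output" subfolder of the source directory by default.
#   change_image_background('images/original', 'images/converted')
#   p = create_augmentor_pipeline('images/converted')
#   p.sample(1000)
#   data = load_data('images/converted/output')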
def load_data(dir_path):
""" Loads all the images from directory `dir_path`, converts them to matrices and return a list."""
files = os.listdir(dir_path)
num_files = len(files)
data = []
for f in tqdm(files, total=num_files):
img = Image.open(os.path.join(dir_path,f))
img_array = np.array(img)
data.append(img_array)
return data |
from scipy.cluster.hierarchy import cut_tree
import numpy as np
def cut_tree_balanced(Z, max_cluster_size):
"""
Given a linkage matrix Z and max cluster size, return a balanced cut tree.
The function looks recursively along the hierarchical tree, from the root
(single cluster gathering all the samples) to the leaves (i.e. the clusters
with only one sample), retrieving the biggest possible clusters containing
a number of samples lower than a given maximum. If a cluster at a specific
tree level contains a number of samples higher than the given maximum, it
is ignored and its offspring (smaller) sub-clusters are taken into
consideration. If the cluster contains a number of samples lower than the
given maximum, it is taken as result and its offspring sub-clusters not
further processed.
Parameters
----------
Z : ndarray
The linkage matrix resulting from calling `ward` or `linkage`. I.e. it
contains the hierarchical clustering encoded as a linkage matrix.
max_cluster_size : int
Maximum number of data samples contained within the resulting clusters.
Thus, all resulting clusters will contain a number of data samples
``<= max_cluster_size``. Must be >= 1.
Returns
-------
cluster_id : ndarray
One-dimensional array of integers containing for each input sample its
corresponding cluster id. The cluster id is an integer which is higher
for deeper tree levels.
cluster_level : ndarray
One-dimensional array of integer arrays containing for each input
sample its corresponding cluster tree level, i.e. a sequence of
0's and 1's. Note that the cluster level is longer for deeper tree
levels, being [0] the root cluster, [0, 0] and [0, 1] its offspring,
and so on. Also note that in each cluster splitting, the label 0
denotes the bigger cluster, while the label 1 denotes the smallest.
See Also
--------
cut_tree
Notes
-----
There are several implemented methods following the same idea, i.e.
performing a tree cut in which the resulting clusters are at different tree
levels, but using more elaborated algorithms (in which the threshold of
``max_cluster_size`` is dynamically computed). The CRAN R package
dynamicTreeCut (github.com/cran/dynamicTreeCut) implements novel
dynamic branch cutting methods for detecting clusters in a dendrogram
depending on their shape. Further, MLCut (github.com/than8/MLCut)
provides interactive methods to cut tree branches at multiple levels.
Note that in the present method, the ``max_cluster_size`` threshold is a
fixed value given as input.
Further, note that this algorithm uses :math:`O(n^2)` memory, i.e. the same
as `cut_tree` because a full cut tree of the linkage matrix is performed
as the beginning. This data structure ``full_cut`` is used in order to
perform the successive computations.
Graphical examples of this algorithm can be found at the original repo
describing this method (github.com/vreyespue/cut_tree_balanced).
Examples
--------
>>> from scipy.cluster import hierarchy
>>> from scipy import stats
Initialize the random seed.
>>> np.random.seed(14)
Create a input matrix containing 100 data samples with 4 dimensions.
Note: using `gamma` in order to generate an unbalanced distribution.
If a regular ``cut_tree()`` would be performed, one big and many small
clusters would be obtained.
>>> X = stats.gamma.rvs(0.1, size=400).reshape((100, 4))
Compute the linkage matrix using the scipy ward() or linkage() method:
>>> Z = hierarchy.ward(X)
Perform a balanced cut tree of the linkage matrix:
>>> cluster_id, cluster_level = hierarchy.cut_tree_balanced(
... Z, max_cluster_size=10)
>>> cluster_id[:10]
array([18, 3, 9, 11, 19, 11, 13, 8, 14, 1])
>>> cluster_level[:10]
array([array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
array([0, 0, 0, 1]),
array([0, 0, 0, 0, 0, 0, 0, 0, 1]),
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
array([0, 0, 0, 0, 0, 0, 0, 1]),
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
array([0, 0, 1, 0])], dtype=object)
Note that clusters with more similar values for ``cluster_level`` denote
clusters with less distance in between, thus representing vectors which are
closer in the multidimensional space. This information contained within
``cluster_level`` is not usually present in ``cluster_id``.
"""
# Assert that the input max_cluster_size is >= 1
if not max_cluster_size >= 1:
raise ValueError(
"max_cluster_size should be >= 1, is: {}".format(max_cluster_size)
)
# Perform a full cut tree of the linkage matrix
full_cut = cut_tree(Z)
# Initialize the variable containing the current cluster id (it will be
# higher for each newly found valid cluster)
last_cluster_id = 0
# Initialize the resulting cluster id vector (containing for each row in
# input_data_x_sample its corresponding cluster id)
ndim = full_cut.shape[1]
cluster_id = np.zeros(ndim, dtype=int)
# Initialize the resulting cluster level vector (containing for each data
# sample its corresponding cluster tree level)
cluster_level = np.empty((ndim,), dtype=object)
for i in range(ndim):
cluster_level[i] = np.array([0], int)
# Scan the full cut matrix from the last column (root tree level) to the
# first column (leaves tree level)
for icol in range(ndim - 1, -1, -1):
# Get a list of unique group ids and their count within the current
# tree level
values, counts = np.unique(full_cut[:, icol], return_counts=True)
# Stop if all samples have been already selected (i.e. if all data
# samples have been already clustered)
if (values.size == 1) and (values[0] == -1):
break
# For each group id within the current tree level
for ival in range(values.size):
# If it is a valid group id (i.e. not yet marked with -1)
            # Note: data samples which were already included in a valid
# cluster id are marked with the group id -1 (see below)
if values[ival] >= 0:
# Select the current group id
selected_curr_value = values[ival]
# Look for the vector positions (related to rows in
# input_data_x_sample) belonging to the current group id
selected_curr_elems = np.where(full_cut[:, icol] == selected_curr_value)
# Major step #1: Populate the resulting vector of cluster
# levels for each data sample, if we are not at the root
if icol < (ndim - 1):
# Get the ancestor values and element positions
selected_ancestor_value = full_cut[
selected_curr_elems[0][0], icol + 1
]
selected_ancestor_elems = np.where(
full_cut[:, icol + 1] == selected_ancestor_value
)
# Compute the values and counts of the offspring and sort
# them by their count (so that the biggest cluster gets the
# offspring_elem_label = 0, see below)
offspring_values, offspring_counts = np.unique(
full_cut[selected_ancestor_elems, icol], return_counts=True
)
count_sort_ind = np.argsort(-offspring_counts)
offspring_values = offspring_values[count_sort_ind]
offspring_counts = offspring_counts[count_sort_ind]
# If the size of the offspring is > 1
if offspring_values.shape[0] > 1:
# Select the label of the current value (i.e. 0 or 1)
# and append it to the cluster level
offspring_elem_label = np.where(
offspring_values == selected_curr_value
)[0][0]
for i in selected_curr_elems[0]:
cluster_level[i] = np.hstack(
(cluster_level[i], offspring_elem_label)
)
# Major step #2: Populate the resulting vector of cluster ids
# for each data sample, and mark them as clustered (-1)
# If the number of elements is below max_cluster_size
if counts[ival] <= max_cluster_size:
# Relate vector positions to the current cluster id
cluster_id[selected_curr_elems] = last_cluster_id
# Delete these vector positions at lower tree levels for
# further processing (i.e. mark as clustered)
full_cut[selected_curr_elems, 0:icol] = -1
# Update the cluster id
last_cluster_id += 1
# Return the resulting clustering array (containing for each row in
# input_data_x_sample its corresponding cluster id)
return cluster_id, cluster_level
|
# --------------------------------------------------------
# R-C3D
# Copyright (c) 2017 Boston University
# Licensed under The MIT License [see LICENSE for details]
# Written by Huijuan Xu
# --------------------------------------------------------
import os
import copy
import json
import cPickle
import subprocess
import numpy as np
# import cv2
from util import *
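# Sliding-window parameters used to build the ROI database (meanings inferred
# from their use below; note this is a Python 2 script, so LENGTH / 4 and
# win / LENGTH are integer divisions):
#   FPS            - frame rate at which the video frames were extracted
#   LENGTH         - base window length used to derive strides
#   min_length     - minimum segment duration (in frames) kept inside a window
#   overlap_thresh - minimum fraction of a segment that must fall inside a window
#   WINS           - sliding-window sizes, in frames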
FPS = 25
LENGTH = 30
min_length = 0
overlap_thresh = 0.1
STEP = LENGTH / 4
WINS = [LENGTH * 8]
print(WINS)
META_FILE = './activity_net.v1-3.min.json'
data = json.load(open(META_FILE))
print ('Generate Classes')
classes = generate_classes(data)
print ('Generate Training Segments')
train_segment = generate_segment('training', data, classes)
path = './preprocess/activityNet/frames/'
def generate_roi(rois1, rois2, video, start, end, stride, split):
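    # Build one roidb entry for a single window of a video (field meanings
    # inferred from the assignments below):
    #   'wins'       - ground-truth segments in window-local, stride-scaled coordinates
    #   'gt_classes' - class label of each kept segment
    #   'frames'     - [0, window start, window end, stride] for this window
    #   'bg_name' / 'fg_name' - directory of the extracted frames for this video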
tmp = {}
tmp['wins'] = ( rois1 - start ) / stride
tmp['durations'] = tmp['wins'][:,1] - tmp['wins'][:,0]
tmp['gt_classes'] = rois2
tmp['max_classes'] = rois2
tmp['max_overlaps'] = np.ones(len(rois1))
tmp['flipped'] = False
tmp['frames'] = np.array([[0, start, end, stride]])
tmp['bg_name'] = path + split + '/' + video
tmp['fg_name'] = path + split + '/' + video
    frame_path = '../../' + tmp['bg_name'] + '/image_' + str(end-1).zfill(5) + '.jpg'
    if not os.path.isfile(frame_path):
        print (frame_path)
        raise IOError('missing frame: ' + frame_path)
return tmp
def generate_roidb(split, segment1, segment2):
VIDEO_PATH = 'frames/%s/' % split
video_list = set(os.listdir(VIDEO_PATH))
duration = []
roidb = []
# global WINS
for vid in segment1:
if vid in video_list:
# print(vid)
length = len(os.listdir('./frames/' + split + '/' + vid))
db1 = np.array(segment1[vid])
db2 = np.array(segment2[vid])
if len(db1) == 0:
# print("0 length")
continue
db1[:,:2] = db1[:,:2] * FPS
for win in WINS:
# print(win)
stride = win / LENGTH
step = stride * STEP
# Forward Direction
for start in xrange(0, max(1, length - win + 1), step):
# print(start)
end = min(start + win, length)
assert end <= length
truelist = (np.logical_not(np.logical_or(db1[:,0] >= end, db1[:,1] <= start)))
rois1 = db1[truelist]
rois2 = db2[truelist]
# print(rois1)
# print rois1
# print rois2
# print("db", db1)
# Remove duration less than min_length
if len(rois1) > 0:
duration = rois1[:,1] - rois1[:,0]
rois1 = rois1[duration >= min_length]
rois2 = rois2[duration >= min_length]
# Remove overlap less than overlap_thresh
if len(rois1) > 0:
time_in_wins = (np.minimum(end, rois1[:,1]) - np.maximum(start, rois1[:,0]))*1.0
overlap = time_in_wins / (rois1[:,1] - rois1[:,0])
assert min(overlap) >= 0
assert max(overlap) <= 1
rois1 = rois1[overlap >= overlap_thresh]
rois2 = rois2[overlap >= overlap_thresh]
# Append data
# # print("len",len(rois1))
# print(rois1)
if len(rois1) > 0:
# print("true")
rois1[:,0] = np.maximum(start, rois1[:,0])
rois1[:,1] = np.minimum(end, rois1[:,1])
tmp = generate_roi(rois1, rois2, vid, start, end, stride, split)
roidb.append(tmp)
if USE_FLIPPED:
flipped_tmp = copy.deepcopy(tmp)
flipped_tmp['flipped'] = True
roidb.append(flipped_tmp)
# Backward Direction
for end in xrange(length, win-1, - step):
start = end - win
assert start >= 0
rois1 = db1[np.logical_not(np.logical_or(db1[:,0] >= end, db1[:,1] <= start))]
rois2 = db2[np.logical_not(np.logical_or(db1[:,0] >= end, db1[:,1] <= start))]
# Remove duration less than min_length
                    if len(rois1) > 0:
                        duration = rois1[:,1] - rois1[:,0]
                        # Keep rois2 aligned with rois1 when filtering by duration
                        rois2 = rois2[duration > min_length]
                        rois1 = rois1[duration > min_length]
# Remove overlap less than overlap_thresh
if len(rois1) > 0:
time_in_wins = (np.minimum(end, rois1[:,1]) - np.maximum(start, rois1[:,0]))*1.0
overlap = time_in_wins / (rois1[:,1] - rois1[:,0])
assert min(overlap) >= 0
assert max(overlap) <= 1
rois1 = rois1[overlap > overlap_thresh]
rois2 = rois2[overlap > overlap_thresh]
# Append data
if len(rois1) > 0:
rois1[:,0] = np.maximum(start, rois1[:,0])
rois1[:,1] = np.minimum(end, rois1[:,1])
tmp = generate_roi(rois1, rois2, vid, start, end, stride, split)
roidb.append(tmp)
if USE_FLIPPED:
flipped_tmp = copy.deepcopy(tmp)
flipped_tmp['flipped'] = True
roidb.append(flipped_tmp)
# print(roidb)
return roidb
USE_FLIPPED = True
train_roidb = generate_roidb('training', train_segment[0], train_segment[1])
print ("Save dictionary")
cPickle.dump(train_roidb, open('train_data_3fps_flipped.pkl','w'), cPickle.HIGHEST_PROTOCOL)
# def generate_roidb(split, segment1, segment2):
# VIDEO_PATH = 'frames/%s/' % split
# video_list = set(os.listdir(VIDEO_PATH))
# duration = []
# roidb = []
# for vid in segment1:
# if vid in video_list:
# length = len(os.listdir('./frames/' + split + '/' + vid))
# # db = np.array(segment[vid])
# # print("\n\n\n\n\n")
# # print segment[vid]
# # print("\n\n\n\n\n")
# # for xi in segment[vid]:
# # print("\n\n")
# # print "xi",xi
# # print("\n\n")
# # print(np.array(xi))
# # print([np.array(xi) for xi in segment[vid]])
# # db=np.array([np.array(xi) for xi in segment[vid]])
# # for i, xi in enumerate(segment[vid]):
# # for j, x2i in enumerate(xi):
# # if x2i.__class__.__name__ in ('list'):
# # continue
# # segment[vid][i][j] = [segment[vid][i][j]]
# # db=np.array([np.array(xi) for xi in segment[vid]])
# # print(segment[vid])
# # db=np.array([np.array([np.array(x2i) for xi2 in xi]) for xi in segment[vid]])
# # print(segment[vid])
# db1 = segment1[vid]
# # db=np.array([np.array(xi) for xi in segment[vid][0]])
# # db = np.array(segment[vid])
# # print(db[0].shape)
# # print(db[0][2])
# # print(db[:,:2])
# if len(db) == 0:
# continue
# # print(db)
# # print(db)
# for row in db:
# row[0] = row[0]*FPS
# row[1] = row[1]*FPS
# # db[:,:2] = db[:,:2] * FPS
# # print(db)
# for win in WINS:
# stride = win / LENGTH
# step = stride * STEP
# # Forward Direction
# for start in xrange(0, max(1, length - win + 1), step):
# end = min(start + win, length)
# assert end <= length
# # rois = db[np.logical_not(np.logical_or(db[:,0] >= end, db[:,1] <= start))]
# rois = []
# # be careful of the less than and equal to here
# for row in db:
# if row[0]< end and row[1] > start:
# rois.append(row)
# # print(rois)
# # Remove duration less than min_length
# # if len(rois) > 0:
# # duration = rois[:,1] - rois[:,0]
# # rois = rois[duration >= min_length]
# # Remove overlap less than overlap_thresh
# # print(rois[:,1])
# print(rois[0][1])
# if len(rois) > 0:
# time_in_wins = (np.minimum(end, rois[:,1]) - np.maximum(start, rois[:,0]))*1.0
# overlap = time_in_wins / (rois[:,1] - rois[:,0])
# assert min(overlap) >= 0
# assert max(overlap) <= 1
# rois = rois[overlap >= overlap_thresh]
# # Append data
# if len(rois) > 0:
# rois[:,0] = np.maximum(start, rois[:,0])
# rois[:,1] = np.minimum(end, rois[:,1])
# tmp = generate_roi(rois, vid, start, end, stride, split)
# roidb.append(tmp)
# if USE_FLIPPED:
# flipped_tmp = copy.deepcopy(tmp)
# flipped_tmp['flipped'] = True
# roidb.append(flipped_tmp)
# # Backward Direction
# # for end in xrange(length, win-1, - step):
# # start = end - win
# # assert start >= 0
# # rois = db[np.logical_not(np.logical_or(db[:,0] >= end, db[:,1] <= start))]
# # # Remove duration less than min_length
# # # if len(rois) > 0:
# # # duration = rois[:,1] - rois[:,0]
# # # rois = rois[duration > min_length]
# # # Remove overlap less than overlap_thresh
# # if len(rois) > 0:
# # time_in_wins = (np.minimum(end, rois[:,1]) - np.maximum(start, rois[:,0]))*1.0
# # overlap = time_in_wins / (rois[:,1] - rois[:,0])
# # assert min(overlap) >= 0
# # assert max(overlap) <= 1
# # rois = rois[overlap > overlap_thresh]
# # # Append data
# # if len(rois) > 0:
# # rois[:,0] = np.maximum(start, rois[:,0])
# # rois[:,1] = np.minimum(end, rois[:,1])
# # tmp = generate_roi(rois, vid, start, end, stride, split)
# # print(tmp)
# # roidb.append(tmp)
# # if USE_FLIPPED:
# # flipped_tmp = copy.deepcopy(tmp)
# # flipped_tmp['flipped'] = True
# # roidb.append(flipped_tmp)
# return roidb
|
from __future__ import print_function
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import numpy as np
import matplotlib.pyplot as plt
import function_blocks as fb
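# Inference sweep for a two-layer stochastic-computing (SC) network: the test
# inputs are converted to bit-streams of increasing length, pushed through a
# matmul/add -> matmul/add graph built from function_blocks, and the resulting
# classification accuracy is compared against the floating-point baseline.
# ("Stochastic computing" is an assumption based on the bit-stream conversion
# and scaling parameters used below.)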
def main():
    # Normalization step of the softmax: np.exp() is applied to the logits
    # further below, so this lambda only divides the scores by their row sums.
softmax = lambda s: s / np.sum(s, axis=1, dtype=np.float64, keepdims=True)
# SC network parameters
len_lst = [pow(2,5),pow(2,8),pow(2,11),pow(2,13),pow(2,14),pow(2,15),pow(2,16),pow(2,17)]
l1_params = {
        'matmul_inp': 1, # Matrix multiplication input scaling
'matmul_int': None, # Matrix multiplication intermediate scaling
'matmul_out': 1024, # Matrix multiplication output scaling
'matmul_nod': None, # Number of nodes in dot product decomposition
'matmul_usc': True, # Upscale the result of matrix multiplication
'matmul_gint':32, # Gain factor for intermediate dot products
'matmul_gout':None, # Gain factor for the output of dot product
'matadd_inp': 32, # Matrix addition input scaling
'matadd_out': 32, # Matrix addition output scaling
'matadd_usc': True # Upscale the result of matrix addition
}
lo_params = {
'matmul_inp': 32, # Matrix multiplication input scaling
'matmul_int': None, # Matrix multiplication intermediate scaling
'matmul_out': 2048, # Matrix multiplication output scaling
'matmul_nod': None, # Number of nodes in dot product decomposition
'matmul_usc': True, # Upscale the result of matrix multiplication
'matmul_gint':8, # Gain factor for intermediate dot products
'matmul_gout':None, # Gain factor for the output of dot product
'matadd_inp': 256, # Matrix addition input scaling
'matadd_out': 256, # Matrix addition output scaling
'matadd_usc': True # Upscale the result of matrix addition
}
# Load data
features = np.genfromtxt('../../../data/features.csv', delimiter=',')
labels = np.genfromtxt('../../../data/labels.csv', delimiter=',')
# Load trained coefficients
weights = {
'h1': np.genfromtxt('coeffs/W1.csv', delimiter=','),
'out': np.genfromtxt('coeffs/Wout.csv', delimiter=','),
}
biases = {
'b1': np.genfromtxt('coeffs/b1.csv', delimiter=','),
'out': np.genfromtxt('coeffs/bout.csv', delimiter=','),
}
# Slice a subset of the data
test_size = 10
X = features[:test_size,:]
Y = labels[:test_size,:]
print('Data & Model Restored')
sc_accuracy_lst = []
for no_samples in len_lst:
print('SC inference using bit-stream length of', no_samples)
# Convert data to SC bit-streams
S = fb.mat_sng(X,no_samples)
print('Bit-streams Generated')
# SC network graph
l1_matmul = fb.sc_matmul(S,
weights['h1'],
Sin=l1_params['matmul_inp'],
Sout=l1_params['matmul_out'],
no_nodes=l1_params['matmul_nod'],
Sint=l1_params['matmul_int'],
upscale=l1_params['matmul_usc'],
g_int=l1_params['matmul_gint'],
g_out=l1_params['matmul_gout'])
print('L1 matmul done')
l1_matadd = fb.sc_matvec_add(l1_matmul,
biases['b1'],
l1_params['matadd_inp'],
l1_params['matadd_usc'])
print('L1 matadd done')
l2_matmul = fb.sc_matmul(l1_matadd,
weights['out'],
Sin=lo_params['matmul_inp'],
Sout=lo_params['matmul_out'],
no_nodes=lo_params['matmul_nod'],
Sint=lo_params['matmul_int'],
upscale=lo_params['matmul_usc'],
g_int=lo_params['matmul_gint'],
g_out=lo_params['matmul_gout'])
print('L2 matmul done')
l2_matadd = fb.sc_matvec_add(l2_matmul,
biases['out'],
lo_params['matadd_inp'],
lo_params['matadd_usc'])
print('L2 matadd done')
# Convert back to floating point & calculate accuracy
logits = fb.mat_sc_value(l2_matadd)
logits = logits*lo_params['matadd_out']
probs = np.exp(logits,dtype=np.float64)
prediction = softmax(probs)
correct_pred = np.equal(np.argmax(prediction,axis=1),np.argmax(Y,axis=1))
accuracy = np.mean(correct_pred)
sc_accuracy_lst.append(accuracy)
print("Testing Accuracy: ", accuracy)
# Plot the results
float_net_accuracy = 0.870455
float_net_accuracy_lst = np.ones(len(len_lst),dtype=np.float64)*float_net_accuracy
    plt.semilogx(np.array(len_lst),np.array(sc_accuracy_lst),basex=2)
plt.semilogx(np.array(len_lst),float_net_accuracy_lst,color='r',basex=2)
plt.title('Classification Accuracy versus Bit-Stream length')
plt.ylabel('Classification Accuracy')
plt.xlabel('Bit-Stream Length')
plt.grid(True)
if __name__ == "__main__":
main()
plt.show() |
"""
Dinghy daily digest tool.
"""
__version__ = "0.11.2"
|
#!/usr/bin/python
# coding:utf-8
import sys
import subprocess
import socket
import psutil
import json
import datetime
from Crypto.PublicKey import RSA
from hashlib import sha512
device_white = ['eth0', 'eth1', 'eth2', 'eth3', 'bond0', 'bond1']
def get_system_serial_number():
ret = {}
cmd = "dmidecode -s system-serial-number"
serial_number = subprocess.Popen(cmd, shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
sn = serial_number.stdout.readline().decode().replace("\n","")
ret["serial_number"] = sn
return ret
def get_mem_info():
ret = {}
with open("/proc/meminfo") as f:
tmp = int(f.readline().split()[1])
ret["mem"] = tmp / 1024
return ret
def get_cpu_info():
ret = {'cpu':'', 'num':0}
with open('/proc/cpuinfo') as f:
for line in f:
tmp = line.split(":")
key = tmp[0].strip()
if key == "processor":
ret['num'] += 1
if key == "model name":
ret['cpu'] = tmp[1].strip()
return ret
def get_disk_info():
cmd = """/sbin/fdisk -l|grep Disk|egrep -v 'identifier|mapper|Disk label'"""
disk_data = subprocess.Popen(cmd, shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
    partition_size = []
    for dev in disk_data.stdout.readlines():
        size = int(str(dev).strip().split(',')[1].split()[0]) / 1024 / 1024 / 1024
        partition_size.append(str(size))
    ret = {}
    ret["disk"] = " + ".join(partition_size)
return ret
def get_host_info():
ret = {}
name = socket.getfqdn(socket.gethostname())
ret["host"] = name
return ret
def get_net_info():
ret = []
a = psutil.net_if_addrs()
for device in a:
device_info = a[device]
if device in device_white:
tmp_device = {}
for sinc in device_info:
if sinc.family == 2:
tmp_device['ip'] = sinc.address
if sinc.family == 17:
tmp_device['mac'] = sinc.address
ret.append(tmp_device)
r = {}
r["net"] = ret
return r
def pack():
#a = get_mem_info()
#b = get_cpu_info()
#c = get_disk_info()
#d = get_host_info()
#e = get_net_info()
#f = get_system_serial_number()
#ret = {**f}
ret = {}
return ret
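# Verify the machine certificate: pow(signature, e, n) recovers the signed
# payload as plain bytes (textbook RSA without padding), the expiry date after
# the last '+' is checked, one more '+'-delimited trailing field is discarded,
# and what remains is compared against the JSON produced by pack() for this
# machine.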
def verify_certification():
try:
file = open("public.pem",'r')
public_key = RSA.import_key(file.read())
file.close()
except:
print("ERROR: invalid public key file -> public.pem")
exit(-1)
n = public_key.n
e = public_key.e
try:
file = open("certification.cert",'r')
signature = file.read()
file.close()
except:
print("ERROR: miss signature file -> certification.cert")
exit(-1)
try:
signature = int(signature)
except:
print("ERROR: invalid signature file -> certification.cert")
exit(-1)
try:
p = pack()
j = json.dumps(p)
except:
print("ERROR: couldn't get full computer identification info")
exit(-1)
# hash = int.from_bytes(sha512(j).digest(), byteorder='big')
# hash = int.from_bytes(j, byteorder='big')
try:
hashFromSignature = pow(signature, e, n)
except:
print("ERROR: invalid public key")
exit(-1)
cipher_msg = hashFromSignature.to_bytes(length=hashFromSignature.bit_length()//8+1, byteorder='big').decode()
pos = cipher_msg.rfind("+")
date = cipher_msg[pos+1:]
if is_expired(date):
print("WARN: Signature already expire")
exit(0)
cipher_msg = cipher_msg[0:pos]
pos = cipher_msg.rfind("+")
cipher_msg = cipher_msg[0:pos]
print("INFO: currently:"+j)
return j == cipher_msg
def is_expired(date):
if datetime.datetime.now() >= datetime.datetime.strptime(date,'%Y-%m-%d %H:%M:%S'):
return True
else:
return False
if __name__ == "__main__":
    argc = len(sys.argv)
    if argc != 1:
print("invalid arguments")
exit(-1)
print("verifying...")
print(verify_certification())
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torch.nn import Parameter
import copy
import argparse
parser=argparse.ArgumentParser(description="Basic Pointer Network.")
parser.add_argument('--seq_len', default=10, type=int, choices=[5,10,20])
parser.add_argument('--load', default=False, action='store_true')
parser.add_argument('--save', default=False, action='store_true')
args=vars(parser.parse_args())
SEQ_LEN = args['seq_len']
MAX_EPOCHS = 10000
INPUT_DIM = 2
HIDDEN_DIM = 512
BATCH_SIZE = 128
LEARNING_RATE = 0.0005
ENCODER_LAYERS = 2
LOAD_FROM_EXISTED_MODEL = args['load']
SAVE_MODEL=args['save']
if torch.cuda.is_available():
USE_CUDA = True
print('Using GPU, %i devices.' % torch.cuda.device_count())
else:
USE_CUDA = False
class PtrNet(nn.Module):
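    # Pointer network: a multi-layer LSTM encodes the input sequence, an
    # LSTMCell decodes step by step, and at every step the attention scores
    # ptr_v(tanh(W1*e_j + W2*d_i)) over the encoder states are used directly
    # as the output distribution over input positions; the attention-weighted
    # encoder states are also mixed back into the decoder hidden state.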
def __init__(self, batch_size, input_dim, hidden_dim, encoder_layers):
super(PtrNet, self).__init__()
self.batch_size = 0
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.encoder_layers = encoder_layers
self.seq_len = 0
self.encoder = nn.LSTM(self.input_dim, self.hidden_dim, self.encoder_layers)
self.decoder = nn.LSTMCell(self.input_dim, self.hidden_dim)
self.ptr_W1 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.ptr_W2 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.ptr_v = nn.Linear(self.hidden_dim, 1)
self.combine_hidden = nn.Linear(self.hidden_dim * 2, self.hidden_dim)
def forward(self, input):
# input: batch_size*seq_len*input_dim
input = torch.tensor(input)
self.batch_size=input.shape[0]
self.seq_len = input.shape[1]
hidden_state = torch.zeros([self.encoder_layers, self.batch_size, self.hidden_dim]).float()
cell_state = torch.zeros([self.encoder_layers, self.batch_size, self.hidden_dim]).float()
if USE_CUDA:
hidden_state=hidden_state.cuda()
cell_state=cell_state.cuda()
# input-> seq_len*batch_size*input_dim
input = input.transpose(0, 1).float()
# encoding_hidden_states: seq_len * batch_size * hidden_dim
# hidden_state & cell_state: encoder_layers * batch_size * hidden_dim
encoding_hidden_states, (hidden_state, cell_state) = self.encoder(input, (hidden_state, cell_state))
# W_1e: seq_len*batch_size*hidden_dim
W_1e = self.ptr_W1(encoding_hidden_states)
# encoding_hidden_states -> batch_size*seq_len*hidden_dim
encoding_hidden_states = encoding_hidden_states.transpose(0, 1)
current_input = torch.full((self.batch_size, self.input_dim), -1.0)
if USE_CUDA:
current_input=current_input.cuda()
# hidden_state & cell_state-> batch_size * hidden_dim
hidden_state = hidden_state[-1]
cell_state = cell_state[-1]
# input-> batch_size*seq_len*input_dim
input = input.transpose(0, 1)
output = []
for i in range(self.seq_len):
u_i = []
(hidden_state, cell_state) = self.decoder(current_input, (hidden_state, cell_state))
for j in range(self.seq_len):
# u_i.append( (batch_size*1)->batchsize )
u_i.append(self.ptr_v(torch.tanh(W_1e[j] + self.ptr_W2(hidden_state))).squeeze(1))
# u_i-> batch_size*seq_len
u_i = torch.stack(u_i).t()
# a_i:batch_size*seq_len
a_i = F.softmax(u_i, 1)
output.append(a_i)
# chosen_value:batch_size
chosen_value = a_i.argmax(1)
# current_input: batch_size*input_dim
current_input = [input[i][chosen_value[i]] for i in range(self.batch_size)]
current_input = torch.stack(current_input)
# a_i: batch_size*seq_len -> batch_size*seq_len*hidden_dim (same data)
a_i = a_i.unsqueeze(2).expand(self.batch_size, self.seq_len, self.hidden_dim)
# hidden_calced: batch_size*hidden_dim
hidden_calced = torch.sum(torch.mul(a_i, encoding_hidden_states), 1)
hidden_state = self.combine_hidden(torch.cat((hidden_calced, hidden_state), 1))
# return: seq_len*batch_size*seq_len -> batch_size*seq_len*seq_len
return torch.stack(output).transpose(0, 1)
def beam_search(output,beam_size):
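    # Beam search over the pointer-network output: `output` is
    # batch_size x seq_len x seq_len step-wise probabilities; for each batch
    # element the `beam_size` best partial tours (ranked by summed
    # log-probability, never revisiting a node) are kept, and the single best
    # complete tour is returned.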
batch_size=output.shape[0]
seq_len=output.shape[1]
lnpro=torch.log(output).data
print(lnpro.size())
ans=[]
for case in range(batch_size):
res=[([],0)]*beam_size
for i in range(seq_len):
# print("res",res)
tmp=[]
for nodes,prob in res:
# print("nodes,prob",nodes,prob)
for j in range(seq_len):
selected=False
if len(nodes)>0:
for node in nodes:
if node==j:
selected=True
break
if selected:
continue
next=copy.deepcopy(nodes)
next.append(j)
tmp.append((next,prob+lnpro[case][i][j]))
res=sorted(tmp,key=lambda p: p[1],reverse=True)[0:beam_size]
# print(res)
ans.append(res[0][0])
return ans
class Trainer:
def __init__(self, batch_size,input_dim, hidden_dim, encoder_layers, learning_rate, from_former_model):
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.learning_rate = learning_rate
self.ptrNet = PtrNet(batch_size, input_dim, hidden_dim, encoder_layers)
# self.ptrNet=PointerNet(128,hidden_dim,encoder_layers,0.,False)
if USE_CUDA:
self.ptrNet.cuda()
net = torch.nn.DataParallel(self.ptrNet, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
self.optimizer = torch.optim.Adam(self.ptrNet.parameters(), lr=learning_rate)
# for name,param in self.ptrNet.named_parameters():
# print(name,param.requires_grad)
self.CEL = torch.nn.CrossEntropyLoss()
self.episode = 0
self.seq_len = 0
self.filename = "../model/" + str(SEQ_LEN) + "mydata.pt"
if from_former_model:
self.load_model()
self.outf=open("../result/"+str(SEQ_LEN)+"prs_result.txt","w")
def train(self, input, ground_truth):
self.seq_len = input.shape[1]
batch_size=input.shape[0]
output = self.ptrNet(input.float())
# loss = torch.sqrt(torch.mean(torch.pow(output - truth, 2)))
calc_output = output.reshape((batch_size * self.seq_len, self.seq_len))
calc_ground_truth = ground_truth.reshape(-1)
loss = self.CEL(calc_output, calc_ground_truth.long())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.episode += 1
if self.episode % 5 == 0:
print(output[0])
print(ground_truth[0])
print(loss.data)
self.outf.write(str(self.episode) + " " +self.check_result(input, output, ground_truth)+" "+"{:.12f}\n".format(loss.data))
if SAVE_MODEL and self.episode % 200 == 0:
self.save_model()
def check_result(self, input, output, truth):
# output:batch_size*seq_len*seq_len
# truth:batch_size*seq_len
batch_size=input.shape[0]
seq_len=input.shape[1]
tour_length = 0.0
optimal_length = 0.0
ans_length=0.0
tot_optimal_length=0.0
ans = np.array(beam_search(output.cpu(), 2))
output = output.cpu().data.numpy()
truth = truth.cpu().data.numpy()
invalid_cnt = 0
for case in range(batch_size):
for i in range(1, seq_len):
ans_length += torch.sqrt(torch.sum(torch.pow(input[case][ans[case][i]] - input[case][ans[case][i - 1]], 2)))
tot_optimal_length += torch.sqrt(
torch.sum(torch.pow(input[case][truth[case][i]] - input[case][truth[case][i - 1]], 2)))
tot_optimal_length += torch.sqrt(
torch.sum(torch.pow(input[case][truth[case][0]] - input[case][truth[case][seq_len - 1]], 2)))
ans_length += torch.sqrt(
torch.sum(torch.pow(input[case][ans[case][0]] - input[case][ans[case][seq_len - 1]], 2)))
tour = [np.argmax(output[case][i]) for i in range(input.shape[1])]
flag = 0
for i in range(seq_len):
for j in range(i + 1, seq_len):
if tour[i] == tour[j]:
flag = 1
invalid_cnt += flag
if flag == 1:
continue
for i in range(1, seq_len):
tour_length += torch.sqrt(torch.sum(torch.pow(input[case][tour[i]] - input[case][tour[i - 1]], 2)))
optimal_length += torch.sqrt(
torch.sum(torch.pow(input[case][truth[case][i]] - input[case][truth[case][i - 1]], 2)))
tour_length += torch.sqrt(
torch.sum(torch.pow(input[case][tour[0]] - input[case][tour[seq_len - 1]], 2)))
optimal_length += torch.sqrt(
torch.sum(torch.pow(input[case][truth[case][0]] - input[case][truth[case][seq_len - 1]], 2)))
score = 0.0
for i in range(batch_size):
for j in range(seq_len):
if np.argmax(output[i][j]) == truth[i][j]:
score += 1.0
score=score/batch_size/seq_len
valid_ratio=(batch_size-invalid_cnt)/batch_size
average_ans=ans_length.cpu().numpy()/batch_size
tot_optimal_length/=batch_size
print(str(self.episode) + "th score: " + str(score ))
print(str(self.episode) + "th valid_ratio: " + str(valid_ratio))
if input.shape[0] == invalid_cnt:
print("No valid output!!!")
else:
print(str(self.episode) + "th length_ratio: " + str((tour_length/optimal_length).cpu().numpy()))
print(str(self.episode) + "th average_ans: " + str(average_ans))
return str(score)+" "+str(valid_ratio)+" "+str(average_ans)+" "+str(tot_optimal_length.cpu().numpy())
def save_model(self):
torch.save(self.ptrNet.state_dict(), self.filename)
print("Saved model")
def load_model(self):
self.ptrNet.load_state_dict(torch.load(self.filename, map_location=torch.device('cpu')))
print("loaded model")
def get_one_hot_output(output):
    # output: batch_size*seq_len, 1-indexed node labels
    # returns the same tensor shifted to 0-indexed labels (despite the name,
    # no one-hot encoding happens here)
for i in range(output.shape[0]):
for j in range(output.shape[1]):
output[i][j] -= 1
return output
class TSPdataset(Dataset):
def __init__(self, filename, seq_len):
super(TSPdataset, self).__init__()
self.filename = filename
self.seq_len = seq_len
self.load_data()
def load_data(self):
f = open(self.filename, "r")
data = []
for line in f:
input, ground_truth = line.strip().split("output")
input = list(map(float, input.strip().split(" ")))
ground_truth = list(map(int, ground_truth.strip().split(" ")))[0:-1]
input = np.array(input).reshape((self.seq_len, 2))
ground_truth = np.array(ground_truth)
data.append((input, ground_truth))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
input, ground_truth = self.data[index]
return input, ground_truth
dataset = TSPdataset("../train/tsp_correct_" + str(SEQ_LEN) + ".txt", SEQ_LEN)
dataloader = DataLoader(dataset, shuffle=True, batch_size=BATCH_SIZE)
import warnings
warnings.filterwarnings("ignore",category=Warning)
trainer = Trainer(BATCH_SIZE,INPUT_DIM, HIDDEN_DIM, ENCODER_LAYERS, LEARNING_RATE, LOAD_FROM_EXISTED_MODEL)
from tqdm import tqdm
for i in range(MAX_EPOCHS):
for input, ground_truth in dataloader:
# if input.shape[0] != BATCH_SIZE:
# break
# print(input.shape)
# print(ground_truth.shape)
# print(input)
ground_truth = get_one_hot_output(ground_truth)
if USE_CUDA:
input = input.cuda()
ground_truth = ground_truth.cuda()
# print(ground_truth)
trainer.train(input, ground_truth)
|
#import numpy as np
MIN_SIZE=-1
"""This is the minimum size allowed!"""
_pro_min_size=-2
__pri_min_size=-3
def mini(x, y):
"""
    Take the minimum of two numbers
**Parameters**
> **x:** `float` -- Description of parameter `x`.
> **y:** `float` -- Description of parameter `y`.
**Returns**
> `float` -- Description of returned object.
"""
#return np.min(x, y)
return y
def mini2peutetre(x, y):
"""
    Take the minimum of two numbers
**Parameters**
> **x:** `float` -- Description of parameter `x`.
> **y:** `float` -- Description of parameter `y`.
**Returns**
> `float` -- Description of returned object.
"""
#return np.min(x, y)
return y
|
#!/usr/bin/env python
import argparse
import time
import sys
from sys import exit
from Emulator import Emulator
import generators
from generators.fba2x.fba2xGenerator import Fba2xGenerator
from generators.kodi.kodiGenerator import KodiGenerator
from generators.linapple.linappleGenerator import LinappleGenerator
from generators.libretro.libretroGenerator import LibretroGenerator
from generators.moonlight.moonlightGenerator import MoonlightGenerator
from generators.mupen.mupenGenerator import MupenGenerator
from generators.ppsspp.ppssppGenerator import PPSSPPGenerator
from generators.reicast.reicastGenerator import ReicastGenerator
from generators.dolphin.dolphinGenerator import DolphinGenerator
from generators.scummvm.scummvmGenerator import ScummVMGenerator
from generators.dosbox.dosboxGenerator import DosBoxGenerator
from generators.vice.viceGenerator import ViceGenerator
from generators.advancemame.advMameGenerator import AdvMameGenerator
import controllersConfig as controllers
import utils.runner as runner
import signal
import recalboxFiles
import os
generators = {
'fba2x': Fba2xGenerator(),
'kodi': KodiGenerator(),
'linapple': LinappleGenerator(os.path.join(recalboxFiles.HOME_INIT, '.linapple'),
os.path.join(recalboxFiles.HOME, '.linapple')),
'libretro': LibretroGenerator(),
'moonlight': MoonlightGenerator(),
'scummvm': ScummVMGenerator(),
'dosbox': DosBoxGenerator(),
'mupen64plus': MupenGenerator(),
'vice': ViceGenerator(),
'reicast': ReicastGenerator(),
'dolphin': DolphinGenerator(),
'ppsspp': PPSSPPGenerator(),
'advancemame' : AdvMameGenerator()
}
# List emulators with their cores (still to configure: mupen64, scummvm)
emulators = dict()
# Nintendo
emulators["snes"] = Emulator(name='snes', emulator='libretro', core='pocketsnes')
emulators["nes"] = Emulator(name='nes', emulator='libretro', core='fceunext')
emulators["n64"] = Emulator(name='n64', emulator='mupen64plus', core='gliden64')
emulators["gba"] = Emulator(name='gba', emulator='libretro', core='gpsp')
emulators["gb"] = Emulator(name='gb', emulator='libretro', core='gambatte')
emulators["gbc"] = Emulator(name='gbc', emulator='libretro', core='gambatte')
emulators["fds"] = Emulator(name='fds', emulator='libretro', core='nestopia')
emulators["virtualboy"] = Emulator(name='virtualboy', emulator='libretro', core='vb')
emulators["gamecube"] = Emulator(name='gamecube', emulator='dolphin')
emulators["wii"] = Emulator(name='wii', emulator='dolphin')
# Sega
emulators["sg1000"] = Emulator(name='sg1000', emulator='libretro', core='genesisplusgx')
emulators["mastersystem"] = Emulator(name='mastersystem', emulator='libretro', core='picodrive')
emulators["megadrive"] = Emulator(name='megadrive', emulator='libretro', core='picodrive')
emulators["gamegear"] = Emulator(name='gamegear', emulator='libretro', core='genesisplusgx')
emulators["sega32x"] = Emulator(name='sega32x', emulator='libretro', core='picodrive')
emulators["segacd"] = Emulator(name='segacd', emulator='libretro', core='picodrive')
emulators["dreamcast"] = Emulator(name='dreamcast', emulator='reicast')
# Arcade
emulators["neogeo"] = Emulator(name='neogeo', emulator='fba2x')
emulators["mame"] = Emulator(name='mame', emulator='libretro', core='mame078')
emulators["fba"] = Emulator(name='fba', emulator='fba2x')
emulators["fba_libretro"] = Emulator(name='fba_libretro', emulator='libretro', core='fba')
emulators["advancemame"] = Emulator(name='advancemame', emulator='advmame')
# Computers
emulators["msx"] = Emulator(name='msx', emulator='libretro', core='bluemsx')
emulators["msx1"] = Emulator(name='msx1', emulator='libretro', core='bluemsx')
emulators["msx2"] = Emulator(name='msx2', emulator='libretro', core='bluemsx')
emulators["amiga"] = Emulator(name='amiga', emulator='libretro', core='puae')
emulators["amstradcpc"] = Emulator(name='amstradcpc', emulator='libretro', core='cap32')
emulators["apple2"] = Emulator(name='apple2', emulator='linapple', videomode='default')
emulators["atarist"] = Emulator(name='atarist', emulator='libretro', core='hatari')
emulators["zxspectrum"] = Emulator(name='zxspectrum', emulator='libretro', core='fuse')
emulators["o2em"] = Emulator(name='odyssey2', emulator='libretro', core='o2em')
emulators["zx81"] = Emulator(name='zx81', emulator='libretro', core='81')
emulators["dos"] = Emulator(name='dos', emulator='dosbox', videomode='default')
emulators["c64"] = Emulator(name='c64', emulator='vice', core='x64')
#
emulators["ngp"] = Emulator(name='ngp', emulator='libretro', core='mednafen_ngp')
emulators["ngpc"] = Emulator(name='ngpc', emulator='libretro', core='mednafen_ngp')
emulators["gw"] = Emulator(name='gw', emulator='libretro', core='gw')
emulators["vectrex"] = Emulator(name='vectrex', emulator='libretro', core='vecx')
emulators["lynx"] = Emulator(name='lynx', emulator='libretro', core='mednafen_lynx')
emulators["lutro"] = Emulator(name='lutro', emulator='libretro', core='lutro')
emulators["wswan"] = Emulator(name='wswan', emulator='libretro', core='mednafen_wswan', ratio='16/10')
emulators["wswanc"] = Emulator(name='wswanc', emulator='libretro', core='mednafen_wswan', ratio='16/10')
emulators["pcengine"] = Emulator(name='pcengine', emulator='libretro', core='mednafen_supergrafx')
emulators["pcenginecd"] = Emulator(name='pcenginecd', emulator='libretro', core='mednafen_supergrafx')
emulators["supergrafx"] = Emulator(name='supergrafx', emulator='libretro', core='mednafen_supergrafx')
emulators["atari2600"] = Emulator(name='atari2600', emulator='libretro', core='stella')
emulators["atari7800"] = Emulator(name='atari7800', emulator='libretro', core='prosystem')
emulators["prboom"] = Emulator(name='prboom', emulator='libretro', core='prboom')
emulators["psx"] = Emulator(name='psx', emulator='libretro', core='pcsx_rearmed')
emulators["cavestory"] = Emulator(name='cavestory', emulator='libretro', core='nxengine')
emulators["imageviewer"] = Emulator(name='imageviewer', emulator='libretro', core='imageviewer')
emulators["scummvm"] = Emulator(name='scummvm', emulator='scummvm', videomode='default')
emulators["colecovision"] = Emulator(name='colecovision', emulator='libretro', core='bluemsx')
emulators["kodi"] = Emulator(name='kodi', emulator='kodi', videomode='default')
emulators["moonlight"] = Emulator(name='moonlight', emulator='moonlight')
emulators["psp"] = Emulator(name='psp', emulator='ppsspp')
def main(args):
playersControllers = dict()
if not args.demo:
# Read the controller configuration
playersControllers = controllers.loadControllerConfig(args.p1index, args.p1guid, args.p1name, args.p1devicepath, args.p1nbaxes,
args.p2index, args.p2guid, args.p2name, args.p2devicepath, args.p2nbaxes,
args.p3index, args.p3guid, args.p3name, args.p3devicepath, args.p3nbaxes,
args.p4index, args.p4guid, args.p4name, args.p4devicepath, args.p4nbaxes,
args.p5index, args.p5guid, args.p5name, args.p5devicepath, args.p5nbaxes)
systemName = args.system
# Main Program
# A generator will configure its emulator, and return a command
if systemName in emulators:
system = emulators[systemName]
system.configure(args.emulator, args.core, args.ratio, args.netplay)
# Save dir
dirname = os.path.join(recalboxFiles.savesDir, system.name)
if not os.path.exists(dirname):
os.makedirs(dirname)
if system.config['emulator'] not in recalboxFiles.recalboxBins:
strErr = "ERROR : {} is not a known emulator".format(system.config['emulator'])
print >> sys.stderr, strErr
exit(2)
command = generators[system.config['emulator']].generate(system, args.rom, playersControllers)
# The next line is commented and will eventually be used instead of the previous one
# if we even want the binary to be set from here rather than from the generator
# command.array.insert(0, recalboxFiles.recalboxBins[system.config['emulator']])
print(command.array)
return runner.runCommand(command)
else:
sys.stderr.write("Unknown system: {}".format(systemName))
return 1
def config_upgrade(version):
'''
Upgrade all generators user's configuration files with new values added
to their system configuration file upgraded by S11Share:do_upgrade()
Args:
version (str): New Recalbox version
Returns (bool):
        Returns True if all generators successfully handled the upgrade.
'''
res = True
for g in generators.values():
res &= g.config_upgrade(version)
return res
def signal_handler(signal, frame):
print('Exiting')
if runner.proc:
print('killing runner.proc')
runner.proc.kill()
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(description='emulator-launcher script')
parser.add_argument("-p1index", help="player1 controller index", type=int, required=False)
parser.add_argument("-p1guid", help="player1 controller SDL2 guid", type=str, required=False)
parser.add_argument("-p1name", help="player1 controller name", type=str, required=False)
parser.add_argument("-p1devicepath", help="player1 controller device", type=str, required=False)
parser.add_argument("-p1nbaxes", help="player1 controller number of axes", type=str, required=False)
parser.add_argument("-p2index", help="player2 controller index", type=int, required=False)
parser.add_argument("-p2guid", help="player2 controller SDL2 guid", type=str, required=False)
parser.add_argument("-p2name", help="player2 controller name", type=str, required=False)
parser.add_argument("-p2devicepath", help="player2 controller device", type=str, required=False)
parser.add_argument("-p2nbaxes", help="player2 controller number of axes", type=str, required=False)
parser.add_argument("-p3index", help="player3 controller index", type=int, required=False)
parser.add_argument("-p3guid", help="player3 controller SDL2 guid", type=str, required=False)
parser.add_argument("-p3name", help="player3 controller name", type=str, required=False)
parser.add_argument("-p3devicepath", help="player3 controller device", type=str, required=False)
parser.add_argument("-p3nbaxes", help="player3 controller number of axes", type=str, required=False)
parser.add_argument("-p4index", help="player4 controller index", type=int, required=False)
parser.add_argument("-p4guid", help="player4 controller SDL2 guid", type=str, required=False)
parser.add_argument("-p4name", help="player4 controller name", type=str, required=False)
parser.add_argument("-p4devicepath", help="player4 controller device", type=str, required=False)
parser.add_argument("-p4nbaxes", help="player4 controller number of axes", type=str, required=False)
parser.add_argument("-p5index", help="player5 controller index", type=int, required=False)
parser.add_argument("-p5guid", help="player5 controller SDL2 guid", type=str, required=False)
parser.add_argument("-p5name", help="player5 controller name", type=str, required=False)
parser.add_argument("-p5devicepath", help="player5 controller device", type=str, required=False)
parser.add_argument("-p5nbaxes", help="player5 controller number of axes", type=str, required=False)
parser.add_argument("-system", help="select the system to launch", type=str, required=True)
parser.add_argument("-rom", help="rom absolute path", type=str, required=True)
parser.add_argument("-emulator", help="force emulator", type=str, required=False)
parser.add_argument("-core", help="force emulator core", type=str, required=False)
parser.add_argument("-ratio", help="force game ratio", type=str, required=False)
parser.add_argument("-demo", help="mode demo", type=bool, required=False)
parser.add_argument("-netplay", help="host/client", type=str, required=False)
args = parser.parse_args()
exitcode = main(args)
time.sleep(1)
exit(exitcode)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
import os
# print(os.getcwd())
# print(os.path.abspath(__file__))
# print(os.path.dirname(os.path.abspath(__file__)))
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(os.getcwd())
file = open(r"test\nfiles\elzero.txt")
|
import runpy
if __name__ == "__main__":
runpy.run_module("pyfahrplan", run_name="__main__")
|
from typing import List, Tuple
from itertools import islice
from functools import reduce
from .cards import Card
from .enums import HandRanking, CardRank, CardSuit
def eval_hand(hand: List[Card]) -> Tuple[int, List[int]]:
""" Evaluate hand of cards
Params
------
hand : list of cards
Up to seven cards to evaluate
as a poker hand.
Returns
-------
tuple
A tuple where the first item
is the hand ranking, as in
HandRanking enum, and the second
is a list of kickers used to
break ties. The number of kickers
depends on the type of hand; for
instance, only one kicker is
returned for a straight, and five
are returned for a flush.
"""
if not hand: return HandRanking.NONE, []
elif len(hand) == 1: return HandRanking.HIGH, [hand[0].rank]
elif len(hand) == 2:
first, second = hand
if first.rank == second.rank: return HandRanking.PAIR, [first.rank]
else: return HandRanking.HIGH, [max(first.rank, second.rank), min(first.rank, second.rank)]
else:
# Sort by rank
rank_sorted = sorted(hand, key=lambda c: c.rank, reverse=True)
suit_sorted = sorted(hand, key=lambda c: (c.suit << 4) | c.rank, reverse=True)
idx = 0
flush = CardSuit.NUM_SUITS
both = CardSuit.NUM_SUITS
kind = 0
straight = 0
fourakind = []
threeakind = []
twoakind = []
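        # Bit-packed scan state (layout inferred from the updates below):
        #   flush    = (count << 8) | (rank << 4) | suit  - longest same-suit run
        #   both     = (count << 8) | (rank << 4) | suit  - straight-flush candidate
        #   kind     = (count << 4) | rank                - current N-of-a-kind run
        #   straight = (count << 4) | rank                - current straight run (rank = high card)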
for rank, suit in zip(rank_sorted, suit_sorted):
# This block checks flush and straight flush
if suit.suit == (flush & 0xf):
flush += 0x100
                # Check straight flush
if suit.rank + (both >> 8) == (both >> 4 & 0xf): both += 0x100
else: both = 0x100 | (suit.rank << 4) | suit.suit
elif (flush >> 8) < 5: both = flush = 0x100 | (suit.rank << 4) | suit.suit; flush_start = idx
# This block checks N of a kind and straights
if rank.rank == (kind & 0xf): kind += 0x10
else:
# Save as pair, tris or poker
numakind = kind >> 4
if numakind == 2: twoakind.append(kind & 0xf)
elif numakind == 3: threeakind.append(kind & 0xf)
elif numakind == 4: fourakind.append(kind & 0xf)
# Reset kind and check straight
kind = 0x10 | rank.rank
if rank.rank + (straight >> 4) == (straight & 0xf): straight += 0x10
elif (straight >> 4) < 5: straight = 0x10 | rank.rank
idx += 1
# Check last kind
numakind = kind >> 4
if numakind == 2: twoakind.append(kind & 0xf)
elif numakind == 3: threeakind.append(kind & 0xf)
elif numakind == 4: fourakind.append(kind & 0xf)
# Handle special case of 5-A straight
if (both >> 8) == 4 and (both >> 4 & 0xf) == CardRank.FIVE:
ace = next((True for c in hand if c.rank == CardRank.ACE and c.suit == (both & 0xf)), False)
if ace: return HandRanking.STRAIGHT_FLUSH, [CardRank.FIVE]
elif (straight >> 4) == 4 and (straight & 0xf) == CardRank.FIVE:
ace = next((True for c in hand if c.rank == CardRank.ACE), False)
if ace: return HandRanking.STRAIGHT, [CardRank.FIVE]
if (both >> 8) >= 5: return HandRanking.STRAIGHT_FLUSH, [both >> 4 & 0xf]
elif fourakind: return HandRanking.POKER, [fourakind[0], *islice((c.rank for c in rank_sorted if c.rank != fourakind[0]), 1)]
elif len(threeakind) > 1: return HandRanking.FULL, [threeakind[0], threeakind[1]]
elif threeakind and twoakind: return HandRanking.FULL, [threeakind[0], twoakind[0]]
elif (flush >> 8) >= 5: return HandRanking.FLUSH, [c.rank for c in suit_sorted[flush_start:flush_start + 5]]
elif (straight >> 4) >= 5: return HandRanking.STRAIGHT, [straight & 0xf]
elif threeakind: return HandRanking.TRIS, [threeakind[0], *islice((c.rank for c in rank_sorted if c.rank != threeakind[0]), 2)]
elif len(twoakind) > 1: return HandRanking.TWO_PAIR, [twoakind[0], twoakind[1], *islice((c.rank for c in rank_sorted if c.rank != twoakind[0] and c.rank != twoakind[1]), 1)]
elif twoakind: return HandRanking.PAIR, [twoakind[0], *islice((c.rank for c in rank_sorted if c.rank != twoakind[0]), 3)]
else: return HandRanking.HIGH, [c.rank for c in rank_sorted[:5]]
def get_kickers_value(kickers) -> int:
""" Returns the kickers value as a bit-packed integer
For instance, the kickers `[9, 8, 3]`
are returned as `(9 << 8) | (8 << 4)
| 3`
"""
return reduce(lambda value, kicker: value | (kicker[1] << (kicker[0] << 2)), enumerate(reversed(kickers)), 0)
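# Worked example of the packing above: get_kickers_value([9, 8, 3])
# == (9 << 8) | (8 << 4) | 3 == 0x983 == 2435.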
def compare_rankings(rankings: List[Tuple[int, List[int]]]) -> Tuple[List[int], List[int]]:
""" Compares multiple hands
Returns the winner and the
rankings of each hand
Params
------
`rankings` : list of hand rankings
A list of hand rankings with
kickers, as output by `eval_hand`
Returns
-------
tuple
Returns a tuple where:
- the first element is a one-hot
encoded array of the winners;
- the second element is a list
with the indices of the winner.
"""
winners = []
best_rank = HandRanking.NONE
best_kicker = 0
idx = 0
for rank, kickers in rankings:
kicker = get_kickers_value(kickers)
if rank < best_rank:
# Flush winners
best_rank = rank
best_kicker = kicker
winners = [idx]
elif rank == best_rank:
if kicker > best_kicker:
# Flush winners
                best_kicker = kicker
winners = [idx]
elif kicker == best_kicker:
# Add winner
winners.append(idx)
# Next hand
idx += 1
onehot = [int(idx in winners) for idx, _ in enumerate(rankings)]
return onehot, winners
def compare_hands(hands: List[List[Card]]) -> Tuple[List[int], List[int], List[Tuple[int, List[int]]]]:
""" Compares multiple hands
Returns the winner and the
rankings of each hand
Params
------
`hand` : list of hands
A list of lists of cards, one for
each player to compare. Ideally they
should all have the same number of
cards
Returns
-------
tuple
Returns a tuple where:
- the first element is a one-hot
encoded array of the winners;
- the second element is a list
with the indices of the winner;
- the last element is a list of
hand rankings, as output by
the `eval_hand` function, one
for each hand.
"""
rankings = [eval_hand(hand) for hand in hands]
return compare_rankings(rankings) + (rankings,) |
from petisco.application.application_config import ApplicationConfig
from tests.integration.flask_app.toy_app.application.use_cases.create_user import (
CreateUser,
)
from tests.integration.flask_app.toy_app.application.use_cases.get_user_name import (
GetUserName,
)
class UseCaseBuilder:
@staticmethod
def create_user():
config = ApplicationConfig.get_instance()
user_repository = config.repositories_provider()["user"]
return CreateUser(
user_repository=user_repository, event_manager=config.event_manager
)
@staticmethod
def get_user_name():
config = ApplicationConfig.get_instance()
user_repository = config.repositories_provider()["user"]
return GetUserName(user_repository=user_repository)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
import re
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages # noqa
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M,
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def main():
setup(
name="pydocxs3upload",
version=find_version('pydocxs3upload', '__init__.py'),
description="PyDocX mixin - S3 image upload",
author="Jeremy Baker, Chirica Gheorghe",
author_email="[email protected], [email protected]",
url="https://github.com/jhubert/pydocx-s3-images",
platforms=["any"],
license="BSD",
packages=find_packages(),
scripts=[],
zip_safe=False,
install_requires=[
'requests>=2.7.0',
'six>=1.10.0'
],
cmdclass={},
classifiers=[
# "Development Status :: 1 - Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: XML",
],
long_description=read('README.rst'),
)
if __name__ == '__main__':
main()
|
from datetime import datetime, timedelta
DATETIME_FMT = "%a, %d %b %Y %H:%M:%S %Z"
class Repository:
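    # Thin read-only wrapper around a repository object, exposing only the
    # fields reported on below (the wrapped object is assumed to follow the
    # PyGithub Repository attribute names: stargazers_count, forks_count,
    # open_issues_count, last_modified, ...).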
def __init__(self, repo):
self.repo = repo
def to_list(self):
"""
Return relevant fields as list, e.g. for exporting as CSV.
"""
return [
datetime.now().isoformat(),
self.name,
self.stargazers_count,
self.forks_count,
self.open_issues_count,
]
@property
def description(self):
return self.repo.description
@property
def forks_count(self):
return self.repo.forks_count
@property
def homepage(self):
"""
If the project has an official homepage listed, prefer it over the GitHub URL.
"""
if self.repo.homepage:
return self.repo.homepage
        url = self.repo.git_url.replace("git", "https", 1)
        # str.rstrip strips a set of characters, not a suffix, so remove ".git" explicitly
        return url[:-len(".git")] if url.endswith(".git") else url
@property
def last_commit(self):
last_modified = datetime.strptime(self.repo.last_modified, DATETIME_FMT)
delta = datetime.today() - last_modified
last_modified_in_days = delta / timedelta(days=1)
return round(last_modified_in_days)
@property
def name(self):
return self.repo.name
@property
def open_issues_count(self):
return self.repo.open_issues_count
@property
def organization_name(self):
return self.repo.organization.name
@property
def stargazers_count(self):
return self.repo.stargazers_count
|
import logging
from .routes import routes
from loafer.managers import LoaferManager
logger = logging.getLogger(__name__)
logger.info("STARTED APLICATION - GOAT TWEETER ANALYZER")
manager = LoaferManager(routes=routes)
manager.run() |
import ssl
import pandas as pd
import json
from urllib import request as rq
def get_ssl_certificate():
url = "https://fga.unb.br/guia-fga/horario-dos-onibus-intercampi"
context = ssl._create_unverified_context()
response = rq.urlopen(url, context=context)
html = response.read()
return html
def test_get_from_fga_site():
html = get_ssl_certificate()
tables = pd.read_html(html, header=0)
first_table = tables[0]
# convert to dataframe
first_table = pd.DataFrame(first_table)
first_table = first_table.to_json(orient='values')
# convert string to json
first_table = json.loads(first_table)
result = []
for item in first_table:
element = {}
element = {'horario_saida': item[0], 'destino': item[2],
'origem': item[1]}
result.append(element)
first = ""
for each in result:
first = each['destino']
break
assert first == 'Darcy Ribeiro'
|
# Challenge 032: Write a program that reads any year and says whether it is a LEAP YEAR.
import datetime
ano = int(input('What year is it? Enter 0 to analyse the current year instead. '))
if ano == 0:
    ano = datetime.date.today().year
if ano%4==0 and ano%100 !=0 or ano%400==0:  # != means "not equal to"
    print(f'{ano} is a LEAP YEAR.')
else:
    print(f'{ano} is not a LEAP YEAR.') |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
from sensor_correction.utils import sensor_unproject
from sensor_correction.gp_cpu import GPRegressor
def select_data(temps, poses, all_depths_ir, all_depths_rgb, Kinv, xy, target='rgb'):
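    # Assemble GP training data: for every pose/temperature pair, unproject the
    # sampled pixels xy with the IR depth to get (x, y, z), append the sensor
    # temperature as a fourth feature, and use the depth difference to the
    # reference frame (RGB or IR captured at the first temperature) as the
    # regression target.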
sel_xyzt = []
sel_deltas = []
for p in poses:
if target == 'rgb':
depth_target = all_depths_rgb[(p, temps[0])]
elif target == 'ir':
depth_target = all_depths_ir[(p, temps[0])]
d_target = depth_target[xy[:,1], xy[:,0]]
for t in temps:
depth_ir = all_depths_ir[(p, t)] # Actual
d_ir = depth_ir[xy[:,1], xy[:,0]]
xyz = sensor_unproject(xy, d_ir, Kinv)
xyzt = np.empty((xyz.shape[0], 4), dtype=np.float32)
xyzt[:, :3] = xyz
xyzt[:, 3] = t
delta = d_target - d_ir
mask = d_ir > 0.
"""
plt.imshow(depth_rgb - depth_ir)
plt.plot(xy[:,0][mask], xy[:,1][mask], 'k+')
plt.colorbar()
plt.show()
"""
sel_xyzt.append(xyzt[mask])
sel_deltas.append(delta[mask])
sel_xyzt = np.concatenate(sel_xyzt)
sel_deltas = np.concatenate(sel_deltas)
return sel_xyzt, sel_deltas
if __name__ == '__main__':
np.random.seed(1)
import argparse
parser = argparse.ArgumentParser(description='Train Gaussian Process for depth correction.')
parser.add_argument('depth', type=str, help='Preprocessed depth data')
parser.add_argument('intrinsics', type=str, help='Camera intrinsics')
parser.add_argument('--output', type=str, help='Result regressor filename', default='gpr.pkl')
parser.add_argument('--target', type=str, help='Target depth to train for, RGB or IR.', default='rgb')
args = parser.parse_args()
# Load depth data
data = np.load(args.depth)
temps = data['temps']
poses = data['poses']
all_depths_ir = data['depth_ir'][()]
all_depths_rgb = data['depth_rgb'][()]
h, w = all_depths_ir[(poses[0], temps[0])].shape
# Load intrinsics
K = np.loadtxt(args.intrinsics).reshape(3,3)
Kinv = np.linalg.inv(K)
# Create train and test data
x = np.linspace(0, w-1, 8, dtype=np.int32)
y = np.linspace(0, h-1, 8, dtype=np.int32)
xx, yy = np.meshgrid(x, y)
xy_train = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1)))
train_xyzt, train_deltae = select_data(
temps[::2],
poses,
all_depths_ir,
all_depths_rgb,
Kinv,
xy_train,
target=args.target.lower())
xy_test = np.random.uniform(0, [w-1,h-1], size=(10,2)).astype(np.int32)
test_xyzt, test_deltae = select_data(
temps[::2],
poses[::2],
all_depths_ir,
all_depths_rgb,
Kinv,
xy_test,
target=args.target.lower())
r = GPRegressor()
r.fit(train_xyzt, train_deltae, length_scale=[0.5, 0.5, 0.5, 10], signal_std=1., noise_std=0.002, optimize=True, normalize=True, repeat=2)
ypred = r.predict(test_xyzt)
d = ypred - test_deltae
rmse = np.sqrt(np.mean(np.square(d)))
print('RMSE {:e}'.format(rmse))
print('Optimized length scale {}'.format(r.length_scale))
print('Optimized signal std {}'.format(r.signal_std))
print('Optimized noise std {}'.format(r.noise_std))
r.save(args.output) |
import logging
from datetime import datetime
from pathlib import Path
from secrets import token_bytes
from typing import Dict, List, Optional, Tuple
from blspy import AugSchemeMPL, G1Element, PrivateKey
from chiapos import DiskPlotter
from ecostake.daemon.keychain_proxy import KeychainProxy, connect_to_keychain_and_validate, wrap_local_keychain
from ecostake.plotting.util import add_plot_directory, stream_plot_info_ph, stream_plot_info_pk
from ecostake.types.blockchain_format.proof_of_space import ProofOfSpace
from ecostake.types.blockchain_format.sized_bytes import bytes32
from ecostake.util.bech32m import decode_puzzle_hash
from ecostake.util.config import config_path_for_filename, load_config
from ecostake.util.keychain import Keychain
from ecostake.util.path import mkdir
from ecostake.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_local_sk, master_sk_to_pool_sk
log = logging.getLogger(__name__)
class PlotKeys:
def __init__(
self,
farmer_public_key: G1Element,
pool_public_key: Optional[G1Element],
pool_contract_address: Optional[str],
):
self.farmer_public_key = farmer_public_key
self.pool_public_key = pool_public_key
self.pool_contract_address = pool_contract_address
@property
def pool_contract_puzzle_hash(self) -> Optional[bytes32]:
if self.pool_contract_address is not None:
return decode_puzzle_hash(self.pool_contract_address)
return None
class PlotKeysResolver:
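    # Resolves the farmer/pool keys used for plotting: explicitly supplied hex
    # keys are taken as-is, otherwise keys are derived from the wallet keychain
    # (through the daemon keychain proxy when connect_to_daemon is set, or the
    # local keychain otherwise).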
def __init__(
self,
farmer_public_key: str,
alt_fingerprint: int,
pool_public_key: str,
pool_contract_address: str,
root_path: Path,
log: logging.Logger,
connect_to_daemon=False,
):
self.farmer_public_key = farmer_public_key
self.alt_fingerprint = alt_fingerprint
self.pool_public_key = pool_public_key
self.pool_contract_address = pool_contract_address
self.root_path = root_path
self.log = log
self.connect_to_daemon = connect_to_daemon
self.resolved_keys: Optional[PlotKeys] = None
async def resolve(self) -> PlotKeys:
if self.resolved_keys is not None:
return self.resolved_keys
keychain_proxy: Optional[KeychainProxy] = None
if self.connect_to_daemon:
keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log)
else:
keychain_proxy = wrap_local_keychain(Keychain(), log=self.log)
farmer_public_key: G1Element
if self.farmer_public_key is not None:
farmer_public_key = G1Element.from_bytes(bytes.fromhex(self.farmer_public_key))
else:
farmer_public_key = await self.get_farmer_public_key(keychain_proxy)
pool_public_key: Optional[G1Element] = None
if self.pool_public_key is not None:
if self.pool_contract_address is not None:
raise RuntimeError("Choose one of pool_contract_address and pool_public_key")
pool_public_key = G1Element.from_bytes(bytes.fromhex(self.pool_public_key))
else:
if self.pool_contract_address is None:
# If nothing is set, farms to the provided key (or the first key)
pool_public_key = await self.get_pool_public_key(keychain_proxy)
self.resolved_keys = PlotKeys(farmer_public_key, pool_public_key, self.pool_contract_address)
return self.resolved_keys
async def get_sk(self, keychain_proxy: Optional[KeychainProxy] = None) -> Optional[Tuple[PrivateKey, bytes]]:
sk: Optional[PrivateKey] = None
if keychain_proxy:
try:
if self.alt_fingerprint is not None:
sk = await keychain_proxy.get_key_for_fingerprint(self.alt_fingerprint)
else:
sk = await keychain_proxy.get_first_private_key()
except Exception as e:
log.error(f"Keychain proxy failed with error: {e}")
else:
sk_ent: Optional[Tuple[PrivateKey, bytes]] = None
keychain: Keychain = Keychain()
if self.alt_fingerprint is not None:
sk_ent = keychain.get_private_key_by_fingerprint(self.alt_fingerprint)
else:
sk_ent = keychain.get_first_private_key()
if sk_ent:
sk = sk_ent[0]
return sk
async def get_farmer_public_key(self, keychain_proxy: Optional[KeychainProxy] = None) -> G1Element:
sk: Optional[PrivateKey] = await self.get_sk(keychain_proxy)
if sk is None:
raise RuntimeError(
"No keys, please run 'ecostake keys add', 'ecostake keys generate' or provide a public key with -f"
)
return master_sk_to_farmer_sk(sk).get_g1()
async def get_pool_public_key(self, keychain_proxy: Optional[KeychainProxy] = None) -> G1Element:
sk: Optional[PrivateKey] = await self.get_sk(keychain_proxy)
if sk is None:
raise RuntimeError(
"No keys, please run 'ecostake keys add', 'ecostake keys generate' or provide a public key with -p"
)
return master_sk_to_pool_sk(sk).get_g1()
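# Descriptive note (added): convenience wrapper that builds a PlotKeysResolver and resolves the keys in one call.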
async def resolve_plot_keys(
farmer_public_key: str,
alt_fingerprint: int,
pool_public_key: str,
pool_contract_address: str,
root_path: Path,
log: logging.Logger,
connect_to_daemon=False,
) -> PlotKeys:
return await PlotKeysResolver(
farmer_public_key, alt_fingerprint, pool_public_key, pool_contract_address, root_path, log, connect_to_daemon
).resolve()
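# Descriptive note (added): create args.num plots with the resolved keys; returns (created, existing) dicts mapping plot id to plot file path.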
async def create_plots(
args, keys: PlotKeys, root_path, use_datetime=True, test_private_keys: Optional[List] = None
) -> Tuple[Dict[bytes32, Path], Dict[bytes32, Path]]:
config_filename = config_path_for_filename(root_path, "config.yaml")
config = load_config(root_path, config_filename)
if args.tmp2_dir is None:
args.tmp2_dir = args.tmp_dir
assert (keys.pool_public_key is None) != (keys.pool_contract_puzzle_hash is None)
num = args.num
if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
log.warning(f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet")
if args.size < 22:
log.warning("k under 22 is not supported. Increasing k to 22")
args.size = 22
if keys.pool_public_key is not None:
log.info(
f"Creating {num} plots of size {args.size}, pool public key: "
f"{bytes(keys.pool_public_key).hex()} farmer public key: {bytes(keys.farmer_public_key).hex()}"
)
else:
assert keys.pool_contract_puzzle_hash is not None
log.info(
f"Creating {num} plots of size {args.size}, pool contract address: "
f"{keys.pool_contract_address} farmer public key: {bytes(keys.farmer_public_key).hex()}"
)
tmp_dir_created = False
if not args.tmp_dir.exists():
mkdir(args.tmp_dir)
tmp_dir_created = True
tmp2_dir_created = False
if not args.tmp2_dir.exists():
mkdir(args.tmp2_dir)
tmp2_dir_created = True
mkdir(args.final_dir)
created_plots: Dict[bytes32, Path] = {}
existing_plots: Dict[bytes32, Path] = {}
for i in range(num):
# Generate a random master secret key
if test_private_keys is not None:
assert len(test_private_keys) == num
sk: PrivateKey = test_private_keys[i]
else:
sk = AugSchemeMPL.key_gen(token_bytes(32))
# The plot public key is the combination of the harvester and farmer keys
# New plots will also include a taproot of the keys, for extensibility
include_taproot: bool = keys.pool_contract_puzzle_hash is not None
plot_public_key = ProofOfSpace.generate_plot_public_key(
master_sk_to_local_sk(sk).get_g1(), keys.farmer_public_key, include_taproot
)
# The plot id is based on the harvester, farmer, and pool keys
if keys.pool_public_key is not None:
plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(keys.pool_public_key, plot_public_key)
plot_memo: bytes32 = stream_plot_info_pk(keys.pool_public_key, keys.farmer_public_key, sk)
else:
assert keys.pool_contract_puzzle_hash is not None
plot_id = ProofOfSpace.calculate_plot_id_ph(keys.pool_contract_puzzle_hash, plot_public_key)
plot_memo = stream_plot_info_ph(keys.pool_contract_puzzle_hash, keys.farmer_public_key, sk)
if args.plotid is not None:
log.info(f"Debug plot ID: {args.plotid}")
plot_id = bytes32(bytes.fromhex(args.plotid))
if args.memo is not None:
log.info(f"Debug memo: {args.memo}")
plot_memo = bytes.fromhex(args.memo)
# Log the plot memo for dev debugging
plot_memo_str: str = plot_memo.hex()
log.info(f"Memo: {plot_memo_str}")
dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")
if use_datetime:
filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
else:
filename = f"plot-k{args.size}-{plot_id}.plot"
full_path: Path = args.final_dir / filename
resolved_final_dir: str = str(Path(args.final_dir).resolve())
plot_directories_list: List[str] = config["harvester"]["plot_directories"]
if args.exclude_final_dir:
log.info(f"NOT adding directory {resolved_final_dir} to harvester for farming")
if resolved_final_dir in plot_directories_list:
log.warning(f"Directory {resolved_final_dir} already exists for harvester, please remove it manually")
else:
if resolved_final_dir not in plot_directories_list:
# Adds the directory to the plot directories if it is not present
log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
config = add_plot_directory(root_path, resolved_final_dir)
if not full_path.exists():
log.info(f"Starting plot {i + 1}/{num}")
# Creates the plot. This will take a long time for larger plots.
plotter: DiskPlotter = DiskPlotter()
plotter.create_plot_disk(
str(args.tmp_dir),
str(args.tmp2_dir),
str(args.final_dir),
filename,
args.size,
plot_memo,
plot_id,
args.buffer,
args.buckets,
args.stripe_size,
args.num_threads,
args.nobitfield,
)
created_plots[plot_id] = full_path
else:
log.info(f"Plot {filename} already exists")
existing_plots[plot_id] = full_path
log.info("Summary:")
if tmp_dir_created:
try:
args.tmp_dir.rmdir()
except Exception:
log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")
if tmp2_dir_created:
try:
args.tmp2_dir.rmdir()
except Exception:
log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")
log.info(f"Created a total of {len(created_plots)} new plots")
for created_path in created_plots.values():
log.info(created_path.name)
return created_plots, existing_plots
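# --- Hedged usage sketch (added, not part of the original module) ---
# Assumes ecostake keeps chia's DEFAULT_ROOT_PATH helper and an argparse-style
# namespace carrying exactly the attribute names create_plots() reads above;
# adjust the values and paths for a real run.
#
#   import asyncio
#   from argparse import Namespace
#   from ecostake.util.default_root import DEFAULT_ROOT_PATH  # assumed location
#
#   args = Namespace(size=32, num=1, buffer=4096, buckets=128, stripe_size=65536,
#                    num_threads=2, nobitfield=False, tmp_dir=Path("/tmp/plot"),
#                    tmp2_dir=None, final_dir=Path("/plots"), plotid=None,
#                    memo=None, exclude_final_dir=False)
#   keys = asyncio.run(resolve_plot_keys(None, None, None, None, DEFAULT_ROOT_PATH, log))
#   created, existing = asyncio.run(create_plots(args, keys, DEFAULT_ROOT_PATH))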
|